├── .github
│   ├── dependabot.yml
│   ├── docker.override.conf
│   └── workflows
│       ├── ci.yml
│       └── release.yml
├── .gitignore
├── .rustfmt.toml
├── CHANGELOG.md
├── CONTRIBUTING.md
├── Cargo.toml
├── DESIGN_PRINCIPLES.md
├── LICENSE-Apache-2.0
├── LICENSE-MIT
├── Pipfile
├── Pipfile.lock
├── README.md
├── docs
│   ├── _headers
│   ├── _redirects
│   ├── bounty.md
│   ├── contributing_docs.md
│   ├── css
│   │   ├── extra.css
│   │   └── tc-header.css
│   ├── favicon.ico
│   ├── features
│   │   ├── configuration.md
│   │   ├── exec_commands.md
│   │   └── wait_strategies.md
│   ├── getting_help.md
│   ├── icons
│   │   ├── github.svg
│   │   ├── slack.svg
│   │   ├── stackoverflow.svg
│   │   └── twitter.svg
│   ├── index.md
│   ├── js
│   │   └── tc-header.js
│   ├── language-logos
│   │   ├── dotnet.svg
│   │   ├── go.svg
│   │   ├── haskell.svg
│   │   ├── java.svg
│   │   ├── nodejs.svg
│   │   ├── python.svg
│   │   ├── ruby.svg
│   │   └── rust.svg
│   ├── logo.png
│   ├── logo.svg
│   ├── quickstart
│   │   ├── community_modules.md
│   │   └── testcontainers.md
│   ├── system_requirements
│   │   └── docker.md
│   ├── testcontainers-logo.svg
│   └── theme
│       ├── main.html
│       └── partials
│           ├── header.html
│           ├── nav.html
│           └── tc-header.html
├── mkdocs.yml
├── release-plz.toml
├── requirements.txt
├── runtime.txt
├── testcontainers
│   ├── Cargo.toml
│   ├── README.md
│   ├── src
│   │   ├── core.rs
│   │   ├── core
│   │   │   ├── async_drop.rs
│   │   │   ├── client.rs
│   │   │   ├── client
│   │   │   │   ├── bollard_client.rs
│   │   │   │   ├── exec.rs
│   │   │   │   └── factory.rs
│   │   │   ├── containers
│   │   │   │   ├── async_container.rs
│   │   │   │   ├── async_container
│   │   │   │   │   └── exec.rs
│   │   │   │   ├── mod.rs
│   │   │   │   ├── request.rs
│   │   │   │   ├── sync_container.rs
│   │   │   │   └── sync_container
│   │   │   │       ├── exec.rs
│   │   │   │       └── sync_reader.rs
│   │   │   ├── copy.rs
│   │   │   ├── env.rs
│   │   │   ├── env
│   │   │   │   └── config.rs
│   │   │   ├── error.rs
│   │   │   ├── image.rs
│   │   │   ├── image
│   │   │   │   ├── exec.rs
│   │   │   │   └── image_ext.rs
│   │   │   ├── logs.rs
│   │   │   ├── logs
│   │   │   │   ├── consumer.rs
│   │   │   │   ├── consumer
│   │   │   │   │   └── logging_consumer.rs
│   │   │   │   └── stream.rs
│   │   │   ├── mounts.rs
│   │   │   ├── network.rs
│   │   │   ├── ports.rs
│   │   │   └── wait
│   │   │       ├── cmd_wait.rs
│   │   │       ├── exit_strategy.rs
│   │   │       ├── health_strategy.rs
│   │   │       ├── http_strategy.rs
│   │   │       ├── log_strategy.rs
│   │   │       └── mod.rs
│   │   ├── images
│   │   │   ├── generic.rs
│   │   │   └── mod.rs
│   │   ├── lib.rs
│   │   ├── runners
│   │   │   ├── async_runner.rs
│   │   │   ├── mod.rs
│   │   │   └── sync_runner.rs
│   │   └── watchdog.rs
│   └── tests
│       ├── async_runner.rs
│       ├── dual_stack_host_ports.rs
│       └── sync_runner.rs
└── testimages
    ├── .dockerignore
    ├── Cargo.toml
    ├── README.md
    ├── build.rs
    └── src
        ├── bin
        │   ├── no_expose_port.rs
        │   └── simple_web_server.rs
        ├── dockerfiles
        │   ├── no_expose_port.dockerfile
        │   └── simple_web_server.dockerfile
        └── lib.rs
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |   - package-ecosystem: "github-actions"
4 |     directory: "/"
5 |     schedule:
6 |       interval: "weekly"
7 |     commit-message:
8 |       prefix: ci
9 |       include: scope
10 |   - package-ecosystem: "cargo"
11 |     directory: "/"
12 |     schedule:
13 |       interval: "weekly"
14 |     groups:
15 |       bollard:
16 |         patterns:
17 |           - "bollard"
18 |           - "bollard-stubs"
19 |     commit-message:
20 |       prefix: chore
21 |       prefix-development: test
22 |       include: scope
23 |
--------------------------------------------------------------------------------
/.github/docker.override.conf:
--------------------------------------------------------------------------------
1 | # This is a systemd unit override file that enables the Docker Remote API on localhost.
2 | # To take effect, it should be placed at /etc/systemd/system/docker.service.d/override.conf.
3 | [Service]
4 | ExecStart=
5 | ExecStart=/usr/bin/dockerd -H fd:// -H tcp://127.0.0.1:2375
6 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: Continuous Integration
2 |
3 | on:
4 |   pull_request:
5 |   merge_group:
6 |   push:
7 |     branches: [ main ]
8 |
9 | concurrency:
10 |   group: ${{ github.ref }}
11 |   cancel-in-progress: true
12 |
13 | jobs:
14 |   msrv:
15 |     name: Build with MSRV
16 |     runs-on: ubuntu-latest
17 |     steps:
18 |       - name: Checkout sources
19 |         uses: actions/checkout@v4
20 |       - uses: Swatinem/rust-cache@v2
21 |       - name: Get current MSRV from Cargo.toml
22 |         id: current_msrv
23 |         run: |
24 |           msrv=$(cat Cargo.toml | grep rust-version | sed 's/.* = "//; s/"//')
25 |           echo "msrv=$msrv" >> $GITHUB_OUTPUT
26 |       - name: Setup Rust version
27 |         uses: dtolnay/rust-toolchain@master
28 |         with:
29 |           toolchain: ${{steps.current_msrv.outputs.msrv}}
30 |       - uses: Swatinem/rust-cache@v2.7.3
31 |       - uses: taiki-e/install-action@v2
32 |         with:
33 |           tool: cargo-hack
34 |       - name: Build
35 |         run: cargo hack build --at-least-one-of ring,aws-lc-rs,ssl --feature-powerset --depth 2 --keep-going
36 |
37 |   test:
38 |     name: Test
39 |     runs-on: ubuntu-latest
40 |     strategy:
41 |       fail-fast: false
42 |       matrix:
43 |         toolchain:
44 |           - stable
45 |           - nightly
46 |     steps:
47 |       - name: Checkout sources
48 |         uses: actions/checkout@v4
49 |       - uses: Swatinem/rust-cache@v2
50 |       - name: Setup Rust
51 |         uses: dtolnay/rust-toolchain@master
52 |         with:
53 |           toolchain: ${{ matrix.toolchain }}
54 |       - name: Enable Docker Remote API on Localhost
55 |         shell: bash
56 |         run: |
57 |           sudo mkdir -p /etc/systemd/system/docker.service.d/
58 |           sudo cp ./.github/docker.override.conf /etc/systemd/system/docker.service.d/override.conf
59 |           sudo systemctl daemon-reload
60 |           sudo systemctl restart docker
61 |       - uses: taiki-e/install-action@v2
62 |         with:
63 |           tool: cargo-hack
64 |       - name: Tests
65 |         run: cargo hack test --at-least-one-of ring,aws-lc-rs,ssl --feature-powerset --depth 2 --clean-per-run
66 |
67 |   fmt:
68 |     name: Rustfmt check
69 |     runs-on: ubuntu-latest
70 |     steps:
71 |       - name: Checkout sources
72 |         uses: actions/checkout@v4
73 |       - uses: Swatinem/rust-cache@v2
74 |       - uses: actions-rs/toolchain@v1
75 |         with:
76 |           profile: minimal
77 |           toolchain: nightly
78 |           components: rustfmt
79 |           override: true
80 |       - name: Rustfmt check
81 |         uses: actions-rs/cargo@v1
82 |         with:
83 |           command: fmt
84 |           args: --all -- --check
85 |
86 |   clippy:
87 |     name: Clippy check
88 |     runs-on: ubuntu-latest
89 |     strategy:
90 |       fail-fast: false
91 |       matrix:
92 |         toolchain:
93 |           - stable
94 |           - nightly
95 |     steps:
96 |       - name: Checkout sources
97 |         uses: actions/checkout@v4
98 |       - uses: Swatinem/rust-cache@v2
99 |       - name: Setup Rust
100 |         uses: dtolnay/rust-toolchain@master
101 |         with:
102 |           toolchain: ${{ matrix.toolchain }}
103 |           components: clippy
104 |       - name: Clippy check
105 |         uses: actions-rs/clippy-check@v1
106 |         with:
107 |           token: ${{ secrets.GITHUB_TOKEN }}
108 |           args: --all-features
109 |
110 |   prlint:
111 |     name: PR name check
112 |     runs-on: ubuntu-latest
113 |     if: github.event_name == 'pull_request'
114 |     steps:
115 |       - uses: actions/checkout@v4
116 |         with:
117 |           fetch-depth: 0
118 |       - uses: CondeNast/conventional-pull-request-action@v0.2.0
119 |         env:
120 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
121 |         with:
122 |           # PRs are merged with the squash strategy, so individual commit messages don't matter much
123 |           ignoreCommits: "true"
124 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 |
3 | permissions:
4 |   pull-requests: write
5 |   contents: write
6 |
7 | on:
8 |   # Only manual trigger of release
9 |   workflow_dispatch:
10 |
11 | jobs:
12 |   release:
13 |     name: Release
14 |     runs-on: ubuntu-latest
15 |     steps:
16 |       - name: Checkout repository
17 |         uses: actions/checkout@v4
18 |         with:
19 |           fetch-depth: 0
20 |       - name: Install Rust toolchain
21 |         uses: dtolnay/rust-toolchain@stable
22 |       - name: Run release-plz
23 |         uses: MarcoIeni/release-plz-action@v0.5
24 |         env:
25 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
26 |           CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
27 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by .ignore support plugin (hsz.mobi)
2 | ### Rust template
3 | # Generated by Cargo
4 | # will have compiled files and executables
5 | /target/
6 |
7 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries
8 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html
9 | Cargo.lock
10 |
11 | # These are backup files generated by rustfmt
12 | **/*.rs.bk
13 | ### JetBrains template
14 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
15 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
16 |
17 | .idea
18 |
19 | ### Emacs template
20 | # -*- mode: gitignore; -*-
21 | *~
22 | \#*\#
23 | /.emacs.desktop
24 | /.emacs.desktop.lock
25 | *.elc
26 | auto-save-list
27 | tramp
28 | .\#*
29 |
30 | # Org-mode
31 | .org-id-locations
32 | *_archive
33 |
34 | # flymake-mode
35 | *_flymake.*
36 |
37 | # eshell files
38 | /eshell/history
39 | /eshell/lastdir
40 |
41 | # elpa packages
42 | /elpa/
43 |
44 | # reftex files
45 | *.rel
46 |
47 | # AUCTeX auto folder
48 | /auto/
49 |
50 | # cask packages
51 | .cask/
52 | dist/
53 |
54 | # Flycheck
55 | flycheck_*.el
56 |
57 | # server auth directory
58 | /server/
59 |
60 | # projectiles files
61 | .projectile
62 |
63 | # directory configuration
64 | .dir-locals.el
65 | ### macOS template
66 | # General
67 | .DS_Store
68 | .AppleDouble
69 | .LSOverride
70 |
71 | # Icon must end with two \r
72 | Icon
73 |
74 | # Thumbnails
75 | ._*
76 |
77 | # Files that might appear in the root of a volume
78 | .DocumentRevisions-V100
79 | .fseventsd
80 | .Spotlight-V100
81 | .TemporaryItems
82 | .Trashes
83 | .VolumeIcon.icns
84 | .com.apple.timemachine.donotpresent
85 |
86 | # Directories potentially created on remote AFP share
87 | .AppleDB
88 | .AppleDesktop
89 | Network Trash Folder
90 | Temporary Items
91 | .apdisk
92 | ### Linux template
93 | *~
94 |
95 | # temporary files which can be created if a process still has a handle open of a deleted file
96 | .fuse_hidden*
97 |
98 | # KDE directory preferences
99 | .directory
100 |
101 | # Linux trash folder which might appear on any partition or disk
102 | .Trash-*
103 |
104 | # .nfs files are created when an open file is removed but is still being accessed
105 | .nfs*
106 |
107 | # Intellij IDEA files
108 | .idea
109 | *.iml
110 | *.iws
111 | *.ipr
112 |
--------------------------------------------------------------------------------
/.rustfmt.toml:
--------------------------------------------------------------------------------
1 | group_imports = "StdExternalCrate"
2 | imports_granularity = "Crate"
3 | indent_style = "Block"
4 | merge_derives = true
5 | reorder_imports = true
6 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | First, thank you for contributing to `testcontainers-rs`.
4 |
5 | ## Licensing
6 |
7 | Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
8 |
9 | ## Code Contributions
10 |
11 | ### Setting up local development
12 |
13 | - Ensure you have an [up-to-date Rust toolchain](https://rustup.rs/), with `clippy` and `rustfmt` components installed
14 | - Install the [cargo-hack](https://github.com/taiki-e/cargo-hack) subcommand (recommended)
15 | - Fork this repository
16 |
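If you have `cargo-hack` installed, you can run the same feature-powerset check that CI runs (the command below is taken from `.github/workflows/ci.yml`); for quick local iteration a plain `cargo test` is usually enough:

```sh
# Mirrors the CI test job: exercises feature combinations with at least one TLS provider enabled.
cargo hack test --at-least-one-of ring,aws-lc-rs,ssl --feature-powerset --depth 2
```
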
17 | ### Formatting
18 |
19 | We rely on `rustfmt` (`nightly`):
20 | ```shell
21 | cargo +nightly fmt --all -- --check
22 | ```
23 |
24 | ### Commits
25 |
26 | Strive for creating atomic commits.
27 | That is, commits should capture a single feature, including its tests.
28 | Ideally, each commit passes all CI checks (builds, tests, formatting, linter, ...).
29 |
30 | When in doubt, squashing everything into a single, larger commit is preferable to commits that don't compile or are otherwise incomplete.
31 |
32 | For writing good commit messages, you may find [this](https://chris.beams.io/posts/git-commit/) guide helpful.
33 |
34 | ## Contact
35 |
36 | Feel free to drop by in the [testcontainers-rust channel](https://testcontainers.slack.com/archives/C048EPGRCER) of our [Slack workspace](https://testcontainers.slack.com).
37 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | resolver = "2"
3 | members = [
4 | "testcontainers",
5 | "testimages",
6 | ]
7 |
8 | [workspace.package]
9 | authors = ["Thomas Eizinger", "Artem Medvedev ", "Mervyn McCreight"]
10 | edition = "2021"
11 | keywords = ["docker", "testcontainers"]
12 | license = "MIT OR Apache-2.0"
13 | readme = "README.md"
14 | repository = "https://github.com/testcontainers/testcontainers-rs"
15 | rust-version = "1.82"
16 |
17 | [workspace.dependencies]
18 | testimages = { path = "testimages" }
19 |
--------------------------------------------------------------------------------
/DESIGN_PRINCIPLES.md:
--------------------------------------------------------------------------------
1 | # Design principles
2 |
3 | This document aims to capture the design principles that went into this library.
4 | It should serve as a reference point when making decisions on what features to include or exclude.
5 |
6 | ## Simple
7 |
8 | One of the most important goals that we want to adhere to is creating a _simple_ API.
9 | Overall, this means keeping the API as small as possible to get the task done.
10 | When in doubt, we'd rather not add flags or configuration options for certain use cases.
11 |
12 | Tests should be easy to write, easy to understand, and easy to maintain.
13 | `testcontainers` aims to support this as much as possible.
14 | Having too many configuration options makes it harder for users to achieve this goal.
15 |
16 | Another advantage of a small, public API is that we have to make fewer breaking changes.
17 | This makes upgrades easier for our users.
18 |
19 | ## Reliable
20 |
21 | Tests need to be reliable to provide value.
22 | We strive to make `testcontainers` as reliable as possible and try to control as many aspects of the container as we can to make sure it works consistently.
23 |
24 | One consequence of this decision is that the container _tag_ is typically not configurable for images that ship with `testcontainers`.
25 | Whilst an image behind a tag can also change, image authors tend to preserve compatibility there.
26 | If we were to allow users to change the `tag` of an image, we wouldn't be able to guarantee that it works because we cannot test all combinations.
27 |
28 | ## Ease of use
29 |
30 | The library should be easy to use.
31 | For example, users should be able to make their own implementation of `Image` without much boilerplate.
32 | In fact, the path forward may very well be that we stop shipping a lot of images in the crate and instead require users to create their own images.
33 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | Copyright (c) 2018-2023 CoBloX
2 | Copyright (c) 2024 testcontainers
3 | Copyright (c) 2024 Artem Medvedev
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
6 |
7 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
10 |
--------------------------------------------------------------------------------
/Pipfile:
--------------------------------------------------------------------------------
1 | [[source]]
2 | name = "pypi"
3 | url = "https://pypi.org/simple"
4 | verify_ssl = true
5 |
6 | [dev-packages]
7 |
8 | [packages]
9 | mkdocs = "==1.5.3"
10 | mkdocs-codeinclude-plugin = "==0.2.1"
11 | mkdocs-include-markdown-plugin = "==6.0.4"
12 | mkdocs-material = "==9.5.18"
13 | mkdocs-markdownextradata-plugin = "==0.2.5"
14 |
15 | [requires]
16 | python_version = "3.8"
17 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Testcontainers-rs
2 |
3 |
4 | [Crates.io](https://crates.io/crates/testcontainers)
5 | [Documentation](https://docs.rs/testcontainers)
6 | [Slack](https://join.slack.com/t/testcontainers/shared_invite/zt-2gra37tid-n9xDJGjjVb7hMRanGjowkw)
7 |
8 | Testcontainers-rs is the official Rust language fork of [http://testcontainers.org](http://testcontainers.org).
9 |
10 | ## Usage
11 |
12 | ### `testcontainers` is the core crate
13 |
14 | The crate provides an API for working with containers in a test environment.
15 |
16 | 1. Depend on `testcontainers`
17 | 2. Implement `testcontainers::core::Image` for the Docker images you need
18 | 3. Run it with any available runner from `testcontainers::runners::*` (use the `blocking` feature for the synchronous API)
19 |
20 | #### Example:
21 |
22 | - Blocking API (under `blocking` feature)
23 |
24 | ```rust
25 | use testcontainers::{core::{IntoContainerPort, WaitFor}, runners::SyncRunner, GenericImage, ImageExt};
26 |
27 | #[test]
28 | fn test_redis() {
29 | let container = GenericImage::new("redis", "7.2.4")
30 | .with_exposed_port(6379.tcp())
31 | .with_wait_for(WaitFor::message_on_stdout("Ready to accept connections"))
32 | .with_network("bridge")
33 | .with_env_var("DEBUG", "1")
34 | .start()
35 | .expect("Failed to start Redis");
36 | }
37 | ```
38 |
39 | - Async API
40 |
41 | ```rust
42 | use testcontainers::{core::{IntoContainerPort, WaitFor}, runners::AsyncRunner, GenericImage, ImageExt};
43 |
44 | #[tokio::test]
45 | async fn test_redis() {
46 | let container = GenericImage::new("redis", "7.2.4")
47 | .with_exposed_port(6379.tcp())
48 | .with_wait_for(WaitFor::message_on_stdout("Ready to accept connections"))
49 | .with_network("bridge")
50 | .with_env_var("DEBUG", "1")
51 | .start()
52 | .await
53 | .expect("Failed to start Redis");
54 | }
55 | ```
56 |
57 | ### Ready-to-use images
58 |
59 | The easiest way to use `testcontainers` is to depend on ready-to-use images (aka modules).
60 |
61 | Modules are available as a community-maintained crate: [testcontainers-modules](https://github.com/testcontainers/testcontainers-rs-modules-community)
62 |
63 | ## License
64 |
65 | Licensed under either of
66 |
67 | - Apache License, Version 2.0
68 | ([LICENSE-APACHE](LICENSE-Apache-2.0) or http://www.apache.org/licenses/LICENSE-2.0)
69 | - MIT license
70 | ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
71 |
72 | at your option.
73 |
--------------------------------------------------------------------------------
/docs/_headers:
--------------------------------------------------------------------------------
1 | /search/search_index.json
2 | Access-Control-Allow-Origin: *
3 |
--------------------------------------------------------------------------------
/docs/_redirects:
--------------------------------------------------------------------------------
1 | # Each redirect rule must be listed on a separate line, with the original path followed by the new path or URL.
2 |
--------------------------------------------------------------------------------
/docs/bounty.md:
--------------------------------------------------------------------------------
1 | # Testcontainers issue bounty policy
2 |
3 | ## General
4 |
5 | We want to use issue bounties to encourage contributions in areas that are important to our sponsors, or tricky to solve.
6 | This includes bug fixes and new features.
7 | We hope that this will provide incentives to tackle issues, and gives sponsors a way to influence where development time is expended.
8 | We also want to reward our contributors, some of whom make huge efforts to improve Testcontainers and help their fellow developers!
9 |
10 | !!! note
11 |     It's early days for our use of sponsorship, so we expect to evolve this policy over time, possibly without notice. In the event of any ambiguity or dispute, the [Testcontainers org core maintainers](#organisation-core-maintainers) have the final say.
12 |
13 | If you'd like to suggest an improvement to this policy, we'd be grateful for your input - please raise a pull request!
14 |
15 | ## For Sponsors
16 |
17 | Sponsors will be able to create a number of 'bounties' per month, varying according to sponsorship tier.
18 |
19 | As a sponsor, the process for creating a bounty is as follows:
20 |
21 | * Raise an issue, or find an existing issue that describes the bug or feature.
22 | * Start a discussion with the [Testcontainers org core maintainers](#organisation-core-maintainers) to agree that the issue is suitable for a bounty, and how much the reward amount should be.
23 | * Once agreed, we will assign a label to the issue so that interested developers can find it.
24 |
25 | Sponsors can create up to 1 or 3 bounties (according to tier) _per calendar month_ - i.e. the counter resets on the 1st of each month.
26 | If a sponsor does not use their full quota of bounty credits in a calendar month, the unused credits cannot be rolled over to the next month.
27 |
28 | Bounties will expire 90 days after creation - after this time, if they have not been resolved we will close them.
29 |
30 | ## For Contributors
31 |
32 | As a contributor, the process for working on an issue with a bounty attached is:
33 |
34 | * Find an issue with a bounty attached to it and no assignee, clarify the requirements if necessary, and consider how you would approach working on it.
35 | * Start a discussion with the [Testcontainers org core maintainers](#organisation-core-maintainers) and the bounty owner. To avoid unpleasant surprises at review time, we'll try to confirm that we're happy with your proposed solution.
36 | * If we're happy with your proposed solution, we will assign the ticket to you.
37 | * Once work is complete, we will go through the PR process as usual and merge the work when finished.
38 | * To receive the bounty reward, [raise an invoice](https://opencollective.com/testcontainers/expenses/new) on Open Collective, following the expenses policy on that page.
39 |
40 | Note that a 20% cut of the bounty amount will normally be assigned to project maintainers for PR review work.
41 | We believe this reflects that PR review can often be a significant amount of work for some issues - and also gives maintainers an incentive to complete the review and unlock the bounty reward!
42 | Some pull requests are so well done that very little review is necessary. If that happens, the maintainers may choose not to take a cut of the bounty, and instead release the full amount to the contributor.
43 |
44 | ## Organisation core maintainers
45 |
46 | The organisation core maintainers are:
47 |
48 | * Richard North (@rnorth)
49 | * Sergei Egorov (@bsideup)
50 | * Kevin Wittek (@kiview)
51 |
--------------------------------------------------------------------------------
/docs/contributing_docs.md:
--------------------------------------------------------------------------------
1 | # Contributing to documentation
2 |
3 | The Testcontainers for Rust documentation is a static site built with [MkDocs](https://www.mkdocs.org/).
4 | We use the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme, which offers a number of useful extensions to MkDocs.
5 |
6 | In addition we use a [custom plugin](https://github.com/rnorth/mkdocs-codeinclude-plugin) for inclusion of code snippets.
7 |
8 | We publish our documentation using Netlify.
9 |
10 | ## Previewing rendered content
11 |
12 | ### Using Python locally
13 |
14 | * Ensure that you have Python 3.8.0 or higher.
15 | * Create a Python virtualenv. E.g. `python3 -m venv tc-venv`.
16 | * Activate the virtualenv. E.g. `source tc-venv/bin/activate`.
17 | * Run `pip3 install -r requirements.txt && ./tc-venv/bin/mkdocs serve` from the `testcontainers-rs` root directory. It will start a local auto-updating MkDocs server.
18 |
19 | ### PR Preview deployments
20 |
21 | Note that documentation for pull requests will automatically be published by Netlify as 'deploy previews'.
22 | These deployment previews can be accessed via the `deploy/netlify` check that appears for each pull request.
23 |
--------------------------------------------------------------------------------
/docs/css/extra.css:
--------------------------------------------------------------------------------
1 | h1, h2, h3, h4, h5, h6 {
2 | font-family: 'Rubik', sans-serif;
3 | }
4 |
5 | [data-md-color-scheme="testcontainers"] {
6 | --md-primary-fg-color: #00bac2;
7 | --md-accent-fg-color: #361E5B;
8 | --md-typeset-a-color: #0C94AA;
9 | --md-primary-fg-color--dark: #291A3F;
10 | --md-default-fg-color--lightest: #F2F4FE;
11 | --md-footer-fg-color: #361E5B;
12 | --md-footer-fg-color--light: #746C8F;
13 | --md-footer-fg-color--lighter: #C3BEDE;
14 | --md-footer-bg-color: #F7F9FD;
15 | --md-footer-bg-color--dark: #F7F9FD;
16 | }
17 |
18 | .card-grid {
19 | display: grid;
20 | gap: 10px;
21 | }
22 |
23 | .tc-version {
24 | font-size: 1.1em;
25 | text-align: center;
26 | margin: 0;
27 | }
28 |
29 | @media (min-width: 680px) {
30 | .card-grid {
31 | grid-template-columns: repeat(3, 1fr);
32 | }
33 | }
34 |
35 | body .card-grid-item {
36 | display: flex;
37 | align-items: center;
38 | gap: 20px;
39 | border: 1px solid #C3BEDE;
40 | border-radius: 6px;
41 | padding: 16px;
42 | font-weight: 600;
43 | color: #9991B5;
44 | background: #F2F4FE;
45 | }
46 |
47 | body .card-grid-item:hover,
48 | body .card-grid-item:focus {
49 | color: #9991B5;
50 | }
51 |
52 | .card-grid-item[href] {
53 | color: var(--md-primary-fg-color--dark);
54 | background: transparent;
55 | }
56 |
57 | .card-grid-item[href]:hover,
58 | .card-grid-item[href]:focus {
59 | background: #F2F4FE;
60 | color: var(--md-primary-fg-color--dark);
61 | }
62 |
63 | .community-callout-wrapper {
64 | padding: 30px 10px 0 10px;
65 | }
66 |
67 | .community-callout {
68 | color: #F2F4FE;
69 | background: linear-gradient(10.88deg, rgba(102, 56, 242, 0.4) 9.56%, #6638F2 100%), #291A3F;
70 | box-shadow: 0px 20px 45px rgba(#9991B5, 0.75);
71 | border-radius: 10px;
72 | padding: 20px;
73 | }
74 |
75 | .community-callout h2 {
76 | font-size: 1.15em;
77 | margin: 0 0 20px 0;
78 | color: #F2F4FE;
79 | text-align: center;
80 | }
81 |
82 | .community-callout ul {
83 | list-style: none;
84 | padding: 0;
85 | display: flex;
86 | justify-content: space-between;
87 | gap: 10px;
88 | margin-top: 20px;
89 | margin-bottom: 0;
90 | }
91 |
92 | .community-callout a {
93 | transition: opacity 0.2s ease;
94 | }
95 |
96 | .community-callout a:hover {
97 | opacity: 0.5;
98 | }
99 |
100 | .community-callout a img {
101 | height: 1.75em;
102 | width: auto;
103 | aspect-ratio: 1;
104 | }
105 |
106 | @media (min-width: 1220px) {
107 | .community-callout-wrapper {
108 | padding: 40px 0 0;
109 | }
110 |
111 | .community-callout h2 {
112 | font-size: 1.25em;
113 | }
114 |
115 | .community-callout a img {
116 | height: 2em;
117 | }
118 | }
119 |
120 | @media (min-width: 1600px) {
121 | .community-callout h2 {
122 | font-size: 1.15em;
123 | }
124 |
125 | .community-callout a img {
126 | height: 1.75em;
127 | }
128 | }
--------------------------------------------------------------------------------
/docs/css/tc-header.css:
--------------------------------------------------------------------------------
1 |
2 | :root {
3 | --color-catskill: #F2F4FE;
4 | --color-catskill-45: rgba(242, 244, 254, 0.45);
5 | --color-mist: #E7EAFB;
6 | --color-fog: #C3C7E6;
7 | --color-smoke: #9991B5;
8 | --color-smoke-75: rgba(153, 145, 181, 0.75);
9 | --color-storm: #746C8F;
10 | --color-topaz: #00BAC2;
11 | --color-pacific: #17A6B2;
12 | --color-teal: #027F9E;
13 | --color-eggplant: #291A3F;
14 | --color-plum: #361E5B;
15 |
16 | }
17 |
18 | #site-header {
19 | color: var(--color-storm);
20 | background: #fff;
21 | font-family: 'Rubik', Arial, Helvetica, sans-serif;
22 | font-size: 12px;
23 | line-height: 1.5;
24 | position: relative;
25 | width: 100%;
26 | z-index: 4;
27 | display: flex;
28 | align-items: center;
29 | justify-content: space-between;
30 | gap: 20px;
31 | padding: 20px;
32 | }
33 |
34 | body.tc-header-active #site-header {
35 | z-index: 5;
36 | }
37 |
38 | #site-header .brand {
39 | display: flex;
40 | justify-content: space-between;
41 | gap: 20px;
42 | width: 100%;
43 | }
44 |
45 | #site-header .logo {
46 | display: flex;
47 | }
48 |
49 | #site-header .logo img,
50 | #site-header .logo svg {
51 | height: 30px;
52 | width: auto;
53 | max-width: 100%;
54 | }
55 |
56 | #site-header #mobile-menu-toggle {
57 | background: none;
58 | border: none;
59 | display: flex;
60 | align-items: center;
61 | gap: 10px;
62 | cursor: pointer;
63 | color: var(--color-eggplant);
64 | padding: 0;
65 | margin: 0;
66 | font-weight: 500;
67 | }
68 |
69 | body.mobile-menu #site-header #mobile-menu-toggle {
70 | color: var(--color-topaz);
71 | }
72 |
73 | #site-header ul {
74 | list-style: none;
75 | padding: 0;
76 | margin: 0;
77 | }
78 |
79 | #site-header nav {
80 | display: none;
81 | }
82 |
83 | #site-header .menu-item {
84 | display: flex;
85 | }
86 |
87 | #site-header .menu-item button,
88 | #site-header .menu-item a {
89 | min-height: 30px;
90 | display: flex;
91 | gap: 6px;
92 | align-items: center;
93 | border: none;
94 | background: none;
95 | cursor: pointer;
96 | padding: 0;
97 | font-weight: 500;
98 | color: var(--color-eggplant);
99 | text-decoration: none;
100 | font-size: 14px;
101 | transition: color 0.2s ease;
102 | white-space: nowrap;
103 | }
104 |
105 | #site-header .menu-item .badge {
106 | color: white;
107 | font-size: 10px;
108 | padding: 2px 6px;
109 | background-color: #0FD5C6;
110 | text-align: center;
111 | text-decoration: none;
112 | display: inline-block;
113 | border-radius: 6px;
114 | &:hover {
115 |
116 | }
117 | }
118 |
119 | #site-header .menu-item button:hover,
120 | #site-header .menu-item a:hover {
121 | color: var(--color-topaz);
122 | }
123 |
124 | #site-header .menu-item button .icon-external,
125 | #site-header .menu-item a .icon-externa {
126 | margin-left: auto;
127 | opacity: .3;
128 | flex-shrink: 0;
129 | }
130 |
131 | #site-header .menu-item button .icon-caret,
132 | #site-header .menu-item a .icon-caret {
133 | opacity: .3;
134 | height: 8px;
135 | }
136 |
137 | #site-header .menu-item button .icon-slack,
138 | #site-header .menu-item a .icon-slack,
139 | #site-header .menu-item button .icon-github,
140 | #site-header .menu-item a .icon-github {
141 | height: 18px;
142 | }
143 |
144 | #site-header .menu-item .menu-dropdown {
145 | flex-direction: column;
146 | }
147 |
148 | body #site-header .menu-item .menu-dropdown {
149 | display: none;
150 | }
151 |
152 | #site-header .menu-item.has-children.active .menu-dropdown {
153 | display: flex;
154 | z-index: 10;
155 | }
156 |
157 | #site-header .menu-dropdown-item + .menu-dropdown-item {
158 | border-top: 1px solid var(--color-mist);
159 | }
160 |
161 | #site-header .menu-dropdown-item a {
162 | display: flex;
163 | gap: 10px;
164 | align-items: center;
165 | padding: 10px 20px;
166 | font-weight: 500;
167 | color: var(--color-eggplant);
168 | text-decoration: none;
169 | transition:
170 | color 0.2s ease,
171 | background 0.2s ease;
172 | }
173 |
174 | #site-header .menu-dropdown-item a .icon-external {
175 | margin-left: auto;
176 | color: var(--color-fog);
177 | flex-shrink: 0;
178 | opacity: 1;
179 | }
180 |
181 | #site-header .menu-dropdown-item a:hover {
182 | background-color: var(--color-catskill-45);
183 | }
184 |
185 | #site-header .menu-dropdown-item a:hover .icon-external {
186 | color: var(--color-topaz);
187 | }
188 |
189 | #site-header .menu-dropdown-item a img {
190 | height: 24px;
191 | }
192 |
193 | .md-header {
194 | background-color: var(--color-catskill);
195 | color: var(--color-eggplant);
196 | }
197 |
198 | .md-header.md-header--shadow {
199 | box-shadow: none;
200 | }
201 |
202 | .md-header__inner.md-grid {
203 | max-width: 100%;
204 | padding: 1.5px 20px;
205 | }
206 |
207 | [dir=ltr] .md-header__title {
208 | margin: 0;
209 | }
210 |
211 | .md-header__topic:first-child {
212 | font-size: 16px;
213 | font-weight: 500;
214 | font-family: 'Rubik', Arial, Helvetica, sans-serif;
215 | }
216 |
217 | .md-header__title.md-header__title--active .md-header__topic,
218 | .md-header__title[data-md-state=active] .md-header__topic {
219 | opacity: 1;
220 | pointer-events: all;
221 | transform: translateX(0);
222 | transition: none;
223 | z-index: 0;
224 | }
225 |
226 | .md-header__topic a {
227 | max-width: 100%;
228 | overflow: hidden;
229 | text-overflow: ellipsis;
230 | transition: color .2s ease;
231 | }
232 |
233 | .md-header__topic a:hover {
234 | color: var(--color-topaz);
235 | }
236 |
237 | div.md-header__source {
238 | width: auto;
239 | }
240 |
241 | div.md-source__repository {
242 | max-width: 100%;
243 | }
244 |
245 | .md-main {
246 | padding: 0 12px;
247 | }
248 |
249 | @media screen and (min-width: 60em) {
250 | form.md-search__form {
251 | background-color: #FBFBFF;
252 | color: var(--color-storm);
253 | }
254 |
255 | form.md-search__form:hover {
256 | background-color: #fff;
257 | }
258 |
259 | .md-search__input + .md-search__icon {
260 | color: var(--color-plum);
261 | }
262 |
263 | .md-search__input::placeholder {
264 | color: var(--color-smoke);
265 | }
266 | }
267 |
268 | @media (min-width: 500px) {
269 | #site-header {
270 | font-size: 16px;
271 | padding: 20px 40px;
272 | }
273 | #site-header .logo img,
274 | #site-header .logo svg {
275 | height: 48px;
276 | }
277 |
278 | #site-header .menu-item button .icon-caret,
279 | #site-header .menu-item a .icon-caret {
280 | height: 10px;
281 | }
282 |
283 | #site-header .menu-item button .icon-slack,
284 | #site-header .menu-item a .icon-slack,
285 | #site-header .menu-item button .icon-github,
286 | #site-header .menu-item a .icon-github {
287 | height: 24px;
288 | }
289 |
290 | .md-header__inner.md-grid {
291 | padding: 5px 40px;
292 | }
293 |
294 | .md-main {
295 | padding: 0 32px;
296 | }
297 | }
298 |
299 | @media (min-width: 1024px) {
300 | #site-header #mobile-menu-toggle {
301 | display: none;
302 | }
303 |
304 | #site-header nav {
305 | display: block;
306 | }
307 |
308 | #site-header .menu {
309 | display: flex;
310 | justify-content: center;
311 | gap: 30px;
312 | }
313 |
314 | #site-header .menu-item {
315 | align-items: center;
316 | position: relative;
317 | }
318 |
319 | #site-header .menu-item button,
320 | #site-header .menu-item a {
321 | min-height: 48px;
322 | gap: 8px;
323 | font-size: 16px;
324 | }
325 |
326 | #site-header .menu-item .menu-dropdown {
327 | position: absolute;
328 | top: 100%;
329 | right: -8px;
330 | border: 1px solid var(--color-mist);
331 | border-radius: 6px;
332 | background: #fff;
333 | box-shadow: 0px 30px 35px var(--color-smoke-75);
334 | min-width: 200px;
335 | }
336 | }
337 |
338 |
339 | @media (max-width: 1023px) {
340 | #site-header {
341 | flex-direction: column;
342 | }
343 |
344 | body.mobile-tc-header-active #site-header {
345 | z-index: 5;
346 | }
347 |
348 | body.mobile-menu #site-header nav {
349 | display: flex;
350 | }
351 |
352 | #site-header nav {
353 | position: absolute;
354 | top: calc(100% - 5px);
355 | width: calc(100% - 80px);
356 | flex-direction: column;
357 | border: 1px solid var(--color-mist);
358 | border-radius: 6px;
359 | background: #fff;
360 | box-shadow: 0px 30px 35px var(--color-smoke-75);
361 | min-width: 200px;
362 | }
363 |
364 | #site-header .menu-item {
365 | flex-direction: column;
366 | }
367 | #site-header .menu-item + .menu-item {
368 | border-top: 1px solid var(--color-mist);
369 | }
370 |
371 | #site-header .menu-item button,
372 | #site-header .menu-item a {
373 | padding: 10px 20px;
374 | }
375 |
376 | #site-header .menu-item.has-children.active .menu-dropdown {
377 | border-top: 1px solid var(--color-mist);
378 | }
379 |
380 | #site-header .menu-dropdown-item a {
381 | padding: 10px 20px 10px 30px;
382 | }
383 | }
384 |
385 | @media (max-width: 499px) {
386 | #site-header nav {
387 | width: calc(100% - 40px);
388 | }
389 | }
--------------------------------------------------------------------------------
/docs/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/testcontainers/testcontainers-rs/49671234edddde15a2bcc5e93d94b1033aa7228d/docs/favicon.ico
--------------------------------------------------------------------------------
/docs/features/configuration.md:
--------------------------------------------------------------------------------
1 | # Custom configuration
2 |
3 | You can override some default properties if your environment requires that.
4 |
5 | ## Configuration locations
6 |
7 | The configuration may be loaded from multiple locations. Properties are considered in the following order:
8 |
9 | 1. Environment variables
10 | 2. `~/.testcontainers.properties` file (a Java properties file, enabled by the `properties-config` feature)
11 |    Example locations:
12 |    **Linux:** `/home/myuser/.testcontainers.properties`
13 |    **Windows:** `C:/Users/myuser/.testcontainers.properties`
14 |    **macOS:** `/Users/myuser/.testcontainers.properties`
15 |
16 | ## Docker host resolution
17 |
18 | The host is resolved in the following order:
19 |
20 | 1. Docker host from the `tc.host` property in the `~/.testcontainers.properties` file.
21 | 2. `DOCKER_HOST` environment variable.
22 | 3. Docker host from the `docker.host` property in the `~/.testcontainers.properties` file.
23 | 4. Else, the default Docker socket will be returned.
24 |
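For illustration, a `~/.testcontainers.properties` file that points the library at a TCP daemon could contain the following (the address is a placeholder, and the file is only read when the `properties-config` feature is enabled):

```
tc.host=tcp://127.0.0.1:2375
```
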
25 | ## Docker authentication
26 |
27 | Sometimes the Docker images you use live in a private Docker registry.
28 | For that reason, Testcontainers for Rust gives you the ability to read the Docker configuration and retrieve the authentication for a given registry.
29 | Configuration is fetched in the following order:
30 |
31 | 1. `DOCKER_AUTH_CONFIG` environment variable, unmarshalling the string value from its JSON representation and using it as the Docker config.
32 | 2. `DOCKER_CONFIG` environment variable, as an alternative path to the directory containing Docker `config.json` file.
33 | 3. else it will load the default Docker config file, which lives in the user's home, e.g. `~/.docker/config.json`.
34 |
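As a sketch, the environment-variable route uses the same JSON shape as a Docker `config.json`; the registry and credentials below are made up (`bXl1c2VyOm15cGFzcw==` is just base64 of `myuser:mypass`):

```sh
# Illustrative values only - substitute your own registry and encoded credentials.
export DOCKER_AUTH_CONFIG='{"auths":{"registry.example.com":{"auth":"bXl1c2VyOm15cGFzcw=="}}}'
```
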
35 | ## bollard, rustls and SSL Cryptography providers
36 | `testcontainers` uses [`bollard`](https://docs.rs/bollard/latest/bollard/) to interact with the Docker API.
37 |
38 | `bollard` in turn has options provided by `rustls` to configure its SSL cryptography providers.
39 |
40 | The `testcontainers` feature flags to control this are as follows:
41 | * `ring` - use `rustls` with `ring` as the cryptography provider (default)
42 | * `aws-lc-rs` - use `rustls` with `aws-lc-rs` as the cryptography provider
43 | * `ssl` - use `rustls` with a custom cryptography provider configuration - see [bollard](https://docs.rs/bollard/latest/bollard/#feature-flags) and [rustls](https://docs.rs/rustls/latest/rustls/#cryptography-providers) documentation for more.
44 |
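As a hedged sketch of selecting a non-default provider (the version number is illustrative; check the crate's feature list before copying this):

```toml
# Hypothetical dependency entry: disable default features to opt out of the default `ring` provider.
[dev-dependencies]
testcontainers = { version = "0.23", default-features = false, features = ["aws-lc-rs"] }
```
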
--------------------------------------------------------------------------------
/docs/features/exec_commands.md:
--------------------------------------------------------------------------------
1 |
2 | # Command Execution in Containers
3 |
4 | Some test scenarios require running commands within a container.
5 | In order to achieve this, the `testcontainers` library provides the ability
6 | to execute commands in 3 different ways:
7 |
8 | - `exec` method of [ContainerAsync] (or [Container])
9 |
10 | Allows running a command in an already running container.
11 |
12 | - [Image::exec_after_start](https://docs.rs/testcontainers/latest/testcontainers/core/trait.Image.html#method.exec_after_start)
13 |
14 | Only if you implement your own `Image`: allows defining commands
15 | to be executed after the container is started and ready.
16 |
17 | - [Image::exec_before_ready](https://docs.rs/testcontainers/latest/testcontainers/core/trait.Image.html#method.exec_before_ready)
18 |
19 | Only if you implement your own `Image`: allows defining commands
20 | to be executed after the container has started,
21 | but before the `Image::ready_conditions` are awaited.
22 |
23 | Here we will focus on the first option, which is the most common one.
24 | The method expects an [ExecCommand] struct,
25 | and returns an [ExecResult] (or [SyncExecResult]) struct.
26 |
27 | ## [ExecCommand]
28 |
29 | The [ExecCommand] struct represents a command to be executed within a container.
30 | It includes the command itself and conditions to be checked
31 | on the command output and the container.
32 |
33 | ### ExecCommand Usage
34 |
35 | To create a new [ExecCommand]:
36 |
37 | ```rust
38 | let command = ExecCommand::new(vec!["echo", "Hello, World!"])
39 | .with_container_ready_conditions(vec![/* conditions */])
40 | .with_cmd_ready_condition(CmdWaitFor::message_on_stdout("Hello, World!"));
41 | ```
42 |
43 | ## [CmdWaitFor]
44 |
45 | The [CmdWaitFor] enum defines conditions to be checked on the command's output.
46 |
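A minimal sketch, reusing the constructor shown above (the command and the expected output are illustrative):

```rust
use testcontainers::core::{CmdWaitFor, ExecCommand};

// The exec call is considered finished once "PONG" appears on the command's stdout.
let ping = ExecCommand::new(vec!["redis-cli", "ping"])
    .with_cmd_ready_condition(CmdWaitFor::message_on_stdout("PONG"));
```
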
47 | ## [ExecResult] / [SyncExecResult]
48 |
49 | For the async API, the [ExecResult] struct represents the result
50 | of an executed command in a container.
51 | For the non-async API (`blocking` feature), the [SyncExecResult] struct is used instead.
52 |
53 | Both structs provide access to the command's exit code, stdout and stderr.
54 |
55 | ### ExecResult Usage
56 |
57 | To execute a command and handle the result:
58 |
59 | ```rust
60 | let result = container.exec(command).await?;
61 | let exit_code = result.exit_code().await?;
62 | let stdout = result.stdout_to_vec().await?;
63 | let stderr = result.stderr_to_vec().await?;
64 | ```
65 |
66 | [Container]: https://docs.rs/testcontainers/latest/testcontainers/core/struct.Container.html
67 | [ContainerAsync]: https://docs.rs/testcontainers/latest/testcontainers/core/struct.ContainerAsync.html
68 | [ExecCommand]: https://docs.rs/testcontainers/latest/testcontainers/core/struct.ExecCommand.html
69 | [CmdWaitFor]: https://docs.rs/testcontainers/latest/testcontainers/core/enum.CmdWaitFor.html
70 | [ExecResult]: https://docs.rs/testcontainers/latest/testcontainers/core/struct.ExecResult.html
71 | [SyncExecResult]: https://docs.rs/testcontainers/latest/testcontainers/core/struct.SyncExecResult.html
72 |
--------------------------------------------------------------------------------
/docs/features/wait_strategies.md:
--------------------------------------------------------------------------------
1 | # Waiting for containers to start or be ready
2 |
3 | There are scenarios where your tests need the external services they rely on to reach a specific state that is particularly useful for testing. This is generally approximated as 'Can we talk to this container over the network?' or 'Let's wait until the container is running and reaches a certain state'.
4 |
5 | _Testcontainers for
6 | Rust_ comes with the concept of `wait strategy`, which allows your tests to actually wait for
7 | the most useful conditions to be met, before continuing with their execution.
8 |
9 | The strategy is defined by the [`WaitFor`](https://docs.rs/testcontainers/latest/testcontainers/core/enum.WaitFor.html)
10 | enum with the following variants:
11 |
12 | * `StdOutMessage` - wait for a specific message to appear on the container's stdout
13 | * `StdErrMessage` - wait for a specific message to appear on the container's stderr
14 | * `Healthcheck` - wait for the container to be healthy
15 | * `Http` - wait for an HTTP(S) response with predefined conditions (see [`HttpWaitStrategy`](https://docs.rs/testcontainers/latest/testcontainers/core/wait/struct.HttpWaitStrategy.html) for more details)
16 | * `Duration` - wait for a specific duration. Usually less preferable and better to combine with other strategies.
17 |
18 | [`Image`](https://docs.rs/testcontainers/latest/testcontainers/core/trait.Image.html) implementation
19 | is responsible for returning the appropriate `WaitFor` strategies.
20 | For [`GenericImage`](https://docs.rs/testcontainers/latest/testcontainers/struct.GenericImage.html)
21 | you can use the `with_wait_for` method to specify the wait strategy.
22 |
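A minimal sketch (image, tag and log message are illustrative) of picking a wait strategy for a `GenericImage`:

```rust
use testcontainers::{core::{IntoContainerPort, WaitFor}, runners::AsyncRunner, GenericImage};

#[tokio::test]
async fn waits_for_redis_log_line() {
    // The container only counts as started once this log line shows up on stdout.
    let _container = GenericImage::new("redis", "7.2.4")
        .with_exposed_port(6379.tcp())
        .with_wait_for(WaitFor::message_on_stdout("Ready to accept connections"))
        .start()
        .await
        .expect("Redis did not become ready");
}
```
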
23 | ## Startup timeout and Poll interval
24 |
25 | Ordinarily Testcontainers will wait for up to 60 seconds for containers to start.
26 | If the default 60s timeout is not sufficient, it can be updated with the
27 | [`ImageExt::with_startup_timeout(duration)`](https://docs.rs/testcontainers/latest/testcontainers/core/trait.ImageExt.html#method.with_startup_timeout) method.
28 |
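A hedged sketch of raising the timeout for a slow image (the image name and durations are made up):

```rust
use std::time::Duration;

use testcontainers::{core::WaitFor, runners::AsyncRunner, GenericImage, ImageExt};

#[tokio::test]
async fn gives_a_slow_service_more_time() {
    // Allow up to 120s instead of the default 60s before the wait strategy times out.
    let _container = GenericImage::new("my-slow-service", "latest")
        .with_wait_for(WaitFor::message_on_stdout("listening"))
        .with_startup_timeout(Duration::from_secs(120))
        .start()
        .await
        .expect("container did not become ready within 120s");
}
```
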
--------------------------------------------------------------------------------
/docs/getting_help.md:
--------------------------------------------------------------------------------
1 | # Getting help
2 |
3 | We hope that you find Testcontainers intuitive to use and reliable.
4 | However, sometimes things don't go the way we'd expect, and we'd like to try and help out if we can.
5 |
6 | To contact the Testcontainers team and other users you can:
7 |
8 | * Join our [Slack team](https://slack.testcontainers.org)
9 | * [Search our issues tracker](https://github.com/testcontainers/testcontainers-rs/issues), or raise a new issue if you find any bugs or have suggested improvements
10 | * [Search Stack Overflow](https://stackoverflow.com/questions/tagged/testcontainers), especially among posts tagged with `testcontainers`
11 |
--------------------------------------------------------------------------------
/docs/icons/github.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/icons/slack.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/icons/stackoverflow.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/icons/twitter.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # Welcome to Testcontainers for Rust!
2 |
3 | Not using Rust? Here are other supported languages!
4 |
14 |
15 | ## About Testcontainers for Rust
16 |
17 | _Testcontainers for Rust_ is a Rust library that makes it simple to create and clean up container-based dependencies for
18 | automated integration/smoke tests. The clean, easy-to-use API enables developers to programmatically define containers
19 | that should be run as part of a test and clean up those resources when the test is done.
20 |
21 | To start using _Testcontainers for Rust_ please read our quickstart guide for:
22 |
23 | * [Ready-to-use `testcontainers-modules` crate](quickstart/community_modules.md)
24 | * [Core `testcontainers` crate](quickstart/testcontainers.md)
25 |
26 | ## License
27 |
28 | This project is open source and you can have a look at the code on
29 | [GitHub](https://github.com/testcontainers/testcontainers-rs). See [LICENSE](https://github.com/testcontainers/testcontainers-rs/blob/main/LICENSE-MIT).
30 |
31 |
--------------------------------------------------------------------------------
/docs/js/tc-header.js:
--------------------------------------------------------------------------------
1 | const mobileToggle = document.getElementById("mobile-menu-toggle");
2 | const mobileSubToggle = document.getElementById("mobile-submenu-toggle");
3 | function toggleMobileMenu() {
4 | document.body.classList.toggle('mobile-menu');
5 | document.body.classList.toggle("mobile-tc-header-active");
6 | }
7 | function toggleMobileSubmenu() {
8 | document.body.classList.toggle('mobile-submenu');
9 | }
10 | if (mobileToggle)
11 | mobileToggle.addEventListener("click", toggleMobileMenu);
12 | if (mobileSubToggle)
13 | mobileSubToggle.addEventListener("click", toggleMobileSubmenu);
14 |
15 | const allParentMenuItems = document.querySelectorAll("#site-header .menu-item.has-children");
16 | function clearActiveMenuItem() {
17 | document.body.classList.remove("tc-header-active");
18 | allParentMenuItems.forEach((item) => {
19 | item.classList.remove("active");
20 | });
21 | }
22 | function setActiveMenuItem(e) {
23 | clearActiveMenuItem();
24 | e.currentTarget.closest(".menu-item").classList.add("active");
25 | document.body.classList.add("tc-header-active");
26 | }
27 | allParentMenuItems.forEach((item) => {
28 | const trigger = item.querySelector(":scope > a, :scope > button");
29 |
30 | trigger.addEventListener("click", (e) => {
31 | if (e.currentTarget.closest(".menu-item").classList.contains("active")) {
32 | clearActiveMenuItem();
33 | } else {
34 | setActiveMenuItem(e);
35 | }
36 | });
37 |
38 | trigger.addEventListener("mouseenter", (e) => {
39 | setActiveMenuItem(e);
40 | });
41 |
42 | item.addEventListener("mouseleave", (e) => {
43 | clearActiveMenuItem();
44 | });
45 | });
--------------------------------------------------------------------------------
/docs/language-logos/dotnet.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/language-logos/go.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/language-logos/haskell.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/language-logos/java.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/language-logos/nodejs.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/language-logos/python.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/language-logos/ruby.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/language-logos/rust.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/testcontainers/testcontainers-rs/49671234edddde15a2bcc5e93d94b1033aa7228d/docs/logo.png
--------------------------------------------------------------------------------
/docs/logo.svg:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/docs/quickstart/community_modules.md:
--------------------------------------------------------------------------------
1 | _Testcontainers for Rust_ are provided as two separate crates: `testcontainers` and `testcontainers-modules`.
2 |
3 | While `testcontainers` is the core crate that provides an API for working with containers in a test environment,
4 | `testcontainers-modules` is a community-maintained crate that provides ready-to-use images (aka modules).
5 |
6 | Usually, it's easier to depend on ready-to-use images, as it saves time and effort.
7 | This guide will show you how to use it.
8 |
9 | ## 1. Usage
10 |
11 | 1. Depend on [testcontainers-modules] with the necessary features (e.g. `postgres`, `minio`, etc.), as shown below
12 |    - Enable the `blocking` feature if you want to use modules
13 |      within synchronous tests (it is the feature-gate for `SyncRunner`)
14 | 2. Then start using the modules inside your tests with either `AsyncRunner` or `SyncRunner`
15 |
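For step 1, a hedged example of adding the dependency (the selected features are illustrative):

```sh
cargo add testcontainers-modules --dev --features blocking,postgres
```
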
16 | Simple example of using `postgres` module with `SyncRunner` (`blocking` and `postgres` features enabled):
17 |
18 | ```rust
19 | use testcontainers_modules::{postgres, testcontainers::runners::SyncRunner};
20 |
21 | #[test]
22 | fn test_with_postgres() {
23 | let container = postgres::Postgres::default().start().unwrap();
24 | let host_port = container.get_host_port_ipv4(5432).unwrap();
25 | let connection_string = &format!(
26 | "postgres://postgres:postgres@127.0.0.1:{host_port}/postgres",
27 | );
28 | }
29 | ```
30 |
31 | > You don't need to explicitly depend on `testcontainers`, as it's a re-exported dependency
32 | > of `testcontainers-modules`, with the version aligned between the two crates.
33 | > For example:
34 | >
35 | >```rust
36 | >use testcontainers_modules::testcontainers::ImageExt;
37 | >```
38 |
39 | You can also see [examples](https://github.com/testcontainers/testcontainers-rs-modules-community/tree/main/examples)
40 | for more details.
41 |
42 | ## 2. How to override module defaults
43 |
44 | Sometimes it's necessary to override the default settings of a module (e.g. `tag`, `name`, environment variables, etc.).
45 | In order to do that, just use the extension trait [ImageExt](https://docs.rs/testcontainers/latest/testcontainers/core/trait.ImageExt.html)
46 | that returns a customized [ContainerRequest](https://docs.rs/testcontainers/latest/testcontainers/core/struct.ContainerRequest.html):
47 |
48 | ```rust
49 | use testcontainers_modules::{
50 | redis::Redis,
51 | testcontainers::{ContainerRequest, ImageExt},
52 | };
53 |
54 |
55 | /// Create a Redis module with `6.2-alpine` tag and custom password
56 | fn create_redis() -> ContainerRequest<Redis> {
57 | Redis::default()
58 | .with_tag("6.2-alpine")
59 | .with_env_var(("REDIS_PASSWORD", "my_secret_password"))
60 | }
61 | ```
62 |
--------------------------------------------------------------------------------
/docs/quickstart/testcontainers.md:
--------------------------------------------------------------------------------
1 | _Testcontainers for Rust_ plays well with the native `cargo test`.
2 |
3 | The ideal use case is for integration or end to end tests. It helps you to spin
4 | up and manage the dependencies life cycle via Docker.
5 |
6 | ## 1. System requirements
7 |
8 | Please read the [system requirements](../system_requirements/) page before you start.
9 |
10 | ## 2. Install _Testcontainers for Rust_
11 |
12 | - If your tests are async:
13 | ```sh
14 | cargo add testcontainers
15 | ```
16 | - If you don't use async and want to use the blocking API:
17 | ```sh
18 | cargo add testcontainers --features blocking
19 | ```
20 |
21 | ## 3. Spin up Redis
22 |
23 | ```rust
24 | use testcontainers::{
25 | core::{IntoContainerPort, WaitFor},
26 | runners::AsyncRunner,
27 | GenericImage,
28 | };
29 |
30 | #[tokio::test]
31 | async fn test_redis() {
32 | let _container = GenericImage::new("redis", "7.2.4")
33 | .with_exposed_port(6379.tcp())
34 | .with_wait_for(WaitFor::message_on_stdout("Ready to accept connections"))
35 | .start()
36 | .await
37 | .unwrap();
38 | }
39 | ```
40 |
41 | Here we use the `GenericImage` struct to create a Redis container.
42 |
43 | * `GenericImage::new` accepts the image name and tag.
44 | * `with_exposed_port` adds a port to be exposed from the container (can be called multiple times).
45 | * `with_wait_for` allows you to pass readiness conditions (`WaitFor`) for the container. It
46 | is important to set this because it tells us when the container is
47 | ready to receive traffic. In this case, we check for the logs we know come
48 | from Redis, telling us that it is ready to accept requests.
49 | * `start` is a function of the `AsyncRunner` trait that starts the container.
50 | The same logic applies to `SyncRunner` if you are using the `blocking` feature.
51 |
52 | When you use `with_exposed_port` you have to imagine yourself using `docker run -p <port>`.
53 | When you do so, `dockerd` maps the selected `<port>` from inside the
54 | container to a random one available on your host.
55 |
56 | In the previous example, we expose `6379` for `tcp` traffic to the outside. This
57 | allows Redis to be reachable from your code that runs outside the container, but
58 | it also makes parallelization possible. When you run multiple cargo tests in parallel,
59 | each test starts a Redis container, and each of them is exposed on a different random port.
60 |
61 | All the containers must be removed at some point, otherwise they will keep running until
62 | the host is overloaded. In order to provide a clean environment, we rely on the `RAII` semantics
63 | of containers (the `Drop` trait). Thus, when a container goes out of scope, it is removed by default.
64 | However, you can change this behavior by setting the `TESTCONTAINERS_COMMAND` environment
65 | variable to `keep`.
66 |
67 | ## 4. Make your code talk to the container
68 |
69 | We will use [redis](https://github.com/redis-rs/redis-rs) as a client in this example.
70 | This code gets the endpoint from the container we just started, and it configures the client.
71 |
72 | > This is just an example; you can choose any client you want (e.g. [`fred`](https://github.com/aembke/fred.rs)).
73 |
74 | ```rust
75 | use redis::Client;
76 | use testcontainers::{core::{IntoContainerPort, WaitFor}, runners::AsyncRunner, GenericImage};
77 |
78 | #[tokio::test]
79 | async fn test_redis() -> Result<(), Box<dyn std::error::Error + 'static>> {
80 |     let container = GenericImage::new("redis", "7.2.4")
81 |         .with_exposed_port(6379.tcp())
82 |         .with_wait_for(WaitFor::message_on_stdout("Ready to accept connections"))
83 |         .start()
84 |         .await?;
85 |     let host = container.get_host().await?;
86 |     let host_port = container.get_host_port_ipv4(6379.tcp()).await?;
87 |     let url = format!("redis://{host}:{host_port}");
88 |     let client = Client::open(url.as_ref())?;
89 |     // do something with the client
90 |     Ok(())
91 | }
92 | ```
93 |
94 | * `get_host` returns the host that this container may be reached on (it may not be the local machine).
95 | In most cases it will be `localhost`.
96 | * `get_host_port_ipv4` returns the mapped host port for an internal port of this docker container.
97 | In this case it returns the port that was exposed by the container.
98 |
99 | ## 5. Run the test
100 |
101 | You can run the test via `cargo test`
102 |
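103 | As noted in section 3 above, a container is removed when it goes out of scope. Below is a minimal
104 | sketch of relying on that behavior; the test name and the explicit inner scope are illustrative
105 | assumptions, not requirements of the library:
106 | 
107 | ```rust
108 | use testcontainers::{core::{IntoContainerPort, WaitFor}, runners::AsyncRunner, GenericImage};
109 | 
110 | #[tokio::test]
111 | async fn redis_is_removed_after_drop() {
112 |     {
113 |         let _container = GenericImage::new("redis", "7.2.4")
114 |             .with_exposed_port(6379.tcp())
115 |             .with_wait_for(WaitFor::message_on_stdout("Ready to accept connections"))
116 |             .start()
117 |             .await
118 |             .unwrap();
119 |         // ... talk to Redis here ...
120 |     } // `_container` is dropped here and the container is removed,
121 |       // unless `TESTCONTAINERS_COMMAND` is set to `keep`.
122 | }
123 | ```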
--------------------------------------------------------------------------------
/docs/system_requirements/docker.md:
--------------------------------------------------------------------------------
1 | # General Docker requirements
2 |
3 | Testcontainers requires a Docker-API compatible container runtime.
4 | During development, Testcontainers is actively tested against recent versions of Docker on Linux, as well as against Docker Desktop on Mac and Windows.
5 | These Docker environments are automatically detected and used by Testcontainers without any additional configuration being necessary.
6 |
7 | It is possible to configure Testcontainers to work for other Docker setups, such as a remote Docker host or Docker alternatives.
8 | However, these are not actively tested in the main development workflow, so not all Testcontainers features might be available and additional manual configuration might be necessary.
9 |
10 | See [custom configuration](../features/configuration.md) for more information on how to configure Testcontainers for your specific Docker setup.
11 |
12 | If you have further questions about configuration details for your setup or whether it supports running Testcontainers-based tests,
13 | please contact the Testcontainers team and other users from the Testcontainers community on [Slack](https://slack.testcontainers.org/).
14 |
--------------------------------------------------------------------------------
/docs/theme/main.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 | {% block analytics %}
4 |
5 | {% endblock %}
6 |
7 | {% block extrahead %}
8 |
9 |
10 | {% endblock %}
--------------------------------------------------------------------------------
/docs/theme/partials/header.html:
--------------------------------------------------------------------------------
1 |
22 |
23 |
24 | {% set class = "md-header" %}
25 | {% if "navigation.tabs.sticky" in features %}
26 | {% set class = class ~ " md-header--shadow md-header--lifted" %}
27 | {% elif "navigation.tabs" not in features %}
28 | {% set class = class ~ " md-header--shadow" %}
29 | {% endif %}
30 |
31 | {% include "partials/tc-header.html" %}
32 |
33 |
34 |
35 |
144 |
145 |
146 | {% if "navigation.tabs.sticky" in features %}
147 | {% if "navigation.tabs" in features %}
148 | {% include "partials/tabs.html" %}
149 | {% endif %}
150 | {% endif %}
151 |
--------------------------------------------------------------------------------
/docs/theme/partials/nav.html:
--------------------------------------------------------------------------------
1 |
19 |
20 |
21 | {% import "partials/nav-item.html" as item with context %}
22 | {% set class = "md-nav md-nav--primary" %}
23 | {% if "navigation.tabs" in features %}
24 | {% set class = class ~ " md-nav--lifted" %}
25 | {% endif %}
26 | {% if "toc.integrate" in features %}
27 | {% set class = class ~ " md-nav--integrated" %}
28 | {% endif %}
29 |
30 |
31 |
32 |
33 |
34 |
35 | Content
36 |
37 |
38 |
39 | {% if config.repo_url %}
40 |
41 | {% include "partials/source.html" %}
42 |
43 | {% endif %}
44 |
45 |
46 |
47 | {% for nav_item in nav %}
48 | {% set path = "__nav_" ~ loop.index %}
49 | {{ item.render(nav_item, path, 1) }}
50 | {% endfor %}
51 |
78 |
79 |
--------------------------------------------------------------------------------
/docs/theme/partials/tc-header.html:
--------------------------------------------------------------------------------
1 | {% set header = ({
2 | "siteUrl": "https://testcontainers.com/",
3 | "menuItems": [
4 | {
5 | "label": "Desktop NEW ",
6 | "url": "https://testcontainers.com/desktop/"
7 | },
8 | {
9 | "label": "Cloud",
10 | "url": "https://testcontainers.com/cloud/"
11 | },
12 | {
13 | "label": "Getting Started",
14 | "url": "https://testcontainers.com/getting-started/"
15 | },
16 | {
17 | "label": "Guides",
18 | "url": "https://testcontainers.com/guides/"
19 | },
20 | {
21 | "label": "Modules",
22 | "url": "https://testcontainers.com/modules/"
23 | },
24 | {
25 | "label": "Docs",
26 | "children": [
27 | {
28 | "label": "Testcontainers for Java",
29 | "url": "https://java.testcontainers.org/",
30 | "image": "/language-logos/java.svg",
31 | },
32 | {
33 | "label": "Testcontainers for Go",
34 | "url": "https://golang.testcontainers.org/",
35 | "image": "/language-logos/go.svg",
36 | },
37 | {
38 | "label": "Testcontainers for .NET",
39 | "url": "https://dotnet.testcontainers.org/",
40 | "image": "/language-logos/dotnet.svg",
41 | },
42 | {
43 | "label": "Testcontainers for Node.js",
44 | "url": "https://node.testcontainers.org/",
45 | "image": "/language-logos/nodejs.svg",
46 | },
47 | {
48 | "label": "Testcontainers for Python",
49 | "url": "https://testcontainers-python.readthedocs.io/en/latest/",
50 | "image": "/language-logos/python.svg",
51 | "external": true,
52 | },
53 | {
54 | "label": "Testcontainers for Rust",
55 | "url": "https://rust.testcontainers.org/",
56 | "image": "/language-logos/rust.svg",
57 | "external": true,
58 | },
59 | {
60 | "label": "Testcontainers for Haskell",
61 | "url": "https://github.com/testcontainers/testcontainers-hs",
62 | "image": "/language-logos/haskell.svg",
63 | "external": true,
64 | },
65 | {
66 | "label": "Testcontainers for Ruby",
67 | "url": "https://github.com/testcontainers/testcontainers-ruby",
68 | "image": "/language-logos/ruby.svg",
69 | "external": true,
70 | },
71 | ]
72 | },
73 | {
74 | "label": "Slack",
75 | "url": "https://slack.testcontainers.org/",
76 | "icon": "icon-slack",
77 | },
78 | {
79 | "label": "GitHub",
80 | "url": "https://github.com/testcontainers",
81 | "icon": "icon-github",
82 | },
83 | ]
84 | }) %}
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
--------------------------------------------------------------------------------
/mkdocs.yml:
--------------------------------------------------------------------------------
1 | # This file is autogenerated by the 'modulegen' tool.
2 | site_name: Testcontainers for Rust
3 | site_url: https://rust.testcontainers.org
4 | plugins:
5 | - search
6 | - codeinclude
7 | - include-markdown
8 | - markdownextradata
9 | theme:
10 | name: material
11 | custom_dir: docs/theme
12 | palette:
13 | scheme: testcontainers
14 | font:
15 | text: Roboto
16 | code: Roboto Mono
17 | logo: logo.svg
18 | favicon: favicon.ico
19 | extra_css:
20 | - css/extra.css
21 | - css/tc-header.css
22 | repo_name: testcontainers-rs
23 | repo_url: https://github.com/testcontainers/testcontainers-rs
24 | markdown_extensions:
25 | - admonition
26 | - codehilite:
27 | linenums: false
28 | - pymdownx.superfences
29 | - pymdownx.tabbed:
30 | alternate_style: true
31 | - pymdownx.snippets
32 | - toc:
33 | permalink: true
34 | - attr_list
35 | - pymdownx.emoji:
36 | emoji_generator: !!python/name:material.extensions.emoji.to_svg
37 | emoji_index: !!python/name:material.extensions.emoji.twemoji
38 | nav:
39 | - Home: index.md
40 | - Quickstart:
41 | - quickstart/testcontainers.md
42 | - quickstart/community_modules.md
43 | - Features:
44 | - features/configuration.md
45 | - features/wait_strategies.md
46 | - features/exec_commands.md
47 | - System Requirements:
48 | - system_requirements/docker.md
49 | - Contributing:
50 | - contributing_docs.md
51 | - Getting help: getting_help.md
52 | edit_uri: edit/main/docs/
53 | extra:
54 | latest_version: 0.16.7
55 |
--------------------------------------------------------------------------------
/release-plz.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | git_release_enable = true # enable GitHub releases
3 | pr_labels = ["release"] # add the `release` label to the release Pull Request
4 |
5 | [[package]]
6 | name = "testcontainers"
7 | git_tag_name = "{{ version }}"
8 | changelog_path = "CHANGELOG.md"
9 |
10 | [[package]]
11 | name = "testimages"
12 | release = false
13 | publish = false
14 | changelog_update = false
15 |
16 | [changelog]
17 | tag_pattern = "[0-9].*"
18 | sort_commits = "oldest"
19 | trim = true
20 | protect_breaking_commits = true
21 | commit_preprocessors = []
22 | commit_parsers = [
23 | { message = "^feat", group = "Features" },
24 | { message = "^fix", group = "Bug Fixes" },
25 | { message = "^doc", group = "Documentation" },
26 | { message = "^perf", group = "Performance" },
27 | { message = "^refactor", group = "Refactor" },
28 | { message = "^style", group = "Styling" },
29 | { message = "^test", group = "Testing" },
30 | { message = "^ci\\(deps\\)", skip = true },
31 | { message = "^build\\(deps\\)", skip = true },
32 | { message = "^chore: release", skip = true },
33 | { message = "^chore\\(pr\\)", skip = true },
34 | { message = "^chore\\(pull\\)", skip = true },
35 | { message = "^chore|ci", group = "Miscellaneous Tasks" },
36 | { body = ".*security", group = "Security" },
37 | { message = "^revert", group = "Revert" },
38 | ]
39 | header = """
40 | # Changelog\n
41 | All notable changes to this project will be documented in this file.
42 |
43 | """
44 | body = """
45 | {%- macro remote_url() -%}
46 | https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
47 | {%- endmacro -%}
48 |
49 | {% if version -%}
50 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
51 | {% else -%}
52 | ## [Unreleased]
53 | {% endif -%}
54 |
55 | ### Details\
56 |
57 | {% for group, commits in commits | group_by(attribute="group") %}
58 | #### {{ group | upper_first }}
59 | {%- for commit in commits %}
60 | - {% if commit.breaking %}[❗] {% endif %}{{ commit.message | upper_first | trim }}\
61 | {% if commit.github.username %} by @{{ commit.github.username }}{%- endif -%}
62 | {% if commit.github.pr_number %} in \
63 | [#{{ commit.github.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.github.pr_number }}) \
64 | {%- endif -%}
65 | {% endfor %}
66 | {% endfor %}
67 | """
68 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | mkdocs==1.5.3
2 | mkdocs-codeinclude-plugin==0.2.1
3 | mkdocs-include-markdown-plugin==6.0.4
4 | mkdocs-material==9.5.18
5 | mkdocs-markdownextradata-plugin==0.2.5
6 |
--------------------------------------------------------------------------------
/runtime.txt:
--------------------------------------------------------------------------------
1 | 3.8
2 |
--------------------------------------------------------------------------------
/testcontainers/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "testcontainers"
3 | version = "0.24.0"
4 | categories = ["development-tools::testing"]
5 | readme = "README.md"
6 | authors.workspace = true
7 | edition.workspace = true
8 | keywords.workspace = true
9 | license.workspace = true
10 | repository.workspace = true
11 | rust-version.workspace = true
12 | description = "A library for integration-testing against docker containers from within Rust."
13 |
14 | [package.metadata.docs.rs]
15 | all-features = true
16 | rustdoc-args = ["--cfg", "docsrs"]
17 |
18 | [dependencies]
19 | async-trait = { version = "0.1" }
20 | bollard = { version = "0.18.1"}
21 | bollard-stubs = "=1.47.1-rc.27.3.1"
22 | bytes = "1.6.0"
23 | conquer-once = { version = "0.4", optional = true }
24 | docker_credential = "1.3.1"
25 | either = "1.12.0"
26 | etcetera = "0.10.0"
27 | futures = "0.3"
28 | log = "0.4"
29 | memchr = "2.7.2"
30 | parse-display = "0.9.0"
31 | pin-project-lite = "0.2.14"
32 | reqwest = { version = "0.12.5", features = ["rustls-tls", "rustls-tls-native-roots", "hickory-dns", "json", "charset", "http2"], default-features = false, optional = true }
33 | serde = { version = "1", features = ["derive"] }
34 | serde-java-properties = { version = "0.2.0", optional = true }
35 | serde_json = "1"
36 | serde_with = "3.7.0"
37 | signal-hook = { version = "0.3", optional = true }
38 | thiserror = "2.0.3"
39 | tokio = { version = "1", features = ["macros", "fs", "rt-multi-thread"] }
40 | tokio-stream = "0.1.15"
41 | tokio-tar = "0.3.1"
42 | tokio-util = { version = "0.7.10", features = ["io"] }
43 | ulid = { version = "1.1.3", optional = true }
44 | url = { version = "2", features = ["serde"] }
45 |
46 | [features]
47 | default = ["ring"]
48 | ring = ["bollard/ssl"]
49 | aws-lc-rs = ["bollard/aws-lc-rs"]
50 | ssl = ["bollard/ssl_providerless"]
51 | blocking = []
52 | watchdog = ["signal-hook", "conquer-once"]
53 | http_wait = ["reqwest"]
54 | properties-config = ["serde-java-properties"]
55 | reusable-containers = ["dep:ulid"]
56 |
57 | [dev-dependencies]
58 | anyhow = "1.0.86"
59 | pretty_env_logger = "0.5"
60 | reqwest = { version = "0.12.4", features = ["blocking"], default-features = false }
61 | temp-dir = "0.1.13"
62 | testimages.workspace = true
63 | tokio = { version = "1", features = ["macros"] }
64 |
--------------------------------------------------------------------------------
/testcontainers/README.md:
--------------------------------------------------------------------------------
1 | ../README.md
--------------------------------------------------------------------------------
/testcontainers/src/core.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "reusable-containers")]
2 | pub use self::image::ReuseDirective;
3 | pub use self::{
4 | containers::*,
5 | image::{ContainerState, ExecCommand, Image, ImageExt},
6 | mounts::{AccessMode, Mount, MountType},
7 | ports::{ContainerPort, IntoContainerPort},
8 | wait::{cmd_wait::CmdWaitFor, WaitFor},
9 | };
10 |
11 | mod image;
12 |
13 | pub(crate) mod async_drop;
14 | pub mod client;
15 | pub(crate) mod containers;
16 | pub(crate) mod copy;
17 | pub(crate) mod env;
18 | pub mod error;
19 | pub mod logs;
20 | pub(crate) mod mounts;
21 | pub(crate) mod network;
22 | pub mod ports;
23 | pub mod wait;
24 |
--------------------------------------------------------------------------------
/testcontainers/src/core/async_drop.rs:
--------------------------------------------------------------------------------
1 | use std::sync::OnceLock;
2 |
3 | use futures::future::BoxFuture;
4 |
5 | static DROP_TASK_SENDER: OnceLock<tokio::sync::mpsc::UnboundedSender<BoxFuture<'static, ()>>> =
6 | OnceLock::new();
7 |
8 | /// A helper to perform async operations in `Drop` implementation.
9 | ///
10 | /// The behavior depends on the runtime flavor used in the test:
11 | /// - `multi-threaded` runtime: it will use `tokio::task::block_in_place` to run the provided future
12 | /// - `current-thread` runtime: it spawns a separate tokio runtime in a dedicated thread to run the provided futures.
13 | /// * Only 1 drop-worker for the process, regardless of number of containers and drops.
14 | // We can consider creating `AsyncDrop` trait + `AsyncDropGuard` wrapper to make it more ergonomic.
15 | // However, we have only a couple of places where we need this functionality.
16 | pub(crate) fn async_drop(future: impl std::future::Future<Output = ()> + Send + 'static) {
17 | let handle = tokio::runtime::Handle::current();
18 | match handle.runtime_flavor() {
19 | tokio::runtime::RuntimeFlavor::CurrentThread => {
20 | let (tx, rx) = std::sync::mpsc::sync_channel(1);
21 | dropper_task_sender()
22 | .send(Box::pin(async move {
23 | future.await;
24 | let _ = tx.send(());
25 | }))
26 | .expect("drop-worker must be running: failed to send drop task");
27 | let _ = rx.recv();
28 | }
29 | tokio::runtime::RuntimeFlavor::MultiThread => {
30 | tokio::task::block_in_place(move || handle.block_on(future))
31 | }
32 | _ => unreachable!("unsupported runtime flavor"),
33 | }
34 | }
35 |
36 | fn dropper_task_sender() -> &'static tokio::sync::mpsc::UnboundedSender<BoxFuture<'static, ()>> {
37 | DROP_TASK_SENDER.get_or_init(|| {
38 | let (dropper_tx, mut dropper_rx) = tokio::sync::mpsc::unbounded_channel();
39 | std::thread::spawn(move || {
40 | tokio::runtime::Builder::new_current_thread()
41 | .thread_name("testcontainers-drop-worker")
42 | .enable_all()
43 | .build()
44 | .expect("failed to create dropper runtime")
45 | .block_on(async move {
46 | while let Some(future) = dropper_rx.recv().await {
47 | future.await;
48 | }
49 | });
50 | });
51 |
52 | dropper_tx
53 | })
54 | }
55 |
--------------------------------------------------------------------------------
/testcontainers/src/core/client/bollard_client.rs:
--------------------------------------------------------------------------------
1 | use std::{str::FromStr, time::Duration};
2 |
3 | use bollard::{Docker, API_DEFAULT_VERSION};
4 | use url::Url;
5 |
6 | use crate::core::env;
7 |
8 | const DEFAULT_TIMEOUT: Duration = Duration::from_secs(2 * 60);
9 |
10 | pub(super) fn init(config: &env::Config) -> Result<Docker, bollard::errors::Error> {
11 | let host = &config.docker_host();
12 | let host_url = Url::from_str(host)?;
13 |
14 | match host_url.scheme() {
15 | "https" => connect_with_ssl(config),
16 | "http" | "tcp" => {
17 | if config.tls_verify() {
18 | connect_with_ssl(config)
19 | } else {
20 | Docker::connect_with_http(host, DEFAULT_TIMEOUT.as_secs(), API_DEFAULT_VERSION)
21 | }
22 | }
23 | #[cfg(unix)]
24 | "unix" => Docker::connect_with_unix(host, DEFAULT_TIMEOUT.as_secs(), API_DEFAULT_VERSION),
25 | #[cfg(windows)]
26 | "npipe" => {
27 | Docker::connect_with_named_pipe(host, DEFAULT_TIMEOUT.as_secs(), API_DEFAULT_VERSION)
28 | }
29 | _ => Err(bollard::errors::Error::UnsupportedURISchemeError {
30 | uri: host.to_string(),
31 | }),
32 | }
33 | }
34 |
35 | fn connect_with_ssl(config: &env::Config) -> Result<Docker, bollard::errors::Error> {
36 | let cert_path = config.cert_path().expect("cert path not found");
37 |
38 | Docker::connect_with_ssl(
39 | &config.docker_host(),
40 | &cert_path.join("key.pem"),
41 | &cert_path.join("cert.pem"),
42 | &cert_path.join("ca.pem"),
43 | DEFAULT_TIMEOUT.as_secs(),
44 | API_DEFAULT_VERSION,
45 | )
46 | }
47 |
--------------------------------------------------------------------------------
/testcontainers/src/core/client/exec.rs:
--------------------------------------------------------------------------------
1 | use crate::core::logs::WaitingStreamWrapper;
2 |
3 | pub(crate) struct ExecResult {
4 | pub(crate) id: String,
5 | pub(crate) stdout: WaitingStreamWrapper,
6 | pub(crate) stderr: WaitingStreamWrapper,
7 | }
8 |
9 | impl ExecResult {
10 | pub(crate) fn id(&self) -> &str {
11 | &self.id
12 | }
13 |
14 | pub(crate) fn stdout(&mut self) -> &mut WaitingStreamWrapper {
15 | &mut self.stdout
16 | }
17 |
18 | pub(crate) fn stderr(&mut self) -> &mut WaitingStreamWrapper {
19 | &mut self.stderr
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/testcontainers/src/core/client/factory.rs:
--------------------------------------------------------------------------------
1 | use std::sync::{Arc, OnceLock, Weak};
2 |
3 | use tokio::sync::Mutex;
4 |
5 | use crate::core::client::{Client, ClientError};
6 |
7 | // We use `Weak` in order not to prevent `Drop` from being called.
8 | // Instead, we re-create the client if it was dropped and requested one more time.
9 | // This way we provide `Drop` guarantees and avoid unnecessary instantiation at the same time.
10 | static DOCKER_CLIENT: OnceLock<Mutex<Weak<Client>>> = OnceLock::new();
11 |
12 | impl Client {
13 | /// Returns a client instance, reusing already created or initializing a new one.
14 |     pub(crate) async fn lazy_client() -> Result<Arc<Client>, ClientError> {
15 | let mut guard = DOCKER_CLIENT
16 | .get_or_init(|| Mutex::new(Weak::new()))
17 | .lock()
18 | .await;
19 | let maybe_client = guard.upgrade();
20 |
21 | if let Some(client) = maybe_client {
22 | Ok(client)
23 | } else {
24 | let client = Arc::new(Client::new().await?);
25 | *guard = Arc::downgrade(&client);
26 |
27 | Ok(client)
28 | }
29 | }
30 | }
31 |
32 | /// Returns a configured Docker client instance.
33 | ///
34 | /// This function provides access to the underlying Docker client ([`bollard`]).
35 | /// While this method is publicly exposed, it is not intended for frequent use.
36 | /// It can be useful in scenarios where you need to interact with the Docker API directly using an already configured client.
37 | ///
38 | /// This method returns a lazily-created client, reusing an existing one if available.
39 | pub async fn docker_client_instance() -> Result<bollard::Docker, ClientError> {
40 | Client::lazy_client().await.map(|c| c.bollard.clone())
41 | }
42 |
--------------------------------------------------------------------------------
/testcontainers/src/core/containers/async_container/exec.rs:
--------------------------------------------------------------------------------
1 | use std::{fmt, io, pin::Pin, sync::Arc};
2 |
3 | use bytes::Bytes;
4 | use futures::stream::BoxStream;
5 | use tokio::io::{AsyncBufRead, AsyncReadExt};
6 |
7 | use crate::core::{client::Client, error::Result};
8 |
9 | /// Represents the result of an executed command in a container.
10 | pub struct ExecResult {
11 |     pub(super) client: Arc<Client>,
12 |     pub(crate) id: String,
13 |     pub(super) stdout: BoxStream<'static, std::result::Result<Bytes, io::Error>>,
14 |     pub(super) stderr: BoxStream<'static, std::result::Result<Bytes, io::Error>>,
15 | }
16 |
17 | impl ExecResult {
18 | /// Returns the exit code of the executed command.
19 | /// If the command has not yet exited, this will return `None`.
20 |     pub async fn exit_code(&self) -> Result<Option<i64>> {
21 | let res = self.client.inspect_exec(&self.id).await?;
22 | Ok(res.exit_code)
23 | }
24 |
25 | /// Returns an asynchronous reader for stdout. It follows log stream until the command exits.
26 |     pub fn stdout<'b>(&'b mut self) -> Pin<Box<dyn AsyncBufRead + 'b>> {
27 | Box::pin(tokio_util::io::StreamReader::new(&mut self.stdout))
28 | }
29 |
30 | /// Returns an asynchronous reader for stderr. It follows log stream until the command exits.
31 |     pub fn stderr<'b>(&'b mut self) -> Pin<Box<dyn AsyncBufRead + 'b>> {
32 | Box::pin(tokio_util::io::StreamReader::new(&mut self.stderr))
33 | }
34 |
35 | /// Returns stdout as a vector of bytes.
36 | /// Keep in mind that this will block until the command exits.
37 | ///
38 |     /// If you want to read stdout in an asynchronous manner, use [`ExecResult::stdout`] instead.
39 |     pub async fn stdout_to_vec(&mut self) -> Result<Vec<u8>> {
40 | let mut stdout = Vec::new();
41 | self.stdout().read_to_end(&mut stdout).await?;
42 | Ok(stdout)
43 | }
44 |
45 | /// Returns stderr as a vector of bytes.
46 | /// Keep in mind that this will block until the command exits.
47 | ///
48 |     /// If you want to read stderr in an asynchronous manner, use [`ExecResult::stderr`] instead.
49 |     pub async fn stderr_to_vec(&mut self) -> Result<Vec<u8>> {
50 | let mut stderr = Vec::new();
51 | self.stderr().read_to_end(&mut stderr).await?;
52 | Ok(stderr)
53 | }
54 | }
55 |
56 | impl fmt::Debug for ExecResult {
57 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
58 | f.debug_struct("ExecResult").field("id", &self.id).finish()
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/testcontainers/src/core/containers/mod.rs:
--------------------------------------------------------------------------------
1 | pub(crate) mod async_container;
2 | pub(crate) mod request;
3 | #[cfg(feature = "blocking")]
4 | pub(crate) mod sync_container;
5 |
6 | pub use async_container::{exec::ExecResult, ContainerAsync};
7 | pub use request::{CgroupnsMode, ContainerRequest, Host, PortMapping};
8 | #[cfg(feature = "blocking")]
9 | #[cfg_attr(docsrs, doc(cfg(feature = "blocking")))]
10 | pub use sync_container::{exec::SyncExecResult, Container};
11 |
--------------------------------------------------------------------------------
/testcontainers/src/core/containers/sync_container/exec.rs:
--------------------------------------------------------------------------------
1 | use std::{fmt, io::BufRead, sync::Arc};
2 |
3 | use crate::{
4 | core::{async_container, sync_container::sync_reader},
5 | TestcontainersError,
6 | };
7 |
8 | /// Represents the result of an executed command in a container.
9 | pub struct SyncExecResult {
10 | pub(super) inner: async_container::exec::ExecResult,
11 |     pub(super) runtime: Arc<tokio::runtime::Runtime>,
12 | }
13 |
14 | impl SyncExecResult {
15 | /// Returns the exit code of the executed command.
16 | /// If the command has not yet exited, this will return `None`.
17 |     pub fn exit_code(&self) -> Result<Option<i64>, TestcontainersError> {
18 | self.runtime.block_on(self.inner.exit_code())
19 | }
20 |
21 |     /// Returns a blocking reader for stdout.
22 |     pub fn stdout<'b>(&'b mut self) -> Box<dyn BufRead + 'b> {
23 | Box::new(sync_reader::SyncReadBridge::new(
24 | self.inner.stdout(),
25 | self.runtime.clone(),
26 | ))
27 | }
28 |
29 |     /// Returns a blocking reader for stderr.
30 |     pub fn stderr<'b>(&'b mut self) -> Box<dyn BufRead + 'b> {
31 | Box::new(sync_reader::SyncReadBridge::new(
32 | self.inner.stderr(),
33 | self.runtime.clone(),
34 | ))
35 | }
36 |
37 | /// Returns stdout as a vector of bytes.
38 | /// Keep in mind that this will block until the command exits.
39 | ///
40 |     /// If you want to read stdout in chunks, use [`SyncExecResult::stdout`] instead.
41 |     pub fn stdout_to_vec(&mut self) -> Result<Vec<u8>, TestcontainersError> {
42 | self.runtime.block_on(self.inner.stdout_to_vec())
43 | }
44 |
45 | /// Returns stderr as a vector of bytes.
46 | /// Keep in mind that this will block until the command exits.
47 | ///
48 | /// If you want to read stderr in chunks, use [`SyncExecResult::stderr`] instead.
49 |     pub fn stderr_to_vec(&mut self) -> Result<Vec<u8>, TestcontainersError> {
50 | self.runtime.block_on(self.inner.stderr_to_vec())
51 | }
52 | }
53 |
54 | impl fmt::Debug for SyncExecResult {
55 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
56 | f.debug_struct("ExecResult")
57 | .field("id", &self.inner.id)
58 | .finish()
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/testcontainers/src/core/containers/sync_container/sync_reader.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | io::{BufRead, Read},
3 | sync::Arc,
4 | };
5 |
6 | use tokio::io::{AsyncBufRead, AsyncBufReadExt, AsyncRead, AsyncReadExt};
7 |
8 | /// Allows using [`tokio::io::AsyncRead`] synchronously as [`std::io::Read`].
9 | /// In fact, it's almost the same as [`tokio_util::io::SyncIoBridge`], but utilizes [`tokio::runtime::Runtime`] instead of [`tokio::runtime::Handle`].
10 | /// This is needed because [`tokio::runtime::Handle::block_on`] can't drive the IO on `current_thread` runtime.
11 | pub(super) struct SyncReadBridge<T> {
12 |     inner: T,
13 |     runtime: Arc<tokio::runtime::Runtime>,
14 | }
15 |
16 | impl<T> SyncReadBridge<T> {
17 |     pub fn new(inner: T, runtime: Arc<tokio::runtime::Runtime>) -> Self {
18 | Self { inner, runtime }
19 | }
20 | }
21 |
22 | impl<T: AsyncBufRead + Unpin> BufRead for SyncReadBridge<T> {
23 | fn fill_buf(&mut self) -> std::io::Result<&[u8]> {
24 | let inner = &mut self.inner;
25 | self.runtime.block_on(AsyncBufReadExt::fill_buf(inner))
26 | }
27 |
28 | fn consume(&mut self, amt: usize) {
29 | let inner = &mut self.inner;
30 | AsyncBufReadExt::consume(inner, amt)
31 | }
32 |
33 |     fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> std::io::Result<usize> {
34 | let inner = &mut self.inner;
35 | self.runtime
36 | .block_on(AsyncBufReadExt::read_until(inner, byte, buf))
37 | }
38 |     fn read_line(&mut self, buf: &mut String) -> std::io::Result<usize> {
39 | let inner = &mut self.inner;
40 | self.runtime
41 | .block_on(AsyncBufReadExt::read_line(inner, buf))
42 | }
43 | }
44 |
45 | impl<T: AsyncRead + Unpin> Read for SyncReadBridge<T> {
46 |     fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
47 | let inner = &mut self.inner;
48 | self.runtime.block_on(AsyncReadExt::read(inner, buf))
49 | }
50 |
51 |     fn read_to_end(&mut self, buf: &mut Vec<u8>) -> std::io::Result<usize> {
52 | let inner = &mut self.inner;
53 | self.runtime.block_on(inner.read_to_end(buf))
54 | }
55 |
56 |     fn read_to_string(&mut self, buf: &mut String) -> std::io::Result<usize> {
57 | let inner = &mut self.inner;
58 | self.runtime.block_on(inner.read_to_string(buf))
59 | }
60 |
61 | fn read_exact(&mut self, buf: &mut [u8]) -> std::io::Result<()> {
62 | let inner = &mut self.inner;
63 | // The AsyncRead trait returns the count, synchronous doesn't.
64 | let _n = self.runtime.block_on(inner.read_exact(buf))?;
65 | Ok(())
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/testcontainers/src/core/copy.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | io,
3 | path::{Path, PathBuf},
4 | };
5 |
6 | #[derive(Debug, Clone)]
7 | pub struct CopyToContainer {
8 | target: String,
9 | source: CopyDataSource,
10 | }
11 |
12 | #[derive(Debug, Clone)]
13 | pub enum CopyDataSource {
14 | File(PathBuf),
15 |     Data(Vec<u8>),
16 | }
17 |
18 | #[derive(Debug, thiserror::Error)]
19 | pub enum CopyToContainerError {
20 | #[error("io failed with error: {0}")]
21 | IoError(io::Error),
22 | #[error("failed to get the path name: {0}")]
23 | PathNameError(String),
24 | }
25 |
26 | impl CopyToContainer {
27 |     pub fn new(source: impl Into<CopyDataSource>, target: impl Into<String>) -> Self {
28 | Self {
29 | source: source.into(),
30 | target: target.into(),
31 | }
32 | }
33 |
34 |     pub(crate) async fn tar(&self) -> Result<bytes::Bytes, CopyToContainerError> {
35 | self.source.tar(&self.target).await
36 | }
37 | }
38 |
39 | impl From<&Path> for CopyDataSource {
40 | fn from(value: &Path) -> Self {
41 | CopyDataSource::File(value.to_path_buf())
42 | }
43 | }
44 | impl From<PathBuf> for CopyDataSource {
45 | fn from(value: PathBuf) -> Self {
46 | CopyDataSource::File(value)
47 | }
48 | }
49 | impl From<Vec<u8>> for CopyDataSource {
50 |     fn from(value: Vec<u8>) -> Self {
51 | CopyDataSource::Data(value)
52 | }
53 | }
54 |
55 | impl CopyDataSource {
56 | pub(crate) async fn tar(
57 | &self,
58 |         target_path: impl Into<String>,
59 |     ) -> Result<bytes::Bytes, CopyToContainerError> {
60 | let target_path: String = target_path.into();
61 |
62 | let bytes = match self {
63 | CopyDataSource::File(source_file_path) => {
64 | tar_file(source_file_path, &target_path).await?
65 | }
66 | CopyDataSource::Data(data) => tar_bytes(data, &target_path).await?,
67 | };
68 |
69 | Ok(bytes::Bytes::copy_from_slice(bytes.as_slice()))
70 | }
71 | }
72 |
73 | async fn tar_file(
74 | source_file_path: &Path,
75 | target_path: &str,
76 | ) -> Result<Vec<u8>, CopyToContainerError> {
77 | let target_path = make_path_relative(target_path);
78 | let meta = tokio::fs::metadata(source_file_path)
79 | .await
80 | .map_err(CopyToContainerError::IoError)?;
81 |
82 | let mut ar = tokio_tar::Builder::new(Vec::new());
83 | if meta.is_dir() {
84 | ar.append_dir_all(target_path, source_file_path)
85 | .await
86 | .map_err(CopyToContainerError::IoError)?;
87 | } else {
88 | let f = &mut tokio::fs::File::open(source_file_path)
89 | .await
90 | .map_err(CopyToContainerError::IoError)?;
91 |
92 | ar.append_file(target_path, f)
93 | .await
94 | .map_err(CopyToContainerError::IoError)?;
95 | };
96 |
97 | let res = ar
98 | .into_inner()
99 | .await
100 | .map_err(CopyToContainerError::IoError)?;
101 |
102 | Ok(res)
103 | }
104 |
105 | async fn tar_bytes(data: &Vec<u8>, target_path: &str) -> Result<Vec<u8>, CopyToContainerError> {
106 | let relative_target_path = make_path_relative(target_path);
107 |
108 | let mut header = tokio_tar::Header::new_gnu();
109 | header.set_size(data.len() as u64);
110 | header.set_mode(0o0644);
111 | header.set_cksum();
112 |
113 | let mut ar = tokio_tar::Builder::new(Vec::new());
114 | ar.append_data(&mut header, relative_target_path, data.as_slice())
115 | .await
116 | .map_err(CopyToContainerError::IoError)?;
117 |
118 | let res = ar
119 | .into_inner()
120 | .await
121 | .map_err(CopyToContainerError::IoError)?;
122 |
123 | Ok(res)
124 | }
125 |
126 | fn make_path_relative(path: &str) -> String {
127 | // TODO support also absolute windows paths like "C:\temp\foo.txt"
128 | if path.starts_with("/") {
129 | path.trim_start_matches("/").to_string()
130 | } else {
131 | path.to_string()
132 | }
133 | }
134 |
--------------------------------------------------------------------------------
/testcontainers/src/core/env.rs:
--------------------------------------------------------------------------------
1 | mod config;
2 |
3 | pub use config::ConfigurationError;
4 | pub(crate) use config::{Command, Config};
5 |
6 | /// Abstracts over reading a value from the environment.
7 | pub trait GetEnvValue {
8 |     fn get_env_value(key: &str) -> Option<String>;
9 | }
10 |
11 | /// Represents the operating system environment for use within a production environment.
12 | #[derive(Debug)]
13 | pub struct Os;
14 |
15 | impl GetEnvValue for Os {
16 |     fn get_env_value(key: &str) -> Option<String> {
17 | ::std::env::var(key).ok()
18 | }
19 | }
20 |
21 | #[cfg(test)]
22 | mod tests {
23 | use super::*;
24 |
25 | #[derive(Debug)]
26 | struct FakeEnvAlwaysKeep;
27 |
28 | impl GetEnvValue for FakeEnvAlwaysKeep {
29 |         fn get_env_value(key: &str) -> Option<String> {
30 | match key {
31 | "TESTCONTAINERS_COMMAND" => Some("keep".to_owned()),
32 | _ => None,
33 | }
34 | }
35 | }
36 |
37 | #[test]
38 | fn errors_on_unknown_command() {
39 |         let res = "foobar".parse::<Command>();
40 | assert!(res.is_err());
41 | }
42 |
43 | #[test]
44 | fn command_looks_up_testcontainers_env_variables() {
45 | let cmd = FakeEnvAlwaysKeep::get_env_value("TESTCONTAINERS_COMMAND").unwrap();
46 |
47 |         assert!(matches!(cmd.parse::<Command>(), Ok(Command::Keep)),)
48 | }
49 |
50 | #[test]
51 | fn default_command_is_remove() {
52 | let cmd = Command::default();
53 |
54 | assert_eq!(cmd, Command::Remove)
55 | }
56 | }
57 |
--------------------------------------------------------------------------------
/testcontainers/src/core/error.rs:
--------------------------------------------------------------------------------
1 | use std::error::Error;
2 |
3 | use crate::core::logs::WaitLogError;
4 | pub use crate::core::{client::ClientError, env::ConfigurationError, ContainerPort};
5 |
6 | pub type Result = std::result::Result;
7 |
8 | /// Enum to represent various types of errors that can occur in Testcontainers
9 | #[derive(Debug, thiserror::Error)]
10 | pub enum TestcontainersError {
11 | /// Represents an error that occurred in the client of Docker API.
12 | #[error(transparent)]
13 | Client(#[from] ClientError),
14 | #[error("container is not ready: {0}")]
15 | WaitContainer(#[from] WaitContainerError),
16 | /// Represents an error when a container does not expose a specified port
17 | #[error("container '{id}' does not expose port {port}")]
18 | PortNotExposed { id: String, port: ContainerPort },
19 | /// Represents an error when a container is missing some information
20 | #[error(transparent)]
21 | MissingInfo(#[from] ContainerMissingInfo),
22 | /// Represents an error when an exec operation fails
23 | #[error("exec operation failed: {0}")]
24 | Exec(#[from] ExecError),
25 | #[error("I/O error: {0}")]
26 | Io(#[from] std::io::Error),
27 | /// Represents any other error that does not fit into the above categories
28 | #[error("other error: {0}")]
29 |     Other(Box<dyn std::error::Error + Send + Sync>),
30 | }
31 |
32 | #[derive(Debug, thiserror::Error)]
33 | #[error("container '{id}' does not have: {path}")]
34 | pub struct ContainerMissingInfo {
35 | /// Container ID
36 | id: String,
37 | /// Path to the missing information (e.g `NetworkSettings.Networks`).
38 | path: String,
39 | }
40 |
41 | /// Error type for exec operation.
42 | #[derive(Debug, thiserror::Error)]
43 | pub enum ExecError {
44 | #[error("exec process exited with code {actual}, expected {expected}")]
45 | ExitCodeMismatch { expected: i64, actual: i64 },
46 | #[error("failed to wait for exec log: {0}")]
47 | WaitLog(#[from] WaitLogError),
48 | }
49 |
50 | /// Error type for waiting for container readiness based on [`crate::core::WaitFor`] conditions.
51 | #[derive(Debug, thiserror::Error)]
52 | pub enum WaitContainerError {
53 | #[error("failed to wait for container log: {0}")]
54 | WaitLog(#[from] WaitLogError),
55 | #[error("container state is unavailable")]
56 | StateUnavailable,
57 | #[error("container is not ready: {0}")]
58 | #[cfg(feature = "http_wait")]
59 | #[cfg_attr(docsrs, doc(cfg(feature = "http_wait")))]
60 | HttpWait(#[from] crate::core::wait::http_strategy::HttpWaitError),
61 | #[error("healthcheck is not configured for container: {0}")]
62 | HealthCheckNotConfigured(String),
63 | #[error("container is unhealthy")]
64 | Unhealthy,
65 | #[error("container startup timeout")]
66 | StartupTimeout,
67 | #[error("container exited with unexpected code: expected {expected}, actual {actual:?}")]
68 |     UnexpectedExitCode { expected: i64, actual: Option<i64> },
69 | }
70 |
71 | impl TestcontainersError {
72 | /// Creates a new `TestcontainersError` from an arbitrary error payload.
73 | ///
74 |     /// It's preferable to use the more specific error constructors if possible.
75 | /// But this method is useful when you need to:
76 | /// - wrap an error that doesn't fit into the other categories
77 | /// - avoid introducing a new kind of error in order to keep the error handling simple
78 | /// - create a custom error from client code.
79 |     pub fn other<E>(error: E) -> Self
80 |     where
81 |         E: Into<Box<dyn std::error::Error + Send + Sync>>,
82 | {
83 | Self::Other(error.into())
84 | }
85 | }
86 |
87 | impl ContainerMissingInfo {
88 |     pub(crate) fn new(id: impl Into<String>, path: impl Into<String>) -> Self {
89 | Self {
90 | id: id.into(),
91 | path: path.into(),
92 | }
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/testcontainers/src/core/image.rs:
--------------------------------------------------------------------------------
1 | use std::{borrow::Cow, fmt::Debug};
2 |
3 | pub use exec::ExecCommand;
4 | pub use image_ext::ImageExt;
5 | #[cfg(feature = "reusable-containers")]
6 | pub use image_ext::ReuseDirective;
7 | use url::Host;
8 |
9 | use crate::{
10 | core::{
11 | copy::CopyToContainer,
12 | error::Result,
13 | mounts::Mount,
14 | ports::{ContainerPort, Ports},
15 | WaitFor,
16 | },
17 | ContainerAsync, TestcontainersError,
18 | };
19 |
20 | mod exec;
21 | mod image_ext;
22 |
23 | /// Represents a docker image.
24 | ///
25 | /// Implementations are required to implement Default. The default instance of an [`Image`]
26 | /// should have a meaningful configuration! It should be possible to [`run`][docker_run] the default
27 | /// instance of an Image and get back a working container!
28 | ///
29 | /// [`Image`]: trait.Image.html
30 | /// [docker_run]: trait.Docker.html#tymethod.run
31 | pub trait Image
32 | where
33 | Self: Sized + Sync + Send,
34 | {
35 | /// The name of the docker image to pull from the Docker Hub registry.
36 | fn name(&self) -> &str;
37 |
38 | /// Implementations are encouraged to include a tag that will not change (i.e. NOT latest)
39 |     /// in order to prevent test code from randomly breaking because the underlying docker image
40 |     /// suddenly changed.
41 | fn tag(&self) -> &str;
42 |
43 | /// Returns a list of conditions that need to be met before a started container is considered ready.
44 | ///
45 | /// This method is the **🍞 and butter** of the whole testcontainers library. Containers are
46 | /// rarely instantly available as soon as they are started. Most of them take some time to boot
47 | /// up.
48 | ///
49 | /// The conditions returned from this method are evaluated **in the order** they are returned. Therefore
50 | /// you most likely want to start with a [`WaitFor::Log`] or [`WaitFor::Http`].
51 | fn ready_conditions(&self) -> Vec;
52 |
53 |     /// Returns the environment variables that need to be set when a container is created.
54 | fn env_vars(
55 | &self,
56 |     ) -> impl IntoIterator<Item = (impl Into<Cow<'_, str>>, impl Into<Cow<'_, str>>)> {
57 | std::iter::empty::<(String, String)>()
58 | }
59 |
60 |     /// Returns the mounts that need to be created when a container is created.
61 |     fn mounts(&self) -> impl IntoIterator<Item = &Mount> {
62 | std::iter::empty()
63 | }
64 |
65 | /// Returns the files to be copied into the container at startup.
66 |     fn copy_to_sources(&self) -> impl IntoIterator<Item = &CopyToContainer> {
67 | std::iter::empty()
68 | }
69 |
70 | /// Returns the [entrypoint](`https://docs.docker.com/reference/dockerfile/#entrypoint`) this image needs to be created with.
71 | fn entrypoint(&self) -> Option<&str> {
72 | None
73 | }
74 |
75 | /// Returns the [`CMD`](https://docs.docker.com/reference/dockerfile/#cmd) this image needs to be created with.
76 |     fn cmd(&self) -> impl IntoIterator<Item = impl Into<Cow<'_, str>>> {
77 |         std::iter::empty::<String>()
78 | }
79 |
80 |     /// Returns the ports that need to be exposed when a container is created.
81 | ///
82 | /// This method is useful when there is a need to expose some ports, but there is
83 | /// no `EXPOSE` instruction in the Dockerfile of an image.
84 | fn expose_ports(&self) -> &[ContainerPort] {
85 | &[]
86 | }
87 |
88 |     /// Returns the commands that need to be executed after a container is started, i.e. commands
89 | /// to be run in a running container.
90 | ///
91 |     /// Note that you can return an error from this method, for example if the container's state is unexpected.
92 | /// In this case, you can use `TestcontainersError::other` to wrap an arbitrary error.
93 | ///
94 | /// This method is useful when certain re-configuration is required after the start
95 |     /// of the container for it to be considered ready for use in tests.
96 | #[allow(unused_variables)]
97 |     fn exec_after_start(&self, cs: ContainerState) -> Result<Vec<ExecCommand>> {
98 | Ok(Default::default())
99 | }
100 |
101 | /// Returns commands that will be executed after the container has started, but before the
102 | /// [Image::ready_conditions] are awaited for.
103 | ///
104 | /// Use this when you, e.g., need to configure something based on the container's ports and host
105 | /// (for example an application that needs to know its own address).
106 | #[allow(unused_variables)]
107 |     fn exec_before_ready(&self, cs: ContainerState) -> Result<Vec<ExecCommand>> {
108 | Ok(Default::default())
109 | }
110 | }
111 |
112 | #[derive(Debug)]
113 | pub struct ContainerState {
114 | id: String,
115 | host: Host,
116 | ports: Ports,
117 | }
118 |
119 | impl ContainerState {
120 |     pub async fn from_container<I>(container: &ContainerAsync<I>) -> Result<Self>
121 | where
122 | I: Image,
123 | {
124 | Ok(Self {
125 | id: container.id().into(),
126 | host: container.get_host().await?,
127 | ports: container.ports().await?,
128 | })
129 | }
130 |
131 | pub fn host(&self) -> &Host {
132 | &self.host
133 | }
134 |
135 | /// Returns the host port for the given internal container's port (`IPv4`).
136 | ///
137 | /// Results in an error ([`TestcontainersError::PortNotExposed`]) if the port is not exposed.
138 |     pub fn host_port_ipv4(&self, internal_port: ContainerPort) -> Result<u16> {
139 | self.ports
140 | .map_to_host_port_ipv4(internal_port)
141 | .ok_or_else(|| TestcontainersError::PortNotExposed {
142 | id: self.id.clone(),
143 | port: internal_port,
144 | })
145 | }
146 |
147 | /// Returns the host port for the given internal container's port (`IPv6`).
148 | ///
149 | /// Results in an error ([`TestcontainersError::PortNotExposed`]) if the port is not exposed.
150 |     pub fn host_port_ipv6(&self, internal_port: ContainerPort) -> Result<u16> {
151 | self.ports
152 | .map_to_host_port_ipv6(internal_port)
153 | .ok_or_else(|| TestcontainersError::PortNotExposed {
154 | id: self.id.clone(),
155 | port: internal_port,
156 | })
157 | }
158 | }
159 |
--------------------------------------------------------------------------------
/testcontainers/src/core/image/exec.rs:
--------------------------------------------------------------------------------
1 | use crate::core::{CmdWaitFor, WaitFor};
2 |
3 | #[derive(Debug)]
4 | pub struct ExecCommand {
5 |     pub(crate) cmd: Vec<String>,
6 | pub(crate) cmd_ready_condition: CmdWaitFor,
7 | pub(crate) container_ready_conditions: Vec,
8 | }
9 |
10 | impl ExecCommand {
11 | /// Command to be executed
12 |     pub fn new(cmd: impl IntoIterator<Item = impl Into<String>>) -> Self {
13 | Self {
14 | cmd: cmd.into_iter().map(Into::into).collect(),
15 | cmd_ready_condition: CmdWaitFor::Nothing,
16 | container_ready_conditions: vec![],
17 | }
18 | }
19 |
20 | /// Conditions to be checked on related container
21 |     pub fn with_container_ready_conditions(mut self, ready_conditions: Vec<WaitFor>) -> Self {
22 | self.container_ready_conditions = ready_conditions;
23 | self
24 | }
25 |
26 | /// Conditions to be checked on executed command output
27 |     pub fn with_cmd_ready_condition(mut self, ready_conditions: impl Into<CmdWaitFor>) -> Self {
28 | self.cmd_ready_condition = ready_conditions.into();
29 | self
30 | }
31 | }
32 |
33 | impl Default for ExecCommand {
34 | fn default() -> Self {
35 |         Self::new(Vec::<String>::new())
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/testcontainers/src/core/logs.rs:
--------------------------------------------------------------------------------
1 | use std::{borrow::Cow, fmt, io};
2 |
3 | use bytes::Bytes;
4 | use futures::{stream::BoxStream, StreamExt};
5 | use memchr::memmem::Finder;
6 |
7 | pub mod consumer;
8 | pub(crate) mod stream;
9 |
10 | #[derive(Debug, Clone)]
11 | pub enum LogFrame {
12 | StdOut(Bytes),
13 | StdErr(Bytes),
14 | }
15 |
16 | /// Defines error cases when waiting for a message in a stream.
17 | #[derive(Debug, thiserror::Error)]
18 | pub enum WaitLogError {
19 | /// Indicates the stream ended before finding the log line you were looking for.
20 | /// Contains all the lines that were read for debugging purposes.
21 | #[error("End of stream reached before finding message: {:?}", display_bytes(.0))]
22 | EndOfStream(Vec),
23 | #[error(transparent)]
24 | Io(#[from] io::Error),
25 | }
26 |
27 | #[derive(Copy, Clone, Debug, parse_display::Display)]
28 | #[display(style = "lowercase")]
29 | pub enum LogSource {
30 | StdOut,
31 | StdErr,
32 | BothStd,
33 | }
34 |
35 | impl LogSource {
36 | pub(super) fn includes_stdout(self) -> bool {
37 | matches!(self, Self::StdOut | Self::BothStd)
38 | }
39 |
40 | pub(super) fn includes_stderr(self) -> bool {
41 | matches!(self, Self::StdErr | Self::BothStd)
42 | }
43 | }
44 |
45 | impl LogFrame {
46 | pub fn source(&self) -> LogSource {
47 | match self {
48 | LogFrame::StdOut(_) => LogSource::StdOut,
49 | LogFrame::StdErr(_) => LogSource::StdErr,
50 | }
51 | }
52 |
53 | pub fn bytes(&self) -> &Bytes {
54 | match self {
55 | LogFrame::StdOut(bytes) => bytes,
56 | LogFrame::StdErr(bytes) => bytes,
57 | }
58 | }
59 | }
60 |
61 | // TODO: extract caching functionality to a separate wrapper
62 | pub(crate) struct WaitingStreamWrapper {
63 |     inner: BoxStream<'static, Result<Bytes, io::Error>>,
64 |     cache: Vec<Result<Bytes, io::Error>>,
65 | enable_cache: bool,
66 | }
67 |
68 | impl WaitingStreamWrapper {
69 |     pub fn new(stream: BoxStream<'static, Result<Bytes, io::Error>>) -> Self {
70 | Self {
71 | inner: stream,
72 | cache: vec![],
73 | enable_cache: false,
74 | }
75 | }
76 |
77 | pub fn enable_cache(mut self) -> Self {
78 | self.enable_cache = true;
79 | self
80 | }
81 |
82 | pub(crate) async fn wait_for_message(
83 | &mut self,
84 | message: impl AsRef<[u8]>,
85 | times: usize,
86 | ) -> Result<(), WaitLogError> {
87 | let msg_finder = Finder::new(message.as_ref());
88 | let mut messages = Vec::new();
89 | let mut found_times: usize = 0;
90 | while let Some(message) = self.inner.next().await.transpose()? {
91 | messages.push(message.clone());
92 | if self.enable_cache {
93 | self.cache.push(Ok(message.clone()));
94 | }
95 |
96 | let find_iter = msg_finder.find_iter(message.as_ref());
97 | for _ in find_iter {
98 | found_times += 1; // can't overflow, because of check below
99 | if found_times == times {
100 | log::debug!(
101 | "Message found {times} times after comparing {} lines",
102 | messages.len()
103 | );
104 | return Ok(());
105 | }
106 | }
107 | }
108 |
109 | log::warn!(
110 | "Failed to find message '{}' {times} times after comparing {} lines.",
111 | String::from_utf8_lossy(message.as_ref()),
112 | messages.len()
113 | );
114 | Err(WaitLogError::EndOfStream(messages))
115 | }
116 |
117 |     pub(crate) fn into_inner(self) -> BoxStream<'static, Result<Bytes, io::Error>> {
118 | futures::stream::iter(self.cache).chain(self.inner).boxed()
119 | }
120 | }
121 |
122 | fn display_bytes(bytes: &[Bytes]) -> Vec<Cow<'_, str>> {
123 | bytes
124 | .iter()
125 | .map(|m| String::from_utf8_lossy(m.as_ref()))
126 |         .collect::<Vec<_>>()
127 | }
128 |
129 | impl fmt::Debug for WaitingStreamWrapper {
130 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
131 | f.debug_struct("LogStreamAsync").finish()
132 | }
133 | }
134 |
135 | #[cfg(test)]
136 | mod tests {
137 | use super::*;
138 |
139 | #[tokio::test]
140 | async fn given_logs_when_line_contains_message_should_find_it() {
141 | let _ = pretty_env_logger::try_init();
142 | let log_stream = || {
143 | WaitingStreamWrapper::new(Box::pin(futures::stream::iter([
144 | Ok(r"
145 | Message one
146 | Message two
147 | Message three
148 | Message three
149 | "
150 | .into()),
151 | Ok("Message three".into()),
152 | ])))
153 | };
154 |
155 | let result = log_stream().wait_for_message("Message one", 1).await;
156 | assert!(result.is_ok());
157 |
158 | let result = log_stream().wait_for_message("Message two", 2).await;
159 | assert!(result.is_err());
160 |
161 | let result = log_stream().wait_for_message("Message three", 1).await;
162 | assert!(result.is_ok());
163 |
164 | let result = log_stream().wait_for_message("Message three", 3).await;
165 | assert!(result.is_ok());
166 | }
167 | }
168 |
--------------------------------------------------------------------------------
/testcontainers/src/core/logs/consumer.rs:
--------------------------------------------------------------------------------
1 | use futures::{future::BoxFuture, FutureExt};
2 |
3 | use crate::core::logs::LogFrame;
4 |
5 | pub mod logging_consumer;
6 |
7 | /// Log consumer is a trait that allows consuming log frames.
8 | /// Consumers will be called for each log frame that is produced by the container for the whole lifecycle of the container.
9 | pub trait LogConsumer: Send + Sync {
10 | fn accept<'a>(&'a self, record: &'a LogFrame) -> BoxFuture<'a, ()>;
11 | }
12 |
13 | impl<F> LogConsumer for F
14 | where
15 | F: Fn(&LogFrame) + Send + Sync,
16 | {
17 | fn accept<'a>(&'a self, record: &'a LogFrame) -> BoxFuture<'a, ()> {
18 | // preferably to spawn blocking task
19 | async move {
20 | self(record);
21 | }
22 | .boxed()
23 | }
24 | }
25 |
--------------------------------------------------------------------------------
/testcontainers/src/core/logs/consumer/logging_consumer.rs:
--------------------------------------------------------------------------------
1 | use std::borrow::Cow;
2 |
3 | use futures::{future::BoxFuture, FutureExt};
4 |
5 | use crate::core::logs::{consumer::LogConsumer, LogFrame};
6 |
7 | /// A consumer that logs the output of the container with the [`log`] crate.
8 | ///
9 | /// By default, both standard out and standard error will be emitted at INFO level.
10 | #[derive(Debug)]
11 | pub struct LoggingConsumer {
12 | stdout_level: log::Level,
13 | stderr_level: log::Level,
14 |     prefix: Option<String>,
15 | }
16 |
17 | impl LoggingConsumer {
18 | /// Creates a new instance of the logging consumer.
19 | pub fn new() -> Self {
20 | Self {
21 | stdout_level: log::Level::Info,
22 | stderr_level: log::Level::Info,
23 | prefix: None,
24 | }
25 | }
26 |
27 | /// Sets the log level for standard out. By default, this is `INFO`.
28 | pub fn with_stdout_level(mut self, level: log::Level) -> Self {
29 | self.stdout_level = level;
30 | self
31 | }
32 |
33 | /// Sets the log level for standard error. By default, this is `INFO`.
34 | pub fn with_stderr_level(mut self, level: log::Level) -> Self {
35 | self.stderr_level = level;
36 | self
37 | }
38 |
39 | /// Sets a prefix to be added to each log message (space will be added between prefix and message).
40 |     pub fn with_prefix(mut self, prefix: impl Into<String>) -> Self {
41 | self.prefix = Some(prefix.into());
42 | self
43 | }
44 |
45 | fn format_message<'a>(&self, message: &'a str) -> Cow<'a, str> {
46 | // Remove trailing newlines
47 | let message = message.trim_end_matches(['\n', '\r']);
48 |
49 | if let Some(prefix) = &self.prefix {
50 | Cow::Owned(format!("{prefix} {message}"))
51 | } else {
52 | Cow::Borrowed(message)
53 | }
54 | }
55 | }
56 |
57 | impl Default for LoggingConsumer {
58 | fn default() -> Self {
59 | Self::new()
60 | }
61 | }
62 |
63 | impl LogConsumer for LoggingConsumer {
64 | fn accept<'a>(&'a self, record: &'a LogFrame) -> BoxFuture<'a, ()> {
65 | async move {
66 | match record {
67 | LogFrame::StdOut(bytes) => {
68 | log::log!(
69 | self.stdout_level,
70 | "{}",
71 | self.format_message(&String::from_utf8_lossy(bytes))
72 | );
73 | }
74 | LogFrame::StdErr(bytes) => {
75 | log::log!(
76 | self.stderr_level,
77 | "{}",
78 | self.format_message(&String::from_utf8_lossy(bytes))
79 | );
80 | }
81 | }
82 | }
83 | .boxed()
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
/testcontainers/src/core/logs/stream.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | io,
3 | pin::Pin,
4 | sync::Arc,
5 | task::{Context, Poll},
6 | };
7 |
8 | use bytes::Bytes;
9 | use futures::{stream::BoxStream, Stream, StreamExt, TryStreamExt};
10 | use tokio_stream::wrappers::UnboundedReceiverStream;
11 |
12 | use crate::core::logs::LogFrame;
13 |
14 | pub(crate) type RawLogStream = BoxStream<'static, Result<Bytes, io::Error>>;
15 |
16 | pin_project_lite::pin_project! {
17 | pub(crate) struct LogStream {
18 | #[pin]
19 |         inner: BoxStream<'static, Result<LogFrame, io::Error>>,
20 | }
21 | }
22 |
23 | impl LogStream {
24 |     pub fn new(stream: BoxStream<'static, Result<LogFrame, io::Error>>) -> Self {
25 | Self { inner: stream }
26 | }
27 |
28 | /// Filters the log stream to only include stdout messages.
29 | pub(crate) fn into_stdout(self) -> RawLogStream {
30 | self.inner
31 | .filter_map(|record| async move {
32 | match record {
33 | Ok(LogFrame::StdOut(bytes)) => Some(Ok(bytes)),
34 | Ok(LogFrame::StdErr(_)) => None,
35 | Err(e) => Some(Err(e)),
36 | }
37 | })
38 | .boxed()
39 | }
40 |
41 | /// Filters the log stream to only include stderr messages.
42 | pub(crate) fn into_stderr(self) -> RawLogStream {
43 | self.inner
44 | .filter_map(|record| async move {
45 | match record {
46 | Ok(LogFrame::StdErr(bytes)) => Some(Ok(bytes)),
47 | Ok(LogFrame::StdOut(_)) => None,
48 | Err(e) => Some(Err(e)),
49 | }
50 | })
51 | .boxed()
52 | }
53 |
54 |     /// Log stream with messages from both stdout and stderr.
55 | pub(crate) fn into_both_std(self) -> RawLogStream {
56 | self.inner
57 | .map_ok(|frame| match frame {
58 | LogFrame::StdErr(bytes) => bytes,
59 | LogFrame::StdOut(bytes) => bytes,
60 | })
61 | .boxed()
62 | }
63 |
64 | /// Splits the log stream into two streams, one for stdout and one for stderr.
65 | pub(crate) async fn split(self) -> (RawLogStream, RawLogStream) {
66 | let (stdout_tx, stdout_rx) = tokio::sync::mpsc::unbounded_channel();
67 | let (stderr_tx, stderr_rx) = tokio::sync::mpsc::unbounded_channel();
68 |
69 | tokio::spawn(async move {
70 | macro_rules! handle_error {
71 | ($res:expr) => {
72 | if let Err(err) = $res {
73 | log::debug!(
74 | "Receiver has been dropped, stop producing messages: {}",
75 | err
76 | );
77 | break;
78 | }
79 | };
80 | }
81 | let mut output = self;
82 | while let Some(chunk) = output.next().await {
83 | match chunk {
84 | Ok(LogFrame::StdOut(message)) => {
85 | handle_error!(stdout_tx.send(Ok(message)));
86 | }
87 | Ok(LogFrame::StdErr(message)) => {
88 | handle_error!(stderr_tx.send(Ok(message)));
89 | }
90 | Err(err) => {
91 | let err = Arc::new(err);
92 | handle_error!(stdout_tx.send(Err(io::Error::other(err.clone()))));
93 | handle_error!(stderr_tx.send(Err(io::Error::other(err))));
94 | }
95 | }
96 | }
97 | });
98 |
99 | let stdout = UnboundedReceiverStream::new(stdout_rx).boxed();
100 | let stderr = UnboundedReceiverStream::new(stderr_rx).boxed();
101 | (stdout, stderr)
102 | }
103 | }
104 |
105 | impl Stream for LogStream {
106 |     type Item = Result<LogFrame, io::Error>;
107 |
108 |     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
109 | let this = self.project();
110 | this.inner.poll_next(cx)
111 | }
112 | }
113 |
--------------------------------------------------------------------------------
/testcontainers/src/core/mounts.rs:
--------------------------------------------------------------------------------
1 | /// Represents a filesystem mount.
2 | /// For more information see [Docker Storage](https://docs.docker.com/storage/) documentation.
3 | #[derive(Debug, Clone)]
4 | pub struct Mount {
5 | access_mode: AccessMode,
6 | mount_type: MountType,
7 |     source: Option<String>,
8 |     target: Option<String>,
9 | }
10 |
11 | #[derive(parse_display::Display, Debug, Copy, Clone)]
12 | #[display(style = "snake_case")]
13 | pub enum MountType {
14 | Bind,
15 | Volume,
16 | Tmpfs,
17 | }
18 |
19 | #[derive(parse_display::Display, Debug, Copy, Clone)]
20 | pub enum AccessMode {
21 | #[display("ro")]
22 | ReadOnly,
23 | #[display("rw")]
24 | ReadWrite,
25 | }
26 |
27 | impl Mount {
28 | /// Creates a `bind-mount`.
29 | /// Can be used to mount a file or directory on the host system into a container.
30 | ///
31 | /// See [bind-mounts documentation](https://docs.docker.com/storage/bind-mounts/) for more information.
32 |     pub fn bind_mount(host_path: impl Into<String>, container_path: impl Into<String>) -> Self {
33 | Self {
34 | access_mode: AccessMode::ReadWrite,
35 | mount_type: MountType::Bind,
36 | source: Some(host_path.into()),
37 | target: Some(container_path.into()),
38 | }
39 | }
40 |
41 | /// Creates a named `volume`.
42 | /// Can be used to share data between containers or persist data on the host system.
43 | /// The volume isn't removed when the container is removed.
44 | ///
45 | /// See [volumes documentation](https://docs.docker.com/storage/volumes/) for more information.
46 |     pub fn volume_mount(name: impl Into<String>, container_path: impl Into<String>) -> Self {
47 | Self {
48 | access_mode: AccessMode::ReadWrite,
49 | mount_type: MountType::Volume,
50 | source: Some(name.into()),
51 | target: Some(container_path.into()),
52 | }
53 | }
54 |
55 | /// Creates a `tmpfs` mount.
56 | /// Can be used to mount a temporary filesystem in the container's memory.
57 | /// `tmpfs` mount is removed when the container is removed.
58 | ///
59 | /// See [tmpfs documentation](https://docs.docker.com/storage/tmpfs/) for more information.
60 |     pub fn tmpfs_mount(container_path: impl Into<String>) -> Self {
61 | Self {
62 | access_mode: AccessMode::ReadWrite,
63 | mount_type: MountType::Tmpfs,
64 | source: None,
65 | target: Some(container_path.into()),
66 | }
67 | }
68 |
69 | /// Sets the access mode for the mount.
70 | /// Default is `AccessMode::ReadWrite`.
71 | pub fn with_access_mode(mut self, access_mode: AccessMode) -> Self {
72 | self.access_mode = access_mode;
73 | self
74 | }
75 |
76 | /// Docker mount access mode.
77 | pub fn access_mode(&self) -> AccessMode {
78 | self.access_mode
79 | }
80 |
81 | /// Docker mount type.
82 | pub fn mount_type(&self) -> MountType {
83 | self.mount_type
84 | }
85 |
86 | /// Absolute path of a file, a directory or volume to mount on the host system.
87 | pub fn source(&self) -> Option<&str> {
88 | self.source.as_deref()
89 | }
90 |
91 | /// Absolute path of a file or directory to mount in the container.
92 | pub fn target(&self) -> Option<&str> {
93 | self.target.as_deref()
94 | }
95 | }
96 |
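A brief sketch of the three constructors and the access-mode builder. The `use` path assumes `Mount` and `AccessMode` are re-exported from `testcontainers::core` (they are defined in the module above), and the host paths and volume name are placeholders.

```rust
use testcontainers::core::{AccessMode, Mount};

fn example_mounts() -> Vec<Mount> {
    // Read-only bind mount of host fixtures into the container.
    let fixtures =
        Mount::bind_mount("/tmp/fixtures", "/fixtures").with_access_mode(AccessMode::ReadOnly);
    // Named volume that survives container removal.
    let data = Mount::volume_mount("test-data", "/var/lib/data");
    // In-memory scratch space, removed together with the container.
    let scratch = Mount::tmpfs_mount("/scratch");
    vec![fixtures, data, scratch]
}
```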
--------------------------------------------------------------------------------
/testcontainers/src/core/network.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 | collections::HashMap,
3 | fmt,
4 | sync::{Arc, OnceLock, Weak},
5 | };
6 |
7 | use tokio::sync::Mutex;
8 |
9 | use crate::core::{
10 | async_drop,
11 | client::{Client, ClientError},
12 | env,
13 | };
14 |
15 | pub(crate) static CREATED_NETWORKS: OnceLock<Mutex<HashMap<String, Weak<Network>>>> =
16 | OnceLock::new();
17 |
18 | fn created_networks() -> &'static Mutex<HashMap<String, Weak<Network>>> {
19 | CREATED_NETWORKS.get_or_init(|| Mutex::new(HashMap::new()))
20 | }
21 |
22 | pub(crate) struct Network {
23 | name: String,
24 | id: String,
25 |     client: Arc<Client>,
26 | }
27 |
28 | impl Network {
29 | pub(crate) async fn new(
30 |         name: impl Into<String>,
31 |         client: Arc<Client>,
32 |     ) -> Result<Option<Arc<Self>>, ClientError> {
33 | let name = name.into();
34 | let mut guard = created_networks().lock().await;
35 | let network = if let Some(network) = guard.get(&name).and_then(Weak::upgrade) {
36 | network
37 | } else {
38 | if client.network_exists(&name).await? {
39 |                 // The network already exists and was created outside of testcontainers
40 | return Ok(None);
41 | }
42 |
43 | let id = client.create_network(&name).await?;
44 |
45 | let created = Arc::new(Self {
46 | name: name.clone(),
47 | id,
48 | client,
49 | });
50 |
51 | guard.insert(name, Arc::downgrade(&created));
52 |
53 | created
54 | };
55 |
56 | Ok(Some(network))
57 | }
58 | }
59 |
60 | impl Drop for Network {
61 | fn drop(&mut self) {
62 | if self.client.config.command() == env::Command::Remove {
63 | let client = self.client.clone();
64 | let name = self.name.clone();
65 |
66 | let drop_task = async move {
67 | log::trace!("Drop was called for network {name}, cleaning up");
68 | let mut guard = created_networks().lock().await;
69 |
70 | // check the strong count under the lock to avoid any possible race-conditions.
71 | let is_network_in_use = guard
72 | .get(&name)
73 | .filter(|weak| weak.strong_count() > 0)
74 | .is_some();
75 |
76 | if is_network_in_use {
77 | log::trace!("Network {name} was not dropped because it is still in use");
78 | } else {
79 | guard.remove(&name);
80 | match client.remove_network(&name).await {
81 | Ok(_) => {
82 | log::trace!("Network {name} was successfully dropped");
83 | }
84 | Err(_) => {
85 | log::error!("Failed to remove network {name} on drop");
86 | }
87 | }
88 | }
89 | };
90 |
91 | async_drop::async_drop(drop_task);
92 | }
93 | }
94 | }
95 |
96 | impl fmt::Debug for Network {
97 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
98 | f.debug_struct("Network")
99 | .field("id", &self.id)
100 | .field("name", &self.name)
101 | .finish()
102 | }
103 | }
104 |
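The weak-reference bookkeeping above means a named network is created once, shared by every container that requests it, and removed once the last container holding it is dropped (given the default `remove` cleanup command). A hedged sketch, reusing `with_network` from `ImageExt` as in the `GenericImage` docs below; the image names are the test images from this repo.

```rust
use testcontainers::{runners::AsyncRunner, GenericImage, ImageExt};

async fn share_a_network() -> anyhow::Result<()> {
    // Both containers join the same user-defined network.
    let server = GenericImage::new("simple_web_server", "latest")
        .with_network("my-test-net")
        .start()
        .await?;
    let client = GenericImage::new("alpine", "latest")
        .with_network("my-test-net")
        .start()
        .await?;

    // Once both containers (and therefore both handles to the network) are dropped,
    // the network itself is removed.
    drop(client);
    drop(server);
    Ok(())
}
```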
--------------------------------------------------------------------------------
/testcontainers/src/core/wait/cmd_wait.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 |
3 | use bytes::Bytes;
4 |
5 | #[derive(Debug, Eq, PartialEq, Clone)]
6 | pub enum CmdWaitFor {
7 | /// An empty condition. Useful for default cases or fallbacks.
8 | Nothing,
9 | /// Wait for a message on the stdout stream of the command's output.
10 | StdOutMessage { message: Bytes },
11 | /// Wait for a message on the stderr stream of the command's output.
12 | StdErrMessage { message: Bytes },
13 | /// Wait for a certain amount of time.
14 | Duration { length: Duration },
15 | /// Wait for the command to exit and optionally check the exit code.
16 |     Exit { code: Option<i64> },
17 | }
18 |
19 | impl CmdWaitFor {
20 | /// Wait for a message on the stdout stream of the command's output.
21 | pub fn message_on_stdout(message: impl AsRef<[u8]>) -> Self {
22 | Self::StdOutMessage {
23 | message: Bytes::from(message.as_ref().to_vec()),
24 | }
25 | }
26 |
27 | /// Wait for a message on the stderr stream of the command's output.
28 | pub fn message_on_stderr(message: impl AsRef<[u8]>) -> Self {
29 | Self::StdErrMessage {
30 | message: Bytes::from(message.as_ref().to_vec()),
31 | }
32 | }
33 |
34 | /// Wait for the command to exit (regardless of the exit code).
35 | pub fn exit() -> Self {
36 | Self::Exit { code: None }
37 | }
38 |
39 | /// Wait for the command's exit code to be equal to the provided one.
40 | pub fn exit_code(code: i64) -> Self {
41 | Self::Exit { code: Some(code) }
42 | }
43 |
44 | /// Wait for a certain amount of time.
45 | pub fn duration(duration: Duration) -> Self {
46 | Self::Duration { length: duration }
47 | }
48 |
49 | /// Wait for a certain amount of time (in seconds).
50 | pub fn seconds(secs: u64) -> Self {
51 | Self::duration(Duration::from_secs(secs))
52 | }
53 |
54 | /// Wait for a certain amount of time (in millis)
55 | pub fn millis(millis: u64) -> Self {
56 | Self::duration(Duration::from_millis(millis))
57 | }
58 | }
59 |
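These conditions are consumed by `ExecCommand::with_cmd_ready_condition`, as the exec tests further down show. A small sketch of the typical combinations, using the import paths from those tests:

```rust
use testcontainers::core::{CmdWaitFor, ExecCommand};

fn example_exec_commands() -> (ExecCommand, ExecCommand) {
    // Ready once the command exits with status 0.
    let wait_for_exit =
        ExecCommand::new(["sleep", "2"]).with_cmd_ready_condition(CmdWaitFor::exit_code(0));
    // Ready once a marker line shows up on stdout, regardless of exit status.
    let wait_for_marker =
        ExecCommand::new(["ls"]).with_cmd_ready_condition(CmdWaitFor::message_on_stdout("foo"));
    (wait_for_exit, wait_for_marker)
}
```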
--------------------------------------------------------------------------------
/testcontainers/src/core/wait/exit_strategy.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 |
3 | use crate::{
4 | core::{client::Client, error::WaitContainerError, wait::WaitStrategy},
5 | ContainerAsync, Image,
6 | };
7 |
8 | #[derive(Debug, Clone)]
9 | pub struct ExitWaitStrategy {
10 |     expected_code: Option<i64>,
11 | poll_interval: Duration,
12 | }
13 |
14 | impl ExitWaitStrategy {
15 | /// Create a new `ExitWaitStrategy` with default settings.
16 | pub fn new() -> Self {
17 | Self {
18 | expected_code: None,
19 | poll_interval: Duration::from_millis(100),
20 | }
21 | }
22 |
23 | /// Set the poll interval for checking the container's status.
24 | pub fn with_poll_interval(mut self, poll_interval: Duration) -> Self {
25 | self.poll_interval = poll_interval;
26 | self
27 | }
28 |
29 | /// Set the expected exit code of the container.
30 | pub fn with_exit_code(mut self, expected_code: i64) -> Self {
31 | self.expected_code = Some(expected_code);
32 | self
33 | }
34 | }
35 |
36 | impl WaitStrategy for ExitWaitStrategy {
37 |     async fn wait_until_ready<I: Image>(
38 | self,
39 | client: &Client,
40 |         container: &ContainerAsync<I>,
41 | ) -> crate::core::error::Result<()> {
42 | loop {
43 | let container_state = client
44 | .inspect(container.id())
45 | .await?
46 | .state
47 | .ok_or(WaitContainerError::StateUnavailable)?;
48 |
49 | let is_running = container_state.running.unwrap_or_default();
50 |
51 | if is_running {
52 | tokio::time::sleep(self.poll_interval).await;
53 | continue;
54 | }
55 |
56 | if let Some(expected_code) = self.expected_code {
57 | let exit_code = container_state.exit_code;
58 | if exit_code != Some(expected_code) {
59 | return Err(WaitContainerError::UnexpectedExitCode {
60 | expected: expected_code,
61 | actual: exit_code,
62 | }
63 | .into());
64 | }
65 | }
66 | break;
67 | }
68 | Ok(())
69 | }
70 | }
71 |
72 | impl Default for ExitWaitStrategy {
73 | fn default() -> Self {
74 | Self::new()
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/testcontainers/src/core/wait/health_strategy.rs:
--------------------------------------------------------------------------------
1 | use std::time::Duration;
2 |
3 | use bollard::models::HealthStatusEnum::*;
4 |
5 | use crate::{
6 | core::{client::Client, error::WaitContainerError, wait::WaitStrategy},
7 | ContainerAsync, Image,
8 | };
9 |
10 | #[derive(Debug, Clone)]
11 | pub struct HealthWaitStrategy {
12 | poll_interval: Duration,
13 | }
14 |
15 | impl HealthWaitStrategy {
16 | /// Create a new `HealthWaitStrategy` with default settings.
17 | pub fn new() -> Self {
18 | Self {
19 | poll_interval: Duration::from_millis(100),
20 | }
21 | }
22 |
23 | /// Set the poll interval for checking the container's health status.
24 | pub fn with_poll_interval(mut self, poll_interval: Duration) -> Self {
25 | self.poll_interval = poll_interval;
26 | self
27 | }
28 | }
29 |
30 | impl WaitStrategy for HealthWaitStrategy {
31 |     async fn wait_until_ready<I: Image>(
32 | self,
33 | client: &Client,
34 |         container: &ContainerAsync<I>,
35 | ) -> crate::core::error::Result<()> {
36 | loop {
37 | let health_status = client
38 | .inspect(container.id())
39 | .await?
40 | .state
41 | .ok_or(WaitContainerError::StateUnavailable)?
42 | .health
43 | .and_then(|health| health.status);
44 |
45 | match health_status {
46 | Some(HEALTHY) => break,
47 | None | Some(EMPTY) | Some(NONE) => Err(
48 | WaitContainerError::HealthCheckNotConfigured(container.id().to_string()),
49 | )?,
50 | Some(UNHEALTHY) => Err(WaitContainerError::Unhealthy)?,
51 | Some(STARTING) => {
52 | tokio::time::sleep(self.poll_interval).await;
53 | }
54 | }
55 | }
56 | Ok(())
57 | }
58 | }
59 |
60 | impl Default for HealthWaitStrategy {
61 | fn default() -> Self {
62 | Self::new()
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/testcontainers/src/core/wait/log_strategy.rs:
--------------------------------------------------------------------------------
1 | use bytes::Bytes;
2 |
3 | use crate::{
4 | core::{
5 | client::Client,
6 | error::WaitContainerError,
7 | logs::{LogSource, WaitingStreamWrapper},
8 | wait::WaitStrategy,
9 | },
10 | ContainerAsync, Image,
11 | };
12 |
13 | #[derive(Debug, Clone)]
14 | pub struct LogWaitStrategy {
15 | source: LogSource,
16 | message: Bytes,
17 | times: usize,
18 | }
19 |
20 | impl LogWaitStrategy {
21 | /// Create a new [`LogWaitStrategy`] that waits for the given message to appear in the standard output logs.
22 | /// Shortcut for `LogWaitStrategy::new(LogSource::StdOut, message)`.
23 | pub fn stdout(message: impl AsRef<[u8]>) -> Self {
24 | Self::new(LogSource::StdOut, message)
25 | }
26 |
27 | /// Create a new [`LogWaitStrategy`] that waits for the given message to appear in the standard error logs.
28 | /// Shortcut for `LogWaitStrategy::new(LogSource::StdErr, message)`.
29 | pub fn stderr(message: impl AsRef<[u8]>) -> Self {
30 | Self::new(LogSource::StdErr, message)
31 | }
32 |
33 | /// Create a new [`LogWaitStrategy`] that waits for the given message to appear in either
34 | /// standard output logs or standard error logs.
35 | /// Shortcut for `LogWaitStrategy::new(LogSource::BothStd, message)`.
36 | pub fn stdout_or_stderr(message: impl AsRef<[u8]>) -> Self {
37 | Self::new(LogSource::BothStd, message)
38 | }
39 |
40 | /// Create a new `LogWaitStrategy` with the given log source and message.
41 | /// The message is expected to appear in the logs exactly once by default.
42 | pub fn new(source: LogSource, message: impl AsRef<[u8]>) -> Self {
43 | Self {
44 | source,
45 | message: Bytes::from(message.as_ref().to_vec()),
46 | times: 1,
47 | }
48 | }
49 |
50 | /// Set the number of times the message should appear in the logs.
51 | pub fn with_times(mut self, times: usize) -> Self {
52 | self.times = times;
53 | self
54 | }
55 | }
56 |
57 | impl WaitStrategy for LogWaitStrategy {
58 |     async fn wait_until_ready<I: Image>(
59 | self,
60 | client: &Client,
61 |         container: &ContainerAsync<I>,
62 | ) -> crate::core::error::Result<()> {
63 | let log_stream = match self.source {
64 | LogSource::StdOut => client.stdout_logs(container.id(), true),
65 | LogSource::StdErr => client.stderr_logs(container.id(), true),
66 | LogSource::BothStd => client.both_std_logs(container.id(), true),
67 | };
68 |
69 | WaitingStreamWrapper::new(log_stream)
70 | .wait_for_message(self.message, self.times)
71 | .await
72 | .map_err(WaitContainerError::from)?;
73 |
74 | Ok(())
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/testcontainers/src/core/wait/mod.rs:
--------------------------------------------------------------------------------
1 | use std::{env::var, fmt::Debug, time::Duration};
2 |
3 | pub use exit_strategy::ExitWaitStrategy;
4 | pub use health_strategy::HealthWaitStrategy;
5 | #[cfg(feature = "http_wait")]
6 | #[cfg_attr(docsrs, doc(cfg(feature = "http_wait")))]
7 | pub use http_strategy::HttpWaitStrategy;
8 | pub use log_strategy::LogWaitStrategy;
9 |
10 | use crate::{
11 | core::{client::Client, logs::LogSource},
12 | ContainerAsync, Image,
13 | };
14 |
15 | pub(crate) mod cmd_wait;
16 | pub(crate) mod exit_strategy;
17 | pub(crate) mod health_strategy;
18 | #[cfg(feature = "http_wait")]
19 | pub(crate) mod http_strategy;
20 | pub(crate) mod log_strategy;
21 |
22 | pub(crate) trait WaitStrategy {
23 |     async fn wait_until_ready<I: Image>(
24 | self,
25 | client: &Client,
26 |         container: &ContainerAsync<I>,
27 | ) -> crate::core::error::Result<()>;
28 | }
29 |
30 | /// Represents a condition that needs to be met before a container is considered ready.
31 | #[derive(Debug, Clone)]
32 | pub enum WaitFor {
33 | /// An empty condition. Useful for default cases or fallbacks.
34 | Nothing,
35 | /// Wait for a certain message to appear in the container's logs.
36 | Log(LogWaitStrategy),
37 | /// Wait for a certain amount of time.
38 | Duration { length: Duration },
39 | /// Wait for the container's status to become `healthy`.
40 | Healthcheck(HealthWaitStrategy),
41 | /// Wait for a certain HTTP response.
42 | #[cfg(feature = "http_wait")]
43 | #[cfg_attr(docsrs, doc(cfg(feature = "http_wait")))]
44 |     Http(Box<HttpWaitStrategy>),
45 | /// Wait for the container to exit.
46 | Exit(ExitWaitStrategy),
47 | }
48 |
49 | impl WaitFor {
50 | /// Wait for the message to appear on the container's stdout.
51 | pub fn message_on_stdout(message: impl AsRef<[u8]>) -> WaitFor {
52 | Self::log(LogWaitStrategy::new(LogSource::StdOut, message))
53 | }
54 |
55 | /// Wait for the message to appear on the container's stderr.
56 | pub fn message_on_stderr(message: impl AsRef<[u8]>) -> WaitFor {
57 | Self::log(LogWaitStrategy::new(LogSource::StdErr, message))
58 | }
59 |
60 | /// Wait for the message to appear on either container's stdout or stderr.
61 | pub fn message_on_either_std(message: impl AsRef<[u8]>) -> WaitFor {
62 | Self::log(LogWaitStrategy::new(LogSource::BothStd, message))
63 | }
64 |
65 |     /// Wait for a log message according to the given [`LogWaitStrategy`].
66 | pub fn log(log_strategy: LogWaitStrategy) -> WaitFor {
67 | WaitFor::Log(log_strategy)
68 | }
69 |
70 | /// Wait for the container to become healthy.
71 | ///
72 | /// If you need to customize polling interval, use [`HealthWaitStrategy::with_poll_interval`]
73 | /// and create the strategy [`WaitFor::Healthcheck`] manually.
74 | pub fn healthcheck() -> WaitFor {
75 | WaitFor::Healthcheck(HealthWaitStrategy::default())
76 | }
77 |
78 | /// Wait for a certain HTTP response.
79 | #[cfg(feature = "http_wait")]
80 | #[cfg_attr(docsrs, doc(cfg(feature = "http_wait")))]
81 | pub fn http(http_strategy: HttpWaitStrategy) -> WaitFor {
82 | WaitFor::Http(Box::new(http_strategy))
83 | }
84 |
85 | /// Wait for the container to exit.
86 | pub fn exit(exit_strategy: ExitWaitStrategy) -> WaitFor {
87 | WaitFor::Exit(exit_strategy)
88 | }
89 |
90 | /// Wait for a certain amount of seconds.
91 | ///
92 | /// Generally, it's not recommended to use this method, as it's better to wait for a specific condition to be met.
93 | pub fn seconds(length: u64) -> WaitFor {
94 | WaitFor::Duration {
95 | length: Duration::from_secs(length),
96 | }
97 | }
98 |
99 | /// Wait for a certain amount of millis.
100 | ///
101 | /// Generally, it's not recommended to use this method, as it's better to wait for a specific condition to be met.
102 | pub fn millis(length: u64) -> WaitFor {
103 | WaitFor::Duration {
104 | length: Duration::from_millis(length),
105 | }
106 | }
107 |
108 | /// Wait for a certain amount of millis specified in the environment variable.
109 | ///
110 | /// Generally, it's not recommended to use this method, as it's better to wait for a specific condition to be met.
111 | pub fn millis_in_env_var(name: &'static str) -> WaitFor {
112 | let additional_sleep_period = var(name).map(|value| value.parse());
113 |
114 | (|| {
115 | let length = additional_sleep_period.ok()?.ok()?;
116 |
117 | Some(WaitFor::Duration {
118 | length: Duration::from_millis(length),
119 | })
120 | })()
121 | .unwrap_or(WaitFor::Nothing)
122 | }
123 | }
124 |
125 | #[cfg(feature = "http_wait")]
126 | #[cfg_attr(docsrs, doc(cfg(feature = "http_wait")))]
127 | impl From<HttpWaitStrategy> for WaitFor {
128 | fn from(value: HttpWaitStrategy) -> Self {
129 | Self::Http(Box::new(value))
130 | }
131 | }
132 |
133 | impl WaitStrategy for WaitFor {
134 |     async fn wait_until_ready<I: Image>(
135 | self,
136 | client: &Client,
137 |         container: &ContainerAsync<I>,
138 | ) -> crate::core::error::Result<()> {
139 | match self {
140 | WaitFor::Log(strategy) => strategy.wait_until_ready(client, container).await?,
141 | WaitFor::Duration { length } => {
142 | tokio::time::sleep(length).await;
143 | }
144 | WaitFor::Healthcheck(strategy) => {
145 | strategy.wait_until_ready(client, container).await?;
146 | }
147 | #[cfg(feature = "http_wait")]
148 | WaitFor::Http(strategy) => {
149 | strategy.wait_until_ready(client, container).await?;
150 | }
151 | WaitFor::Exit(strategy) => {
152 | strategy.wait_until_ready(client, container).await?;
153 | }
154 | WaitFor::Nothing => {}
155 | }
156 | Ok(())
157 | }
158 | }
159 |
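Every condition returned by `Image::ready_conditions` (or added via `with_wait_for`) must be satisfied, in order, before the container is considered started. A short sketch combining the strategies defined above, with paths as used by the tests below:

```rust
use testcontainers::{
    core::{wait::LogWaitStrategy, WaitFor},
    GenericImage,
};

fn ready_when_fully_started() -> GenericImage {
    GenericImage::new("simple_web_server", "latest")
        // Two "server is ready" lines must appear on stdout...
        .with_wait_for(WaitFor::log(
            LogWaitStrategy::stdout("server is ready").with_times(2),
        ))
        // ...followed by a short grace period.
        .with_wait_for(WaitFor::millis(100))
}
```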
--------------------------------------------------------------------------------
/testcontainers/src/images/generic.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 | core::{ports::ContainerPort, WaitFor},
3 | Image,
4 | };
5 |
6 | /// A configurable image from which a [`Container`] or [`ContainerAsync`] can be started.
7 | ///
8 | /// The various methods on this struct allow for configuring the resulting container using the
9 | /// builder pattern. Further configuration is available through the [`ImageExt`] extension trait.
10 | /// Make sure to invoke the configuration methods on [`GenericImage`] first, before those from
11 | /// [`ImageExt`].
12 | ///
13 | /// For example:
14 | ///
15 | /// ```
16 | /// use testcontainers::{
17 | /// core::{IntoContainerPort, WaitFor}, runners::AsyncRunner, GenericImage, ImageExt
18 | /// };
19 | ///
20 | /// # /*
21 | /// #[tokio::test]
22 | /// # */
23 | /// async fn test_redis() {
24 | /// let container = GenericImage::new("redis", "7.2.4")
25 | /// .with_exposed_port(6379.tcp())
26 | /// .with_wait_for(WaitFor::message_on_stdout("Ready to accept connections"))
27 | /// .with_network("bridge")
28 | /// .with_env_var("DEBUG", "1")
29 | /// .start()
30 | /// .await
31 | /// .expect("Redis started");
32 | /// # container.stop().await.unwrap();
33 | /// }
34 | /// # let rt = tokio::runtime::Runtime::new().unwrap();
35 | /// # rt.block_on(test_redis());
36 | /// ```
37 | ///
38 | /// The extension traits [`SyncRunner`] and [`AsyncRunner`] each provide the method `start()` to
39 | /// start the container once it is configured.
40 | ///
41 | /// [`Container`]: crate::Container
42 | /// [`ContainerAsync`]: crate::ContainerAsync
43 | /// [`ImageExt`]: crate::core::ImageExt
44 | /// [`SyncRunner`]: crate::runners::SyncRunner
45 | /// [`AsyncRunner`]: crate::runners::AsyncRunner
46 | #[must_use]
47 | #[derive(Debug, Clone)]
48 | pub struct GenericImage {
49 | name: String,
50 | tag: String,
51 |     wait_for: Vec<WaitFor>,
52 |     entrypoint: Option<String>,
53 |     exposed_ports: Vec<ContainerPort>,
54 | }
55 |
56 | impl GenericImage {
57 |     pub fn new<S: Into<String>>(name: S, tag: S) -> GenericImage {
58 | Self {
59 | name: name.into(),
60 | tag: tag.into(),
61 | wait_for: Vec::new(),
62 | entrypoint: None,
63 | exposed_ports: Vec::new(),
64 | }
65 | }
66 |
67 | pub fn with_wait_for(mut self, wait_for: WaitFor) -> Self {
68 | self.wait_for.push(wait_for);
69 | self
70 | }
71 |
72 | pub fn with_entrypoint(mut self, entrypoint: &str) -> Self {
73 | self.entrypoint = Some(entrypoint.to_string());
74 | self
75 | }
76 |
77 | pub fn with_exposed_port(mut self, port: ContainerPort) -> Self {
78 | self.exposed_ports.push(port);
79 | self
80 | }
81 | }
82 |
83 | impl Image for GenericImage {
84 | fn name(&self) -> &str {
85 | &self.name
86 | }
87 |
88 | fn tag(&self) -> &str {
89 | &self.tag
90 | }
91 |
92 |     fn ready_conditions(&self) -> Vec<WaitFor> {
93 | self.wait_for.clone()
94 | }
95 |
96 | fn entrypoint(&self) -> Option<&str> {
97 | self.entrypoint.as_deref()
98 | }
99 |
100 | fn expose_ports(&self) -> &[ContainerPort] {
101 | &self.exposed_ports
102 | }
103 | }
104 |
105 | #[cfg(test)]
106 | mod tests {
107 | use super::*;
108 | use crate::ImageExt;
109 |
110 | #[test]
111 | fn should_return_env_vars() {
112 | let image = GenericImage::new("hello-world", "latest")
113 | .with_env_var("one-key", "one-value")
114 | .with_env_var("two-key", "two-value");
115 |
116 | let mut env_vars = image.env_vars();
117 | let (first_key, first_value) = env_vars.next().unwrap();
118 | let (second_key, second_value) = env_vars.next().unwrap();
119 |
120 | assert_eq!(first_key, "one-key");
121 | assert_eq!(first_value, "one-value");
122 | assert_eq!(second_key, "two-key");
123 | assert_eq!(second_value, "two-value");
124 | }
125 | }
126 |
--------------------------------------------------------------------------------
/testcontainers/src/images/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod generic;
2 |
--------------------------------------------------------------------------------
/testcontainers/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![deny(missing_debug_implementations)]
2 | #![warn(rust_2018_idioms)]
3 | #![cfg_attr(docsrs, deny(rustdoc::broken_intra_doc_links))]
4 | #![cfg_attr(docsrs, feature(doc_cfg))]
5 | #![forbid(unsafe_code)]
6 |
7 | //! A library for integration testing against docker containers from within Rust.
8 | //!
9 | //! This crate is the official Rust language fork of [`Testcontainers`][tc_website].
10 | //!
11 | //! Tests should be self-contained and isolated. While this is usually easy for unit-tests, integration-tests typically require a more complex environment.
12 | //! The testcontainers ecosystem facilitates self-contained and isolated integration tests. It allows you to easily spin up Docker containers from within your tests and removes them afterwards.
13 | //!
14 | //! A very typical use case for testcontainers is integration testing of persistence layers. These tests require an actual database to be present. Using testcontainers, your tests can spin up database containers themselves, without the need for any other setup.
15 | //!
16 | //! # Main benefits
17 | //!
18 | //! - Run integration tests in parallel (because each test sets up its own environment)
19 | //! - Run integration tests the same way you run unit tests (`cargo test` and you are fine)
20 | //!
21 | //! # Usage
22 | //!
23 | //! Unsurprisingly, working with testcontainers is very similar to working with Docker itself.
24 | //!
25 | //! First, you need to define the [`Image`] that you want to run, and then simply call the `start` method on it from either the [`AsyncRunner`] or [`SyncRunner`] trait.
26 | //! This will return a [`ContainerAsync`] or [`Container`], respectively.
27 | //! Containers implement `Drop`. As soon as they go out of scope, the underlying docker container is removed.
28 | //! To disable this behavior, you can set the `TESTCONTAINERS_COMMAND` environment variable to `keep`.
29 | //!
30 | //! See examples in the corresponding runner ([`AsyncRunner`] and [`SyncRunner`])
31 | //!
32 | //! ### Docker host resolution
33 | //!
34 | //! You can change the configuration of the Docker host used by the client in two ways:
35 | //! - environment variables
36 | //! - `~/.testcontainers.properties` file (a Java properties file, enabled by the `properties-config` feature)
37 | //!
38 | //! ##### The host is resolved in the following order:
39 | //!
40 | //! 1. Docker host from the `tc.host` property in the `~/.testcontainers.properties` file.
41 | //! 2. `DOCKER_HOST` environment variable.
42 | //! 3. Docker host from the "docker.host" property in the `~/.testcontainers.properties` file.
43 | //! 4. Read the default Docker socket path, without the unix schema. E.g. `/var/run/docker.sock`.
44 | //! 5. Read the rootless Docker socket path, checking in the following alternative locations:
45 | //! 1. `${XDG_RUNTIME_DIR}/.docker/run/docker.sock`.
46 | //! 2. `${HOME}/.docker/run/docker.sock`.
47 | //! 3. `${HOME}/.docker/desktop/docker.sock`.
48 | //! 6. The default Docker socket, including the schema, is returned if none of the above are set.
49 | //!
50 | //! ### Docker authentication
51 | //!
52 | //! Sometimes the Docker images you use live in a private Docker registry.
53 | //! For that reason, Testcontainers for Rust gives you the ability to read the Docker configuration and retrieve the authentication for a given registry.
54 | //! Configuration is fetched in the following order:
55 | //!
56 | //! 1. `DOCKER_AUTH_CONFIG` environment variable, unmarshalling the string value from its JSON representation and using it as the Docker config.
57 | //! 2. `DOCKER_CONFIG` environment variable, as an alternative path to the directory containing Docker `config.json` file.
58 | //! 3. else it will load the default Docker config file, which lives in the user's home, e.g. `~/.docker/config.json`.
59 | //!
60 | //! # Ecosystem
61 | //!
62 | //! `testcontainers` is the core crate that provides an API for working with containers in a test environment.
63 | //! The only image that is provided by the core crate is the [`GenericImage`], which is a simple wrapper around any docker image.
64 | //!
65 | //! However, it does not provide ready-to-use modules; you can implement your own [`Image`]s using the library directly or use the community-supported [`testcontainers-modules`].
66 | //!
67 | //! # Usage in production code
68 | //!
69 | //! Although nothing inherently prevents testcontainers from being used in production code, the library itself was not designed with that in mind.
70 | //!
71 | //! [tc_website]: https://testcontainers.org
72 | //! [`Docker`]: https://docker.com
73 | //! [`AsyncRunner`]: runners::AsyncRunner
74 | //! [`SyncRunner`]: runners::SyncRunner
75 | //! [`testcontainers-modules`]: https://crates.io/crates/testcontainers-modules
76 |
77 | pub mod core;
78 | #[cfg(feature = "blocking")]
79 | #[cfg_attr(docsrs, doc(cfg(feature = "blocking")))]
80 | pub use crate::core::Container;
81 | #[cfg(feature = "reusable-containers")]
82 | pub use crate::core::ReuseDirective;
83 | pub use crate::core::{
84 | copy::{CopyDataSource, CopyToContainer, CopyToContainerError},
85 | error::TestcontainersError,
86 | ContainerAsync, ContainerRequest, Image, ImageExt,
87 | };
88 |
89 | #[cfg(feature = "watchdog")]
90 | #[cfg_attr(docsrs, doc(cfg(feature = "watchdog")))]
91 | pub(crate) mod watchdog;
92 |
93 | /// All available Docker images.
94 | mod images;
95 | pub use images::generic::GenericImage;
96 |
97 | pub mod runners;
98 |
99 | /// Re-export of the `bollard` crate to allow direct interaction with the Docker API.
100 | /// This also solves potential version conflicts between `testcontainers` and `bollard` deps.
101 | pub use bollard;
102 | pub use bollard_stubs;
103 |
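As a complement to the crate-level docs above (and mirroring the `HelloWorld` image used by the integration tests below), here is a minimal sketch of a custom [`Image`] plus an async start; it is illustrative rather than canonical.

```rust
use testcontainers::{core::WaitFor, runners::AsyncRunner, Image};

#[derive(Debug, Default)]
struct HelloWorld;

impl Image for HelloWorld {
    fn name(&self) -> &str {
        "hello-world"
    }

    fn tag(&self) -> &str {
        "latest"
    }

    fn ready_conditions(&self) -> Vec<WaitFor> {
        vec![WaitFor::message_on_stdout("Hello from Docker!")]
    }
}

async fn run_hello_world() -> anyhow::Result<()> {
    // The container is removed when `_container` is dropped,
    // unless TESTCONTAINERS_COMMAND=keep is set.
    let _container = HelloWorld.start().await?;
    Ok(())
}
```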
--------------------------------------------------------------------------------
/testcontainers/src/runners/mod.rs:
--------------------------------------------------------------------------------
1 | pub(crate) mod async_runner;
2 | #[cfg(feature = "blocking")]
3 | pub(crate) mod sync_runner;
4 |
5 | pub use self::async_runner::AsyncRunner;
6 | #[cfg(feature = "blocking")]
7 | #[cfg_attr(docsrs, doc(cfg(feature = "blocking")))]
8 | pub use self::sync_runner::SyncRunner;
9 |
--------------------------------------------------------------------------------
/testcontainers/src/watchdog.rs:
--------------------------------------------------------------------------------
1 | //! Watchdog that stops and removes containers on SIGTERM, SIGINT, or SIGQUIT
2 | //!
3 | //! By default, the watchdog is disabled. It can be enabled via the `watchdog` feature.
4 | //! Note that it runs in a background thread and may panic.
5 |
6 | use std::{collections::BTreeSet, sync::Mutex, thread};
7 |
8 | use conquer_once::Lazy;
9 | use signal_hook::{
10 | consts::{SIGINT, SIGQUIT, SIGTERM},
11 | iterator::Signals,
12 | };
13 |
14 | use crate::core::client::Client;
15 |
16 | static WATCHDOG: Lazy<Mutex<Watchdog>> = Lazy::new(|| {
17 | thread::spawn(move || {
18 | let runtime = tokio::runtime::Builder::new_current_thread()
19 | .enable_all()
20 | .build()
21 | .expect("failed to start watchdog runtime in background");
22 |
23 | runtime.block_on(async {
24 | let signal_docker = Client::lazy_client()
25 | .await
26 | .expect("failed to create docker client");
27 | let mut signals = Signals::new([SIGTERM, SIGINT, SIGQUIT])
28 | .expect("failed to register signal handler");
29 |
30 | for signal in &mut signals {
31 | for container_id in WATCHDOG
32 | .lock()
33 | .map(|s| s.containers.clone())
34 | .unwrap_or_default()
35 | {
36 | signal_docker
37 | .stop(&container_id, None)
38 | .await
39 | .expect("failed to stop container");
40 | signal_docker
41 | .rm(&container_id)
42 | .await
43 | .expect("failed to remove container")
44 | }
45 |
46 | let _ = signal_hook::low_level::emulate_default_handler(signal);
47 | }
48 | });
49 | });
50 |
51 | Mutex::new(Watchdog::default())
52 | });
53 |
54 | #[derive(Default)]
55 | pub(crate) struct Watchdog {
56 |     containers: BTreeSet<String>,
57 | }
58 |
59 | /// Register a container for observation
60 | pub(crate) fn register(container_id: String) {
61 | WATCHDOG
62 | .lock()
63 | .expect("failed to access watchdog")
64 | .containers
65 | .insert(container_id);
66 | }
67 | /// Unregister a container from observation
68 | pub(crate) fn unregister(container_id: &str) {
69 | WATCHDOG
70 | .lock()
71 | .expect("failed to access watchdog")
72 | .containers
73 | .remove(container_id);
74 | }
75 |
--------------------------------------------------------------------------------
/testcontainers/tests/async_runner.rs:
--------------------------------------------------------------------------------
1 | use std::time::{Duration, Instant};
2 |
3 | use bollard::Docker;
4 | use testcontainers::{
5 | core::{
6 | logs::{consumer::logging_consumer::LoggingConsumer, LogFrame},
7 | wait::{ExitWaitStrategy, LogWaitStrategy},
8 | CmdWaitFor, ExecCommand, WaitFor,
9 | },
10 | runners::AsyncRunner,
11 | GenericImage, Image, ImageExt,
12 | };
13 | use tokio::io::AsyncReadExt;
14 |
15 | #[derive(Debug, Default)]
16 | pub struct HelloWorld;
17 |
18 | impl Image for HelloWorld {
19 | fn name(&self) -> &str {
20 | "hello-world"
21 | }
22 |
23 | fn tag(&self) -> &str {
24 | "latest"
25 | }
26 |
27 |     fn ready_conditions(&self) -> Vec<WaitFor> {
28 | vec![
29 | WaitFor::message_on_stdout("Hello from Docker!"),
30 | WaitFor::exit(ExitWaitStrategy::new().with_exit_code(0)),
31 | ]
32 | }
33 | }
34 |
35 | #[tokio::test(flavor = "multi_thread")]
36 | async fn bollard_can_run_hello_world_with_multi_thread() -> anyhow::Result<()> {
37 | let _ = pretty_env_logger::try_init();
38 |
39 | let _container = HelloWorld.start().await?;
40 | Ok(())
41 | }
42 |
43 | async fn cleanup_hello_world_image() -> anyhow::Result<()> {
44 | let docker = Docker::connect_with_local_defaults()?;
45 |
46 | futures::future::join_all(
47 | docker
48 |             .list_images::<String>(None)
49 | .await?
50 | .into_iter()
51 | .flat_map(|image| image.repo_tags.into_iter())
52 | .filter(|tag| tag.starts_with("hello-world"))
53 | .map(|tag| async {
54 | let tag_captured = tag;
55 | docker.remove_image(&tag_captured, None, None).await
56 | }),
57 | )
58 | .await;
59 | Ok(())
60 | }
61 |
62 | #[tokio::test]
63 | async fn bollard_pull_missing_image_hello_world() -> anyhow::Result<()> {
64 | let _ = pretty_env_logger::try_init();
65 | cleanup_hello_world_image().await?;
66 | let _container = HelloWorld.start().await?;
67 | Ok(())
68 | }
69 |
70 | #[tokio::test]
71 | async fn explicit_call_to_pull_missing_image_hello_world() -> anyhow::Result<()> {
72 | let _ = pretty_env_logger::try_init();
73 | cleanup_hello_world_image().await?;
74 | let _container = HelloWorld.pull_image().await?.start().await?;
75 | Ok(())
76 | }
77 |
78 | #[tokio::test]
79 | async fn start_containers_in_parallel() -> anyhow::Result<()> {
80 | let _ = pretty_env_logger::try_init();
81 |
82 | let image = GenericImage::new("hello-world", "latest").with_wait_for(WaitFor::seconds(2));
83 |
84 | // Make sure the image is already pulled, since otherwise pulling it may cause the deadline
85 | // below to be exceeded.
86 | let _ = image.clone().pull_image().await?;
87 |
88 | let run_1 = image.clone().start();
89 | let run_2 = image.clone().start();
90 | let run_3 = image.clone().start();
91 | let run_4 = image.start();
92 |
93 | let run_all = futures::future::join_all(vec![run_1, run_2, run_3, run_4]);
94 |
95 | // if we truly run all containers in parallel, we should finish < 5 sec
96 | // actually, we should be finishing in 2 seconds but that is too unstable
97 | // a sequential start would mean 8 seconds, hence 5 seconds proves some form of parallelism
98 | let timeout = Duration::from_secs(5);
99 | let _containers = tokio::time::timeout(timeout, run_all).await?;
100 | Ok(())
101 | }
102 |
103 | #[tokio::test]
104 | async fn async_run_exec() -> anyhow::Result<()> {
105 | let _ = pretty_env_logger::try_init();
106 |
107 | let image = GenericImage::new("simple_web_server", "latest")
108 | .with_wait_for(WaitFor::message_on_stderr("server will be listening to"))
109 | .with_wait_for(WaitFor::log(
110 | LogWaitStrategy::stdout("server is ready").with_times(2),
111 | ))
112 | .with_wait_for(WaitFor::seconds(1));
113 | let container = image.start().await?;
114 |
115 | // exit regardless of the code
116 | let before = Instant::now();
117 | let res = container
118 | .exec(ExecCommand::new(["sleep", "2"]).with_cmd_ready_condition(CmdWaitFor::exit()))
119 | .await?;
120 | assert_eq!(res.exit_code().await?, Some(0));
121 | assert!(
122 | before.elapsed().as_secs() > 1,
123 | "should have waited for 2 seconds"
124 | );
125 |
126 | // exit code, it waits for result
127 | let before = Instant::now();
128 | let res = container
129 | .exec(ExecCommand::new(["sleep", "2"]).with_cmd_ready_condition(CmdWaitFor::exit_code(0)))
130 | .await?;
131 | assert_eq!(res.exit_code().await?, Some(0));
132 | assert!(
133 | before.elapsed().as_secs() > 1,
134 | "should have waited for 2 seconds"
135 | );
136 |
137 | // stdout
138 | let mut res = container
139 | .exec(
140 | ExecCommand::new(["ls"]).with_cmd_ready_condition(CmdWaitFor::message_on_stdout("foo")),
141 | )
142 | .await?;
143 |
144 | let stdout = String::from_utf8(res.stdout_to_vec().await?)?;
145 | assert!(stdout.contains("foo"), "stdout must contain 'foo'");
146 |
147 | // stdout and stderr readers
148 | let mut res = container
149 | .exec(ExecCommand::new([
150 | "/bin/bash",
151 | "-c",
152 | "echo 'stdout 1' >&1 && echo 'stderr 1' >&2 \
153 | && echo 'stderr 2' >&2 && echo 'stdout 2' >&1",
154 | ]))
155 | .await?;
156 |
157 | let mut stdout = String::new();
158 | res.stdout().read_to_string(&mut stdout).await?;
159 | assert_eq!(stdout, "stdout 1\nstdout 2\n");
160 |
161 | let mut stderr = String::new();
162 | res.stderr().read_to_string(&mut stderr).await?;
163 | assert_eq!(stderr, "stderr 1\nstderr 2\n");
164 | Ok(())
165 | }
166 |
167 | #[cfg(feature = "http_wait")]
168 | #[tokio::test]
169 | async fn async_wait_for_http() -> anyhow::Result<()> {
170 | use reqwest::StatusCode;
171 | use testcontainers::core::{wait::HttpWaitStrategy, IntoContainerPort};
172 |
173 | let _ = pretty_env_logger::try_init();
174 |
175 | let image = GenericImage::new("simple_web_server", "latest")
176 | .with_exposed_port(80.tcp())
177 | .with_wait_for(WaitFor::http(
178 | HttpWaitStrategy::new("/").with_expected_status_code(StatusCode::OK),
179 | ));
180 | let _container = image.start().await?;
181 | Ok(())
182 | }
183 |
184 | #[tokio::test]
185 | async fn async_run_exec_fails_due_to_unexpected_code() -> anyhow::Result<()> {
186 | let _ = pretty_env_logger::try_init();
187 |
188 | let image = GenericImage::new("simple_web_server", "latest")
189 | .with_wait_for(WaitFor::message_on_stdout("server is ready"))
190 | .with_wait_for(WaitFor::seconds(1));
191 | let container = image.start().await?;
192 |
193 | // exit code, it waits for result
194 | let res = container
195 | .exec(
196 | ExecCommand::new(vec!["ls".to_string()])
197 | .with_cmd_ready_condition(CmdWaitFor::exit_code(-1)),
198 | )
199 | .await;
200 | assert!(res.is_err());
201 | Ok(())
202 | }
203 |
204 | #[tokio::test]
205 | async fn async_run_with_log_consumer() -> anyhow::Result<()> {
206 | let _ = pretty_env_logger::try_init();
207 |
208 | let (tx, rx) = std::sync::mpsc::sync_channel(1);
209 | let _container = HelloWorld
210 | .with_log_consumer(move |frame: &LogFrame| {
211 | // notify when the expected message is found
212 | if String::from_utf8_lossy(frame.bytes()) == "Hello from Docker!\n" {
213 | let _ = tx.send(());
214 | }
215 | })
216 | .with_log_consumer(LoggingConsumer::new().with_stderr_level(log::Level::Error))
217 | .start()
218 | .await?;
219 | rx.recv()?; // notification from consumer
220 | Ok(())
221 | }
222 |
223 | #[tokio::test]
224 | async fn async_copy_bytes_to_container() -> anyhow::Result<()> {
225 | let container = GenericImage::new("alpine", "latest")
226 | .with_wait_for(WaitFor::seconds(2))
227 | .with_copy_to("/tmp/somefile", "foobar".to_string().into_bytes())
228 | .with_cmd(vec!["cat", "/tmp/somefile"])
229 | .start()
230 | .await?;
231 |
232 | let mut out = String::new();
233 | container.stdout(false).read_to_string(&mut out).await?;
234 |
235 | assert!(out.contains("foobar"));
236 |
237 | Ok(())
238 | }
239 |
240 | #[tokio::test]
241 | async fn async_copy_files_to_container() -> anyhow::Result<()> {
242 | let temp_dir = temp_dir::TempDir::new()?;
243 | let f1 = temp_dir.child("foo.txt");
244 |
245 | let sub_dir = temp_dir.child("subdir");
246 | std::fs::create_dir(&sub_dir)?;
247 | let mut f2 = sub_dir.clone();
248 | f2.push("bar.txt");
249 |
250 | std::fs::write(&f1, "foofoofoo")?;
251 | std::fs::write(&f2, "barbarbar")?;
252 |
253 | let container = GenericImage::new("alpine", "latest")
254 | .with_wait_for(WaitFor::seconds(2))
255 | .with_copy_to("/tmp/somefile", f1)
256 | .with_copy_to("/", temp_dir.path())
257 | .with_cmd(vec!["cat", "/tmp/somefile", "&&", "cat", "/subdir/bar.txt"])
258 | .start()
259 | .await?;
260 |
261 | let mut out = String::new();
262 | container.stdout(false).read_to_string(&mut out).await?;
263 |
264 | println!("{}", out);
265 | assert!(out.contains("foofoofoo"));
266 | assert!(out.contains("barbarbar"));
267 |
268 | Ok(())
269 | }
270 |
--------------------------------------------------------------------------------
/testcontainers/tests/dual_stack_host_ports.rs:
--------------------------------------------------------------------------------
1 | #![cfg(feature = "blocking")]
2 |
3 | use std::net::{Ipv6Addr, TcpListener};
4 |
5 | use testcontainers::{
6 | core::{IntoContainerPort, WaitFor},
7 | runners::SyncRunner,
8 | GenericImage,
9 | };
10 |
11 | /// Test the functionality of exposing container ports over both IPv4 and IPv6.
12 | #[test]
13 | fn test_ipv4_ipv6_host_ports() -> anyhow::Result<()> {
14 | let _ = pretty_env_logger::try_init();
15 |
16 | let image = GenericImage::new("simple_web_server", "latest")
17 | .with_wait_for(WaitFor::message_on_stdout("server is ready"))
18 | .with_wait_for(WaitFor::seconds(1));
19 |
20 | // Run one container, and check what ephemeral ports it uses. Perform test HTTP requests to
21 | // both bound ports.
22 | let first_container = image.clone().start()?;
23 | let first_ipv4_port = first_container.get_host_port_ipv4(80.tcp())?;
24 | let first_ipv6_port = first_container.get_host_port_ipv6(80.tcp())?;
25 | assert_eq!(
26 | "foo",
27 | reqwest::blocking::get(format!("http://127.0.0.1:{first_ipv4_port}"))?.text()?,
28 | );
29 | assert_eq!(
30 | "foo",
31 | reqwest::blocking::get(format!("http://[::1]:{first_ipv6_port}"))?.text()?,
32 | );
33 |
34 | // Bind to several subsequent ports in the ephemeral range, only on IPv6. This should cause
35 | // Docker's IPv4 and IPv6 port allocation to no longer be in lock step, (if they were before)
36 | // as the IPv6 allocator would have to skip the ports we grabbed.
37 | let mut sockets = Vec::new();
38 | for port in first_ipv6_port + 1..first_ipv6_port + 9 {
39 | if let Ok(socket) = TcpListener::bind((Ipv6Addr::LOCALHOST, port)) {
40 | sockets.push(socket);
41 | }
42 | }
43 |
44 | // Run a second container, and repeat test HTTP requests with it. This confirms that handling
45 | // of both IPv4 and IPv6 host port bindings is correct, because at this point,
46 | // `second_ipv4_port` and `second_ipv6_port` are very unlikely to be the same.
47 | let second_container = image.start()?;
48 | let second_ipv4_port = second_container.get_host_port_ipv4(80.tcp())?;
49 | let second_ipv6_port = second_container.get_host_port_ipv6(80.tcp())?;
50 | assert_eq!(
51 | "foo",
52 | reqwest::blocking::get(format!("http://127.0.0.1:{second_ipv4_port}"))?.text()?,
53 | );
54 | assert_eq!(
55 | "foo",
56 | reqwest::blocking::get(format!("http://[::1]:{second_ipv6_port}"))?.text()?,
57 | );
58 | Ok(())
59 | }
60 |
--------------------------------------------------------------------------------
/testcontainers/tests/sync_runner.rs:
--------------------------------------------------------------------------------
1 | #![cfg(feature = "blocking")]
2 |
3 | use std::time::Instant;
4 |
5 | use testcontainers::{
6 | core::{
7 | logs::{consumer::logging_consumer::LoggingConsumer, LogFrame},
8 | wait::LogWaitStrategy,
9 | CmdWaitFor, ExecCommand, Host, IntoContainerPort, WaitFor,
10 | },
11 | runners::SyncRunner,
12 | *,
13 | };
14 |
15 | fn get_server_container(msg: Option<WaitFor>) -> GenericImage {
16 | let msg = msg.unwrap_or(WaitFor::message_on_stdout("server is ready"));
17 | GenericImage::new("simple_web_server", "latest").with_wait_for(msg)
18 | }
19 |
20 | #[derive(Debug, Default)]
21 | pub struct HelloWorld;
22 |
23 | impl Image for HelloWorld {
24 | fn name(&self) -> &str {
25 | "hello-world"
26 | }
27 |
28 | fn tag(&self) -> &str {
29 | "latest"
30 | }
31 |
32 |     fn ready_conditions(&self) -> Vec<WaitFor> {
33 | vec![WaitFor::message_on_stdout("Hello from Docker!")]
34 | }
35 | }
36 |
37 | #[test]
38 | fn sync_can_run_hello_world() -> anyhow::Result<()> {
39 | let _ = pretty_env_logger::try_init();
40 | let _container = HelloWorld.start()?;
41 | Ok(())
42 | }
43 |
44 | #[cfg(feature = "http_wait")]
45 | #[test]
46 | fn sync_wait_for_http() -> anyhow::Result<()> {
47 | use crate::core::wait::HttpWaitStrategy;
48 |
49 | let _ = pretty_env_logger::try_init();
50 | use reqwest::StatusCode;
51 |
52 | let image = GenericImage::new("simple_web_server", "latest")
53 | .with_exposed_port(80.tcp())
54 | .with_wait_for(WaitFor::http(
55 | HttpWaitStrategy::new("/").with_expected_status_code(StatusCode::OK),
56 | ));
57 | let _container = image.start()?;
58 | Ok(())
59 | }
60 |
61 | #[test]
62 | fn generic_image_with_custom_entrypoint() -> anyhow::Result<()> {
63 | let generic = get_server_container(None);
64 |
65 | let node = generic.start()?;
66 | let port = node.get_host_port_ipv4(80.tcp())?;
67 | assert_eq!(
68 | "foo",
69 | reqwest::blocking::get(format!("http://{}:{port}", node.get_host()?))?.text()?
70 | );
71 |
72 | let generic = get_server_container(None).with_entrypoint("./bar");
73 |
74 | let node = generic.start()?;
75 | let port = node.get_host_port_ipv4(80.tcp())?;
76 | assert_eq!(
77 | "bar",
78 | reqwest::blocking::get(format!("http://{}:{port}", node.get_host()?))?.text()?
79 | );
80 | Ok(())
81 | }
82 |
83 | #[test]
84 | fn generic_image_exposed_ports() -> anyhow::Result<()> {
85 | let _ = pretty_env_logger::try_init();
86 |
87 | let target_port = 8080;
88 |
89 | // This server does not EXPOSE ports in its image.
90 | let generic_server = GenericImage::new("no_expose_port", "latest")
91 | .with_wait_for(WaitFor::message_on_stdout("listening on 0.0.0.0:8080"))
92 | // Explicitly expose the port, which otherwise would not be available.
93 | .with_exposed_port(target_port.tcp());
94 |
95 | let node = generic_server.start()?;
96 | let port = node.get_host_port_ipv4(target_port.tcp())?;
97 | assert!(reqwest::blocking::get(format!("http://127.0.0.1:{port}"))?
98 | .status()
99 | .is_success());
100 | Ok(())
101 | }
102 |
103 | #[test]
104 | fn generic_image_running_with_extra_hosts_added() -> anyhow::Result<()> {
105 | let server_1 = get_server_container(None);
106 | let node = server_1.start()?;
107 | let port = node.get_host_port_ipv4(80.tcp())?;
108 |
109 | let msg = WaitFor::message_on_stdout("foo");
110 | let server_2 = GenericImage::new("curlimages/curl", "latest")
111 | .with_wait_for(msg)
112 | .with_entrypoint("curl");
113 |
114 | // Override hosts for server_2 adding
115 | // custom-host as an alias for localhost
116 | let server_2 = server_2
117 | .with_cmd([format!("http://custom-host:{port}")])
118 | .with_host("custom-host", Host::HostGateway);
119 |
120 | server_2.start()?;
121 | Ok(())
122 | }
123 |
124 | #[test]
125 | fn generic_image_port_not_exposed() -> anyhow::Result<()> {
126 | let _ = pretty_env_logger::try_init();
127 |
128 | let target_port = 8080;
129 |
130 | // This image binds to 0.0.0.0:8080, does not EXPOSE ports in its dockerfile.
131 | let generic_server = GenericImage::new("no_expose_port", "latest")
132 | .with_wait_for(WaitFor::message_on_stdout("listening on 0.0.0.0:8080"));
133 | let node = generic_server.start()?;
134 |
135 | // Without exposing the port with `with_exposed_port()`, we cannot get a mapping to it.
136 | let res = node.get_host_port_ipv4(target_port.tcp());
137 | assert!(res.is_err());
138 | Ok(())
139 | }
140 |
141 | #[test]
142 | fn start_multiple_containers() -> anyhow::Result<()> {
143 | let _ = pretty_env_logger::try_init();
144 |
145 | let image = GenericImage::new("hello-world", "latest").with_wait_for(WaitFor::seconds(2));
146 |
147 | let _container_1 = image.clone().start()?;
148 | let _container_2 = image.clone().start()?;
149 | let _container_3 = image.start()?;
150 | Ok(())
151 | }
152 |
153 | #[test]
154 | fn sync_run_exec() -> anyhow::Result<()> {
155 | let _ = pretty_env_logger::try_init();
156 |
157 | let image = GenericImage::new("simple_web_server", "latest")
158 | .with_wait_for(WaitFor::log(
159 | LogWaitStrategy::stdout("server is ready").with_times(2),
160 | ))
161 | .with_wait_for(WaitFor::seconds(1));
162 | let container = image.start()?;
163 |
164 | // exit regardless of the code
165 | let before = Instant::now();
166 | let res = container
167 | .exec(ExecCommand::new(["sleep", "2"]).with_cmd_ready_condition(CmdWaitFor::exit()))?;
168 | assert_eq!(res.exit_code()?, Some(0));
169 | assert!(
170 | before.elapsed().as_secs() > 1,
171 | "should have waited for 2 seconds"
172 | );
173 |
174 | // exit code, it waits for result
175 | let before = Instant::now();
176 | let res = container.exec(
177 | ExecCommand::new(["sleep", "2"]).with_cmd_ready_condition(CmdWaitFor::exit_code(0)),
178 | )?;
179 | assert_eq!(res.exit_code()?, Some(0));
180 | assert!(
181 | before.elapsed().as_secs() > 1,
182 | "should have waited for 2 seconds"
183 | );
184 |
185 | // stdout
186 | let mut res = container.exec(
187 | ExecCommand::new(vec!["ls".to_string()])
188 | .with_cmd_ready_condition(CmdWaitFor::message_on_stdout("foo")),
189 | )?;
190 | let stdout = String::from_utf8(res.stdout_to_vec()?)?;
191 | assert!(stdout.contains("foo"), "stdout must contain 'foo'");
192 |
193 | // stdout and stderr to vec
194 | let mut res = container.exec(ExecCommand::new([
195 | "/bin/bash",
196 | "-c",
197 | "echo 'stdout 1' >&1 && echo 'stderr 1' >&2 \
198 | && echo 'stderr 2' >&2 && echo 'stdout 2' >&1",
199 | ]))?;
200 |
201 | let stdout = String::from_utf8(res.stdout_to_vec()?)?;
202 | assert_eq!(stdout, "stdout 1\nstdout 2\n");
203 |
204 | let stderr = String::from_utf8(res.stderr_to_vec()?)?;
205 | assert_eq!(stderr, "stderr 1\nstderr 2\n");
206 |
207 | // stdout and stderr readers
208 | let mut res = container.exec(ExecCommand::new([
209 | "/bin/bash",
210 | "-c",
211 | "echo 'stdout 1' >&1 && echo 'stderr 1' >&2 \
212 | && echo 'stderr 2' >&2 && echo 'stdout 2' >&1",
213 | ]))?;
214 |
215 | let mut stdout = String::new();
216 | res.stdout().read_to_string(&mut stdout)?;
217 | assert_eq!(stdout, "stdout 1\nstdout 2\n");
218 |
219 | let mut stderr = String::new();
220 | res.stderr().read_to_string(&mut stderr)?;
221 | assert_eq!(stderr, "stderr 1\nstderr 2\n");
222 | Ok(())
223 | }
224 |
225 | #[test]
226 | fn sync_run_with_log_consumer() -> anyhow::Result<()> {
227 | let _ = pretty_env_logger::try_init();
228 |
229 | let (tx, rx) = std::sync::mpsc::sync_channel(1);
230 | let _container = HelloWorld
231 | .with_log_consumer(move |frame: &LogFrame| {
232 | // notify when the expected message is found
233 | if String::from_utf8_lossy(frame.bytes()) == "Hello from Docker!\n" {
234 | let _ = tx.send(());
235 | }
236 | })
237 | .with_log_consumer(LoggingConsumer::new().with_stderr_level(log::Level::Error))
238 | .start()?;
239 | rx.recv()?; // notification from consumer
240 | Ok(())
241 | }
242 |
243 | #[test]
244 | fn sync_copy_bytes_to_container() -> anyhow::Result<()> {
245 | let _ = pretty_env_logger::try_init();
246 |
247 | let container = GenericImage::new("alpine", "latest")
248 | .with_wait_for(WaitFor::seconds(2))
249 | .with_copy_to("/tmp/somefile", "foobar".to_string().into_bytes())
250 | .with_cmd(vec!["cat", "/tmp/somefile"])
251 | .start()?;
252 |
253 | let mut out = String::new();
254 | container.stdout(false).read_to_string(&mut out)?;
255 |
256 | assert!(out.contains("foobar"));
257 |
258 | Ok(())
259 | }
260 |
261 | #[test]
262 | fn sync_copy_files_to_container() -> anyhow::Result<()> {
263 | let temp_dir = temp_dir::TempDir::new()?;
264 | let f1 = temp_dir.child("foo.txt");
265 |
266 | let sub_dir = temp_dir.child("subdir");
267 | std::fs::create_dir(&sub_dir)?;
268 | let mut f2 = sub_dir.clone();
269 | f2.push("bar.txt");
270 |
271 | std::fs::write(&f1, "foofoofoo")?;
272 | std::fs::write(&f2, "barbarbar")?;
273 |
274 | let container = GenericImage::new("alpine", "latest")
275 | .with_wait_for(WaitFor::seconds(2))
276 | .with_copy_to("/tmp/somefile", f1)
277 | .with_copy_to("/", temp_dir.path())
278 | .with_cmd(vec!["cat", "/tmp/somefile", "&&", "cat", "/subdir/bar.txt"])
279 | .start()?;
280 |
281 | let mut out = String::new();
282 | container.stdout(false).read_to_string(&mut out)?;
283 |
284 | println!("{}", out);
285 | assert!(out.contains("foofoofoo"));
286 | assert!(out.contains("barbarbar"));
287 |
288 | Ok(())
289 | }
290 |
--------------------------------------------------------------------------------
/testimages/.dockerignore:
--------------------------------------------------------------------------------
1 | .dockerignore
2 | build.rs
3 | README.md
4 | src/dockerfiles/
5 | target/
6 | tests/
7 |
--------------------------------------------------------------------------------
/testimages/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "testimages"
3 | version = "0.1.0"
4 | edition = "2021"
5 | publish = false
6 |
7 | [dependencies]
8 | anyhow = "1.0.56"
9 | axum = { version = "0.8.1", features = ["http2"] }
10 | tokio = { version = "1.17.0", features = ["full"] }
11 |
12 | [build-dependencies]
13 | anyhow = "1.0.56"
14 |
--------------------------------------------------------------------------------
/testimages/README.md:
--------------------------------------------------------------------------------
1 | # testimages
2 |
3 | This crate co-locates the source of the Docker images used in tests with the
4 | testcontainers-rs library itself. This lets us build small, lightweight images that exercise the
5 | specific scenarios we need and avoid unnecessary external dependencies.
6 |
7 | ## Adding New Images
8 |
9 | Images are implemented in Rust. New images can be added by:
10 |
11 | 1. Creating a new binary in `src/bin/`
12 | 2. Creating a corresponding dockerfile in `src/dockerfiles/`. Ideally, these dockerfiles should
13 | build small, minimal images.
14 | 3. Adding a `docker build` invocation to `build.rs` so the new image is actually built (see the sketch below).
15 |
16 | See the `no_expose_port` image as an example.
17 |
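18 | For illustration, the `build.rs` step for a hypothetical `my_image` binary with a matching
19 | `src/dockerfiles/my_image.dockerfile` might look roughly like this. It is a sketch only: the
20 | names are placeholders, and it reuses the `cwd` variable and the `Command`/`bail` imports that
21 | are already present in `build.rs`.
22 | 
23 | ```rust
24 | // Hypothetical: build `my_image:latest` from src/dockerfiles/my_image.dockerfile.
25 | let output = Command::new("docker")
26 |     .arg("build")
27 |     .arg("--file")
28 |     .arg(format!("{cwd}/src/dockerfiles/my_image.dockerfile"))
29 |     .arg("--force-rm")
30 |     .arg("--tag")
31 |     .arg("my_image:latest")
32 |     .arg(".")
33 |     .output()?;
34 | if !output.status.success() {
35 |     eprintln!("stderr: {}", String::from_utf8(output.stderr)?);
36 |     bail!("unable to build my_image:latest");
37 | }
38 | eprintln!("Built my_image:latest");
39 | ```
40 | 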
--------------------------------------------------------------------------------
/testimages/build.rs:
--------------------------------------------------------------------------------
1 | use std::{env, process::Command};
2 |
3 | use anyhow::{bail, Result};
4 |
5 | fn main() -> Result<()> {
6 | let cwd = env::var("CARGO_MANIFEST_DIR")?;
7 |
8 | // Build the test images in the repository
9 | let output = Command::new("docker")
10 | .arg("build")
11 | .arg("--file")
12 | .arg(format!("{cwd}/src/dockerfiles/no_expose_port.dockerfile"))
13 | .arg("--force-rm")
14 | .arg("--tag")
15 | .arg("no_expose_port:latest")
16 | .arg(".")
17 | .output()?;
18 | if !output.status.success() {
19 | eprintln!("stderr: {}", String::from_utf8(output.stderr)?);
20 | bail!("unable to build no_expose_port:latest");
21 | }
22 | eprintln!("Built no_expose_port:latest");
23 |
24 | let output = Command::new("docker")
25 | .arg("build")
26 | .arg("--file")
27 | .arg(format!(
28 | "{cwd}/src/dockerfiles/simple_web_server.dockerfile"
29 | ))
30 | .arg("--force-rm")
31 | .arg("--tag")
32 | .arg("simple_web_server:latest")
33 | .arg(".")
34 | .output()?;
35 | if !output.status.success() {
36 | eprintln!("stderr: {}", String::from_utf8(output.stderr)?);
37 | bail!("unable to build simple_web_server:latest");
38 | }
39 | eprintln!("Built simple_web_server:latest");
40 |
41 | // trigger recompilation when dockerfiles are modified
42 | println!("cargo:rerun-if-changed=src/dockerfiles");
43 | println!("cargo:rerun-if-changed=.dockerignore");
44 |
45 | Ok(())
46 | }
47 |
--------------------------------------------------------------------------------
/testimages/src/bin/no_expose_port.rs:
--------------------------------------------------------------------------------
1 | //! A simple hello-world server.
2 | use std::net::SocketAddr;
3 |
4 | use axum::{routing::get, Router};
5 | use tokio::signal;
6 |
7 | #[tokio::main]
8 | async fn main() {
9 | // build our application with a route
10 | let app = Router::new().route("/", get(handler));
11 |
12 | // run it
13 | let addr = SocketAddr::from(([0, 0, 0, 0], 8080));
14 | let listener = tokio::net::TcpListener::bind(addr).await.unwrap();
15 | println!("listening on {addr}");
16 | axum::serve(listener, app.into_make_service())
17 | .with_graceful_shutdown(shutdown_signal())
18 | .await
19 | .unwrap();
20 | }
21 |
22 | async fn handler() -> &'static str {
23 | "Hello, World!"
24 | }
25 |
26 | async fn shutdown_signal() {
27 | let ctrl_c = async {
28 | signal::ctrl_c()
29 | .await
30 | .expect("failed to install Ctrl+C handler");
31 | };
32 |
33 | #[cfg(unix)]
34 | let terminate = async {
35 | signal::unix::signal(signal::unix::SignalKind::terminate())
36 | .expect("failed to install signal handler")
37 | .recv()
38 | .await;
39 | };
40 |
41 | #[cfg(not(unix))]
42 | let terminate = std::future::pending::<()>();
43 |
44 | tokio::select! {
45 | _ = ctrl_c => {},
46 | _ = terminate => {},
47 | }
48 |
49 | println!("signal received, starting graceful shutdown");
50 | }
51 |
--------------------------------------------------------------------------------
/testimages/src/bin/simple_web_server.rs:
--------------------------------------------------------------------------------
1 | //! A basic HTTP server, to test overriding a container's ENTRYPOINT.
2 | use std::{env, net::SocketAddr, path::PathBuf};
3 |
4 | use axum::{routing::get, Router};
5 | use tokio::signal;
6 |
7 | #[tokio::main]
8 | async fn main() {
9 | // build our application with a route
10 | let app = Router::new().route("/", get(handler));
11 |
12 | // run it
13 | let addr = SocketAddr::from(([0, 0, 0, 0], 80));
14 | let listener = tokio::net::TcpListener::bind(addr).await.unwrap();
15 | eprintln!("server will be listening to the port 80");
16 | println!("server is ready");
17 | println!("server is ready"); // duplicate line to test `times` parameter of `WaitFor::Log`
18 | axum::serve(listener, app.into_make_service())
19 | .with_graceful_shutdown(shutdown_signal())
20 | .await
21 | .unwrap();
22 | }
23 |
24 | async fn handler() -> String {
25 | let argv_0: PathBuf = env::args_os().next().unwrap().into();
26 | argv_0.file_name().unwrap().to_str().unwrap().to_string()
27 | }
28 |
29 | async fn shutdown_signal() {
30 | let ctrl_c = async {
31 | signal::ctrl_c()
32 | .await
33 | .expect("failed to install Ctrl+C handler");
34 | };
35 |
36 | #[cfg(unix)]
37 | let terminate = async {
38 | signal::unix::signal(signal::unix::SignalKind::terminate())
39 | .expect("failed to install signal handler")
40 | .recv()
41 | .await;
42 | };
43 |
44 | #[cfg(not(unix))]
45 | let terminate = std::future::pending::<()>();
46 |
47 | tokio::select! {
48 | _ = ctrl_c => {},
49 | _ = terminate => {},
50 | }
51 |
52 | println!("signal received, starting graceful shutdown");
53 | }
54 |
--------------------------------------------------------------------------------
/testimages/src/dockerfiles/no_expose_port.dockerfile:
--------------------------------------------------------------------------------
1 | FROM lukemathwalker/cargo-chef:latest-rust-latest AS chef
2 | WORKDIR /app
3 | RUN apt update && apt install lld clang -y
4 |
5 | FROM chef AS planner
6 | COPY . .
7 | # Compute a lock-like file for our project
8 | RUN cargo chef prepare --recipe-path recipe.json
9 |
10 | FROM chef AS builder
11 | COPY --from=planner /app/recipe.json recipe.json
12 | # Build our project dependencies, not our application!
13 | RUN cargo chef cook --release --recipe-path recipe.json
14 | COPY . .
15 | # Build our project
16 | RUN cargo build -v --release --bin no_expose_port
17 |
18 | FROM debian:bookworm-slim AS runtime
19 | WORKDIR /app
20 | RUN apt-get update -y \
21 | && apt-get install -y --no-install-recommends openssl \
22 | # Clean up
23 | && apt-get autoremove -y \
24 | && apt-get clean -y \
25 | && rm -rf /var/lib/apt/lists/*
26 | COPY --from=builder /app/target/release/no_expose_port no_expose_port
27 | ENTRYPOINT ["./no_expose_port"]
28 |
--------------------------------------------------------------------------------
/testimages/src/dockerfiles/simple_web_server.dockerfile:
--------------------------------------------------------------------------------
1 | FROM lukemathwalker/cargo-chef:latest-rust-latest AS chef
2 | WORKDIR /app
3 | RUN apt update && apt install lld clang -y
4 |
5 | FROM chef AS planner
6 | COPY . .
7 | # Compute a lock-like file for our project
8 | RUN cargo chef prepare --recipe-path recipe.json
9 |
10 | FROM chef AS builder
11 | COPY --from=planner /app/recipe.json recipe.json
12 | # Build our project dependencies, not our application!
13 | RUN cargo chef cook --release --recipe-path recipe.json
14 | COPY . .
15 | # Build our project
16 | RUN cargo build -v --release --bin simple_web_server
17 |
18 | FROM debian:bookworm-slim AS runtime
19 | WORKDIR /app
20 | RUN apt-get update -y \
21 | && apt-get install -y --no-install-recommends openssl \
22 | # Clean up
23 | && apt-get autoremove -y \
24 | && apt-get clean -y \
25 | && rm -rf /var/lib/apt/lists/*
26 | COPY --from=builder /app/target/release/simple_web_server foo
27 | COPY --from=builder /app/target/release/simple_web_server bar
28 | EXPOSE 80
29 | ENTRYPOINT ["./foo"]
30 |
--------------------------------------------------------------------------------
/testimages/src/lib.rs:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------