├── .github
│   ├── DOCS.md
│   ├── codecov.yml
│   ├── dependabot.yml
│   └── workflows
│       ├── check.yml
│       ├── safety.yml
│       ├── scheduled.yml
│       └── test.yml
├── .gitignore
├── Cargo.lock
├── Cargo.toml
├── README.md
├── rustfmt.toml
├── src
│   ├── abort.rs
│   ├── iter.rs
│   ├── lib.rs
│   ├── ready_to_run_queue.rs
│   └── task.rs
└── tests
    ├── chan.rs
    ├── iter.rs
    └── tcp.rs
/.github/DOCS.md:
--------------------------------------------------------------------------------
1 | # Github config and workflows
2 |
3 | In this folder there is configuration for code coverage, dependabot, and CI
4 | workflows that check the library more deeply than the default configurations.
5 |
6 | This folder can be or was merged using a --allow-unrelated-histories merge
7 | strategy from <https://github.com/jonhoo/rust-ci-conf/>, which provides a
8 | reasonably sensible base for writing your own CI on. By using this strategy
9 | the history of the CI repo is included in your repo, and future updates to
10 | the CI can be merged later.
11 |
12 | To perform this merge run:
13 |
14 | ```shell
15 | git remote add ci https://github.com/jonhoo/rust-ci-conf.git
16 | git fetch ci
17 | git merge --allow-unrelated-histories ci/main
18 | ```
19 |
20 | An overview of the files in this project is available at:
21 | , which contains some
22 | rationale for decisions and runs through an example of solving minimal version
23 | and OpenSSL issues.
24 |
--------------------------------------------------------------------------------
/.github/codecov.yml:
--------------------------------------------------------------------------------
1 | # ref: https://docs.codecov.com/docs/codecovyml-reference
2 | coverage:
3 | # Hold ourselves to a high bar
4 | range: 85..100
5 | round: down
6 | precision: 1
7 | status:
8 | # ref: https://docs.codecov.com/docs/commit-status
9 | project:
10 | default:
11 | # Avoid false negatives
12 | threshold: 1%
13 |
14 | # Test files aren't important for coverage
15 | ignore:
16 | - "tests"
17 |
18 | # Make comments less noisy
19 | comment:
20 | layout: "files"
21 | require_changes: true
22 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: github-actions
4 | directory: /
5 | schedule:
6 | interval: daily
7 | - package-ecosystem: cargo
8 | directory: /
9 | schedule:
10 | interval: daily
11 | ignore:
12 | - dependency-name: "*"
13 | # patch and minor updates don't matter for libraries as consumers of this library build
14 | # with their own lockfile, rather than the version specified in this library's lockfile.
15 | # Remove this ignore rule if your package has binaries to ensure that the binaries are
16 | # built with the exact set of dependencies and those are up to date.
17 | update-types:
18 | - "version-update:semver-patch"
19 | - "version-update:semver-minor"
20 |
--------------------------------------------------------------------------------
/.github/workflows/check.yml:
--------------------------------------------------------------------------------
1 | # This workflow runs whenever a PR is opened or updated, or a commit is pushed to main. It runs
2 | # several checks:
3 | # - fmt: checks that the code is formatted according to rustfmt
4 | # - clippy: checks that the code does not contain any clippy warnings
5 | # - doc: checks that the code can be documented without errors
6 | # - hack: check combinations of feature flags
7 | # - msrv: check that the msrv specified in the crate is correct
8 | permissions:
9 | contents: read
10 | # This configuration allows maintainers of this repo to create a branch and pull request based on
11 | # the new branch. Restricting the push trigger to the main branch ensures that the PR only gets
12 | # built once.
13 | on:
14 | push:
15 | branches: [main]
16 | pull_request:
17 | # If new code is pushed to a PR branch, then cancel in-progress workflows for that PR. This ensures
18 | # that we don't waste CI time, and returns results more quickly https://github.com/jonhoo/rust-ci-conf/pull/5
19 | concurrency:
20 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
21 | cancel-in-progress: true
22 | name: check
23 | jobs:
24 | fmt:
25 | runs-on: ubuntu-latest
26 | name: stable / fmt
27 | steps:
28 | - uses: actions/checkout@v4
29 | with:
30 | submodules: true
31 | - name: Install stable
32 | uses: dtolnay/rust-toolchain@stable
33 | with:
34 | components: rustfmt
35 | - name: cargo fmt --check
36 | run: cargo fmt --check
37 | clippy:
38 | runs-on: ubuntu-latest
39 | name: ${{ matrix.toolchain }} / clippy
40 | permissions:
41 | contents: read
42 | checks: write
43 | strategy:
44 | fail-fast: false
45 | matrix:
46 | # Get early warning of new lints which are regularly introduced in beta channels.
47 | toolchain: [stable, beta]
48 | steps:
49 | - uses: actions/checkout@v4
50 | with:
51 | submodules: true
52 | - name: Install ${{ matrix.toolchain }}
53 | uses: dtolnay/rust-toolchain@master
54 | with:
55 | toolchain: ${{ matrix.toolchain }}
56 | components: clippy
57 | - name: cargo clippy
58 | uses: giraffate/clippy-action@v1
59 | with:
60 | reporter: 'github-pr-check'
61 | github_token: ${{ secrets.GITHUB_TOKEN }}
62 | semver:
63 | runs-on: ubuntu-latest
64 | name: semver
65 | steps:
66 | - uses: actions/checkout@v4
67 | with:
68 | submodules: true
69 | - name: Install stable
70 | uses: dtolnay/rust-toolchain@stable
71 | with:
72 | components: rustfmt
73 | - name: cargo-semver-checks
74 | uses: obi1kenobi/cargo-semver-checks-action@v2
75 | doc:
76 | # run docs generation on nightly rather than stable. This enables features like
77 | # https://doc.rust-lang.org/beta/unstable-book/language-features/doc-cfg.html which allows an
78 | # API to be documented as only available on some specific platforms.
79 | runs-on: ubuntu-latest
80 | name: nightly / doc
81 | steps:
82 | - uses: actions/checkout@v4
83 | with:
84 | submodules: true
85 | - name: Install nightly
86 | uses: dtolnay/rust-toolchain@nightly
87 | - name: Install cargo-docs-rs
88 | uses: dtolnay/install@cargo-docs-rs
89 | - name: cargo docs-rs
90 | run: cargo docs-rs
91 | hack:
92 | # cargo-hack checks combinations of feature flags to ensure that features are all additive,
93 | # which is required for feature unification
94 | runs-on: ubuntu-latest
95 | name: ubuntu / stable / features
96 | steps:
97 | - uses: actions/checkout@v4
98 | with:
99 | submodules: true
100 | - name: Install stable
101 | uses: dtolnay/rust-toolchain@stable
102 | - name: cargo install cargo-hack
103 | uses: taiki-e/install-action@cargo-hack
104 | # intentionally no target specifier; see https://github.com/jonhoo/rust-ci-conf/pull/4
105 | # --feature-powerset runs for every combination of features
106 | - name: cargo hack
107 | run: cargo hack --feature-powerset check
108 | msrv:
109 | # check that we can build using the minimal rust version that is specified by this crate
110 | runs-on: ubuntu-latest
111 | # we use a matrix here just because env can't be used in job names
112 | # https://docs.github.com/en/actions/learn-github-actions/contexts#context-availability
113 | strategy:
114 | matrix:
115 | msrv: ["1.61.0"] # memchr
116 | name: ubuntu / ${{ matrix.msrv }}
117 | steps:
118 | - uses: actions/checkout@v4
119 | with:
120 | submodules: true
121 | - name: Install ${{ matrix.msrv }}
122 | uses: dtolnay/rust-toolchain@master
123 | with:
124 | toolchain: ${{ matrix.msrv }}
125 | - name: cargo +${{ matrix.msrv }} check
126 | run: cargo check
127 |
--------------------------------------------------------------------------------
/.github/workflows/safety.yml:
--------------------------------------------------------------------------------
1 | # This workflow runs checks for unsafe code. In crates that don't have any unsafe code, this can be
2 | # removed. Runs:
3 | # - miri - detects undefined behavior and memory leaks
4 | # - address sanitizer - detects memory errors
5 | # - leak sanitizer - detects memory leaks
6 | # - loom - Permutation testing for concurrent code https://crates.io/crates/loom
7 | # See check.yml for information about how the concurrency cancellation and workflow triggering works
8 | permissions:
9 | contents: read
10 | on:
11 | push:
12 | branches: [main]
13 | pull_request:
14 | concurrency:
15 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
16 | cancel-in-progress: true
17 | name: safety
18 | jobs:
19 | sanitizers:
20 | runs-on: ubuntu-latest
21 | steps:
22 | - uses: actions/checkout@v4
23 | with:
24 | submodules: true
25 | - name: Install nightly
26 | uses: dtolnay/rust-toolchain@nightly
27 | - run: |
28 | # to get the symbolizer for debug symbol resolution
29 | sudo apt install llvm
30 | # to fix buggy leak analyzer:
31 | # https://github.com/japaric/rust-san#unrealiable-leaksanitizer
32 | # ensure there's a profile.dev section
33 | if ! grep -qE '^[ \t]*\[profile\.dev\]' Cargo.toml; then
34 | echo >> Cargo.toml
35 | echo '[profile.dev]' >> Cargo.toml
36 | fi
37 | # remove pre-existing opt-levels in profile.dev
38 | sed -i '/^\s*\[profile.dev\]/,/^\s*\[/ {/^\s*opt-level/d}' Cargo.toml
39 | # now set opt-level to 1
40 | sed -i '/^\s*\[profile.dev\]/a opt-level = 1' Cargo.toml
41 | cat Cargo.toml
42 | name: Enable debug symbols
43 | - name: cargo test -Zsanitizer=address
44 | # only --lib --tests b/c of https://github.com/rust-lang/rust/issues/53945
45 | run: cargo test --lib --tests --all-features --target x86_64-unknown-linux-gnu
46 | env:
47 | ASAN_OPTIONS: "detect_odr_violation=0:detect_leaks=0"
48 | RUSTFLAGS: "-Z sanitizer=address"
49 | - name: cargo test -Zsanitizer=leak
50 | if: always()
51 | run: cargo test --all-features --target x86_64-unknown-linux-gnu
52 | env:
53 | LSAN_OPTIONS: "suppressions=lsan-suppressions.txt"
54 | RUSTFLAGS: "-Z sanitizer=leak"
55 | miri:
56 | runs-on: ubuntu-latest
57 | steps:
58 | - uses: actions/checkout@v4
59 | with:
60 | submodules: true
61 | - run: |
62 | echo "NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)" >> "$GITHUB_ENV"
63 | - name: Install ${{ env.NIGHTLY }}
64 | uses: dtolnay/rust-toolchain@master
65 | with:
66 | toolchain: ${{ env.NIGHTLY }}
67 | components: miri
68 | - name: cargo miri test
69 | run: cargo miri test
70 | env:
71 | MIRIFLAGS: "-Zmiri-tree-borrows"
72 |
--------------------------------------------------------------------------------
/.github/workflows/scheduled.yml:
--------------------------------------------------------------------------------
1 | # Run scheduled (rolling) jobs on a nightly basis, as your crate may break independently of any
2 | # given PR. E.g., updates to Rust nightly and updates to this crate's dependencies. See check.yml for
3 | # information about how the concurrency cancellation and workflow triggering works
4 | permissions:
5 | contents: read
6 | on:
7 | push:
8 | branches: [main]
9 | pull_request:
10 | schedule:
11 | - cron: '7 7 * * *'
12 | concurrency:
13 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
14 | cancel-in-progress: true
15 | name: rolling
16 | jobs:
17 | # https://twitter.com/mycoliza/status/1571295690063753218
18 | nightly:
19 | runs-on: ubuntu-latest
20 | name: ubuntu / nightly
21 | steps:
22 | - uses: actions/checkout@v4
23 | with:
24 | submodules: true
25 | - name: Install nightly
26 | uses: dtolnay/rust-toolchain@nightly
27 | - name: cargo generate-lockfile
28 | if: hashFiles('Cargo.lock') == ''
29 | run: cargo generate-lockfile
30 | - name: cargo test --locked
31 | run: cargo test --locked --all-features --all-targets
32 | # https://twitter.com/alcuadrado/status/1571291687837732873
33 | update:
34 | # This action checks that updating the dependencies of this crate to the latest available that
35 | # satisfy the versions in Cargo.toml does not break this crate. This is important as consumers
36 | # of this crate will generally use the latest available crates. This is subject to the standard
37 | # Cargo semver rules (i.e., cargo does not update to a new major version unless explicitly told
38 | # to).
39 | runs-on: ubuntu-latest
40 | name: ubuntu / beta / updated
41 | # There's no point running this if no Cargo.lock was checked in in the first place, since we'd
42 | # just redo what happened in the regular test job. Unfortunately, hashFiles only works in `if`
43 | # conditions on steps, so we repeat it.
44 | steps:
45 | - uses: actions/checkout@v4
46 | with:
47 | submodules: true
48 | - name: Install beta
49 | if: hashFiles('Cargo.lock') != ''
50 | uses: dtolnay/rust-toolchain@beta
51 | - name: cargo update
52 | if: hashFiles('Cargo.lock') != ''
53 | run: cargo update
54 | - name: cargo test
55 | if: hashFiles('Cargo.lock') != ''
56 | run: cargo test --locked --all-features --all-targets
57 | env:
58 | RUSTFLAGS: -D deprecated
59 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | # This is the main CI workflow that runs the test suite on all pushes to main and all pull requests.
2 | # It runs the following jobs:
3 | # - required: runs the test suite on ubuntu with stable and beta rust toolchains
4 | # - minimal: runs the test suite with the minimal versions of the dependencies that satisfy the
5 | # requirements of this crate, and its dependencies
6 | # - os-check: runs the test suite on mac and windows
7 | # - coverage: runs the test suite and collects coverage information
8 | # See check.yml for information about how the concurrency cancellation and workflow triggering works
9 | permissions:
10 | contents: read
11 | on:
12 | push:
13 | branches: [main]
14 | pull_request:
15 | concurrency:
16 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
17 | cancel-in-progress: true
18 | name: test
19 | jobs:
20 | required:
21 | runs-on: ubuntu-latest
22 | name: ubuntu / ${{ matrix.toolchain }}
23 | strategy:
24 | matrix:
25 | # run on stable and beta to ensure that tests won't break on the next version of the rust
26 | # toolchain
27 | toolchain: [stable, beta]
28 | steps:
29 | - uses: actions/checkout@v4
30 | with:
31 | submodules: true
32 | - name: Install ${{ matrix.toolchain }}
33 | uses: dtolnay/rust-toolchain@master
34 | with:
35 | toolchain: ${{ matrix.toolchain }}
36 | - name: cargo generate-lockfile
37 | # enable this ci template to run regardless of whether the lockfile is checked in or not
38 | if: hashFiles('Cargo.lock') == ''
39 | run: cargo generate-lockfile
40 | # https://twitter.com/jonhoo/status/1571290371124260865
41 | - name: cargo test --locked
42 | run: cargo test --locked --all-features --all-targets
43 | # https://github.com/rust-lang/cargo/issues/6669
44 | - name: cargo test --doc
45 | run: cargo test --locked --all-features --doc
46 | minimal:
47 | # This action chooses the oldest version of the dependencies permitted by Cargo.toml to ensure
48 | # that this crate is compatible with the minimal version that this crate and its dependencies
49 | # require. This will pick up issues where this crate relies on functionality that was introduced
50 | # later than the actual version specified (e.g., when we choose just a major version, but a
51 | # method was added after this version).
52 | #
53 | # This particular check can be difficult to get to succeed as often transitive dependencies may
54 | # be incorrectly specified (e.g., a dependency specifies 1.0 but really requires 1.1.5). There
55 | # is an alternative flag available -Zdirect-minimal-versions that uses the minimal versions for
56 | # direct dependencies of this crate, while selecting the maximal versions for the transitive
57 | # dependencies. Alternatively, you can add a line in your Cargo.toml to artificially increase
58 | # the minimal dependency, which you do with e.g.:
59 | # ```toml
60 | # # for minimal-versions
61 | # [target.'cfg(any())'.dependencies]
62 | # openssl = { version = "0.10.55", optional = true } # needed to allow foo to build with -Zminimal-versions
63 | # ```
64 | # The optional = true is necessary in case that dependency isn't otherwise transitively required
65 | # by your library, and the target bit is so that this dependency edge never actually affects
66 | # Cargo build order. See also
67 | # https://github.com/jonhoo/fantoccini/blob/fde336472b712bc7ebf5b4e772023a7ba71b2262/Cargo.toml#L47-L49.
68 | # This action is run on ubuntu with the stable toolchain, as it is not expected to fail
69 | runs-on: ubuntu-latest
70 | name: ubuntu / stable / minimal-versions
71 | steps:
72 | - uses: actions/checkout@v4
73 | with:
74 | submodules: true
75 | - name: Install stable
76 | uses: dtolnay/rust-toolchain@stable
77 | - name: Install nightly for -Zminimal-versions
78 | uses: dtolnay/rust-toolchain@nightly
79 | - name: rustup default stable
80 | run: rustup default stable
81 | - name: cargo update -Zminimal-versions
82 | run: cargo +nightly update -Zminimal-versions
83 | - name: cargo test
84 | run: cargo test --locked --all-features --all-targets
85 | os-check:
86 | # run cargo test on mac and windows
87 | runs-on: ${{ matrix.os }}
88 | name: ${{ matrix.os }} / stable
89 | strategy:
90 | fail-fast: false
91 | matrix:
92 | os: [macos-latest, windows-latest]
93 | steps:
94 | # if your project needs OpenSSL, uncomment this to fix Windows builds.
95 | # it's commented out by default as the install command takes 5-10m.
96 | # - run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
97 | # if: runner.os == 'Windows'
98 | # - run: vcpkg install openssl:x64-windows-static-md
99 | # if: runner.os == 'Windows'
100 | - uses: actions/checkout@v4
101 | with:
102 | submodules: true
103 | - name: Install stable
104 | uses: dtolnay/rust-toolchain@stable
105 | - name: cargo generate-lockfile
106 | if: hashFiles('Cargo.lock') == ''
107 | run: cargo generate-lockfile
108 | - name: cargo test
109 | run: cargo test --locked --all-features --all-targets
110 | coverage:
111 | # use llvm-cov to build and collect coverage and outputs in a format that
112 | # is compatible with codecov.io
113 | #
114 | # note that codecov as of v4 requires that CODECOV_TOKEN from
115 | #
116 | # https://app.codecov.io/gh/<owner>/<repo>/settings
117 | #
118 | # is set in two places on your repo:
119 | #
120 | # - https://github.com/jonhoo/guardian/settings/secrets/actions
121 | # - https://github.com/jonhoo/guardian/settings/secrets/dependabot
122 | #
123 | # (the former is needed for codecov uploads to work with Dependabot PRs)
124 | #
125 | # PRs coming from forks of your repo will not have access to the token, but
126 | # for those, codecov allows uploading coverage reports without a token.
127 | # it's all a little weird and inconvenient. see
128 | #
129 | # https://github.com/codecov/feedback/issues/112
130 | #
131 | # for lots of more discussion
132 | runs-on: ubuntu-latest
133 | name: ubuntu / stable / coverage
134 | steps:
135 | - uses: actions/checkout@v4
136 | with:
137 | submodules: true
138 | - name: Install stable
139 | uses: dtolnay/rust-toolchain@stable
140 | with:
141 | components: llvm-tools-preview
142 | - name: cargo install cargo-llvm-cov
143 | uses: taiki-e/install-action@cargo-llvm-cov
144 | - name: cargo generate-lockfile
145 | if: hashFiles('Cargo.lock') == ''
146 | run: cargo generate-lockfile
147 | - name: cargo llvm-cov
148 | run: cargo llvm-cov --locked --all-features --lcov --output-path lcov.info
149 | - name: Record Rust version
150 | run: echo "RUST=$(rustc --version)" >> "$GITHUB_ENV"
151 | - name: Upload to codecov.io
152 | uses: codecov/codecov-action@v5
153 | with:
154 | fail_ci_if_error: true
155 | token: ${{ secrets.CODECOV_TOKEN }}
156 | env_vars: OS,RUST
157 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 |
2 | /target
3 | **/*.rs.bk
4 |
--------------------------------------------------------------------------------
/Cargo.lock:
--------------------------------------------------------------------------------
1 | # This file is automatically @generated by Cargo.
2 | # It is not intended for manual editing.
3 | version = 3
4 |
5 | [[package]]
6 | name = "addr2line"
7 | version = "0.24.2"
8 | source = "registry+https://github.com/rust-lang/crates.io-index"
9 | checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
10 | dependencies = [
11 | "gimli",
12 | ]
13 |
14 | [[package]]
15 | name = "adler2"
16 | version = "2.0.0"
17 | source = "registry+https://github.com/rust-lang/crates.io-index"
18 | checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627"
19 |
20 | [[package]]
21 | name = "async-bincode"
22 | version = "0.7.2"
23 | source = "registry+https://github.com/rust-lang/crates.io-index"
24 | checksum = "21849a990d47109757e820904d7c0b569a8013f6595bf14d911884634d58795f"
25 | dependencies = [
26 | "bincode",
27 | "byteorder",
28 | "bytes",
29 | "futures-core",
30 | "futures-sink",
31 | "serde",
32 | "tokio",
33 | ]
34 |
35 | [[package]]
36 | name = "autocfg"
37 | version = "1.4.0"
38 | source = "registry+https://github.com/rust-lang/crates.io-index"
39 | checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
40 |
41 | [[package]]
42 | name = "backtrace"
43 | version = "0.3.74"
44 | source = "registry+https://github.com/rust-lang/crates.io-index"
45 | checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a"
46 | dependencies = [
47 | "addr2line",
48 | "cfg-if",
49 | "libc",
50 | "miniz_oxide",
51 | "object",
52 | "rustc-demangle",
53 | "windows-targets",
54 | ]
55 |
56 | [[package]]
57 | name = "bincode"
58 | version = "1.3.3"
59 | source = "registry+https://github.com/rust-lang/crates.io-index"
60 | checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
61 | dependencies = [
62 | "serde",
63 | ]
64 |
65 | [[package]]
66 | name = "bitflags"
67 | version = "2.6.0"
68 | source = "registry+https://github.com/rust-lang/crates.io-index"
69 | checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
70 |
71 | [[package]]
72 | name = "byteorder"
73 | version = "1.5.0"
74 | source = "registry+https://github.com/rust-lang/crates.io-index"
75 | checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
76 |
77 | [[package]]
78 | name = "bytes"
79 | version = "1.7.2"
80 | source = "registry+https://github.com/rust-lang/crates.io-index"
81 | checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
82 |
83 | [[package]]
84 | name = "cfg-if"
85 | version = "1.0.0"
86 | source = "registry+https://github.com/rust-lang/crates.io-index"
87 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
88 |
89 | [[package]]
90 | name = "futures"
91 | version = "0.3.31"
92 | source = "registry+https://github.com/rust-lang/crates.io-index"
93 | checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
94 | dependencies = [
95 | "futures-channel",
96 | "futures-core",
97 | "futures-executor",
98 | "futures-io",
99 | "futures-sink",
100 | "futures-task",
101 | "futures-util",
102 | ]
103 |
104 | [[package]]
105 | name = "futures-channel"
106 | version = "0.3.31"
107 | source = "registry+https://github.com/rust-lang/crates.io-index"
108 | checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
109 | dependencies = [
110 | "futures-core",
111 | "futures-sink",
112 | ]
113 |
114 | [[package]]
115 | name = "futures-core"
116 | version = "0.3.31"
117 | source = "registry+https://github.com/rust-lang/crates.io-index"
118 | checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
119 |
120 | [[package]]
121 | name = "futures-executor"
122 | version = "0.3.31"
123 | source = "registry+https://github.com/rust-lang/crates.io-index"
124 | checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
125 | dependencies = [
126 | "futures-core",
127 | "futures-task",
128 | "futures-util",
129 | ]
130 |
131 | [[package]]
132 | name = "futures-io"
133 | version = "0.3.31"
134 | source = "registry+https://github.com/rust-lang/crates.io-index"
135 | checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
136 |
137 | [[package]]
138 | name = "futures-macro"
139 | version = "0.3.31"
140 | source = "registry+https://github.com/rust-lang/crates.io-index"
141 | checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
142 | dependencies = [
143 | "proc-macro2",
144 | "quote",
145 | "syn",
146 | ]
147 |
148 | [[package]]
149 | name = "futures-sink"
150 | version = "0.3.31"
151 | source = "registry+https://github.com/rust-lang/crates.io-index"
152 | checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
153 |
154 | [[package]]
155 | name = "futures-task"
156 | version = "0.3.31"
157 | source = "registry+https://github.com/rust-lang/crates.io-index"
158 | checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
159 |
160 | [[package]]
161 | name = "futures-util"
162 | version = "0.3.31"
163 | source = "registry+https://github.com/rust-lang/crates.io-index"
164 | checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
165 | dependencies = [
166 | "futures-channel",
167 | "futures-core",
168 | "futures-io",
169 | "futures-macro",
170 | "futures-sink",
171 | "futures-task",
172 | "memchr",
173 | "pin-project-lite",
174 | "pin-utils",
175 | "slab",
176 | ]
177 |
178 | [[package]]
179 | name = "gimli"
180 | version = "0.31.1"
181 | source = "registry+https://github.com/rust-lang/crates.io-index"
182 | checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
183 |
184 | [[package]]
185 | name = "hermit-abi"
186 | version = "0.3.9"
187 | source = "registry+https://github.com/rust-lang/crates.io-index"
188 | checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
189 |
190 | [[package]]
191 | name = "libc"
192 | version = "0.2.161"
193 | source = "registry+https://github.com/rust-lang/crates.io-index"
194 | checksum = "8e9489c2807c139ffd9c1794f4af0ebe86a828db53ecdc7fea2111d0fed085d1"
195 |
196 | [[package]]
197 | name = "lock_api"
198 | version = "0.4.12"
199 | source = "registry+https://github.com/rust-lang/crates.io-index"
200 | checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
201 | dependencies = [
202 | "autocfg",
203 | "scopeguard",
204 | ]
205 |
206 | [[package]]
207 | name = "memchr"
208 | version = "2.7.4"
209 | source = "registry+https://github.com/rust-lang/crates.io-index"
210 | checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
211 |
212 | [[package]]
213 | name = "miniz_oxide"
214 | version = "0.8.0"
215 | source = "registry+https://github.com/rust-lang/crates.io-index"
216 | checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1"
217 | dependencies = [
218 | "adler2",
219 | ]
220 |
221 | [[package]]
222 | name = "mio"
223 | version = "1.0.2"
224 | source = "registry+https://github.com/rust-lang/crates.io-index"
225 | checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec"
226 | dependencies = [
227 | "hermit-abi",
228 | "libc",
229 | "wasi",
230 | "windows-sys",
231 | ]
232 |
233 | [[package]]
234 | name = "object"
235 | version = "0.36.5"
236 | source = "registry+https://github.com/rust-lang/crates.io-index"
237 | checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e"
238 | dependencies = [
239 | "memchr",
240 | ]
241 |
242 | [[package]]
243 | name = "parking_lot"
244 | version = "0.12.3"
245 | source = "registry+https://github.com/rust-lang/crates.io-index"
246 | checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27"
247 | dependencies = [
248 | "lock_api",
249 | "parking_lot_core",
250 | ]
251 |
252 | [[package]]
253 | name = "parking_lot_core"
254 | version = "0.9.10"
255 | source = "registry+https://github.com/rust-lang/crates.io-index"
256 | checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
257 | dependencies = [
258 | "cfg-if",
259 | "libc",
260 | "redox_syscall",
261 | "smallvec",
262 | "windows-targets",
263 | ]
264 |
265 | [[package]]
266 | name = "pin-project-lite"
267 | version = "0.2.14"
268 | source = "registry+https://github.com/rust-lang/crates.io-index"
269 | checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
270 |
271 | [[package]]
272 | name = "pin-utils"
273 | version = "0.1.0"
274 | source = "registry+https://github.com/rust-lang/crates.io-index"
275 | checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
276 |
277 | [[package]]
278 | name = "proc-macro2"
279 | version = "1.0.88"
280 | source = "registry+https://github.com/rust-lang/crates.io-index"
281 | checksum = "7c3a7fc5db1e57d5a779a352c8cdb57b29aa4c40cc69c3a68a7fedc815fbf2f9"
282 | dependencies = [
283 | "unicode-ident",
284 | ]
285 |
286 | [[package]]
287 | name = "quote"
288 | version = "1.0.37"
289 | source = "registry+https://github.com/rust-lang/crates.io-index"
290 | checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
291 | dependencies = [
292 | "proc-macro2",
293 | ]
294 |
295 | [[package]]
296 | name = "redox_syscall"
297 | version = "0.5.7"
298 | source = "registry+https://github.com/rust-lang/crates.io-index"
299 | checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f"
300 | dependencies = [
301 | "bitflags",
302 | ]
303 |
304 | [[package]]
305 | name = "rustc-demangle"
306 | version = "0.1.24"
307 | source = "registry+https://github.com/rust-lang/crates.io-index"
308 | checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
309 |
310 | [[package]]
311 | name = "scopeguard"
312 | version = "1.2.0"
313 | source = "registry+https://github.com/rust-lang/crates.io-index"
314 | checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
315 |
316 | [[package]]
317 | name = "serde"
318 | version = "1.0.210"
319 | source = "registry+https://github.com/rust-lang/crates.io-index"
320 | checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
321 | dependencies = [
322 | "serde_derive",
323 | ]
324 |
325 | [[package]]
326 | name = "serde_derive"
327 | version = "1.0.210"
328 | source = "registry+https://github.com/rust-lang/crates.io-index"
329 | checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
330 | dependencies = [
331 | "proc-macro2",
332 | "quote",
333 | "syn",
334 | ]
335 |
336 | [[package]]
337 | name = "signal-hook-registry"
338 | version = "1.4.2"
339 | source = "registry+https://github.com/rust-lang/crates.io-index"
340 | checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1"
341 | dependencies = [
342 | "libc",
343 | ]
344 |
345 | [[package]]
346 | name = "slab"
347 | version = "0.4.9"
348 | source = "registry+https://github.com/rust-lang/crates.io-index"
349 | checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
350 | dependencies = [
351 | "autocfg",
352 | ]
353 |
354 | [[package]]
355 | name = "smallvec"
356 | version = "1.13.2"
357 | source = "registry+https://github.com/rust-lang/crates.io-index"
358 | checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
359 |
360 | [[package]]
361 | name = "socket2"
362 | version = "0.5.7"
363 | source = "registry+https://github.com/rust-lang/crates.io-index"
364 | checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
365 | dependencies = [
366 | "libc",
367 | "windows-sys",
368 | ]
369 |
370 | [[package]]
371 | name = "streamunordered"
372 | version = "0.5.4"
373 | dependencies = [
374 | "async-bincode",
375 | "bincode",
376 | "futures",
377 | "futures-core",
378 | "futures-sink",
379 | "futures-util",
380 | "slab",
381 | "tokio",
382 | "tokio-stream",
383 | ]
384 |
385 | [[package]]
386 | name = "syn"
387 | version = "2.0.79"
388 | source = "registry+https://github.com/rust-lang/crates.io-index"
389 | checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590"
390 | dependencies = [
391 | "proc-macro2",
392 | "quote",
393 | "unicode-ident",
394 | ]
395 |
396 | [[package]]
397 | name = "tokio"
398 | version = "1.40.0"
399 | source = "registry+https://github.com/rust-lang/crates.io-index"
400 | checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998"
401 | dependencies = [
402 | "backtrace",
403 | "bytes",
404 | "libc",
405 | "mio",
406 | "parking_lot",
407 | "pin-project-lite",
408 | "signal-hook-registry",
409 | "socket2",
410 | "tokio-macros",
411 | "windows-sys",
412 | ]
413 |
414 | [[package]]
415 | name = "tokio-macros"
416 | version = "2.4.0"
417 | source = "registry+https://github.com/rust-lang/crates.io-index"
418 | checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
419 | dependencies = [
420 | "proc-macro2",
421 | "quote",
422 | "syn",
423 | ]
424 |
425 | [[package]]
426 | name = "tokio-stream"
427 | version = "0.1.16"
428 | source = "registry+https://github.com/rust-lang/crates.io-index"
429 | checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1"
430 | dependencies = [
431 | "futures-core",
432 | "pin-project-lite",
433 | "tokio",
434 | ]
435 |
436 | [[package]]
437 | name = "unicode-ident"
438 | version = "1.0.13"
439 | source = "registry+https://github.com/rust-lang/crates.io-index"
440 | checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe"
441 |
442 | [[package]]
443 | name = "wasi"
444 | version = "0.11.0+wasi-snapshot-preview1"
445 | source = "registry+https://github.com/rust-lang/crates.io-index"
446 | checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
447 |
448 | [[package]]
449 | name = "windows-sys"
450 | version = "0.52.0"
451 | source = "registry+https://github.com/rust-lang/crates.io-index"
452 | checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
453 | dependencies = [
454 | "windows-targets",
455 | ]
456 |
457 | [[package]]
458 | name = "windows-targets"
459 | version = "0.52.6"
460 | source = "registry+https://github.com/rust-lang/crates.io-index"
461 | checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
462 | dependencies = [
463 | "windows_aarch64_gnullvm",
464 | "windows_aarch64_msvc",
465 | "windows_i686_gnu",
466 | "windows_i686_gnullvm",
467 | "windows_i686_msvc",
468 | "windows_x86_64_gnu",
469 | "windows_x86_64_gnullvm",
470 | "windows_x86_64_msvc",
471 | ]
472 |
473 | [[package]]
474 | name = "windows_aarch64_gnullvm"
475 | version = "0.52.6"
476 | source = "registry+https://github.com/rust-lang/crates.io-index"
477 | checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
478 |
479 | [[package]]
480 | name = "windows_aarch64_msvc"
481 | version = "0.52.6"
482 | source = "registry+https://github.com/rust-lang/crates.io-index"
483 | checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
484 |
485 | [[package]]
486 | name = "windows_i686_gnu"
487 | version = "0.52.6"
488 | source = "registry+https://github.com/rust-lang/crates.io-index"
489 | checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
490 |
491 | [[package]]
492 | name = "windows_i686_gnullvm"
493 | version = "0.52.6"
494 | source = "registry+https://github.com/rust-lang/crates.io-index"
495 | checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
496 |
497 | [[package]]
498 | name = "windows_i686_msvc"
499 | version = "0.52.6"
500 | source = "registry+https://github.com/rust-lang/crates.io-index"
501 | checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
502 |
503 | [[package]]
504 | name = "windows_x86_64_gnu"
505 | version = "0.52.6"
506 | source = "registry+https://github.com/rust-lang/crates.io-index"
507 | checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
508 |
509 | [[package]]
510 | name = "windows_x86_64_gnullvm"
511 | version = "0.52.6"
512 | source = "registry+https://github.com/rust-lang/crates.io-index"
513 | checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
514 |
515 | [[package]]
516 | name = "windows_x86_64_msvc"
517 | version = "0.52.6"
518 | source = "registry+https://github.com/rust-lang/crates.io-index"
519 | checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
520 |
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "streamunordered"
3 | version = "0.5.4"
4 | authors = ["Jon Gjengset <jon@thesquareplanet.com>"]
5 | license = "MIT/Apache-2.0"
6 | edition = "2018"
7 |
8 | description = "An efficient async stream multiplexer"
9 | readme = "README.md"
10 |
11 | homepage = "https://github.com/jonhoo/streamunordered"
12 | repository = "https://github.com/jonhoo/streamunordered.git"
13 |
14 | categories = ["asynchronous"]
15 |
16 | [badges]
17 | travis-ci = { repository = "jonhoo/streamunordered" }
18 | maintenance = { status = "passively-maintained" }
19 |
20 | [dependencies]
21 | futures-core = "0.3.0"
22 | futures-sink = "0.3.0"
23 | futures-util = "0.3.31"
24 | slab = "0.4.0"
25 |
26 | [dev-dependencies]
27 | tokio = { version = "1.6.0", features = ["full"] }
28 | tokio-stream = "0.1.1"
29 | async-bincode = "0.7.0"
30 | futures = "0.3.0"
31 | bincode = "1.0.0"
32 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # streamunordered
2 |
3 | [](https://crates.io/crates/streamunordered)
4 | [](https://docs.rs/streamunordered/)
5 | [](https://codecov.io/gh/jonhoo/streamunordered)
6 |
7 | A stream that efficiently multiplexes multiple streams.
8 |
9 | This "combinator" provides the ability to maintain and drive a set of streams to completion,
10 | while also providing access to each stream as it yields new elements.
11 |
12 | Streams are inserted into this set and their realized values are yielded as they are produced.
13 | This structure is optimized to manage a large number of streams. Streams managed by
14 | `StreamUnordered` will only be polled when they generate notifications. This reduces the
15 | required amount of work needed to coordinate large numbers of streams.
16 |
17 | When a `StreamUnordered` is first created, it does not contain any streams. Calling `poll` in
18 | this state will result in `Poll::Ready(None)` being returned. Streams are submitted to the
19 | set using `insert`; however, the stream will **not** be polled at this point. `StreamUnordered`
20 | will only poll managed streams when `StreamUnordered::poll` is called. As such, it is important
21 | to call `poll` after inserting new streams.
22 |
23 | If `StreamUnordered::poll` returns `Poll::Ready(None)` this means that the set is
24 | currently not managing any streams. A stream may be submitted to the set at a later time. At
25 | that point, a call to `StreamUnordered::poll` will either return the stream's resolved value
26 | **or** `Poll::Pending` if the stream has not yet completed.
27 |
28 | Whenever a value is yielded, the yielding stream's index is also included. A reference to the
29 | stream that originated the value is obtained by using [`StreamUnordered::get`] or
30 | [`StreamUnordered::get_mut`].
31 |
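32 | ## Example
33 |
34 | A minimal sketch of driving a few streams at once. The exact item shape
35 | (`(StreamYield, token)` pairs) and the `tokio`/`tokio-stream` helpers used to
36 | drive the example are assumptions here; see the crate documentation for the
37 | precise API.
38 |
39 | ```rust
40 | use futures_util::StreamExt;
41 | use streamunordered::{StreamUnordered, StreamYield};
42 |
43 | #[tokio::main]
44 | async fn main() {
45 |     let mut streams = StreamUnordered::new();
46 |
47 |     // Each insert returns a token that identifies the stream in later yields.
48 |     let _a = streams.insert(tokio_stream::iter(vec![1, 2]));
49 |     let _b = streams.insert(tokio_stream::iter(vec![3]));
50 |
51 |     // Items are yielded as they become ready, tagged with the token of the
52 |     // stream that produced them.
53 |     let mut seen = 0;
54 |     while let Some((yielded, token)) = streams.next().await {
55 |         match yielded {
56 |             StreamYield::Item(item) => {
57 |                 seen += 1;
58 |                 println!("stream {} yielded {}", token, item);
59 |             }
60 |             // A finished stream is reported once; its payload is not used here.
61 |             _ => println!("stream {} finished", token),
62 |         }
63 |         if seen == 3 {
64 |             break; // all items from both streams have been observed
65 |         }
66 |     }
67 | }
68 | ```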
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | edition = "2018"
2 |
--------------------------------------------------------------------------------
/src/abort.rs:
--------------------------------------------------------------------------------
1 | pub(super) fn abort(s: &str) -> ! {
2 | struct DoublePanic;
3 |
4 | impl Drop for DoublePanic {
5 | fn drop(&mut self) {
6 | panic!("panicking twice to abort the program");
7 | }
8 | }
9 |
10 | let _bomb = DoublePanic;
11 | panic!("{}", s);
12 | }
13 |
--------------------------------------------------------------------------------
/src/iter.rs:
--------------------------------------------------------------------------------
1 | use super::task::Task;
2 | use super::StreamUnordered;
3 | use core::marker::PhantomData;
4 | use core::pin::Pin;
5 | use core::sync::atomic::Ordering::Relaxed;
6 |
7 | /// Mutable iterator over all streams in the unordered set.
8 | #[derive(Debug)]
9 | pub struct IterPinMutWithToken<'a, S> {
10 | pub(super) task: *const Task<S>,
11 | pub(super) len: usize,
12 | pub(super) _marker: PhantomData<&'a mut StreamUnordered<S>>,
13 | }
14 |
15 | /// Mutable iterator over all streams in the unordered set.
16 | #[derive(Debug)]
17 | pub struct IterPinMut<'a, S>(pub(super) IterPinMutWithToken<'a, S>);
18 |
19 | /// Mutable iterator over all streams in the unordered set.
20 | #[derive(Debug)]
21 | pub struct IterMutWithToken<'a, S: Unpin>(pub(super) IterPinMutWithToken<'a, S>);
22 |
23 | /// Mutable iterator over all streams in the unordered set.
24 | #[derive(Debug)]
25 | pub struct IterMut<'a, S: Unpin>(pub(super) IterPinMutWithToken<'a, S>);
26 |
27 | /// Immutable iterator over all streams in the unordered set.
28 | #[derive(Debug)]
29 | pub struct IterWithToken<'a, S> {
30 | pub(super) task: *const Task<S>,
31 | pub(super) len: usize,
32 | pub(super) pending_next_all: *mut Task<S>,
33 | pub(super) _marker: PhantomData<&'a StreamUnordered<S>>,
34 | }
35 |
36 | impl<'a, S> Iterator for IterPinMutWithToken<'a, S> {
37 | type Item = (Pin<&'a mut S>, usize);
38 |
39 | fn next(&mut self) -> Option<Self::Item> {
40 | if self.task.is_null() {
41 | return None;
42 | }
43 | unsafe {
44 | let id = (*self.task).id;
45 | let stream = (*(*self.task).stream.get()).as_mut().unwrap();
46 |
47 | // Mutable access to a previously shared `StreamUnordered` implies
48 | // that the other threads already released the object before the
49 | // current thread acquired it, so relaxed ordering can be used and
50 | // valid `next_all` checks can be skipped.
51 | let next = (*self.task).next_all.load(Relaxed);
52 | self.task = next;
53 | self.len -= 1;
54 | Some((Pin::new_unchecked(stream), id))
55 | }
56 | }
57 |
58 | fn size_hint(&self) -> (usize, Option<usize>) {
59 | (self.len, Some(self.len))
60 | }
61 | }
62 |
63 | impl<S> ExactSizeIterator for IterPinMutWithToken<'_, S> {}
64 |
65 | impl<'a, S> Iterator for IterPinMut<'a, S> {
66 | type Item = Pin<&'a mut S>;
67 |
68 | fn next(&mut self) -> Option<Self::Item> {
69 | self.0.next().map(|(s, _)| s)
70 | }
71 |
72 | fn size_hint(&self) -> (usize, Option<usize>) {
73 | self.0.size_hint()
74 | }
75 | }
76 |
77 | impl<S> ExactSizeIterator for IterPinMut<'_, S> {}
78 |
79 | impl<'a, S: Unpin> Iterator for IterMut<'a, S> {
80 | type Item = &'a mut S;
81 |
82 | fn next(&mut self) -> Option<Self::Item> {
83 | self.0.next().map(|(stream, _)| Pin::get_mut(stream))
84 | }
85 |
86 | fn size_hint(&self) -> (usize, Option<usize>) {
87 | self.0.size_hint()
88 | }
89 | }
90 |
91 | impl<S: Unpin> ExactSizeIterator for IterMut<'_, S> {}
92 |
93 | impl<'a, S: Unpin> Iterator for IterMutWithToken<'a, S> {
94 | type Item = (&'a mut S, usize);
95 |
96 | fn next(&mut self) -> Option<Self::Item> {
97 | self.0.next().map(|(stream, id)| (Pin::get_mut(stream), id))
98 | }
99 |
100 | fn size_hint(&self) -> (usize, Option<usize>) {
101 | self.0.size_hint()
102 | }
103 | }
104 |
105 | impl<S: Unpin> ExactSizeIterator for IterMutWithToken<'_, S> {}
106 |
107 | impl<'a, S> Iterator for IterWithToken<'a, S> {
108 | type Item = (&'a S, usize);
109 |
110 | fn next(&mut self) -> Option<Self::Item> {
111 | if self.task.is_null() {
112 | return None;
113 | }
114 | unsafe {
115 | let id = (*self.task).id;
116 | let stream = (*(*self.task).stream.get()).as_ref().unwrap();
117 |
118 | // Relaxed ordering can be used since acquire ordering when
119 | // `head_all` was initially read for this iterator implies acquire
120 | // ordering for all previously inserted nodes (and we don't need to
121 | // read `len_all` again for any other nodes).
122 | let next = (*self.task).spin_next_all(self.pending_next_all, Relaxed);
123 | self.task = next;
124 | self.len -= 1;
125 | Some((stream, id))
126 | }
127 | }
128 |
129 | fn size_hint(&self) -> (usize, Option<usize>) {
130 | (self.len, Some(self.len))
131 | }
132 | }
133 |
134 | impl<S> ExactSizeIterator for IterWithToken<'_, S> {}
135 |
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! A stream that efficiently multiplexes multiple streams.
2 | //!
3 | //! This "combinator" provides the ability to maintain and drive a set of streams to completion,
4 | //! while also providing access to each stream as it yields new elements.
5 | //!
6 | //! Streams are inserted into this set and their realized values are yielded as they are produced.
7 | //! This structure is optimized to manage a large number of streams. Streams managed by
8 | //! `StreamUnordered` will only be polled when they generate notifications. This reduces the
9 | //! required amount of work needed to coordinate large numbers of streams.
10 | //!
11 | //! When a `StreamUnordered` is first created, it does not contain any streams. Calling `poll` in
12 | //! this state will result in `Poll::Ready(None)` being returned. Streams are submitted to the
13 | //! set using `insert`; however, the stream will **not** be polled at this point. `StreamUnordered`
14 | //! will only poll managed streams when `StreamUnordered::poll` is called. As such, it is important
15 | //! to call `poll` after inserting new streams.
16 | //!
17 | //! If `StreamUnordered::poll` returns `Poll::Ready(None)` this means that the set is
18 | //! currently not managing any streams. A stream may be submitted to the set at a later time. At
19 | //! that point, a call to `StreamUnordered::poll` will either return the stream's resolved value
20 | //! **or** `Poll::Pending` if the stream has not yet completed.
21 | //!
22 | //! Whenever a value is yielded, the yielding stream's index is also included. A reference to the
23 | //! stream that originated the value is obtained by using [`StreamUnordered::get`],
24 | //! [`StreamUnordered::get_mut`], or [`StreamUnordered::get_pin_mut`].
25 | //!
26 | //! In normal operation, `poll` will yield a `StreamYield::Item` when it completes successfully.
27 | //! This value indicates that an underlying stream (the one indicated by the included index)
28 | //! produced an item. If an underlying stream yields `Poll::Ready(None)` to indicate termination,
29 | //! a `StreamYield::Finished` is returned instead. Note that as soon as a stream returns
30 | //! `StreamYield::Finished`, its token may be reused for new streams that are added.
31 |
32 | #![deny(missing_docs)]
33 | #![warn(rust_2018_idioms, rustdoc::broken_intra_doc_links)]
34 |
35 | // This is mainly FuturesUnordered from futures_util, but adapted to operate over Streams rather
36 | // than Futures.
37 |
38 | extern crate alloc;
39 |
40 | use alloc::sync::{Arc, Weak};
41 | use core::cell::UnsafeCell;
42 | use core::fmt::{self, Debug};
43 | use core::iter::FromIterator;
44 | use core::marker::PhantomData;
45 | use core::mem;
46 | use core::ops::{Index, IndexMut};
47 | use core::pin::Pin;
48 | use core::ptr;
49 | use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst};
50 | use core::sync::atomic::{AtomicBool, AtomicPtr};
51 | use futures_core::stream::{FusedStream, Stream};
52 | use futures_core::task::{Context, Poll};
53 | use futures_util::task::{ArcWake, AtomicWaker};
54 |
55 | mod abort;
56 |
57 | mod iter;
58 | pub use self::iter::{IterMut, IterMutWithToken, IterPinMut, IterPinMutWithToken, IterWithToken};
59 |
60 | mod task;
61 | use self::task::Task;
62 |
63 | mod ready_to_run_queue;
64 | use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue};
65 |
66 | /// Constant used for a `StreamUnordered` to determine how many times it is
67 | /// allowed to poll underlying streams without yielding.
68 | ///
69 | /// A single call to `poll_next` may potentially do a lot of work before
70 | /// yielding. This happens in particular if the underlying streams are awoken
71 | /// frequently but continue to return `Pending`. This is problematic if other
72 | /// tasks are waiting on the executor, since they do not get to run. This value
73 | /// caps the number of calls to `poll` on underlying streams a single call to
74 | /// `poll_next` is allowed to make.
75 | ///
76 | /// The value itself is chosen somewhat arbitrarily. It needs to be high enough
77 | /// to amortize wakeup and scheduling costs, but low enough that we do not
78 | /// starve other tasks for long.
79 | ///
80 | /// See also https://github.com/rust-lang/futures-rs/issues/2047.
81 | const YIELD_EVERY: usize = 32;
82 |
83 | /// A set of streams which may yield items in any order.
84 | ///
85 | /// This structure is optimized to manage a large number of streams.
86 | /// Streams managed by [`StreamUnordered`] will only be polled when they
87 | /// generate wake-up notifications. This reduces the required amount of work
88 | /// needed to poll large numbers of streams.
89 | ///
90 | /// [`StreamUnordered`] can be filled by [`collect`](Iterator::collect)ing an
91 | /// iterator of streams into a [`StreamUnordered`], or by
92 | /// [`insert`](StreamUnordered::insert)ing streams onto an existing
93 | /// [`StreamUnordered`]. When new streams are added,
94 | /// [`poll_next`](Stream::poll_next) must be called in order to begin receiving
95 | /// wake-ups for new streams.
96 | ///
97 | /// Note that you can create a ready-made [`StreamUnordered`] via the
98 | /// [`collect`](Iterator::collect) method, or you can start with an empty set
99 | /// with the [`StreamUnordered::new`] constructor.
100 | #[must_use = "streams do nothing unless polled"]
101 | pub struct StreamUnordered<S> {
102 | ready_to_run_queue: Arc<ReadyToRunQueue<S>>,
103 | head_all: AtomicPtr<Task<S>>,
104 | is_terminated: AtomicBool,
105 | by_id: slab::Slab<*const Task<S>>,
106 | }
107 |
108 | unsafe impl<S: Send> Send for StreamUnordered<S> {}
109 | unsafe impl<S: Sync> Sync for StreamUnordered<S> {}
110 | impl<S> Unpin for StreamUnordered<S> {}
111 |
112 | // StreamUnordered is implemented using two linked lists. One which links all
113 | // streams managed by a `StreamUnordered` and one that tracks streams that have
114 | // been scheduled for polling. The first linked list allows for thread safe
115 | // insertion of nodes at the head as well as forward iteration, but is otherwise
116 | // not thread safe and is only accessed by the thread that owns the
117 | // `StreamUnordered` value for any other operations. The second linked list is
118 | // an implementation of the intrusive MPSC queue algorithm described by
119 | // 1024cores.net.
120 | //
121 | // When a stream is submitted to the set, a task is allocated and inserted in
122 | // both linked lists. The next call to `poll_next` will (eventually) see this
123 | // task and call `poll` on the stream.
124 | //
125 | // Before a managed stream is polled, the current context's waker is replaced
126 | // with one that is aware of the specific stream being run. This ensures that
127 | // wake-up notifications generated by that specific stream are visible to
128 | // `StreamUnordered`. When a wake-up notification is received, the task is
129 | // inserted into the ready to run queue, so that its stream can be polled later.
130 | //
131 | // Each task is wrapped in an `Arc` and thereby atomically reference counted.
132 | // Also, each task contains an `AtomicBool` which acts as a flag that indicates
133 | // whether the task is currently inserted in the atomic queue. When a wake-up
134 | // notification is received, the task will only be inserted into the ready to
135 | // run queue if it isn't inserted already.
136 |
137 | /// A handle to a vacant stream slot in a `StreamUnordered`.
138 | ///
139 | /// `StreamEntry` allows constructing streams that hold the token that they will be assigned.
140 | #[derive(Debug)]
141 | pub struct StreamEntry<'a, S> {
142 | token: usize,
143 | inserted: bool,
144 | backref: &'a mut StreamUnordered<S>,
145 | }
146 |
147 | impl<'a, S: 'a> StreamEntry<'a, S> {
148 | /// Insert a stream in the slot.
149 | ///
150 | /// To get the token associated with the stream, use `token` prior to calling `insert`.
151 | pub fn insert(mut self, stream: S) {
152 | self.inserted = true;
153 |
154 | // this is safe because we've held &mut StreamUnordered the entire time,
155 | // so the token still points to a valid task, and no-one else is
156 | // touching the .stream of it.
157 | unsafe {
158 | (*(*self.backref.by_id[self.token]).stream.get()) = Some(stream);
159 | }
160 | }
161 |
162 | /// Return the token associated with this slot.
163 | ///
164 | /// A stream stored in this slot will be associated with this token.
165 | pub fn token(&self) -> usize {
166 | self.token
167 | }
168 | }
169 |
170 | impl<'a, S: 'a> Drop for StreamEntry<'a, S> {
171 | fn drop(&mut self) {
172 | if !self.inserted {
173 | // undo the insertion
174 | let task_ptr = self.backref.by_id.remove(self.token);
175 |
176 | // we know task_ptr points to a valid task, since the StreamEntry
177 | // has held the &mut StreamUnordered the entire time.
178 | let task = unsafe { self.backref.unlink(task_ptr) };
179 | self.backref.release_task(task);
180 | }
181 | }
182 | }
183 |
184 | impl<S> StreamUnordered<S> {
185 | /// Constructs a new, empty [`StreamUnordered`].
186 | ///
187 | /// The returned [`StreamUnordered`] does not contain any streams.
188 | /// In this state, [`StreamUnordered::poll_next`](Stream::poll_next) will
189 | /// return [`Poll::Ready(None)`](Poll::Ready).
190 | pub fn new() -> StreamUnordered<S> {
191 | let mut slab = slab::Slab::new();
192 | let slot = slab.vacant_entry();
193 | let stub = Arc::new(Task {
194 | stream: UnsafeCell::new(None),
195 | is_done: UnsafeCell::new(false),
196 | next_all: AtomicPtr::new(ptr::null_mut()),
197 | prev_all: UnsafeCell::new(ptr::null()),
198 | len_all: UnsafeCell::new(0),
199 | next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
200 | queued: AtomicBool::new(true),
201 | ready_to_run_queue: Weak::new(),
202 | id: slot.key(),
203 | });
204 | let stub_ptr = &*stub as *const Task<S>;
205 | let _ = slab.insert(stub_ptr);
206 |
207 | let ready_to_run_queue = Arc::new(ReadyToRunQueue {
208 | waker: AtomicWaker::new(),
209 | head: AtomicPtr::new(stub_ptr as *mut _),
210 | tail: UnsafeCell::new(stub_ptr),
211 | stub,
212 | });
213 |
214 | StreamUnordered {
215 | head_all: AtomicPtr::new(ptr::null_mut()),
216 | ready_to_run_queue,
217 | is_terminated: AtomicBool::new(false),
218 | by_id: slab,
219 | }
220 | }
221 | }
222 |
223 | impl<S> Default for StreamUnordered<S> {
224 | fn default() -> StreamUnordered<S> {
225 | StreamUnordered::new()
226 | }
227 | }
228 |
229 | impl<S> StreamUnordered<S> {
230 | /// Returns the number of streams contained in the set.
231 | ///
232 | /// This represents the total number of in-flight streams.
233 | pub fn len(&self) -> usize {
234 | let (_, len) = self.atomic_load_head_and_len_all();
235 | len
236 | }
237 |
238 | /// Returns `true` if the set contains no streams.
239 | pub fn is_empty(&self) -> bool {
240 | // Relaxed ordering can be used here since we don't need to read from
241 | // the head pointer, only check whether it is null.
242 | self.head_all.load(Relaxed).is_null()
243 | }
244 |
245 | /// Returns a handle to a vacant stream entry allowing for further manipulation.
246 | ///
247 | /// This function is useful when creating values that must contain their stream token. The
248 | /// returned `StreamEntry` reserves an entry for the stream and is able to query the associated
249 | /// token.
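///
/// A minimal sketch of the intended pattern, where `set` is an existing
/// `StreamUnordered` and `stream_for` is a hypothetical constructor for a
/// stream that embeds its own token:
///
/// ```ignore
/// let entry = set.stream_entry();
/// let token = entry.token();
/// // Build a stream that knows the token its items will be yielded under.
/// let stream = stream_for(token);
/// entry.insert(stream);
/// ```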
250 | pub fn stream_entry(&mut self) -> StreamEntry<'_, S> {
251 | let next_all = self.pending_next_all();
252 | let slot = self.by_id.vacant_entry();
253 | let token = slot.key();
254 |
255 | let task = Arc::new(Task {
256 | stream: UnsafeCell::new(None),
257 | is_done: UnsafeCell::new(false),
258 | next_all: AtomicPtr::new(next_all),
259 | prev_all: UnsafeCell::new(ptr::null_mut()),
260 | len_all: UnsafeCell::new(0),
261 | next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
262 | queued: AtomicBool::new(true),
263 | ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue),
264 | id: token,
265 | });
266 |
267 | let _ = slot.insert(&*task as *const _);
268 |
269 | // Reset the `is_terminated` flag if we've previously marked ourselves
270 | // as terminated.
271 | self.is_terminated.store(false, Relaxed);
272 |
273 | // Right now our task has a strong reference count of 1. We transfer
274 | // ownership of this reference count to our internal linked list
275 | // and we'll reclaim ownership through the `unlink` method below.
276 | let ptr = self.link(task);
277 |
278 | // We'll need to get the stream "into the system" to start tracking it,
279 | // e.g. getting its wake-up notifications routed to us so we can track
280 | // which streams are ready. To do that we unconditionally enqueue it for
281 | // polling here.
282 | self.ready_to_run_queue.enqueue(ptr);
283 |
284 | StreamEntry {
285 | token,
286 | inserted: false,
287 | backref: self,
288 | }
289 | }
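
// Illustrative sketch (not part of this crate) of using `stream_entry` when a
// stream needs to know its own token up front; `make_stream_with_token` is a
// hypothetical constructor used only for this example:
//
//     let mut set = StreamUnordered::new();
//     let entry = set.stream_entry();
//     let token = entry.token();
//     entry.insert(make_stream_with_token(token));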
290 |
291 | /// Insert a stream into the set.
292 | ///
293 | /// A deprecated synonym for [`insert`].
294 | #[deprecated(since = "0.5.2", note = "Prefer StreamUnordered::insert")]
295 | pub fn push(&mut self, stream: S) -> usize {
296 | self.insert(stream)
297 | }
298 |
299 | /// Insert a stream into the set.
300 | ///
301 | /// This method adds the given stream to the set. This method will not call
302 | /// [`poll_next`](futures_util::stream::Stream::poll_next) on the submitted stream. The caller
303 | /// must ensure that [`StreamUnordered::poll_next`](Stream::poll_next) is called in order to
304 | /// receive wake-up notifications for the given stream.
305 | ///
306 | /// The returned token is an identifier that uniquely identifies the given stream in the
307 | /// current set. To get a handle to the inserted stream, pass the token to
308 | /// [`StreamUnordered::get`], [`StreamUnordered::get_mut`], or [`StreamUnordered::get_pin_mut`]
309 | /// (or just index `StreamUnordered` directly). The same token will be yielded whenever an
310 | /// element is pulled from this stream.
311 | ///
312 | /// Note that the streams are not ordered, and may not be yielded back in insertion or token
313 | /// order when you iterate over them.
314 | pub fn insert(&mut self, stream: S) -> usize {
315 | let s = self.stream_entry();
316 | let token = s.token();
317 | s.insert(stream);
318 | token
319 | }
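
// A minimal usage sketch, assuming the `futures` crate (`stream::iter`,
// `StreamExt::next`) and an async context; illustrative only, not part of
// this crate:
//
//     let mut set = StreamUnordered::new();
//     let token = set.insert(futures_util::stream::iter(vec![1, 2, 3]));
//     while let Some((yielded, t)) = set.next().await {
//         match yielded {
//             StreamYield::Item(item) => println!("stream {} yielded {}", t, item),
//             // removing the exhausted stream lets the loop end once the set is empty
//             StreamYield::Finished(fin) => fin.remove(Pin::new(&mut set)),
//         }
//     }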
320 |
321 | /// Remove a stream from the set.
322 | ///
323 | /// The stream will be dropped and will no longer yield stream events.
324 | pub fn remove(mut self: Pin<&mut Self>, token: usize) -> bool {
325 | if token == 0 {
326 | return false;
327 | }
328 |
329 | let task = if let Some(task) = self.by_id.get(token) {
330 | *task
331 | } else {
332 | return false;
333 | };
334 |
335 | // we know that by_id only references valid tasks
336 | let task = unsafe { self.unlink(task) };
337 | self.release_task(task);
338 | true
339 | }
340 |
341 | /// Remove and return a stream from the set.
342 | ///
343 | /// The stream will no longer be polled, and will no longer yield stream events.
344 | ///
345 | /// Note that since this method moves `S`, which we may have given out a `Pin` to, it requires
346 | /// that `S` is `Unpin`.
347 | pub fn take(mut self: Pin<&mut Self>, token: usize) -> Option<S>
348 | where
349 | S: Unpin,
350 | {
351 | if token == 0 {
352 | return None;
353 | }
354 |
355 | let task = *self.by_id.get(token)?;
356 |
357 | // we know that by_id only references valid tasks
358 | let task = unsafe { self.unlink(task) };
359 |
360 | // This is safe because we're dropping the stream on the thread that owns
361 | // `StreamUnordered`, which correctly tracks `S`'s lifetimes and such.
362 | // The logic is the same as for why release_task is allowed to touch task.stream.
363 | // Since S: Unpin, it is okay for us to move S.
364 | let stream = unsafe { &mut *task.stream.get() }.take();
365 |
366 | self.release_task(task);
367 |
368 | stream
369 | }
370 |
371 | /// Returns whether the stream with the given token has yielded `None`, if such a stream exists.
372 | pub fn is_finished(&self, token: usize) -> Option<bool> {
373 | if token == 0 {
374 | return None;
375 | }
376 |
377 | // we know that by_id only references valid tasks
378 | Some(unsafe { *(**self.by_id.get(token)?).is_done.get() })
379 | }
380 |
381 | /// Returns a reference to the stream with the given token.
382 | pub fn get(&self, token: usize) -> Option<&S> {
383 | // don't allow access to the 0th task, since it's not a stream
384 | if token == 0 {
385 | return None;
386 | }
387 |
388 | // we know that by_id only references valid tasks
389 | Some(unsafe { (*(**self.by_id.get(token)?).stream.get()).as_ref().unwrap() })
390 | }
391 |
392 | /// Returns a reference that allows modifying the stream with the given token.
393 | pub fn get_mut(&mut self, token: usize) -> Option<&mut S>
394 | where
395 | S: Unpin,
396 | {
397 | // don't allow access to the 0th task, since it's not a stream
398 | if token == 0 {
399 | return None;
400 | }
401 |
402 | // this is safe for the same reason that IterMut::next is safe
403 | Some(unsafe {
404 | (*(**self.by_id.get_mut(token)?).stream.get())
405 | .as_mut()
406 | .unwrap()
407 | })
408 | }
409 |
410 | /// Returns a pinned reference that allows modifying the stream with the given token.
411 | pub fn get_pin_mut(mut self: Pin<&mut Self>, token: usize) -> Option<Pin<&mut S>> {
412 | // don't allow access to the 0th task, since it's not a stream
413 | if token == 0 {
414 | return None;
415 | }
416 |
417 | // this is safe for the same reason that IterPinMut::next is safe
418 | Some(unsafe {
419 | Pin::new_unchecked(
420 | (*(**self.by_id.get_mut(token)?).stream.get())
421 | .as_mut()
422 | .unwrap(),
423 | )
424 | })
425 | }
426 |
427 | /// Returns an iterator that allows modifying each stream in the set.
428 | pub fn iter_mut(&mut self) -> IterMut<'_, S>
429 | where
430 | S: Unpin,
431 | {
432 | IterMut(Pin::new(self).iter_pin_mut_with_token())
433 | }
434 |
435 | /// Returns an iterator that allows modifying each stream in the set.
436 | pub fn iter_mut_with_token(&mut self) -> IterMutWithToken<'_, S>
437 | where
438 | S: Unpin,
439 | {
440 | IterMutWithToken(Pin::new(self).iter_pin_mut_with_token())
441 | }
442 |
443 | /// Returns an iterator that allows modifying each stream in the set.
444 | pub fn iter_pin_mut(self: Pin<&mut Self>) -> IterPinMut<'_, S> {
445 | IterPinMut(self.iter_pin_mut_with_token())
446 | }
447 |
448 | /// Returns an iterator that allows modifying each stream in the set.
449 | pub fn iter_pin_mut_with_token(mut self: Pin<&mut Self>) -> IterPinMutWithToken<'_, S> {
450 | // `head_all` can be accessed directly and we don't need to spin on
451 | // `Task::next_all` since we have exclusive access to the set.
452 | let task = *self.head_all.get_mut();
453 | let len = if task.is_null() {
454 | 0
455 | } else {
456 | unsafe { *(*task).len_all.get() }
457 | };
458 |
459 | IterPinMutWithToken {
460 | task,
461 | len,
462 | _marker: PhantomData,
463 | }
464 | }
465 |
466 | /// Returns an immutable iterator that allows getting a reference to each stream in the set.
467 | pub fn iter_with_token(&self) -> IterWithToken<'_, S> {
468 | let (task, len) = self.atomic_load_head_and_len_all();
469 | IterWithToken {
470 | task,
471 | len,
472 | pending_next_all: self.pending_next_all(),
473 | _marker: PhantomData,
474 | }
475 | }
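
// Sketch of walking the set while keeping each stream's token around
// (illustrative; the exact item yielded is defined by the iterator types
// above):
//
//     for entry in set.iter_mut_with_token() {
//         // `entry` pairs a mutable reference to a stream with its token
//     }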
476 |
477 | /// Returns the current head node and number of streams in the list of all
478 | /// streams within a context where access is shared with other threads
479 | /// (mostly for use with the `len` and iterator methods).
480 | fn atomic_load_head_and_len_all(&self) -> (*const Task<S>, usize) {
481 | let task = self.head_all.load(Acquire);
482 | let len = if task.is_null() {
483 | 0
484 | } else {
485 | unsafe {
486 | (*task).spin_next_all(self.pending_next_all(), Acquire);
487 | *(*task).len_all.get()
488 | }
489 | };
490 |
491 | (task, len)
492 | }
493 |
494 | /// Releases the task. It destroys the stream inside and either drops
495 | /// the `Arc` or transfers ownership to the ready to run queue.
496 | /// The task this method is called on must have been unlinked before.
497 | fn release_task(&mut self, task: Arc<Task<S>>) {
498 | self.by_id.remove(task.id);
499 |
500 | // `release_task` must only be called on unlinked tasks
501 | debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
502 | unsafe {
503 | debug_assert!((*task.prev_all.get()).is_null());
504 | }
505 |
506 | // The stream is done, try to reset the queued flag. This will prevent
507 | // `wake` from doing any work in the stream
508 | let prev = task.queued.swap(true, SeqCst);
509 |
510 | // Drop the stream, even if it hasn't finished yet. This is safe
511 | // because we're dropping the stream on the thread that owns
512 | // `StreamUnordered`, which correctly tracks `S`'s lifetimes and
513 | // such.
514 | unsafe {
515 | // Set to `None` rather than `take()`ing to prevent moving the
516 | // stream.
517 | *task.stream.get() = None;
518 | }
519 |
520 | // If the queued flag was previously set, then it means that this task
521 | // is still in our internal ready to run queue. We then transfer
522 | // ownership of our reference count to the ready to run queue, and it'll
523 | // come along and free it later, noticing that the stream is `None`.
524 | //
525 | // If, however, the queued flag was *not* set then we're safe to
526 | // release our reference count on the task. The queued flag was set
527 | // above so all stream `enqueue` operations will not actually
528 | // enqueue the task, so our task will never see the ready to run queue
529 | // again. The task itself will be deallocated once all reference counts
530 | // have been dropped elsewhere by the various wakers that contain it.
531 | if prev {
532 | mem::forget(task);
533 | }
534 | }
535 |
536 | /// Insert a new task into the internal linked list.
537 | fn link(&self, task: Arc<Task<S>>) -> *const Task<S> {
538 | // `next_all` should already be reset to the pending state before this
539 | // function is called.
540 | debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
541 | let ptr = Arc::into_raw(task);
542 |
543 | // Atomically swap out the old head node to get the node that should be
544 | // assigned to `next_all`.
545 | let next = self.head_all.swap(ptr as *mut _, AcqRel);
546 |
547 | unsafe {
548 | // Store the new list length in the new node.
549 | let new_len = if next.is_null() {
550 | 1
551 | } else {
552 | // Make sure `next_all` has been written to signal that it is
553 | // safe to read `len_all`.
554 | (*next).spin_next_all(self.pending_next_all(), Acquire);
555 | *(*next).len_all.get() + 1
556 | };
557 | *(*ptr).len_all.get() = new_len;
558 |
559 | // Write the old head as the next node pointer, signaling to other
560 | // threads that `len_all` and `next_all` are ready to read.
561 | (*ptr).next_all.store(next, Release);
562 |
563 | // `prev_all` updates don't need to be synchronized, as the field is
564 | // only ever used after exclusive access has been acquired.
565 | if !next.is_null() {
566 | *(*next).prev_all.get() = ptr;
567 | }
568 | }
569 |
570 | ptr
571 | }
572 |
573 | /// Remove the task from the linked list tracking all tasks currently
574 | /// managed by `StreamUnordered`.
575 | /// This method is unsafe because the caller must guarantee that `task`
576 | /// is a valid pointer.
577 | unsafe fn unlink(&mut self, task: *const Task<S>) -> Arc<Task<S>> {
578 | // Compute the new list length now in case we're removing the head node
579 | // and won't be able to retrieve the correct length later.
580 | let head = *self.head_all.get_mut();
581 | debug_assert!(!head.is_null());
582 | let new_len = *(*head).len_all.get() - 1;
583 |
584 | let task = Arc::from_raw(task);
585 | let next = task.next_all.load(Relaxed);
586 | let prev = *task.prev_all.get();
587 | task.next_all.store(self.pending_next_all(), Relaxed);
588 | *task.prev_all.get() = ptr::null_mut();
589 |
590 | if !next.is_null() {
591 | *(*next).prev_all.get() = prev;
592 | }
593 |
594 | if !prev.is_null() {
595 | (*prev).next_all.store(next, Relaxed);
596 | } else {
597 | *self.head_all.get_mut() = next;
598 | }
599 |
600 | // Store the new list length in the head node.
601 | let head = *self.head_all.get_mut();
602 | if !head.is_null() {
603 | *(*head).len_all.get() = new_len;
604 | }
605 |
606 | task
607 | }
608 |
609 | /// Returns the reserved value for `Task::next_all` to indicate a pending
610 | /// assignment from the thread that inserted the task.
611 | ///
612 | /// `StreamUnordered::link` needs to update `Task` pointers in an order
613 | /// that ensures any iterators created on other threads can correctly
614 | /// traverse the entire `Task` list using the chain of `next_all` pointers.
615 | /// This could be solved with a compare-exchange loop that stores the
616 | /// current `head_all` in `next_all` and swaps out `head_all` with the new
617 | /// `Task` pointer if the head hasn't already changed. Under heavy thread
618 | /// contention, this compare-exchange loop could become costly.
619 | ///
620 | /// An alternative is to initialize `next_all` to a reserved pending state
621 | /// first, perform an atomic swap on `head_all`, and finally update
622 | /// `next_all` with the old head node. Iterators will then either see the
623 | /// pending state value or the correct next node pointer, and can reload
624 | /// `next_all` as needed until the correct value is loaded. The number of
625 | /// retries needed (if any) would be small and will always be finite, so
626 | /// this should generally perform better than the compare-exchange loop.
627 | ///
628 | /// A valid `Task` pointer in the `head_all` list is guaranteed to never be
629 | /// this value, so it is safe to use as a reserved value until the correct
630 | /// value can be written.
631 | fn pending_next_all(&self) -> *mut Task<S> {
632 | // The `ReadyToRunQueue` stub is never inserted into the `head_all`
633 | // list, and its pointer value will remain valid for the lifetime of
634 | // this `StreamUnordered`, so we can make use of its value here.
635 | &*self.ready_to_run_queue.stub as *const _ as *mut _
636 | }
637 | }
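
// The reserved-"pending" insertion scheme described on `pending_next_all`,
// sketched in isolation (illustrative pseudocode, not this crate's code):
//
//     new.next_all = PENDING;                    // sentinel no real task can be
//     let old_head = head_all.swap(new, AcqRel); // publish the new head
//     new.len_all = len(old_head) + 1;
//     new.next_all.store(old_head, Release);     // readers can now continue past `new`
//
// A reader that reaches `new` through `head_all` may briefly observe PENDING
// in `next_all`; it simply re-loads until the Release store is visible (this
// is what `spin_next_all` does), avoiding a compare-exchange loop on insertion.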
638 |
639 | impl<S> Index<usize> for StreamUnordered<S> {
640 | type Output = S;
641 |
642 | fn index(&self, stream: usize) -> &Self::Output {
643 | self.get(stream).unwrap()
644 | }
645 | }
646 |
647 | impl<S> IndexMut<usize> for StreamUnordered<S>
648 | where
649 | S: Unpin,
650 | {
651 | fn index_mut(&mut self, stream: usize) -> &mut Self::Output {
652 | self.get_mut(stream).unwrap()
653 | }
654 | }
655 |
656 | /// An event that occurred for a managed stream.
657 | pub enum StreamYield<S>
658 | where
659 | S: Stream,
660 | {
661 | /// The underlying stream produced an item.
662 | Item(S::Item),
663 | /// The underlying stream has completed.
664 | Finished(FinishedStream),
665 | }
666 |
667 | /// A stream that has yielded all the items it ever will.
668 | ///
669 | /// The underlying stream will only be dropped by explicitly removing it from the associated
670 | /// `StreamUnordered`. This type is marked as `#[must_use]` to ensure that you either remove the
671 | /// stream immediately, or you explicitly ask for it to be kept around for later use.
672 | ///
673 | /// If the `FinishedStream` is dropped, the exhausted stream will not be dropped until the owning
674 | /// `StreamUnordered` is.
675 | #[must_use]
676 | pub struct FinishedStream {
677 | token: usize,
678 | }
679 |
680 | impl FinishedStream {
681 | /// Remove the exhausted stream.
682 | ///
683 | /// See [`StreamUnordered::remove`].
684 | pub fn remove<S>(self, so: Pin<&mut StreamUnordered<S>>) {
685 | so.remove(self.token);
686 | }
687 |
688 | /// Take the exhausted stream.
689 | ///
690 | /// Note that this requires `S: Unpin` since it moves the stream even though it has already
691 | /// been pinned by `StreamUnordered`.
692 | ///
693 | /// See [`StreamUnordered::take`].
694 | pub fn take<S>(self, so: Pin<&mut StreamUnordered<S>>) -> Option<S>
695 | where
696 | S: Unpin,
697 | {
698 | so.take(self.token)
699 | }
700 |
701 | /// Leave the exhausted stream in the `StreamUnordered`.
702 | ///
703 | /// This allows you to continue to access the stream through [`StreamUnordered::get_mut`] and
704 | /// friends should you need to perform further operations on it (e.g., if it is also being used
705 | /// as a `Sink`). Note that the stream will then not be dropped until you explicitly `remove`
706 | /// or `take` it from the `StreamUnordered`.
707 | pub fn keep(self) {}
708 |
709 | /// Return the token associated with the exhausted stream.
710 | pub fn token(self) -> usize {
711 | self.token
712 | }
713 | }
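
// Hedged sketch of the keep-vs-remove choice when a managed stream finishes
// (`set`, `yielded`, `handle`, and `still_needed_as_sink` are illustrative
// names, not part of this crate; assumes `set` is `Unpin` so `Pin::new` applies):
//
//     match yielded {
//         StreamYield::Item(item) => handle(item),
//         StreamYield::Finished(fin) => {
//             if still_needed_as_sink {
//                 // leave the exhausted stream in the set; it stays reachable
//                 // via `get_mut` until explicitly removed or taken
//                 fin.keep();
//             } else {
//                 // drop it immediately
//                 fin.remove(Pin::new(&mut set));
//             }
//         }
//     }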
714 |
715 | impl<S> Debug for StreamYield<S>
716 | where
717 | S: Stream,
718 | S::Item: Debug,
719 | {
720 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
721 | match self {
722 | StreamYield::Item(ref i) => f.debug_tuple("StreamYield::Item").field(i).finish(),
723 | StreamYield::Finished(_) => f.debug_tuple("StreamYield::Finished").finish(),
724 | }
725 | }
726 | }
727 |
728 | impl<S> PartialEq for StreamYield<S>
729 | where
730 | S: Stream,
731 | S::Item: PartialEq,
732 | {
733 | fn eq(&self, other: &Self) -> bool {
734 | match (self, other) {
735 | (StreamYield::Item(s), StreamYield::Item(o)) => s == o,
736 | _ => false,
737 | }
738 | }
739 | }
740 |
741 | impl<S: Stream> Stream for StreamUnordered<S> {
742 | type Item = (StreamYield<S>, usize);
743 |
744 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {