├── .github └── workflows │ ├── ci.yaml │ ├── master.yaml │ └── pr.yaml ├── .gitignore ├── Cargo.toml ├── FAQ.md ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── RELEASES.md ├── bors.toml ├── ci ├── alt-core │ ├── Cargo.toml │ ├── build.rs │ └── src │ │ └── lib.rs ├── compat-Cargo.lock ├── highlander.sh └── highlander │ ├── Cargo.toml │ └── src │ └── main.rs ├── rayon-core ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── src │ ├── broadcast │ │ ├── mod.rs │ │ └── test.rs │ ├── compile_fail │ │ ├── mod.rs │ │ ├── quicksort_race1.rs │ │ ├── quicksort_race2.rs │ │ ├── quicksort_race3.rs │ │ ├── rc_return.rs │ │ ├── rc_upvar.rs │ │ └── scope_join_bad.rs │ ├── job.rs │ ├── join │ │ ├── mod.rs │ │ └── test.rs │ ├── latch.rs │ ├── lib.rs │ ├── private.rs │ ├── registry.rs │ ├── scope │ │ ├── mod.rs │ │ └── test.rs │ ├── sleep │ │ ├── README.md │ │ ├── counters.rs │ │ └── mod.rs │ ├── spawn │ │ ├── mod.rs │ │ └── test.rs │ ├── test.rs │ ├── thread_pool │ │ ├── mod.rs │ │ └── test.rs │ ├── tlv.rs │ ├── unwind.rs │ └── worker_local.rs └── tests │ ├── double_init_fail.rs │ ├── init_zero_threads.rs │ ├── scope_join.rs │ ├── scoped_threadpool.rs │ ├── simple_panic.rs │ └── stack_overflow_crash.rs ├── rayon-demo ├── Cargo.toml ├── data │ ├── README.md │ └── tsp │ │ ├── README.md │ │ ├── dj10.tsp │ │ ├── dj15.tsp │ │ └── dj38.tsp ├── examples │ ├── README.md │ └── cpu_monitor.rs └── src │ ├── cpu_time │ ├── mod.rs │ ├── unix.rs │ └── win.rs │ ├── factorial │ └── mod.rs │ ├── fibonacci │ └── mod.rs │ ├── find │ └── mod.rs │ ├── join_microbench.rs │ ├── lib.rs │ ├── life │ ├── bench.rs │ └── mod.rs │ ├── main.rs │ ├── map_collect.rs │ ├── matmul │ ├── bench.rs │ └── mod.rs │ ├── mergesort │ ├── bench.rs │ └── mod.rs │ ├── nbody │ ├── bench.rs │ ├── mod.rs │ ├── nbody.rs │ └── visualize.rs │ ├── noop │ └── mod.rs │ ├── pythagoras │ └── mod.rs │ ├── quicksort │ ├── bench.rs │ └── mod.rs │ ├── sieve │ ├── bench.rs │ └── mod.rs │ ├── sort.rs │ ├── str_split.rs │ 
├── tsp │ ├── bench.rs │ ├── graph.rs │ ├── mod.rs │ ├── parser.rs │ ├── solver.rs │ ├── step.rs │ ├── tour.rs │ └── weight.rs │ └── vec_collect.rs ├── scripts └── analyze.sh ├── src ├── array.rs ├── collections │ ├── binary_heap.rs │ ├── btree_map.rs │ ├── btree_set.rs │ ├── hash_map.rs │ ├── hash_set.rs │ ├── linked_list.rs │ ├── mod.rs │ └── vec_deque.rs ├── compile_fail │ ├── cannot_collect_filtermap_data.rs │ ├── cannot_zip_filtered_data.rs │ ├── cell_par_iter.rs │ ├── mod.rs │ ├── must_use.rs │ ├── no_send_par_iter.rs │ └── rc_par_iter.rs ├── delegate.rs ├── iter │ ├── chain.rs │ ├── chunks.rs │ ├── cloned.rs │ ├── collect │ │ ├── consumer.rs │ │ ├── mod.rs │ │ └── test.rs │ ├── copied.rs │ ├── empty.rs │ ├── enumerate.rs │ ├── extend.rs │ ├── filter.rs │ ├── filter_map.rs │ ├── find.rs │ ├── find_first_last │ │ ├── mod.rs │ │ └── test.rs │ ├── flat_map.rs │ ├── flat_map_iter.rs │ ├── flatten.rs │ ├── flatten_iter.rs │ ├── fold.rs │ ├── fold_chunks.rs │ ├── fold_chunks_with.rs │ ├── for_each.rs │ ├── from_par_iter.rs │ ├── inspect.rs │ ├── interleave.rs │ ├── interleave_shortest.rs │ ├── intersperse.rs │ ├── len.rs │ ├── map.rs │ ├── map_with.rs │ ├── mod.rs │ ├── multizip.rs │ ├── noop.rs │ ├── once.rs │ ├── panic_fuse.rs │ ├── par_bridge.rs │ ├── plumbing │ │ ├── README.md │ │ └── mod.rs │ ├── positions.rs │ ├── product.rs │ ├── reduce.rs │ ├── repeat.rs │ ├── rev.rs │ ├── skip.rs │ ├── skip_any.rs │ ├── skip_any_while.rs │ ├── splitter.rs │ ├── step_by.rs │ ├── sum.rs │ ├── take.rs │ ├── take_any.rs │ ├── take_any_while.rs │ ├── test.rs │ ├── try_fold.rs │ ├── try_reduce.rs │ ├── try_reduce_with.rs │ ├── unzip.rs │ ├── update.rs │ ├── while_some.rs │ ├── zip.rs │ └── zip_eq.rs ├── lib.rs ├── math.rs ├── option.rs ├── par_either.rs ├── par_indexmap.rs ├── prelude.rs ├── private.rs ├── range.rs ├── range_inclusive.rs ├── result.rs ├── slice │ ├── chunks.rs │ ├── mergesort.rs │ ├── mod.rs │ ├── quicksort.rs │ ├── rchunks.rs │ └── test.rs ├── split_producer.rs 
├── str.rs ├── string.rs └── vec.rs └── tests ├── chars.rs ├── clones.rs ├── collect.rs ├── cross-pool.rs ├── debug.rs ├── drain_vec.rs ├── intersperse.rs ├── issue671-unzip.rs ├── issue671.rs ├── iter_panic.rs ├── named-threads.rs ├── octillion.rs ├── par_bridge_recursion.rs ├── producer_split_at.rs ├── sort-panic-safe.rs └── str.rs /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | on: 3 | push: 4 | branches: 5 | - staging 6 | - trying 7 | 8 | jobs: 9 | 10 | check: 11 | name: Check (1.59.0) 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v3 15 | - uses: dtolnay/rust-toolchain@1.59.0 16 | - run: cp ci/compat-Cargo.lock ./Cargo.lock 17 | - run: cargo check --verbose --locked 18 | 19 | test: 20 | name: Test 21 | runs-on: ${{ matrix.os }} 22 | strategy: 23 | matrix: 24 | os: [ubuntu-latest, windows-latest, macos-latest] 25 | rust: [stable, beta, nightly] 26 | steps: 27 | - uses: actions/checkout@v3 28 | - uses: dtolnay/rust-toolchain@master 29 | with: 30 | toolchain: ${{ matrix.rust }} 31 | - run: cargo build --verbose 32 | - run: cargo test --verbose --package rayon 33 | - run: cargo test --verbose --package rayon-core 34 | - run: ./ci/highlander.sh 35 | 36 | # rayon-demo has huge dependencies, so limit its testing. 
37 | # build on stable, test on nightly (because of #[bench]) 38 | demo: 39 | name: Demo 40 | runs-on: ubuntu-latest 41 | strategy: 42 | matrix: 43 | rust: [stable, nightly] 44 | steps: 45 | - uses: actions/checkout@v3 46 | - uses: dtolnay/rust-toolchain@master 47 | with: 48 | toolchain: ${{ matrix.rust }} 49 | - run: cargo build --verbose --package rayon-demo 50 | - run: cargo test --verbose --package rayon-demo 51 | if: matrix.rust == 'nightly' 52 | 53 | i686: 54 | name: Test (ubuntu-latest, stable-i686) 55 | runs-on: ubuntu-latest 56 | steps: 57 | - run: | 58 | sudo apt-get update 59 | sudo apt-get install gcc-multilib 60 | - uses: actions/checkout@v3 61 | - uses: dtolnay/rust-toolchain@master 62 | with: 63 | toolchain: stable-i686-unknown-linux-gnu 64 | - run: cargo build --verbose 65 | - run: cargo test --verbose --package rayon 66 | - run: cargo test --verbose --package rayon-core 67 | 68 | # wasm32-unknown-unknown builds, and even has the runtime fallback for 69 | # unsupported threading, but we don't have an environment to execute in. 70 | # wasm32-wasi can test the fallback by running in wasmtime. 
71 | wasm: 72 | name: WebAssembly 73 | runs-on: ubuntu-latest 74 | env: 75 | CARGO_TARGET_WASM32_WASI_RUNNER: /home/runner/.wasmtime/bin/wasmtime 76 | steps: 77 | - uses: actions/checkout@v3 78 | - uses: dtolnay/rust-toolchain@stable 79 | with: 80 | targets: wasm32-unknown-unknown,wasm32-wasi 81 | - run: cargo check --verbose --target wasm32-unknown-unknown 82 | - run: cargo check --verbose --target wasm32-wasi 83 | - run: curl https://wasmtime.dev/install.sh -sSf | bash 84 | - run: cargo test --verbose --target wasm32-wasi --package rayon 85 | - run: cargo test --verbose --target wasm32-wasi --package rayon-core 86 | 87 | fmt: 88 | name: Format 89 | runs-on: ubuntu-latest 90 | steps: 91 | - uses: actions/checkout@v3 92 | - uses: dtolnay/rust-toolchain@1.67.1 93 | with: 94 | components: rustfmt 95 | - run: cargo fmt --all --check 96 | -------------------------------------------------------------------------------- /.github/workflows/master.yaml: -------------------------------------------------------------------------------- 1 | name: master 2 | on: 3 | push: 4 | branches: 5 | - rustc 6 | schedule: 7 | - cron: '0 0 * * 0' # 00:00 Sunday 8 | 9 | jobs: 10 | 11 | test: 12 | name: Test (stable) 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v3 16 | - uses: dtolnay/rust-toolchain@stable 17 | with: 18 | toolchain: stable 19 | profile: minimal 20 | override: true 21 | - run: cargo build --verbose 22 | - run: cargo test --verbose --package rustc-rayon 23 | - run: cargo test --verbose --package rustc-rayon-core 24 | - run: ./ci/highlander.sh 25 | if: github.repository != 'rust-lang/rustc-rayon' 26 | -------------------------------------------------------------------------------- /.github/workflows/pr.yaml: -------------------------------------------------------------------------------- 1 | name: PR 2 | on: pull_request 3 | 4 | # Using 16MB stacks for deep test/debug recursion 5 | env: 6 | RUST_MIN_STACK: 16777216 7 | 8 | jobs: 9 | 10 | check: 11 | 
name: Check (1.59.0) 12 | runs-on: ubuntu-latest 13 | if: github.repository != 'rust-lang/rustc-rayon' 14 | steps: 15 | - uses: actions/checkout@v3 16 | - uses: dtolnay/rust-toolchain@1.59.0 17 | - run: cp ci/compat-Cargo.lock ./Cargo.lock 18 | - run: cargo check --verbose --locked 19 | 20 | test: 21 | name: Test (stable) 22 | runs-on: ubuntu-latest 23 | steps: 24 | - uses: actions/checkout@v3 25 | - uses: dtolnay/rust-toolchain@stable 26 | - run: cargo build --verbose 27 | - run: cargo test --verbose --package rustc-rayon 28 | - run: cargo test --verbose --package rustc-rayon-core 29 | - run: ./ci/highlander.sh 30 | if: github.repository != 'rust-lang/rustc-rayon' 31 | 32 | fmt: 33 | name: Format 34 | runs-on: ubuntu-latest 35 | steps: 36 | - uses: actions/checkout@v3 37 | - uses: dtolnay/rust-toolchain@1.67.1 38 | with: 39 | components: rustfmt 40 | - run: cargo fmt --all --check 41 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | target 3 | *~ 4 | TAGS 5 | *.bk 6 | .idea -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rustc-rayon" 3 | version = "0.5.1" 4 | authors = ["Niko Matsakis ", 5 | "Josh Stone "] 6 | description = "Simple work-stealing parallelism for Rust - fork for rustc" 7 | rust-version = "1.59" 8 | edition = "2021" 9 | license = "MIT OR Apache-2.0" 10 | repository = "https://github.com/rust-lang/rustc-rayon" 11 | documentation = "https://docs.rs/rustc-rayon/" 12 | readme = "README.md" 13 | keywords = ["parallel", "thread", "concurrency", "join", "performance"] 14 | categories = ["concurrency"] 15 | exclude = ["/ci/*", "/scripts/*", "/.github/*", "/bors.toml"] 16 | 17 | [workspace] 18 | members = ["rayon-core"] 19 | exclude = ["ci"] 20 | 21 | [lib] 22 | 
name = "rayon" 23 | 24 | [dependencies] 25 | indexmap = { version = "2", optional = true } 26 | rustc-rayon-core = { version = "0.5", path = "rayon-core" } 27 | 28 | # This is a public dependency! 29 | [dependencies.either] 30 | version = "1.0" 31 | default-features = false 32 | 33 | [dev-dependencies] 34 | rand = "0.8" 35 | rand_xorshift = "0.3" 36 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010 The Rust Project Developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rustc-rayon 2 | 3 | rustc-rayon is a fork of [the Rayon crate](https://github.com/rayon-rs/rayon/). 
It adds a few "in progress" features that rustc is using, mostly around deadlock detection. These features are not stable and should not be used by others -- though they may find their way into rayon proper at some point. In general, if you are not rustc, you should be using the real rayon crate, not rustc-rayon. =) 4 | 5 | ## License 6 | 7 | rustc-rayon is a fork of rayon. rayon is distributed under the terms of both the MIT license and the 8 | Apache License (Version 2.0). See [LICENSE-APACHE](LICENSE-APACHE) and 9 | [LICENSE-MIT](LICENSE-MIT) for details. Opening a pull request is 10 | assumed to signal agreement with these licensing terms. 11 | -------------------------------------------------------------------------------- /bors.toml: -------------------------------------------------------------------------------- 1 | status = [ 2 | "Check (1.59.0)", 3 | "Test (ubuntu-latest, stable)", 4 | "Test (ubuntu-latest, stable-i686)", 5 | "Test (ubuntu-latest, beta)", 6 | "Test (ubuntu-latest, nightly)", 7 | "Test (windows-latest, stable)", 8 | "Test (windows-latest, beta)", 9 | "Test (windows-latest, nightly)", 10 | "Test (macos-latest, stable)", 11 | "Test (macos-latest, beta)", 12 | "Test (macos-latest, nightly)", 13 | "Demo (stable)", 14 | "Demo (nightly)", 15 | "WebAssembly", 16 | "Format", 17 | ] 18 | -------------------------------------------------------------------------------- /ci/alt-core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | edition = "2018" 3 | name = "alt-core" 4 | version = "0.0.0" 5 | authors = ["Josh Stone "] 6 | links = "rayon-core" 7 | build = "build.rs" 8 | publish = false 9 | 10 | [dependencies] 11 | -------------------------------------------------------------------------------- /ci/alt-core/build.rs: -------------------------------------------------------------------------------- 1 | fn main() {} 2 | 
-------------------------------------------------------------------------------- /ci/alt-core/src/lib.rs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rust-lang/rustc-rayon/5fadf4479edd3be1f66b46f56be61ac92fb6901a/ci/alt-core/src/lib.rs -------------------------------------------------------------------------------- /ci/highlander.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 4 | 5 | echo "INFO: There Can Be Only One!" >&2 6 | 7 | if cargo build --manifest-path "$DIR/highlander/Cargo.toml"; then 8 | echo "ERROR: we built with multiple rayon-core!" >&2 9 | exit 1 10 | fi 11 | 12 | echo "PASS: using multiple rayon-core failed." >&2 13 | -------------------------------------------------------------------------------- /ci/highlander/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Josh Stone "] 3 | edition = "2018" 4 | name = "highlander" 5 | description = "There Can Be Only One" 6 | version = "0.0.0" 7 | publish = false 8 | 9 | [dependencies] 10 | 11 | [dependencies.alt-core] 12 | optional = false 13 | path = "../alt-core" 14 | 15 | [dependencies.rayon-core] 16 | optional = false 17 | path = "../../rayon-core" 18 | -------------------------------------------------------------------------------- /ci/highlander/src/main.rs: -------------------------------------------------------------------------------- 1 | fn main() {} 2 | -------------------------------------------------------------------------------- /rayon-core/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "rustc-rayon-core" 3 | version = "0.5.1" 4 | authors = ["Niko Matsakis ", 5 | "Josh Stone "] 6 | description = "Core APIs for Rayon - fork for rustc" 7 | license = "MIT OR Apache-2.0" 
8 | repository = "https://github.com/rust-lang/rustc-rayon" 9 | documentation = "https://docs.rs/rustc-rayon-core/" 10 | rust-version = "1.63" 11 | edition = "2021" 12 | readme = "README.md" 13 | keywords = ["parallel", "thread", "concurrency", "join", "performance"] 14 | categories = ["concurrency"] 15 | 16 | [lib] 17 | name = "rayon_core" 18 | 19 | # Some dependencies may not be their latest version, in order to support older rustc. 20 | [dependencies] 21 | crossbeam-deque = "0.8.1" 22 | crossbeam-utils = "0.8.0" 23 | 24 | [dev-dependencies] 25 | rand = "0.9" 26 | rand_xorshift = "0.4" 27 | scoped-tls = "1.0" 28 | 29 | [target.'cfg(unix)'.dev-dependencies] 30 | libc = "0.2" 31 | 32 | [[test]] 33 | name = "stack_overflow_crash" 34 | path = "tests/stack_overflow_crash.rs" 35 | 36 | # NB: having one [[test]] manually defined means we need to declare them all 37 | 38 | [[test]] 39 | name = "double_init_fail" 40 | path = "tests/double_init_fail.rs" 41 | 42 | [[test]] 43 | name = "init_zero_threads" 44 | path = "tests/init_zero_threads.rs" 45 | 46 | [[test]] 47 | name = "scope_join" 48 | path = "tests/scope_join.rs" 49 | 50 | [[test]] 51 | name = "simple_panic" 52 | path = "tests/simple_panic.rs" 53 | 54 | [[test]] 55 | name = "scoped_threadpool" 56 | path = "tests/scoped_threadpool.rs" 57 | -------------------------------------------------------------------------------- /rayon-core/LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010 The Rust Project Developers 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the 
following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /rayon-core/README.md: -------------------------------------------------------------------------------- 1 | Note: This is an unstable fork made for use in rustc 2 | 3 | Rayon-core represents the "core, stable" APIs of Rayon: join, scope, and so forth, as well as the ability to create custom thread-pools with ThreadPool. 4 | 5 | Maybe worth mentioning: users are not necessarily intended to directly access rayon-core; all its APIs are mirrored in the rayon crate. To that end, the examples in the docs use rayon::join and so forth rather than rayon_core::join. 6 | 7 | rayon-core aims to never, or almost never, have a breaking change to its API, because each revision of rayon-core also houses the global thread-pool (and hence if you have two simultaneous versions of rayon-core, you have two thread-pools). 8 | 9 | Please see [Rayon Docs] for details about using Rayon. 10 | 11 | [Rayon Docs]: https://docs.rs/rayon/ 12 | 13 | Rayon-core currently requires `rustc 1.63.0` or greater. 
14 | -------------------------------------------------------------------------------- /rayon-core/src/compile_fail/mod.rs: -------------------------------------------------------------------------------- 1 | // These modules contain `compile_fail` doc tests. 2 | mod quicksort_race1; 3 | mod quicksort_race2; 4 | mod quicksort_race3; 5 | mod rc_return; 6 | mod rc_upvar; 7 | mod scope_join_bad; 8 | -------------------------------------------------------------------------------- /rayon-core/src/compile_fail/quicksort_race1.rs: -------------------------------------------------------------------------------- 1 | /*! ```compile_fail,E0524 2 | 3 | fn quick_sort(v: &mut [T]) { 4 | if v.len() <= 1 { 5 | return; 6 | } 7 | 8 | let mid = partition(v); 9 | let (lo, _hi) = v.split_at_mut(mid); 10 | rayon_core::join(|| quick_sort(lo), || quick_sort(lo)); //~ ERROR 11 | } 12 | 13 | fn partition(v: &mut [T]) -> usize { 14 | let pivot = v.len() - 1; 15 | let mut i = 0; 16 | for j in 0..pivot { 17 | if v[j] <= v[pivot] { 18 | v.swap(i, j); 19 | i += 1; 20 | } 21 | } 22 | v.swap(i, pivot); 23 | i 24 | } 25 | 26 | fn main() { } 27 | 28 | ``` */ 29 | -------------------------------------------------------------------------------- /rayon-core/src/compile_fail/quicksort_race2.rs: -------------------------------------------------------------------------------- 1 | /*! 
```compile_fail,E0500 2 | 3 | fn quick_sort(v: &mut [T]) { 4 | if v.len() <= 1 { 5 | return; 6 | } 7 | 8 | let mid = partition(v); 9 | let (lo, _hi) = v.split_at_mut(mid); 10 | rayon_core::join(|| quick_sort(lo), || quick_sort(v)); //~ ERROR 11 | } 12 | 13 | fn partition(v: &mut [T]) -> usize { 14 | let pivot = v.len() - 1; 15 | let mut i = 0; 16 | for j in 0..pivot { 17 | if v[j] <= v[pivot] { 18 | v.swap(i, j); 19 | i += 1; 20 | } 21 | } 22 | v.swap(i, pivot); 23 | i 24 | } 25 | 26 | fn main() { } 27 | 28 | ``` */ 29 | -------------------------------------------------------------------------------- /rayon-core/src/compile_fail/quicksort_race3.rs: -------------------------------------------------------------------------------- 1 | /*! ```compile_fail,E0524 2 | 3 | fn quick_sort(v: &mut [T]) { 4 | if v.len() <= 1 { 5 | return; 6 | } 7 | 8 | let mid = partition(v); 9 | let (_lo, hi) = v.split_at_mut(mid); 10 | rayon_core::join(|| quick_sort(hi), || quick_sort(hi)); //~ ERROR 11 | } 12 | 13 | fn partition(v: &mut [T]) -> usize { 14 | let pivot = v.len() - 1; 15 | let mut i = 0; 16 | for j in 0..pivot { 17 | if v[j] <= v[pivot] { 18 | v.swap(i, j); 19 | i += 1; 20 | } 21 | } 22 | v.swap(i, pivot); 23 | i 24 | } 25 | 26 | fn main() { } 27 | 28 | ``` */ 29 | -------------------------------------------------------------------------------- /rayon-core/src/compile_fail/rc_return.rs: -------------------------------------------------------------------------------- 1 | /** ```compile_fail,E0277 2 | 3 | use std::rc::Rc; 4 | 5 | rayon_core::join(|| Rc::new(22), || ()); //~ ERROR 6 | 7 | ``` */ 8 | mod left {} 9 | 10 | /** ```compile_fail,E0277 11 | 12 | use std::rc::Rc; 13 | 14 | rayon_core::join(|| (), || Rc::new(23)); //~ ERROR 15 | 16 | ``` */ 17 | mod right {} 18 | -------------------------------------------------------------------------------- /rayon-core/src/compile_fail/rc_upvar.rs: -------------------------------------------------------------------------------- 1 | /*! 
```compile_fail,E0277 2 | 3 | use std::rc::Rc; 4 | 5 | let r = Rc::new(22); 6 | rayon_core::join(|| r.clone(), || r.clone()); 7 | //~^ ERROR 8 | 9 | ``` */ 10 | -------------------------------------------------------------------------------- /rayon-core/src/compile_fail/scope_join_bad.rs: -------------------------------------------------------------------------------- 1 | /*! ```compile_fail,E0373 2 | 3 | fn bad_scope(f: F) 4 | where F: FnOnce(&i32) + Send, 5 | { 6 | rayon_core::scope(|s| { 7 | let x = 22; 8 | s.spawn(|_| f(&x)); //~ ERROR `x` does not live long enough 9 | }); 10 | } 11 | 12 | fn good_scope(f: F) 13 | where F: FnOnce(&i32) + Send, 14 | { 15 | let x = 22; 16 | rayon_core::scope(|s| { 17 | s.spawn(|_| f(&x)); 18 | }); 19 | } 20 | 21 | fn main() { 22 | } 23 | 24 | ``` */ 25 | -------------------------------------------------------------------------------- /rayon-core/src/private.rs: -------------------------------------------------------------------------------- 1 | //! The public parts of this private module are used to create traits 2 | //! that cannot be implemented outside of our own crate. This way we 3 | //! can feel free to extend those traits without worrying about it 4 | //! being a breaking change for other implementations. 5 | 6 | /// If this type is pub but not publicly reachable, third parties 7 | /// can't name it and can't implement traits using it. 8 | #[allow(missing_debug_implementations)] 9 | pub struct PrivateMarker; 10 | 11 | macro_rules! private_decl { 12 | () => { 13 | /// This trait is private; this method exists to make it 14 | /// impossible to implement outside the crate. 15 | #[doc(hidden)] 16 | fn __rayon_private__(&self) -> crate::private::PrivateMarker; 17 | }; 18 | } 19 | 20 | macro_rules! 
private_impl { 21 | () => { 22 | fn __rayon_private__(&self) -> crate::private::PrivateMarker { 23 | crate::private::PrivateMarker 24 | } 25 | }; 26 | } 27 | -------------------------------------------------------------------------------- /rayon-core/src/tlv.rs: -------------------------------------------------------------------------------- 1 | //! Allows access to the Rayon's thread local value 2 | //! which is preserved when moving jobs across threads 3 | 4 | use std::{cell::Cell, ptr}; 5 | 6 | thread_local!(pub static TLV: Cell<*const ()> = const { Cell::new(ptr::null()) }); 7 | 8 | #[derive(Copy, Clone)] 9 | pub(crate) struct Tlv(pub(crate) *const ()); 10 | 11 | impl Tlv { 12 | #[inline] 13 | pub(crate) fn null() -> Self { 14 | Self(ptr::null()) 15 | } 16 | } 17 | 18 | unsafe impl Sync for Tlv {} 19 | unsafe impl Send for Tlv {} 20 | 21 | /// Sets the current thread-local value 22 | #[inline] 23 | pub(crate) fn set(value: Tlv) { 24 | TLV.with(|tlv| tlv.set(value.0)); 25 | } 26 | 27 | /// Returns the current thread-local value 28 | #[inline] 29 | pub(crate) fn get() -> Tlv { 30 | TLV.with(|tlv| Tlv(tlv.get())) 31 | } 32 | -------------------------------------------------------------------------------- /rayon-core/src/unwind.rs: -------------------------------------------------------------------------------- 1 | //! Package up unwind recovery. Note that if you are in some sensitive 2 | //! place, you can use the `AbortIfPanic` helper to protect against 3 | //! accidental panics in the rayon code itself. 4 | 5 | use std::any::Any; 6 | use std::panic::{self, AssertUnwindSafe}; 7 | use std::thread; 8 | 9 | /// Executes `f` and captures any panic, translating that panic into a 10 | /// `Err` result. The assumption is that any panic will be propagated 11 | /// later with `resume_unwinding`, and hence `f` can be treated as 12 | /// exception safe. 
13 | pub(super) fn halt_unwinding(func: F) -> thread::Result 14 | where 15 | F: FnOnce() -> R, 16 | { 17 | panic::catch_unwind(AssertUnwindSafe(func)) 18 | } 19 | 20 | pub(super) fn resume_unwinding(payload: Box) -> ! { 21 | panic::resume_unwind(payload) 22 | } 23 | 24 | pub(super) struct AbortIfPanic; 25 | 26 | impl Drop for AbortIfPanic { 27 | fn drop(&mut self) { 28 | eprintln!("Rayon: detected unexpected panic; aborting"); 29 | ::std::process::abort(); 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /rayon-core/src/worker_local.rs: -------------------------------------------------------------------------------- 1 | use crate::registry::{Registry, WorkerThread}; 2 | use std::fmt; 3 | use std::ops::Deref; 4 | use std::sync::Arc; 5 | 6 | #[repr(align(64))] 7 | #[derive(Debug)] 8 | struct CacheAligned(T); 9 | 10 | /// Holds worker-locals values for each thread in a thread pool. 11 | /// You can only access the worker local value through the Deref impl 12 | /// on the thread pool it was constructed on. It will panic otherwise 13 | pub struct WorkerLocal { 14 | locals: Vec>, 15 | registry: Arc, 16 | } 17 | 18 | /// We prevent concurrent access to the underlying value in the 19 | /// Deref impl, thus any values safe to send across threads can 20 | /// be used with WorkerLocal. 21 | unsafe impl Sync for WorkerLocal {} 22 | 23 | impl WorkerLocal { 24 | /// Creates a new worker local where the `initial` closure computes the 25 | /// value this worker local should take for each thread in the thread pool. 
26 | #[inline] 27 | pub fn new T>(mut initial: F) -> WorkerLocal { 28 | let registry = Registry::current(); 29 | WorkerLocal { 30 | locals: (0..registry.num_threads()) 31 | .map(|i| CacheAligned(initial(i))) 32 | .collect(), 33 | registry, 34 | } 35 | } 36 | 37 | /// Returns the worker-local value for each thread 38 | #[inline] 39 | pub fn into_inner(self) -> Vec { 40 | self.locals.into_iter().map(|c| c.0).collect() 41 | } 42 | 43 | fn current(&self) -> &T { 44 | unsafe { 45 | let worker_thread = WorkerThread::current(); 46 | if worker_thread.is_null() 47 | || &*(*worker_thread).registry as *const _ != &*self.registry as *const _ 48 | { 49 | panic!("WorkerLocal can only be used on the thread pool it was created on") 50 | } 51 | &self.locals[(*worker_thread).index].0 52 | } 53 | } 54 | } 55 | 56 | impl WorkerLocal> { 57 | /// Joins the elements of all the worker locals into one Vec 58 | pub fn join(self) -> Vec { 59 | self.into_inner().into_iter().flat_map(|v| v).collect() 60 | } 61 | } 62 | 63 | impl fmt::Debug for WorkerLocal { 64 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 65 | f.debug_struct("WorkerLocal") 66 | .field("registry", &self.registry.id()) 67 | .finish() 68 | } 69 | } 70 | 71 | impl Deref for WorkerLocal { 72 | type Target = T; 73 | 74 | #[inline(always)] 75 | fn deref(&self) -> &T { 76 | self.current() 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /rayon-core/tests/double_init_fail.rs: -------------------------------------------------------------------------------- 1 | use rayon_core::ThreadPoolBuilder; 2 | use std::error::Error; 3 | 4 | #[test] 5 | #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)] 6 | fn double_init_fail() { 7 | let result1 = ThreadPoolBuilder::new().build_global(); 8 | assert!(result1.is_ok()); 9 | let err = ThreadPoolBuilder::new().build_global().unwrap_err(); 10 | assert!(err.source().is_none()); 11 | assert_eq!( 12 | err.to_string(), 
13 | "The global thread pool has already been initialized.", 14 | ); 15 | } 16 | -------------------------------------------------------------------------------- /rayon-core/tests/init_zero_threads.rs: -------------------------------------------------------------------------------- 1 | use rayon_core::ThreadPoolBuilder; 2 | 3 | #[test] 4 | #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)] 5 | fn init_zero_threads() { 6 | ThreadPoolBuilder::new() 7 | .num_threads(0) 8 | .build_global() 9 | .unwrap(); 10 | } 11 | -------------------------------------------------------------------------------- /rayon-core/tests/scope_join.rs: -------------------------------------------------------------------------------- 1 | /// Test that one can emulate join with `scope`: 2 | fn pseudo_join(f: F, g: G) 3 | where 4 | F: FnOnce() + Send, 5 | G: FnOnce() + Send, 6 | { 7 | rayon_core::scope(|s| { 8 | s.spawn(|_| g()); 9 | f(); 10 | }); 11 | } 12 | 13 | fn quick_sort(v: &mut [T]) { 14 | if v.len() <= 1 { 15 | return; 16 | } 17 | 18 | let mid = partition(v); 19 | let (lo, hi) = v.split_at_mut(mid); 20 | pseudo_join(|| quick_sort(lo), || quick_sort(hi)); 21 | } 22 | 23 | fn partition(v: &mut [T]) -> usize { 24 | let pivot = v.len() - 1; 25 | let mut i = 0; 26 | for j in 0..pivot { 27 | if v[j] <= v[pivot] { 28 | v.swap(i, j); 29 | i += 1; 30 | } 31 | } 32 | v.swap(i, pivot); 33 | i 34 | } 35 | 36 | fn is_sorted(v: &[T]) -> bool { 37 | (1..v.len()).all(|i| v[i - 1] <= v[i]) 38 | } 39 | 40 | #[test] 41 | fn scope_join() { 42 | let mut v: Vec = (0..256).rev().collect(); 43 | quick_sort(&mut v); 44 | assert!(is_sorted(&v)); 45 | } 46 | -------------------------------------------------------------------------------- /rayon-core/tests/scoped_threadpool.rs: -------------------------------------------------------------------------------- 1 | use crossbeam_utils::thread; 2 | use rayon_core::ThreadPoolBuilder; 3 | 4 | #[derive(PartialEq, Eq, Debug)] 5 | struct Local(i32); 6 
| 7 | scoped_tls::scoped_thread_local!(static LOCAL: Local); 8 | 9 | #[test] 10 | #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)] 11 | fn missing_scoped_tls() { 12 | LOCAL.set(&Local(42), || { 13 | let pool = ThreadPoolBuilder::new() 14 | .build() 15 | .expect("thread pool created"); 16 | 17 | // `LOCAL` is not set in the pool. 18 | pool.install(|| { 19 | assert!(!LOCAL.is_set()); 20 | }); 21 | }); 22 | } 23 | 24 | #[test] 25 | #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)] 26 | fn spawn_scoped_tls_threadpool() { 27 | LOCAL.set(&Local(42), || { 28 | LOCAL.with(|x| { 29 | thread::scope(|scope| { 30 | let pool = ThreadPoolBuilder::new() 31 | .spawn_handler(move |thread| { 32 | scope 33 | .builder() 34 | .spawn(move |_| { 35 | // Borrow the same local value in the thread pool. 36 | LOCAL.set(x, || thread.run()) 37 | }) 38 | .map(|_| ()) 39 | }) 40 | .build() 41 | .expect("thread pool created"); 42 | 43 | // The pool matches our local value. 44 | pool.install(|| { 45 | assert!(LOCAL.is_set()); 46 | LOCAL.with(|y| { 47 | assert_eq!(x, y); 48 | }); 49 | }); 50 | 51 | // If we change our local value, the pool is not affected. 52 | LOCAL.set(&Local(-1), || { 53 | pool.install(|| { 54 | assert!(LOCAL.is_set()); 55 | LOCAL.with(|y| { 56 | assert_eq!(x, y); 57 | }); 58 | }); 59 | }); 60 | }) 61 | .expect("scope threads ok"); 62 | // `thread::scope` will wait for the threads to exit before returning. 63 | }); 64 | }); 65 | } 66 | 67 | #[test] 68 | #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)] 69 | fn build_scoped_tls_threadpool() { 70 | LOCAL.set(&Local(42), || { 71 | LOCAL.with(|x| { 72 | ThreadPoolBuilder::new() 73 | .build_scoped( 74 | move |thread| LOCAL.set(x, || thread.run()), 75 | |pool| { 76 | // The pool matches our local value. 
77 | pool.install(|| { 78 | assert!(LOCAL.is_set()); 79 | LOCAL.with(|y| { 80 | assert_eq!(x, y); 81 | }); 82 | }); 83 | 84 | // If we change our local value, the pool is not affected. 85 | LOCAL.set(&Local(-1), || { 86 | pool.install(|| { 87 | assert!(LOCAL.is_set()); 88 | LOCAL.with(|y| { 89 | assert_eq!(x, y); 90 | }); 91 | }); 92 | }); 93 | }, 94 | ) 95 | .expect("thread pool created"); 96 | // Internally, `std::thread::scope` will wait for the threads to exit before returning. 97 | }); 98 | }); 99 | } 100 | -------------------------------------------------------------------------------- /rayon-core/tests/simple_panic.rs: -------------------------------------------------------------------------------- 1 | use rayon_core::join; 2 | 3 | #[test] 4 | #[should_panic(expected = "should panic")] 5 | fn simple_panic() { 6 | join(|| {}, || panic!("should panic")); 7 | } 8 | -------------------------------------------------------------------------------- /rayon-core/tests/stack_overflow_crash.rs: -------------------------------------------------------------------------------- 1 | use rayon_core::ThreadPoolBuilder; 2 | 3 | use std::env; 4 | use std::process::{Command, ExitStatus, Stdio}; 5 | 6 | #[cfg(target_os = "linux")] 7 | use std::os::unix::process::ExitStatusExt; 8 | 9 | fn force_stack_overflow(depth: u32) { 10 | let mut buffer = [0u8; 1024 * 1024]; 11 | #[allow(clippy::incompatible_msrv)] 12 | std::hint::black_box(&mut buffer); 13 | if depth > 0 { 14 | force_stack_overflow(depth - 1); 15 | } 16 | } 17 | 18 | #[cfg(unix)] 19 | fn disable_core() { 20 | unsafe { 21 | libc::setrlimit( 22 | libc::RLIMIT_CORE, 23 | &libc::rlimit { 24 | rlim_cur: 0, 25 | rlim_max: 0, 26 | }, 27 | ); 28 | } 29 | } 30 | 31 | #[cfg(unix)] 32 | fn overflow_code() -> Option { 33 | None 34 | } 35 | 36 | #[cfg(windows)] 37 | fn overflow_code() -> Option { 38 | use std::os::windows::process::ExitStatusExt; 39 | 40 | ExitStatus::from_raw(0xc00000fd /*STATUS_STACK_OVERFLOW*/).code() 41 | } 42 | 43 
| #[test] 44 | #[cfg_attr(not(any(unix, windows)), ignore)] 45 | fn stack_overflow_crash() { 46 | // First check that the recursive call actually causes a stack overflow, 47 | // and does not get optimized away. 48 | let status = run_ignored("run_with_small_stack"); 49 | assert!(!status.success()); 50 | #[cfg(any(unix, windows))] 51 | assert_eq!(status.code(), overflow_code()); 52 | #[cfg(target_os = "linux")] 53 | assert!(matches!( 54 | status.signal(), 55 | Some(libc::SIGABRT | libc::SIGSEGV) 56 | )); 57 | 58 | // Now run with a larger stack and verify correct operation. 59 | let status = run_ignored("run_with_large_stack"); 60 | assert_eq!(status.code(), Some(0)); 61 | #[cfg(target_os = "linux")] 62 | assert_eq!(status.signal(), None); 63 | } 64 | 65 | fn run_ignored(test: &str) -> ExitStatus { 66 | Command::new(env::current_exe().unwrap()) 67 | .arg("--ignored") 68 | .arg("--exact") 69 | .arg(test) 70 | .stdout(Stdio::null()) 71 | .stderr(Stdio::null()) 72 | .status() 73 | .unwrap() 74 | } 75 | 76 | #[test] 77 | #[ignore] 78 | fn run_with_small_stack() { 79 | run_with_stack(8); 80 | } 81 | 82 | #[test] 83 | #[ignore] 84 | fn run_with_large_stack() { 85 | run_with_stack(48); 86 | } 87 | 88 | fn run_with_stack(stack_size_in_mb: usize) { 89 | let pool = ThreadPoolBuilder::new() 90 | .stack_size(stack_size_in_mb * 1024 * 1024) 91 | .build() 92 | .unwrap(); 93 | pool.install(|| { 94 | #[cfg(unix)] 95 | disable_core(); 96 | force_stack_overflow(32); 97 | }); 98 | } 99 | -------------------------------------------------------------------------------- /rayon-demo/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | rust-version = "1.59" 3 | edition = "2021" 4 | name = "rayon-demo" 5 | version = "0.0.0" 6 | authors = ["Niko Matsakis "] 7 | publish = false 8 | 9 | [dependencies] 10 | rustc-rayon = { path = "../" } 11 | cgmath = "0.18" 12 | docopt = "1" 13 | fixedbitset = "0.4" 14 | glium = "0.32" 15 | once_cell = 
"1.17.1" 16 | rand = "0.8" 17 | rand_xorshift = "0.3" 18 | regex = "1" 19 | 20 | [dependencies.serde] 21 | version = "1.0.85" 22 | features = ["derive"] 23 | 24 | [target.'cfg(unix)'.dependencies] 25 | libc = "0.2" 26 | 27 | [target.'cfg(windows)'.dependencies] 28 | winapi = { version = "0.3", features = ["processthreadsapi"] } 29 | 30 | [dev-dependencies] 31 | doc-comment = "0.3" 32 | num = "0.4" 33 | -------------------------------------------------------------------------------- /rayon-demo/data/README.md: -------------------------------------------------------------------------------- 1 | Data for various benchmarks. All in public domain or otherwise freely 2 | licensed. 3 | -------------------------------------------------------------------------------- /rayon-demo/data/tsp/README.md: -------------------------------------------------------------------------------- 1 | Inputs for the Traveling Salesman Problem solver. These are in TSPLIB 2 | format. 3 | 4 | Sources: 5 | 6 | - `dj15.tsp`: derived from `dj38.tsp` 7 | - `dj38.tsp`: 8 | 9 | -------------------------------------------------------------------------------- /rayon-demo/data/tsp/dj10.tsp: -------------------------------------------------------------------------------- 1 | NAME: dj10 2 | COMMENT : 10 locations in Djibouti; chosen from dj38.tsp 3 | TYPE: TSP 4 | DIMENSION: 10 5 | EDGE_WEIGHT_TYPE: EUC_2D 6 | NODE_COORD_SECTION 7 | 1 11003.611100 42102.500000 8 | 2 11108.611100 42373.888900 9 | 3 11133.333300 42885.833300 10 | 4 11155.833300 42712.500000 11 | 5 11183.333300 42933.333300 12 | 6 11297.500000 42853.333300 13 | 7 11310.277800 42929.444400 14 | 8 11416.666700 42983.333300 15 | 9 11423.888900 43000.277800 16 | 10 11438.333300 42057.222200 17 | -------------------------------------------------------------------------------- /rayon-demo/data/tsp/dj15.tsp: -------------------------------------------------------------------------------- 1 | NAME: dj15 2 | COMMENT : 15 locations in Djibouti; chosen 
from dj38.tsp 3 | TYPE: TSP 4 | DIMENSION: 15 5 | EDGE_WEIGHT_TYPE: EUC_2D 6 | NODE_COORD_SECTION 7 | 1 11003.611100 42102.500000 8 | 2 11108.611100 42373.888900 9 | 3 11133.333300 42885.833300 10 | 4 11155.833300 42712.500000 11 | 5 11183.333300 42933.333300 12 | 6 11297.500000 42853.333300 13 | 7 11310.277800 42929.444400 14 | 8 11416.666700 42983.333300 15 | 9 11423.888900 43000.277800 16 | 10 11438.333300 42057.222200 17 | 11 11461.111100 43252.777800 18 | 12 11485.555600 43187.222200 19 | 13 11503.055600 42855.277800 20 | 14 11511.388900 42106.388900 21 | 15 11522.222200 42841.944400 22 | -------------------------------------------------------------------------------- /rayon-demo/data/tsp/dj38.tsp: -------------------------------------------------------------------------------- 1 | NAME: dj38 2 | COMMENT : 38 locations in Djibouti 3 | COMMENT : Derived from National Imagery and Mapping Agency data 4 | COMMENT : This file is a corrected version of dj89, where duplications 5 | COMMENT: have been removed. Thanks to Jay Muthuswamy and others for 6 | COMMENT: requesting data sets without duplications. 
7 | TYPE: TSP 8 | DIMENSION: 38 9 | EDGE_WEIGHT_TYPE: EUC_2D 10 | NODE_COORD_SECTION 11 | 1 11003.611100 42102.500000 12 | 2 11108.611100 42373.888900 13 | 3 11133.333300 42885.833300 14 | 4 11155.833300 42712.500000 15 | 5 11183.333300 42933.333300 16 | 6 11297.500000 42853.333300 17 | 7 11310.277800 42929.444400 18 | 8 11416.666700 42983.333300 19 | 9 11423.888900 43000.277800 20 | 10 11438.333300 42057.222200 21 | 11 11461.111100 43252.777800 22 | 12 11485.555600 43187.222200 23 | 13 11503.055600 42855.277800 24 | 14 11511.388900 42106.388900 25 | 15 11522.222200 42841.944400 26 | 16 11569.444400 43136.666700 27 | 17 11583.333300 43150.000000 28 | 18 11595.000000 43148.055600 29 | 19 11600.000000 43150.000000 30 | 20 11690.555600 42686.666700 31 | 21 11715.833300 41836.111100 32 | 22 11751.111100 42814.444400 33 | 23 11770.277800 42651.944400 34 | 24 11785.277800 42884.444400 35 | 25 11822.777800 42673.611100 36 | 26 11846.944400 42660.555600 37 | 27 11963.055600 43290.555600 38 | 28 11973.055600 43026.111100 39 | 29 12058.333300 42195.555600 40 | 30 12149.444400 42477.500000 41 | 31 12286.944400 43355.555600 42 | 32 12300.000000 42433.333300 43 | 33 12355.833300 43156.388900 44 | 34 12363.333300 43189.166700 45 | 35 12372.777800 42711.388900 46 | 36 12386.666700 43334.722200 47 | 37 12421.666700 42895.555600 48 | 38 12645.000000 42973.333300 49 | -------------------------------------------------------------------------------- /rayon-demo/examples/README.md: -------------------------------------------------------------------------------- 1 | We use this directory for interactive tests that can't be run in an 2 | automatic fashion. For examples of how to use Rayon, or benchmarks, 3 | see `rayon-demo`. 
4 | -------------------------------------------------------------------------------- /rayon-demo/examples/cpu_monitor.rs: -------------------------------------------------------------------------------- 1 | use docopt::Docopt; 2 | use std::io; 3 | use std::process; 4 | 5 | const USAGE: &str = " 6 | Usage: cpu_monitor [options] 7 | cpu_monitor --help 8 | 9 | A test for monitoring how much CPU usage Rayon consumes under various 10 | scenarios. This test is intended to be executed interactively, like so: 11 | 12 | cargo run --example cpu_monitor -- tasks_ended 13 | 14 | The list of scenarios you can try are as follows: 15 | 16 | - tasks_ended: after all tasks have finished, go to sleep 17 | - task_stall_root: a root task stalls for a very long time 18 | - task_stall_scope: a task in a scope stalls for a very long time 19 | 20 | Options: 21 | -h, --help Show this message. 22 | -d N, --depth N Control how hard the dummy task works [default: 27] 23 | "; 24 | 25 | #[derive(serde::Deserialize)] 26 | pub struct Args { 27 | arg_scenario: String, 28 | flag_depth: usize, 29 | } 30 | 31 | fn main() { 32 | let args: &Args = &Docopt::new(USAGE) 33 | .and_then(|d| d.deserialize()) 34 | .unwrap_or_else(|e| e.exit()); 35 | 36 | match &args.arg_scenario[..] 
{ 37 | "tasks_ended" => tasks_ended(args), 38 | "task_stall_root" => task_stall_root(args), 39 | "task_stall_scope" => task_stall_scope(args), 40 | _ => { 41 | println!("unknown scenario: `{}`", args.arg_scenario); 42 | println!("try --help"); 43 | process::exit(1); 44 | } 45 | } 46 | } 47 | 48 | fn wait_for_user() { 49 | let mut input = String::new(); 50 | io::stdin().read_line(&mut input).unwrap(); 51 | } 52 | 53 | fn task(args: &Args) { 54 | fn join_recursively(n: usize) { 55 | if n == 0 { 56 | return; 57 | } 58 | rayon::join(|| join_recursively(n - 1), || join_recursively(n - 1)); 59 | } 60 | 61 | println!("Starting heavy work at depth {}...wait.", args.flag_depth); 62 | join_recursively(args.flag_depth); 63 | println!("Heavy work done; check top. You should see CPU usage drop to zero soon."); 64 | println!("Press to quit..."); 65 | } 66 | 67 | fn tasks_ended(args: &Args) { 68 | task(args); 69 | wait_for_user(); 70 | } 71 | 72 | fn task_stall_root(args: &Args) { 73 | rayon::join(|| task(args), wait_for_user); 74 | } 75 | 76 | fn task_stall_scope(args: &Args) { 77 | rayon::scope(|scope| { 78 | scope.spawn(move |_| task(args)); 79 | scope.spawn(move |_| wait_for_user()); 80 | }); 81 | } 82 | -------------------------------------------------------------------------------- /rayon-demo/src/cpu_time/mod.rs: -------------------------------------------------------------------------------- 1 | use std::time::{Duration, Instant}; 2 | 3 | #[cfg(windows)] 4 | mod win; 5 | #[cfg(windows)] 6 | pub use self::win::get_cpu_time; 7 | 8 | #[cfg(unix)] 9 | mod unix; 10 | #[cfg(unix)] 11 | pub use self::unix::get_cpu_time; 12 | 13 | #[cfg(not(any(unix, windows)))] 14 | pub fn get_cpu_time() -> Option { 15 | None 16 | } 17 | 18 | pub fn get_cpu_duration(start: Option, stop: Option) -> Option { 19 | Some(Duration::from_nanos(stop? 
- start?)) 20 | } 21 | 22 | #[derive(Copy, Clone)] 23 | pub struct CpuMeasure { 24 | /// Real time elapsed 25 | pub time_duration: Duration, 26 | 27 | /// percentage (0-100) of that as cpu time 28 | pub cpu_usage_percent: Option, 29 | } 30 | 31 | pub fn measure_cpu(op: impl FnOnce()) -> CpuMeasure { 32 | let time_start = Instant::now(); 33 | let cpu_start = get_cpu_time(); 34 | 35 | op(); 36 | 37 | let cpu_stop = get_cpu_time(); 38 | let time_duration = time_start.elapsed(); 39 | 40 | CpuMeasure { 41 | time_duration, 42 | cpu_usage_percent: get_cpu_duration(cpu_start, cpu_stop) 43 | .map(|cpu| 100.0 * cpu.as_secs_f64() / time_duration.as_secs_f64()), 44 | } 45 | } 46 | 47 | pub fn print_time(m: CpuMeasure) { 48 | println!(" wallclock: {} ns", m.time_duration.as_nanos()); 49 | if let Some(cpu_usage) = m.cpu_usage_percent { 50 | println!(" cpu usage: {:3.1}%", cpu_usage); 51 | } else { 52 | println!(" cpu usage: N/A"); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /rayon-demo/src/cpu_time/unix.rs: -------------------------------------------------------------------------------- 1 | use libc::{getrusage, RUSAGE_SELF}; 2 | use std::mem::MaybeUninit; 3 | 4 | pub fn get_cpu_time() -> Option { 5 | let usage = unsafe { 6 | let mut usage = MaybeUninit::uninit(); 7 | if getrusage(RUSAGE_SELF, usage.as_mut_ptr()) != 0 { 8 | return None; 9 | } 10 | usage.assume_init() 11 | }; 12 | let user = 13 | 1_000_000_000 * (usage.ru_utime.tv_sec as u64) + 1_000 * (usage.ru_utime.tv_usec as u64); 14 | let system = 15 | 1_000_000_000 * (usage.ru_stime.tv_sec as u64) + 1_000 * (usage.ru_stime.tv_usec as u64); 16 | Some(user + system) 17 | } 18 | -------------------------------------------------------------------------------- /rayon-demo/src/cpu_time/win.rs: -------------------------------------------------------------------------------- 1 | use std::mem::MaybeUninit; 2 | use winapi::um::processthreadsapi::{GetCurrentProcess, 
GetProcessTimes}; 3 | 4 | pub fn get_cpu_time() -> Option { 5 | let (kernel, user) = unsafe { 6 | let process = GetCurrentProcess(); 7 | let mut _creation = MaybeUninit::uninit(); 8 | let mut _exit = MaybeUninit::uninit(); 9 | let mut kernel = MaybeUninit::uninit(); 10 | let mut user = MaybeUninit::uninit(); 11 | if GetProcessTimes( 12 | process, 13 | _creation.as_mut_ptr(), 14 | _exit.as_mut_ptr(), 15 | kernel.as_mut_ptr(), 16 | user.as_mut_ptr(), 17 | ) == 0 18 | { 19 | return None; 20 | } 21 | (kernel.assume_init(), user.assume_init()) 22 | }; 23 | let kernel = (kernel.dwHighDateTime as u64) << 32 | kernel.dwLowDateTime as u64; 24 | let user = (user.dwHighDateTime as u64) << 32 | user.dwLowDateTime as u64; 25 | Some(100 * (kernel + user)) 26 | } 27 | -------------------------------------------------------------------------------- /rayon-demo/src/factorial/mod.rs: -------------------------------------------------------------------------------- 1 | //! Benchmark Factorial N! = 1×2×⋯×N 2 | 3 | use num::{BigUint, One}; 4 | use rayon::prelude::*; 5 | use std::ops::Mul; 6 | 7 | const N: u32 = 9999; 8 | 9 | /// Compute the Factorial using a plain iterator. 10 | fn factorial(n: u32) -> BigUint { 11 | (1..=n).map(BigUint::from).fold(BigUint::one(), Mul::mul) 12 | } 13 | 14 | #[bench] 15 | /// Benchmark the Factorial using a plain iterator. 16 | fn factorial_iterator(b: &mut test::Bencher) { 17 | let f = factorial(N); 18 | b.iter(|| assert_eq!(factorial(test::black_box(N)), f)); 19 | } 20 | 21 | #[bench] 22 | /// Compute the Factorial using rayon::par_iter. 23 | fn factorial_par_iter(b: &mut test::Bencher) { 24 | fn fact(n: u32) -> BigUint { 25 | (1..n + 1) 26 | .into_par_iter() 27 | .map(BigUint::from) 28 | .reduce_with(Mul::mul) 29 | .unwrap() 30 | } 31 | 32 | let f = factorial(N); 33 | b.iter(|| assert_eq!(fact(test::black_box(N)), f)); 34 | } 35 | 36 | #[bench] 37 | /// Compute the Factorial using rayon::fold_with. 
38 | fn factorial_fold_with(b: &mut test::Bencher) { 39 | fn fact(n: u32) -> BigUint { 40 | (1..n + 1) 41 | .into_par_iter() 42 | .with_min_len(64) // for fair comparison with factorial_fold_chunks_with() 43 | .fold_with(BigUint::from(1_u32), |acc, x| acc.mul(x)) 44 | .reduce_with(Mul::mul) 45 | .unwrap() 46 | } 47 | 48 | let f = factorial(N); 49 | b.iter(|| assert_eq!(fact(test::black_box(N)), f)); 50 | } 51 | 52 | #[bench] 53 | /// Compute the Factorial using rayon::fold_chunks_with. 54 | fn factorial_fold_chunks_with(b: &mut test::Bencher) { 55 | fn fact(n: u32) -> BigUint { 56 | (1..n + 1) 57 | .into_par_iter() 58 | .fold_chunks_with(64, BigUint::from(1_u32), |acc, x| acc.mul(x)) 59 | .reduce_with(Mul::mul) 60 | .unwrap() 61 | } 62 | 63 | let f = factorial(N); 64 | b.iter(|| assert_eq!(fact(test::black_box(N)), f)); 65 | } 66 | 67 | #[bench] 68 | /// Compute the Factorial using divide-and-conquer serial recursion. 69 | fn factorial_recursion(b: &mut test::Bencher) { 70 | fn product(a: u32, b: u32) -> BigUint { 71 | if a == b { 72 | return a.into(); 73 | } 74 | let mid = (a + b) / 2; 75 | product(a, mid) * product(mid + 1, b) 76 | } 77 | 78 | let f = factorial(N); 79 | b.iter(|| assert_eq!(product(1, test::black_box(N)), f)); 80 | } 81 | 82 | #[bench] 83 | /// Compute the Factorial using divide-and-conquer parallel join. 84 | fn factorial_join(b: &mut test::Bencher) { 85 | fn product(a: u32, b: u32) -> BigUint { 86 | if a == b { 87 | return a.into(); 88 | } 89 | let mid = (a + b) / 2; 90 | let (x, y) = rayon::join(|| product(a, mid), || product(mid + 1, b)); 91 | x * y 92 | } 93 | 94 | let f = factorial(N); 95 | b.iter(|| assert_eq!(product(1, test::black_box(N)), f)); 96 | } 97 | -------------------------------------------------------------------------------- /rayon-demo/src/fibonacci/mod.rs: -------------------------------------------------------------------------------- 1 | //! Benchmark Fibonacci numbers, F(n) = F(n-1) + F(n-2) 2 | //! 3 | //! 
Recursion is a horrible way to compute this -- roughly O(2ⁿ). 4 | //! 5 | //! It's potentially interesting for rayon::join, because the splits are 6 | //! unequal. F(n-1) has roughly twice as much work to do as F(n-2). The 7 | //! imbalance might make it more likely to leave idle threads ready to steal 8 | //! jobs. We can also see if there's any effect to having the larger job first 9 | //! or second. 10 | //! 11 | //! We're doing very little real work in each job, so the rayon overhead is 12 | //! going to dominate. The serial recursive version will likely be faster, 13 | //! unless you have a whole lot of CPUs. The iterative version reveals the 14 | //! joke. 15 | 16 | const N: u32 = 32; 17 | const FN: u32 = 2_178_309; 18 | 19 | fn fib_iterative(n: u32) -> u32 { 20 | let mut a = 0; 21 | let mut b = 1; 22 | for _ in 0..n { 23 | let c = a + b; 24 | a = b; 25 | b = c; 26 | } 27 | a 28 | } 29 | 30 | fn fib_recursive(n: u32) -> u32 { 31 | if n < 2 { 32 | return n; 33 | } 34 | 35 | fib_recursive(n - 1) + fib_recursive(n - 2) 36 | } 37 | 38 | #[bench] 39 | /// Compute the Fibonacci number recursively, without any parallelism. 40 | fn fibonacci_recursive(b: &mut test::Bencher) { 41 | b.iter(|| assert_eq!(fib_recursive(test::black_box(N)), FN)); 42 | } 43 | 44 | #[bench] 45 | /// Compute the Fibonacci number recursively, using rayon::join. 46 | /// The larger branch F(N-1) is computed first. 47 | fn fibonacci_join_1_2(b: &mut test::Bencher) { 48 | fn fib(n: u32) -> u32 { 49 | if n < 2 { 50 | return n; 51 | } 52 | 53 | let (a, b) = rayon::join(|| fib(n - 1), || fib(n - 2)); 54 | a + b 55 | } 56 | 57 | b.iter(|| assert_eq!(fib(test::black_box(N)), FN)); 58 | } 59 | 60 | #[bench] 61 | /// Compute the Fibonacci number recursively, using rayon::join. 62 | /// The smaller branch F(N-2) is computed first. 
63 | fn fibonacci_join_2_1(b: &mut test::Bencher) { 64 | fn fib(n: u32) -> u32 { 65 | if n < 2 { 66 | return n; 67 | } 68 | 69 | let (a, b) = rayon::join(|| fib(n - 2), || fib(n - 1)); 70 | a + b 71 | } 72 | 73 | b.iter(|| assert_eq!(fib(test::black_box(N)), FN)); 74 | } 75 | 76 | #[bench] 77 | /// Compute the Fibonacci number recursively, using rayon::iter::split to parallelize. 78 | fn fibonacci_split_recursive(b: &mut test::Bencher) { 79 | fn fib(n: u32) -> u32 { 80 | use rayon::iter::ParallelIterator; 81 | 82 | rayon::iter::split(n, |n| { 83 | if n < 2 { 84 | (n, None) 85 | } else { 86 | (n - 2, Some(n - 1)) 87 | } 88 | }) 89 | .map(fib_recursive) 90 | .sum() 91 | } 92 | 93 | b.iter(|| assert_eq!(fib(test::black_box(N)), FN)); 94 | } 95 | 96 | #[bench] 97 | /// Compute the Fibonacci number iteratively, using rayon::iter::split to parallelize. 98 | fn fibonacci_split_iterative(b: &mut test::Bencher) { 99 | fn fib(n: u32) -> u32 { 100 | use rayon::iter::ParallelIterator; 101 | 102 | rayon::iter::split(n, |n| { 103 | if n < 2 { 104 | (n, None) 105 | } else { 106 | (n - 2, Some(n - 1)) 107 | } 108 | }) 109 | .map(fib_iterative) 110 | .sum() 111 | } 112 | 113 | b.iter(|| assert_eq!(fib(test::black_box(N)), FN)); 114 | } 115 | 116 | #[bench] 117 | /// Compute the Fibonacci number iteratively, just to show how silly the others 118 | /// are. Parallelism can't make up for a bad choice of algorithm. 119 | fn fibonacci_iterative(b: &mut test::Bencher) { 120 | b.iter(|| assert_eq!(fib_iterative(test::black_box(N)), FN)); 121 | } 122 | -------------------------------------------------------------------------------- /rayon-demo/src/find/mod.rs: -------------------------------------------------------------------------------- 1 | /// Simple benchmarks of `find_any()` performance 2 | 3 | macro_rules! 
make_tests { 4 | ($n:expr, $m:ident) => { 5 | mod $m { 6 | use once_cell::sync::Lazy; 7 | use rand::distributions::Standard; 8 | use rand::Rng; 9 | use rayon::prelude::*; 10 | use test::Bencher; 11 | 12 | static HAYSTACK: Lazy> = Lazy::new(|| { 13 | let rng = crate::seeded_rng(); 14 | rng.sample_iter(&Standard) 15 | .map(|x| { 16 | let mut result: [u32; $n] = [0; $n]; 17 | result[0] = x; 18 | result 19 | }) 20 | .take(10_000_000) 21 | .collect() 22 | }); 23 | 24 | #[bench] 25 | fn parallel_find_first(b: &mut Bencher) { 26 | let needle = HAYSTACK[0][0]; 27 | b.iter(|| assert!(HAYSTACK.par_iter().find_any(|&&x| x[0] == needle).is_some())); 28 | } 29 | 30 | #[bench] 31 | fn serial_find_first(b: &mut Bencher) { 32 | let needle = HAYSTACK[0][0]; 33 | b.iter(|| assert!(HAYSTACK.iter().find(|&&x| x[0] == needle).is_some())); 34 | } 35 | 36 | #[bench] 37 | fn parallel_find_last(b: &mut Bencher) { 38 | let needle = HAYSTACK[HAYSTACK.len() - 1][0]; 39 | b.iter(|| assert!(HAYSTACK.par_iter().find_any(|&&x| x[0] == needle).is_some())); 40 | } 41 | 42 | #[bench] 43 | fn serial_find_last(b: &mut Bencher) { 44 | let needle = HAYSTACK[HAYSTACK.len() - 1][0]; 45 | b.iter(|| assert!(HAYSTACK.iter().find(|&&x| x[0] == needle).is_some())); 46 | } 47 | 48 | #[bench] 49 | fn parallel_find_middle(b: &mut Bencher) { 50 | let needle = HAYSTACK[HAYSTACK.len() / 3 * 2][0]; 51 | b.iter(|| assert!(HAYSTACK.par_iter().find_any(|&&x| x[0] == needle).is_some())); 52 | } 53 | 54 | #[bench] 55 | fn serial_find_middle(b: &mut Bencher) { 56 | let needle = HAYSTACK[HAYSTACK.len() / 3 * 2][0]; 57 | b.iter(|| assert!(HAYSTACK.iter().find(|&&x| x[0] == needle).is_some())); 58 | } 59 | 60 | #[bench] 61 | fn parallel_find_missing(b: &mut Bencher) { 62 | let needle = HAYSTACK.iter().map(|v| v[0]).max().unwrap() + 1; 63 | b.iter(|| assert!(HAYSTACK.par_iter().find_any(|&&x| x[0] == needle).is_none())); 64 | } 65 | 66 | #[bench] 67 | fn serial_find_missing(b: &mut Bencher) { 68 | let needle = 
HAYSTACK.iter().map(|v| v[0]).max().unwrap() + 1; 69 | b.iter(|| assert!(HAYSTACK.iter().find(|&&x| x[0] == needle).is_none())); 70 | } 71 | 72 | #[bench] 73 | fn parallel_find_common(b: &mut Bencher) { 74 | b.iter(|| { 75 | assert!(HAYSTACK 76 | .par_iter() 77 | .find_any(|&&x| x[0] % 1000 == 999) 78 | .is_some()) 79 | }); 80 | } 81 | 82 | #[bench] 83 | fn serial_find_common(b: &mut Bencher) { 84 | b.iter(|| assert!(HAYSTACK.iter().find(|&&x| x[0] % 1000 == 999).is_some())); 85 | } 86 | } 87 | }; 88 | } 89 | 90 | make_tests!(1, size1); 91 | // make_tests!(64, size64); 92 | // make_tests!(256, size256); 93 | -------------------------------------------------------------------------------- /rayon-demo/src/join_microbench.rs: -------------------------------------------------------------------------------- 1 | //! Some microbenchmarks that stress test a pure `join` path. 2 | 3 | use rayon::prelude::*; 4 | use std::usize; 5 | use test::Bencher; 6 | 7 | #[bench] 8 | fn increment_all(b: &mut Bencher) { 9 | let mut big_vec = vec![0_usize; 100 * 1024]; 10 | b.iter(|| { 11 | big_vec.par_iter_mut().for_each(|p| *p = p.wrapping_add(1)); 12 | }); 13 | } 14 | 15 | #[bench] 16 | fn increment_all_min(b: &mut Bencher) { 17 | let mut big_vec = vec![0_usize; 100 * 1024]; 18 | b.iter(|| { 19 | big_vec 20 | .par_iter_mut() 21 | .with_min_len(1024) 22 | .for_each(|p| *p = p.wrapping_add(1)); 23 | }); 24 | } 25 | 26 | #[bench] 27 | fn increment_all_serialized(b: &mut Bencher) { 28 | let mut big_vec = vec![0_usize; 100 * 1024]; 29 | b.iter(|| { 30 | big_vec 31 | .par_iter_mut() 32 | .with_min_len(usize::MAX) 33 | .for_each(|p| *p = p.wrapping_add(1)); 34 | }); 35 | } 36 | 37 | #[bench] 38 | fn increment_all_max(b: &mut Bencher) { 39 | let mut big_vec = vec![0_usize; 100 * 1024]; 40 | b.iter(|| { 41 | big_vec 42 | .par_iter_mut() 43 | .with_max_len(100) 44 | .for_each(|p| *p = p.wrapping_add(1)); 45 | }); 46 | } 47 | 48 | #[bench] 49 | fn increment_all_atomized(b: &mut Bencher) { 50 | let 
mut big_vec = vec![0_usize; 100 * 1024]; 51 | b.iter(|| { 52 | big_vec 53 | .par_iter_mut() 54 | .with_max_len(1) 55 | .for_each(|p| *p = p.wrapping_add(1)); 56 | }); 57 | } 58 | 59 | #[bench] 60 | fn join_recursively(b: &mut Bencher) { 61 | fn join_recursively(n: usize) { 62 | if n == 0 { 63 | return; 64 | } 65 | rayon::join(|| join_recursively(n - 1), || join_recursively(n - 1)); 66 | } 67 | 68 | b.iter(|| { 69 | join_recursively(16); 70 | }); 71 | } 72 | -------------------------------------------------------------------------------- /rayon-demo/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Make sure the examples in the main README actually compile. 2 | #[cfg(doctest)] 3 | doc_comment::doctest!("../../README.md"); 4 | -------------------------------------------------------------------------------- /rayon-demo/src/life/bench.rs: -------------------------------------------------------------------------------- 1 | use super::Board; 2 | 3 | #[bench] 4 | fn generations(b: &mut ::test::Bencher) { 5 | b.iter(|| super::generations(Board::new(200, 200).random(), 100)); 6 | } 7 | 8 | #[bench] 9 | fn par_iter_generations(b: &mut ::test::Bencher) { 10 | b.iter(|| super::parallel_generations(Board::new(200, 200).random(), 100)); 11 | } 12 | 13 | #[bench] 14 | fn par_bridge_generations(b: &mut ::test::Bencher) { 15 | b.iter(|| super::par_bridge_generations(Board::new(200, 200).random(), 100)); 16 | } 17 | -------------------------------------------------------------------------------- /rayon-demo/src/main.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(test, feature(test))] 2 | #![warn(rust_2018_idioms)] 3 | 4 | use std::{env, io, io::prelude::*, process::exit}; 5 | 6 | mod cpu_time; 7 | mod life; 8 | mod matmul; 9 | mod mergesort; 10 | mod nbody; 11 | mod noop; 12 | mod quicksort; 13 | mod sieve; 14 | mod tsp; 15 | 16 | // these are not "full-fledged" benchmarks yet, 17 
| // they only run with cargo bench 18 | #[cfg(test)] 19 | mod factorial; 20 | #[cfg(test)] 21 | mod fibonacci; 22 | #[cfg(test)] 23 | mod find; 24 | #[cfg(test)] 25 | mod join_microbench; 26 | #[cfg(test)] 27 | mod map_collect; 28 | #[cfg(test)] 29 | mod pythagoras; 30 | #[cfg(test)] 31 | mod sort; 32 | #[cfg(test)] 33 | mod str_split; 34 | #[cfg(test)] 35 | mod vec_collect; 36 | 37 | #[cfg(test)] 38 | extern crate test; 39 | 40 | const USAGE: &str = " 41 | Usage: rayon-demo bench 42 | rayon-demo [ options ] 43 | rayon-demo --help 44 | 45 | A collection of different benchmarks of Rayon. You can run the full 46 | benchmark suite by executing `cargo bench` or `rayon-demo bench`. 47 | 48 | Alternatively, you can run individual benchmarks by running 49 | `rayon-demo foo`, where `foo` is the name of a benchmark. Each 50 | benchmark has its own options and modes, so try `rayon-demo foo 51 | --help`. 52 | 53 | Benchmarks: 54 | 55 | - life : Conway's Game of Life. 56 | - nbody: A physics simulation of multiple bodies attracting and repelling 57 | one another. 58 | - sieve: Finding primes using a Sieve of Eratosthenes. 59 | - matmul: Parallel matrix multiplication. 60 | - mergesort: Parallel mergesort. 61 | - noop: Launch empty tasks to measure CPU usage. 62 | - quicksort: Parallel quicksort. 63 | - tsp: Traveling salesman problem solver (sample data sets in `data/tsp`). 64 | "; 65 | 66 | fn usage() -> ! { 67 | let _ = writeln!(&mut io::stderr(), "{}", USAGE); 68 | exit(1); 69 | } 70 | 71 | fn main() { 72 | let args: Vec = env::args().collect(); 73 | 74 | if args.len() < 2 { 75 | usage(); 76 | } 77 | 78 | let bench_name = &args[1]; 79 | match &bench_name[..] 
{ 80 | "matmul" => matmul::main(&args[1..]), 81 | "mergesort" => mergesort::main(&args[1..]), 82 | "nbody" => nbody::main(&args[1..]), 83 | "quicksort" => quicksort::main(&args[1..]), 84 | "sieve" => sieve::main(&args[1..]), 85 | "tsp" => tsp::main(&args[1..]), 86 | "life" => life::main(&args[1..]), 87 | "noop" => noop::main(&args[1..]), 88 | _ => usage(), 89 | } 90 | } 91 | 92 | fn seeded_rng() -> rand_xorshift::XorShiftRng { 93 | use rand::SeedableRng; 94 | use rand_xorshift::XorShiftRng; 95 | let mut seed = ::Seed::default(); 96 | (0..).zip(seed.as_mut()).for_each(|(i, x)| *x = i); 97 | XorShiftRng::from_seed(seed) 98 | } 99 | -------------------------------------------------------------------------------- /rayon-demo/src/matmul/bench.rs: -------------------------------------------------------------------------------- 1 | const ROW_SIZE: usize = 256; 2 | 3 | #[bench] 4 | fn bench_matmul_strassen(b: &mut test::Bencher) { 5 | let n = ROW_SIZE * ROW_SIZE; 6 | let x = vec![1f32; n]; 7 | let y = vec![2f32; n]; 8 | let mut z = vec![0f32; n]; 9 | 10 | b.iter(|| { 11 | super::matmul_strassen(&x, &y, &mut z); 12 | }); 13 | } 14 | -------------------------------------------------------------------------------- /rayon-demo/src/mergesort/bench.rs: -------------------------------------------------------------------------------- 1 | // Size to use when doing `cargo bench`; extensively tuned to run in 2 | // "not too long" on my laptop -nmatsakis 3 | const BENCH_SIZE: usize = 250_000_000 / 512; 4 | 5 | fn bench_harness(mut f: F, b: &mut test::Bencher) { 6 | let base_vec = super::default_vec(BENCH_SIZE); 7 | let mut sort_vec = vec![]; 8 | b.iter(|| { 9 | sort_vec = base_vec.clone(); 10 | f(&mut sort_vec); 11 | }); 12 | assert!(super::is_sorted(&mut sort_vec)); 13 | } 14 | 15 | #[bench] 16 | fn merge_sort_par_bench(b: &mut test::Bencher) { 17 | bench_harness(super::merge_sort, b); 18 | } 19 | 20 | #[bench] 21 | fn merge_sort_seq_bench(b: &mut test::Bencher) { 22 | 
// (continuation) tail of `merge_sort_seq_bench`, opened in the previous chunk.
    bench_harness(super::seq_merge_sort, b);
}
// ===== /rayon-demo/src/nbody/bench.rs =====
use super::nbody::NBodyBenchmark;

// Because benchmarks run iteratively, use smaller constants by default:
const BENCH_BODIES: usize = 1000;

const BENCH_TICKS: usize = 10;

/// Shared harness for the n-body benchmarks: build one seeded world,
/// then run `BENCH_TICKS` simulation steps per measured iteration so a
/// single `b.iter` sample amortizes the per-tick dispatch overhead.
fn nbody_bench<TICK>(b: &mut test::Bencher, mut tick: TICK)
where
    TICK: FnMut(&mut NBodyBenchmark),
{
    // Seeded RNG keeps every run (and every tick strategy) on identical data.
    let mut rng = crate::seeded_rng();
    let mut benchmark = NBodyBenchmark::new(BENCH_BODIES, &mut rng);
    b.iter(|| {
        for _ in 0..BENCH_TICKS {
            tick(&mut benchmark);
        }
    });
}

#[bench]
fn nbody_seq(b: &mut ::test::Bencher) {
    nbody_bench(b, |n| {
        n.tick_seq();
    });
}

#[bench]
fn nbody_par_iter(b: &mut ::test::Bencher) {
    nbody_bench(b, |n| {
        n.tick_par();
    });
}

#[bench]
fn nbody_par_bridge(b: &mut ::test::Bencher) {
    nbody_bench(b, |n| {
        n.tick_par_bridge();
    });
}

#[bench]
fn nbody_parreduce(b: &mut ::test::Bencher) {
    nbody_bench(b, |n| {
        n.tick_par_reduce();
    });
}
// ===== /rayon-demo/src/noop/mod.rs =====
const USAGE: &str = "
Usage: noop [--sleep N] [--iters N]

Noop loop to measure CPU usage. See rayon-rs/rayon#642.

Options:
    --sleep N      How long to sleep (in millis) between doing a spawn. [default: 10]
    --iters N      Total time to execution (in millis).
[default: 100] 9 | "; 10 | 11 | use crate::cpu_time; 12 | use docopt::Docopt; 13 | 14 | #[derive(serde::Deserialize)] 15 | pub struct Args { 16 | flag_sleep: u64, 17 | flag_iters: u64, 18 | } 19 | 20 | pub fn main(args: &[String]) { 21 | let args: Args = Docopt::new(USAGE) 22 | .and_then(|d| d.argv(args).deserialize()) 23 | .unwrap_or_else(|e| e.exit()); 24 | 25 | let m = cpu_time::measure_cpu(|| { 26 | for _ in 1..args.flag_iters { 27 | std::thread::sleep(std::time::Duration::from_millis(args.flag_sleep)); 28 | rayon::spawn(move || {}); 29 | } 30 | }); 31 | println!( 32 | "noop --iters={} --sleep={}", 33 | args.flag_iters, args.flag_sleep 34 | ); 35 | cpu_time::print_time(m); 36 | } 37 | -------------------------------------------------------------------------------- /rayon-demo/src/pythagoras/mod.rs: -------------------------------------------------------------------------------- 1 | //! How many Pythagorean triples exist less than or equal to a million? 2 | //! i.e. a²+b²=c² and a,b,c ≤ 1000000 3 | 4 | use num::Integer; 5 | use rayon::prelude::*; 6 | use rayon::range::Iter; 7 | use std::ops::Add; 8 | use std::usize; 9 | 10 | /// Use Euclid's formula to count Pythagorean triples 11 | /// 12 | /// https://en.wikipedia.org/wiki/Pythagorean_triple#Generating_a_triple 13 | /// 14 | /// For coprime integers m and n, with m > n and m-n is odd, then 15 | /// a = m²-n², b = 2mn, c = m²+n² 16 | /// 17 | /// This is a coprime triple. Multiplying by factors k covers all triples. 
18 | fn par_euclid(map_m: FM, map_n: FN) -> u32 19 | where 20 | FM: FnOnce(Iter) -> M, 21 | M: ParallelIterator, 22 | FN: Fn(Iter) -> N + Sync, 23 | N: ParallelIterator, 24 | { 25 | map_m((1u32..2_000).into_par_iter()) 26 | .map(|m| -> u32 { 27 | map_n((1..m).into_par_iter()) 28 | .filter(|n| (m - n).is_odd() && m.gcd(n) == 1) 29 | .map(|n| 4_000_000 / (m * m + n * n)) 30 | .sum() 31 | }) 32 | .sum() 33 | } 34 | 35 | /// Same as par_euclid, without tweaking split lengths 36 | fn par_euclid_weightless() -> u32 { 37 | (1u32..2_000) 38 | .into_par_iter() 39 | .map(|m| -> u32 { 40 | (1..m) 41 | .into_par_iter() 42 | .filter(|n| (m - n).is_odd() && m.gcd(n) == 1) 43 | .map(|n| 4_000_000 / (m * m + n * n)) 44 | .sum() 45 | }) 46 | .sum() 47 | } 48 | 49 | /// Same as par_euclid, without using rayon. 50 | fn euclid() -> u32 { 51 | (1u32..2_000) 52 | .map(|m| { 53 | (1..m) 54 | .filter(|n| (m - n).is_odd() && m.gcd(n) == 1) 55 | .map(|n| 4_000_000 / (m * m + n * n)) 56 | .fold(0, Add::add) 57 | }) 58 | .fold(0, Add::add) 59 | } 60 | 61 | #[bench] 62 | /// Benchmark without rayon at all 63 | fn euclid_serial(b: &mut test::Bencher) { 64 | let count = euclid(); 65 | b.iter(|| assert_eq!(euclid(), count)) 66 | } 67 | 68 | #[bench] 69 | /// Use huge minimums to force it fully serialized. 70 | fn euclid_faux_serial(b: &mut test::Bencher) { 71 | let count = euclid(); 72 | let serial = |r: Iter| r.with_min_len(usize::MAX); 73 | b.iter(|| assert_eq!(par_euclid(&serial, &serial), count)) 74 | } 75 | 76 | #[bench] 77 | /// Use the default without any weights 78 | fn euclid_parallel_weightless(b: &mut test::Bencher) { 79 | let count = euclid(); 80 | b.iter(|| assert_eq!(par_euclid_weightless(), count)) 81 | } 82 | 83 | #[bench] 84 | /// Use the default settings. 
85 | fn euclid_parallel_one(b: &mut test::Bencher) { 86 | let count = euclid(); 87 | b.iter(|| assert_eq!(par_euclid(|m| m, |n| n), count)) 88 | } 89 | 90 | #[bench] 91 | /// Use a low maximum to force the outer loop parallelized. 92 | fn euclid_parallel_outer(b: &mut test::Bencher) { 93 | let count = euclid(); 94 | let parallel = |r: Iter| r.with_max_len(1); 95 | b.iter(|| assert_eq!(par_euclid(¶llel, |n| n), count)) 96 | } 97 | 98 | #[bench] 99 | /// Use low maximums to force it fully parallelized. 100 | fn euclid_parallel_full(b: &mut test::Bencher) { 101 | let count = euclid(); 102 | let parallel = |r: Iter| r.with_max_len(1); 103 | b.iter(|| assert_eq!(par_euclid(¶llel, ¶llel), count)) 104 | } 105 | -------------------------------------------------------------------------------- /rayon-demo/src/quicksort/bench.rs: -------------------------------------------------------------------------------- 1 | use super::{Parallel, Sequential}; 2 | 3 | // Size to use when doing `cargo bench`; extensively tuned to run in 4 | // "not too long" on my laptop -nmatsakis 5 | const BENCH_SIZE: usize = 250_000_000 / 512; 6 | 7 | fn bench_harness(mut f: F, b: &mut test::Bencher) { 8 | let base_vec = super::default_vec(BENCH_SIZE); 9 | let mut sort_vec = vec![]; 10 | b.iter(|| { 11 | sort_vec = base_vec.clone(); 12 | f(&mut sort_vec); 13 | }); 14 | assert!(super::is_sorted(&sort_vec)); 15 | } 16 | 17 | #[bench] 18 | fn quick_sort_par_bench(b: &mut test::Bencher) { 19 | bench_harness(super::quick_sort::, b); 20 | } 21 | 22 | #[bench] 23 | fn quick_sort_seq_bench(b: &mut test::Bencher) { 24 | bench_harness(super::quick_sort::, b); 25 | } 26 | 27 | #[bench] 28 | fn quick_sort_splitter(b: &mut test::Bencher) { 29 | use rayon::iter::ParallelIterator; 30 | 31 | bench_harness( 32 | |vec| { 33 | ::rayon::iter::split(vec, |vec| { 34 | if vec.len() > 1 { 35 | let mid = super::partition(vec); 36 | let (left, right) = vec.split_at_mut(mid); 37 | (left, Some(right)) 38 | } else { 39 | (vec, 
None) 40 | } 41 | }) 42 | .for_each(super::quick_sort::) 43 | }, 44 | b, 45 | ); 46 | } 47 | -------------------------------------------------------------------------------- /rayon-demo/src/quicksort/mod.rs: -------------------------------------------------------------------------------- 1 | #![allow(non_camel_case_types)] 2 | 3 | const USAGE: &str = " 4 | Usage: quicksort bench [options] 5 | quicksort --help 6 | 7 | Parallel quicksort. Only the main recursive step is parallelized. 8 | 9 | Commands: 10 | bench Run the benchmark in different modes and print the timings. 11 | 12 | Options: 13 | --size N Number of 32-bit words to sort [default: 250000000] (1GB) 14 | --par-only Skip the sequential sort. 15 | -h, --help Show this message. 16 | "; 17 | 18 | #[derive(serde::Deserialize)] 19 | pub struct Args { 20 | cmd_bench: bool, 21 | flag_size: usize, 22 | flag_par_only: bool, 23 | } 24 | 25 | use docopt::Docopt; 26 | use rand::distributions::Standard; 27 | use rand::Rng; 28 | use std::time::Instant; 29 | 30 | pub trait Joiner { 31 | fn is_parallel() -> bool; 32 | fn join(oper_a: A, oper_b: B) -> (R_A, R_B) 33 | where 34 | A: FnOnce() -> R_A + Send, 35 | B: FnOnce() -> R_B + Send, 36 | R_A: Send, 37 | R_B: Send; 38 | } 39 | 40 | pub struct Parallel; 41 | impl Joiner for Parallel { 42 | #[inline] 43 | fn is_parallel() -> bool { 44 | true 45 | } 46 | #[inline] 47 | fn join(oper_a: A, oper_b: B) -> (R_A, R_B) 48 | where 49 | A: FnOnce() -> R_A + Send, 50 | B: FnOnce() -> R_B + Send, 51 | R_A: Send, 52 | R_B: Send, 53 | { 54 | rayon::join(oper_a, oper_b) 55 | } 56 | } 57 | 58 | struct Sequential; 59 | impl Joiner for Sequential { 60 | #[inline] 61 | fn is_parallel() -> bool { 62 | false 63 | } 64 | #[inline] 65 | fn join(oper_a: A, oper_b: B) -> (R_A, R_B) 66 | where 67 | A: FnOnce() -> R_A + Send, 68 | B: FnOnce() -> R_B + Send, 69 | R_A: Send, 70 | R_B: Send, 71 | { 72 | let a = oper_a(); 73 | let b = oper_b(); 74 | (a, b) 75 | } 76 | } 77 | 78 | pub fn quick_sort(v: 
&mut [T]) { 79 | if v.len() <= 1 { 80 | return; 81 | } 82 | 83 | if J::is_parallel() && v.len() <= 5 * 1024 { 84 | return quick_sort::(v); 85 | } 86 | 87 | let mid = partition(v); 88 | let (lo, hi) = v.split_at_mut(mid); 89 | J::join(|| quick_sort::(lo), || quick_sort::(hi)); 90 | } 91 | 92 | fn partition(v: &mut [T]) -> usize { 93 | let pivot = v.len() - 1; 94 | let mut i = 0; 95 | for j in 0..pivot { 96 | if v[j] <= v[pivot] { 97 | v.swap(i, j); 98 | i += 1; 99 | } 100 | } 101 | v.swap(i, pivot); 102 | i 103 | } 104 | 105 | pub fn is_sorted(v: &[T]) -> bool { 106 | (1..v.len()).all(|i| v[i - 1] <= v[i]) 107 | } 108 | 109 | fn default_vec(n: usize) -> Vec { 110 | let rng = crate::seeded_rng(); 111 | rng.sample_iter(&Standard).take(n).collect() 112 | } 113 | 114 | fn timed_sort(n: usize, f: F, name: &str) -> u64 { 115 | let mut v = default_vec(n); 116 | 117 | let start = Instant::now(); 118 | f(&mut v[..]); 119 | let dur = Instant::now() - start; 120 | let nanos = u64::from(dur.subsec_nanos()) + dur.as_secs() * 1_000_000_000u64; 121 | println!("{}: sorted {} ints: {} s", name, n, nanos as f32 / 1e9f32); 122 | 123 | // Check correctness 124 | assert!(is_sorted(&v[..])); 125 | 126 | nanos 127 | } 128 | 129 | pub fn main(args: &[String]) { 130 | let args: Args = Docopt::new(USAGE) 131 | .and_then(|d| d.argv(args).deserialize()) 132 | .unwrap_or_else(|e| e.exit()); 133 | 134 | if args.cmd_bench { 135 | if args.flag_par_only { 136 | timed_sort(args.flag_size, quick_sort::, "par"); 137 | } else { 138 | let seq = timed_sort(args.flag_size, quick_sort::, "seq"); 139 | let par = timed_sort(args.flag_size, quick_sort::, "par"); 140 | let speedup = seq as f64 / par as f64; 141 | println!("speedup: {:.2}x", speedup); 142 | } 143 | } 144 | } 145 | 146 | #[cfg(test)] 147 | mod bench; 148 | -------------------------------------------------------------------------------- /rayon-demo/src/sieve/bench.rs: 
-------------------------------------------------------------------------------- 1 | use super::NUM_PRIMES; 2 | 3 | const MAGNITUDE: usize = 7; 4 | 5 | fn sieve_bench(b: &mut test::Bencher, mut tick: TICK) 6 | where 7 | TICK: FnMut(usize) -> Vec, 8 | { 9 | let mut result = vec![]; 10 | b.iter(|| result = tick(super::max(MAGNITUDE))); 11 | let num_primes = 1 + result.into_iter().filter(|&b| b).count(); 12 | assert_eq!(num_primes, NUM_PRIMES[MAGNITUDE]); 13 | } 14 | 15 | #[bench] 16 | fn sieve_serial(b: &mut ::test::Bencher) { 17 | sieve_bench(b, super::sieve_serial); 18 | } 19 | 20 | #[bench] 21 | fn sieve_chunks(b: &mut ::test::Bencher) { 22 | sieve_bench(b, super::sieve_chunks); 23 | } 24 | 25 | #[bench] 26 | fn sieve_parallel(b: &mut ::test::Bencher) { 27 | sieve_bench(b, super::sieve_parallel); 28 | } 29 | -------------------------------------------------------------------------------- /rayon-demo/src/str_split.rs: -------------------------------------------------------------------------------- 1 | //! Some microbenchmarks for splitting strings 2 | 3 | use once_cell::sync::Lazy; 4 | use rand::seq::SliceRandom; 5 | use rayon::prelude::*; 6 | use test::Bencher; 7 | 8 | static HAYSTACK: Lazy = Lazy::new(|| { 9 | let mut rng = crate::seeded_rng(); 10 | let mut bytes: Vec = "abcdefg ".bytes().cycle().take(1_000_000).collect(); 11 | bytes.shuffle(&mut rng); 12 | String::from_utf8(bytes).unwrap() 13 | }); 14 | 15 | static COUNT: Lazy = Lazy::new(|| HAYSTACK.split(' ').count()); 16 | 17 | // Try multiple kinds of whitespace, but HAYSTACK only contains plain spaces. 
18 | const WHITESPACE: &[char] = &['\r', '\n', ' ', '\t']; 19 | 20 | fn get_string_count() -> (&'static str, usize) { 21 | (&HAYSTACK, *COUNT) 22 | } 23 | 24 | #[bench] 25 | fn parallel_space_char(b: &mut Bencher) { 26 | let (string, count) = get_string_count(); 27 | b.iter(|| assert_eq!(string.par_split(' ').count(), count)) 28 | } 29 | 30 | #[bench] 31 | fn parallel_space_chars(b: &mut Bencher) { 32 | let (string, count) = get_string_count(); 33 | b.iter(|| assert_eq!(string.par_split(WHITESPACE).count(), count)) 34 | } 35 | 36 | #[bench] 37 | fn parallel_space_fn(b: &mut Bencher) { 38 | let (string, count) = get_string_count(); 39 | b.iter(|| assert_eq!(string.par_split(|c| c == ' ').count(), count)) 40 | } 41 | 42 | #[bench] 43 | fn serial_space_char(b: &mut Bencher) { 44 | let (string, count) = get_string_count(); 45 | b.iter(|| assert_eq!(string.split(' ').count(), count)) 46 | } 47 | 48 | #[bench] 49 | fn serial_space_chars(b: &mut Bencher) { 50 | let (string, count) = get_string_count(); 51 | b.iter(|| assert_eq!(string.split(WHITESPACE).count(), count)) 52 | } 53 | 54 | #[bench] 55 | fn serial_space_fn(b: &mut Bencher) { 56 | let (string, count) = get_string_count(); 57 | b.iter(|| assert_eq!(string.split(|c| c == ' ').count(), count)) 58 | } 59 | 60 | #[bench] 61 | fn serial_space_str(b: &mut Bencher) { 62 | let (string, count) = get_string_count(); 63 | #[allow(clippy::single_char_pattern)] 64 | b.iter(|| assert_eq!(string.split(" ").count(), count)) 65 | } 66 | -------------------------------------------------------------------------------- /rayon-demo/src/tsp/bench.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | 3 | use super::graph::Node; 4 | use super::parse_solver; 5 | use super::solver::SolverCx; 6 | 7 | fn run_dir( 8 | b: &mut test::Bencher, 9 | name: &str, 10 | seq_threshold: usize, 11 | exp_weight: usize, 12 | exp_path: Vec, 13 | ) { 14 | let manifest_dir = 
Path::new(env!("CARGO_MANIFEST_DIR")); 15 | let tsp_path = manifest_dir.join("data/tsp/").join(name); 16 | let graph = parse_solver(&tsp_path).unwrap(); 17 | let mut solution = None; 18 | b.iter(|| { 19 | let mut solver = SolverCx::new(&graph, seq_threshold); 20 | solver.search_from(Node::new(0)); 21 | solution = Some(solver.into_result()); 22 | }); 23 | let (path, weight) = solution.unwrap(); 24 | let mut path: Vec = path.unwrap().iter().map(|n| n.index()).collect(); 25 | if path.iter().rev().lt(&path) { 26 | path.reverse(); // normalize the direction 27 | } 28 | assert_eq!( 29 | exp_weight, 30 | weight.to_usize(), 31 | "actual weight ({:?}) did not match expectation ({:?})", 32 | weight, 33 | exp_weight 34 | ); 35 | assert_eq!( 36 | exp_path, path, 37 | "best path ({:?}) did not match expectation ({:?})", 38 | path, exp_path 39 | ); 40 | } 41 | 42 | #[bench] 43 | fn dj10(b: &mut test::Bencher) { 44 | // these numbers are tuned to "not take THAT long" in cargo bench, 45 | // basically, but still exercise the spawning stuff -- each run 46 | // should spawn 6! (720) tasks or so this way. 47 | run_dir( 48 | b, 49 | "dj10.tsp", 50 | 4, 51 | 2577, 52 | vec![0, 1, 3, 2, 4, 6, 8, 7, 5, 9, 0], 53 | ); 54 | } 55 | -------------------------------------------------------------------------------- /rayon-demo/src/tsp/graph.rs: -------------------------------------------------------------------------------- 1 | use fixedbitset::FixedBitSet; 2 | use std::iter; 3 | 4 | use super::weight::Weight; 5 | 6 | pub struct Graph { 7 | num_nodes: usize, 8 | 9 | // a 2-d matrix indexed by (source, target); if `Weight::max()` 10 | // is stored in a particular entry, that means that there is no 11 | // edge. Otherwise, the weight is found. 
12 | weights: Vec, 13 | } 14 | 15 | impl Graph { 16 | pub fn new(num_nodes: usize) -> Graph { 17 | Graph { 18 | num_nodes, 19 | weights: iter::repeat(Weight::max()) 20 | .take(num_nodes * num_nodes) 21 | .collect(), 22 | } 23 | } 24 | 25 | pub fn num_nodes(&self) -> usize { 26 | self.num_nodes 27 | } 28 | 29 | pub fn all_nodes(&self) -> impl Iterator { 30 | (0..self.num_nodes).map(Node::new) 31 | } 32 | 33 | pub fn node_set(&self) -> NodeSet { 34 | NodeSet { 35 | bits: FixedBitSet::with_capacity(self.num_nodes), 36 | } 37 | } 38 | 39 | fn edge_index(&self, source: Node, target: Node) -> usize { 40 | (source.index * self.num_nodes) + target.index 41 | } 42 | 43 | pub fn set_weight(&mut self, source: Node, target: Node, w: Weight) { 44 | assert!(!w.is_max()); 45 | let index = self.edge_index(source, target); 46 | self.weights[index] = w; 47 | } 48 | 49 | pub fn edge_weight(&self, source: Node, target: Node) -> Option { 50 | let w = self.weights[self.edge_index(source, target)]; 51 | if w.is_max() { 52 | None 53 | } else { 54 | Some(w) 55 | } 56 | } 57 | 58 | pub fn edges(&self, source: Node) -> impl Iterator + '_ { 59 | self.all_nodes().filter_map(move |target| { 60 | self.edge_weight(source, target).map(|weight| Edge { 61 | source, 62 | target, 63 | weight, 64 | }) 65 | }) 66 | } 67 | } 68 | 69 | #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] 70 | pub struct Node { 71 | index: usize, 72 | } 73 | 74 | impl Node { 75 | pub fn new(index: usize) -> Node { 76 | Node { index } 77 | } 78 | 79 | pub fn index(self) -> usize { 80 | self.index 81 | } 82 | } 83 | 84 | #[derive(Debug, PartialEq, Eq)] 85 | pub struct Edge { 86 | pub source: Node, 87 | pub target: Node, 88 | pub weight: Weight, 89 | } 90 | 91 | #[derive(Clone, Debug)] 92 | pub struct NodeSet { 93 | bits: FixedBitSet, 94 | } 95 | 96 | impl NodeSet { 97 | pub fn contains(&self, node: Node) -> bool { 98 | self.bits.contains(node.index) 99 | } 100 | 101 | pub fn with(&self, node: Node) -> NodeSet { 102 | let mut s 
= self.clone(); 103 | s.insert(node); 104 | s 105 | } 106 | 107 | pub fn insert(&mut self, node: Node) { 108 | self.bits.set(node.index, true); 109 | } 110 | 111 | pub fn remove(&mut self, node: Node) { 112 | self.bits.set(node.index, false); 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /rayon-demo/src/tsp/mod.rs: -------------------------------------------------------------------------------- 1 | //! A solver for the Travelling Salesman Problem. 2 | //! 3 | //! Based on code developed at ETH by Christoph von Praun, Florian 4 | //! Schneider, Nicholas Matsakis, and Thomas Gross. 5 | 6 | use docopt::Docopt; 7 | use std::error::Error; 8 | use std::fmt::Write; 9 | use std::fs::File; 10 | use std::io::prelude::*; 11 | use std::path::Path; 12 | use std::time::Instant; 13 | 14 | #[cfg(test)] 15 | mod bench; 16 | mod graph; 17 | mod parser; 18 | mod solver; 19 | mod step; 20 | mod tour; 21 | mod weight; 22 | 23 | use self::graph::{Graph, Node}; 24 | use self::solver::SolverCx; 25 | 26 | const USAGE: &str = " 27 | Usage: tsp bench [--seq-threshold N] [--from N] 28 | 29 | Parallel traveling salesman problem solver. Data input is expected to 30 | be in TSPLIB format. 31 | 32 | Suggested command: 33 | cargo run --release -- tsp bench data/tsp/dj15.tsp --seq-threshold 8 34 | 35 | Commands: 36 | bench Run the benchmark and print the timings. 37 | 38 | Options: 39 | -h, --help Show this message. 40 | --seq-threshold N Adjust sequential fallback threshold [default: 10]. 41 | Fall back to seq search when there are N or fewer nodes remaining. 42 | Lower values of N mean more parallelism. 43 | --from N Node index from which to start the search [default: 0]. 
44 | "; 45 | 46 | #[derive(serde::Deserialize)] 47 | pub struct Args { 48 | cmd_bench: bool, 49 | arg_datafile: String, 50 | flag_seq_threshold: usize, 51 | flag_from: usize, 52 | } 53 | 54 | pub fn main(args: &[String]) { 55 | let args: Args = Docopt::new(USAGE) 56 | .and_then(|d| d.argv(args).deserialize()) 57 | .unwrap_or_else(|e| e.exit()); 58 | 59 | if args.cmd_bench { 60 | let _ = run_solver( 61 | Path::new(&args.arg_datafile), 62 | args.flag_seq_threshold, 63 | args.flag_from, 64 | ); 65 | } 66 | } 67 | 68 | fn run_solver(datafile: &Path, seq_threshold: usize, from: usize) -> Result<(), ()> { 69 | let graph = match parse_solver(datafile) { 70 | Ok(g) => g, 71 | Err(e) => { 72 | println!("failed to parse `{}`: {}", datafile.display(), e); 73 | return Err(()); 74 | } 75 | }; 76 | 77 | println!("Graph size : {} nodes.", graph.num_nodes()); 78 | println!("Seq threshold: {} nodes.", seq_threshold); 79 | 80 | if from >= graph.num_nodes() { 81 | println!("Invalid node index given for `--from`: {}", from); 82 | return Err(()); 83 | } 84 | 85 | let mut solver = SolverCx::new(&graph, seq_threshold); 86 | let par_start = Instant::now(); 87 | solver.search_from(Node::new(from)); 88 | let par_time = par_start.elapsed(); 89 | 90 | let (path, weight) = solver.into_result(); 91 | 92 | println!("Total search time: {:?}", par_time); 93 | if let Some(path) = path { 94 | println!("Cheapest path cost: {}", weight.to_usize()); 95 | let mut output = "Cheapest path:".to_string(); 96 | for node in path { 97 | let _ = write!(output, " {}", node.index()); 98 | } 99 | println!("{}", output); 100 | } else { 101 | println!("No path found."); 102 | } 103 | 104 | Ok(()) 105 | } 106 | 107 | fn parse_solver(datafile: &Path) -> Result> { 108 | let mut file = File::open(datafile)?; 109 | let mut text = String::new(); 110 | file.read_to_string(&mut text)?; 111 | let graph = parser::parse_tsp_data(&text)?; 112 | Ok(graph) 113 | } 114 | 
-------------------------------------------------------------------------------- /rayon-demo/src/tsp/solver.rs: -------------------------------------------------------------------------------- 1 | use std::collections::BinaryHeap; 2 | use std::sync::atomic::{AtomicUsize, Ordering}; 3 | use std::sync::{Arc, Mutex}; 4 | use std::usize; 5 | 6 | use super::graph::{Graph, Node}; 7 | use super::step; 8 | use super::tour::TourPrefix; 9 | use super::weight::Weight; 10 | 11 | /// Shared context 12 | pub struct SolverCx<'s> { 13 | graph: &'s Graph, 14 | seq_threshold: usize, 15 | priority_queue: Mutex>>, 16 | tour_counter: AtomicUsize, 17 | min_tour_weight: AtomicUsize, 18 | min_tour: Mutex>>, 19 | } 20 | 21 | /// Just an opaque integer assigned to each tour element as we go; 22 | /// lets us give them an ordering independent from the lower bound. 23 | #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] 24 | pub struct TourId { 25 | id: usize, 26 | } 27 | 28 | impl<'s> SolverCx<'s> { 29 | pub fn new(graph: &'s Graph, seq_threshold: usize) -> Self { 30 | SolverCx { 31 | graph, 32 | seq_threshold, 33 | priority_queue: Mutex::new(BinaryHeap::new()), 34 | tour_counter: AtomicUsize::new(0), 35 | min_tour_weight: AtomicUsize::new(usize::MAX), 36 | min_tour: Mutex::new(None), 37 | } 38 | } 39 | 40 | pub fn search_from(&mut self, node: Node) { 41 | // Enqueue the initial prefix: 42 | let id = self.tour_id(); 43 | let mut visited = self.graph.node_set(); 44 | visited.insert(node); 45 | self.priority_queue 46 | .get_mut() 47 | .unwrap() 48 | .push(Arc::new(TourPrefix { 49 | id, 50 | node, 51 | len: 1, 52 | prefix_weight: Weight::zero(), 53 | priority: Weight::max().to_priority(), 54 | visited, 55 | previous: None, 56 | })); 57 | 58 | // Start the iteration: 59 | rayon::scope(|s| step::step(s, self)); 60 | } 61 | 62 | pub fn seq_threshold(&self) -> usize { 63 | self.seq_threshold 64 | } 65 | 66 | pub fn tour_id(&self) -> TourId { 67 | let counter = 
self.tour_counter.fetch_add(1, Ordering::SeqCst); 68 | TourId { id: counter } 69 | } 70 | 71 | pub fn graph(&self) -> &'s Graph { 72 | self.graph 73 | } 74 | 75 | pub fn enqueue(&self, tour_element: Arc) { 76 | let mut priority_queue = self.priority_queue.lock().unwrap(); 77 | priority_queue.push(tour_element); 78 | } 79 | 80 | pub fn dequeue(&self) -> Option> { 81 | let mut priority_queue = self.priority_queue.lock().unwrap(); 82 | priority_queue.pop() 83 | } 84 | 85 | pub fn min_tour_weight(&self) -> Weight { 86 | // Relaxed read is ok because the only time we *care* about 87 | // this being precise, we are holding `min_tour` lock; and 88 | // that is also the only time we write to it. This is subtle. 89 | Weight::new(self.min_tour_weight.load(Ordering::Relaxed)) 90 | } 91 | 92 | pub fn add_complete_tour(&self, tour: &[Node], weight: Weight) { 93 | if weight < self.min_tour_weight() { 94 | let mut min_tour = self.min_tour.lock().unwrap(); 95 | if min_tour.is_none() || weight < self.min_tour_weight() { 96 | // this is a new minimum! 
97 | *min_tour = Some(tour.to_vec()); 98 | self.min_tour_weight 99 | .store(weight.to_usize(), Ordering::Relaxed); 100 | } 101 | } 102 | } 103 | 104 | pub fn into_result(self) -> (Option>, Weight) { 105 | let weight = self.min_tour_weight(); 106 | (self.min_tour.into_inner().unwrap(), weight) 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /rayon-demo/src/tsp/tour.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd}; 2 | use std::sync::Arc; 3 | 4 | use super::graph::{Node, NodeSet}; 5 | use super::solver::TourId; 6 | use super::weight::{Priority, Weight}; 7 | 8 | #[derive(Clone, Debug)] 9 | pub struct TourPrefix { 10 | /// priority to visit, derived from a lower bound on how much weight we 11 | /// have remaining to complete the tour 12 | pub priority: Priority, 13 | 14 | pub id: TourId, 15 | 16 | /// the next node to traverse 17 | pub node: Node, 18 | 19 | /// total length of our tour 20 | pub len: usize, 21 | 22 | /// total weight of our tour so far 23 | pub prefix_weight: Weight, 24 | 25 | /// bit set with elements left to visit 26 | pub visited: NodeSet, 27 | 28 | /// we extend this; this is ordered last so that the `Ord` impl 29 | /// won't look at it until the other fields 30 | pub previous: Option>, 31 | } 32 | 33 | impl TourPrefix { 34 | /// Returns a tuple of stuff to use when comparing for ord/eq 35 | fn to_cmp_elements(&self) -> (Priority, TourId) { 36 | (self.priority, self.id) 37 | } 38 | 39 | pub fn visited(&self, node: Node) -> bool { 40 | self.visited.contains(node) 41 | } 42 | } 43 | 44 | impl PartialEq for TourPrefix { 45 | fn eq(&self, other: &Self) -> bool { 46 | self.id == other.id 47 | } 48 | } 49 | 50 | impl Eq for TourPrefix {} 51 | 52 | impl PartialOrd for TourPrefix { 53 | fn partial_cmp(&self, other: &Self) -> Option { 54 | Some(self.cmp(other)) 55 | } 56 | } 57 | 58 | impl Ord for TourPrefix 
{ 59 | fn cmp(&self, other: &Self) -> Ordering { 60 | self.to_cmp_elements().cmp(&other.to_cmp_elements()) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /rayon-demo/src/tsp/weight.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Add, AddAssign, Sub, SubAssign}; 2 | use std::usize; 3 | 4 | #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] 5 | pub struct Weight { 6 | weight: usize, 7 | } 8 | 9 | #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] 10 | pub struct Priority { 11 | priority: usize, 12 | } 13 | 14 | impl Weight { 15 | pub fn new(w: usize) -> Weight { 16 | Weight { weight: w } 17 | } 18 | 19 | pub fn zero() -> Weight { 20 | Weight::new(0) 21 | } 22 | 23 | pub fn max() -> Weight { 24 | Weight { weight: usize::MAX } 25 | } 26 | 27 | pub fn to_usize(self) -> usize { 28 | self.weight 29 | } 30 | 31 | pub fn is_max(self) -> bool { 32 | self.weight == usize::MAX 33 | } 34 | 35 | /// Returns a priority for tours with this weight; lighter tours 36 | /// have higher priority. 
37 | pub fn to_priority(self) -> Priority { 38 | Priority { 39 | priority: usize::MAX - self.weight, 40 | } 41 | } 42 | } 43 | 44 | impl Add for Weight { 45 | type Output = Weight; 46 | 47 | fn add(self, rhs: Weight) -> Weight { 48 | Weight::new(self.weight + rhs.weight) 49 | } 50 | } 51 | 52 | impl AddAssign for Weight { 53 | fn add_assign(&mut self, rhs: Weight) { 54 | *self = *self + rhs; 55 | } 56 | } 57 | 58 | impl Sub for Weight { 59 | type Output = Weight; 60 | 61 | fn sub(self, rhs: Weight) -> Weight { 62 | Weight::new(self.weight - rhs.weight) 63 | } 64 | } 65 | 66 | impl SubAssign for Weight { 67 | fn sub_assign(&mut self, rhs: Weight) { 68 | *self = *self - rhs; 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /scripts/analyze.sh: -------------------------------------------------------------------------------- 1 | # Rough and dirty shell script to scrape the `log.rs` output and 2 | # analyze what kinds of tasks have been started and stopped. Very 3 | # useful in tracking down deadlocks. 4 | 5 | TICKLES=$(grep Tickle $1 | wc -l) 6 | 7 | INJECT_JOBS=$(grep InjectJobs $1 | wc -l) 8 | echo "Injected jobs:" $(((INJECT_JOBS * 2))) 9 | 10 | JOINS=$(grep Join $1 | wc -l) 11 | echo "Joins: " $JOINS 12 | 13 | POPPED_RHS=$(grep PoppedRhs $1 | wc -l) 14 | POPPED_JOB=$(grep PoppedJob $1 | wc -l) 15 | POPPED_TOTAL=$((($POPPED_RHS + $POPPED_JOB))) 16 | echo "Popped jobs: " $POPPED_TOTAL = rhs $POPPED_RHS + other $POPPED_JOB 17 | 18 | FOUND_WORK=$(grep FoundWork $1 | wc -l) 19 | echo "Found work: " $FOUND_WORK 20 | 21 | STOLE_WORK=$(grep StoleWork $1 | wc -l) 22 | echo "Stole work: " $STOLE_WORK 23 | 24 | UNINJECTED_WORK=$(grep UninjectedWork $1 | wc -l) 25 | echo "Uninjected: " $UNINJECTED_WORK 26 | 27 | echo "Join balance: " $((( $JOINS - $POPPED_TOTAL - $STOLE_WORK ))) 28 | echo "Inj. 
balance: " $((( $INJECT_JOBS * 2 - $UNINJECTED_WORK ))) 29 | echo "Total balance:" $((( $FOUND_WORK + $POPPED_TOTAL - $JOINS - $INJECT_JOBS * 2 ))) 30 | 31 | -------------------------------------------------------------------------------- /src/array.rs: -------------------------------------------------------------------------------- 1 | //! Parallel iterator types for [arrays] (`[T; N]`) 2 | //! 3 | //! You will rarely need to interact with this module directly unless you need 4 | //! to name one of the iterator types. 5 | //! 6 | //! [arrays]: https://doc.rust-lang.org/std/primitive.array.html 7 | 8 | use crate::iter::plumbing::*; 9 | use crate::iter::*; 10 | use crate::slice::{Iter, IterMut}; 11 | use crate::vec::DrainProducer; 12 | use std::mem::ManuallyDrop; 13 | 14 | impl<'data, T: Sync + 'data, const N: usize> IntoParallelIterator for &'data [T; N] { 15 | type Item = &'data T; 16 | type Iter = Iter<'data, T>; 17 | 18 | fn into_par_iter(self) -> Self::Iter { 19 | <&[T]>::into_par_iter(self) 20 | } 21 | } 22 | 23 | impl<'data, T: Send + 'data, const N: usize> IntoParallelIterator for &'data mut [T; N] { 24 | type Item = &'data mut T; 25 | type Iter = IterMut<'data, T>; 26 | 27 | fn into_par_iter(self) -> Self::Iter { 28 | <&mut [T]>::into_par_iter(self) 29 | } 30 | } 31 | 32 | impl IntoParallelIterator for [T; N] { 33 | type Item = T; 34 | type Iter = IntoIter; 35 | 36 | fn into_par_iter(self) -> Self::Iter { 37 | IntoIter { array: self } 38 | } 39 | } 40 | 41 | /// Parallel iterator that moves out of an array. 
42 | #[derive(Debug, Clone)] 43 | pub struct IntoIter { 44 | array: [T; N], 45 | } 46 | 47 | impl ParallelIterator for IntoIter { 48 | type Item = T; 49 | 50 | fn drive_unindexed(self, consumer: C) -> C::Result 51 | where 52 | C: UnindexedConsumer, 53 | { 54 | bridge(self, consumer) 55 | } 56 | 57 | fn opt_len(&self) -> Option { 58 | Some(N) 59 | } 60 | } 61 | 62 | impl IndexedParallelIterator for IntoIter { 63 | fn drive(self, consumer: C) -> C::Result 64 | where 65 | C: Consumer, 66 | { 67 | bridge(self, consumer) 68 | } 69 | 70 | fn len(&self) -> usize { 71 | N 72 | } 73 | 74 | fn with_producer(self, callback: CB) -> CB::Output 75 | where 76 | CB: ProducerCallback, 77 | { 78 | unsafe { 79 | // Drain every item, and then the local array can just fall out of scope. 80 | let mut array = ManuallyDrop::new(self.array); 81 | let producer = DrainProducer::new(array.as_mut_slice()); 82 | callback.callback(producer) 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/collections/binary_heap.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the parallel iterator types for heaps 2 | //! (`BinaryHeap`). You will rarely need to interact with it directly 3 | //! unless you have need to name one of the iterator types. 4 | 5 | use std::collections::BinaryHeap; 6 | 7 | use crate::iter::plumbing::*; 8 | use crate::iter::*; 9 | 10 | use crate::vec; 11 | 12 | /// Parallel iterator over a binary heap 13 | #[derive(Debug, Clone)] 14 | pub struct IntoIter { 15 | inner: vec::IntoIter, 16 | } 17 | 18 | impl IntoParallelIterator for BinaryHeap { 19 | type Item = T; 20 | type Iter = IntoIter; 21 | 22 | fn into_par_iter(self) -> Self::Iter { 23 | IntoIter { 24 | inner: Vec::from(self).into_par_iter(), 25 | } 26 | } 27 | } 28 | 29 | delegate_indexed_iterator! 
{ 30 | IntoIter => T, 31 | impl 32 | } 33 | 34 | /// Parallel iterator over an immutable reference to a binary heap 35 | #[derive(Debug)] 36 | pub struct Iter<'a, T: Ord + Sync> { 37 | inner: vec::IntoIter<&'a T>, 38 | } 39 | 40 | impl<'a, T: Ord + Sync> Clone for Iter<'a, T> { 41 | fn clone(&self) -> Self { 42 | Iter { 43 | inner: self.inner.clone(), 44 | } 45 | } 46 | } 47 | 48 | into_par_vec! { 49 | &'a BinaryHeap => Iter<'a, T>, 50 | impl<'a, T: Ord + Sync> 51 | } 52 | 53 | delegate_indexed_iterator! { 54 | Iter<'a, T> => &'a T, 55 | impl<'a, T: Ord + Sync + 'a> 56 | } 57 | 58 | // `BinaryHeap` doesn't have a mutable `Iterator` 59 | 60 | /// Draining parallel iterator that moves out of a binary heap, 61 | /// but keeps the total capacity. 62 | #[derive(Debug)] 63 | pub struct Drain<'a, T: Ord + Send> { 64 | heap: &'a mut BinaryHeap, 65 | } 66 | 67 | impl<'a, T: Ord + Send> ParallelDrainFull for &'a mut BinaryHeap { 68 | type Iter = Drain<'a, T>; 69 | type Item = T; 70 | 71 | fn par_drain(self) -> Self::Iter { 72 | Drain { heap: self } 73 | } 74 | } 75 | 76 | impl<'a, T: Ord + Send> ParallelIterator for Drain<'a, T> { 77 | type Item = T; 78 | 79 | fn drive_unindexed(self, consumer: C) -> C::Result 80 | where 81 | C: UnindexedConsumer, 82 | { 83 | bridge(self, consumer) 84 | } 85 | 86 | fn opt_len(&self) -> Option { 87 | Some(self.len()) 88 | } 89 | } 90 | 91 | impl<'a, T: Ord + Send> IndexedParallelIterator for Drain<'a, T> { 92 | fn drive(self, consumer: C) -> C::Result 93 | where 94 | C: Consumer, 95 | { 96 | bridge(self, consumer) 97 | } 98 | 99 | fn len(&self) -> usize { 100 | self.heap.len() 101 | } 102 | 103 | fn with_producer(self, callback: CB) -> CB::Output 104 | where 105 | CB: ProducerCallback, 106 | { 107 | super::DrainGuard::new(self.heap) 108 | .par_drain(..) 
109 | .with_producer(callback) 110 | } 111 | } 112 | 113 | impl<'a, T: Ord + Send> Drop for Drain<'a, T> { 114 | fn drop(&mut self) { 115 | if !self.heap.is_empty() { 116 | // We must not have produced, so just call a normal drain to remove the items. 117 | self.heap.drain(); 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/collections/btree_map.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the parallel iterator types for B-Tree maps 2 | //! (`BTreeMap`). You will rarely need to interact with it directly 3 | //! unless you have need to name one of the iterator types. 4 | 5 | use std::collections::BTreeMap; 6 | 7 | use crate::iter::plumbing::*; 8 | use crate::iter::*; 9 | 10 | use crate::vec; 11 | 12 | /// Parallel iterator over a B-Tree map 13 | #[derive(Debug)] // std doesn't Clone 14 | pub struct IntoIter { 15 | inner: vec::IntoIter<(K, V)>, 16 | } 17 | 18 | into_par_vec! { 19 | BTreeMap => IntoIter, 20 | impl 21 | } 22 | 23 | delegate_iterator! { 24 | IntoIter => (K, V), 25 | impl 26 | } 27 | 28 | /// Parallel iterator over an immutable reference to a B-Tree map 29 | #[derive(Debug)] 30 | pub struct Iter<'a, K: Ord + Sync, V: Sync> { 31 | inner: vec::IntoIter<(&'a K, &'a V)>, 32 | } 33 | 34 | impl<'a, K: Ord + Sync, V: Sync> Clone for Iter<'a, K, V> { 35 | fn clone(&self) -> Self { 36 | Iter { 37 | inner: self.inner.clone(), 38 | } 39 | } 40 | } 41 | 42 | into_par_vec! { 43 | &'a BTreeMap => Iter<'a, K, V>, 44 | impl<'a, K: Ord + Sync, V: Sync> 45 | } 46 | 47 | delegate_iterator! { 48 | Iter<'a, K, V> => (&'a K, &'a V), 49 | impl<'a, K: Ord + Sync + 'a, V: Sync + 'a> 50 | } 51 | 52 | /// Parallel iterator over a mutable reference to a B-Tree map 53 | #[derive(Debug)] 54 | pub struct IterMut<'a, K: Ord + Sync, V: Send> { 55 | inner: vec::IntoIter<(&'a K, &'a mut V)>, 56 | } 57 | 58 | into_par_vec! 
{ 59 | &'a mut BTreeMap => IterMut<'a, K, V>, 60 | impl<'a, K: Ord + Sync, V: Send> 61 | } 62 | 63 | delegate_iterator! { 64 | IterMut<'a, K, V> => (&'a K, &'a mut V), 65 | impl<'a, K: Ord + Sync + 'a, V: Send + 'a> 66 | } 67 | -------------------------------------------------------------------------------- /src/collections/btree_set.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the parallel iterator types for B-Tree sets 2 | //! (`BTreeSet`). You will rarely need to interact with it directly 3 | //! unless you have need to name one of the iterator types. 4 | 5 | use std::collections::BTreeSet; 6 | 7 | use crate::iter::plumbing::*; 8 | use crate::iter::*; 9 | 10 | use crate::vec; 11 | 12 | /// Parallel iterator over a B-Tree set 13 | #[derive(Debug)] // std doesn't Clone 14 | pub struct IntoIter { 15 | inner: vec::IntoIter, 16 | } 17 | 18 | into_par_vec! { 19 | BTreeSet => IntoIter, 20 | impl 21 | } 22 | 23 | delegate_iterator! { 24 | IntoIter => T, 25 | impl 26 | } 27 | 28 | /// Parallel iterator over an immutable reference to a B-Tree set 29 | #[derive(Debug)] 30 | pub struct Iter<'a, T: Ord + Sync> { 31 | inner: vec::IntoIter<&'a T>, 32 | } 33 | 34 | impl<'a, T: Ord + Sync + 'a> Clone for Iter<'a, T> { 35 | fn clone(&self) -> Self { 36 | Iter { 37 | inner: self.inner.clone(), 38 | } 39 | } 40 | } 41 | 42 | into_par_vec! { 43 | &'a BTreeSet => Iter<'a, T>, 44 | impl<'a, T: Ord + Sync> 45 | } 46 | 47 | delegate_iterator! { 48 | Iter<'a, T> => &'a T, 49 | impl<'a, T: Ord + Sync + 'a> 50 | } 51 | 52 | // `BTreeSet` doesn't have a mutable `Iterator` 53 | -------------------------------------------------------------------------------- /src/collections/hash_map.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the parallel iterator types for hash maps 2 | //! (`HashMap`). You will rarely need to interact with it directly 3 | //! 
unless you have need to name one of the iterator types. 4 | 5 | use std::collections::HashMap; 6 | use std::hash::{BuildHasher, Hash}; 7 | use std::marker::PhantomData; 8 | 9 | use crate::iter::plumbing::*; 10 | use crate::iter::*; 11 | 12 | use crate::vec; 13 | 14 | /// Parallel iterator over a hash map 15 | #[derive(Debug)] // std doesn't Clone 16 | pub struct IntoIter { 17 | inner: vec::IntoIter<(K, V)>, 18 | } 19 | 20 | into_par_vec! { 21 | HashMap => IntoIter, 22 | impl 23 | } 24 | 25 | delegate_iterator! { 26 | IntoIter => (K, V), 27 | impl 28 | } 29 | 30 | /// Parallel iterator over an immutable reference to a hash map 31 | #[derive(Debug)] 32 | pub struct Iter<'a, K: Hash + Eq + Sync, V: Sync> { 33 | inner: vec::IntoIter<(&'a K, &'a V)>, 34 | } 35 | 36 | impl<'a, K: Hash + Eq + Sync, V: Sync> Clone for Iter<'a, K, V> { 37 | fn clone(&self) -> Self { 38 | Iter { 39 | inner: self.inner.clone(), 40 | } 41 | } 42 | } 43 | 44 | into_par_vec! { 45 | &'a HashMap => Iter<'a, K, V>, 46 | impl<'a, K: Hash + Eq + Sync, V: Sync, S: BuildHasher> 47 | } 48 | 49 | delegate_iterator! { 50 | Iter<'a, K, V> => (&'a K, &'a V), 51 | impl<'a, K: Hash + Eq + Sync + 'a, V: Sync + 'a> 52 | } 53 | 54 | /// Parallel iterator over a mutable reference to a hash map 55 | #[derive(Debug)] 56 | pub struct IterMut<'a, K: Hash + Eq + Sync, V: Send> { 57 | inner: vec::IntoIter<(&'a K, &'a mut V)>, 58 | } 59 | 60 | into_par_vec! { 61 | &'a mut HashMap => IterMut<'a, K, V>, 62 | impl<'a, K: Hash + Eq + Sync, V: Send, S: BuildHasher> 63 | } 64 | 65 | delegate_iterator! { 66 | IterMut<'a, K, V> => (&'a K, &'a mut V), 67 | impl<'a, K: Hash + Eq + Sync + 'a, V: Send + 'a> 68 | } 69 | 70 | /// Draining parallel iterator that moves out of a hash map, 71 | /// but keeps the total capacity. 
72 | #[derive(Debug)] 73 | pub struct Drain<'a, K: Hash + Eq + Send, V: Send> { 74 | inner: vec::IntoIter<(K, V)>, 75 | marker: PhantomData<&'a mut HashMap>, 76 | } 77 | 78 | impl<'a, K: Hash + Eq + Send, V: Send, S: BuildHasher> ParallelDrainFull 79 | for &'a mut HashMap 80 | { 81 | type Iter = Drain<'a, K, V>; 82 | type Item = (K, V); 83 | 84 | fn par_drain(self) -> Self::Iter { 85 | let vec: Vec<_> = self.drain().collect(); 86 | Drain { 87 | inner: vec.into_par_iter(), 88 | marker: PhantomData, 89 | } 90 | } 91 | } 92 | 93 | delegate_iterator! { 94 | Drain<'_, K, V> => (K, V), 95 | impl 96 | } 97 | -------------------------------------------------------------------------------- /src/collections/hash_set.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the parallel iterator types for hash sets 2 | //! (`HashSet`). You will rarely need to interact with it directly 3 | //! unless you have need to name one of the iterator types. 4 | 5 | use std::collections::HashSet; 6 | use std::hash::{BuildHasher, Hash}; 7 | use std::marker::PhantomData; 8 | 9 | use crate::iter::plumbing::*; 10 | use crate::iter::*; 11 | 12 | use crate::vec; 13 | 14 | /// Parallel iterator over a hash set 15 | #[derive(Debug)] // std doesn't Clone 16 | pub struct IntoIter { 17 | inner: vec::IntoIter, 18 | } 19 | 20 | into_par_vec! { 21 | HashSet => IntoIter, 22 | impl 23 | } 24 | 25 | delegate_iterator! { 26 | IntoIter => T, 27 | impl 28 | } 29 | 30 | /// Parallel iterator over an immutable reference to a hash set 31 | #[derive(Debug)] 32 | pub struct Iter<'a, T: Hash + Eq + Sync> { 33 | inner: vec::IntoIter<&'a T>, 34 | } 35 | 36 | impl<'a, T: Hash + Eq + Sync> Clone for Iter<'a, T> { 37 | fn clone(&self) -> Self { 38 | Iter { 39 | inner: self.inner.clone(), 40 | } 41 | } 42 | } 43 | 44 | into_par_vec! { 45 | &'a HashSet => Iter<'a, T>, 46 | impl<'a, T: Hash + Eq + Sync, S: BuildHasher> 47 | } 48 | 49 | delegate_iterator! 
{ 50 | Iter<'a, T> => &'a T, 51 | impl<'a, T: Hash + Eq + Sync + 'a> 52 | } 53 | 54 | // `HashSet` doesn't have a mutable `Iterator` 55 | 56 | /// Draining parallel iterator that moves out of a hash set, 57 | /// but keeps the total capacity. 58 | #[derive(Debug)] 59 | pub struct Drain<'a, T: Hash + Eq + Send> { 60 | inner: vec::IntoIter, 61 | marker: PhantomData<&'a mut HashSet>, 62 | } 63 | 64 | impl<'a, T: Hash + Eq + Send, S: BuildHasher> ParallelDrainFull for &'a mut HashSet { 65 | type Iter = Drain<'a, T>; 66 | type Item = T; 67 | 68 | fn par_drain(self) -> Self::Iter { 69 | let vec: Vec<_> = self.drain().collect(); 70 | Drain { 71 | inner: vec.into_par_iter(), 72 | marker: PhantomData, 73 | } 74 | } 75 | } 76 | 77 | delegate_iterator! { 78 | Drain<'_, T> => T, 79 | impl 80 | } 81 | -------------------------------------------------------------------------------- /src/collections/linked_list.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the parallel iterator types for linked lists 2 | //! (`LinkedList`). You will rarely need to interact with it directly 3 | //! unless you have need to name one of the iterator types. 4 | 5 | use std::collections::LinkedList; 6 | 7 | use crate::iter::plumbing::*; 8 | use crate::iter::*; 9 | 10 | use crate::vec; 11 | 12 | /// Parallel iterator over a linked list 13 | #[derive(Debug, Clone)] 14 | pub struct IntoIter { 15 | inner: vec::IntoIter, 16 | } 17 | 18 | into_par_vec! { 19 | LinkedList => IntoIter, 20 | impl 21 | } 22 | 23 | delegate_iterator! { 24 | IntoIter => T, 25 | impl 26 | } 27 | 28 | /// Parallel iterator over an immutable reference to a linked list 29 | #[derive(Debug)] 30 | pub struct Iter<'a, T: Sync> { 31 | inner: vec::IntoIter<&'a T>, 32 | } 33 | 34 | impl<'a, T: Sync> Clone for Iter<'a, T> { 35 | fn clone(&self) -> Self { 36 | Iter { 37 | inner: self.inner.clone(), 38 | } 39 | } 40 | } 41 | 42 | into_par_vec! 
{ 43 | &'a LinkedList => Iter<'a, T>, 44 | impl<'a, T: Sync> 45 | } 46 | 47 | delegate_iterator! { 48 | Iter<'a, T> => &'a T, 49 | impl<'a, T: Sync + 'a> 50 | } 51 | 52 | /// Parallel iterator over a mutable reference to a linked list 53 | #[derive(Debug)] 54 | pub struct IterMut<'a, T: Send> { 55 | inner: vec::IntoIter<&'a mut T>, 56 | } 57 | 58 | into_par_vec! { 59 | &'a mut LinkedList => IterMut<'a, T>, 60 | impl<'a, T: Send> 61 | } 62 | 63 | delegate_iterator! { 64 | IterMut<'a, T> => &'a mut T, 65 | impl<'a, T: Send + 'a> 66 | } 67 | -------------------------------------------------------------------------------- /src/collections/mod.rs: -------------------------------------------------------------------------------- 1 | //! Parallel iterator types for [standard collections][std::collections] 2 | //! 3 | //! You will rarely need to interact with this module directly unless you need 4 | //! to name one of the iterator types. 5 | //! 6 | //! [std::collections]: https://doc.rust-lang.org/stable/std/collections/ 7 | 8 | /// Convert an iterable collection into a parallel iterator by first 9 | /// collecting into a temporary `Vec`, then iterating that. 10 | macro_rules! into_par_vec { 11 | ($t:ty => $iter:ident<$($i:tt),*>, impl $($args:tt)*) => { 12 | impl $($args)* IntoParallelIterator for $t { 13 | type Item = <$t as IntoIterator>::Item; 14 | type Iter = $iter<$($i),*>; 15 | 16 | fn into_par_iter(self) -> Self::Iter { 17 | use std::iter::FromIterator; 18 | $iter { inner: Vec::from_iter(self).into_par_iter() } 19 | } 20 | } 21 | }; 22 | } 23 | 24 | pub mod binary_heap; 25 | pub mod btree_map; 26 | pub mod btree_set; 27 | pub mod hash_map; 28 | pub mod hash_set; 29 | pub mod linked_list; 30 | pub mod vec_deque; 31 | 32 | use self::drain_guard::DrainGuard; 33 | 34 | mod drain_guard { 35 | use crate::iter::ParallelDrainRange; 36 | use std::mem; 37 | use std::ops::RangeBounds; 38 | 39 | /// A proxy for draining a collection by converting to a `Vec` and back. 
40 | /// 41 | /// This is used for draining `BinaryHeap` and `VecDeque`, which both have 42 | /// zero-allocation conversions to/from `Vec`, though not zero-cost: 43 | /// - `BinaryHeap` will heapify from `Vec`, but at least that will be empty. 44 | /// - `VecDeque` has to shift items to offset 0 when converting to `Vec`. 45 | #[allow(missing_debug_implementations)] 46 | pub(super) struct DrainGuard<'a, T, C: From>> { 47 | collection: &'a mut C, 48 | vec: Vec, 49 | } 50 | 51 | impl<'a, T, C> DrainGuard<'a, T, C> 52 | where 53 | C: Default + From>, 54 | Vec: From, 55 | { 56 | pub(super) fn new(collection: &'a mut C) -> Self { 57 | Self { 58 | // Temporarily steal the inner `Vec` so we can drain in place. 59 | vec: Vec::from(mem::take(collection)), 60 | collection, 61 | } 62 | } 63 | } 64 | 65 | impl<'a, T, C: From>> Drop for DrainGuard<'a, T, C> { 66 | fn drop(&mut self) { 67 | // Restore the collection from the `Vec` with its original capacity. 68 | *self.collection = C::from(mem::take(&mut self.vec)); 69 | } 70 | } 71 | 72 | impl<'a, T, C> ParallelDrainRange for &'a mut DrainGuard<'_, T, C> 73 | where 74 | T: Send, 75 | C: From>, 76 | { 77 | type Iter = crate::vec::Drain<'a, T>; 78 | type Item = T; 79 | 80 | fn par_drain>(self, range: R) -> Self::Iter { 81 | self.vec.par_drain(range) 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /src/compile_fail/cannot_collect_filtermap_data.rs: -------------------------------------------------------------------------------- 1 | /*! ```compile_fail,E0599 2 | 3 | use rayon::prelude::*; 4 | 5 | // zip requires data of exact size, but filter yields only bounded 6 | // size, so check that we cannot apply it. 
7 | 8 | let a: Vec = (0..1024).collect(); 9 | let mut v = vec![]; 10 | a.par_iter() 11 | .filter_map(|&x| Some(x as f32)) 12 | .collect_into_vec(&mut v); //~ ERROR no method 13 | 14 | ``` */ 15 | -------------------------------------------------------------------------------- /src/compile_fail/cannot_zip_filtered_data.rs: -------------------------------------------------------------------------------- 1 | /*! ```compile_fail,E0277 2 | 3 | use rayon::prelude::*; 4 | 5 | // zip requires data of exact size, but filter yields only bounded 6 | // size, so check that we cannot apply it. 7 | 8 | let mut a: Vec = (0..1024).rev().collect(); 9 | let b: Vec = (0..1024).collect(); 10 | 11 | a.par_iter() 12 | .zip(b.par_iter().filter(|&&x| x > 3)); //~ ERROR 13 | 14 | ``` */ 15 | -------------------------------------------------------------------------------- /src/compile_fail/cell_par_iter.rs: -------------------------------------------------------------------------------- 1 | /*! ```compile_fail,E0277 2 | 3 | // Check that we can't use the par-iter API to access contents of a `Cell`. 4 | 5 | use rayon::prelude::*; 6 | use std::cell::Cell; 7 | 8 | let c = Cell::new(42_i32); 9 | (0_i32..1024).into_par_iter() 10 | .map(|_| c.get()) //~ ERROR E0277 11 | .min(); 12 | 13 | ``` */ 14 | -------------------------------------------------------------------------------- /src/compile_fail/mod.rs: -------------------------------------------------------------------------------- 1 | // These modules contain `compile_fail` doc tests. 2 | mod cannot_collect_filtermap_data; 3 | mod cannot_zip_filtered_data; 4 | mod cell_par_iter; 5 | mod must_use; 6 | mod no_send_par_iter; 7 | mod rc_par_iter; 8 | -------------------------------------------------------------------------------- /src/compile_fail/must_use.rs: -------------------------------------------------------------------------------- 1 | // Check that we are flagged for ignoring `must_use` parallel adaptors. 
2 | // (unfortunately there's no error code for `unused_must_use`) 3 | 4 | macro_rules! must_use { 5 | ($( $name:ident #[$expr:meta] )*) => {$( 6 | /// First sanity check that the expression is OK. 7 | /// 8 | /// ``` 9 | /// #![deny(unused_must_use)] 10 | /// 11 | /// use rayon::prelude::*; 12 | /// 13 | /// let v: Vec<_> = (0..100).map(Some).collect(); 14 | /// let _ = 15 | #[$expr] 16 | /// ``` 17 | /// 18 | /// Now trigger the `must_use`. 19 | /// 20 | /// ```compile_fail 21 | /// #![deny(unused_must_use)] 22 | /// 23 | /// use rayon::prelude::*; 24 | /// 25 | /// let v: Vec<_> = (0..100).map(Some).collect(); 26 | #[$expr] 27 | /// ``` 28 | mod $name {} 29 | )*} 30 | } 31 | 32 | must_use! { 33 | step_by /** v.par_iter().step_by(2); */ 34 | chain /** v.par_iter().chain(&v); */ 35 | chunks /** v.par_iter().chunks(2); */ 36 | fold_chunks /** v.par_iter().fold_chunks(2, || 0, |x, _| x); */ 37 | fold_chunks_with /** v.par_iter().fold_chunks_with(2, 0, |x, _| x); */ 38 | cloned /** v.par_iter().cloned(); */ 39 | copied /** v.par_iter().copied(); */ 40 | enumerate /** v.par_iter().enumerate(); */ 41 | filter /** v.par_iter().filter(|_| true); */ 42 | filter_map /** v.par_iter().filter_map(|x| *x); */ 43 | flat_map /** v.par_iter().flat_map(|x| *x); */ 44 | flat_map_iter /** v.par_iter().flat_map_iter(|x| *x); */ 45 | flatten /** v.par_iter().flatten(); */ 46 | flatten_iter /** v.par_iter().flatten_iter(); */ 47 | fold /** v.par_iter().fold(|| 0, |x, _| x); */ 48 | fold_with /** v.par_iter().fold_with(0, |x, _| x); */ 49 | try_fold /** v.par_iter().try_fold(|| 0, |x, _| Some(x)); */ 50 | try_fold_with /** v.par_iter().try_fold_with(0, |x, _| Some(x)); */ 51 | inspect /** v.par_iter().inspect(|_| {}); */ 52 | interleave /** v.par_iter().interleave(&v); */ 53 | interleave_shortest /** v.par_iter().interleave_shortest(&v); */ 54 | intersperse /** v.par_iter().intersperse(&None); */ 55 | map /** v.par_iter().map(|x| x); */ 56 | map_with /** v.par_iter().map_with(0, |_, x| 
x); */ 57 | map_init /** v.par_iter().map_init(|| 0, |_, x| x); */ 58 | panic_fuse /** v.par_iter().panic_fuse(); */ 59 | positions /** v.par_iter().positions(|_| true); */ 60 | rev /** v.par_iter().rev(); */ 61 | skip /** v.par_iter().skip(1); */ 62 | take /** v.par_iter().take(1); */ 63 | update /** v.par_iter().update(|_| {}); */ 64 | while_some /** v.par_iter().cloned().while_some(); */ 65 | with_max_len /** v.par_iter().with_max_len(1); */ 66 | with_min_len /** v.par_iter().with_min_len(1); */ 67 | zip /** v.par_iter().zip(&v); */ 68 | zip_eq /** v.par_iter().zip_eq(&v); */ 69 | } 70 | -------------------------------------------------------------------------------- /src/compile_fail/no_send_par_iter.rs: -------------------------------------------------------------------------------- 1 | // Check that `!Send` types fail early. 2 | 3 | /** ```compile_fail,E0277 4 | 5 | use rayon::prelude::*; 6 | use std::ptr::null; 7 | 8 | #[derive(Copy, Clone)] 9 | struct NoSend(*const ()); 10 | 11 | unsafe impl Sync for NoSend {} 12 | 13 | let x = Some(NoSend(null())); 14 | 15 | x.par_iter() 16 | .map(|&x| x) //~ ERROR 17 | .count(); //~ ERROR 18 | 19 | ``` */ 20 | mod map {} 21 | 22 | /** ```compile_fail,E0277 23 | 24 | use rayon::prelude::*; 25 | use std::ptr::null; 26 | 27 | #[derive(Copy, Clone)] 28 | struct NoSend(*const ()); 29 | 30 | unsafe impl Sync for NoSend {} 31 | 32 | let x = Some(NoSend(null())); 33 | 34 | x.par_iter() 35 | .filter_map(|&x| Some(x)) //~ ERROR 36 | .count(); //~ ERROR 37 | 38 | ``` */ 39 | mod filter_map {} 40 | 41 | /** ```compile_fail,E0277 42 | 43 | use rayon::prelude::*; 44 | use std::ptr::null; 45 | 46 | #[derive(Copy, Clone)] 47 | struct NoSend(*const ()); 48 | 49 | unsafe impl Sync for NoSend {} 50 | 51 | let x = Some(NoSend(null())); 52 | 53 | x.par_iter() 54 | .cloned() //~ ERROR 55 | .count(); //~ ERROR 56 | 57 | ``` */ 58 | mod cloned {} 59 | -------------------------------------------------------------------------------- 
/src/compile_fail/rc_par_iter.rs: -------------------------------------------------------------------------------- 1 | /*! ```compile_fail,E0599 2 | 3 | // Check that we can't use the par-iter API to access contents of an 4 | // `Rc`. 5 | 6 | use rayon::prelude::*; 7 | use std::rc::Rc; 8 | 9 | let x = vec![Rc::new(22), Rc::new(23)]; 10 | let mut y = vec![]; 11 | x.into_par_iter() //~ ERROR no method named `into_par_iter` 12 | .map(|rc| *rc) 13 | .collect_into_vec(&mut y); 14 | 15 | ``` */ 16 | -------------------------------------------------------------------------------- /src/delegate.rs: -------------------------------------------------------------------------------- 1 | //! Macros for delegating newtype iterators to inner types. 2 | 3 | // Note: these place `impl` bounds at the end, as token gobbling is the only way 4 | // I know how to consume an arbitrary list of constraints, with `$($args:tt)*`. 5 | 6 | /// Creates a parallel iterator implementation which simply wraps an inner type 7 | /// and delegates all methods inward. The actual struct must already be 8 | /// declared with an `inner` field. 9 | /// 10 | /// The implementation of `IntoParallelIterator` should be added separately. 11 | macro_rules! delegate_iterator { 12 | ($iter:ty => $item:ty , 13 | impl $( $args:tt )* 14 | ) => { 15 | impl $( $args )* ParallelIterator for $iter { 16 | type Item = $item; 17 | 18 | fn drive_unindexed(self, consumer: C) -> C::Result 19 | where C: UnindexedConsumer 20 | { 21 | self.inner.drive_unindexed(consumer) 22 | } 23 | 24 | fn opt_len(&self) -> Option { 25 | self.inner.opt_len() 26 | } 27 | } 28 | } 29 | } 30 | 31 | /// Creates an indexed parallel iterator implementation which simply wraps an 32 | /// inner type and delegates all methods inward. The actual struct must already 33 | /// be declared with an `inner` field. 34 | macro_rules! 
delegate_indexed_iterator { 35 | ($iter:ty => $item:ty , 36 | impl $( $args:tt )* 37 | ) => { 38 | delegate_iterator!{ 39 | $iter => $item , 40 | impl $( $args )* 41 | } 42 | 43 | impl $( $args )* IndexedParallelIterator for $iter { 44 | fn drive(self, consumer: C) -> C::Result 45 | where C: Consumer 46 | { 47 | self.inner.drive(consumer) 48 | } 49 | 50 | fn len(&self) -> usize { 51 | self.inner.len() 52 | } 53 | 54 | fn with_producer(self, callback: CB) -> CB::Output 55 | where CB: ProducerCallback 56 | { 57 | self.inner.with_producer(callback) 58 | } 59 | } 60 | } 61 | } 62 | 63 | #[test] 64 | fn unindexed_example() { 65 | use crate::collections::btree_map::IntoIter; 66 | use crate::iter::plumbing::*; 67 | use crate::prelude::*; 68 | 69 | use std::collections::BTreeMap; 70 | 71 | struct MyIntoIter { 72 | inner: IntoIter, 73 | } 74 | 75 | delegate_iterator! { 76 | MyIntoIter => (T, U), 77 | impl 78 | } 79 | 80 | let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]); 81 | let iter = MyIntoIter { 82 | inner: map.into_par_iter(), 83 | }; 84 | let vec: Vec<_> = iter.map(|(k, _)| k).collect(); 85 | assert_eq!(vec, &[1, 2, 3]); 86 | } 87 | 88 | #[test] 89 | fn indexed_example() { 90 | use crate::iter::plumbing::*; 91 | use crate::prelude::*; 92 | use crate::vec::IntoIter; 93 | 94 | struct MyIntoIter { 95 | inner: IntoIter, 96 | } 97 | 98 | delegate_indexed_iterator! { 99 | MyIntoIter => T, 100 | impl 101 | } 102 | 103 | let iter = MyIntoIter { 104 | inner: vec![1, 2, 3].into_par_iter(), 105 | }; 106 | let mut vec = vec![]; 107 | iter.collect_into_vec(&mut vec); 108 | assert_eq!(vec, &[1, 2, 3]); 109 | } 110 | -------------------------------------------------------------------------------- /src/iter/empty.rs: -------------------------------------------------------------------------------- 1 | use crate::iter::plumbing::*; 2 | use crate::iter::*; 3 | 4 | use std::fmt; 5 | use std::marker::PhantomData; 6 | 7 | /// Creates a parallel iterator that produces nothing. 
8 | /// 9 | /// This admits no parallelism on its own, but it could be used for code that 10 | /// deals with generic parallel iterators. 11 | /// 12 | /// # Examples 13 | /// 14 | /// ``` 15 | /// use rayon::prelude::*; 16 | /// use rayon::iter::empty; 17 | /// 18 | /// let pi = (0..1234).into_par_iter() 19 | /// .chain(empty()) 20 | /// .chain(1234..10_000); 21 | /// 22 | /// assert_eq!(pi.count(), 10_000); 23 | /// ``` 24 | pub fn empty() -> Empty { 25 | Empty { 26 | marker: PhantomData, 27 | } 28 | } 29 | 30 | /// Iterator adaptor for [the `empty()` function](fn.empty.html). 31 | pub struct Empty { 32 | marker: PhantomData, 33 | } 34 | 35 | impl Clone for Empty { 36 | fn clone(&self) -> Self { 37 | empty() 38 | } 39 | } 40 | 41 | impl fmt::Debug for Empty { 42 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 43 | f.pad("Empty") 44 | } 45 | } 46 | 47 | impl ParallelIterator for Empty { 48 | type Item = T; 49 | 50 | fn drive_unindexed(self, consumer: C) -> C::Result 51 | where 52 | C: UnindexedConsumer, 53 | { 54 | self.drive(consumer) 55 | } 56 | 57 | fn opt_len(&self) -> Option { 58 | Some(0) 59 | } 60 | } 61 | 62 | impl IndexedParallelIterator for Empty { 63 | fn drive(self, consumer: C) -> C::Result 64 | where 65 | C: Consumer, 66 | { 67 | consumer.into_folder().complete() 68 | } 69 | 70 | fn len(&self) -> usize { 71 | 0 72 | } 73 | 74 | fn with_producer(self, callback: CB) -> CB::Output 75 | where 76 | CB: ProducerCallback, 77 | { 78 | callback.callback(EmptyProducer(PhantomData)) 79 | } 80 | } 81 | 82 | /// Private empty producer 83 | struct EmptyProducer(PhantomData); 84 | 85 | impl Producer for EmptyProducer { 86 | type Item = T; 87 | type IntoIter = std::iter::Empty; 88 | 89 | fn into_iter(self) -> Self::IntoIter { 90 | std::iter::empty() 91 | } 92 | 93 | fn split_at(self, index: usize) -> (Self, Self) { 94 | debug_assert_eq!(index, 0); 95 | (self, EmptyProducer(PhantomData)) 96 | } 97 | 98 | fn fold_with(self, folder: F) -> F 99 | where 100 
| F: Folder, 101 | { 102 | folder 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/iter/enumerate.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | use std::iter; 4 | use std::ops::Range; 5 | use std::usize; 6 | 7 | /// `Enumerate` is an iterator that returns the current count along with the element. 8 | /// This struct is created by the [`enumerate()`] method on [`IndexedParallelIterator`] 9 | /// 10 | /// [`enumerate()`]: trait.IndexedParallelIterator.html#method.enumerate 11 | /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 12 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 13 | #[derive(Debug, Clone)] 14 | pub struct Enumerate { 15 | base: I, 16 | } 17 | 18 | impl Enumerate 19 | where 20 | I: IndexedParallelIterator, 21 | { 22 | /// Creates a new `Enumerate` iterator. 23 | pub(super) fn new(base: I) -> Self { 24 | Enumerate { base } 25 | } 26 | } 27 | 28 | impl ParallelIterator for Enumerate 29 | where 30 | I: IndexedParallelIterator, 31 | { 32 | type Item = (usize, I::Item); 33 | 34 | fn drive_unindexed(self, consumer: C) -> C::Result 35 | where 36 | C: UnindexedConsumer, 37 | { 38 | bridge(self, consumer) 39 | } 40 | 41 | fn opt_len(&self) -> Option { 42 | Some(self.len()) 43 | } 44 | } 45 | 46 | impl IndexedParallelIterator for Enumerate 47 | where 48 | I: IndexedParallelIterator, 49 | { 50 | fn drive>(self, consumer: C) -> C::Result { 51 | bridge(self, consumer) 52 | } 53 | 54 | fn len(&self) -> usize { 55 | self.base.len() 56 | } 57 | 58 | fn with_producer(self, callback: CB) -> CB::Output 59 | where 60 | CB: ProducerCallback, 61 | { 62 | return self.base.with_producer(Callback { callback }); 63 | 64 | struct Callback { 65 | callback: CB, 66 | } 67 | 68 | impl ProducerCallback for Callback 69 | where 70 | CB: ProducerCallback<(usize, I)>, 71 | { 72 | type Output = 
CB::Output; 73 | fn callback

(self, base: P) -> CB::Output 74 | where 75 | P: Producer, 76 | { 77 | let producer = EnumerateProducer { base, offset: 0 }; 78 | self.callback.callback(producer) 79 | } 80 | } 81 | } 82 | } 83 | 84 | /// //////////////////////////////////////////////////////////////////////// 85 | /// Producer implementation 86 | 87 | struct EnumerateProducer

{ 88 | base: P, 89 | offset: usize, 90 | } 91 | 92 | impl

Producer for EnumerateProducer

93 | where 94 | P: Producer, 95 | { 96 | type Item = (usize, P::Item); 97 | type IntoIter = iter::Zip, P::IntoIter>; 98 | 99 | fn into_iter(self) -> Self::IntoIter { 100 | // Enumerate only works for IndexedParallelIterators. Since those 101 | // have a max length of usize::MAX, their max index is 102 | // usize::MAX - 1, so the range 0..usize::MAX includes all 103 | // possible indices. 104 | // 105 | // However, we should to use a precise end to the range, otherwise 106 | // reversing the iterator may have to walk back a long ways before 107 | // `Zip::next_back` can produce anything. 108 | let base = self.base.into_iter(); 109 | let end = self.offset + base.len(); 110 | (self.offset..end).zip(base) 111 | } 112 | 113 | fn min_len(&self) -> usize { 114 | self.base.min_len() 115 | } 116 | fn max_len(&self) -> usize { 117 | self.base.max_len() 118 | } 119 | 120 | fn split_at(self, index: usize) -> (Self, Self) { 121 | let (left, right) = self.base.split_at(index); 122 | ( 123 | EnumerateProducer { 124 | base: left, 125 | offset: self.offset, 126 | }, 127 | EnumerateProducer { 128 | base: right, 129 | offset: self.offset + index, 130 | }, 131 | ) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /src/iter/filter.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | 4 | use std::fmt::{self, Debug}; 5 | 6 | /// `Filter` takes a predicate `filter_op` and filters out elements that match. 
7 | /// This struct is created by the [`filter()`] method on [`ParallelIterator`] 8 | /// 9 | /// [`filter()`]: trait.ParallelIterator.html#method.filter 10 | /// [`ParallelIterator`]: trait.ParallelIterator.html 11 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 12 | #[derive(Clone)] 13 | pub struct Filter { 14 | base: I, 15 | filter_op: P, 16 | } 17 | 18 | impl Debug for Filter { 19 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 20 | f.debug_struct("Filter").field("base", &self.base).finish() 21 | } 22 | } 23 | 24 | impl Filter 25 | where 26 | I: ParallelIterator, 27 | { 28 | /// Creates a new `Filter` iterator. 29 | pub(super) fn new(base: I, filter_op: P) -> Self { 30 | Filter { base, filter_op } 31 | } 32 | } 33 | 34 | impl ParallelIterator for Filter 35 | where 36 | I: ParallelIterator, 37 | P: Fn(&I::Item) -> bool + Sync + Send, 38 | { 39 | type Item = I::Item; 40 | 41 | fn drive_unindexed(self, consumer: C) -> C::Result 42 | where 43 | C: UnindexedConsumer, 44 | { 45 | let consumer1 = FilterConsumer::new(consumer, &self.filter_op); 46 | self.base.drive_unindexed(consumer1) 47 | } 48 | } 49 | 50 | /// //////////////////////////////////////////////////////////////////////// 51 | /// Consumer implementation 52 | 53 | struct FilterConsumer<'p, C, P> { 54 | base: C, 55 | filter_op: &'p P, 56 | } 57 | 58 | impl<'p, C, P> FilterConsumer<'p, C, P> { 59 | fn new(base: C, filter_op: &'p P) -> Self { 60 | FilterConsumer { base, filter_op } 61 | } 62 | } 63 | 64 | impl<'p, T, C, P: 'p> Consumer for FilterConsumer<'p, C, P> 65 | where 66 | C: Consumer, 67 | P: Fn(&T) -> bool + Sync, 68 | { 69 | type Folder = FilterFolder<'p, C::Folder, P>; 70 | type Reducer = C::Reducer; 71 | type Result = C::Result; 72 | 73 | fn split_at(self, index: usize) -> (Self, Self, C::Reducer) { 74 | let (left, right, reducer) = self.base.split_at(index); 75 | ( 76 | FilterConsumer::new(left, self.filter_op), 77 | FilterConsumer::new(right, 
self.filter_op), 78 | reducer, 79 | ) 80 | } 81 | 82 | fn into_folder(self) -> Self::Folder { 83 | FilterFolder { 84 | base: self.base.into_folder(), 85 | filter_op: self.filter_op, 86 | } 87 | } 88 | 89 | fn full(&self) -> bool { 90 | self.base.full() 91 | } 92 | } 93 | 94 | impl<'p, T, C, P: 'p> UnindexedConsumer for FilterConsumer<'p, C, P> 95 | where 96 | C: UnindexedConsumer, 97 | P: Fn(&T) -> bool + Sync, 98 | { 99 | fn split_off_left(&self) -> Self { 100 | FilterConsumer::new(self.base.split_off_left(), self.filter_op) 101 | } 102 | 103 | fn to_reducer(&self) -> Self::Reducer { 104 | self.base.to_reducer() 105 | } 106 | } 107 | 108 | struct FilterFolder<'p, C, P> { 109 | base: C, 110 | filter_op: &'p P, 111 | } 112 | 113 | impl<'p, C, P, T> Folder for FilterFolder<'p, C, P> 114 | where 115 | C: Folder, 116 | P: Fn(&T) -> bool + 'p, 117 | { 118 | type Result = C::Result; 119 | 120 | fn consume(self, item: T) -> Self { 121 | let filter_op = self.filter_op; 122 | if filter_op(&item) { 123 | let base = self.base.consume(item); 124 | FilterFolder { base, filter_op } 125 | } else { 126 | self 127 | } 128 | } 129 | 130 | // This cannot easily specialize `consume_iter` to be better than 131 | // the default, because that requires checking `self.base.full()` 132 | // during a call to `self.base.consume_iter()`. 
(#632) 133 | 134 | fn complete(self) -> Self::Result { 135 | self.base.complete() 136 | } 137 | 138 | fn full(&self) -> bool { 139 | self.base.full() 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /src/iter/find.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | use std::sync::atomic::{AtomicBool, Ordering}; 4 | 5 | pub(super) fn find(pi: I, find_op: P) -> Option 6 | where 7 | I: ParallelIterator, 8 | P: Fn(&I::Item) -> bool + Sync, 9 | { 10 | let found = AtomicBool::new(false); 11 | let consumer = FindConsumer::new(&find_op, &found); 12 | pi.drive_unindexed(consumer) 13 | } 14 | 15 | struct FindConsumer<'p, P> { 16 | find_op: &'p P, 17 | found: &'p AtomicBool, 18 | } 19 | 20 | impl<'p, P> FindConsumer<'p, P> { 21 | fn new(find_op: &'p P, found: &'p AtomicBool) -> Self { 22 | FindConsumer { find_op, found } 23 | } 24 | } 25 | 26 | impl<'p, T, P: 'p> Consumer for FindConsumer<'p, P> 27 | where 28 | T: Send, 29 | P: Fn(&T) -> bool + Sync, 30 | { 31 | type Folder = FindFolder<'p, T, P>; 32 | type Reducer = FindReducer; 33 | type Result = Option; 34 | 35 | fn split_at(self, _index: usize) -> (Self, Self, Self::Reducer) { 36 | (self.split_off_left(), self, FindReducer) 37 | } 38 | 39 | fn into_folder(self) -> Self::Folder { 40 | FindFolder { 41 | find_op: self.find_op, 42 | found: self.found, 43 | item: None, 44 | } 45 | } 46 | 47 | fn full(&self) -> bool { 48 | self.found.load(Ordering::Relaxed) 49 | } 50 | } 51 | 52 | impl<'p, T, P: 'p> UnindexedConsumer for FindConsumer<'p, P> 53 | where 54 | T: Send, 55 | P: Fn(&T) -> bool + Sync, 56 | { 57 | fn split_off_left(&self) -> Self { 58 | FindConsumer::new(self.find_op, self.found) 59 | } 60 | 61 | fn to_reducer(&self) -> Self::Reducer { 62 | FindReducer 63 | } 64 | } 65 | 66 | struct FindFolder<'p, T, P> { 67 | find_op: &'p P, 68 | found: &'p AtomicBool, 69 | item: Option, 70 | } 71 | 
72 | impl<'p, T, P> Folder for FindFolder<'p, T, P> 73 | where 74 | P: Fn(&T) -> bool + 'p, 75 | { 76 | type Result = Option; 77 | 78 | fn consume(mut self, item: T) -> Self { 79 | if (self.find_op)(&item) { 80 | self.found.store(true, Ordering::Relaxed); 81 | self.item = Some(item); 82 | } 83 | self 84 | } 85 | 86 | fn consume_iter(mut self, iter: I) -> Self 87 | where 88 | I: IntoIterator, 89 | { 90 | fn not_full(found: &AtomicBool) -> impl Fn(&T) -> bool + '_ { 91 | move |_| !found.load(Ordering::Relaxed) 92 | } 93 | 94 | self.item = iter 95 | .into_iter() 96 | // stop iterating if another thread has found something 97 | .take_while(not_full(self.found)) 98 | .find(self.find_op); 99 | if self.item.is_some() { 100 | self.found.store(true, Ordering::Relaxed) 101 | } 102 | self 103 | } 104 | 105 | fn complete(self) -> Self::Result { 106 | self.item 107 | } 108 | 109 | fn full(&self) -> bool { 110 | self.found.load(Ordering::Relaxed) 111 | } 112 | } 113 | 114 | struct FindReducer; 115 | 116 | impl Reducer> for FindReducer { 117 | fn reduce(self, left: Option, right: Option) -> Option { 118 | left.or(right) 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /src/iter/flatten.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | 4 | /// `Flatten` turns each element to a parallel iterator, then flattens these iterators 5 | /// together. This struct is created by the [`flatten()`] method on [`ParallelIterator`]. 6 | /// 7 | /// [`flatten()`]: trait.ParallelIterator.html#method.flatten 8 | /// [`ParallelIterator`]: trait.ParallelIterator.html 9 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 10 | #[derive(Debug, Clone)] 11 | pub struct Flatten { 12 | base: I, 13 | } 14 | 15 | impl Flatten 16 | where 17 | I: ParallelIterator, 18 | I::Item: IntoParallelIterator, 19 | { 20 | /// Creates a new `Flatten` iterator. 
21 | pub(super) fn new(base: I) -> Self { 22 | Flatten { base } 23 | } 24 | } 25 | 26 | impl ParallelIterator for Flatten 27 | where 28 | I: ParallelIterator, 29 | I::Item: IntoParallelIterator, 30 | { 31 | type Item = ::Item; 32 | 33 | fn drive_unindexed(self, consumer: C) -> C::Result 34 | where 35 | C: UnindexedConsumer, 36 | { 37 | let consumer = FlattenConsumer::new(consumer); 38 | self.base.drive_unindexed(consumer) 39 | } 40 | } 41 | 42 | /// //////////////////////////////////////////////////////////////////////// 43 | /// Consumer implementation 44 | 45 | struct FlattenConsumer { 46 | base: C, 47 | } 48 | 49 | impl FlattenConsumer { 50 | fn new(base: C) -> Self { 51 | FlattenConsumer { base } 52 | } 53 | } 54 | 55 | impl Consumer for FlattenConsumer 56 | where 57 | C: UnindexedConsumer, 58 | T: IntoParallelIterator, 59 | { 60 | type Folder = FlattenFolder; 61 | type Reducer = C::Reducer; 62 | type Result = C::Result; 63 | 64 | fn split_at(self, index: usize) -> (Self, Self, C::Reducer) { 65 | let (left, right, reducer) = self.base.split_at(index); 66 | ( 67 | FlattenConsumer::new(left), 68 | FlattenConsumer::new(right), 69 | reducer, 70 | ) 71 | } 72 | 73 | fn into_folder(self) -> Self::Folder { 74 | FlattenFolder { 75 | base: self.base, 76 | previous: None, 77 | } 78 | } 79 | 80 | fn full(&self) -> bool { 81 | self.base.full() 82 | } 83 | } 84 | 85 | impl UnindexedConsumer for FlattenConsumer 86 | where 87 | C: UnindexedConsumer, 88 | T: IntoParallelIterator, 89 | { 90 | fn split_off_left(&self) -> Self { 91 | FlattenConsumer::new(self.base.split_off_left()) 92 | } 93 | 94 | fn to_reducer(&self) -> Self::Reducer { 95 | self.base.to_reducer() 96 | } 97 | } 98 | 99 | struct FlattenFolder { 100 | base: C, 101 | previous: Option, 102 | } 103 | 104 | impl Folder for FlattenFolder 105 | where 106 | C: UnindexedConsumer, 107 | T: IntoParallelIterator, 108 | { 109 | type Result = C::Result; 110 | 111 | fn consume(self, item: T) -> Self { 112 | let par_iter = 
item.into_par_iter(); 113 | let consumer = self.base.split_off_left(); 114 | let result = par_iter.drive_unindexed(consumer); 115 | 116 | let previous = match self.previous { 117 | None => Some(result), 118 | Some(previous) => { 119 | let reducer = self.base.to_reducer(); 120 | Some(reducer.reduce(previous, result)) 121 | } 122 | }; 123 | 124 | FlattenFolder { 125 | base: self.base, 126 | previous, 127 | } 128 | } 129 | 130 | fn complete(self) -> Self::Result { 131 | match self.previous { 132 | Some(previous) => previous, 133 | None => self.base.into_folder().complete(), 134 | } 135 | } 136 | 137 | fn full(&self) -> bool { 138 | self.base.full() 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /src/iter/flatten_iter.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | 4 | /// `FlattenIter` turns each element to a serial iterator, then flattens these iterators 5 | /// together. This struct is created by the [`flatten_iter()`] method on [`ParallelIterator`]. 6 | /// 7 | /// [`flatten_iter()`]: trait.ParallelIterator.html#method.flatten_iter 8 | /// [`ParallelIterator`]: trait.ParallelIterator.html 9 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 10 | #[derive(Debug, Clone)] 11 | pub struct FlattenIter { 12 | base: I, 13 | } 14 | 15 | impl FlattenIter 16 | where 17 | I: ParallelIterator, 18 | I::Item: IntoIterator, 19 | ::Item: Send, 20 | { 21 | /// Creates a new `FlattenIter` iterator. 
22 | pub(super) fn new(base: I) -> Self { 23 | FlattenIter { base } 24 | } 25 | } 26 | 27 | impl ParallelIterator for FlattenIter 28 | where 29 | I: ParallelIterator, 30 | I::Item: IntoIterator, 31 | ::Item: Send, 32 | { 33 | type Item = ::Item; 34 | 35 | fn drive_unindexed(self, consumer: C) -> C::Result 36 | where 37 | C: UnindexedConsumer, 38 | { 39 | let consumer = FlattenIterConsumer::new(consumer); 40 | self.base.drive_unindexed(consumer) 41 | } 42 | } 43 | 44 | /// //////////////////////////////////////////////////////////////////////// 45 | /// Consumer implementation 46 | 47 | struct FlattenIterConsumer { 48 | base: C, 49 | } 50 | 51 | impl FlattenIterConsumer { 52 | fn new(base: C) -> Self { 53 | FlattenIterConsumer { base } 54 | } 55 | } 56 | 57 | impl Consumer for FlattenIterConsumer 58 | where 59 | C: UnindexedConsumer, 60 | T: IntoIterator, 61 | { 62 | type Folder = FlattenIterFolder; 63 | type Reducer = C::Reducer; 64 | type Result = C::Result; 65 | 66 | fn split_at(self, index: usize) -> (Self, Self, C::Reducer) { 67 | let (left, right, reducer) = self.base.split_at(index); 68 | ( 69 | FlattenIterConsumer::new(left), 70 | FlattenIterConsumer::new(right), 71 | reducer, 72 | ) 73 | } 74 | 75 | fn into_folder(self) -> Self::Folder { 76 | FlattenIterFolder { 77 | base: self.base.into_folder(), 78 | } 79 | } 80 | 81 | fn full(&self) -> bool { 82 | self.base.full() 83 | } 84 | } 85 | 86 | impl UnindexedConsumer for FlattenIterConsumer 87 | where 88 | C: UnindexedConsumer, 89 | T: IntoIterator, 90 | { 91 | fn split_off_left(&self) -> Self { 92 | FlattenIterConsumer::new(self.base.split_off_left()) 93 | } 94 | 95 | fn to_reducer(&self) -> Self::Reducer { 96 | self.base.to_reducer() 97 | } 98 | } 99 | 100 | struct FlattenIterFolder { 101 | base: C, 102 | } 103 | 104 | impl Folder for FlattenIterFolder 105 | where 106 | C: Folder, 107 | T: IntoIterator, 108 | { 109 | type Result = C::Result; 110 | 111 | fn consume(self, item: T) -> Self { 112 | let base = 
self.base.consume_iter(item); 113 | FlattenIterFolder { base } 114 | } 115 | 116 | fn consume_iter(self, iter: I) -> Self 117 | where 118 | I: IntoIterator, 119 | { 120 | let iter = iter.into_iter().flatten(); 121 | let base = self.base.consume_iter(iter); 122 | FlattenIterFolder { base } 123 | } 124 | 125 | fn complete(self) -> Self::Result { 126 | self.base.complete() 127 | } 128 | 129 | fn full(&self) -> bool { 130 | self.base.full() 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /src/iter/for_each.rs: -------------------------------------------------------------------------------- 1 | use super::noop::*; 2 | use super::plumbing::*; 3 | use super::ParallelIterator; 4 | 5 | pub(super) fn for_each(pi: I, op: &F) 6 | where 7 | I: ParallelIterator, 8 | F: Fn(T) + Sync, 9 | T: Send, 10 | { 11 | let consumer = ForEachConsumer { op }; 12 | pi.drive_unindexed(consumer) 13 | } 14 | 15 | struct ForEachConsumer<'f, F> { 16 | op: &'f F, 17 | } 18 | 19 | impl<'f, F, T> Consumer for ForEachConsumer<'f, F> 20 | where 21 | F: Fn(T) + Sync, 22 | { 23 | type Folder = ForEachConsumer<'f, F>; 24 | type Reducer = NoopReducer; 25 | type Result = (); 26 | 27 | fn split_at(self, _index: usize) -> (Self, Self, NoopReducer) { 28 | (self.split_off_left(), self, NoopReducer) 29 | } 30 | 31 | fn into_folder(self) -> Self { 32 | self 33 | } 34 | 35 | fn full(&self) -> bool { 36 | false 37 | } 38 | } 39 | 40 | impl<'f, F, T> Folder for ForEachConsumer<'f, F> 41 | where 42 | F: Fn(T) + Sync, 43 | { 44 | type Result = (); 45 | 46 | fn consume(self, item: T) -> Self { 47 | (self.op)(item); 48 | self 49 | } 50 | 51 | fn consume_iter(self, iter: I) -> Self 52 | where 53 | I: IntoIterator, 54 | { 55 | iter.into_iter().for_each(self.op); 56 | self 57 | } 58 | 59 | fn complete(self) {} 60 | 61 | fn full(&self) -> bool { 62 | false 63 | } 64 | } 65 | 66 | impl<'f, F, T> UnindexedConsumer for ForEachConsumer<'f, F> 67 | where 68 | F: Fn(T) + Sync, 
69 | { 70 | fn split_off_left(&self) -> Self { 71 | ForEachConsumer { op: self.op } 72 | } 73 | 74 | fn to_reducer(&self) -> NoopReducer { 75 | NoopReducer 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/iter/interleave_shortest.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | 4 | /// `InterleaveShortest` is an iterator that works similarly to 5 | /// `Interleave`, but this version stops returning elements once one 6 | /// of the iterators run out. 7 | /// 8 | /// This struct is created by the [`interleave_shortest()`] method on 9 | /// [`IndexedParallelIterator`]. 10 | /// 11 | /// [`interleave_shortest()`]: trait.IndexedParallelIterator.html#method.interleave_shortest 12 | /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 13 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 14 | #[derive(Debug, Clone)] 15 | pub struct InterleaveShortest 16 | where 17 | I: IndexedParallelIterator, 18 | J: IndexedParallelIterator, 19 | { 20 | interleave: Interleave, Take>, 21 | } 22 | 23 | impl InterleaveShortest 24 | where 25 | I: IndexedParallelIterator, 26 | J: IndexedParallelIterator, 27 | { 28 | /// Creates a new `InterleaveShortest` iterator 29 | pub(super) fn new(i: I, j: J) -> Self { 30 | InterleaveShortest { 31 | interleave: if i.len() <= j.len() { 32 | // take equal lengths from both iterators 33 | let n = i.len(); 34 | i.take(n).interleave(j.take(n)) 35 | } else { 36 | // take one extra item from the first iterator 37 | let n = j.len(); 38 | i.take(n + 1).interleave(j.take(n)) 39 | }, 40 | } 41 | } 42 | } 43 | 44 | impl ParallelIterator for InterleaveShortest 45 | where 46 | I: IndexedParallelIterator, 47 | J: IndexedParallelIterator, 48 | { 49 | type Item = I::Item; 50 | 51 | fn drive_unindexed(self, consumer: C) -> C::Result 52 | where 53 | C: Consumer, 54 | { 55 | bridge(self, 
consumer) 56 | } 57 | 58 | fn opt_len(&self) -> Option { 59 | Some(self.len()) 60 | } 61 | } 62 | 63 | impl IndexedParallelIterator for InterleaveShortest 64 | where 65 | I: IndexedParallelIterator, 66 | J: IndexedParallelIterator, 67 | { 68 | fn drive(self, consumer: C) -> C::Result 69 | where 70 | C: Consumer, 71 | { 72 | bridge(self, consumer) 73 | } 74 | 75 | fn len(&self) -> usize { 76 | self.interleave.len() 77 | } 78 | 79 | fn with_producer(self, callback: CB) -> CB::Output 80 | where 81 | CB: ProducerCallback, 82 | { 83 | self.interleave.with_producer(callback) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/iter/noop.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | 3 | pub(super) struct NoopConsumer; 4 | 5 | impl Consumer for NoopConsumer { 6 | type Folder = NoopConsumer; 7 | type Reducer = NoopReducer; 8 | type Result = (); 9 | 10 | fn split_at(self, _index: usize) -> (Self, Self, NoopReducer) { 11 | (NoopConsumer, NoopConsumer, NoopReducer) 12 | } 13 | 14 | fn into_folder(self) -> Self { 15 | self 16 | } 17 | 18 | fn full(&self) -> bool { 19 | false 20 | } 21 | } 22 | 23 | impl Folder for NoopConsumer { 24 | type Result = (); 25 | 26 | fn consume(self, _item: T) -> Self { 27 | self 28 | } 29 | 30 | fn consume_iter(self, iter: I) -> Self 31 | where 32 | I: IntoIterator, 33 | { 34 | iter.into_iter().for_each(drop); 35 | self 36 | } 37 | 38 | fn complete(self) {} 39 | 40 | fn full(&self) -> bool { 41 | false 42 | } 43 | } 44 | 45 | impl UnindexedConsumer for NoopConsumer { 46 | fn split_off_left(&self) -> Self { 47 | NoopConsumer 48 | } 49 | 50 | fn to_reducer(&self) -> NoopReducer { 51 | NoopReducer 52 | } 53 | } 54 | 55 | pub(super) struct NoopReducer; 56 | 57 | impl Reducer<()> for NoopReducer { 58 | fn reduce(self, _left: (), _right: ()) {} 59 | } 60 | 
-------------------------------------------------------------------------------- /src/iter/once.rs: -------------------------------------------------------------------------------- 1 | use crate::iter::plumbing::*; 2 | use crate::iter::*; 3 | 4 | /// Creates a parallel iterator that produces an element exactly once. 5 | /// 6 | /// This admits no parallelism on its own, but it could be chained to existing 7 | /// parallel iterators to extend their contents, or otherwise used for any code 8 | /// that deals with generic parallel iterators. 9 | /// 10 | /// # Examples 11 | /// 12 | /// ``` 13 | /// use rayon::prelude::*; 14 | /// use rayon::iter::once; 15 | /// 16 | /// let pi = (0..1234).into_par_iter() 17 | /// .chain(once(-1)) 18 | /// .chain(1234..10_000); 19 | /// 20 | /// assert_eq!(pi.clone().count(), 10_001); 21 | /// assert_eq!(pi.clone().filter(|&x| x < 0).count(), 1); 22 | /// assert_eq!(pi.position_any(|x| x < 0), Some(1234)); 23 | /// ``` 24 | pub fn once(item: T) -> Once { 25 | Once { item } 26 | } 27 | 28 | /// Iterator adaptor for [the `once()` function](fn.once.html). 29 | #[derive(Clone, Debug)] 30 | pub struct Once { 31 | item: T, 32 | } 33 | 34 | impl ParallelIterator for Once { 35 | type Item = T; 36 | 37 | fn drive_unindexed(self, consumer: C) -> C::Result 38 | where 39 | C: UnindexedConsumer, 40 | { 41 | self.drive(consumer) 42 | } 43 | 44 | fn opt_len(&self) -> Option { 45 | Some(1) 46 | } 47 | } 48 | 49 | impl IndexedParallelIterator for Once { 50 | fn drive(self, consumer: C) -> C::Result 51 | where 52 | C: Consumer, 53 | { 54 | consumer.into_folder().consume(self.item).complete() 55 | } 56 | 57 | fn len(&self) -> usize { 58 | 1 59 | } 60 | 61 | fn with_producer(self, callback: CB) -> CB::Output 62 | where 63 | CB: ProducerCallback, 64 | { 65 | // Let `OptionProducer` handle it. 
66 | Some(self.item).into_par_iter().with_producer(callback) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/iter/positions.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | 4 | use std::fmt::{self, Debug}; 5 | 6 | /// `Positions` takes a predicate `predicate` and filters out elements that match, 7 | /// yielding their indices. 8 | /// 9 | /// This struct is created by the [`positions()`] method on [`IndexedParallelIterator`] 10 | /// 11 | /// [`positions()`]: trait.IndexedParallelIterator.html#method.positions 12 | /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 13 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 14 | #[derive(Clone)] 15 | pub struct Positions { 16 | base: I, 17 | predicate: P, 18 | } 19 | 20 | impl Debug for Positions { 21 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 22 | f.debug_struct("Positions") 23 | .field("base", &self.base) 24 | .finish() 25 | } 26 | } 27 | 28 | impl Positions 29 | where 30 | I: IndexedParallelIterator, 31 | { 32 | /// Create a new `Positions` iterator. 
33 | pub(super) fn new(base: I, predicate: P) -> Self { 34 | Positions { base, predicate } 35 | } 36 | } 37 | 38 | impl ParallelIterator for Positions 39 | where 40 | I: IndexedParallelIterator, 41 | P: Fn(I::Item) -> bool + Sync + Send, 42 | { 43 | type Item = usize; 44 | 45 | fn drive_unindexed(self, consumer: C) -> C::Result 46 | where 47 | C: UnindexedConsumer, 48 | { 49 | let consumer1 = PositionsConsumer::new(consumer, &self.predicate, 0); 50 | self.base.drive(consumer1) 51 | } 52 | } 53 | 54 | /// //////////////////////////////////////////////////////////////////////// 55 | /// Consumer implementation 56 | 57 | struct PositionsConsumer<'p, C, P> { 58 | base: C, 59 | predicate: &'p P, 60 | offset: usize, 61 | } 62 | 63 | impl<'p, C, P> PositionsConsumer<'p, C, P> { 64 | fn new(base: C, predicate: &'p P, offset: usize) -> Self { 65 | PositionsConsumer { 66 | base, 67 | predicate, 68 | offset, 69 | } 70 | } 71 | } 72 | 73 | impl<'p, T, C, P> Consumer for PositionsConsumer<'p, C, P> 74 | where 75 | C: Consumer, 76 | P: Fn(T) -> bool + Sync, 77 | { 78 | type Folder = PositionsFolder<'p, C::Folder, P>; 79 | type Reducer = C::Reducer; 80 | type Result = C::Result; 81 | 82 | fn split_at(self, index: usize) -> (Self, Self, C::Reducer) { 83 | let (left, right, reducer) = self.base.split_at(index); 84 | ( 85 | PositionsConsumer::new(left, self.predicate, self.offset), 86 | PositionsConsumer::new(right, self.predicate, self.offset + index), 87 | reducer, 88 | ) 89 | } 90 | 91 | fn into_folder(self) -> Self::Folder { 92 | PositionsFolder { 93 | base: self.base.into_folder(), 94 | predicate: self.predicate, 95 | offset: self.offset, 96 | } 97 | } 98 | 99 | fn full(&self) -> bool { 100 | self.base.full() 101 | } 102 | } 103 | 104 | struct PositionsFolder<'p, F, P> { 105 | base: F, 106 | predicate: &'p P, 107 | offset: usize, 108 | } 109 | 110 | impl Folder for PositionsFolder<'_, F, P> 111 | where 112 | F: Folder, 113 | P: Fn(T) -> bool, 114 | { 115 | type Result = 
F::Result; 116 | 117 | fn consume(mut self, item: T) -> Self { 118 | let index = self.offset; 119 | self.offset += 1; 120 | if (self.predicate)(item) { 121 | self.base = self.base.consume(index); 122 | } 123 | self 124 | } 125 | 126 | // This cannot easily specialize `consume_iter` to be better than 127 | // the default, because that requires checking `self.base.full()` 128 | // during a call to `self.base.consume_iter()`. (#632) 129 | 130 | fn complete(self) -> Self::Result { 131 | self.base.complete() 132 | } 133 | 134 | fn full(&self) -> bool { 135 | self.base.full() 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /src/iter/product.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::ParallelIterator; 3 | 4 | use std::iter::{self, Product}; 5 | use std::marker::PhantomData; 6 | 7 | pub(super) fn product(pi: PI) -> P 8 | where 9 | PI: ParallelIterator, 10 | P: Send + Product + Product, 11 | { 12 | pi.drive_unindexed(ProductConsumer::new()) 13 | } 14 | 15 | fn mul(left: T, right: T) -> T { 16 | iter::once(left).chain(iter::once(right)).product() 17 | } 18 | 19 | struct ProductConsumer { 20 | _marker: PhantomData<*const P>, 21 | } 22 | 23 | unsafe impl Send for ProductConsumer

{} 24 | 25 | impl ProductConsumer

{ 26 | fn new() -> ProductConsumer

{ 27 | ProductConsumer { 28 | _marker: PhantomData, 29 | } 30 | } 31 | } 32 | 33 | impl Consumer for ProductConsumer

34 | where 35 | P: Send + Product + Product, 36 | { 37 | type Folder = ProductFolder

; 38 | type Reducer = Self; 39 | type Result = P; 40 | 41 | fn split_at(self, _index: usize) -> (Self, Self, Self) { 42 | ( 43 | ProductConsumer::new(), 44 | ProductConsumer::new(), 45 | ProductConsumer::new(), 46 | ) 47 | } 48 | 49 | fn into_folder(self) -> Self::Folder { 50 | ProductFolder { 51 | product: iter::empty::().product(), 52 | } 53 | } 54 | 55 | fn full(&self) -> bool { 56 | false 57 | } 58 | } 59 | 60 | impl UnindexedConsumer for ProductConsumer

61 | where 62 | P: Send + Product + Product, 63 | { 64 | fn split_off_left(&self) -> Self { 65 | ProductConsumer::new() 66 | } 67 | 68 | fn to_reducer(&self) -> Self::Reducer { 69 | ProductConsumer::new() 70 | } 71 | } 72 | 73 | impl

Reducer

for ProductConsumer

74 | where 75 | P: Send + Product, 76 | { 77 | fn reduce(self, left: P, right: P) -> P { 78 | mul(left, right) 79 | } 80 | } 81 | 82 | struct ProductFolder

{ 83 | product: P, 84 | } 85 | 86 | impl Folder for ProductFolder

87 | where 88 | P: Product + Product, 89 | { 90 | type Result = P; 91 | 92 | fn consume(self, item: T) -> Self { 93 | ProductFolder { 94 | product: mul(self.product, iter::once(item).product()), 95 | } 96 | } 97 | 98 | fn consume_iter(self, iter: I) -> Self 99 | where 100 | I: IntoIterator, 101 | { 102 | ProductFolder { 103 | product: mul(self.product, iter.into_iter().product()), 104 | } 105 | } 106 | 107 | fn complete(self) -> P { 108 | self.product 109 | } 110 | 111 | fn full(&self) -> bool { 112 | false 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /src/iter/reduce.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::ParallelIterator; 3 | 4 | pub(super) fn reduce(pi: PI, identity: ID, reduce_op: R) -> T 5 | where 6 | PI: ParallelIterator, 7 | R: Fn(T, T) -> T + Sync, 8 | ID: Fn() -> T + Sync, 9 | T: Send, 10 | { 11 | let consumer = ReduceConsumer { 12 | identity: &identity, 13 | reduce_op: &reduce_op, 14 | }; 15 | pi.drive_unindexed(consumer) 16 | } 17 | 18 | struct ReduceConsumer<'r, R, ID> { 19 | identity: &'r ID, 20 | reduce_op: &'r R, 21 | } 22 | 23 | impl<'r, R, ID> Copy for ReduceConsumer<'r, R, ID> {} 24 | 25 | impl<'r, R, ID> Clone for ReduceConsumer<'r, R, ID> { 26 | fn clone(&self) -> Self { 27 | *self 28 | } 29 | } 30 | 31 | impl<'r, R, ID, T> Consumer for ReduceConsumer<'r, R, ID> 32 | where 33 | R: Fn(T, T) -> T + Sync, 34 | ID: Fn() -> T + Sync, 35 | T: Send, 36 | { 37 | type Folder = ReduceFolder<'r, R, T>; 38 | type Reducer = Self; 39 | type Result = T; 40 | 41 | fn split_at(self, _index: usize) -> (Self, Self, Self) { 42 | (self, self, self) 43 | } 44 | 45 | fn into_folder(self) -> Self::Folder { 46 | ReduceFolder { 47 | reduce_op: self.reduce_op, 48 | item: (self.identity)(), 49 | } 50 | } 51 | 52 | fn full(&self) -> bool { 53 | false 54 | } 55 | } 56 | 57 | impl<'r, R, ID, T> UnindexedConsumer for 
ReduceConsumer<'r, R, ID> 58 | where 59 | R: Fn(T, T) -> T + Sync, 60 | ID: Fn() -> T + Sync, 61 | T: Send, 62 | { 63 | fn split_off_left(&self) -> Self { 64 | *self 65 | } 66 | 67 | fn to_reducer(&self) -> Self::Reducer { 68 | *self 69 | } 70 | } 71 | 72 | impl<'r, R, ID, T> Reducer for ReduceConsumer<'r, R, ID> 73 | where 74 | R: Fn(T, T) -> T + Sync, 75 | { 76 | fn reduce(self, left: T, right: T) -> T { 77 | (self.reduce_op)(left, right) 78 | } 79 | } 80 | 81 | struct ReduceFolder<'r, R, T> { 82 | reduce_op: &'r R, 83 | item: T, 84 | } 85 | 86 | impl<'r, R, T> Folder for ReduceFolder<'r, R, T> 87 | where 88 | R: Fn(T, T) -> T, 89 | { 90 | type Result = T; 91 | 92 | fn consume(self, item: T) -> Self { 93 | ReduceFolder { 94 | reduce_op: self.reduce_op, 95 | item: (self.reduce_op)(self.item, item), 96 | } 97 | } 98 | 99 | fn consume_iter(self, iter: I) -> Self 100 | where 101 | I: IntoIterator, 102 | { 103 | ReduceFolder { 104 | reduce_op: self.reduce_op, 105 | item: iter.into_iter().fold(self.item, self.reduce_op), 106 | } 107 | } 108 | 109 | fn complete(self) -> T { 110 | self.item 111 | } 112 | 113 | fn full(&self) -> bool { 114 | false 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/iter/rev.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | use std::iter; 4 | 5 | /// `Rev` is an iterator that produces elements in reverse order. This struct 6 | /// is created by the [`rev()`] method on [`IndexedParallelIterator`] 7 | /// 8 | /// [`rev()`]: trait.IndexedParallelIterator.html#method.rev 9 | /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 10 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 11 | #[derive(Debug, Clone)] 12 | pub struct Rev { 13 | base: I, 14 | } 15 | 16 | impl Rev 17 | where 18 | I: IndexedParallelIterator, 19 | { 20 | /// Creates a new `Rev` iterator. 
21 | pub(super) fn new(base: I) -> Self { 22 | Rev { base } 23 | } 24 | } 25 | 26 | impl ParallelIterator for Rev 27 | where 28 | I: IndexedParallelIterator, 29 | { 30 | type Item = I::Item; 31 | 32 | fn drive_unindexed(self, consumer: C) -> C::Result 33 | where 34 | C: UnindexedConsumer, 35 | { 36 | bridge(self, consumer) 37 | } 38 | 39 | fn opt_len(&self) -> Option { 40 | Some(self.len()) 41 | } 42 | } 43 | 44 | impl IndexedParallelIterator for Rev 45 | where 46 | I: IndexedParallelIterator, 47 | { 48 | fn drive>(self, consumer: C) -> C::Result { 49 | bridge(self, consumer) 50 | } 51 | 52 | fn len(&self) -> usize { 53 | self.base.len() 54 | } 55 | 56 | fn with_producer(self, callback: CB) -> CB::Output 57 | where 58 | CB: ProducerCallback, 59 | { 60 | let len = self.base.len(); 61 | return self.base.with_producer(Callback { callback, len }); 62 | 63 | struct Callback { 64 | callback: CB, 65 | len: usize, 66 | } 67 | 68 | impl ProducerCallback for Callback 69 | where 70 | CB: ProducerCallback, 71 | { 72 | type Output = CB::Output; 73 | fn callback

(self, base: P) -> CB::Output 74 | where 75 | P: Producer, 76 | { 77 | let producer = RevProducer { 78 | base, 79 | len: self.len, 80 | }; 81 | self.callback.callback(producer) 82 | } 83 | } 84 | } 85 | } 86 | 87 | struct RevProducer

{ 88 | base: P, 89 | len: usize, 90 | } 91 | 92 | impl

Producer for RevProducer

93 | where 94 | P: Producer, 95 | { 96 | type Item = P::Item; 97 | type IntoIter = iter::Rev; 98 | 99 | fn into_iter(self) -> Self::IntoIter { 100 | self.base.into_iter().rev() 101 | } 102 | 103 | fn min_len(&self) -> usize { 104 | self.base.min_len() 105 | } 106 | fn max_len(&self) -> usize { 107 | self.base.max_len() 108 | } 109 | 110 | fn split_at(self, index: usize) -> (Self, Self) { 111 | let (left, right) = self.base.split_at(self.len - index); 112 | ( 113 | RevProducer { 114 | base: right, 115 | len: index, 116 | }, 117 | RevProducer { 118 | base: left, 119 | len: self.len - index, 120 | }, 121 | ) 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /src/iter/skip.rs: -------------------------------------------------------------------------------- 1 | use super::noop::NoopConsumer; 2 | use super::plumbing::*; 3 | use super::*; 4 | use std::cmp::min; 5 | 6 | /// `Skip` is an iterator that skips over the first `n` elements. 7 | /// This struct is created by the [`skip()`] method on [`IndexedParallelIterator`] 8 | /// 9 | /// [`skip()`]: trait.IndexedParallelIterator.html#method.skip 10 | /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 11 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 12 | #[derive(Debug, Clone)] 13 | pub struct Skip { 14 | base: I, 15 | n: usize, 16 | } 17 | 18 | impl Skip 19 | where 20 | I: IndexedParallelIterator, 21 | { 22 | /// Creates a new `Skip` iterator. 
23 | pub(super) fn new(base: I, n: usize) -> Self { 24 | let n = min(base.len(), n); 25 | Skip { base, n } 26 | } 27 | } 28 | 29 | impl ParallelIterator for Skip 30 | where 31 | I: IndexedParallelIterator, 32 | { 33 | type Item = I::Item; 34 | 35 | fn drive_unindexed(self, consumer: C) -> C::Result 36 | where 37 | C: UnindexedConsumer, 38 | { 39 | bridge(self, consumer) 40 | } 41 | 42 | fn opt_len(&self) -> Option { 43 | Some(self.len()) 44 | } 45 | } 46 | 47 | impl IndexedParallelIterator for Skip 48 | where 49 | I: IndexedParallelIterator, 50 | { 51 | fn len(&self) -> usize { 52 | self.base.len() - self.n 53 | } 54 | 55 | fn drive>(self, consumer: C) -> C::Result { 56 | bridge(self, consumer) 57 | } 58 | 59 | fn with_producer(self, callback: CB) -> CB::Output 60 | where 61 | CB: ProducerCallback, 62 | { 63 | return self.base.with_producer(Callback { 64 | callback, 65 | n: self.n, 66 | }); 67 | 68 | struct Callback { 69 | callback: CB, 70 | n: usize, 71 | } 72 | 73 | impl ProducerCallback for Callback 74 | where 75 | CB: ProducerCallback, 76 | { 77 | type Output = CB::Output; 78 | fn callback

(self, base: P) -> CB::Output 79 | where 80 | P: Producer, 81 | { 82 | crate::in_place_scope(|scope| { 83 | let Self { callback, n } = self; 84 | let (before_skip, after_skip) = base.split_at(n); 85 | 86 | // Run the skipped part separately for side effects. 87 | // We'll still get any panics propagated back by the scope. 88 | scope.spawn(move |_| bridge_producer_consumer(n, before_skip, NoopConsumer)); 89 | 90 | callback.callback(after_skip) 91 | }) 92 | } 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /src/iter/skip_any.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | 5 | /// `SkipAny` is an iterator that skips over `n` elements from anywhere in `I`. 6 | /// This struct is created by the [`skip_any()`] method on [`ParallelIterator`] 7 | /// 8 | /// [`skip_any()`]: trait.ParallelIterator.html#method.skip_any 9 | /// [`ParallelIterator`]: trait.ParallelIterator.html 10 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 11 | #[derive(Clone, Debug)] 12 | pub struct SkipAny { 13 | base: I, 14 | count: usize, 15 | } 16 | 17 | impl SkipAny 18 | where 19 | I: ParallelIterator, 20 | { 21 | /// Creates a new `SkipAny` iterator. 
22 | pub(super) fn new(base: I, count: usize) -> Self { 23 | SkipAny { base, count } 24 | } 25 | } 26 | 27 | impl ParallelIterator for SkipAny 28 | where 29 | I: ParallelIterator, 30 | { 31 | type Item = I::Item; 32 | 33 | fn drive_unindexed(self, consumer: C) -> C::Result 34 | where 35 | C: UnindexedConsumer, 36 | { 37 | let consumer1 = SkipAnyConsumer { 38 | base: consumer, 39 | count: &AtomicUsize::new(self.count), 40 | }; 41 | self.base.drive_unindexed(consumer1) 42 | } 43 | } 44 | 45 | /// //////////////////////////////////////////////////////////////////////// 46 | /// Consumer implementation 47 | 48 | struct SkipAnyConsumer<'f, C> { 49 | base: C, 50 | count: &'f AtomicUsize, 51 | } 52 | 53 | impl<'f, T, C> Consumer for SkipAnyConsumer<'f, C> 54 | where 55 | C: Consumer, 56 | T: Send, 57 | { 58 | type Folder = SkipAnyFolder<'f, C::Folder>; 59 | type Reducer = C::Reducer; 60 | type Result = C::Result; 61 | 62 | fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) { 63 | let (left, right, reducer) = self.base.split_at(index); 64 | ( 65 | SkipAnyConsumer { base: left, ..self }, 66 | SkipAnyConsumer { 67 | base: right, 68 | ..self 69 | }, 70 | reducer, 71 | ) 72 | } 73 | 74 | fn into_folder(self) -> Self::Folder { 75 | SkipAnyFolder { 76 | base: self.base.into_folder(), 77 | count: self.count, 78 | } 79 | } 80 | 81 | fn full(&self) -> bool { 82 | self.base.full() 83 | } 84 | } 85 | 86 | impl<'f, T, C> UnindexedConsumer for SkipAnyConsumer<'f, C> 87 | where 88 | C: UnindexedConsumer, 89 | T: Send, 90 | { 91 | fn split_off_left(&self) -> Self { 92 | SkipAnyConsumer { 93 | base: self.base.split_off_left(), 94 | ..*self 95 | } 96 | } 97 | 98 | fn to_reducer(&self) -> Self::Reducer { 99 | self.base.to_reducer() 100 | } 101 | } 102 | 103 | struct SkipAnyFolder<'f, C> { 104 | base: C, 105 | count: &'f AtomicUsize, 106 | } 107 | 108 | fn checked_decrement(u: &AtomicUsize) -> bool { 109 | u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1)) 
110 | .is_ok() 111 | } 112 | 113 | impl<'f, T, C> Folder for SkipAnyFolder<'f, C> 114 | where 115 | C: Folder, 116 | { 117 | type Result = C::Result; 118 | 119 | fn consume(mut self, item: T) -> Self { 120 | if !checked_decrement(self.count) { 121 | self.base = self.base.consume(item); 122 | } 123 | self 124 | } 125 | 126 | fn consume_iter(mut self, iter: I) -> Self 127 | where 128 | I: IntoIterator, 129 | { 130 | self.base = self.base.consume_iter( 131 | iter.into_iter() 132 | .skip_while(move |_| checked_decrement(self.count)), 133 | ); 134 | self 135 | } 136 | 137 | fn complete(self) -> C::Result { 138 | self.base.complete() 139 | } 140 | 141 | fn full(&self) -> bool { 142 | self.base.full() 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/iter/step_by.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::min; 2 | 3 | use super::plumbing::*; 4 | use super::*; 5 | use crate::math::div_round_up; 6 | use std::iter; 7 | use std::usize; 8 | 9 | /// `StepBy` is an iterator that skips `n` elements between each yield, where `n` is the given step. 10 | /// This struct is created by the [`step_by()`] method on [`IndexedParallelIterator`] 11 | /// 12 | /// [`step_by()`]: trait.IndexedParallelIterator.html#method.step_by 13 | /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 14 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 15 | #[derive(Debug, Clone)] 16 | pub struct StepBy { 17 | base: I, 18 | step: usize, 19 | } 20 | 21 | impl StepBy 22 | where 23 | I: IndexedParallelIterator, 24 | { 25 | /// Creates a new `StepBy` iterator. 
26 | pub(super) fn new(base: I, step: usize) -> Self { 27 | StepBy { base, step } 28 | } 29 | } 30 | 31 | impl ParallelIterator for StepBy 32 | where 33 | I: IndexedParallelIterator, 34 | { 35 | type Item = I::Item; 36 | 37 | fn drive_unindexed(self, consumer: C) -> C::Result 38 | where 39 | C: UnindexedConsumer, 40 | { 41 | bridge(self, consumer) 42 | } 43 | 44 | fn opt_len(&self) -> Option { 45 | Some(self.len()) 46 | } 47 | } 48 | 49 | impl IndexedParallelIterator for StepBy 50 | where 51 | I: IndexedParallelIterator, 52 | { 53 | fn drive>(self, consumer: C) -> C::Result { 54 | bridge(self, consumer) 55 | } 56 | 57 | fn len(&self) -> usize { 58 | div_round_up(self.base.len(), self.step) 59 | } 60 | 61 | fn with_producer(self, callback: CB) -> CB::Output 62 | where 63 | CB: ProducerCallback, 64 | { 65 | let len = self.base.len(); 66 | return self.base.with_producer(Callback { 67 | callback, 68 | step: self.step, 69 | len, 70 | }); 71 | 72 | struct Callback { 73 | callback: CB, 74 | step: usize, 75 | len: usize, 76 | } 77 | 78 | impl ProducerCallback for Callback 79 | where 80 | CB: ProducerCallback, 81 | { 82 | type Output = CB::Output; 83 | fn callback

(self, base: P) -> CB::Output 84 | where 85 | P: Producer, 86 | { 87 | let producer = StepByProducer { 88 | base, 89 | step: self.step, 90 | len: self.len, 91 | }; 92 | self.callback.callback(producer) 93 | } 94 | } 95 | } 96 | } 97 | 98 | /// //////////////////////////////////////////////////////////////////////// 99 | /// Producer implementation 100 | 101 | struct StepByProducer

{ 102 | base: P, 103 | step: usize, 104 | len: usize, 105 | } 106 | 107 | impl

Producer for StepByProducer

108 | where 109 | P: Producer, 110 | { 111 | type Item = P::Item; 112 | type IntoIter = iter::StepBy; 113 | 114 | fn into_iter(self) -> Self::IntoIter { 115 | self.base.into_iter().step_by(self.step) 116 | } 117 | 118 | fn split_at(self, index: usize) -> (Self, Self) { 119 | let elem_index = min(index * self.step, self.len); 120 | 121 | let (left, right) = self.base.split_at(elem_index); 122 | ( 123 | StepByProducer { 124 | base: left, 125 | step: self.step, 126 | len: elem_index, 127 | }, 128 | StepByProducer { 129 | base: right, 130 | step: self.step, 131 | len: self.len - elem_index, 132 | }, 133 | ) 134 | } 135 | 136 | fn min_len(&self) -> usize { 137 | div_round_up(self.base.min_len(), self.step) 138 | } 139 | 140 | fn max_len(&self) -> usize { 141 | self.base.max_len() / self.step 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /src/iter/sum.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::ParallelIterator; 3 | 4 | use std::iter::{self, Sum}; 5 | use std::marker::PhantomData; 6 | 7 | pub(super) fn sum(pi: PI) -> S 8 | where 9 | PI: ParallelIterator, 10 | S: Send + Sum + Sum, 11 | { 12 | pi.drive_unindexed(SumConsumer::new()) 13 | } 14 | 15 | fn add(left: T, right: T) -> T { 16 | iter::once(left).chain(iter::once(right)).sum() 17 | } 18 | 19 | struct SumConsumer { 20 | _marker: PhantomData<*const S>, 21 | } 22 | 23 | unsafe impl Send for SumConsumer {} 24 | 25 | impl SumConsumer { 26 | fn new() -> SumConsumer { 27 | SumConsumer { 28 | _marker: PhantomData, 29 | } 30 | } 31 | } 32 | 33 | impl Consumer for SumConsumer 34 | where 35 | S: Send + Sum + Sum, 36 | { 37 | type Folder = SumFolder; 38 | type Reducer = Self; 39 | type Result = S; 40 | 41 | fn split_at(self, _index: usize) -> (Self, Self, Self) { 42 | (SumConsumer::new(), SumConsumer::new(), SumConsumer::new()) 43 | } 44 | 45 | fn into_folder(self) -> Self::Folder { 46 | 
SumFolder { 47 | sum: iter::empty::().sum(), 48 | } 49 | } 50 | 51 | fn full(&self) -> bool { 52 | false 53 | } 54 | } 55 | 56 | impl UnindexedConsumer for SumConsumer 57 | where 58 | S: Send + Sum + Sum, 59 | { 60 | fn split_off_left(&self) -> Self { 61 | SumConsumer::new() 62 | } 63 | 64 | fn to_reducer(&self) -> Self::Reducer { 65 | SumConsumer::new() 66 | } 67 | } 68 | 69 | impl Reducer for SumConsumer 70 | where 71 | S: Send + Sum, 72 | { 73 | fn reduce(self, left: S, right: S) -> S { 74 | add(left, right) 75 | } 76 | } 77 | 78 | struct SumFolder { 79 | sum: S, 80 | } 81 | 82 | impl Folder for SumFolder 83 | where 84 | S: Sum + Sum, 85 | { 86 | type Result = S; 87 | 88 | fn consume(self, item: T) -> Self { 89 | SumFolder { 90 | sum: add(self.sum, iter::once(item).sum()), 91 | } 92 | } 93 | 94 | fn consume_iter(self, iter: I) -> Self 95 | where 96 | I: IntoIterator, 97 | { 98 | SumFolder { 99 | sum: add(self.sum, iter.into_iter().sum()), 100 | } 101 | } 102 | 103 | fn complete(self) -> S { 104 | self.sum 105 | } 106 | 107 | fn full(&self) -> bool { 108 | false 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /src/iter/take.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | use std::cmp::min; 4 | 5 | /// `Take` is an iterator that iterates over the first `n` elements. 6 | /// This struct is created by the [`take()`] method on [`IndexedParallelIterator`] 7 | /// 8 | /// [`take()`]: trait.IndexedParallelIterator.html#method.take 9 | /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 10 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 11 | #[derive(Debug, Clone)] 12 | pub struct Take { 13 | base: I, 14 | n: usize, 15 | } 16 | 17 | impl Take 18 | where 19 | I: IndexedParallelIterator, 20 | { 21 | /// Creates a new `Take` iterator. 
22 | pub(super) fn new(base: I, n: usize) -> Self { 23 | let n = min(base.len(), n); 24 | Take { base, n } 25 | } 26 | } 27 | 28 | impl ParallelIterator for Take 29 | where 30 | I: IndexedParallelIterator, 31 | { 32 | type Item = I::Item; 33 | 34 | fn drive_unindexed(self, consumer: C) -> C::Result 35 | where 36 | C: UnindexedConsumer, 37 | { 38 | bridge(self, consumer) 39 | } 40 | 41 | fn opt_len(&self) -> Option { 42 | Some(self.len()) 43 | } 44 | } 45 | 46 | impl IndexedParallelIterator for Take 47 | where 48 | I: IndexedParallelIterator, 49 | { 50 | fn len(&self) -> usize { 51 | self.n 52 | } 53 | 54 | fn drive>(self, consumer: C) -> C::Result { 55 | bridge(self, consumer) 56 | } 57 | 58 | fn with_producer(self, callback: CB) -> CB::Output 59 | where 60 | CB: ProducerCallback, 61 | { 62 | return self.base.with_producer(Callback { 63 | callback, 64 | n: self.n, 65 | }); 66 | 67 | struct Callback { 68 | callback: CB, 69 | n: usize, 70 | } 71 | 72 | impl ProducerCallback for Callback 73 | where 74 | CB: ProducerCallback, 75 | { 76 | type Output = CB::Output; 77 | fn callback

(self, base: P) -> CB::Output 78 | where 79 | P: Producer, 80 | { 81 | let (producer, _) = base.split_at(self.n); 82 | self.callback.callback(producer) 83 | } 84 | } 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/iter/take_any.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | 5 | /// `TakeAny` is an iterator that iterates over `n` elements from anywhere in `I`. 6 | /// This struct is created by the [`take_any()`] method on [`ParallelIterator`] 7 | /// 8 | /// [`take_any()`]: trait.ParallelIterator.html#method.take_any 9 | /// [`ParallelIterator`]: trait.ParallelIterator.html 10 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 11 | #[derive(Clone, Debug)] 12 | pub struct TakeAny { 13 | base: I, 14 | count: usize, 15 | } 16 | 17 | impl TakeAny 18 | where 19 | I: ParallelIterator, 20 | { 21 | /// Creates a new `TakeAny` iterator. 
22 | pub(super) fn new(base: I, count: usize) -> Self { 23 | TakeAny { base, count } 24 | } 25 | } 26 | 27 | impl ParallelIterator for TakeAny 28 | where 29 | I: ParallelIterator, 30 | { 31 | type Item = I::Item; 32 | 33 | fn drive_unindexed(self, consumer: C) -> C::Result 34 | where 35 | C: UnindexedConsumer, 36 | { 37 | let consumer1 = TakeAnyConsumer { 38 | base: consumer, 39 | count: &AtomicUsize::new(self.count), 40 | }; 41 | self.base.drive_unindexed(consumer1) 42 | } 43 | } 44 | 45 | /// //////////////////////////////////////////////////////////////////////// 46 | /// Consumer implementation 47 | 48 | struct TakeAnyConsumer<'f, C> { 49 | base: C, 50 | count: &'f AtomicUsize, 51 | } 52 | 53 | impl<'f, T, C> Consumer for TakeAnyConsumer<'f, C> 54 | where 55 | C: Consumer, 56 | T: Send, 57 | { 58 | type Folder = TakeAnyFolder<'f, C::Folder>; 59 | type Reducer = C::Reducer; 60 | type Result = C::Result; 61 | 62 | fn split_at(self, index: usize) -> (Self, Self, Self::Reducer) { 63 | let (left, right, reducer) = self.base.split_at(index); 64 | ( 65 | TakeAnyConsumer { base: left, ..self }, 66 | TakeAnyConsumer { 67 | base: right, 68 | ..self 69 | }, 70 | reducer, 71 | ) 72 | } 73 | 74 | fn into_folder(self) -> Self::Folder { 75 | TakeAnyFolder { 76 | base: self.base.into_folder(), 77 | count: self.count, 78 | } 79 | } 80 | 81 | fn full(&self) -> bool { 82 | self.count.load(Ordering::Relaxed) == 0 || self.base.full() 83 | } 84 | } 85 | 86 | impl<'f, T, C> UnindexedConsumer for TakeAnyConsumer<'f, C> 87 | where 88 | C: UnindexedConsumer, 89 | T: Send, 90 | { 91 | fn split_off_left(&self) -> Self { 92 | TakeAnyConsumer { 93 | base: self.base.split_off_left(), 94 | ..*self 95 | } 96 | } 97 | 98 | fn to_reducer(&self) -> Self::Reducer { 99 | self.base.to_reducer() 100 | } 101 | } 102 | 103 | struct TakeAnyFolder<'f, C> { 104 | base: C, 105 | count: &'f AtomicUsize, 106 | } 107 | 108 | fn checked_decrement(u: &AtomicUsize) -> bool { 109 | 
u.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |u| u.checked_sub(1)) 110 | .is_ok() 111 | } 112 | 113 | impl<'f, T, C> Folder for TakeAnyFolder<'f, C> 114 | where 115 | C: Folder, 116 | { 117 | type Result = C::Result; 118 | 119 | fn consume(mut self, item: T) -> Self { 120 | if checked_decrement(self.count) { 121 | self.base = self.base.consume(item); 122 | } 123 | self 124 | } 125 | 126 | fn consume_iter(mut self, iter: I) -> Self 127 | where 128 | I: IntoIterator, 129 | { 130 | self.base = self.base.consume_iter( 131 | iter.into_iter() 132 | .take_while(move |_| checked_decrement(self.count)), 133 | ); 134 | self 135 | } 136 | 137 | fn complete(self) -> C::Result { 138 | self.base.complete() 139 | } 140 | 141 | fn full(&self) -> bool { 142 | self.count.load(Ordering::Relaxed) == 0 || self.base.full() 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /src/iter/try_reduce.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::ParallelIterator; 3 | use super::Try; 4 | 5 | use std::ops::ControlFlow::{self, Break, Continue}; 6 | use std::sync::atomic::{AtomicBool, Ordering}; 7 | 8 | pub(super) fn try_reduce(pi: PI, identity: ID, reduce_op: R) -> T 9 | where 10 | PI: ParallelIterator, 11 | R: Fn(T::Output, T::Output) -> T + Sync, 12 | ID: Fn() -> T::Output + Sync, 13 | T: Try + Send, 14 | { 15 | let full = AtomicBool::new(false); 16 | let consumer = TryReduceConsumer { 17 | identity: &identity, 18 | reduce_op: &reduce_op, 19 | full: &full, 20 | }; 21 | pi.drive_unindexed(consumer) 22 | } 23 | 24 | struct TryReduceConsumer<'r, R, ID> { 25 | identity: &'r ID, 26 | reduce_op: &'r R, 27 | full: &'r AtomicBool, 28 | } 29 | 30 | impl<'r, R, ID> Copy for TryReduceConsumer<'r, R, ID> {} 31 | 32 | impl<'r, R, ID> Clone for TryReduceConsumer<'r, R, ID> { 33 | fn clone(&self) -> Self { 34 | *self 35 | } 36 | } 37 | 38 | impl<'r, R, ID, T> Consumer 
for TryReduceConsumer<'r, R, ID> 39 | where 40 | R: Fn(T::Output, T::Output) -> T + Sync, 41 | ID: Fn() -> T::Output + Sync, 42 | T: Try + Send, 43 | { 44 | type Folder = TryReduceFolder<'r, R, T>; 45 | type Reducer = Self; 46 | type Result = T; 47 | 48 | fn split_at(self, _index: usize) -> (Self, Self, Self) { 49 | (self, self, self) 50 | } 51 | 52 | fn into_folder(self) -> Self::Folder { 53 | TryReduceFolder { 54 | reduce_op: self.reduce_op, 55 | control: Continue((self.identity)()), 56 | full: self.full, 57 | } 58 | } 59 | 60 | fn full(&self) -> bool { 61 | self.full.load(Ordering::Relaxed) 62 | } 63 | } 64 | 65 | impl<'r, R, ID, T> UnindexedConsumer for TryReduceConsumer<'r, R, ID> 66 | where 67 | R: Fn(T::Output, T::Output) -> T + Sync, 68 | ID: Fn() -> T::Output + Sync, 69 | T: Try + Send, 70 | { 71 | fn split_off_left(&self) -> Self { 72 | *self 73 | } 74 | 75 | fn to_reducer(&self) -> Self::Reducer { 76 | *self 77 | } 78 | } 79 | 80 | impl<'r, R, ID, T> Reducer for TryReduceConsumer<'r, R, ID> 81 | where 82 | R: Fn(T::Output, T::Output) -> T + Sync, 83 | T: Try, 84 | { 85 | fn reduce(self, left: T, right: T) -> T { 86 | match (left.branch(), right.branch()) { 87 | (Continue(left), Continue(right)) => (self.reduce_op)(left, right), 88 | (Break(r), _) | (_, Break(r)) => T::from_residual(r), 89 | } 90 | } 91 | } 92 | 93 | struct TryReduceFolder<'r, R, T: Try> { 94 | reduce_op: &'r R, 95 | control: ControlFlow, 96 | full: &'r AtomicBool, 97 | } 98 | 99 | impl<'r, R, T> Folder for TryReduceFolder<'r, R, T> 100 | where 101 | R: Fn(T::Output, T::Output) -> T, 102 | T: Try, 103 | { 104 | type Result = T; 105 | 106 | fn consume(mut self, item: T) -> Self { 107 | let reduce_op = self.reduce_op; 108 | self.control = match (self.control, item.branch()) { 109 | (Continue(left), Continue(right)) => reduce_op(left, right).branch(), 110 | (control @ Break(_), _) | (_, control @ Break(_)) => control, 111 | }; 112 | if let Break(_) = self.control { 113 | 
self.full.store(true, Ordering::Relaxed); 114 | } 115 | self 116 | } 117 | 118 | fn complete(self) -> T { 119 | match self.control { 120 | Continue(c) => T::from_output(c), 121 | Break(r) => T::from_residual(r), 122 | } 123 | } 124 | 125 | fn full(&self) -> bool { 126 | match self.control { 127 | Break(_) => true, 128 | _ => self.full.load(Ordering::Relaxed), 129 | } 130 | } 131 | } 132 | -------------------------------------------------------------------------------- /src/iter/try_reduce_with.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::ParallelIterator; 3 | use super::Try; 4 | 5 | use std::ops::ControlFlow::{self, Break, Continue}; 6 | use std::sync::atomic::{AtomicBool, Ordering}; 7 | 8 | pub(super) fn try_reduce_with(pi: PI, reduce_op: R) -> Option 9 | where 10 | PI: ParallelIterator, 11 | R: Fn(T::Output, T::Output) -> T + Sync, 12 | T: Try + Send, 13 | { 14 | let full = AtomicBool::new(false); 15 | let consumer = TryReduceWithConsumer { 16 | reduce_op: &reduce_op, 17 | full: &full, 18 | }; 19 | pi.drive_unindexed(consumer) 20 | } 21 | 22 | struct TryReduceWithConsumer<'r, R> { 23 | reduce_op: &'r R, 24 | full: &'r AtomicBool, 25 | } 26 | 27 | impl<'r, R> Copy for TryReduceWithConsumer<'r, R> {} 28 | 29 | impl<'r, R> Clone for TryReduceWithConsumer<'r, R> { 30 | fn clone(&self) -> Self { 31 | *self 32 | } 33 | } 34 | 35 | impl<'r, R, T> Consumer for TryReduceWithConsumer<'r, R> 36 | where 37 | R: Fn(T::Output, T::Output) -> T + Sync, 38 | T: Try + Send, 39 | { 40 | type Folder = TryReduceWithFolder<'r, R, T>; 41 | type Reducer = Self; 42 | type Result = Option; 43 | 44 | fn split_at(self, _index: usize) -> (Self, Self, Self) { 45 | (self, self, self) 46 | } 47 | 48 | fn into_folder(self) -> Self::Folder { 49 | TryReduceWithFolder { 50 | reduce_op: self.reduce_op, 51 | opt_control: None, 52 | full: self.full, 53 | } 54 | } 55 | 56 | fn full(&self) -> bool { 57 | 
self.full.load(Ordering::Relaxed) 58 | } 59 | } 60 | 61 | impl<'r, R, T> UnindexedConsumer for TryReduceWithConsumer<'r, R> 62 | where 63 | R: Fn(T::Output, T::Output) -> T + Sync, 64 | T: Try + Send, 65 | { 66 | fn split_off_left(&self) -> Self { 67 | *self 68 | } 69 | 70 | fn to_reducer(&self) -> Self::Reducer { 71 | *self 72 | } 73 | } 74 | 75 | impl<'r, R, T> Reducer> for TryReduceWithConsumer<'r, R> 76 | where 77 | R: Fn(T::Output, T::Output) -> T + Sync, 78 | T: Try, 79 | { 80 | fn reduce(self, left: Option, right: Option) -> Option { 81 | let reduce_op = self.reduce_op; 82 | match (left, right) { 83 | (Some(left), Some(right)) => match (left.branch(), right.branch()) { 84 | (Continue(left), Continue(right)) => Some(reduce_op(left, right)), 85 | (Break(r), _) | (_, Break(r)) => Some(T::from_residual(r)), 86 | }, 87 | (None, x) | (x, None) => x, 88 | } 89 | } 90 | } 91 | 92 | struct TryReduceWithFolder<'r, R, T: Try> { 93 | reduce_op: &'r R, 94 | opt_control: Option>, 95 | full: &'r AtomicBool, 96 | } 97 | 98 | impl<'r, R, T> Folder for TryReduceWithFolder<'r, R, T> 99 | where 100 | R: Fn(T::Output, T::Output) -> T, 101 | T: Try, 102 | { 103 | type Result = Option; 104 | 105 | fn consume(mut self, item: T) -> Self { 106 | let reduce_op = self.reduce_op; 107 | let control = match (self.opt_control, item.branch()) { 108 | (Some(Continue(left)), Continue(right)) => reduce_op(left, right).branch(), 109 | (Some(control @ Break(_)), _) | (_, control) => control, 110 | }; 111 | if let Break(_) = control { 112 | self.full.store(true, Ordering::Relaxed) 113 | } 114 | self.opt_control = Some(control); 115 | self 116 | } 117 | 118 | fn complete(self) -> Option { 119 | match self.opt_control { 120 | Some(Continue(c)) => Some(T::from_output(c)), 121 | Some(Break(r)) => Some(T::from_residual(r)), 122 | None => None, 123 | } 124 | } 125 | 126 | fn full(&self) -> bool { 127 | match self.opt_control { 128 | Some(Break(_)) => true, 129 | _ => self.full.load(Ordering::Relaxed), 
130 | } 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /src/iter/zip_eq.rs: -------------------------------------------------------------------------------- 1 | use super::plumbing::*; 2 | use super::*; 3 | 4 | /// An [`IndexedParallelIterator`] that iterates over two parallel iterators of equal 5 | /// length simultaneously. 6 | /// 7 | /// This struct is created by the [`zip_eq`] method on [`IndexedParallelIterator`], 8 | /// see its documentation for more information. 9 | /// 10 | /// [`zip_eq`]: trait.IndexedParallelIterator.html#method.zip_eq 11 | /// [`IndexedParallelIterator`]: trait.IndexedParallelIterator.html 12 | #[must_use = "iterator adaptors are lazy and do nothing unless consumed"] 13 | #[derive(Debug, Clone)] 14 | pub struct ZipEq { 15 | zip: Zip, 16 | } 17 | 18 | impl ZipEq 19 | where 20 | A: IndexedParallelIterator, 21 | B: IndexedParallelIterator, 22 | { 23 | /// Creates a new `ZipEq` iterator. 24 | pub(super) fn new(a: A, b: B) -> Self { 25 | ZipEq { 26 | zip: super::Zip::new(a, b), 27 | } 28 | } 29 | } 30 | 31 | impl ParallelIterator for ZipEq 32 | where 33 | A: IndexedParallelIterator, 34 | B: IndexedParallelIterator, 35 | { 36 | type Item = (A::Item, B::Item); 37 | 38 | fn drive_unindexed(self, consumer: C) -> C::Result 39 | where 40 | C: UnindexedConsumer, 41 | { 42 | bridge(self.zip, consumer) 43 | } 44 | 45 | fn opt_len(&self) -> Option { 46 | Some(self.zip.len()) 47 | } 48 | } 49 | 50 | impl IndexedParallelIterator for ZipEq 51 | where 52 | A: IndexedParallelIterator, 53 | B: IndexedParallelIterator, 54 | { 55 | fn drive(self, consumer: C) -> C::Result 56 | where 57 | C: Consumer, 58 | { 59 | bridge(self.zip, consumer) 60 | } 61 | 62 | fn len(&self) -> usize { 63 | self.zip.len() 64 | } 65 | 66 | fn with_producer(self, callback: CB) -> CB::Output 67 | where 68 | CB: ProducerCallback, 69 | { 70 | self.zip.with_producer(callback) 71 | } 72 | } 73 | 
-------------------------------------------------------------------------------- /src/math.rs: -------------------------------------------------------------------------------- 1 | use std::ops::{Bound, Range, RangeBounds}; 2 | 3 | /// Divide `n` by `divisor`, and round up to the nearest integer 4 | /// if not evenly divisible. 5 | #[inline] 6 | pub(super) fn div_round_up(n: usize, divisor: usize) -> usize { 7 | debug_assert!(divisor != 0, "Division by zero!"); 8 | if n == 0 { 9 | 0 10 | } else { 11 | (n - 1) / divisor + 1 12 | } 13 | } 14 | 15 | /// Normalize arbitrary `RangeBounds` to a `Range` 16 | pub(super) fn simplify_range(range: impl RangeBounds, len: usize) -> Range { 17 | let start = match range.start_bound() { 18 | Bound::Unbounded => 0, 19 | Bound::Included(&i) if i <= len => i, 20 | Bound::Excluded(&i) if i < len => i + 1, 21 | bound => panic!("range start {:?} should be <= length {}", bound, len), 22 | }; 23 | let end = match range.end_bound() { 24 | Bound::Unbounded => len, 25 | Bound::Excluded(&i) if i <= len => i, 26 | Bound::Included(&i) if i < len => i + 1, 27 | bound => panic!("range end {:?} should be <= length {}", bound, len), 28 | }; 29 | if start > end { 30 | panic!( 31 | "range start {:?} should be <= range end {:?}", 32 | range.start_bound(), 33 | range.end_bound() 34 | ); 35 | } 36 | start..end 37 | } 38 | 39 | #[cfg(test)] 40 | mod tests { 41 | use super::*; 42 | 43 | #[test] 44 | fn check_div_round_up() { 45 | assert_eq!(0, div_round_up(0, 5)); 46 | assert_eq!(1, div_round_up(5, 5)); 47 | assert_eq!(1, div_round_up(1, 5)); 48 | assert_eq!(2, div_round_up(3, 2)); 49 | assert_eq!( 50 | usize::max_value() / 2 + 1, 51 | div_round_up(usize::max_value(), 2) 52 | ); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/par_either.rs: -------------------------------------------------------------------------------- 1 | use crate::iter::plumbing::*; 2 | use crate::iter::Either::{Left, Right}; 3 | 
use crate::iter::*; 4 | 5 | /// `Either` is a parallel iterator if both `L` and `R` are parallel iterators. 6 | impl ParallelIterator for Either 7 | where 8 | L: ParallelIterator, 9 | R: ParallelIterator, 10 | { 11 | type Item = L::Item; 12 | 13 | fn drive_unindexed(self, consumer: C) -> C::Result 14 | where 15 | C: UnindexedConsumer, 16 | { 17 | match self { 18 | Left(iter) => iter.drive_unindexed(consumer), 19 | Right(iter) => iter.drive_unindexed(consumer), 20 | } 21 | } 22 | 23 | fn opt_len(&self) -> Option { 24 | self.as_ref().either(L::opt_len, R::opt_len) 25 | } 26 | } 27 | 28 | impl IndexedParallelIterator for Either 29 | where 30 | L: IndexedParallelIterator, 31 | R: IndexedParallelIterator, 32 | { 33 | fn drive(self, consumer: C) -> C::Result 34 | where 35 | C: Consumer, 36 | { 37 | match self { 38 | Left(iter) => iter.drive(consumer), 39 | Right(iter) => iter.drive(consumer), 40 | } 41 | } 42 | 43 | fn len(&self) -> usize { 44 | self.as_ref().either(L::len, R::len) 45 | } 46 | 47 | fn with_producer(self, callback: CB) -> CB::Output 48 | where 49 | CB: ProducerCallback, 50 | { 51 | match self { 52 | Left(iter) => iter.with_producer(callback), 53 | Right(iter) => iter.with_producer(callback), 54 | } 55 | } 56 | } 57 | 58 | /// `Either` can be extended if both `L` and `R` are parallel extendable. 59 | impl ParallelExtend for Either 60 | where 61 | L: ParallelExtend, 62 | R: ParallelExtend, 63 | T: Send, 64 | { 65 | fn par_extend(&mut self, par_iter: I) 66 | where 67 | I: IntoParallelIterator, 68 | { 69 | match self.as_mut() { 70 | Left(collection) => collection.par_extend(par_iter), 71 | Right(collection) => collection.par_extend(par_iter), 72 | } 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /src/prelude.rs: -------------------------------------------------------------------------------- 1 | //! The rayon prelude imports the various `ParallelIterator` traits. 2 | //! 
The intention is that one can include `use rayon::prelude::*` and 3 | //! have easy access to the various traits and methods you will need. 4 | 5 | pub use crate::iter::FromParallelIterator; 6 | pub use crate::iter::IndexedParallelIterator; 7 | pub use crate::iter::IntoParallelIterator; 8 | pub use crate::iter::IntoParallelRefIterator; 9 | pub use crate::iter::IntoParallelRefMutIterator; 10 | pub use crate::iter::ParallelBridge; 11 | pub use crate::iter::ParallelDrainFull; 12 | pub use crate::iter::ParallelDrainRange; 13 | pub use crate::iter::ParallelExtend; 14 | pub use crate::iter::ParallelIterator; 15 | pub use crate::slice::ParallelSlice; 16 | pub use crate::slice::ParallelSliceMut; 17 | pub use crate::str::ParallelString; 18 | -------------------------------------------------------------------------------- /src/private.rs: -------------------------------------------------------------------------------- 1 | //! The public parts of this private module are used to create traits 2 | //! that cannot be implemented outside of our own crate. This way we 3 | //! can feel free to extend those traits without worrying about it 4 | //! being a breaking change for other implementations. 5 | 6 | /// If this type is pub but not publicly reachable, third parties 7 | /// can't name it and can't implement traits using it. 8 | #[allow(missing_debug_implementations)] 9 | pub struct PrivateMarker; 10 | 11 | macro_rules! private_decl { 12 | () => { 13 | /// This trait is private; this method exists to make it 14 | /// impossible to implement outside the crate. 15 | #[doc(hidden)] 16 | fn __rayon_private__(&self) -> crate::private::PrivateMarker; 17 | }; 18 | } 19 | 20 | macro_rules! 
private_impl {
    () => {
        fn __rayon_private__(&self) -> crate::private::PrivateMarker {
            crate::private::PrivateMarker
        }
    };
}
-------------------------------------------------------------------------------- /src/result.rs: --------------------------------------------------------------------------------
//! Parallel iterator types for [results][std::result]
//!
//! You will rarely need to interact with this module directly unless you need
//! to name one of the iterator types.
//!
//! [std::result]: https://doc.rust-lang.org/stable/std/result/

use crate::iter::plumbing::*;
use crate::iter::*;
use std::sync::Mutex;

use crate::option;

/// Parallel iterator over a result
#[derive(Debug, Clone)]
pub struct IntoIter<T: Send> {
    // An `Ok` result is iterated like `Some`, an `Err` like `None`.
    inner: option::IntoIter<T>,
}

impl<T: Send, E> IntoParallelIterator for Result<T, E> {
    type Item = T;
    type Iter = IntoIter<T>;

    fn into_par_iter(self) -> Self::Iter {
        IntoIter {
            inner: self.ok().into_par_iter(),
        }
    }
}

delegate_indexed_iterator! {
    IntoIter<T> => T,
    impl<T: Send>
}

/// Parallel iterator over an immutable reference to a result
#[derive(Debug)]
pub struct Iter<'a, T: Sync> {
    inner: option::IntoIter<&'a T>,
}

impl<'a, T: Sync> Clone for Iter<'a, T> {
    fn clone(&self) -> Self {
        Iter {
            inner: self.inner.clone(),
        }
    }
}

impl<'a, T: Sync, E> IntoParallelIterator for &'a Result<T, E> {
    type Item = &'a T;
    type Iter = Iter<'a, T>;

    fn into_par_iter(self) -> Self::Iter {
        Iter {
            inner: self.as_ref().ok().into_par_iter(),
        }
    }
}

delegate_indexed_iterator!
{ 62 | Iter<'a, T> => &'a T, 63 | impl<'a, T: Sync + 'a> 64 | } 65 | 66 | /// Parallel iterator over a mutable reference to a result 67 | #[derive(Debug)] 68 | pub struct IterMut<'a, T: Send> { 69 | inner: option::IntoIter<&'a mut T>, 70 | } 71 | 72 | impl<'a, T: Send, E> IntoParallelIterator for &'a mut Result { 73 | type Item = &'a mut T; 74 | type Iter = IterMut<'a, T>; 75 | 76 | fn into_par_iter(self) -> Self::Iter { 77 | IterMut { 78 | inner: self.as_mut().ok().into_par_iter(), 79 | } 80 | } 81 | } 82 | 83 | delegate_indexed_iterator! { 84 | IterMut<'a, T> => &'a mut T, 85 | impl<'a, T: Send + 'a> 86 | } 87 | 88 | /// Collect an arbitrary `Result`-wrapped collection. 89 | /// 90 | /// If any item is `Err`, then all previous `Ok` items collected are 91 | /// discarded, and it returns that error. If there are multiple errors, the 92 | /// one returned is not deterministic. 93 | impl FromParallelIterator> for Result 94 | where 95 | C: FromParallelIterator, 96 | T: Send, 97 | E: Send, 98 | { 99 | fn from_par_iter(par_iter: I) -> Self 100 | where 101 | I: IntoParallelIterator>, 102 | { 103 | fn ok(saved: &Mutex>) -> impl Fn(Result) -> Option + '_ { 104 | move |item| match item { 105 | Ok(item) => Some(item), 106 | Err(error) => { 107 | // We don't need a blocking `lock()`, as anybody 108 | // else holding the lock will also be writing 109 | // `Some(error)`, and then ours is irrelevant. 
110 | if let Ok(mut guard) = saved.try_lock() { 111 | if guard.is_none() { 112 | *guard = Some(error); 113 | } 114 | } 115 | None 116 | } 117 | } 118 | } 119 | 120 | let saved_error = Mutex::new(None); 121 | let collection = par_iter 122 | .into_par_iter() 123 | .map(ok(&saved_error)) 124 | .while_some() 125 | .collect(); 126 | 127 | match saved_error.into_inner().unwrap() { 128 | Some(error) => Err(error), 129 | None => Ok(collection), 130 | } 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /src/string.rs: -------------------------------------------------------------------------------- 1 | //! This module contains the parallel iterator types for owned strings 2 | //! (`String`). You will rarely need to interact with it directly 3 | //! unless you have need to name one of the iterator types. 4 | 5 | use crate::iter::plumbing::*; 6 | use crate::math::simplify_range; 7 | use crate::prelude::*; 8 | use std::ops::{Range, RangeBounds}; 9 | 10 | impl<'a> ParallelDrainRange for &'a mut String { 11 | type Iter = Drain<'a>; 12 | type Item = char; 13 | 14 | fn par_drain>(self, range: R) -> Self::Iter { 15 | Drain { 16 | range: simplify_range(range, self.len()), 17 | string: self, 18 | } 19 | } 20 | } 21 | 22 | /// Draining parallel iterator that moves a range of characters out of a string, 23 | /// but keeps the total capacity. 24 | #[derive(Debug)] 25 | pub struct Drain<'a> { 26 | string: &'a mut String, 27 | range: Range, 28 | } 29 | 30 | impl<'a> ParallelIterator for Drain<'a> { 31 | type Item = char; 32 | 33 | fn drive_unindexed(self, consumer: C) -> C::Result 34 | where 35 | C: UnindexedConsumer, 36 | { 37 | self.string[self.range.clone()] 38 | .par_chars() 39 | .drive_unindexed(consumer) 40 | } 41 | } 42 | 43 | impl<'a> Drop for Drain<'a> { 44 | fn drop(&mut self) { 45 | // Remove the drained range. 
46 | self.string.drain(self.range.clone()); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /tests/chars.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | use std::char; 3 | 4 | #[test] 5 | fn half_open_correctness() { 6 | let low = char::from_u32(0xD800 - 0x7).unwrap(); 7 | let high = char::from_u32(0xE000 + 0x7).unwrap(); 8 | 9 | let range = low..high; 10 | let mut chars: Vec = range.into_par_iter().collect(); 11 | chars.sort(); 12 | 13 | assert_eq!( 14 | chars, 15 | vec![ 16 | '\u{D7F9}', '\u{D7FA}', '\u{D7FB}', '\u{D7FC}', '\u{D7FD}', '\u{D7FE}', '\u{D7FF}', 17 | '\u{E000}', '\u{E001}', '\u{E002}', '\u{E003}', '\u{E004}', '\u{E005}', '\u{E006}', 18 | ] 19 | ); 20 | } 21 | 22 | #[test] 23 | fn closed_correctness() { 24 | let low = char::from_u32(0xD800 - 0x7).unwrap(); 25 | let high = char::from_u32(0xE000 + 0x7).unwrap(); 26 | 27 | let range = low..=high; 28 | let mut chars: Vec = range.into_par_iter().collect(); 29 | chars.sort(); 30 | 31 | assert_eq!( 32 | chars, 33 | vec![ 34 | '\u{D7F9}', '\u{D7FA}', '\u{D7FB}', '\u{D7FC}', '\u{D7FD}', '\u{D7FE}', '\u{D7FF}', 35 | '\u{E000}', '\u{E001}', '\u{E002}', '\u{E003}', '\u{E004}', '\u{E005}', '\u{E006}', 36 | '\u{E007}', 37 | ] 38 | ); 39 | } 40 | -------------------------------------------------------------------------------- /tests/collect.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | 3 | use std::panic; 4 | use std::sync::atomic::AtomicUsize; 5 | use std::sync::atomic::Ordering; 6 | use std::sync::Mutex; 7 | 8 | #[test] 9 | #[cfg_attr(not(panic = "unwind"), ignore)] 10 | fn collect_drop_on_unwind() { 11 | struct Recorddrop<'a>(i64, &'a Mutex>); 12 | 13 | impl<'a> Drop for Recorddrop<'a> { 14 | fn drop(&mut self) { 15 | self.1.lock().unwrap().push(self.0); 16 | } 17 | } 18 | 19 | let test_collect_panic = |will_panic: bool| { 20 
| let test_vec_len = 1024; 21 | let panic_point = 740; 22 | 23 | let mut inserts = Mutex::new(Vec::new()); 24 | let mut drops = Mutex::new(Vec::new()); 25 | 26 | let mut a = (0..test_vec_len).collect::>(); 27 | let b = (0..test_vec_len).collect::>(); 28 | 29 | let _result = panic::catch_unwind(panic::AssertUnwindSafe(|| { 30 | let mut result = Vec::new(); 31 | a.par_iter_mut() 32 | .zip(&b) 33 | .map(|(&mut a, &b)| { 34 | if a > panic_point && will_panic { 35 | panic!("unwinding for test"); 36 | } 37 | let elt = a + b; 38 | inserts.lock().unwrap().push(elt); 39 | Recorddrop(elt, &drops) 40 | }) 41 | .collect_into_vec(&mut result); 42 | 43 | // If we reach this point, this must pass 44 | assert_eq!(a.len(), result.len()); 45 | })); 46 | 47 | let inserts = inserts.get_mut().unwrap(); 48 | let drops = drops.get_mut().unwrap(); 49 | println!("{:?}", inserts); 50 | println!("{:?}", drops); 51 | 52 | assert_eq!(inserts.len(), drops.len(), "Incorrect number of drops"); 53 | // sort to normalize order 54 | inserts.sort(); 55 | drops.sort(); 56 | assert_eq!(inserts, drops, "Incorrect elements were dropped"); 57 | }; 58 | 59 | for &should_panic in &[true, false] { 60 | test_collect_panic(should_panic); 61 | } 62 | } 63 | 64 | #[test] 65 | #[cfg_attr(not(panic = "unwind"), ignore)] 66 | fn collect_drop_on_unwind_zst() { 67 | static INSERTS: AtomicUsize = AtomicUsize::new(0); 68 | static DROPS: AtomicUsize = AtomicUsize::new(0); 69 | 70 | struct RecorddropZst; 71 | 72 | impl Drop for RecorddropZst { 73 | fn drop(&mut self) { 74 | DROPS.fetch_add(1, Ordering::SeqCst); 75 | } 76 | } 77 | 78 | let test_collect_panic = |will_panic: bool| { 79 | INSERTS.store(0, Ordering::SeqCst); 80 | DROPS.store(0, Ordering::SeqCst); 81 | 82 | let test_vec_len = 1024; 83 | let panic_point = 740; 84 | 85 | let a = (0..test_vec_len).collect::>(); 86 | 87 | let _result = panic::catch_unwind(panic::AssertUnwindSafe(|| { 88 | let mut result = Vec::new(); 89 | a.par_iter() 90 | .map(|&a| { 91 | if a > 
panic_point && will_panic { 92 | panic!("unwinding for test"); 93 | } 94 | INSERTS.fetch_add(1, Ordering::SeqCst); 95 | RecorddropZst 96 | }) 97 | .collect_into_vec(&mut result); 98 | 99 | // If we reach this point, this must pass 100 | assert_eq!(a.len(), result.len()); 101 | })); 102 | 103 | let inserts = INSERTS.load(Ordering::SeqCst); 104 | let drops = DROPS.load(Ordering::SeqCst); 105 | 106 | assert_eq!(inserts, drops, "Incorrect number of drops"); 107 | assert!(will_panic || drops == test_vec_len) 108 | }; 109 | 110 | for &should_panic in &[true, false] { 111 | test_collect_panic(should_panic); 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /tests/cross-pool.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | use rayon::ThreadPoolBuilder; 3 | 4 | #[test] 5 | #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)] 6 | fn cross_pool_busy() { 7 | let pool1 = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); 8 | let pool2 = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); 9 | 10 | let n: i32 = 100; 11 | let sum: i32 = pool1.install(move || { 12 | // Each item will block on pool2, but pool1 can continue processing other work from the 13 | // parallel iterator in the meantime. There's a chance that pool1 will still be awake to 14 | // see the latch set without being tickled, and then it will drop that stack job. The latch 15 | // internals must not assume that the job will still be alive after it's set! 
16 | (1..=n) 17 | .into_par_iter() 18 | .map(|i| pool2.install(move || i)) 19 | .sum() 20 | }); 21 | assert_eq!(sum, n * (n + 1) / 2); 22 | } 23 | -------------------------------------------------------------------------------- /tests/drain_vec.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | 3 | #[test] 4 | fn drain_vec_yielded() { 5 | let mut vec_org = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; 6 | 7 | let yielded = vec_org.par_drain(0..5).collect::>(); 8 | 9 | assert_eq!(&yielded, &[0, 1, 2, 3, 4]); 10 | assert_eq!(&vec_org, &[5, 6, 7, 8, 9]); 11 | } 12 | 13 | #[test] 14 | fn drain_vec_dropped() { 15 | let mut vec_org = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; 16 | 17 | let yielded = vec_org.par_drain(0..5); 18 | 19 | drop(yielded); 20 | assert_eq!(&vec_org, &[5, 6, 7, 8, 9]); 21 | } 22 | 23 | #[test] 24 | fn drain_vec_empty_range_yielded() { 25 | let mut vec_org = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; 26 | 27 | let yielded = vec_org.par_drain(5..5).collect::>(); 28 | 29 | assert_eq!(&yielded, &[]); 30 | assert_eq!(&vec_org, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); 31 | } 32 | 33 | #[test] 34 | fn drain_vec_empty_range_dropped() { 35 | let mut vec_org = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; 36 | 37 | let yielded = vec_org.par_drain(5..5); 38 | 39 | drop(yielded); 40 | assert_eq!(&vec_org, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); 41 | } 42 | -------------------------------------------------------------------------------- /tests/intersperse.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | 3 | #[test] 4 | fn check_intersperse() { 5 | let v: Vec<_> = (0..1000).into_par_iter().intersperse(-1).collect(); 6 | assert_eq!(v.len(), 1999); 7 | for (i, x) in v.into_iter().enumerate() { 8 | assert_eq!(x, if i % 2 == 0 { i as i32 / 2 } else { -1 }); 9 | } 10 | } 11 | 12 | #[test] 13 | fn check_intersperse_again() { 14 | let v: Vec<_> = (0..1000) 15 | .into_par_iter() 16 | 
.intersperse(-1) 17 | .intersperse(-2) 18 | .collect(); 19 | assert_eq!(v.len(), 3997); 20 | for (i, x) in v.into_iter().enumerate() { 21 | let y = match i % 4 { 22 | 0 => i as i32 / 4, 23 | 2 => -1, 24 | _ => -2, 25 | }; 26 | assert_eq!(x, y); 27 | } 28 | } 29 | 30 | #[test] 31 | fn check_intersperse_unindexed() { 32 | let v: Vec<_> = (0..1000).map(|i| i.to_string()).collect(); 33 | let s = v.join(","); 34 | let s2 = v.join(";"); 35 | let par: String = s.par_split(',').intersperse(";").collect(); 36 | assert_eq!(par, s2); 37 | } 38 | 39 | #[test] 40 | fn check_intersperse_producer() { 41 | (0..1000) 42 | .into_par_iter() 43 | .intersperse(-1) 44 | .zip_eq(0..1999) 45 | .for_each(|(x, i)| { 46 | assert_eq!(x, if i % 2 == 0 { i / 2 } else { -1 }); 47 | }); 48 | } 49 | 50 | #[test] 51 | fn check_intersperse_rev() { 52 | (0..1000) 53 | .into_par_iter() 54 | .intersperse(-1) 55 | .zip_eq(0..1999) 56 | .rev() 57 | .for_each(|(x, i)| { 58 | assert_eq!(x, if i % 2 == 0 { i / 2 } else { -1 }); 59 | }); 60 | } 61 | -------------------------------------------------------------------------------- /tests/issue671-unzip.rs: -------------------------------------------------------------------------------- 1 | #![type_length_limit = "10000"] 2 | 3 | use rayon::prelude::*; 4 | 5 | #[test] 6 | fn type_length_limit() { 7 | let input = vec![1, 2, 3, 4, 5]; 8 | let (indexes, (squares, cubes)): (Vec<_>, (Vec<_>, Vec<_>)) = input 9 | .par_iter() 10 | .map(|x| (x * x, x * x * x)) 11 | .enumerate() 12 | .unzip(); 13 | 14 | drop(indexes); 15 | drop(squares); 16 | drop(cubes); 17 | } 18 | -------------------------------------------------------------------------------- /tests/issue671.rs: -------------------------------------------------------------------------------- 1 | #![type_length_limit = "500000"] 2 | 3 | use rayon::prelude::*; 4 | 5 | #[test] 6 | fn type_length_limit() { 7 | let _ = Vec::>::new() 8 | .into_par_iter() 9 | .map(|x| x) 10 | .map(|x| x) 11 | .map(|x| x) 12 | .map(|x| x) 
13 | .map(|x| x) 14 | .map(|x| x) 15 | .collect::>(); 16 | } 17 | -------------------------------------------------------------------------------- /tests/iter_panic.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | use rayon::ThreadPoolBuilder; 3 | use std::ops::Range; 4 | use std::panic::{self, UnwindSafe}; 5 | use std::sync::atomic::{AtomicUsize, Ordering}; 6 | 7 | const ITER: Range = 0..0x1_0000; 8 | const PANIC: i32 = 0xC000; 9 | 10 | fn check(&i: &i32) { 11 | if i == PANIC { 12 | panic!("boom") 13 | } 14 | } 15 | 16 | #[test] 17 | #[should_panic(expected = "boom")] 18 | fn iter_panic() { 19 | ITER.into_par_iter().for_each(|i| check(&i)); 20 | } 21 | 22 | #[test] 23 | #[cfg_attr(not(panic = "unwind"), ignore)] 24 | fn iter_panic_fuse() { 25 | // We only use a single thread in order to make the behavior 26 | // of 'panic_fuse' deterministic 27 | let pool = ThreadPoolBuilder::new().num_threads(1).build().unwrap(); 28 | 29 | pool.install(|| { 30 | fn count(iter: impl ParallelIterator + UnwindSafe) -> usize { 31 | let count = AtomicUsize::new(0); 32 | let result = panic::catch_unwind(|| { 33 | iter.for_each(|_| { 34 | count.fetch_add(1, Ordering::Relaxed); 35 | }); 36 | }); 37 | assert!(result.is_err()); 38 | count.into_inner() 39 | } 40 | 41 | // Without `panic_fuse()`, we'll reach every item except the panicking one. 42 | let expected = ITER.len() - 1; 43 | let iter = ITER.into_par_iter().with_max_len(1); 44 | assert_eq!(count(iter.clone().inspect(check)), expected); 45 | 46 | // With `panic_fuse()` anywhere in the chain, we'll reach fewer items. 47 | assert!(count(iter.clone().inspect(check).panic_fuse()) < expected); 48 | assert!(count(iter.clone().panic_fuse().inspect(check)) < expected); 49 | 50 | // Try in reverse to be sure we hit the producer case. 
51 | assert!(count(iter.panic_fuse().inspect(check).rev()) < expected); 52 | }); 53 | } 54 | -------------------------------------------------------------------------------- /tests/named-threads.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashSet; 2 | 3 | use rayon::prelude::*; 4 | use rayon::*; 5 | 6 | #[test] 7 | #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)] 8 | fn named_threads() { 9 | ThreadPoolBuilder::new() 10 | .thread_name(|i| format!("hello-name-test-{}", i)) 11 | .build_global() 12 | .unwrap(); 13 | 14 | const N: usize = 10000; 15 | 16 | let thread_names = (0..N) 17 | .into_par_iter() 18 | .flat_map(|_| ::std::thread::current().name().map(str::to_owned)) 19 | .collect::>(); 20 | 21 | let all_contains_name = thread_names 22 | .iter() 23 | .all(|name| name.starts_with("hello-name-test-")); 24 | assert!(all_contains_name); 25 | } 26 | -------------------------------------------------------------------------------- /tests/par_bridge_recursion.rs: -------------------------------------------------------------------------------- 1 | use rayon::prelude::*; 2 | use std::iter::once_with; 3 | 4 | const N: usize = 100_000; 5 | 6 | #[test] 7 | #[cfg_attr(any(target_os = "emscripten", target_family = "wasm"), ignore)] 8 | fn par_bridge_recursion() { 9 | let pool = rayon::ThreadPoolBuilder::new() 10 | .num_threads(10) 11 | .build() 12 | .unwrap(); 13 | 14 | let seq: Vec<_> = (0..N).map(|i| (i, i.to_string())).collect(); 15 | 16 | pool.broadcast(|_| { 17 | let mut par: Vec<_> = (0..N) 18 | .into_par_iter() 19 | .flat_map(|i| { 20 | once_with(move || { 21 | // Using rayon within the serial iterator creates an opportunity for 22 | // work-stealing to make par_bridge's mutex accidentally recursive. 
23 | rayon::join(move || i, move || i.to_string()) 24 | }) 25 | .par_bridge() 26 | }) 27 | .collect(); 28 | par.par_sort_unstable(); 29 | assert_eq!(seq, par); 30 | }); 31 | } 32 | --------------------------------------------------------------------------------