├── .github └── workflows │ └── ci.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE ├── README.md ├── assets └── logo.png ├── benches └── perf.rs ├── deny.toml └── src ├── atomic.rs ├── barrier.rs ├── bee ├── context.rs ├── error.rs ├── mod.rs ├── queen.rs ├── stock │ ├── call.rs │ ├── echo.rs │ ├── mod.rs │ └── thunk.rs └── worker.rs ├── boxed.rs ├── channel.rs ├── hive ├── builder │ ├── bee.rs │ ├── full.rs │ ├── mod.rs │ ├── open.rs │ └── queue.rs ├── context.rs ├── cores.rs ├── hive.rs ├── husk.rs ├── inner │ ├── builder.rs │ ├── config.rs │ ├── counter.rs │ ├── diagram.mmd │ ├── gate.rs │ ├── mod.rs │ ├── queue │ │ ├── channel.rs │ │ ├── mod.rs │ │ ├── retry.rs │ │ ├── status.rs │ │ └── workstealing.rs │ ├── shared.rs │ └── task.rs ├── mock.rs ├── mod.rs ├── outcome │ ├── batch.rs │ ├── impl.rs │ ├── iter.rs │ ├── mod.rs │ ├── queue.rs │ └── store.rs ├── sentinel.rs ├── util.rs └── weighted.rs ├── lib.rs ├── panic.rs └── util.rs /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Cargo Build & Test 2 | 3 | on: 4 | push: 5 | pull_request: 6 | 7 | env: 8 | CARGO_TERM_COLOR: always 9 | 10 | jobs: 11 | check: 12 | name: Check lints, formatting, and documentation 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v4 16 | - uses: EmbarkStudios/cargo-deny-action@v2 17 | with: 18 | rust-version: "1.85.0" 19 | - uses: actions-rust-lang/setup-rust-toolchain@v1 20 | with: 21 | components: rustfmt, clippy 22 | - run: | 23 | cargo clippy --all-targets -F affinity,local-batch,retry \ 24 | -- -D warnings $(cat .lints | cut -f1 -d"#" | tr '\n' ' ') 25 | - run: cargo fmt -- --check 26 | - run: RUSTDOCFLAGS="-D warnings" cargo doc -F affinity,local-batch,retry 27 | - run: cargo test -F affinity,local-batch,retry --doc 28 | 29 | coverage: 30 | name: Code coverage 31 | runs-on: ubuntu-latest 32 | steps: 33 | - uses: actions/checkout@v4 34 | - uses: actions-rust-lang/setup-rust-toolchain@v1 35 | - name: Install cargo-llvm-cov 36 | uses: taiki-e/install-action@cargo-llvm-cov 37 | - name: Generate code coverage 38 | run: cargo llvm-cov --lcov --output-path lcov.info -F affinity,local-batch,retry 39 | - name: Upload coverage to Codecov 40 | uses: codecov/codecov-action@v3 41 | with: 42 | token: ${{ secrets.CODECOV_TOKEN }} 43 | files: lcov.info 44 | fail_ci_if_error: true 45 | 46 | build_and_test: 47 | name: Build and test all combinations features 48 | runs-on: ubuntu-latest 49 | strategy: 50 | matrix: 51 | toolchain: 52 | - stable 53 | - beta 54 | - nightly 55 | channel: 56 | - default 57 | - crossbeam 58 | - flume 59 | - loole 60 | steps: 61 | - uses: actions/checkout@v4 62 | - uses: actions-rust-lang/setup-rust-toolchain@v1 63 | - uses: actions-rs/cargo@v1 64 | with: 65 | command: install 66 | args: cargo-all-features 67 | - uses: actions-rs/cargo@v1 68 | with: 69 | command: build-all-features 70 | args: -F ${{ matrix.channel }} --all-targets 71 | - uses: actions-rs/cargo@v1 72 | with: 73 | command: test-all-features 74 | args: -F ${{ matrix.channel }} --lib 75 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | debug/ 4 | target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here 
https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | Cargo.lock 9 | 10 | # These are backup files generated by rustfmt 11 | **/*.rs.bk 12 | 13 | # MSVC Windows builds of rustc generate these, which store debugging information 14 | *.pdb 15 | 16 | # RustRover 17 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 18 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 19 | # and can be added to the global gitignore or merged into this file. For a more nuclear 20 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 21 | #.idea/ 22 | 23 | # Added by cargo 24 | 25 | /target 26 | 27 | lcov.info 28 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Beekeeper changelog 2 | 3 | ## 0.3.0 4 | 5 | The general theme of this release is performance improvement by eliminating thread contention due to unnecessary locking of shared state. This required making some breaking changes to the API. 6 | 7 | * **Breaking** 8 | * `beekeeper::hive::Hive` type signature has changed 9 | * Removed the `W: Worker` parameter as it is redundant (can be obtained from `Q::Kind`) 10 | * Added `T: TaskQueues` to specify the `TaskQueues` implementation 11 | * The `Builder` interface has been re-written to enable maximum flexibility. 12 | * `Builder` is now a trait that must be in scope. 13 | * `ChannelBuilder` implements the previous builder functionality. 14 | * `WorkstealingBuilder` is identical to `ChannelBuilder`, except that it uses workstealing-based task queues (see `Features` below). 15 | * `OpenBuilder` has no type parameters and can be specialized to create a `Hive` with any combination of `Queen` and `TaskQueues`. 16 | * `BeeBuilder` and `FullBuilder` are intermediate types that generally should not be instantiated directly. 17 | * `beekeeper::bee::Queen::create` now takes `&self` rather than `&mut self`. There is a new type, `beekeeper::bee::QueenMut`, with a `create(&mut self)` method, which needs to be wrapped in a `beekeeper::bee::QueenCell` to implement the `Queen` trait. This enables the `Hive` to create new workers without locking in the case of a `Queen` that does not need mutable state. 18 | * `beekeeper::bee::Context` now takes a generic parameter that must be the input type of the `Worker`. 19 | * `beekeeper::hive::Hive::try_into_husk` now has an `urgent` parameter to indicate whether queued tasks should be abandoned when shutting down the hive (`true`) or if they should be allowed to finish processing (`false`). 20 | * The type of `attempt` and `max_retries` has been changed to `u8`. This reduces memory usage and should still allow for the majority of use cases. 21 | * The `::of` methods have been removed from stock `Worker`s in favor of implementing `From`. 22 | * Features 23 | * Added the `TaskQueues` trait, which enables `Hive` to be specialized for different implementations of global (i.e., sending tasks from the `Hive` to worker threads) and local (i.e., worker thread-specific) queues. 24 | * `ChannelTaskQueues` implements the existing behavior, using a channel for sending tasks. 25 | * `WorkstealingTaskQueues` has been added to implement the workstealing pattern, based on `crossbeam::deque`.
26 | * Added the `local-batch` feature, which enables worker threads to queue up batches of tasks locally, which can alleviate contention between threads in the pool, especially when there are many short-lived tasks. 27 | * When this feature is enabled, tasks can be optionally weighted (by wrapping each input in `crate::hive::Weighted`) to help evenly distribute tasks with variable processing times. 28 | * Enabling this feature should be transparent (i.e., not break existing code), and the `Hive`'s task submission methods support both weighted and unweighted inputs (due to the blanket implementation of `From for Weighted`); however, there are some cases where it is now necessary to specify the input type where before it could be elided. 29 | * Added the `Context::submit` method, which enables tasks to submit new tasks to the `Hive`. 30 | * Other 31 | * Switched to using thread-local retry queues for the implementation of the `retry` feature, to reduce thread contention. 32 | * Switched to storing `Outcome`s in the hive using a data structure that does not require locking when inserting, which should reduce thread contention when using `*_store` operations. 33 | * Switched to using `crossbeam_channel` for the task input channel in `ChannelTaskQueues`. These are multi-producer, multi-consumer channels (mpmc; as opposed to `std::mpsc`, which is single-consumer), which means it is no longer necessary for worker threads to acquire a Mutex lock on the channel receiver when getting tasks. 34 | * Added the `beekeeper::hive::mock` module, which has a `MockTaskRunner` for `apply`ing a worker in a mock context. This is useful for testing your `Worker`. 35 | * Updated to `2024` edition and Rust version `1.85` 36 | 37 | ## 0.2.1 38 | 39 | * Bugfixes 40 | * Reverted accidental change to default features in Cargo.toml 41 | * Panics during drop of worker threads 42 | * Other 43 | * Added initial performance benchmarks 44 | 45 | ## 0.2.0 46 | 47 | * **Breaking** 48 | * `Builder::build*`, `Husk::into_hive*` now return `Hive` rather than `Result` 49 | * `beekeeper::hive::SpawnError` has been removed 50 | * `Hive::grow` and `Hive::use_all_cores` now return `Result` rather than `usize` 51 | * `Hive::num_threads` has been renamed `Hive::max_workers` 52 | * Features 53 | * `Hive` now keeps track of spawn results 54 | * `Hive::alive_workers` reports the number of worker threads that are currently alive (<= `max_workers`) 55 | * `Hive::has_dead_workers` returns `true` if the `Hive` has encountered any errors spawning worker threads 56 | * `Hive::revive_workers` attempts to re-spawn any dead worker threads 57 | * Bugfixes 58 | * Ordered iterators would enter an infinite loop if there were missing indices 59 | 60 | ## 0.1.0 61 | 62 | * Initial release 63 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "beekeeper" 3 | description = "A full-featured worker pool library for parallelizing tasks" 4 | version = "0.3.0" 5 | edition = "2024" 6 | rust-version = "1.85" 7 | authors = ["John Didion "] 8 | repository = "https://github.com/jdidion/beekeeper" 9 | license = "MIT OR Apache-2.0" 10 | 11 | [dependencies] 12 | crossbeam-channel = "0.5.13" 13 | crossbeam-deque = "0.8.6" 14 | crossbeam-queue = "0.3.12" 15 | crossbeam-utils = "0.8.20" 16 | derive_more = { version = "2.0.1", features = ["debug"] } 17 | nanorand = { version = "0.7.0", default-features = false, features = [
18 | "std", 19 | "tls", 20 | ] } 21 | num = "0.4.3" 22 | num_cpus = "1.16.0" 23 | parking_lot = "0.12.3" 24 | paste = "1.0.15" 25 | simple-mermaid = "0.2.0" 26 | thiserror = "1.0.63" 27 | # required with the `affinity` feature 28 | core_affinity = { version = "0.8.1", optional = true } 29 | # required with alternate outcome channel implementations that can be enabled with features 30 | # NOTE: these version requirements could be relaxed as we don't actually depend on the 31 | # functionality of these crates internally (other than in tests) 32 | flume = { version = "0.11.1", optional = true } 33 | loole = { version = "0.4.0", optional = true } 34 | 35 | [dev-dependencies] 36 | divan = "0.1.17" 37 | itertools = "0.14.0" 38 | serial_test = "3.2.0" 39 | rstest = "0.22.0" 40 | stacker = "0.1.17" 41 | aquamarine = "0.6.0" 42 | simple-mermaid = "0.2.0" 43 | 44 | [[bench]] 45 | name = "perf" 46 | harness = false 47 | 48 | [features] 49 | default = ["local-batch"] 50 | affinity = ["dep:core_affinity"] 51 | local-batch = [] 52 | retry = [] 53 | crossbeam = [] 54 | flume = ["dep:flume"] 55 | loole = ["dep:loole"] 56 | 57 | [lints.rust] 58 | unexpected_cfgs = { level = "warn", check-cfg = [ 59 | 'cfg(coverage,coverage_nightly)', 60 | ] } 61 | 62 | [package.metadata.cargo-all-features] 63 | allowlist = ["affinity", "local-batch", "retry"] 64 | 65 | [profile.release] 66 | lto = true 67 | codegen-units = 1 68 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 John Didion 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jdidion/beekeeper/833b65c788a674e7a0671be1999dbe9d88b88703/assets/logo.png -------------------------------------------------------------------------------- /benches/perf.rs: -------------------------------------------------------------------------------- 1 | use beekeeper::bee::stock::EchoWorker; 2 | use beekeeper::hive::{Builder, ChannelBuilder, TaskQueuesBuilder, outcome_channel}; 3 | use divan::{AllocProfiler, Bencher, bench, black_box_drop}; 4 | use itertools::iproduct; 5 | 6 | #[global_allocator] 7 | static ALLOC: AllocProfiler = AllocProfiler::system(); 8 | 9 | const THREADS: &[usize] = &[1, 4, 8, 16]; 10 | const TASKS: &[usize] = &[1, 100, 10_000, 1_000_000]; 11 | 12 | #[bench(args = iproduct!(THREADS, TASKS))] 13 | fn bench_apply_short_task(bencher: Bencher, (num_threads, num_tasks): (&usize, &usize)) { 14 | let hive = ChannelBuilder::empty() 15 | .num_threads(*num_threads) 16 | .with_worker_default::>() 17 | .build(); 18 | bencher.bench_local(|| { 19 | let (tx, rx) = outcome_channel(); 20 | for i in 0..*num_tasks { 21 | hive.apply_send(i, &tx); 22 | } 23 | hive.join(); 24 | rx.into_iter().take(*num_tasks).for_each(black_box_drop); 25 | }) 26 | } 27 | 28 | fn main() { 29 | divan::main(); 30 | } 31 | -------------------------------------------------------------------------------- /deny.toml: -------------------------------------------------------------------------------- 1 | # This template contains all of the possible sections and their default values 2 | 3 | # Note that all fields that take a lint level have these possible values: 4 | # * deny - An error will be produced and the check will fail 5 | # * warn - A warning will be produced, but the check will not fail 6 | # * allow - No warning or error will be produced, though in some cases a note 7 | # will be 8 | 9 | # The values provided in this template are the default values that will be used 10 | # when any section or field is not specified in your own configuration 11 | 12 | # Root options 13 | 14 | # The graph table configures how the dependency graph is constructed and thus 15 | # which crates the checks are performed against 16 | [graph] 17 | # If 1 or more target triples (and optionally, target_features) are specified, 18 | # only the specified targets will be checked when running `cargo deny check`. 19 | # This means, if a particular package is only ever used as a target specific 20 | # dependency, such as, for example, the `nix` crate only being used via the 21 | # `target_family = "unix"` configuration, that only having windows targets in 22 | # this list would mean the nix crate, as well as any of its exclusive 23 | # dependencies not shared by any other crates, would be ignored, as the target 24 | # list here is effectively saying which targets you are building for. 25 | targets = [ 26 | # The triple can be any string, but only the target triples built in to 27 | # rustc (as of 1.40) can be checked against actual config expressions 28 | #"x86_64-unknown-linux-musl", 29 | # You can also specify which target_features you promise are enabled for a 30 | # particular target. target_features are currently not validated against 31 | # the actual valid features supported by the target architecture. 
32 | #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, 33 | ] 34 | # When creating the dependency graph used as the source of truth when checks are 35 | # executed, this field can be used to prune crates from the graph, removing them 36 | # from the view of cargo-deny. This is an extremely heavy hammer, as if a crate 37 | # is pruned from the graph, all of its dependencies will also be pruned unless 38 | # they are connected to another crate in the graph that hasn't been pruned, 39 | # so it should be used with care. The identifiers are [Package ID Specifications] 40 | # (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html) 41 | #exclude = [] 42 | # If true, metadata will be collected with `--all-features`. Note that this can't 43 | # be toggled off if true, if you want to conditionally enable `--all-features` it 44 | # is recommended to pass `--all-features` on the cmd line instead 45 | all-features = false 46 | # If true, metadata will be collected with `--no-default-features`. The same 47 | # caveat with `all-features` applies 48 | no-default-features = false 49 | # If set, these feature will be enabled when collecting metadata. If `--features` 50 | # is specified on the cmd line they will take precedence over this option. 51 | #features = [] 52 | 53 | # The output table provides options for how/if diagnostics are outputted 54 | [output] 55 | # When outputting inclusion graphs in diagnostics that include features, this 56 | # option can be used to specify the depth at which feature edges will be added. 57 | # This option is included since the graphs can be quite large and the addition 58 | # of features from the crate(s) to all of the graph roots can be far too verbose. 59 | # This option can be overridden via `--feature-depth` on the cmd line 60 | feature-depth = 1 61 | 62 | # This section is considered when running `cargo deny check advisories` 63 | # More documentation for the advisories section can be found here: 64 | # https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html 65 | [advisories] 66 | # The path where the advisory databases are cloned/fetched into 67 | #db-path = "$CARGO_HOME/advisory-dbs" 68 | # The url(s) of the advisory databases to use 69 | #db-urls = ["https://github.com/rustsec/advisory-db"] 70 | # A list of advisory IDs to ignore. Note that ignored advisories will still 71 | # output a note when they are encountered. 72 | ignore = [ 73 | { id = "RUSTSEC-2024-0436", reason = "paste is considered 'finished'" }, 74 | ] 75 | # If this is true, then cargo deny will use the git executable to fetch advisory database. 76 | # If this is false, then it uses a built-in git library. 77 | # Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support. 78 | # See Git Authentication for more information about setting up git authentication. 79 | #git-fetch-with-cli = true 80 | 81 | # This section is considered when running `cargo deny check licenses` 82 | # More documentation for the licenses section can be found here: 83 | # https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html 84 | [licenses] 85 | # List of explicitly allowed licenses 86 | # See https://spdx.org/licenses/ for list of possible licenses 87 | # [possible values: any SPDX 3.11 short identifier (+ optional exception)]. 88 | allow = ["MIT", "Apache-2.0", "Unicode-3.0"] 89 | # The confidence threshold for detecting a license from license text. 
90 | # The higher the value, the more closely the license text must be to the 91 | # canonical license text of a valid SPDX license file. 92 | # [possible values: any between 0.0 and 1.0]. 93 | confidence-threshold = 0.8 94 | # Allow 1 or more licenses on a per-crate basis, so that particular licenses 95 | # aren't accepted for every possible crate as with the normal allow list 96 | exceptions = [ 97 | # Each entry is the crate and version constraint, and its specific allow 98 | # list 99 | { allow = ["Zlib"], crate = "nanorand" }, 100 | ] 101 | 102 | # Some crates don't have (easily) machine readable licensing information, 103 | # adding a clarification entry for it allows you to manually specify the 104 | # licensing information 105 | #[[licenses.clarify]] 106 | # The package spec the clarification applies to 107 | #crate = "ring" 108 | # The SPDX expression for the license requirements of the crate 109 | #expression = "MIT AND ISC AND OpenSSL" 110 | # One or more files in the crate's source used as the "source of truth" for 111 | # the license expression. If the contents match, the clarification will be used 112 | # when running the license check, otherwise the clarification will be ignored 113 | # and the crate will be checked normally, which may produce warnings or errors 114 | # depending on the rest of your configuration 115 | #license-files = [ 116 | # Each entry is a crate relative path, and the (opaque) hash of its contents 117 | #{ path = "LICENSE", hash = 0xbd0eed23 } 118 | #] 119 | 120 | [licenses.private] 121 | # If true, ignores workspace crates that aren't published, or are only 122 | # published to private registries. 123 | # To see how to mark a crate as unpublished (to the official registry), 124 | # visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field. 125 | ignore = false 126 | # One or more private registries that you might publish crates to, if a crate 127 | # is only published to private registries, and ignore is true, the crate will 128 | # not have its license(s) checked 129 | registries = [ 130 | #"https://sekretz.com/registry 131 | ] 132 | 133 | # This section is considered when running `cargo deny check bans`. 134 | # More documentation about the 'bans' section can be found here: 135 | # https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html 136 | [bans] 137 | # Lint level for when multiple versions of the same crate are detected 138 | multiple-versions = "warn" 139 | # Lint level for when a crate version requirement is `*` 140 | wildcards = "allow" 141 | # The graph highlighting used when creating dotgraphs for crates 142 | # with multiple versions 143 | # * lowest-version - The path to the lowest versioned duplicate is highlighted 144 | # * simplest-path - The path to the version with the fewest edges is highlighted 145 | # * all - Both lowest-version and simplest-path are used 146 | highlight = "all" 147 | # The default lint level for `default` features for crates that are members of 148 | # the workspace that is being checked. This can be overridden by allowing/denying 149 | # `default` on a crate-by-crate basis if desired. 150 | workspace-default-features = "allow" 151 | # The default lint level for `default` features for external crates that are not 152 | # members of the workspace. This can be overridden by allowing/denying `default` 153 | # on a crate-by-crate basis if desired. 154 | external-default-features = "allow" 155 | # List of crates that are allowed. Use with care! 
156 | allow = [ 157 | #"ansi_term@0.11.0", 158 | #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is allowed" }, 159 | ] 160 | # List of crates to deny 161 | deny = [ 162 | #"ansi_term@0.11.0", 163 | #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is banned" }, 164 | # Wrapper crates can optionally be specified to allow the crate when it 165 | # is a direct dependency of the otherwise banned crate 166 | #{ crate = "ansi_term@0.11.0", wrappers = ["this-crate-directly-depends-on-ansi_term"] }, 167 | ] 168 | 169 | # List of features to allow/deny 170 | # Each entry the name of a crate and a version range. If version is 171 | # not specified, all versions will be matched. 172 | #[[bans.features]] 173 | #crate = "reqwest" 174 | # Features to not allow 175 | #deny = ["json"] 176 | # Features to allow 177 | #allow = [ 178 | # "rustls", 179 | # "__rustls", 180 | # "__tls", 181 | # "hyper-rustls", 182 | # "rustls", 183 | # "rustls-pemfile", 184 | # "rustls-tls-webpki-roots", 185 | # "tokio-rustls", 186 | # "webpki-roots", 187 | #] 188 | # If true, the allowed features must exactly match the enabled feature set. If 189 | # this is set there is no point setting `deny` 190 | #exact = true 191 | 192 | # Certain crates/versions that will be skipped when doing duplicate detection. 193 | skip = [ 194 | #"ansi_term@0.11.0", 195 | #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason why it can't be updated/removed" }, 196 | ] 197 | # Similarly to `skip` allows you to skip certain crates during duplicate 198 | # detection. Unlike skip, it also includes the entire tree of transitive 199 | # dependencies starting at the specified crate, up to a certain depth, which is 200 | # by default infinite. 201 | skip-tree = [ 202 | #"ansi_term@0.11.0", # will be skipped along with _all_ of its direct and transitive dependencies 203 | #{ crate = "ansi_term@0.11.0", depth = 20 }, 204 | ] 205 | 206 | # This section is considered when running `cargo deny check sources`. 207 | # More documentation about the 'sources' section can be found here: 208 | # https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html 209 | [sources] 210 | # Lint level for what to happen when a crate from a crate registry that is not 211 | # in the allow list is encountered 212 | unknown-registry = "warn" 213 | # Lint level for what to happen when a crate from a git repository that is not 214 | # in the allow list is encountered 215 | unknown-git = "warn" 216 | # List of URLs for allowed crate registries. Defaults to the crates.io index 217 | # if not specified. If it is specified but empty, no registries are allowed. 218 | allow-registry = ["https://github.com/rust-lang/crates.io-index"] 219 | # List of URLs for allowed Git repositories 220 | allow-git = [] 221 | 222 | [sources.allow-org] 223 | # github.com organizations to allow git sources for 224 | github = [] 225 | # gitlab.com organizations to allow git sources for 226 | gitlab = [] 227 | # bitbucket.org organizations to allow git sources for 228 | bitbucket = [] 229 | -------------------------------------------------------------------------------- /src/barrier.rs: -------------------------------------------------------------------------------- 1 | use parking_lot::RwLock; 2 | use std::collections::HashSet; 3 | use std::sync::atomic::{AtomicBool, Ordering}; 4 | use std::sync::{Arc, Barrier}; 5 | use std::thread::{self, ThreadId}; 6 | 7 | /// Enables multiple threads to synchronize the beginning of some computation. 
Unlike 8 | /// [`std::sync::Barrier`], this one keeps track of which threads have reached it and only 9 | /// recognizes the first wait from each thread. 10 | #[derive(Clone)] 11 | pub struct IndexedBarrier(Arc); 12 | 13 | struct Inner { 14 | barrier: Barrier, 15 | threads_seen: RwLock>, 16 | is_crossed: AtomicBool, 17 | } 18 | 19 | impl IndexedBarrier { 20 | pub fn new(num_threads: usize) -> Self { 21 | Self(Arc::new(Inner { 22 | barrier: Barrier::new(num_threads + 1), 23 | threads_seen: RwLock::new(HashSet::with_capacity(num_threads + 1)), 24 | is_crossed: AtomicBool::new(false), 25 | })) 26 | } 27 | 28 | /// Wait for all threads to reach this barrier. Returns `None` if the barrier has already been 29 | /// crossed, or if this thread has already called `wait` on this barrier (which should never 30 | /// happen). Otherwise returns `Some(is_leader)`, where `is_leader is `true` for a single, 31 | /// arbitrary thread. 32 | pub fn wait(&self) -> Option { 33 | if self.0.is_crossed.load(Ordering::Acquire) { 34 | return None; 35 | } 36 | let thread_id = thread::current().id(); 37 | if !self.0.threads_seen.read().contains(&thread_id) { 38 | self.0.threads_seen.write().insert(thread_id); 39 | } else { 40 | return None; 41 | } 42 | let is_leader = self.0.barrier.wait().is_leader(); 43 | if is_leader { 44 | self.0.is_crossed.store(true, Ordering::Release); 45 | } 46 | Some(is_leader) 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /src/bee/context.rs: -------------------------------------------------------------------------------- 1 | //! The context for a task processed by a `Worker`. 2 | use std::cell::RefCell; 3 | use std::fmt::Debug; 4 | 5 | /// Type of unique ID for a task within the `Hive`. 6 | pub type TaskId = usize; 7 | 8 | /// Trait that provides a `Context` with limited access to a worker thread's state during 9 | /// task execution. 10 | pub trait LocalContext: Debug { 11 | /// Returns `true` if tasks in progress should be cancelled. 12 | fn should_cancel_tasks(&self) -> bool; 13 | 14 | /// Submits a new task to the `Hive` that is executing the current task. 15 | fn submit_task(&self, input: I) -> TaskId; 16 | 17 | #[cfg(test)] 18 | fn thread_index(&self) -> usize; 19 | } 20 | 21 | /// The context visible to a task when processing an input. 22 | #[derive(Debug)] 23 | pub struct Context<'a, I> { 24 | meta: TaskMeta, 25 | local: Option<&'a dyn LocalContext>, 26 | subtask_ids: RefCell>>, 27 | } 28 | 29 | impl<'a, I> Context<'a, I> { 30 | /// Returns a new empty context. This is primarily useful for testing. 31 | pub fn empty() -> Self { 32 | Self { 33 | meta: TaskMeta::default(), 34 | local: None, 35 | subtask_ids: RefCell::new(None), 36 | } 37 | } 38 | 39 | /// Creates a new `Context` with the given task metadata and shared state. 40 | pub fn new(meta: TaskMeta, local: Option<&'a dyn LocalContext>) -> Self { 41 | Self { 42 | meta, 43 | local, 44 | subtask_ids: RefCell::new(None), 45 | } 46 | } 47 | 48 | /// The unique ID of this task within the `Hive`. 49 | pub fn task_id(&self) -> TaskId { 50 | self.meta.id() 51 | } 52 | 53 | /// Returns the number of previous failed attempts to execute the current task. 54 | pub fn attempt(&self) -> u8 { 55 | self.meta.attempt() 56 | } 57 | 58 | /// Returns `true` if the current task should be cancelled. 59 | /// 60 | /// A long-running `Worker` should check this periodically and, if it returns `true`, exit 61 | /// early with an `ApplyError::Cancelled` result. 
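///
/// For example, a long-running worker might poll this flag between units of work and return
/// early (a minimal sketch; `SummingWorker` is illustrative and the signatures follow the
/// `Worker` examples elsewhere in this crate):
///
/// ```
/// use beekeeper::bee::prelude::*;
///
/// #[derive(Debug, Default)]
/// struct SummingWorker;
///
/// impl Worker for SummingWorker {
///     type Input = u64;
///     type Output = u64;
///     type Error = ();
///
///     fn apply(&mut self, input: u64, ctx: &Context<u64>) -> WorkerResult<Self> {
///         let mut total = 0;
///         for i in 0..input {
///             if ctx.is_cancelled() {
///                 // exit early and report that this task was cancelled
///                 return Err(ApplyError::Cancelled { input });
///             }
///             total += i;
///         }
///         Ok(total)
///     }
/// }
/// ```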
62 | pub fn is_cancelled(&self) -> bool { 63 | self.local 64 | .as_ref() 65 | .map(|local| local.should_cancel_tasks()) 66 | .unwrap_or(false) 67 | } 68 | 69 | /// Submits a new task to the `Hive` that is executing the current task. 70 | /// 71 | /// If a thread-local queue is available and has capacity, the task will be added to it, 72 | /// otherwise it is added to the global queue. The ID of the submitted task is stored in this 73 | /// `Context` and ultimately returned in the `subtask_ids` of the `Outcome` of the submitting 74 | /// task. 75 | /// 76 | /// The task will be submitted with the same outcome sender as the current task, or stored in 77 | /// the `Hive` if there is no sender. 78 | /// 79 | /// Returns an `Err` containing `input` if the new task was not successfully submitted. 80 | pub fn submit(&self, input: I) -> Result<(), I> { 81 | if let Some(local) = self.local.as_ref() { 82 | let task_id = local.submit_task(input); 83 | self.subtask_ids 84 | .borrow_mut() 85 | .get_or_insert_default() 86 | .push(task_id); 87 | Ok(()) 88 | } else { 89 | Err(input) 90 | } 91 | } 92 | 93 | /// Returns the unique index of the worker thread executing this task. 94 | #[cfg(test)] 95 | pub fn thread_index(&self) -> Option { 96 | self.local.map(|local| local.thread_index()) 97 | } 98 | 99 | /// Consumes this `Context` and returns the IDs of the subtasks spawned during the execution 100 | /// of the task, if any. 101 | pub(crate) fn into_parts(self) -> (TaskMeta, Option>) { 102 | (self.meta, self.subtask_ids.into_inner()) 103 | } 104 | } 105 | 106 | /// The metadata of a task. 107 | #[derive(Clone, Debug, Default)] 108 | pub struct TaskMeta { 109 | id: TaskId, 110 | #[cfg(feature = "local-batch")] 111 | weight: u32, 112 | #[cfg(feature = "retry")] 113 | attempt: u8, 114 | } 115 | 116 | impl TaskMeta { 117 | /// Creates a new `TaskMeta` with the given task ID. 118 | pub fn new(id: TaskId) -> Self { 119 | TaskMeta { 120 | id, 121 | ..Default::default() 122 | } 123 | } 124 | 125 | /// Creates a new `TaskMeta` with the given task ID and weight. 126 | #[cfg(feature = "local-batch")] 127 | pub fn with_weight(task_id: TaskId, weight: u32) -> Self { 128 | TaskMeta { 129 | id: task_id, 130 | weight, 131 | ..Default::default() 132 | } 133 | } 134 | 135 | /// Returns the unique ID of this task within the `Hive`. 136 | pub fn id(&self) -> TaskId { 137 | self.id 138 | } 139 | 140 | /// Returns the number of previous failed attempts to execute the current task. 141 | /// 142 | /// Always returns `0` if the `retry` feature is not enabled. 143 | pub fn attempt(&self) -> u8 { 144 | #[cfg(feature = "retry")] 145 | return self.attempt; 146 | #[cfg(not(feature = "retry"))] 147 | return 0; 148 | } 149 | 150 | /// Increments the number of previous failed attempts to execute the current task. 151 | #[cfg(feature = "retry")] 152 | pub(crate) fn inc_attempt(&mut self) { 153 | self.attempt += 1; 154 | } 155 | 156 | /// Returns the task weight. 157 | /// 158 | /// Always returns `0` if the `local-batch` feature is not enabled. 
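///
/// A minimal sketch of reading the weight from task metadata (uses only the `TaskMeta`
/// constructor defined in this module; a default `TaskMeta` carries no weight):
///
/// ```
/// use beekeeper::bee::TaskMeta;
///
/// let meta = TaskMeta::new(1);
/// // with no explicit weight (or without the `local-batch` feature), the weight is 0
/// assert_eq!(meta.weight(), 0);
/// ```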
159 | pub fn weight(&self) -> u32 { 160 | #[cfg(feature = "local-batch")] 161 | return self.weight; 162 | #[cfg(not(feature = "local-batch"))] 163 | return 0; 164 | } 165 | } 166 | 167 | impl From for TaskMeta { 168 | fn from(value: TaskId) -> Self { 169 | TaskMeta::new(value) 170 | } 171 | } 172 | 173 | #[cfg(all(test, feature = "retry"))] 174 | impl TaskMeta { 175 | pub fn with_attempt(task_id: TaskId, attempt: u8) -> Self { 176 | Self { 177 | id: task_id, 178 | attempt, 179 | ..Default::default() 180 | } 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /src/bee/error.rs: -------------------------------------------------------------------------------- 1 | //! Error types that may be returned by `Worker`s. 2 | use crate::panic::Panic; 3 | use std::fmt::Debug; 4 | 5 | /// Error that can result from applying a `Worker`'s function to an input. 6 | #[derive(thiserror::Error, Debug)] 7 | pub enum ApplyError { 8 | /// The task failed due to a fatal error that cannot be retried. 9 | #[error("Task failed (and is not retryable)")] 10 | Fatal { input: Option, error: E }, 11 | /// The task failed due to a (possibly) transient error and can be retried. 12 | #[error("Task failed, but is retryable")] 13 | Retryable { input: I, error: E }, 14 | /// The task was cancelled before it completed. 15 | #[error("Task was cancelled")] 16 | Cancelled { input: I }, 17 | /// The task panicked. 18 | #[error("Task panicked")] 19 | Panic { 20 | input: Option, 21 | payload: Panic, 22 | }, 23 | } 24 | 25 | impl ApplyError { 26 | /// Returns the input value associated with this error, if available. 27 | pub fn input(&self) -> Option<&I> { 28 | match self { 29 | Self::Fatal { input, .. } => input.as_ref(), 30 | Self::Retryable { input, .. } => Some(input), 31 | Self::Cancelled { input, .. } => Some(input), 32 | Self::Panic { input, .. } => input.as_ref(), 33 | } 34 | } 35 | 36 | /// Consumes this `ApplyError` and returns the input value associated with it, if available. 37 | pub fn into_input(self) -> Option { 38 | match self { 39 | Self::Fatal { input, .. } => input, 40 | Self::Retryable { input, .. } => Some(input), 41 | Self::Cancelled { input, .. } => Some(input), 42 | Self::Panic { input, .. } => input, 43 | } 44 | } 45 | 46 | /// Consumes this `ApplyError` and: 47 | /// * Panics, if this is a `Panic` variant, 48 | /// * Returns `None`, if this is a `Cancelled` variant, 49 | /// * Returns `Some(E)` otherwise 50 | pub fn into_source(self) -> Option { 51 | match self { 52 | Self::Fatal { input: _, error } => Some(error), 53 | Self::Retryable { input: _, error } => Some(error), 54 | Self::Cancelled { .. } => None, 55 | Self::Panic { input: _, payload } => payload.resume(), 56 | } 57 | } 58 | } 59 | 60 | /// Error that can result from applying a `RefWorker`'s function to an input. 61 | #[derive(thiserror::Error, Debug)] 62 | pub enum ApplyRefError { 63 | /// The task failed due to a fatal error that cannot be retried. 64 | #[error("Error is not retryable")] 65 | Fatal(E), 66 | /// The task failed due to a (possibly) transient error and can be retried. 67 | #[error("Error is retryable")] 68 | Retryable(E), 69 | /// The task was cancelled before it completed. 
70 | #[error("Task was cancelled")] 71 | Cancelled, 72 | } 73 | 74 | impl ApplyRefError { 75 | pub(super) fn into_apply_error(self, input: I) -> ApplyError { 76 | match self { 77 | Self::Fatal(error) => ApplyError::Fatal { 78 | input: Some(input), 79 | error, 80 | }, 81 | Self::Retryable(error) => ApplyError::Retryable { input, error }, 82 | Self::Cancelled => ApplyError::Cancelled { input }, 83 | } 84 | } 85 | } 86 | 87 | impl From for ApplyRefError { 88 | fn from(e: E) -> Self { 89 | ApplyRefError::Fatal(e) 90 | } 91 | } 92 | 93 | #[cfg(test)] 94 | #[cfg_attr(coverage_nightly, coverage(off))] 95 | mod tests { 96 | use super::ApplyError; 97 | use crate::panic::Panic; 98 | 99 | type TestError<'a> = ApplyError; 100 | 101 | impl ApplyError { 102 | pub fn panic(input: Option, detail: Option) -> Self { 103 | Self::Panic { 104 | input, 105 | payload: Panic::new("test", detail), 106 | } 107 | } 108 | } 109 | 110 | #[test] 111 | fn test_input() { 112 | let cancelled: TestError = ApplyError::Cancelled { input: 42 }; 113 | assert_eq!(&42, cancelled.input().unwrap()); 114 | assert_eq!(42, cancelled.into_input().unwrap()); 115 | 116 | let retryable: TestError = ApplyError::Retryable { 117 | input: 42, 118 | error: "bork", 119 | }; 120 | assert_eq!(&42, retryable.input().unwrap()); 121 | assert_eq!(42, retryable.into_input().unwrap()); 122 | 123 | let not_retryable: TestError = ApplyError::Fatal { 124 | input: Some(42), 125 | error: "bork", 126 | }; 127 | assert_eq!(&42, not_retryable.input().unwrap()); 128 | assert_eq!(42, not_retryable.into_input().unwrap()); 129 | 130 | let panic: TestError = ApplyError::panic(Some(42), None); 131 | assert_eq!(&42, panic.input().unwrap()); 132 | assert_eq!(42, panic.into_input().unwrap()); 133 | } 134 | 135 | #[test] 136 | fn test_error() { 137 | let cancelled: TestError = ApplyError::Cancelled { input: 42 }; 138 | assert_eq!(None, cancelled.into_source()); 139 | 140 | let retryable: TestError = ApplyError::Retryable { 141 | input: 42, 142 | error: "bork", 143 | }; 144 | assert_eq!(Some("bork"), retryable.into_source()); 145 | 146 | let not_retryable: TestError = ApplyError::Fatal { 147 | input: Some(42), 148 | error: "bork", 149 | }; 150 | assert_eq!(Some("bork"), not_retryable.into_source()); 151 | } 152 | 153 | #[test] 154 | #[should_panic] 155 | fn test_panic() { 156 | let panic: TestError = ApplyError::panic(Some(42), Some("borked".to_string())); 157 | let _ = panic.into_source(); 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /src/bee/mod.rs: -------------------------------------------------------------------------------- 1 | //! Traits for defining workers in the worker pool. 2 | //! 3 | //! A [`Hive`](crate::hive::Hive) is populated by bees: 4 | //! * The [`Worker`]s process the tasks submitted to the `Hive`. 5 | //! * The [`Queen`] creates a new `Worker` for each thread in the `Hive`. 6 | //! * [`QueenMut`] can be used to implement a stateful queen - it must be wrapped in a 7 | //! [`QueenCell`] to make it thread-safe. 8 | //! 9 | //! It is easiest to use the [`prelude`] when implementing your bees: 10 | //! 11 | //! ``` 12 | //! use beekeeper::bee::prelude::*; 13 | //! ``` 14 | //! 15 | //! # Worker 16 | //! 17 | //! A worker is defined by implementing the [`Worker`] trait. A `Worker` implementation has three 18 | //! associated types: 19 | //! * `Input`: the type of the input to the worker. 20 | //! * `Output`: the type of the output produced by the worker. 21 | //! 
* `Error`: the type of error that can occur during the execution of the worker. 22 | //! 23 | //! Note that all of a `Worker`'s associated types must be [`Send`]; however, the `Worker` itself 24 | //! will only ever exist within the context of a single worker thread, and thus does not itself 25 | //! need to be `Send`. 26 | //! 27 | //! The `Worker` trait has a single method, [`apply`](crate::bee::Worker::apply), which 28 | //! takes an input of type `Input` and a [`Context`] and returns a `Result` containing an either an 29 | //! `Output` or an [`ApplyError`]. Note that `Worker::apply()` takes a `&mut self` parameter, 30 | //! meaning that it can modify its own state. 31 | //! 32 | //! If a fatal error occurs during processing of the task, the worker should return 33 | //! [`ApplyError::Fatal`]. 34 | //! 35 | //! If the task instead fails due to a transient error, the worker should return 36 | //! [`ApplyError::Retryable`]. If the `retry` feature is enabled, then a task that fails with a 37 | //! `ApplyError::Retryable` error will be retried, otherwise the error is converted to `Fatal`. 38 | //! 39 | //! The `Context` contains information about the task, including: 40 | //! * The task ID. Each task submitted to a `Hive` is assigned an ID that is unique within 41 | //! that `Hive`. 42 | //! * Whether the task has been cancelled: the user may request that all active tasks are cancelled, 43 | //! such as by calling [`Hive::suspend()`](crate::hive::Hive::suspend). A `Worker` is not 44 | //! required to handle cancellation, but for long-running tasks it is suggested that the worker 45 | //! periodically check the cancellation flag by calling 46 | //! [`Context::is_cancelled()`](crate::bee::context::Context::is_cancelled). If the cancellation 47 | //! flag is set, the worker may terminate early by returning [`ApplyError::Cancelled`]. 48 | //! * The retry [`attempt`](crate::bee::context::Context::attempt), which starts at `0` the first 49 | //! time the task is attempted. If the `retry` feature is enabled and the task fails with 50 | //! [`ApplyError::Retryable], this value increments by `1` for each subsequent retry attempt. 51 | //! 52 | //! The `Context` also provides the ability to submit new tasks to the `Hive` using the 53 | //! [`submit`](crate::bee::Context::submit) method. The IDs of submitted subtasks are stored in the 54 | //! `Context` and are returned in a field of the [`Outcome`](crate::hive::Outcome) that results 55 | //! from the parent task. 56 | //! 57 | //! A `Worker` should not panic. However, if it must execute code that may panic, it can do so 58 | //! within a closure passed to [`Panic::try_call`](crate::panic::Panic::try_call) and convert an 59 | //! `Err` result to an [`ApplyError::Panic`]. In the worst-case scenario, if a worker fails with an 60 | //! uncaught panic, the worker thread will terminate and the `Hive` will spawn a new worker thread; 61 | //! however, the input on which the worker failed will be irretrievably lost. 62 | //! 63 | //! As an alternative to implementing the `Worker` trait, you may instead implement 64 | //! [`RefWorker`], which is similar to `Worker`, with the following differences: 65 | //! * You implement [`apply_ref`](crate::bee::RefWorker::apply_ref) instead of `apply`. 66 | //! * The `apply_ref` method takes a reference to the input rather than an owned value. 67 | //! * The `apply_ref` method returns a `Result` containing an either an `Output` or a 68 | //! [`ApplyRefError`]. 69 | //! 
* You do not need to catch panics - the blanket implementation of `Worker::apply` for 70 | //! `RefWorker` calls `apply_ref` within a `Panic::try_call` closure and automatically handles the 71 | //! result. 72 | //! 73 | //! ## Stock Workers 74 | //! 75 | //! The [`stock`] submodule provides some commonly used worker implementations: 76 | //! * [`Caller`](crate::bee::stock::Caller): a worker that wraps a callable (function or closure) 77 | //! with a single input parameter of type `Input` (i.e., the worker's associated `Input` type) 78 | //! and an output of type `Output`. 79 | //! * A [`OnceCaller`](crate::bee::stock::OnceCaller) is like `Caller`, but it may also return 80 | //! an error, which is always considered fatal. 81 | //! * A [`RefCaller`](crate::bee::stock::RefCaller) is like `OnceCaller`, except that it passes 82 | //! an `&Input` to its wrapped callable. The benefit of using `RefCaller` is that the input 83 | //! can be recovered if there is an error. 84 | //! * [`RetryCaller`](crate::bee::stock::RetryCaller) is like `OnceCaller`, but its error type 85 | //! is `ApplyError`, which enables transient errors to be retried (when the `retry` feature 86 | //! is enabled). 87 | //! * [`ThunkWorker`](crate::bee::stock::ThunkWorker): a worker that processes 88 | //! [`Thunk`](crate::bee::stock::Thunk)s, which are no-argument callables (functions or closures) 89 | //! with a common return type. 90 | //! * [`FunkWorker`](crate::bee::stock::FunkWorker) is like `ThunkWorker` except that it 91 | //! processes fallible thunks (`Funk`s), which also have a common error type. 92 | //! * [`PunkWorker`](crate::bee::stock::PunkWorker) is like `ThunkWorker` except that it 93 | //! processes thunks that may panic (`Punk`s). 94 | //! * [`EchoWorker`](crate::bee::stock::EchoWorker): simply returns its input. This is primarily 95 | //! useful for testing. 96 | //! 97 | //! # Queen 98 | //! 99 | //! A queen is defined by implementing the [`Queen`] trait. A single `Queen` instance is used to 100 | //! create the `Worker` instances for each worker thread in a `Hive`. 101 | //! 102 | //! If you need for the queen to have mutable state, you can instead implement [`QueenMut`], whose 103 | //! [`create`](crate::bee::QueenMut::create) method takes `&mut self` as a parameter. When 104 | //! creating a `Hive`, the `QueenMut` must be wrapped in a [`QueenCell`] to make it thread-safe. 105 | //! 106 | //! It is often not necessary to manually implement the `Queen` trait. For example, if your `Worker` 107 | //! implements `Default`, then you can use [`DefaultQueen`] implicitly by calling 108 | //! [`OpenBuilder::with_worker_default`](crate::hive::OpenBuilder::with_worker_default). Similarly, 109 | //! if your `Worker` implements `Clone`, then you can use [`CloneQueen`] 110 | //! implicitly by calling [`OpenBuilder::with_worker`](crate::hive::OpenBuilder::with_worker). 111 | //! 112 | //! A `Queen` should never panic when creating `Worker`s. 113 | //! 114 | //! The state of a `Hive`'s `Queen` may be interrogated either 115 | //! [during](crate::hive::Hive::queen) or [after](crate::hive::Hive::try_into_husk) the 116 | //! life of the `Hive`. However, `Worker`s may never be accessed directly. Thus, it is often 117 | //! more appropriate to use synchronized types (`Arc`, `Mutex`, etc.) to share state between 118 | //! workers, the queen, and/or the client thread(s). 
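//!
//! # Example
//!
//! A minimal end-to-end sketch tying the pieces together: define a `Worker`, build a `Hive`, and
//! submit tasks. The builder calls mirror those used in this repository's `benches/perf.rs`;
//! `DoubleWorker` is purely illustrative.
//!
//! ```
//! use beekeeper::bee::prelude::*;
//! use beekeeper::hive::{Builder, ChannelBuilder, TaskQueuesBuilder, outcome_channel};
//!
//! #[derive(Debug, Default, Clone)]
//! struct DoubleWorker;
//!
//! impl Worker for DoubleWorker {
//!     type Input = u32;
//!     type Output = u32;
//!     type Error = ();
//!
//!     fn apply(&mut self, input: u32, _: &Context<u32>) -> WorkerResult<Self> {
//!         Ok(input.saturating_mul(2))
//!     }
//! }
//!
//! // build a `Hive` with 4 worker threads, each with its own `DoubleWorker`
//! let hive = ChannelBuilder::empty()
//!     .num_threads(4)
//!     .with_worker_default::<DoubleWorker>()
//!     .build();
//!
//! // submit 10 tasks and collect their `Outcome`s on a channel
//! let (tx, rx) = outcome_channel();
//! for i in 0..10u32 {
//!     hive.apply_send(i, &tx);
//! }
//! hive.join();
//! assert_eq!(rx.into_iter().take(10).count(), 10);
//! ```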
119 | mod context; 120 | mod error; 121 | mod queen; 122 | pub mod stock; 123 | mod worker; 124 | 125 | pub use self::context::{Context, LocalContext, TaskId, TaskMeta}; 126 | pub use self::error::{ApplyError, ApplyRefError}; 127 | pub use self::queen::{CloneQueen, DefaultQueen, Queen, QueenCell, QueenMut}; 128 | pub use self::worker::{RefWorker, RefWorkerResult, Worker, WorkerError, WorkerResult}; 129 | 130 | pub mod prelude { 131 | pub use super::{ 132 | ApplyError, ApplyRefError, Context, Queen, QueenCell, QueenMut, RefWorker, RefWorkerResult, 133 | Worker, WorkerError, WorkerResult, 134 | }; 135 | } 136 | -------------------------------------------------------------------------------- /src/bee/queen.rs: -------------------------------------------------------------------------------- 1 | //! The Queen bee trait. 2 | use super::Worker; 3 | use derive_more::Debug; 4 | use parking_lot::RwLock; 5 | use std::marker::PhantomData; 6 | use std::ops::Deref; 7 | use std::{any, fmt}; 8 | 9 | /// A trait for factories that create `Worker`s. 10 | pub trait Queen: Send + Sync + 'static { 11 | /// The kind of `Worker` created by this factory. 12 | type Kind: Worker; 13 | 14 | /// Creates and returns a new instance of `Self::Kind`, *immutably*. 15 | fn create(&self) -> Self::Kind; 16 | } 17 | 18 | /// A trait for mutable factories that create `Worker`s. 19 | pub trait QueenMut: Send + Sync + 'static { 20 | /// The kind of `Worker` created by this factory. 21 | type Kind: Worker; 22 | 23 | /// Creates and returns a new instance of `Self::Kind`, *immutably*. 24 | fn create(&mut self) -> Self::Kind; 25 | } 26 | 27 | /// A wrapper for a `MutQueen` that implements `Queen`. 28 | /// 29 | /// Interior mutability is enabled using an `RwLock`. 30 | pub struct QueenCell(RwLock); 31 | 32 | impl QueenCell { 33 | /// Creates a new `QueenCell` with the given `mut_queen`. 34 | pub fn new(mut_queen: Q) -> Self { 35 | Self(RwLock::new(mut_queen)) 36 | } 37 | 38 | /// Returns a reference to the wrapped `Queen`. 39 | pub fn get(&self) -> impl Deref { 40 | self.0.read() 41 | } 42 | 43 | /// Consumes this `QueenCell` and returns the inner `Queen`. 44 | pub fn into_inner(self) -> Q { 45 | self.0.into_inner() 46 | } 47 | } 48 | 49 | impl Queen for QueenCell { 50 | type Kind = Q::Kind; 51 | 52 | /// Calls the wrapped `QueenMut::create` method using interior mutability. 53 | fn create(&self) -> Self::Kind { 54 | self.0.write().create() 55 | } 56 | } 57 | 58 | impl fmt::Debug for QueenCell { 59 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 60 | f.debug_struct("QueenCell") 61 | .field("queen", &*self.0.read()) 62 | .finish() 63 | } 64 | } 65 | 66 | impl Clone for QueenCell { 67 | fn clone(&self) -> Self { 68 | Self(RwLock::new(self.0.read().clone())) 69 | } 70 | } 71 | 72 | impl Default for QueenCell { 73 | fn default() -> Self { 74 | Self::new(Q::default()) 75 | } 76 | } 77 | 78 | impl From for QueenCell { 79 | fn from(queen: Q) -> Self { 80 | Self::new(queen) 81 | } 82 | } 83 | 84 | /// A `Queen` that can create a `Worker` type that implements `Default`. 85 | /// 86 | /// Note that, for the implementation to be generic, `W` also needs to be `Send` and `Sync`. 
If you 87 | /// want a `Queen` for a specific `Worker` type that is not `Send/Sync`: 88 | /// 89 | /// ``` 90 | /// # use beekeeper::bee::{Context, Queen, Worker, WorkerResult}; 91 | /// # use std::rc::Rc; 92 | /// 93 | /// #[derive(Default, Debug)] 94 | /// struct MyWorker(Rc); // not `Send` or `Sync` 95 | /// 96 | /// impl Worker for MyWorker { 97 | /// type Input = u8; 98 | /// type Output = u8; 99 | /// type Error = (); 100 | /// 101 | /// fn apply(&mut self, input: u8, _: &Context) -> WorkerResult { 102 | /// Ok(self.0.saturating_add(input)) 103 | /// } 104 | /// } 105 | /// 106 | /// struct MyQueen; 107 | /// 108 | /// impl Queen for MyQueen { 109 | /// type Kind = MyWorker; 110 | /// 111 | /// fn create(&self) -> Self::Kind { 112 | /// MyWorker::default() 113 | /// } 114 | /// } 115 | /// ``` 116 | #[derive(Default, Debug)] 117 | #[debug("DefaultQueen<{}>", any::type_name::())] 118 | pub struct DefaultQueen(PhantomData); 119 | 120 | impl Clone for DefaultQueen { 121 | fn clone(&self) -> Self { 122 | Self::default() 123 | } 124 | } 125 | 126 | impl Queen for DefaultQueen { 127 | type Kind = W; 128 | 129 | fn create(&self) -> Self::Kind { 130 | Self::Kind::default() 131 | } 132 | } 133 | 134 | /// A `Queen` that can create a `Worker` type that implements `Clone`, by making copies of 135 | /// an existing instance of that `Worker` type. 136 | #[derive(Debug)] 137 | #[debug("CloneQueen<{}>", any::type_name::())] 138 | pub struct CloneQueen(W); 139 | 140 | impl CloneQueen { 141 | pub fn new(worker: W) -> Self { 142 | CloneQueen(worker) 143 | } 144 | } 145 | 146 | impl Clone for CloneQueen { 147 | fn clone(&self) -> Self { 148 | Self(self.0.clone()) 149 | } 150 | } 151 | 152 | impl Default for CloneQueen { 153 | fn default() -> Self { 154 | Self(W::default()) 155 | } 156 | } 157 | 158 | impl Queen for CloneQueen { 159 | type Kind = W; 160 | 161 | fn create(&self) -> Self::Kind { 162 | self.0.clone() 163 | } 164 | } 165 | 166 | #[cfg(test)] 167 | #[cfg_attr(coverage_nightly, coverage(off))] 168 | mod tests { 169 | use super::{CloneQueen, DefaultQueen, Queen, QueenCell, QueenMut}; 170 | use crate::bee::stock::EchoWorker; 171 | 172 | #[derive(Default, Debug, Clone)] 173 | struct TestQueen(usize); 174 | 175 | impl QueenMut for TestQueen { 176 | type Kind = EchoWorker; 177 | 178 | fn create(&mut self) -> Self::Kind { 179 | self.0 += 1; 180 | EchoWorker::default() 181 | } 182 | } 183 | 184 | #[test] 185 | fn test_queen_cell() { 186 | let queen = QueenCell::new(TestQueen(0)); 187 | for _ in 0..10 { 188 | let _worker = queen.create(); 189 | } 190 | assert_eq!(queen.get().0, 10); 191 | assert_eq!(queen.into_inner().0, 10); 192 | } 193 | 194 | #[test] 195 | fn test_queen_cell_default() { 196 | let queen = QueenCell::::default(); 197 | for _ in 0..10 { 198 | let _worker = queen.create(); 199 | } 200 | assert_eq!(queen.get().0, 10); 201 | } 202 | 203 | #[test] 204 | fn test_queen_cell_clone() { 205 | let queen = QueenCell::::default(); 206 | for _ in 0..10 { 207 | let _worker = queen.create(); 208 | } 209 | assert_eq!(queen.clone().get().0, 10); 210 | } 211 | 212 | #[test] 213 | fn test_queen_cell_debug() { 214 | let queen = QueenCell::::default(); 215 | for _ in 0..10 { 216 | let _worker = queen.create(); 217 | } 218 | assert_eq!(format!("{:?}", queen), "QueenCell { queen: TestQueen(10) }"); 219 | } 220 | 221 | #[test] 222 | fn test_queen_cell_from() { 223 | let queen = QueenCell::from(TestQueen::default()); 224 | for _ in 0..10 { 225 | let _worker = queen.create(); 226 | } 227 | 
assert_eq!(queen.get().0, 10); 228 | } 229 | 230 | #[test] 231 | fn test_default_queen() { 232 | let queen1 = DefaultQueen::>::default(); 233 | let worker1 = queen1.create(); 234 | let queen2 = queen1.clone(); 235 | let worker2 = queen2.create(); 236 | assert_eq!(worker1, worker2); 237 | } 238 | 239 | #[test] 240 | fn test_clone_queen() { 241 | let worker = EchoWorker::::default(); 242 | let queen = CloneQueen::new(worker); 243 | let worker1 = queen.create(); 244 | let queen2 = queen.clone(); 245 | let worker2 = queen2.create(); 246 | assert_eq!(worker1, worker2); 247 | } 248 | } 249 | -------------------------------------------------------------------------------- /src/bee/stock/call.rs: -------------------------------------------------------------------------------- 1 | //! Worker implementations that wrap callables (closures or function pointers that are `FnMut`). 2 | use crate::bee::{ 3 | ApplyError, ApplyRefError, Context, RefWorker, RefWorkerResult, Worker, WorkerResult, 4 | }; 5 | use derive_more::Debug; 6 | use std::marker::PhantomData; 7 | use std::ops::{Deref, DerefMut}; 8 | use std::{any, fmt}; 9 | 10 | /// Wraps a closure or function pointer and calls it when applied. For this `Callable` to be 11 | /// useable by a `Worker`, the function must be `FnMut` *and* `Clone`able. 12 | /// 13 | /// TODO: we could provide a better `Debug` implementation by providing a macro that can wrap a 14 | /// closure and store the text of the function, and then change all the Workers to take a 15 | /// `F: Deref`. 16 | /// See https://users.rust-lang.org/t/is-it-possible-to-implement-debug-for-fn-type/14824/3 17 | #[derive(Debug)] 18 | struct Callable { 19 | #[debug(skip)] 20 | f: F, 21 | #[debug("{}", any::type_name::())] 22 | i: PhantomData, 23 | #[debug("{}", any::type_name::())] 24 | o: PhantomData, 25 | #[debug("{}", any::type_name::())] 26 | e: PhantomData, 27 | } 28 | 29 | impl Callable { 30 | fn of(f: F) -> Self { 31 | Self { 32 | f, 33 | i: PhantomData, 34 | o: PhantomData, 35 | e: PhantomData, 36 | } 37 | } 38 | 39 | fn into_inner(self) -> F { 40 | self.f 41 | } 42 | } 43 | 44 | impl Clone for Callable { 45 | fn clone(&self) -> Self { 46 | Self::of(self.f.clone()) 47 | } 48 | } 49 | 50 | impl Deref for Callable { 51 | type Target = F; 52 | 53 | fn deref(&self) -> &Self::Target { 54 | &self.f 55 | } 56 | } 57 | 58 | impl DerefMut for Callable { 59 | fn deref_mut(&mut self) -> &mut Self::Target { 60 | &mut self.f 61 | } 62 | } 63 | 64 | /// A `Caller` that executes its function once on the input and returns the output. The function 65 | /// should not panic. 66 | #[derive(Debug)] 67 | pub struct Caller { 68 | callable: Callable, 69 | } 70 | 71 | impl Caller { 72 | /// Returns the wrapped callable. 
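///
/// A minimal sketch (mirroring the `Caller` usage in this module's tests):
///
/// ```
/// use beekeeper::bee::stock::Caller;
///
/// let caller = Caller::from(|input: u8| input + 1);
/// let f = caller.into_inner();
/// assert_eq!(f(5), 6);
/// ```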
73 | pub fn into_inner(self) -> F { 74 | self.callable.into_inner() 75 | } 76 | } 77 | 78 | impl From for Caller 79 | where 80 | I: Send + Sync + 'static, 81 | O: Send + Sync + 'static, 82 | F: FnMut(I) -> O + Clone + 'static, 83 | { 84 | fn from(f: F) -> Self { 85 | Caller { 86 | callable: Callable::of(f), 87 | } 88 | } 89 | } 90 | 91 | impl Clone for Caller { 92 | fn clone(&self) -> Self { 93 | Self { 94 | callable: self.callable.clone(), 95 | } 96 | } 97 | } 98 | 99 | impl Worker for Caller 100 | where 101 | I: Send + 'static, 102 | O: Send + 'static, 103 | F: FnMut(I) -> O + Clone + 'static, 104 | { 105 | type Input = I; 106 | type Output = O; 107 | type Error = (); 108 | 109 | #[inline] 110 | fn apply(&mut self, input: Self::Input, _: &Context) -> WorkerResult { 111 | Ok((self.callable)(input)) 112 | } 113 | } 114 | 115 | /// A `Caller` that executes its function once on each input. The input value is consumed by the 116 | /// function. If the function returns an error, it is wrapped in `ApplyError::Fatal`. 117 | /// 118 | /// If ownership of the input value is not required, consider using `RefCaller` instead. 119 | #[derive(Debug)] 120 | pub struct OnceCaller { 121 | callable: Callable, 122 | } 123 | 124 | impl OnceCaller { 125 | /// Returns the wrapped callable. 126 | pub fn into_inner(self) -> F { 127 | self.callable.into_inner() 128 | } 129 | } 130 | 131 | impl From for OnceCaller 132 | where 133 | I: Send + Sync + 'static, 134 | O: Send + Sync + 'static, 135 | E: Send + Sync + fmt::Debug + 'static, 136 | F: FnMut(I) -> Result + Clone + 'static, 137 | { 138 | fn from(f: F) -> Self { 139 | OnceCaller { 140 | callable: Callable::of(f), 141 | } 142 | } 143 | } 144 | 145 | impl Clone for OnceCaller { 146 | fn clone(&self) -> Self { 147 | Self { 148 | callable: self.callable.clone(), 149 | } 150 | } 151 | } 152 | 153 | impl Worker for OnceCaller 154 | where 155 | I: Send + 'static, 156 | O: Send + 'static, 157 | E: Send + fmt::Debug + 'static, 158 | F: FnMut(I) -> Result + Clone + 'static, 159 | { 160 | type Input = I; 161 | type Output = O; 162 | type Error = E; 163 | 164 | #[inline] 165 | fn apply(&mut self, input: Self::Input, _: &Context) -> WorkerResult { 166 | (self.callable)(input).map_err(|error| ApplyError::Fatal { error, input: None }) 167 | } 168 | } 169 | 170 | /// A `Caller` that executes its function once on a reference to the input. If the function 171 | /// returns an error, it is wrapped in `ApplyError::Fatal`. 172 | /// 173 | /// The benefit of using `RefCaller` over `OnceCaller` is that the `Fatal` error 174 | /// contains the input value for later recovery. 175 | #[derive(Debug)] 176 | pub struct RefCaller { 177 | callable: Callable, 178 | } 179 | 180 | impl RefCaller { 181 | /// Returns the wrapped callable. 
182 | pub fn into_inner(self) -> F { 183 | self.callable.into_inner() 184 | } 185 | } 186 | 187 | impl From for RefCaller 188 | where 189 | I: Send + Sync + 'static, 190 | O: Send + Sync + 'static, 191 | E: Send + Sync + fmt::Debug + 'static, 192 | F: FnMut(&I) -> Result + Clone + 'static, 193 | { 194 | fn from(f: F) -> Self { 195 | RefCaller { 196 | callable: Callable::of(f), 197 | } 198 | } 199 | } 200 | 201 | impl Clone for RefCaller { 202 | fn clone(&self) -> Self { 203 | Self { 204 | callable: self.callable.clone(), 205 | } 206 | } 207 | } 208 | 209 | impl RefWorker for RefCaller 210 | where 211 | I: Send + 'static, 212 | O: Send + 'static, 213 | E: Send + fmt::Debug + 'static, 214 | F: FnMut(&I) -> Result + Clone + 'static, 215 | { 216 | type Input = I; 217 | type Output = O; 218 | type Error = E; 219 | 220 | #[inline] 221 | fn apply_ref( 222 | &mut self, 223 | input: &Self::Input, 224 | _: &Context, 225 | ) -> RefWorkerResult { 226 | (self.callable)(input).map_err(|error| ApplyRefError::Fatal(error)) 227 | } 228 | } 229 | 230 | /// A `Caller` that returns a `Result`. A result of `Err(ApplyError::Retryable)` 231 | /// can be returned to indicate the task should be retried. 232 | #[derive(Debug)] 233 | pub struct RetryCaller { 234 | callable: Callable, 235 | } 236 | 237 | impl RetryCaller { 238 | /// Returns the wrapped callable. 239 | pub fn into_inner(self) -> F { 240 | self.callable.into_inner() 241 | } 242 | } 243 | 244 | impl From for RetryCaller 245 | where 246 | I: Send + Sync + 'static, 247 | O: Send + Sync + 'static, 248 | E: Send + Sync + fmt::Debug + 'static, 249 | F: FnMut(I, &Context) -> Result> + Clone + 'static, 250 | { 251 | fn from(f: F) -> Self { 252 | RetryCaller { 253 | callable: Callable::of(f), 254 | } 255 | } 256 | } 257 | 258 | impl Clone for RetryCaller { 259 | fn clone(&self) -> Self { 260 | Self { 261 | callable: self.callable.clone(), 262 | } 263 | } 264 | } 265 | 266 | impl Worker for RetryCaller 267 | where 268 | I: Send + 'static, 269 | O: Send + 'static, 270 | E: Send + fmt::Debug + 'static, 271 | F: FnMut(I, &Context) -> Result> + Clone + 'static, 272 | { 273 | type Input = I; 274 | type Output = O; 275 | type Error = E; 276 | 277 | #[inline] 278 | fn apply(&mut self, input: Self::Input, ctx: &Context) -> WorkerResult { 279 | (self.callable)(input, ctx) 280 | } 281 | } 282 | 283 | #[cfg(test)] 284 | #[cfg_attr(coverage_nightly, coverage(off))] 285 | mod tests { 286 | use super::*; 287 | use crate::bee::Context; 288 | 289 | #[test] 290 | fn test_call() { 291 | let mut worker = Caller::from(|input: u8| input + 1); 292 | assert!(matches!(worker.apply(5, &Context::empty()), Ok(6))) 293 | } 294 | 295 | #[test] 296 | fn test_clone() { 297 | let worker1 = Caller::from(|input: u8| input + 1); 298 | let worker2 = worker1.clone(); 299 | let f = worker2.into_inner(); 300 | assert_eq!(f(5), 6); 301 | } 302 | 303 | #[allow(clippy::type_complexity)] 304 | fn try_caller() -> RetryCaller< 305 | (bool, u8), 306 | u8, 307 | String, 308 | impl FnMut((bool, u8), &Context<(bool, u8)>) -> Result> 309 | + Clone 310 | + 'static, 311 | > { 312 | RetryCaller::from(|input: (bool, u8), _: &Context<(bool, u8)>| { 313 | if input.0 { 314 | Ok(input.1 + 1) 315 | } else { 316 | Err(ApplyError::Fatal { 317 | input: Some(input), 318 | error: "failure".into(), 319 | }) 320 | } 321 | }) 322 | } 323 | 324 | #[test] 325 | fn test_try_call_ok() { 326 | let mut worker = try_caller(); 327 | assert!(matches!(worker.apply((true, 5), &Context::empty()), Ok(6))); 328 | } 329 | 330 | #[test] 331 | 
fn test_clone_retry_caller() { 332 | let worker1 = try_caller(); 333 | let worker2 = worker1.clone(); 334 | let mut f = worker2.into_inner(); 335 | assert!(matches!(f((true, 5), &Context::empty()), Ok(6))); 336 | } 337 | 338 | #[test] 339 | fn test_try_call_fail() { 340 | let mut worker = try_caller(); 341 | let result = worker.apply((false, 5), &Context::empty()); 342 | let _error = String::from("failure"); 343 | assert!(matches!( 344 | result, 345 | Err(ApplyError::Fatal { 346 | input: Some((false, 5)), 347 | error: _error 348 | }) 349 | )); 350 | } 351 | 352 | #[allow(clippy::type_complexity)] 353 | fn once_caller() -> OnceCaller< 354 | (bool, u8), 355 | u8, 356 | String, 357 | impl FnMut((bool, u8)) -> Result + Clone + 'static, 358 | > { 359 | OnceCaller::from(|input: (bool, u8)| { 360 | if input.0 { 361 | Ok(input.1 + 1) 362 | } else { 363 | Err("failure".into()) 364 | } 365 | }) 366 | } 367 | 368 | #[test] 369 | fn test_once_call_ok() { 370 | let mut worker = once_caller(); 371 | assert!(matches!(worker.apply((true, 5), &Context::empty()), Ok(6))); 372 | } 373 | 374 | #[test] 375 | fn test_clone_once_caller() { 376 | let worker1 = once_caller(); 377 | let worker2 = worker1.clone(); 378 | let mut f = worker2.into_inner(); 379 | assert!(matches!(f((true, 5)), Ok(6))); 380 | } 381 | 382 | #[test] 383 | fn test_once_call_fail() { 384 | let mut worker = once_caller(); 385 | let result = worker.apply((false, 5), &Context::empty()); 386 | let _error = String::from("failure"); 387 | assert!(matches!( 388 | result, 389 | Err(ApplyError::Fatal { 390 | input: None, 391 | error: _error 392 | }) 393 | )); 394 | } 395 | 396 | #[allow(clippy::type_complexity)] 397 | fn ref_caller() -> RefCaller< 398 | (bool, u8), 399 | u8, 400 | String, 401 | impl FnMut(&(bool, u8)) -> Result + Clone + 'static, 402 | > { 403 | RefCaller::from(|input: &(bool, u8)| { 404 | if input.0 { 405 | Ok(input.1 + 1) 406 | } else { 407 | Err("failure".into()) 408 | } 409 | }) 410 | } 411 | 412 | #[test] 413 | fn test_ref_call_ok() { 414 | let mut worker = ref_caller(); 415 | assert!(matches!(worker.apply((true, 5), &Context::empty()), Ok(6))); 416 | } 417 | 418 | #[test] 419 | fn test_clone_ref_caller() { 420 | let worker1 = ref_caller(); 421 | let worker2 = worker1.clone(); 422 | let mut f = worker2.into_inner(); 423 | assert!(matches!(f(&(true, 5)), Ok(6))); 424 | } 425 | 426 | #[test] 427 | fn test_ref_call_fail() { 428 | let mut worker = ref_caller(); 429 | let result = worker.apply((false, 5), &Context::empty()); 430 | let _error = String::from("failure"); 431 | assert!(matches!( 432 | result, 433 | Err(ApplyError::Fatal { 434 | input: Some((false, 5)), 435 | error: _error 436 | }) 437 | )); 438 | } 439 | } 440 | -------------------------------------------------------------------------------- /src/bee/stock/echo.rs: -------------------------------------------------------------------------------- 1 | use crate::bee::{Context, Worker, WorkerResult}; 2 | use derive_more::Debug; 3 | use std::marker::PhantomData; 4 | use std::{any, fmt}; 5 | 6 | /// A `Worker` that simply returns the input. 
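// A brief sketch of `EchoWorker` on its own, mirroring the test module at the bottom of
// this file (paths assumed from that module): the worker hands its input straight back,
// which makes it a convenient stand-in worker in examples and tests.
#[test]
fn echo_usage_sketch() {
    use crate::bee::{Context, Worker};

    let mut echo = EchoWorker::<String>::default();
    let out = echo.apply("hello".to_string(), &Context::empty()).unwrap();
    assert_eq!(out, "hello");
}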
7 | #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] 8 | #[debug("EchoWorker<{}>", any::type_name::())] 9 | pub struct EchoWorker(PhantomData); 10 | 11 | impl Worker for EchoWorker { 12 | type Input = T; 13 | type Output = T; 14 | type Error = (); 15 | 16 | #[inline] 17 | fn apply(&mut self, input: Self::Input, _: &Context) -> WorkerResult { 18 | Ok(input) 19 | } 20 | } 21 | 22 | #[cfg(test)] 23 | #[cfg_attr(coverage_nightly, coverage(off))] 24 | mod tests { 25 | use super::*; 26 | use crate::bee::Context; 27 | 28 | #[test] 29 | fn test_echo() { 30 | let mut echo = EchoWorker::::default(); 31 | assert_eq!(1, echo.apply(1, &Context::empty()).unwrap()); 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /src/bee/stock/mod.rs: -------------------------------------------------------------------------------- 1 | mod call; 2 | mod echo; 3 | mod thunk; 4 | 5 | pub use call::{Caller, OnceCaller, RefCaller, RetryCaller}; 6 | pub use echo::EchoWorker; 7 | pub use thunk::{FunkWorker, PunkWorker, Thunk, ThunkWorker}; 8 | -------------------------------------------------------------------------------- /src/bee/stock/thunk.rs: -------------------------------------------------------------------------------- 1 | use crate::bee::{ApplyError, Context, Worker, WorkerResult}; 2 | use crate::boxed::BoxedFnOnce; 3 | use crate::panic::Panic; 4 | use derive_more::Debug; 5 | use std::marker::PhantomData; 6 | use std::{any, fmt}; 7 | 8 | /// A `Worker` that executes infallible `Thunk`s when applied. 9 | #[derive(Debug)] 10 | #[debug("ThunkWorker<{}>", any::type_name::())] 11 | pub struct ThunkWorker(PhantomData); 12 | 13 | impl Default for ThunkWorker { 14 | fn default() -> Self { 15 | Self(PhantomData) 16 | } 17 | } 18 | 19 | impl Clone for ThunkWorker { 20 | fn clone(&self) -> Self { 21 | Self::default() 22 | } 23 | } 24 | 25 | impl Worker for ThunkWorker { 26 | type Input = Thunk; 27 | type Output = T; 28 | type Error = (); 29 | 30 | #[inline] 31 | fn apply(&mut self, f: Self::Input, _: &Context) -> WorkerResult { 32 | Ok(f.0.call_box()) 33 | } 34 | } 35 | 36 | /// A `Worker` that executes fallible `Thunk>`s when applied. 37 | #[derive(Debug)] 38 | #[debug("FunkWorker<{}, {}>", any::type_name::(), any::type_name::())] 39 | pub struct FunkWorker(PhantomData, PhantomData); 40 | 41 | impl Default for FunkWorker { 42 | fn default() -> Self { 43 | Self(PhantomData, PhantomData) 44 | } 45 | } 46 | 47 | impl Clone for FunkWorker { 48 | fn clone(&self) -> Self { 49 | Self::default() 50 | } 51 | } 52 | 53 | impl Worker for FunkWorker 54 | where 55 | T: Send + fmt::Debug + 'static, 56 | E: Send + fmt::Debug + 'static, 57 | { 58 | type Input = Thunk>; 59 | type Output = T; 60 | type Error = E; 61 | 62 | #[inline] 63 | fn apply(&mut self, f: Self::Input, _: &Context) -> WorkerResult { 64 | f.0.call_box() 65 | .map_err(|error| ApplyError::Fatal { error, input: None }) 66 | } 67 | } 68 | 69 | /// A `Worker` that executes `Thunk`s that may panic. A panic is caught and returned as an 70 | /// `ApplyError::Panic` error. 
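// A short sketch contrasting `ThunkWorker` and `FunkWorker`, using the same paths as the
// test module below: `Thunk::from` wraps an infallible closure, while `Thunk::fallible`
// wraps one returning a `Result` whose error surfaces through the worker's error type.
#[test]
fn thunk_usage_sketch() {
    use crate::bee::{Context, Worker};

    // Infallible thunk: the worker's error type is `()`.
    let mut worker = ThunkWorker::<i32>::default();
    let five = worker
        .apply(Thunk::from(|| 2 + 3), &Context::empty())
        .unwrap();
    assert_eq!(five, 5);

    // Fallible thunk: an `Err` value comes back as a fatal apply error.
    let mut fallible = FunkWorker::<i32, String>::default();
    let result = fallible.apply(
        Thunk::fallible(|| Err("boom".to_string())),
        &Context::empty(),
    );
    assert!(result.is_err());
}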
71 | #[derive(Debug)] 72 | #[debug("PunkWorker<{}>", any::type_name::())] 73 | pub struct PunkWorker(PhantomData); 74 | 75 | impl Default for PunkWorker { 76 | fn default() -> Self { 77 | Self(PhantomData) 78 | } 79 | } 80 | 81 | impl Clone for PunkWorker { 82 | fn clone(&self) -> Self { 83 | Self::default() 84 | } 85 | } 86 | 87 | impl Worker for PunkWorker { 88 | type Input = Thunk; 89 | type Output = T; 90 | type Error = (); 91 | 92 | fn apply(&mut self, f: Self::Input, _: &Context) -> WorkerResult { 93 | Panic::try_call_boxed(None, f.0).map_err(|payload| ApplyError::Panic { 94 | input: None, 95 | payload, 96 | }) 97 | } 98 | } 99 | 100 | /// A wrapper around a closure that can be executed exactly once by a worker in a `Hive`. 101 | #[derive(Debug)] 102 | #[debug("Thunk<{}>", any::type_name::())] 103 | pub struct Thunk(Box + Send>); 104 | 105 | impl T + Send + 'static> From for Thunk { 106 | fn from(f: F) -> Self { 107 | Self(Box::new(f)) 108 | } 109 | } 110 | 111 | impl Thunk> { 112 | pub fn fallible Result + Send + 'static>(f: F) -> Self { 113 | Self(Box::new(f)) 114 | } 115 | } 116 | 117 | #[cfg(test)] 118 | #[cfg_attr(coverage_nightly, coverage(off))] 119 | mod tests { 120 | use super::*; 121 | use crate::bee::Context; 122 | 123 | #[test] 124 | fn test_thunk() { 125 | let mut worker = ThunkWorker::::default(); 126 | let thunk = Thunk::from(|| 5); 127 | assert_eq!(5, worker.apply(thunk, &Context::empty()).unwrap()); 128 | } 129 | 130 | #[test] 131 | fn test_funk_ok() { 132 | let mut worker = FunkWorker::::default(); 133 | let funk = Thunk::fallible(|| Ok(1)); 134 | assert_eq!(1, worker.apply(funk, &Context::empty()).unwrap()) 135 | } 136 | 137 | #[test] 138 | fn test_funk_error() { 139 | let mut worker = FunkWorker::::default(); 140 | let funk = Thunk::fallible(|| Err("failure".into())); 141 | let result = worker.apply(funk, &Context::empty()); 142 | let _error = String::from("failure"); 143 | assert!(matches!( 144 | result, 145 | Err(ApplyError::Fatal { 146 | input: None, 147 | error: _error 148 | }) 149 | )); 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/bee/worker.rs: -------------------------------------------------------------------------------- 1 | //! Worker bee traits. 2 | use super::{ApplyError, ApplyRefError, Context}; 3 | use crate::panic::Panic; 4 | use std::fmt::Debug; 5 | 6 | /// Alias for an `ApplyError` whose `I`nput and `E`rror paramters are taken from `W::Input` and 7 | /// `W::Error` respectively. 8 | pub type WorkerError = ApplyError<::Input, ::Error>; 9 | pub type WorkerResult = Result<::Output, WorkerError>; 10 | 11 | /// A trait for stateful, fallible, idempotent functions. 12 | pub trait Worker: Debug + Sized + 'static { 13 | /// The type of the input to this function. 14 | type Input: Send; 15 | /// The type of the output from this function. 16 | type Output: Send; 17 | /// The type of error produced by this function. 18 | type Error: Send + Debug; 19 | 20 | /// Applies this `Worker`'s function to the given input of type `Self::Input` and returns a 21 | /// `Result` containing the output of type `Self::Output` or an [`ApplyError`] that indicates 22 | /// whether the task can be retried. 23 | /// 24 | /// The [`Context`] parameter provides additional context for the task, including: 25 | /// * task_id: the ID of the task within the [`Hive`](crate::hive::Hive). This value is used 26 | /// for ordering results. 27 | /// * attempt: the retry attempt number. 
The attempt value is `0` the first time the task is 28 | /// attempted and increases by `1` for each subsequent retry attempt. (Note: retrying is only 29 | /// supported when the `retry` feature is enabled.) 30 | /// * cancelled: whether the task has been cancelled and should exit early with an 31 | /// [`ApplyError::Cancelled`] result. 32 | /// 33 | /// This method should not panic. If it may panic, then [`Panic::try_call`] should be used to 34 | /// catch the panic and turn it into an [`ApplyError::Panic`] error. 35 | fn apply(&mut self, _: Self::Input, _: &Context) -> WorkerResult; 36 | 37 | /// Applies this `Worker`'s function sequentially to an iterator of inputs and returns a 38 | /// iterator over the outputs. 39 | fn map( 40 | &mut self, 41 | inputs: impl IntoIterator, 42 | ) -> impl Iterator> { 43 | let ctx = Context::empty(); 44 | inputs.into_iter().map(move |input| { 45 | self.apply(input, &ctx).map_err(|error| match error { 46 | ApplyError::Retryable { error, .. } => error, 47 | ApplyError::Fatal { error, .. } => error, 48 | _ => panic!("unexpected error"), 49 | }) 50 | }) 51 | } 52 | } 53 | 54 | /// Alias for an `ApplyRefError` whose `I`nput and `E`rror paramters are taken from `W::Input` and 55 | /// `W::Error` respectively. 56 | pub type RefWorkerError = ApplyRefError<::Error>; 57 | pub type RefWorkerResult = Result<::Output, RefWorkerError>; 58 | 59 | /// A trait for stateful, fallible, idempotent functions that take a reference to their input. 60 | pub trait RefWorker: Debug + Sized + 'static { 61 | /// The type of the input to this funciton. 62 | type Input: Send; 63 | /// The type of the output from this function. 64 | type Output: Send; 65 | /// The type of error produced by this function. 66 | type Error: Send + Debug; 67 | 68 | fn apply_ref(&mut self, _: &Self::Input, _: &Context) -> RefWorkerResult; 69 | } 70 | 71 | /// Blanket implementation of `Worker` for `RefWorker` that calls `apply_ref` and catches any 72 | /// panic. This enables the `input` to be preserved on panic, whereas it is lost when implementing 73 | /// `Worker` directly (without manual panic handling). 
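// A sketch of the guarantee described above, in the style of the tests at the bottom of
// this file: a `RefWorker` whose body panics still hands its input back through the
// blanket `Worker` implementation, wrapped in `ApplyError::Panic`.
#[test]
fn ref_worker_panic_sketch() {
    use crate::bee::{ApplyError, Context};

    #[derive(Debug)]
    struct Panicky;

    impl RefWorker for Panicky {
        type Input = u8;
        type Output = u8;
        type Error = ();

        fn apply_ref(&mut self, input: &u8, _: &Context<u8>) -> RefWorkerResult<Self> {
            if *input == 0 {
                panic!("zero is not allowed")
            }
            Ok(*input)
        }
    }

    let mut worker = Panicky;
    assert!(matches!(
        worker.apply(0, &Context::empty()),
        Err(ApplyError::Panic { input: Some(0), .. })
    ));
}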
74 | impl> Worker for T 75 | where 76 | I: Send, 77 | O: Send, 78 | E: Send + Debug, 79 | { 80 | type Input = I; 81 | type Output = O; 82 | type Error = E; 83 | 84 | fn apply(&mut self, input: Self::Input, ctx: &Context) -> WorkerResult { 85 | match Panic::try_call(None, || self.apply_ref(&input, ctx)) { 86 | Ok(Ok(output)) => Ok(output), 87 | Ok(Err(error)) => Err(error.into_apply_error(input)), 88 | Err(payload) => Err(ApplyError::Panic { 89 | input: Some(input), 90 | payload, 91 | }), 92 | } 93 | } 94 | } 95 | 96 | #[cfg(test)] 97 | #[cfg_attr(coverage_nightly, coverage(off))] 98 | mod tests { 99 | use super::{ApplyRefError, RefWorker, RefWorkerResult, Worker, WorkerResult}; 100 | use crate::bee::{ApplyError, Context}; 101 | 102 | #[derive(Debug)] 103 | struct MyWorker; 104 | 105 | impl Worker for MyWorker { 106 | type Input = u8; 107 | type Output = u8; 108 | type Error = (); 109 | 110 | fn apply(&mut self, input: Self::Input, _: &Context) -> WorkerResult { 111 | Ok(input + 1) 112 | } 113 | } 114 | 115 | #[test] 116 | fn test_map() { 117 | let mut worker = MyWorker; 118 | assert_eq!( 119 | 55u8, 120 | worker 121 | .map(0..10) 122 | .collect::, _>>() 123 | .unwrap() 124 | .into_iter() 125 | .sum() 126 | ); 127 | } 128 | 129 | #[derive(Debug)] 130 | struct MyRefWorker; 131 | 132 | impl RefWorker for MyRefWorker { 133 | type Input = u8; 134 | type Output = u8; 135 | type Error = (); 136 | 137 | fn apply_ref( 138 | &mut self, 139 | input: &Self::Input, 140 | _: &Context, 141 | ) -> RefWorkerResult { 142 | match *input { 143 | 0 => Err(ApplyRefError::Retryable(())), 144 | 1 => Err(ApplyRefError::Fatal(())), 145 | 2 => Err(ApplyRefError::Cancelled), 146 | i => Ok(i + 1), 147 | } 148 | } 149 | } 150 | 151 | #[test] 152 | fn test_apply() { 153 | let mut worker = MyRefWorker; 154 | let ctx = Context::empty(); 155 | assert!(matches!(worker.apply(5, &ctx), Ok(6))); 156 | } 157 | 158 | #[test] 159 | fn test_apply_fail() { 160 | let mut worker = MyRefWorker; 161 | let ctx = Context::empty(); 162 | assert!(matches!( 163 | worker.apply(0, &ctx), 164 | Err(ApplyError::Retryable { input: 0, .. }) 165 | )); 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /src/boxed.rs: -------------------------------------------------------------------------------- 1 | /// Trait implemented by a `Box`ed callable that can be executed once. 2 | pub trait BoxedFnOnce { 3 | /// The type returned by the callable when called. 4 | type Output; 5 | 6 | /// Calls the boxed callable and returns the result. 7 | fn call_box(self: Box) -> Self::Output; 8 | } 9 | 10 | /// Blanket implementation of `BoxedFnOnce` for `FnOnce`. 11 | impl T> BoxedFnOnce for F { 12 | type Output = T; 13 | 14 | #[inline] 15 | fn call_box(self: Box) -> Self::Output { 16 | (*self)() 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /src/channel.rs: -------------------------------------------------------------------------------- 1 | //! Support for various channel implementations. 2 | //! 3 | //! A maximum one of the channel feature may be enabled. If no channel feature is enabled, then 4 | //! `std::sync::mpsc` will be used. 5 | use derive_more::Debug; 6 | pub use prelude::channel; 7 | pub(crate) use prelude::*; 8 | use std::any; 9 | 10 | /// Possible results of calling `ReceiverExt::try_recv_msg()` on a `Receiver`. 11 | #[derive(Debug)] 12 | pub enum Message { 13 | /// A message was successfully received from the channel. 
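// A sketch of the non-blocking receive pattern this enum supports, assuming the
// `channel()` constructor and the `SenderExt`/`ReceiverExt` traits defined below:
//
//     let (tx, rx) = channel::<u32>();
//     assert!(tx.try_send_msg(7).is_none());
//     match rx.try_recv_msg() {
//         Message::Received(n) => assert_eq!(n, 7),
//         Message::ChannelEmpty => { /* nothing queued yet */ }
//         Message::ChannelDisconnected => { /* all senders have been dropped */ }
//     }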
14 | #[debug("Received: {}", any::type_name::())] 15 | Received(T), 16 | /// The channel was disconnected. 17 | ChannelDisconnected, 18 | /// The channel had no messages to receive. 19 | ChannelEmpty, 20 | } 21 | 22 | pub trait SenderExt { 23 | /// Attempts to send a message to the channel. Returns `None` if the send was successful, or 24 | /// `Some(t)` if the send was not successful due to the channel being disconnected. 25 | fn try_send_msg(&self, msg: T) -> Option; 26 | } 27 | 28 | /// Trait implemented for all channel `Receiver` types that standardizes non-blocking `recv()`. 29 | pub trait ReceiverExt { 30 | /// Attempts to receive a message from the channel. Returns `Message::Received` if a message 31 | /// was successfully received, otherwise one of `Message`'s error variants. 32 | fn try_recv_msg(&self) -> Message; 33 | } 34 | 35 | #[cfg(not(any(feature = "crossbeam", feature = "flume", feature = "loole")))] 36 | pub mod prelude { 37 | pub use std::sync::mpsc::{Receiver, SendError, Sender, channel}; 38 | 39 | use super::{Message, ReceiverExt, SenderExt}; 40 | use std::sync::mpsc::TryRecvError; 41 | 42 | impl SenderExt for Sender { 43 | fn try_send_msg(&self, t: T) -> Option { 44 | match self.send(t) { 45 | Ok(_) => None, 46 | Err(SendError(t)) => Some(t), 47 | } 48 | } 49 | } 50 | 51 | impl ReceiverExt for Receiver { 52 | fn try_recv_msg(&self) -> super::Message { 53 | match self.try_recv() { 54 | Ok(t) => Message::Received(t), 55 | Err(TryRecvError::Empty) => Message::ChannelEmpty, 56 | Err(TryRecvError::Disconnected) => Message::ChannelDisconnected, 57 | } 58 | } 59 | } 60 | } 61 | 62 | #[cfg(all(feature = "crossbeam", not(any(feature = "flume", feature = "loole"))))] 63 | pub mod prelude { 64 | pub use crossbeam_channel::{Receiver, SendError, Sender, unbounded as channel}; 65 | 66 | use super::{Message, ReceiverExt, SenderExt}; 67 | use crossbeam_channel::TryRecvError; 68 | 69 | impl SenderExt for Sender { 70 | fn try_send_msg(&self, t: T) -> Option { 71 | match self.send(t) { 72 | Ok(_) => None, 73 | Err(SendError(t)) => Some(t), 74 | } 75 | } 76 | } 77 | 78 | impl ReceiverExt for Receiver { 79 | fn try_recv_msg(&self) -> super::Message { 80 | match self.try_recv() { 81 | Ok(t) => Message::Received(t), 82 | Err(TryRecvError::Empty) => Message::ChannelEmpty, 83 | Err(TryRecvError::Disconnected) => Message::ChannelDisconnected, 84 | } 85 | } 86 | } 87 | } 88 | 89 | #[cfg(all(feature = "flume", not(any(feature = "crossbeam", feature = "loole"))))] 90 | pub mod prelude { 91 | pub use flume::{Receiver, SendError, Sender, unbounded as channel}; 92 | 93 | use super::{Message, ReceiverExt, SenderExt}; 94 | use flume::TryRecvError; 95 | 96 | impl SenderExt for Sender { 97 | fn try_send_msg(&self, t: T) -> Option { 98 | match self.send(t) { 99 | Ok(_) => None, 100 | Err(SendError(t)) => Some(t), 101 | } 102 | } 103 | } 104 | 105 | impl ReceiverExt for Receiver { 106 | fn try_recv_msg(&self) -> super::Message { 107 | match self.try_recv() { 108 | Ok(t) => Message::Received(t), 109 | Err(TryRecvError::Empty) => Message::ChannelEmpty, 110 | Err(TryRecvError::Disconnected) => Message::ChannelDisconnected, 111 | } 112 | } 113 | } 114 | } 115 | 116 | #[cfg(all(feature = "loole", not(any(feature = "crossbeam", feature = "flume"))))] 117 | pub mod prelude { 118 | pub use loole::{Receiver, SendError, Sender, unbounded as channel}; 119 | 120 | use super::{Message, ReceiverExt, SenderExt}; 121 | use loole::TryRecvError; 122 | 123 | impl SenderExt for Sender { 124 | fn try_send_msg(&self, t: T) -> 
Option { 125 | match self.send(t) { 126 | Ok(_) => None, 127 | Err(SendError(t)) => Some(t), 128 | } 129 | } 130 | } 131 | 132 | impl ReceiverExt for Receiver { 133 | fn try_recv_msg(&self) -> super::Message { 134 | match self.try_recv() { 135 | Ok(t) => Message::Received(t), 136 | Err(TryRecvError::Empty) => Message::ChannelEmpty, 137 | Err(TryRecvError::Disconnected) => Message::ChannelDisconnected, 138 | } 139 | } 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /src/hive/builder/bee.rs: -------------------------------------------------------------------------------- 1 | use super::{BuilderConfig, FullBuilder, Token}; 2 | use crate::bee::{CloneQueen, DefaultQueen, Queen, QueenCell, QueenMut, Worker}; 3 | use crate::hive::{ChannelTaskQueues, Config, TaskQueues, WorkstealingTaskQueues}; 4 | use derive_more::Debug; 5 | use std::any; 6 | 7 | /// A Builder for creating `Hive` instances for specific [`Worker`] and [`TaskQueues`] types. 8 | #[derive(Clone, Default, Debug)] 9 | pub struct BeeBuilder { 10 | config: Config, 11 | #[debug("{}",any::type_name::())] 12 | queen: Q, 13 | } 14 | 15 | impl BeeBuilder { 16 | /// Creates a new `BeeBuilder` with the given queen and no options configured. 17 | pub fn empty(queen: Q) -> Self { 18 | Self { 19 | config: Config::empty(), 20 | queen, 21 | } 22 | } 23 | 24 | /// Creates a new `BeeBuilder` with the given `queen` and options configured with global 25 | /// preset values. 26 | pub fn preset(queen: Q) -> Self { 27 | Self { 28 | config: Config::default(), 29 | queen, 30 | } 31 | } 32 | 33 | /// Creates a new `BeeBuilder` from an existing `config` and a `queen`. 34 | pub(super) fn from_config_and_queen(config: Config, queen: Q) -> Self { 35 | Self { config, queen } 36 | } 37 | 38 | /// Creates a new `FullBuilder` with the current configuration and queen and specified 39 | /// `TaskQueues` type. 40 | pub fn with_queues>(self) -> FullBuilder { 41 | FullBuilder::from_config_and_queen(self.config, self.queen) 42 | } 43 | 44 | /// Creates a new `FullBuilder` with the current configuration and queen and channel-based 45 | /// task queues. 46 | pub fn with_channel_queues(self) -> FullBuilder> { 47 | FullBuilder::from_config_and_queen(self.config, self.queen) 48 | } 49 | 50 | /// Creates a new `FullBuilder` with the current configuration and queen and workstealing 51 | /// task queues. 52 | pub fn with_workstealing_queues(self) -> FullBuilder> { 53 | FullBuilder::from_config_and_queen(self.config, self.queen) 54 | } 55 | } 56 | 57 | impl BeeBuilder { 58 | /// Creates a new `BeeBuilder` with a queen created with 59 | /// [`Q::default()`](std::default::Default) and no options configured. 60 | pub fn empty_with_queen_default() -> Self { 61 | Self { 62 | config: Config::empty(), 63 | queen: Q::default(), 64 | } 65 | } 66 | 67 | /// Creates a new `BeeBuilder` with a queen created with 68 | /// [`Q::default()`](std::default::Default) and options configured with global defaults. 69 | pub fn preset_with_queen_default() -> Self { 70 | Self { 71 | config: Config::default(), 72 | queen: Q::default(), 73 | } 74 | } 75 | } 76 | 77 | impl BeeBuilder> { 78 | /// Creates a new `BeeBuilder` with a queen created with 79 | /// [`Q::default()`](std::default::Default) and no options configured. 
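// A sketch of the typical path from a stock worker to a `Hive`, assuming `EchoWorker`
// from `crate::bee::stock`: start from a cloneable worker, choose a `TaskQueues`
// implementation, then build.
#[allow(dead_code)]
fn bee_builder_usage_sketch() {
    use crate::bee::stock::EchoWorker;

    let _hive = BeeBuilder::empty_with_worker(EchoWorker::<u8>::default())
        .with_channel_queues()
        .build();
}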
80 | pub fn empty_with_queen_mut_default() -> Self { 81 | Self { 82 | config: Config::empty(), 83 | queen: QueenCell::new(Q::default()), 84 | } 85 | } 86 | 87 | /// Creates a new `BeeBuilder` with a queen created with 88 | /// [`Q::default()`](std::default::Default) and options configured with global defaults. 89 | pub fn preset_with_queen_mut_default() -> Self { 90 | Self { 91 | config: Config::default(), 92 | queen: QueenCell::new(Q::default()), 93 | } 94 | } 95 | } 96 | 97 | impl BeeBuilder> { 98 | /// Creates a new `BeeBuilder` with a `CloneQueen` created with the given `worker` and no 99 | /// options configured. 100 | pub fn empty_with_worker(worker: W) -> Self { 101 | Self { 102 | config: Config::empty(), 103 | queen: CloneQueen::new(worker), 104 | } 105 | } 106 | 107 | /// Creates a new `BeeBuilder` with a `CloneQueen` created with the given `worker` and 108 | /// and options configured with global defaults. 109 | pub fn preset_with_worker(worker: W) -> Self { 110 | Self { 111 | config: Config::default(), 112 | queen: CloneQueen::new(worker), 113 | } 114 | } 115 | } 116 | 117 | impl BeeBuilder> { 118 | /// Creates a new `BeeBuilder` with a `DefaultQueen` created with the given `Worker` type and 119 | /// no options configured. 120 | pub fn empty_with_worker_default() -> Self { 121 | Self { 122 | config: Config::empty(), 123 | queen: DefaultQueen::default(), 124 | } 125 | } 126 | 127 | /// Creates a new `BeeBuilder` with a `DefaultQueen` created with the given `Worker` type and 128 | /// and options configured with global defaults. 129 | pub fn preset_with_worker_default() -> Self { 130 | Self { 131 | config: Config::default(), 132 | queen: DefaultQueen::default(), 133 | } 134 | } 135 | } 136 | 137 | impl BuilderConfig for BeeBuilder { 138 | fn config_ref(&mut self, _: Token) -> &mut Config { 139 | &mut self.config 140 | } 141 | } 142 | 143 | impl From for BeeBuilder { 144 | fn from(value: Config) -> Self { 145 | Self::from_config_and_queen(value, Q::default()) 146 | } 147 | } 148 | 149 | impl From for BeeBuilder { 150 | fn from(value: Q) -> Self { 151 | Self::from_config_and_queen(Config::default(), value) 152 | } 153 | } 154 | 155 | #[cfg(test)] 156 | #[cfg_attr(coverage_nightly, coverage(off))] 157 | mod tests { 158 | use super::*; 159 | use crate::bee::stock::EchoWorker; 160 | use crate::bee::{CloneQueen, DefaultQueen, Queen, QueenCell, QueenMut}; 161 | use rstest::rstest; 162 | 163 | #[derive(Clone, Default)] 164 | struct TestQueen; 165 | 166 | impl Queen for TestQueen { 167 | type Kind = EchoWorker; 168 | 169 | fn create(&self) -> Self::Kind { 170 | EchoWorker::default() 171 | } 172 | } 173 | 174 | impl QueenMut for TestQueen { 175 | type Kind = EchoWorker; 176 | 177 | fn create(&mut self) -> Self::Kind { 178 | EchoWorker::default() 179 | } 180 | } 181 | 182 | #[rstest] 183 | fn test_queen( 184 | #[values( 185 | BeeBuilder::::empty, 186 | BeeBuilder::::preset 187 | )] 188 | factory: F, 189 | #[values( 190 | BeeBuilder::::with_channel_queues, 191 | BeeBuilder::::with_workstealing_queues, 192 | )] 193 | with_fn: W, 194 | ) where 195 | F: Fn(TestQueen) -> BeeBuilder, 196 | T: TaskQueues>, 197 | W: Fn(BeeBuilder) -> FullBuilder, 198 | { 199 | let bee_builder = factory(TestQueen); 200 | let full_builder = with_fn(bee_builder); 201 | let _hive = full_builder.build(); 202 | } 203 | 204 | #[rstest] 205 | fn test_queen_default( 206 | #[values( 207 | BeeBuilder::::empty_with_queen_default, 208 | BeeBuilder::::preset_with_queen_default 209 | )] 210 | factory: F, 211 | #[values( 212 | 
BeeBuilder::::with_channel_queues, 213 | BeeBuilder::::with_workstealing_queues, 214 | )] 215 | with_fn: W, 216 | ) where 217 | F: Fn() -> BeeBuilder, 218 | T: TaskQueues>, 219 | W: Fn(BeeBuilder) -> FullBuilder, 220 | { 221 | let bee_builder = factory(); 222 | let full_builder = with_fn(bee_builder); 223 | let _hive = full_builder.build(); 224 | } 225 | 226 | #[rstest] 227 | fn test_queen_mut_default( 228 | #[values( 229 | BeeBuilder::>::empty_with_queen_mut_default, 230 | BeeBuilder::>::preset_with_queen_mut_default 231 | )] 232 | factory: F, 233 | #[values( 234 | BeeBuilder::>::with_channel_queues, 235 | BeeBuilder::>::with_workstealing_queues, 236 | )] 237 | with_fn: W, 238 | ) where 239 | F: Fn() -> BeeBuilder>, 240 | T: TaskQueues>, 241 | W: Fn(BeeBuilder>) -> FullBuilder, T>, 242 | { 243 | let bee_builder = factory(); 244 | let full_builder = with_fn(bee_builder); 245 | let _hive = full_builder.build(); 246 | } 247 | 248 | #[rstest] 249 | fn test_worker( 250 | #[values( 251 | BeeBuilder::>>::empty_with_worker, 252 | BeeBuilder::>>::preset_with_worker 253 | )] 254 | factory: F, 255 | #[values( 256 | BeeBuilder::>>::with_channel_queues, 257 | BeeBuilder::>>::with_workstealing_queues, 258 | )] 259 | with_fn: W, 260 | ) where 261 | F: Fn(EchoWorker) -> BeeBuilder>>, 262 | T: TaskQueues>, 263 | W: Fn( 264 | BeeBuilder>>, 265 | ) -> FullBuilder>, T>, 266 | { 267 | let bee_builder = factory(EchoWorker::default()); 268 | let full_builder = with_fn(bee_builder); 269 | let _hive = full_builder.build(); 270 | } 271 | 272 | #[rstest] 273 | fn test_worker_default( 274 | #[values( 275 | BeeBuilder::>>::empty_with_worker_default, 276 | BeeBuilder::>>::preset_with_worker_default 277 | )] 278 | factory: F, 279 | #[values( 280 | BeeBuilder::>>::with_channel_queues, 281 | BeeBuilder::>>::with_workstealing_queues, 282 | )] 283 | with_fn: W, 284 | ) where 285 | F: Fn() -> BeeBuilder>>, 286 | T: TaskQueues>, 287 | W: Fn( 288 | BeeBuilder>>, 289 | ) -> FullBuilder>, T>, 290 | { 291 | let bee_builder = factory(); 292 | let full_builder = with_fn(bee_builder); 293 | let _hive = full_builder.build(); 294 | } 295 | } 296 | -------------------------------------------------------------------------------- /src/hive/builder/full.rs: -------------------------------------------------------------------------------- 1 | use super::{BuilderConfig, Token}; 2 | use crate::bee::Queen; 3 | use crate::hive::{Config, Hive, TaskQueues}; 4 | use derive_more::Debug; 5 | use std::any; 6 | use std::marker::PhantomData; 7 | 8 | /// A Builder for creating `Hive` instances for specific [`Queen`] and [`TaskQueues`] types. 9 | #[derive(Clone, Default, Debug)] 10 | pub struct FullBuilder> { 11 | config: Config, 12 | #[debug("{}", any::type_name::())] 13 | queen: Q, 14 | #[debug("{}", any::type_name::())] 15 | _queues: PhantomData, 16 | } 17 | 18 | impl> FullBuilder { 19 | /// Creates a new `FullBuilder` with the given queen and no options configured. 20 | pub fn empty(queen: Q) -> Self { 21 | Self { 22 | config: Config::empty(), 23 | queen, 24 | _queues: PhantomData, 25 | } 26 | } 27 | 28 | /// Creates a new `FullBuilder` with the given `queen` and options configured with global 29 | /// defaults. 30 | pub fn preset(queen: Q) -> Self { 31 | Self { 32 | config: Config::default(), 33 | queen, 34 | _queues: PhantomData, 35 | } 36 | } 37 | 38 | /// Creates a new `FullBuilder` from an existing `config` and a `queen`. 
39 | pub(super) fn from_config_and_queen(config: Config, queen: Q) -> Self { 40 | Self { 41 | config, 42 | queen, 43 | _queues: PhantomData, 44 | } 45 | } 46 | 47 | /// Consumes this `Builder` and returns a new [`Hive`]. 48 | pub fn build(self) -> Hive { 49 | Hive::new(self.config, self.queen) 50 | } 51 | } 52 | 53 | impl> From for FullBuilder { 54 | fn from(value: Config) -> Self { 55 | Self::from_config_and_queen(value, Q::default()) 56 | } 57 | } 58 | 59 | impl> From for FullBuilder { 60 | fn from(value: Q) -> Self { 61 | Self::from_config_and_queen(Config::default(), value) 62 | } 63 | } 64 | 65 | impl> BuilderConfig for FullBuilder { 66 | fn config_ref(&mut self, _: Token) -> &mut Config { 67 | &mut self.config 68 | } 69 | } 70 | 71 | #[cfg(test)] 72 | #[cfg_attr(coverage_nightly, coverage(off))] 73 | mod tests { 74 | use super::*; 75 | use crate::bee::Queen; 76 | use crate::bee::stock::EchoWorker; 77 | use crate::hive::{ChannelTaskQueues, WorkstealingTaskQueues}; 78 | use rstest::rstest; 79 | 80 | #[derive(Clone, Default)] 81 | struct TestQueen; 82 | 83 | impl Queen for TestQueen { 84 | type Kind = EchoWorker; 85 | 86 | fn create(&self) -> Self::Kind { 87 | EchoWorker::default() 88 | } 89 | } 90 | 91 | #[rstest] 92 | fn test_channel( 93 | #[values( 94 | FullBuilder::>>::empty, 95 | FullBuilder::>>::preset 96 | )] 97 | factory: F, 98 | ) where 99 | F: Fn(TestQueen) -> FullBuilder>>, 100 | { 101 | let builder = factory(TestQueen); 102 | let _hive = builder.build(); 103 | } 104 | 105 | #[rstest] 106 | fn test_workstealing( 107 | #[values( 108 | FullBuilder::>>::empty, 109 | FullBuilder::>>::preset 110 | )] 111 | factory: F, 112 | ) where 113 | F: Fn(TestQueen) -> FullBuilder>>, 114 | { 115 | let builder = factory(TestQueen); 116 | let _hive = builder.build(); 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/hive/builder/mod.rs: -------------------------------------------------------------------------------- 1 | //! There are a few different builder types. 2 | //! 3 | //! * Open: has no type parameters; can only set config parameters. Has methods to create 4 | //! typed builders. 5 | //! * Bee-typed: has type parameters for the `Worker` and `Queen` types. 6 | //! * Queue-typed: builder instances that are specific to the `TaskQueues` type. 7 | //! * Fully-typed: builder that has type parameters for the `Worker`, `Queen`, and `TaskQueues` 8 | //! types. This is the only builder with a `build` method to create a `Hive`. 9 | //! 10 | //! All builders implement the `Builder` trait, which provides methods to set configuration 11 | //! parameters. The configuration options available: 12 | //! * [`Builder::num_threads`]: number of worker threads that will be spawned by the built `Hive`. 13 | //! * [`Builder::with_default_num_threads`] will set `num_threads` to the global default value. 14 | //! * [`Builder::with_thread_per_core`] will set `num_threads` to the number of available CPU 15 | //! cores. 16 | //! * [`Builder::thread_name`]: thread name for each of the threads spawned by the built `Hive`. By 17 | //! default, threads are unnamed. 18 | //! * [`Builder::thread_stack_size`]: stack size (in bytes) for each of the threads spawned by the 19 | //! built `Hive`. See the 20 | //! [`std::thread`](https://doc.rust-lang.org/stable/std/thread/index.html#stack-size) 21 | //! documentation for details on the default stack size. 22 | //! 23 | //! The following configuration options are available when the `affinity` feature is enabled: 24 | //! 
* [`Builder::core_affinity`]: List of CPU core indices to which the threads should be pinned. 25 | //! * [`Builder::with_default_core_affinity`] will set the list to all CPU core indices, though 26 | //! only the first `num_threads` indices will be used. 27 | //! 28 | //! The following configuration options are available when the `local-batch` feature is enabled: 29 | //! * [`Builder::batch_limit`]: Maximum number of tasks that can queued by a worker. 30 | //! * [`Builder::weight_limit`]: Maximum "weight" of tasks that can be queued by a worker. 31 | //! * [`Builder::with_default_batch_limit`] and [`Builder::with_default_weight_limit`] set the 32 | //! local-batch options to the global defaults, while [`Builder::with_no_local_batching`] 33 | //! disables local-batching. 34 | //! 35 | //! The following configuration options are available when the `retry` feature is enabled: 36 | //! * [`Builder::max_retries`]: maximum number of times a `Worker` will retry an 37 | //! [`ApplyError::Retryable`](crate::bee::ApplyError#Retryable) before giving up. 38 | //! * [`Builder::retry_factor`]: [`Duration`](std::time::Duration) factor for exponential backoff 39 | //! when retrying an `ApplyError::Retryable` error. 40 | //! * [`Builder::with_default_max_retries`] and [`Builder::with_default_retry_factor`] set the 41 | //! retry options to the global defaults, while [`Builder::with_no_retries`] disables retrying. 42 | mod bee; 43 | mod full; 44 | mod open; 45 | mod queue; 46 | 47 | pub use bee::BeeBuilder; 48 | pub use full::FullBuilder; 49 | pub use open::OpenBuilder; 50 | pub use queue::TaskQueuesBuilder; 51 | pub use queue::channel::ChannelBuilder; 52 | pub use queue::workstealing::WorkstealingBuilder; 53 | 54 | use crate::hive::inner::{Builder, BuilderConfig, Token}; 55 | 56 | /// Creates a new `OpenBuilder`. If `with_defaults` is `true`, the builder will be pre-configured 57 | /// with the global defaults. 58 | pub fn open(with_defaults: bool) -> OpenBuilder { 59 | if with_defaults { 60 | OpenBuilder::default() 61 | } else { 62 | OpenBuilder::empty() 63 | } 64 | } 65 | 66 | /// Creates a new `ChannelBuilder`. If `with_defaults` is `true`, the builder will be 67 | /// pre-configured with the global defaults. 68 | pub fn channel(with_defaults: bool) -> ChannelBuilder { 69 | if with_defaults { 70 | ChannelBuilder::default() 71 | } else { 72 | ChannelBuilder::empty() 73 | } 74 | } 75 | /// Creates a new `WorkstealingBuilder`. If `with_defaults` is `true`, the builder will be 76 | /// pre-configured with the global defaults. 
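// A sketch of the configuration flow described in the module docs above, assuming the
// `Builder` trait is in scope (as in the test below) and using the stock `EchoWorker`:
// the helper functions return a builder that accepts the common configuration methods
// before being specialized to a worker type.
#[test]
fn builder_config_sketch() {
    use crate::bee::stock::EchoWorker;
    use crate::hive::Builder;

    // `channel(false)` starts from an empty config; set options, then attach a worker type.
    let _hive = channel(false)
        .num_threads(4)
        .thread_name("bee")
        .with_worker_default::<EchoWorker<u16>>()
        .build();
}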
77 | pub fn workstealing(with_defaults: bool) -> WorkstealingBuilder { 78 | if with_defaults { 79 | WorkstealingBuilder::default() 80 | } else { 81 | WorkstealingBuilder::empty() 82 | } 83 | } 84 | 85 | #[cfg(test)] 86 | #[cfg_attr(coverage_nightly, coverage(off))] 87 | mod tests { 88 | use super::*; 89 | use crate::hive::Builder; 90 | use rstest::*; 91 | 92 | #[rstest] 93 | fn test_create B>( 94 | #[values(open, channel, workstealing)] builder_factory: F, 95 | #[values(true, false)] with_defaults: bool, 96 | ) { 97 | let mut builder = builder_factory(with_defaults) 98 | .num_threads(4) 99 | .thread_name("foo") 100 | .thread_stack_size(100); 101 | crate::hive::inner::builder_test_utils::check_builder(&mut builder); 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /src/hive/builder/queue.rs: -------------------------------------------------------------------------------- 1 | use super::{Builder, FullBuilder}; 2 | use crate::bee::{CloneQueen, DefaultQueen, Queen, QueenCell, QueenMut, Worker}; 3 | use crate::hive::TaskQueues; 4 | 5 | /// Trait implemented by builders specialized to a `TaskQueues` type. 6 | pub trait TaskQueuesBuilder: Builder + Clone + Default + Sized { 7 | /// The type of the `TaskQueues` to use when building the `Hive`. 8 | type TaskQueues: TaskQueues; 9 | 10 | /// Creates a new empty `Builder`. 11 | fn empty() -> Self; 12 | 13 | /// Consumes this `Builder` and returns a new [`FullBuilder`] using the given [`Queen`] to 14 | /// create [`Worker`]s. 15 | fn with_queen(self, queen: Q) -> FullBuilder>; 16 | 17 | /// Consumes this `Builder` and returns a new [`FullBuilder`] using a [`Queen`] created with 18 | /// [`Q::default()`](std::default::Default) to create [`Worker`]s. 19 | fn with_queen_default(self) -> FullBuilder> 20 | where 21 | Q: Queen + Default, 22 | { 23 | self.with_queen(Q::default()) 24 | } 25 | 26 | /// Consumes this `Builder` and returns a new [`FullBuilder`] using a [`QueenMut`] created with 27 | /// [`Q::default()`](std::default::Default) to create [`Worker`]s. 28 | fn with_queen_mut_default(self) -> FullBuilder, Self::TaskQueues> 29 | where 30 | Q: QueenMut + Default, 31 | { 32 | self.with_queen(QueenCell::new(Q::default())) 33 | } 34 | 35 | /// Consumes this `Builder` and returns a new [`FullBuilder`] with [`Worker`]s created by 36 | /// cloning `worker`. 37 | fn with_worker(self, worker: W) -> FullBuilder, Self::TaskQueues> 38 | where 39 | W: Worker + Send + Sync + Clone, 40 | { 41 | self.with_queen(CloneQueen::new(worker)) 42 | } 43 | 44 | /// Consumes this `Builder` and returns a new [`FullBuilder`] with [`Worker`]s created using 45 | /// [`W::default()`](std::default::Default). 46 | fn with_worker_default(self) -> FullBuilder, Self::TaskQueues> 47 | where 48 | W: Worker + Send + Sync + Default, 49 | { 50 | self.with_queen(DefaultQueen::default()) 51 | } 52 | } 53 | 54 | pub mod channel { 55 | use super::*; 56 | use crate::hive::builder::{BuilderConfig, Token}; 57 | use crate::hive::{ChannelTaskQueues, Config}; 58 | 59 | /// `TaskQueuesBuilder` implementation for channel-based task queues. 
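// A sketch of `with_queen` with a hand-written `Queen`, modeled on the `TestQueen` used
// in the builder tests elsewhere in this tree; `EchoWorker` is the stock worker it
// produces.
#[test]
fn with_queen_usage_sketch() {
    use crate::bee::Queen;
    use crate::bee::stock::EchoWorker;

    #[derive(Clone, Default, Debug)]
    struct EchoQueen;

    impl Queen for EchoQueen {
        type Kind = EchoWorker<u64>;

        fn create(&self) -> Self::Kind {
            EchoWorker::default()
        }
    }

    let _hive = ChannelBuilder::empty().with_queen(EchoQueen).build();
}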
60 | #[derive(Clone, Default, Debug)] 61 | pub struct ChannelBuilder(Config); 62 | 63 | impl BuilderConfig for ChannelBuilder { 64 | fn config_ref(&mut self, _: Token) -> &mut Config { 65 | &mut self.0 66 | } 67 | } 68 | 69 | impl TaskQueuesBuilder for ChannelBuilder { 70 | type TaskQueues = ChannelTaskQueues; 71 | 72 | fn empty() -> Self { 73 | Self(Config::empty()) 74 | } 75 | 76 | /// Consumes this `Builder` and returns a new [`FullBuilder`] using the given [`Queen`] to 77 | /// create [`Worker`]s. 78 | fn with_queen(self, queen: Q) -> FullBuilder> { 79 | FullBuilder::from_config_and_queen(self.0, queen) 80 | } 81 | } 82 | 83 | impl From for ChannelBuilder { 84 | fn from(value: Config) -> Self { 85 | Self(value) 86 | } 87 | } 88 | } 89 | 90 | pub mod workstealing { 91 | use super::*; 92 | use crate::hive::builder::{BuilderConfig, Token}; 93 | use crate::hive::{Config, WorkstealingTaskQueues}; 94 | 95 | /// `TaskQueuesBuilder` implementation for workstealing-based task queues. 96 | #[derive(Clone, Default, Debug)] 97 | pub struct WorkstealingBuilder(Config); 98 | 99 | impl BuilderConfig for WorkstealingBuilder { 100 | fn config_ref(&mut self, _: Token) -> &mut Config { 101 | &mut self.0 102 | } 103 | } 104 | 105 | impl TaskQueuesBuilder for WorkstealingBuilder { 106 | type TaskQueues = WorkstealingTaskQueues; 107 | 108 | fn empty() -> Self { 109 | Self(Config::empty()) 110 | } 111 | 112 | /// Consumes this `Builder` and returns a new [`FullBuilder`] using the given [`Queen`] to 113 | /// create [`Worker`]s. 114 | fn with_queen(self, queen: Q) -> FullBuilder> { 115 | FullBuilder::from_config_and_queen(self.0, queen) 116 | } 117 | } 118 | 119 | impl From for WorkstealingBuilder { 120 | fn from(value: Config) -> Self { 121 | Self(value) 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/hive/context.rs: -------------------------------------------------------------------------------- 1 | //! Implementation of `crate::bee::LocalContext` for a `Hive`. 2 | use crate::bee::{LocalContext, Queen, TaskId, Worker}; 3 | use crate::hive::{OutcomeSender, Shared, TaskQueues, WorkerQueues}; 4 | use std::fmt; 5 | use std::sync::Arc; 6 | 7 | pub struct HiveLocalContext<'a, W, Q, T> 8 | where 9 | W: Worker, 10 | Q: Queen, 11 | T: TaskQueues, 12 | { 13 | worker_queues: &'a T::WorkerQueues, 14 | shared: &'a Arc>, 15 | outcome_tx: Option<&'a OutcomeSender>, 16 | } 17 | 18 | impl<'a, W, Q, T> HiveLocalContext<'a, W, Q, T> 19 | where 20 | W: Worker, 21 | Q: Queen, 22 | T: TaskQueues, 23 | { 24 | /// Creates a new `HiveLocalContext` instance. 
25 | pub fn new( 26 | worker_queues: &'a T::WorkerQueues, 27 | shared: &'a Arc>, 28 | outcome_tx: Option<&'a OutcomeSender>, 29 | ) -> Self { 30 | Self { 31 | worker_queues, 32 | shared, 33 | outcome_tx, 34 | } 35 | } 36 | } 37 | 38 | impl LocalContext for HiveLocalContext<'_, W, Q, T> 39 | where 40 | W: Worker, 41 | Q: Queen, 42 | T: TaskQueues, 43 | { 44 | fn should_cancel_tasks(&self) -> bool { 45 | self.shared.is_suspended() 46 | } 47 | 48 | fn submit_task(&self, input: W::Input) -> TaskId { 49 | let task = self.shared.prepare_task(input, self.outcome_tx); 50 | let task_id = task.id(); 51 | self.worker_queues.push(task); 52 | task_id 53 | } 54 | 55 | #[cfg(test)] 56 | fn thread_index(&self) -> usize { 57 | self.worker_queues.thread_index() 58 | } 59 | } 60 | 61 | impl fmt::Debug for HiveLocalContext<'_, W, Q, T> 62 | where 63 | W: Worker, 64 | Q: Queen, 65 | T: TaskQueues, 66 | { 67 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 68 | f.debug_struct("HiveLocalContext").finish() 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/hive/cores.rs: -------------------------------------------------------------------------------- 1 | //! Utilities for pinning worker threads to CPU cores in a `Hive`. 2 | use core_affinity::{self, CoreId}; 3 | use parking_lot::{Mutex, MutexGuard}; 4 | use std::collections::HashSet; 5 | use std::ops::{BitOr, BitOrAssign, Sub, SubAssign}; 6 | use std::sync::LazyLock; 7 | 8 | /// The sequence of CPU core IDs. 9 | /// 10 | /// This sequence is established the first time an attempt is made to pin threads to CPU cores. The 11 | /// sequence of core IDs is assumed to remain stable throughout the life of the program, e.g., the 12 | /// core ID at index 0 of the sequence must always correspond to the same physical core. If this 13 | /// assumption is violated, it may result in sub-optimal performance (e.g., inability to pin a 14 | /// thread to a core or multiple threads pinned to the same core) but will not cause any panic or 15 | /// undefined behavior. 16 | /// 17 | /// If new cores become available during the life of the program, they are immediately available 18 | /// for worker thread scheduling, but they are *not* available for pinning until the 19 | /// `refresh()` function is called. 20 | pub static CORES: LazyLock = LazyLock::new(CoreIds::from_system); 21 | 22 | /// Global list of CPU core IDs. 23 | /// 24 | /// This is meant to be created at most once, when `CORES` is initialized. 25 | pub struct CoreIds(Mutex>); 26 | 27 | impl CoreIds { 28 | fn from_system() -> Self { 29 | Self::new( 30 | core_affinity::get_core_ids() 31 | .map(|core_ids| core_ids.into_iter().map(Core::from).collect()) 32 | .unwrap_or_default(), 33 | ) 34 | } 35 | 36 | fn new(core_ids: Vec) -> Self { 37 | Self(Mutex::new(core_ids)) 38 | } 39 | 40 | fn get(&self, index: usize) -> Option { 41 | self.0.lock().get(index).cloned() 42 | } 43 | 44 | fn update_from(&self, mut new_ids: HashSet) -> usize { 45 | let mut cur_ids = self.0.lock(); 46 | cur_ids.iter_mut().for_each(|core| { 47 | if new_ids.contains(&core.id) { 48 | core.available = true; 49 | new_ids.remove(&core.id); 50 | } else { 51 | core.available = false; 52 | } 53 | }); 54 | let num_new_ids = new_ids.len(); 55 | cur_ids.extend(new_ids.into_iter().map(Core::from)); 56 | num_new_ids 57 | } 58 | 59 | /// Updates `CORES` with the currently available CPU core IDs. 
The correspondence between the 60 | /// index in the sequence and the core ID is maintained for any core IDs already in the 61 | /// sequence. If a previously available core has become unavailable, its `available` flag is 62 | /// set to `false`. Any new cores are appended to the end of the sequence. Returns the number 63 | /// of new cores added to the sequence. 64 | pub fn refresh(&self) -> usize { 65 | let new_ids: HashSet<_> = core_affinity::get_core_ids() 66 | .map(|core_ids| core_ids.into_iter().collect()) 67 | .unwrap_or_default(); 68 | self.update_from(new_ids) 69 | } 70 | } 71 | 72 | /// A sequence of CPU core indices. An index in the range `0..num_cpus::get()` may be associated 73 | /// with a CPU core, while an index outside this range will never be associated with a CPU core. 74 | /// 75 | /// The mapping between CPU indices and core IDs is platform-specific, but the same index is 76 | /// guaranteed to always refer to the same physical core. 77 | #[derive(Debug, Default, Clone, PartialEq, Eq)] 78 | pub struct Cores(Vec); 79 | 80 | impl Cores { 81 | /// Returns a `Cores` set populated with the first `n` CPU indices (up to the number of 82 | /// available cores). 83 | pub fn first(n: usize) -> Self { 84 | Self(Vec::from_iter(0..n.min(num_cpus::get()))) 85 | } 86 | 87 | /// Returns a `Cores` with all CPU indices. 88 | pub fn all() -> Self { 89 | Self::from(0..num_cpus::get()) 90 | } 91 | 92 | /// Appends a new CPU core index to the end of the sequence. Returns `true` if the specified 93 | /// index did not previously exist in the set. 94 | pub fn append(&mut self, index: usize) -> bool { 95 | if !self.0.contains(&index) { 96 | self.0.push(index); 97 | true 98 | } else { 99 | false 100 | } 101 | } 102 | 103 | /// Appends to the end of this sequence all the CPU core indices in `other` that are not 104 | /// already present. Returns the number of new core indices added to the sequence. 105 | pub fn union(&mut self, other: &Self) -> usize { 106 | (*other.0) 107 | .iter() 108 | .filter_map(|index| self.append(*index).then_some(())) 109 | .count() 110 | } 111 | 112 | /// Returns the `Core` associated with the specified index if the index exists and the core 113 | /// is available, otherwise returns `None`. 114 | pub fn get(&self, index: usize) -> Option { 115 | self.0 116 | .get(index) 117 | .and_then(|&index| CORES.get(index)) 118 | .filter(|core| core.available) 119 | } 120 | 121 | /// Returns an iterator over `(core_index, Option)`, where `Some(core)` can be used to 122 | /// set the core affinity of the current thread. The `core` will be `None` for cores that are 123 | /// not currently available. 
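// A sketch of how a caller might walk this sequence and pin the current thread, assuming
// `Core::try_pin_current` behaves as documented further down in this file.
#[allow(dead_code)]
fn pinning_sketch() {
    // Take the first four CPU indices and pin to the first one whose core is available.
    let cores = Cores::first(4);
    for (index, core) in cores.iter() {
        if let Some(core) = core {
            if core.try_pin_current() {
                println!("pinned to core index {index}");
                break;
            }
        }
    }
}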
124 | pub fn iter(&self) -> impl Iterator)> { 125 | CoreIter::new(self.0.iter().cloned()) 126 | } 127 | } 128 | 129 | impl BitOr for Cores { 130 | type Output = Self; 131 | 132 | fn bitor(mut self, rhs: Self) -> Self::Output { 133 | self |= rhs; 134 | self 135 | } 136 | } 137 | 138 | impl BitOrAssign for Cores { 139 | fn bitor_assign(&mut self, rhs: Self) { 140 | let mut rhs_indexes = rhs.0; 141 | rhs_indexes.retain(|index| !self.0.contains(index)); 142 | self.0.extend(rhs_indexes); 143 | } 144 | } 145 | 146 | impl Sub for Cores { 147 | type Output = Self; 148 | 149 | fn sub(mut self, rhs: Self) -> Self::Output { 150 | self -= rhs; 151 | self 152 | } 153 | } 154 | 155 | impl SubAssign for Cores { 156 | fn sub_assign(&mut self, rhs: Self) { 157 | self.0.retain(|i| !rhs.0.contains(i)); 158 | } 159 | } 160 | 161 | impl FromIterator for Cores { 162 | fn from_iter>(iter: T) -> Self { 163 | Self(Vec::from_iter(iter)) 164 | } 165 | } 166 | 167 | impl> From for Cores { 168 | fn from(value: I) -> Self { 169 | Self(Vec::from_iter(value)) 170 | } 171 | } 172 | 173 | /// Iterator over core (index, id) tuples. This itertor holds the `MutexGuard` for the shared 174 | /// global `CoreIds`, so only one thread can iterate at a time. 175 | pub struct CoreIter<'a, I: Iterator> { 176 | index_iter: I, 177 | cores: MutexGuard<'a, Vec>, 178 | } 179 | 180 | impl> CoreIter<'_, I> { 181 | fn new(index_iter: I) -> Self { 182 | Self { 183 | index_iter, 184 | cores: CORES.0.lock(), 185 | } 186 | } 187 | } 188 | 189 | impl> Iterator for CoreIter<'_, I> { 190 | type Item = (usize, Option); 191 | 192 | fn next(&mut self) -> Option { 193 | let index = self.index_iter.next()?; 194 | let core = self.cores.get(index).cloned().filter(|core| core.available); 195 | Some((index, core)) 196 | } 197 | } 198 | 199 | /// Represents a CPU core. 200 | #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] 201 | pub struct Core { 202 | /// the OS-specific core ID 203 | id: CoreId, 204 | /// whether this core is currently available for pinning threads 205 | available: bool, 206 | } 207 | 208 | impl Core { 209 | fn new(id: CoreId, available: bool) -> Self { 210 | Self { id, available } 211 | } 212 | 213 | /// Attempts to pin the current thread to this CPU core. Returns `true` if the thread was 214 | /// successfully pinned. 215 | /// 216 | /// If the `available` flag is `false`, this immediately returns `false` and does not attempt 217 | /// to pin the thread. 218 | pub fn try_pin_current(&self) -> bool { 219 | self.available && core_affinity::set_for_current(self.id) 220 | } 221 | } 222 | 223 | impl From for Core { 224 | /// Creates a new `Core` with `available` set to `true`. 
225 | fn from(id: CoreId) -> Self { 226 | Self::new(id, true) 227 | } 228 | } 229 | 230 | #[cfg(test)] 231 | #[cfg_attr(coverage_nightly, coverage(off))] 232 | mod tests { 233 | use super::*; 234 | use std::collections::HashSet; 235 | 236 | #[test] 237 | fn test_core_ids() { 238 | let core_ids = CoreIds::new((0..10usize).map(|id| Core::from(CoreId { id })).collect()); 239 | assert_eq!( 240 | (0..10) 241 | .flat_map(|i| core_ids.get(i).map(|id| id.id)) 242 | .collect::>(), 243 | (0..10).map(|id| CoreId { id }).collect::>() 244 | ); 245 | assert!((0..10).all(|i| core_ids.get(i).map(|id| id.available).unwrap_or_default())); 246 | let new_ids: HashSet = vec![10, 11, 1, 3, 5, 7, 9] 247 | .into_iter() 248 | .map(|id| CoreId { id }) 249 | .collect(); 250 | let num_added = core_ids.update_from(new_ids); 251 | assert_eq!(num_added, 2); 252 | let mut new_core_ids = (0..12) 253 | .flat_map(|i| core_ids.get(i).map(|id| id.id)) 254 | .collect::>(); 255 | new_core_ids.sort(); 256 | assert_eq!( 257 | new_core_ids, 258 | (0..12).map(|id| CoreId { id }).collect::>() 259 | ); 260 | assert_eq!( 261 | (0..12) 262 | .flat_map(|i| core_ids.get(i)) 263 | .filter(|id| id.available) 264 | .count(), 265 | 7 266 | ); 267 | } 268 | 269 | #[test] 270 | fn test_empty() { 271 | assert_eq!(Cores::default().0.len(), 0); 272 | } 273 | 274 | #[test] 275 | fn test_first() { 276 | let max = num_cpus::get(); 277 | for n in 1..=max { 278 | assert_eq!(Cores::first(n).0.len(), n); 279 | } 280 | assert_eq!(Cores::first(max + 1).0.len(), max); 281 | } 282 | 283 | #[test] 284 | fn test_all() { 285 | let max = num_cpus::get(); 286 | assert_eq!(Cores::all().0.len(), max); 287 | } 288 | 289 | #[test] 290 | fn test_append() { 291 | let mut a = Cores::from(0..4); 292 | a.append(4); 293 | assert_eq!(a, Cores::from(0..5)); 294 | } 295 | 296 | #[test] 297 | fn test_union() { 298 | let mut a = Cores::from(0..4); 299 | let b = Cores::from(3..6); 300 | a.union(&b); 301 | assert_eq!(a, Cores::from(0..6)); 302 | } 303 | 304 | #[test] 305 | fn test_ops() { 306 | let a = Cores::from(0..4); 307 | let b = Cores::from(3..6); 308 | assert_eq!(a.clone() | b.clone(), Cores::from(0..6)); 309 | assert_eq!(a.clone() - b.clone(), Cores::from(0..3)); 310 | assert_eq!(b.clone() - a.clone(), Cores::from(4..6)); 311 | } 312 | 313 | #[test] 314 | fn test_assign_ops() { 315 | let mut a = Cores::from(0..4); 316 | let b = Cores::from(3..6); 317 | a |= b; 318 | assert_eq!(a, Cores::from(0..6)); 319 | let c = Cores::from(0..3); 320 | a -= c; 321 | assert_eq!(a, Cores::from(3..6)) 322 | } 323 | 324 | #[test] 325 | fn test_iter() { 326 | let core_ids = core_affinity::get_core_ids().unwrap(); 327 | let n = core_ids.len(); 328 | let pairs: Vec<_> = Cores::from(0..n).iter().collect(); 329 | assert_eq!(pairs.len(), n); 330 | } 331 | } 332 | -------------------------------------------------------------------------------- /src/hive/husk.rs: -------------------------------------------------------------------------------- 1 | use super::{ 2 | Config, DerefOutcomes, Hive, OpenBuilder, Outcome, OutcomeBatch, OutcomeSender, OutcomeStore, 3 | OwnedOutcomes, TaskQueues, 4 | }; 5 | use crate::bee::{Queen, TaskId, Worker}; 6 | use derive_more::Debug; 7 | use std::any; 8 | use std::collections::HashMap; 9 | use std::ops::{Deref, DerefMut}; 10 | 11 | /// The remnants of a `Hive`. 12 | /// 13 | /// Provides access to the `Queen` and to stored `Outcome`s. Can be used to create a new `Hive` 14 | /// based on the previous `Hive`'s configuration. 
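// A sketch of the round trip described above, assuming the channel-based queues and the
// stock `ThunkWorker` used in the tests below: dismantle a `Hive` into a `Husk`, then
// rebuild an equivalent `Hive` from it.
#[test]
fn husk_round_trip_sketch() {
    use crate::bee::stock::ThunkWorker;
    use crate::hive::{Builder, ChannelBuilder, ChannelTaskQueues, TaskQueuesBuilder};

    let hive = ChannelBuilder::empty()
        .num_threads(2)
        .with_worker_default::<ThunkWorker<u8>>()
        .build();
    hive.join();

    // `urgent = false` lets any queued tasks finish before the hive shuts down.
    let husk = hive.try_into_husk(false).unwrap();
    let _hive2 = husk.into_hive::<ChannelTaskQueues<_>>();
}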
15 | #[derive(Debug)] 16 | pub struct Husk { 17 | config: Config, 18 | #[debug("{}", any::type_name::())] 19 | queen: Q, 20 | num_panics: usize, 21 | #[debug(skip)] 22 | outcomes: HashMap>, 23 | } 24 | 25 | impl Husk { 26 | /// Creates a new `Husk`. Should only be called from `Shared::try_into_husk`. 27 | pub(super) fn new( 28 | config: Config, 29 | queen: Q, 30 | num_panics: usize, 31 | outcomes: HashMap>, 32 | ) -> Self { 33 | Self { 34 | config, 35 | queen, 36 | num_panics, 37 | outcomes, 38 | } 39 | } 40 | 41 | /// The `Queen` of the former `Hive`. 42 | pub fn queen(&self) -> &Q { 43 | &self.queen 44 | } 45 | 46 | /// The number of panicked threads in the former `Hive`. 47 | pub fn num_panics(&self) -> usize { 48 | self.num_panics 49 | } 50 | 51 | /// Consumes this `Husk` and returns the `Queen` and `Outcome`s. 52 | pub fn into_parts(self) -> (Q, OutcomeBatch) { 53 | (self.queen, OutcomeBatch::new(self.outcomes)) 54 | } 55 | 56 | /// Returns a new `Builder` that will create a `Hive` with the same configuration as the one 57 | /// that produced this `Husk`. 58 | pub fn as_builder(&self) -> OpenBuilder { 59 | OpenBuilder::from(self.config.clone()) 60 | } 61 | 62 | /// Consumes this `Husk` and returns a new `Hive` with the same configuration and `Queen` as 63 | /// the one that produced this `Husk`. 64 | pub fn into_hive>(self) -> Hive { 65 | self.as_builder() 66 | .with_queen(self.queen) 67 | .with_queues::() 68 | .build() 69 | } 70 | 71 | /// Consumes this `Husk` and creates a new `Hive` with the same configuration as the one that 72 | /// produced this `Husk`, and queues all the `Outcome::Unprocessed` values. The results will 73 | /// be sent to `tx`. Returns the new `Hive` and the IDs of the tasks that were queued. 74 | /// 75 | /// This method returns a `SpawnError` if there is an error creating the new `Hive`. 76 | pub fn into_hive_swarm_send_unprocessed>( 77 | mut self, 78 | tx: &OutcomeSender, 79 | ) -> (Hive, Vec) { 80 | let unprocessed: Vec<_> = self 81 | .remove_all_unprocessed() 82 | .into_iter() 83 | .map(|(_, input)| input) 84 | .collect(); 85 | let hive = self 86 | .as_builder() 87 | .with_queen(self.queen) 88 | .with_queues::() 89 | .build(); 90 | let task_ids = hive.swarm_send(unprocessed, tx); 91 | (hive, task_ids) 92 | } 93 | 94 | /// Consumes this `Husk` and creates a new `Hive` with the same configuration as the one that 95 | /// produced this `Husk`, and queues all the `Outcome::Unprocessed` values. The results will 96 | /// be retained in the new `Hive` for later retrieval. Returns the new `Hive` and the IDs 97 | /// of the tasks that were queued. 98 | /// 99 | /// This method returns a `SpawnError` if there is an error creating the new `Hive`. 
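///
/// # Example
///
/// A sketch of re-queuing unprocessed tasks in a fresh `Hive` (mirrors the unit
/// tests below; assumes the stock `ThunkWorker` and `ChannelTaskQueues`):
///
/// ```ignore
/// use beekeeper::bee::stock::{Thunk, ThunkWorker};
/// use beekeeper::hive::{
///     Builder, ChannelBuilder, ChannelTaskQueues, OutcomeStore, TaskQueuesBuilder,
/// };
///
/// // no worker threads, so nothing is processed before shutdown
/// let hive = ChannelBuilder::empty()
///     .num_threads(0)
///     .with_worker_default::<ThunkWorker<usize>>()
///     .build();
/// hive.map_store((0..10).map(|i| Thunk::from(move || i)));
/// hive.suspend();
/// let husk = hive.try_into_husk(false).unwrap();
///
/// // hand the unprocessed tasks to a new hive and let it finish them
/// let (hive2, _task_ids) =
///     husk.into_hive_swarm_store_unprocessed::<ChannelTaskQueues<ThunkWorker<usize>>>();
/// hive2.grow(8).expect("error spawning threads");
/// hive2.join();
/// assert!(hive2.try_into_husk(false).unwrap().has_successes());
/// ```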
100 | pub fn into_hive_swarm_store_unprocessed>( 101 | mut self, 102 | ) -> (Hive, Vec) { 103 | let unprocessed: Vec<_> = self 104 | .remove_all_unprocessed() 105 | .into_iter() 106 | .map(|(_, input)| input) 107 | .collect(); 108 | let hive = self 109 | .as_builder() 110 | .with_queen(self.queen) 111 | .with_queues::() 112 | .build(); 113 | let task_ids = hive.swarm_store(unprocessed); 114 | (hive, task_ids) 115 | } 116 | } 117 | 118 | impl> DerefOutcomes for Husk { 119 | #[inline] 120 | fn outcomes_deref(&self) -> impl Deref>> { 121 | &self.outcomes 122 | } 123 | 124 | #[inline] 125 | fn outcomes_deref_mut(&mut self) -> impl DerefMut>> { 126 | &mut self.outcomes 127 | } 128 | } 129 | 130 | impl> OwnedOutcomes for Husk { 131 | #[inline] 132 | fn outcomes(self) -> HashMap> { 133 | self.outcomes 134 | } 135 | 136 | #[inline] 137 | fn outcomes_ref(&self) -> &HashMap> { 138 | &self.outcomes 139 | } 140 | } 141 | 142 | #[cfg(test)] 143 | #[cfg_attr(coverage_nightly, coverage(off))] 144 | mod tests { 145 | use crate::bee::stock::{PunkWorker, Thunk, ThunkWorker}; 146 | use crate::hive::ChannelTaskQueues; 147 | use crate::hive::{ 148 | Builder, ChannelBuilder, Outcome, OutcomeIteratorExt, OutcomeStore, TaskQueuesBuilder, 149 | outcome_channel, 150 | }; 151 | 152 | #[test] 153 | fn test_unprocessed() { 154 | // don't spin up any worker threads so that no tasks will be processed 155 | let hive = ChannelBuilder::empty() 156 | .num_threads(0) 157 | .with_worker_default::>() 158 | .build(); 159 | let mut task_ids = hive.map_store((0..10).map(|i| Thunk::from(move || i))); 160 | // cancel and smash the hive before the tasks can be processed 161 | hive.suspend(); 162 | let mut husk = hive.try_into_husk(false).unwrap(); 163 | assert!(husk.has_unprocessed()); 164 | for i in task_ids.iter() { 165 | assert!(husk.get(*i).unwrap().is_unprocessed()); 166 | } 167 | assert_eq!(husk.iter_unprocessed().count(), 10); 168 | let mut unprocessed_task_ids = husk 169 | .iter_unprocessed() 170 | .map(|(task_id, _)| *task_id) 171 | .collect::>(); 172 | task_ids.sort(); 173 | unprocessed_task_ids.sort(); 174 | assert_eq!(task_ids, unprocessed_task_ids); 175 | assert_eq!(husk.remove_all_unprocessed().len(), 10); 176 | } 177 | 178 | #[test] 179 | fn test_reprocess_unprocessed() { 180 | // don't spin up any worker threads so that no tasks will be processed 181 | let hive1 = ChannelBuilder::empty() 182 | .num_threads(0) 183 | .with_worker_default::>() 184 | .build(); 185 | let _ = hive1.map_store((0..10).map(|i| Thunk::from(move || i))); 186 | // cancel and smash the hive before the tasks can be processed 187 | hive1.suspend(); 188 | let husk1 = hive1.try_into_husk(false).unwrap(); 189 | let (hive2, _) = husk1.into_hive_swarm_store_unprocessed::>(); 190 | // now spin up worker threads to process the tasks 191 | hive2.grow(8).expect("error spawning threads"); 192 | hive2.join(); 193 | let husk2 = hive2.try_into_husk(false).unwrap(); 194 | assert!(!husk2.has_unprocessed()); 195 | assert!(husk2.has_successes()); 196 | assert_eq!(husk2.iter_successes().count(), 10); 197 | } 198 | 199 | #[test] 200 | fn test_reprocess_unprocessed_to() { 201 | // don't spin up any worker threads so that no tasks will be processed 202 | let hive1 = ChannelBuilder::empty() 203 | .num_threads(0) 204 | .with_worker_default::>() 205 | .build(); 206 | let _ = hive1.map_store((0..10).map(|i| Thunk::from(move || i))); 207 | // cancel and smash the hive before the tasks can be processed 208 | hive1.suspend(); 209 | let husk1 = 
hive1.try_into_husk(false).unwrap(); 210 | let (tx, rx) = outcome_channel(); 211 | let (hive2, task_ids) = husk1.into_hive_swarm_send_unprocessed::>(&tx); 212 | // now spin up worker threads to process the tasks 213 | hive2.grow(8).expect("error spawning threads"); 214 | hive2.join(); 215 | let husk2 = hive2.try_into_husk(false).unwrap(); 216 | assert!(husk2.is_empty()); 217 | let mut outputs = rx 218 | .select_ordered(task_ids) 219 | .map(Outcome::unwrap) 220 | .collect::>(); 221 | outputs.sort(); 222 | assert_eq!(outputs, (0..10).collect::>()); 223 | } 224 | 225 | #[test] 226 | fn test_into_result() { 227 | let hive = ChannelBuilder::empty() 228 | .num_threads(4) 229 | .with_worker_default::>() 230 | .build(); 231 | hive.map_store((0..10).map(|i| Thunk::from(move || i))); 232 | hive.join(); 233 | let mut outputs = hive.try_into_husk(false).unwrap().into_parts().1.unwrap(); 234 | outputs.sort(); 235 | assert_eq!(outputs, (0..10).collect::>()); 236 | } 237 | 238 | #[test] 239 | #[should_panic] 240 | fn test_into_result_panic() { 241 | let hive = ChannelBuilder::empty() 242 | .num_threads(4) 243 | .with_worker_default::>() 244 | .build(); 245 | hive.map_store( 246 | (0..10).map(|i| Thunk::from(move || if i == 5 { panic!("oh no!") } else { i })), 247 | ); 248 | hive.join(); 249 | let (_, result) = hive.try_into_husk(false).unwrap().into_parts(); 250 | let _ = result.ok_or_unwrap_errors(true); 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /src/hive/inner/config.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "local-batch")] 2 | pub use self::local_batch::set_batch_limit_default; 3 | #[cfg(feature = "local-batch")] 4 | pub use self::local_batch::set_weight_limit_default; 5 | #[cfg(feature = "retry")] 6 | pub use self::retry::{ 7 | set_max_retries_default, set_retries_default_disabled, set_retry_factor_default, 8 | }; 9 | 10 | use super::Config; 11 | use parking_lot::Mutex; 12 | use std::sync::LazyLock; 13 | 14 | const DEFAULT_NUM_THREADS: usize = 4; 15 | 16 | pub static DEFAULTS: LazyLock> = LazyLock::new(|| { 17 | let mut config = Config::empty(); 18 | config.set_const_defaults(); 19 | Mutex::new(config) 20 | }); 21 | 22 | /// Sets the number of threads a `Builder` is configured with when using `Builder::default()`. 23 | pub fn set_num_threads_default(num_threads: usize) { 24 | DEFAULTS.lock().num_threads.set(Some(num_threads)); 25 | } 26 | 27 | /// Sets the number of threads a `Builder` is configured with when using `Builder::default()` to 28 | /// the number of available CPU cores. 29 | pub fn set_num_threads_default_all() { 30 | set_num_threads_default(num_cpus::get()); 31 | } 32 | 33 | /// Resets all builder defaults to their original values. 34 | pub fn reset_defaults() { 35 | let mut config = DEFAULTS.lock(); 36 | config.set_const_defaults(); 37 | } 38 | 39 | impl Config { 40 | /// Creates a new `Config` with all values unset. 
41 | pub fn empty() -> Self { 42 | Self { 43 | num_threads: Default::default(), 44 | thread_name: Default::default(), 45 | thread_stack_size: Default::default(), 46 | #[cfg(feature = "affinity")] 47 | affinity: Default::default(), 48 | #[cfg(feature = "local-batch")] 49 | batch_limit: Default::default(), 50 | #[cfg(feature = "local-batch")] 51 | weight_limit: Default::default(), 52 | #[cfg(feature = "retry")] 53 | max_retries: Default::default(), 54 | #[cfg(feature = "retry")] 55 | retry_factor: Default::default(), 56 | } 57 | } 58 | 59 | /// Resets config values to their pre-configured defaults. 60 | fn set_const_defaults(&mut self) { 61 | self.num_threads.set(Some(DEFAULT_NUM_THREADS)); 62 | #[cfg(feature = "local-batch")] 63 | self.set_batch_const_defaults(); 64 | #[cfg(feature = "retry")] 65 | self.set_retry_const_defaults(); 66 | } 67 | 68 | /// Converts fields into `Sync` variants to make this `Config` thread-safe. 69 | pub fn into_sync(self) -> Self { 70 | Self { 71 | num_threads: self.num_threads.into_sync_default(), 72 | thread_name: self.thread_name.into_sync(), 73 | thread_stack_size: self.thread_stack_size.into_sync(), 74 | #[cfg(feature = "affinity")] 75 | affinity: self.affinity.into_sync(), 76 | #[cfg(feature = "local-batch")] 77 | batch_limit: self.batch_limit.into_sync_default(), 78 | #[cfg(feature = "local-batch")] 79 | weight_limit: self.weight_limit.into_sync_default(), 80 | #[cfg(feature = "retry")] 81 | max_retries: self.max_retries.into_sync_default(), 82 | #[cfg(feature = "retry")] 83 | retry_factor: self.retry_factor.into_sync_default(), 84 | } 85 | } 86 | 87 | /// Converts fields into `Unsync` variants to enable them to be modified in a single-threaded 88 | /// context. 89 | pub fn into_unsync(self) -> Self { 90 | Self { 91 | num_threads: self.num_threads.into_unsync(), 92 | thread_name: self.thread_name.into_unsync(), 93 | thread_stack_size: self.thread_stack_size.into_unsync(), 94 | #[cfg(feature = "affinity")] 95 | affinity: self.affinity.into_unsync(), 96 | #[cfg(feature = "local-batch")] 97 | batch_limit: self.batch_limit.into_unsync(), 98 | #[cfg(feature = "local-batch")] 99 | weight_limit: self.weight_limit.into_unsync(), 100 | #[cfg(feature = "retry")] 101 | max_retries: self.max_retries.into_unsync(), 102 | #[cfg(feature = "retry")] 103 | retry_factor: self.retry_factor.into_unsync(), 104 | } 105 | } 106 | } 107 | 108 | impl Default for Config { 109 | /// Creates a new `Config` with default values. This simply clones `DEFAULTS`. 110 | fn default() -> Self { 111 | DEFAULTS.lock().clone() 112 | } 113 | } 114 | 115 | #[cfg(test)] 116 | #[cfg_attr(coverage_nightly, coverage(off))] 117 | pub mod reset { 118 | /// Struct that resets the default values when `drop`ped. 
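///
/// # Example
///
/// A sketch of the guard pattern used by the tests in this module (global
/// defaults are process-wide, so these tests run serially):
///
/// ```ignore
/// let reset = Reset;
/// super::set_num_threads_default(2);
/// assert_eq!(Config::default().num_threads.get(), Some(2));
/// // dropping the guard calls `reset_defaults()` and restores the built-in defaults
/// drop(reset);
/// ```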
119 | pub struct Reset; 120 | 121 | impl Drop for Reset { 122 | fn drop(&mut self) { 123 | super::reset_defaults(); 124 | } 125 | } 126 | } 127 | 128 | #[cfg(test)] 129 | #[cfg_attr(coverage_nightly, coverage(off))] 130 | mod tests { 131 | use super::Config; 132 | use super::reset::Reset; 133 | use serial_test::serial; 134 | 135 | #[test] 136 | #[serial] 137 | fn test_set_num_threads_default() { 138 | let reset = Reset; 139 | super::set_num_threads_default(2); 140 | let config = Config::default(); 141 | assert_eq!(config.num_threads.get(), Some(2)); 142 | // Dropping `Reset` should reset the defaults 143 | drop(reset); 144 | 145 | let reset = Reset; 146 | super::set_num_threads_default_all(); 147 | let config = Config::default(); 148 | assert_eq!(config.num_threads.get(), Some(num_cpus::get())); 149 | drop(reset); 150 | 151 | let config = Config::default(); 152 | assert_eq!(config.num_threads.get(), Some(super::DEFAULT_NUM_THREADS)); 153 | } 154 | } 155 | 156 | #[cfg(feature = "local-batch")] 157 | mod local_batch { 158 | use super::{Config, DEFAULTS}; 159 | 160 | const DEFAULT_BATCH_LIMIT: usize = 10; 161 | 162 | /// Sets the batch limit a `config` is configured with when using `Builder::default()`. 163 | pub fn set_batch_limit_default(batch_limit: usize) { 164 | DEFAULTS.lock().batch_limit.set(Some(batch_limit)); 165 | } 166 | 167 | /// Sets the weight limit a `config` is configured with when using `Builder::default()`. 168 | pub fn set_weight_limit_default(weight_limit: u64) { 169 | DEFAULTS.lock().weight_limit.set(Some(weight_limit)); 170 | } 171 | 172 | impl Config { 173 | pub(super) fn set_batch_const_defaults(&mut self) { 174 | self.batch_limit.set(Some(DEFAULT_BATCH_LIMIT)); 175 | self.weight_limit.set(None); 176 | } 177 | } 178 | } 179 | 180 | #[cfg(feature = "retry")] 181 | mod retry { 182 | use super::{Config, DEFAULTS}; 183 | use std::time::Duration; 184 | 185 | const DEFAULT_MAX_RETRIES: u8 = 3; 186 | const DEFAULT_RETRY_FACTOR_SECS: u64 = 1; 187 | 188 | /// Sets the max number of retries a `config` is configured with when using `Builder::default()`. 189 | pub fn set_max_retries_default(num_retries: u8) { 190 | DEFAULTS.lock().max_retries.set(Some(num_retries)); 191 | } 192 | 193 | /// Sets the retry factor a `config` is configured with when using `Builder::default()`. 194 | pub fn set_retry_factor_default(retry_factor: Duration) { 195 | DEFAULTS.lock().set_retry_factor_from(retry_factor); 196 | } 197 | 198 | /// Specifies that retries should be disabled by default when using `Builder::default()`. 
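///
/// # Example
///
/// A sketch (assumes the `retry` feature; the `set_config` re-export path is
/// assumed, and defaults are process-global, so call this before building hives):
///
/// ```ignore
/// use beekeeper::hive::set_config::{reset_defaults, set_retries_default_disabled};
///
/// set_retries_default_disabled();
/// // builders created from here on start with `max_retries` set to 0
/// // ... build and use hives ...
/// reset_defaults();
/// ```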
199 | pub fn set_retries_default_disabled() { 200 | set_max_retries_default(0); 201 | } 202 | 203 | impl Config { 204 | pub fn get_retry_factor_duration(&self) -> Option { 205 | self.retry_factor.get().map(Duration::from_nanos) 206 | } 207 | 208 | pub fn set_retry_factor_from(&mut self, duration: Duration) -> Option { 209 | self.retry_factor 210 | .set(Some(duration.as_nanos() as u64)) 211 | .map(Duration::from_nanos) 212 | } 213 | 214 | pub fn try_set_retry_factor_from(&self, duration: Duration) -> Option { 215 | self.retry_factor 216 | .try_set(duration.as_nanos() as u64) 217 | .map(Duration::from_nanos) 218 | .ok() 219 | } 220 | 221 | pub(super) fn set_retry_const_defaults(&mut self) { 222 | self.max_retries.set(Some(DEFAULT_MAX_RETRIES)); 223 | self.retry_factor.set(Some( 224 | Duration::from_secs(DEFAULT_RETRY_FACTOR_SECS).as_nanos() as u64, 225 | )); 226 | } 227 | } 228 | 229 | #[cfg(test)] 230 | #[cfg_attr(coverage_nightly, coverage(off))] 231 | mod tests { 232 | use super::Config; 233 | use crate::hive::inner::config::reset::Reset; 234 | use serial_test::serial; 235 | use std::time::Duration; 236 | 237 | #[test] 238 | #[serial] 239 | fn test_set_max_retries_default() { 240 | let reset = Reset; 241 | super::set_max_retries_default(1); 242 | let config = Config::default(); 243 | assert_eq!(config.max_retries.get(), Some(1)); 244 | // Dropping `Reset` should reset the defaults 245 | drop(reset); 246 | 247 | let reset = Reset; 248 | super::set_retries_default_disabled(); 249 | let config = Config::default(); 250 | assert_eq!(config.max_retries.get(), Some(0)); 251 | drop(reset); 252 | 253 | let config = Config::default(); 254 | assert_eq!(config.max_retries.get(), Some(super::DEFAULT_MAX_RETRIES)); 255 | } 256 | 257 | #[test] 258 | #[serial] 259 | fn test_set_retry_factor_default() { 260 | let reset = Reset; 261 | super::set_retry_factor_default(Duration::from_secs(2)); 262 | let config = Config::default(); 263 | assert_eq!( 264 | config.get_retry_factor_duration(), 265 | Some(Duration::from_secs(2)) 266 | ); 267 | // Dropping `Reset` should reset the defaults 268 | drop(reset); 269 | let config = Config::default(); 270 | assert_eq!( 271 | config.get_retry_factor_duration(), 272 | Some(Duration::from_secs(super::DEFAULT_RETRY_FACTOR_SECS)) 273 | ); 274 | } 275 | } 276 | } 277 | -------------------------------------------------------------------------------- /src/hive/inner/counter.rs: -------------------------------------------------------------------------------- 1 | use crate::atomic::{Atomic, AtomicInt, AtomicU64, Ordering, Orderings}; 2 | 3 | // TODO: it's not clear if SeqCst ordering is actually necessary - need to do some fuzz testing. 4 | const SEQCST_ORDERING: Orderings = Orderings { 5 | load: Ordering::SeqCst, 6 | swap: Ordering::SeqCst, 7 | fetch_add: Ordering::SeqCst, 8 | fetch_sub: Ordering::SeqCst, 9 | }; 10 | 11 | #[derive(thiserror::Error, Debug)] 12 | pub enum CounterError { 13 | #[error("Left counter overflow")] 14 | LeftOverflow, 15 | #[error("Right counter overflow")] 16 | RightOverflow, 17 | #[error("Left counter underflow")] 18 | LeftUnderflow, 19 | #[error("Right counter underflow")] 20 | RightUnderflow, 21 | } 22 | 23 | /// A counter that can keep track of two related values (`L` and `R`) using a single atomic number. 24 | /// The two values may be different sizes, but their total size in bits must equal the size of the 25 | /// data type (for now fixed to `64`) used to store the value. 
26 | /// 27 | /// The following operations are supported: 28 | /// * increment/decrement the left counter (`L`) 29 | /// * decrement the right counter (`R`) 30 | /// * transfer an amount `N` from `L` to `R` (i.e., a simultaneous decrement of `L` and 31 | /// increment of `R` by the same amount) 32 | pub struct DualCounter(AtomicU64); 33 | 34 | impl DualCounter { 35 | // validate that L is > 0 36 | const L_BITS: u32 = L.checked_sub(1).expect("L must be > 0") + 1; 37 | // validate that L is < 64 38 | const R_BITS: u32 = 63u32.checked_sub(Self::L_BITS).expect("L must be <= 63") + 1; 39 | // compute the maximum possible values for L and R 40 | const L_MAX: u64 = (1 << Self::L_BITS) - 1; 41 | const R_MAX: u64 = (1 << Self::R_BITS) - 1; 42 | 43 | /// Decomposes a 64-bit value into its left and right parts. 44 | #[inline] 45 | fn decompose(n: u64) -> (u64, u64) { 46 | (n & Self::L_MAX, n >> Self::L_BITS) 47 | } 48 | 49 | /// Returns a tuple with the (left, right) parts of the counter. 50 | pub fn get(&self) -> (u64, u64) { 51 | Self::decompose(self.0.get()) 52 | } 53 | 54 | // pub fn reset(&self) -> (u64, u64) { 55 | // Self::decompose(self.0.set(0)) 56 | // } 57 | 58 | /// Increments the left counter by `n` and returns the previous value. 59 | /// 60 | /// Returns an error if `n` is greater than the maximum value (2^L - 1) or if the left counter 61 | /// overflows when incremented by `n`. 62 | pub fn increment_left(&self, n: u64) -> Result { 63 | if n > Self::L_MAX { 64 | return Err(CounterError::LeftOverflow); 65 | } 66 | let prev_val = self.0.add(n) & Self::L_MAX; 67 | match prev_val.checked_add(n) { 68 | Some(new_val) if new_val <= Self::L_MAX => Ok(prev_val), 69 | Some(_) => Err(CounterError::LeftOverflow), 70 | None => unreachable!("counter overflow"), 71 | } 72 | } 73 | 74 | /// Decrements the left counter by `n` and returns the previous value. 75 | /// 76 | /// Returns an error if `n` is greater than the maximum value (2^L - 1) or if the left counter 77 | /// underflows when decremented by `n`. 78 | pub fn decrement_left(&self, n: u64) -> Result { 79 | if n > Self::L_MAX { 80 | return Err(CounterError::LeftUnderflow); 81 | } 82 | let prev_val = self.0.sub(n) & Self::L_MAX; 83 | if prev_val >= n { 84 | Ok(prev_val) 85 | } else { 86 | Err(CounterError::LeftUnderflow) 87 | } 88 | } 89 | 90 | /// Decrements the right counter by `n` and returns the previous value. 91 | /// 92 | /// Returns an error if `n` is greater than the maximum value (2^(64-L) - 1) or if the right 93 | /// counter underflows when decremented by `n`. 94 | pub fn decrement_right(&self, n: u64) -> Result { 95 | if n > Self::R_MAX { 96 | return Err(CounterError::RightUnderflow); 97 | } 98 | let n_shifted = n.checked_shl(Self::L_BITS).unwrap(); 99 | let prev_val = self.0.sub(n_shifted) >> Self::L_BITS; 100 | if prev_val >= n { 101 | Ok(prev_val) 102 | } else { 103 | Err(CounterError::RightUnderflow) 104 | } 105 | } 106 | 107 | /// Atomically decrements the left counter and increments the right counter by `n`, and returns 108 | /// the previous values of the counters. 109 | /// 110 | /// Returns an error if `n` is greater than the maximum value for either the right or left 111 | /// counter, if the left counter overflows when incremented, or if the right counter underflows 112 | /// when decremented. 
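///
/// # Example
///
/// A sketch with the 48/16 split used by the `Hive` (mirrors the unit tests
/// below):
///
/// ```ignore
/// let counter = DualCounter::<48>::default();
/// counter.increment_left(4).unwrap();   // left = 4 (queued), right = 0 (active)
/// counter.transfer(3).unwrap();         // left = 1, right = 3
/// assert_eq!(counter.get(), (1, 3));
/// counter.decrement_right(3).unwrap();  // left = 1, right = 0
/// assert_eq!(counter.get(), (1, 0));
/// ```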
113 | pub fn transfer(&self, n: u64) -> Result<(u64, u64), CounterError> { 114 | if n > Self::L_MAX { 115 | return Err(CounterError::LeftUnderflow); 116 | } 117 | if n > Self::R_MAX { 118 | return Err(CounterError::RightOverflow); 119 | } 120 | let delta = n.checked_shl(Self::L_BITS).unwrap().checked_sub(n).unwrap(); 121 | let (prev_left, prev_right) = Self::decompose(self.0.add(delta)); 122 | if prev_left < n { 123 | Err(CounterError::LeftUnderflow) 124 | } else { 125 | match prev_right.checked_add(n) { 126 | Some(new_val) if new_val <= Self::R_MAX => Ok((prev_left, prev_right)), 127 | Some(_) => Err(CounterError::RightOverflow), 128 | None => unreachable!("counter overflow"), 129 | } 130 | } 131 | } 132 | } 133 | 134 | impl Default for DualCounter { 135 | fn default() -> Self { 136 | DualCounter(AtomicU64::with_orderings(0, SEQCST_ORDERING)) 137 | } 138 | } 139 | 140 | #[cfg(test)] 141 | #[cfg_attr(coverage_nightly, coverage(off))] 142 | mod tests { 143 | use super::*; 144 | 145 | #[test] 146 | fn test_works() { 147 | let counter = DualCounter::<48>::default(); 148 | 149 | assert_eq!(counter.increment_left(3).unwrap(), 0); 150 | assert_eq!(counter.increment_left(1).unwrap(), 3); 151 | assert_eq!(counter.get(), (4, 0)); 152 | 153 | assert_eq!(counter.transfer(3).unwrap(), (4, 0)); 154 | assert_eq!(counter.get(), (1, 3)); 155 | 156 | assert_eq!(counter.decrement_right(2).unwrap(), 3); 157 | assert_eq!(counter.decrement_right(1).unwrap(), 1); 158 | assert_eq!(counter.get(), (1, 0)); 159 | } 160 | 161 | #[test] 162 | fn test_increment_too_large() { 163 | let counter = DualCounter::<1>::default(); 164 | assert!(matches!( 165 | counter.increment_left(2), 166 | Err(CounterError::LeftOverflow) 167 | )); 168 | } 169 | 170 | #[test] 171 | fn test_increment_overflow() { 172 | let counter = DualCounter::<48>::default(); 173 | assert!(counter.increment_left(DualCounter::<48>::L_MAX).is_ok()); 174 | assert!(matches!( 175 | counter.increment_left(DualCounter::<48>::L_MAX), 176 | Err(CounterError::LeftOverflow) 177 | )); 178 | } 179 | 180 | #[test] 181 | fn test_counter_overflow() { 182 | let counter = DualCounter::<63>::default(); 183 | assert!(counter.increment_left(DualCounter::<63>::L_MAX).is_ok()); 184 | assert!(matches!( 185 | counter.increment_left(DualCounter::<63>::L_MAX), 186 | Err(CounterError::LeftOverflow) 187 | )); 188 | } 189 | 190 | #[test] 191 | fn test_left_overflow() { 192 | let counter = DualCounter::<1>::default(); 193 | assert!(counter.increment_left(1).is_ok()); 194 | assert!(matches!( 195 | counter.increment_left(1), 196 | Err(CounterError::LeftOverflow) 197 | )); 198 | } 199 | 200 | #[test] 201 | fn test_transfer_overflow() { 202 | let counter = DualCounter::<63>::default(); 203 | assert!(counter.increment_left(2).is_ok()); 204 | assert!(matches!( 205 | counter.transfer(2), 206 | Err(CounterError::RightOverflow) 207 | )); 208 | } 209 | 210 | #[test] 211 | fn test_transfer_left_too_small() { 212 | let counter = DualCounter::<32>::default(); 213 | assert!(counter.increment_left(2).is_ok()); 214 | assert!(matches!( 215 | counter.transfer(3), 216 | Err(CounterError::LeftUnderflow) 217 | )); 218 | } 219 | 220 | #[test] 221 | fn test_transfer_right_too_large() { 222 | let counter = DualCounter::<32>::default(); 223 | assert!(counter.increment_left(DualCounter::<32>::L_MAX).is_ok()); 224 | assert!(counter.transfer(DualCounter::<32>::L_MAX).is_ok()); 225 | assert!(counter.increment_left(1).is_ok()); 226 | assert!(matches!( 227 | counter.transfer(1), 228 | 
Err(CounterError::RightOverflow) 229 | )); 230 | } 231 | 232 | #[test] 233 | fn test_decrement_too_large() { 234 | let counter = DualCounter::<63>::default(); 235 | assert!(counter.increment_left(2).is_ok()); 236 | assert!(counter.transfer(1).is_ok()); 237 | assert!(matches!( 238 | counter.decrement_right(2), 239 | Err(CounterError::RightUnderflow) 240 | )); 241 | } 242 | 243 | #[test] 244 | fn test_right_underflow() { 245 | let counter = DualCounter::<63>::default(); 246 | assert!(counter.increment_left(2).is_ok()); 247 | assert!(counter.transfer(1).is_ok()); 248 | assert!(counter.decrement_right(1).is_ok()); 249 | assert!(matches!( 250 | counter.decrement_right(1), 251 | Err(CounterError::RightUnderflow) 252 | )); 253 | } 254 | } 255 | -------------------------------------------------------------------------------- /src/hive/inner/diagram.mmd: -------------------------------------------------------------------------------- 1 | graph TD; 2 | Generic-->Queue 3 | Generic-->Bee 4 | Bee-->Full 5 | Queue-->Full -------------------------------------------------------------------------------- /src/hive/inner/gate.rs: -------------------------------------------------------------------------------- 1 | //! Implementations of a `gate` that blocks threads waiting on a condition. 2 | use parking_lot::{Condvar, Mutex}; 3 | use std::sync::atomic::{AtomicUsize, Ordering}; 4 | 5 | /// Wraps a `Mutex` and a `Condvar`, and provides methods for threads to wait on a condition and be 6 | /// notified when the condition may have changed. 7 | #[derive(Debug, Default)] 8 | pub struct Gate { 9 | mutex: Mutex<()>, 10 | condvar: Condvar, 11 | } 12 | 13 | impl Gate { 14 | /// Waits on the condition variable while the condition evaluates to true. The condition is 15 | /// checked first to avoid acquiring the mutex lock unnecessarily. 16 | #[inline] 17 | pub fn wait_while bool>(&self, condition: F) { 18 | if condition() { 19 | let mut lock = self.mutex.lock(); 20 | while condition() { 21 | self.condvar.wait(&mut lock); 22 | } 23 | } 24 | } 25 | 26 | /// Notifies all waiting threads that the condition may have changed. 27 | pub fn notify_all(&self) { 28 | let _lock = self.mutex.lock(); 29 | self.condvar.notify_all(); 30 | } 31 | } 32 | 33 | /// A `Gate`, whose `wait_while` method also depends on a `phase` that changes each time the 34 | /// condition evaluates to `false` after first evaluating to `true`. This prevents a condition that 35 | /// changes rapidly from keeping a thread continually locked. 36 | #[derive(Debug, Default)] 37 | pub struct PhasedGate { 38 | mutex: Mutex<()>, 39 | condvar: Condvar, 40 | phase: AtomicUsize, 41 | } 42 | 43 | impl PhasedGate { 44 | /// Waits on the condition variable while the condition evaluates to true *and* the phase 45 | /// hasn't changed. The first thread to finish waiting during a given phase increments the 46 | /// phase number. The condition is checked first to avoid aquiring the mutex lock unnecessarily. 
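///
/// # Example
///
/// A sketch of one thread parking on a flag while another clears it and
/// notifies (uses `std::sync::atomic` for the flag; the hive itself uses its
/// own atomic wrappers):
///
/// ```ignore
/// use std::sync::Arc;
/// use std::sync::atomic::{AtomicBool, Ordering};
///
/// let gate = Arc::new(PhasedGate::default());
/// let suspended = Arc::new(AtomicBool::new(true));
///
/// let waiter = {
///     let (gate, suspended) = (Arc::clone(&gate), Arc::clone(&suspended));
///     std::thread::spawn(move || gate.wait_while(|| suspended.load(Ordering::Acquire)))
/// };
///
/// suspended.store(false, Ordering::Release);
/// gate.notify_all();
/// waiter.join().unwrap();
/// ```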
47 | #[inline] 48 | pub fn wait_while bool>(&self, condition: F) { 49 | if condition() { 50 | let phase = self.phase.load(Ordering::SeqCst); 51 | let mut lock = self.mutex.lock(); 52 | while phase == self.phase.load(Ordering::Relaxed) && condition() { 53 | self.condvar.wait(&mut lock); 54 | } 55 | // increase phase for the first thread to come out of the loop 56 | let _ = self.phase.compare_exchange( 57 | phase, 58 | phase.wrapping_add(1), 59 | Ordering::SeqCst, 60 | Ordering::Relaxed, 61 | ); 62 | } 63 | } 64 | 65 | /// Notifies all waiting threads that the condition may have changed. 66 | pub fn notify_all(&self) { 67 | let _lock = self.mutex.lock(); 68 | self.condvar.notify_all(); 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /src/hive/inner/mod.rs: -------------------------------------------------------------------------------- 1 | //! Internal data structures needed to implement `Hive`. 2 | mod builder; 3 | mod config; 4 | mod counter; 5 | mod gate; 6 | mod queue; 7 | mod shared; 8 | mod task; 9 | 10 | /// Prelude-like module that collects all the functions for setting global configuration defaults. 11 | pub mod set_config { 12 | pub use super::config::{reset_defaults, set_num_threads_default, set_num_threads_default_all}; 13 | #[cfg(feature = "local-batch")] 14 | pub use super::config::{set_batch_limit_default, set_weight_limit_default}; 15 | #[cfg(feature = "retry")] 16 | pub use super::config::{ 17 | set_max_retries_default, set_retries_default_disabled, set_retry_factor_default, 18 | }; 19 | } 20 | 21 | // Note: it would be more appropriate for the publicly exported traits (`Builder`, `TaskQueues`) 22 | // to be in the `beekeeper::hive` module, but they need to be in `inner` for visiblity reasons. 23 | 24 | pub use self::builder::{Builder, BuilderConfig}; 25 | pub use self::queue::{ChannelTaskQueues, TaskQueues, WorkerQueues, WorkstealingTaskQueues}; 26 | pub use self::task::TaskInput; 27 | 28 | use self::counter::DualCounter; 29 | use self::gate::{Gate, PhasedGate}; 30 | use self::queue::PopTaskError; 31 | use crate::atomic::{AtomicAny, AtomicBool, AtomicOption, AtomicUsize}; 32 | use crate::bee::{Queen, TaskMeta, Worker}; 33 | use crate::hive::{OutcomeQueue, OutcomeSender, SpawnError}; 34 | use parking_lot::Mutex; 35 | use std::thread::JoinHandle; 36 | 37 | type Any = AtomicOption>; 38 | type Usize = AtomicOption; 39 | #[cfg(feature = "retry")] 40 | type U8 = AtomicOption; 41 | #[cfg(any(feature = "local-batch", feature = "retry"))] 42 | type U64 = AtomicOption; 43 | 44 | /// Private, zero-size struct used to call private methods in public sealed traits. 45 | pub struct Token; 46 | 47 | /// Internal representation of a task to be processed by a `Hive`. 48 | #[derive(Debug)] 49 | pub struct Task { 50 | input: W::Input, 51 | meta: TaskMeta, 52 | outcome_tx: Option>, 53 | } 54 | 55 | /// Data shared by all worker threads in a `Hive`. This is the private API used by the `Hive` and 56 | /// worker threads to enqueue, dequeue, and process tasks. 
57 | pub struct Shared> { 58 | /// core configuration parameters 59 | config: Config, 60 | /// the `Queen` used to create new workers 61 | queen: Q, 62 | /// global and local task queues used by the `Hive` to send tasks to the worker threads 63 | task_queues: T, 64 | /// The results of spawning each worker 65 | spawn_results: Mutex, SpawnError>>>, 66 | /// allows for 2^48 queued tasks and 2^16 active tasks 67 | num_tasks: DualCounter<48>, 68 | /// ID that will be assigned to the next task submitted to the `Hive` 69 | next_task_id: AtomicUsize, 70 | /// number of times a worker has panicked 71 | num_panics: AtomicUsize, 72 | /// number of `Hive` clones with a reference to this shared data 73 | num_referrers: AtomicUsize, 74 | /// whether the internal state of the hive is corrupted - if true, this prevents new tasks from 75 | /// processed (new tasks may be queued but they will never be processed); currently, this can 76 | /// only happen if the task counter somehow get corrupted 77 | poisoned: AtomicBool, 78 | /// whether the hive is suspended - if true, active tasks may complete and new tasks may be 79 | /// queued, but new tasks will not be processed 80 | suspended: AtomicBool, 81 | /// gate used by worker threads to wait until the hive is resumed 82 | resume_gate: Gate, 83 | /// gate used by client threads to wait until all tasks have completed 84 | join_gate: PhasedGate, 85 | /// outcomes stored in the hive 86 | outcomes: OutcomeQueue, 87 | } 88 | 89 | /// Core configuration parameters that are set by a `Builder`, used in a `Hive`, and preserved in a 90 | /// `Husk`. Fields are `AtomicOption`s, which enables them to be transitioned back and forth 91 | /// between thread-safe and non-thread-safe contexts. 92 | #[derive(Clone, Debug)] 93 | pub struct Config { 94 | /// Number of worker threads to spawn 95 | num_threads: Usize, 96 | /// Name to give each worker thread 97 | thread_name: Any, 98 | /// Stack size for each worker thread 99 | thread_stack_size: Usize, 100 | /// CPU cores to which worker threads can be pinned 101 | #[cfg(feature = "affinity")] 102 | affinity: Any, 103 | /// Maximum number of tasks for a worker thread to take when receiving from the input channel 104 | #[cfg(feature = "local-batch")] 105 | batch_limit: Usize, 106 | /// Maximum "weight" of tasks a worker thread may have active and pending 107 | #[cfg(feature = "local-batch")] 108 | weight_limit: U64, 109 | /// Maximum number of retries for a task 110 | #[cfg(feature = "retry")] 111 | max_retries: U8, 112 | /// Multiplier for the retry backoff strategy 113 | #[cfg(feature = "retry")] 114 | retry_factor: U64, 115 | } 116 | 117 | #[cfg(test)] 118 | pub(super) mod builder_test_utils { 119 | use super::*; 120 | 121 | pub fn check_builder(builder: &mut B) { 122 | let config = builder.config_ref(Token); 123 | assert_eq!(config.num_threads.get(), Some(4)); 124 | assert_eq!(config.thread_name.get(), Some("foo".into())); 125 | assert_eq!(config.thread_stack_size.get(), Some(100)); 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/hive/inner/queue/mod.rs: -------------------------------------------------------------------------------- 1 | mod channel; 2 | #[cfg(feature = "retry")] 3 | mod retry; 4 | mod status; 5 | mod workstealing; 6 | 7 | pub use self::channel::ChannelTaskQueues; 8 | pub use self::workstealing::WorkstealingTaskQueues; 9 | 10 | #[cfg(feature = "retry")] 11 | use self::retry::RetryQueue; 12 | use self::status::Status; 13 | use super::{Config, Task, 
Token}; 14 | use crate::bee::Worker; 15 | 16 | /// Errors that may occur when trying to pop tasks from the global queue. 17 | #[derive(thiserror::Error, Debug)] 18 | pub enum PopTaskError { 19 | #[error("Global task queue is empty")] 20 | Empty, 21 | #[error("Global task queue is closed")] 22 | Closed, 23 | } 24 | 25 | /// Trait that encapsulates the global and local task queues used by a `Hive` for managing tasks 26 | /// within and between worker threads. 27 | /// 28 | /// This trait is sealed - it cannot be implemented outside of this crate. 29 | pub trait TaskQueues: Sized + Send + Sync + 'static { 30 | type WorkerQueues: WorkerQueues; 31 | 32 | /// Returns a new instance. 33 | /// 34 | /// The private `Token` is used to prevent this method from being called externally. 35 | fn new(token: Token) -> Self; 36 | 37 | /// Initializes the local queues for the given range of worker thread indices. 38 | fn init_for_threads(&self, start_index: usize, end_index: usize, config: &Config); 39 | 40 | /// Updates the queue settings from `config` for the given range of worker threads. 41 | fn update_for_threads(&self, start_index: usize, end_index: usize, config: &Config); 42 | 43 | /// Tries to add a task to the global queue. 44 | /// 45 | /// Returns an error with the task if the queue is disconnected. 46 | fn try_push_global(&self, task: Task) -> Result<(), Task>; 47 | 48 | /// Returns a `WorkerQueues` instance for the worker thread with the given `index`. 49 | fn worker_queues(&self, thread_index: usize) -> Self::WorkerQueues; 50 | 51 | /// Closes this `GlobalQueue` so no more tasks may be pushed. 52 | /// 53 | /// If `urgent` is `true`, this also prevents queued tasks from being popped. 54 | /// 55 | /// The private `Token` is used to prevent this method from being called externally. 56 | fn close(&self, urgent: bool, token: Token); 57 | 58 | /// Consumes this `TaskQueues` and Drains all tasks from all global and local queues and 59 | /// returns them as a `Vec`. 60 | /// 61 | /// This method panics if `close` has not been called. 62 | fn drain(self) -> Vec>; 63 | } 64 | 65 | /// Trait that provides access to the task queues to each worker thread. Implementations of this 66 | /// trait can hold thread-local types that are not Send/Sync. 67 | pub trait WorkerQueues { 68 | /// Attempts to add a task to the local queue if space is available, otherwise adds it to the 69 | /// global queue. If adding to the global queue fails, the task is added to a local "abandoned" 70 | /// queue from which it may be popped or will otherwise be converted to an `Unprocessed` 71 | /// outcome. 72 | fn push(&self, task: Task); 73 | 74 | /// Attempts to remove a task from the local queue for the given worker thread index. If there 75 | /// are no local queues, or if the local queues are empty, falls back to taking a task from the 76 | /// global queue. 77 | /// 78 | /// Returns an error if a task is not available, where each implementation may have a different 79 | /// definition of "available". 80 | /// 81 | /// Also returns an error if the queues are closed. 82 | fn try_pop(&self) -> Result, PopTaskError>; 83 | 84 | /// Attempts to add `task` to the local retry queue. 85 | /// 86 | /// Returns the earliest `Instant` at which it might be retried. If the task could not be added 87 | /// to the retry queue (e.g., if the queue is full), the task returned as an error. 
88 | #[cfg(feature = "retry")] 89 | fn try_push_retry(&self, task: Task) -> Result>; 90 | 91 | /// Returns the unique index of the thread that owns this `WorkerQueues` instance. 92 | #[cfg(test)] 93 | fn thread_index(&self) -> usize; 94 | } 95 | -------------------------------------------------------------------------------- /src/hive/inner/queue/retry.rs: -------------------------------------------------------------------------------- 1 | use crate::atomic::{Atomic, AtomicU64}; 2 | use crate::bee::Worker; 3 | use crate::hive::Task; 4 | use std::cell::UnsafeCell; 5 | use std::cmp::Ordering; 6 | use std::collections::BinaryHeap; 7 | use std::time::{Duration, Instant}; 8 | 9 | /// A task queue where each task has an associated `Instant` at which it will be available. 10 | /// 11 | /// This is implemented internally as `UnsafeCell`. 12 | /// 13 | /// SAFETY: This data structure is designed to enable the queue to be modified (using `push` and 14 | /// `try_pop`) by a *single thread* using interior mutability. The `drain` method is called by a 15 | /// different thread, but it first takes ownership of the queue and so will never be called 16 | /// concurrently with `push/pop`. 17 | /// 18 | /// `UnsafeCell` is used for performance - this is safe so long as the queue is only accessed from 19 | /// a single thread at a time. This data structure is *not* thread-safe. 20 | #[derive(Debug)] 21 | pub struct RetryQueue { 22 | inner: UnsafeCell>>, 23 | delay_factor: AtomicU64, 24 | } 25 | 26 | impl RetryQueue { 27 | /// Creates a new `RetryQueue` with the given `delay_factor` (in nanoseconds). 28 | pub fn new(delay_factor: u64) -> Self { 29 | Self { 30 | inner: UnsafeCell::new(BinaryHeap::new()), 31 | delay_factor: AtomicU64::new(delay_factor), 32 | } 33 | } 34 | 35 | /// Changes the delay factor for the queue. 36 | pub fn set_delay_factor(&self, delay_factor: u64) { 37 | self.delay_factor.set(delay_factor); 38 | } 39 | 40 | /// Pushes an item onto the queue. Returns the `Instant` at which the task will be available, 41 | /// or an error with `task` if there was an error pushing it. 42 | /// 43 | /// SAFETY: this method is only ever called within a single thread. 44 | pub fn try_push(&self, task: Task) -> Result> { 45 | unsafe { 46 | match self.inner.get().as_mut() { 47 | Some(queue) => { 48 | // compute the delay 49 | let delay = 2u64 50 | .checked_pow(task.meta.attempt() as u32 - 1) 51 | .and_then(|multiplier| { 52 | self.delay_factor 53 | .get() 54 | .checked_mul(multiplier) 55 | .or(Some(u64::MAX)) 56 | .map(Duration::from_nanos) 57 | }) 58 | .unwrap_or_default(); 59 | let delayed = DelayedTask::new(task, delay); 60 | let until = delayed.until; 61 | queue.push(delayed); 62 | Ok(until) 63 | } 64 | None => Err(task), 65 | } 66 | } 67 | } 68 | 69 | /// Returns the task at the head of the queue, if one exists and is available (i.e., its delay 70 | /// has been exceeded), and removes it. 71 | /// 72 | /// SAFETY: this method is only ever called within a single thread. 73 | pub fn try_pop(&self) -> Option> { 74 | unsafe { 75 | let queue_ptr = self.inner.get(); 76 | if queue_ptr 77 | .as_ref() 78 | .and_then(|queue| queue.peek()) 79 | .map(|head| head.until <= Instant::now()) 80 | .unwrap_or(false) 81 | { 82 | queue_ptr 83 | .as_mut() 84 | .and_then(|queue| queue.pop()) 85 | .map(|delayed| delayed.value) 86 | } else { 87 | None 88 | } 89 | } 90 | } 91 | 92 | /// Consumes this `RetryQueue` and drains all tasks from the queue into `sink`. 
93 | pub fn drain_into(self, sink: &mut Vec>) { 94 | let mut queue = self.inner.into_inner(); 95 | sink.reserve(queue.len()); 96 | sink.extend(queue.drain().map(|delayed| delayed.value)) 97 | } 98 | } 99 | 100 | unsafe impl Sync for RetryQueue {} 101 | 102 | /// Wrapper for a Task with an associated `Instant` at which it will be available. 103 | struct DelayedTask { 104 | value: Task, 105 | until: Instant, 106 | } 107 | 108 | impl DelayedTask { 109 | pub fn new(value: Task, delay: Duration) -> Self { 110 | Self { 111 | value, 112 | until: Instant::now() + delay, 113 | } 114 | } 115 | } 116 | 117 | /// Implements ordering for `Delayed`, so it can be used to correctly order elements in the 118 | /// `BinaryHeap` of the `RetryQueue`. 119 | /// 120 | /// Earlier entries have higher priority (should be popped first), so they are Greater that later 121 | /// entries. 122 | impl Ord for DelayedTask { 123 | fn cmp(&self, other: &DelayedTask) -> Ordering { 124 | other.until.cmp(&self.until) 125 | } 126 | } 127 | 128 | impl PartialOrd for DelayedTask { 129 | fn partial_cmp(&self, other: &DelayedTask) -> Option { 130 | Some(self.cmp(other)) 131 | } 132 | } 133 | 134 | impl PartialEq for DelayedTask { 135 | fn eq(&self, other: &DelayedTask) -> bool { 136 | self.cmp(other) == Ordering::Equal 137 | } 138 | } 139 | 140 | impl Eq for DelayedTask {} 141 | 142 | #[cfg(test)] 143 | #[cfg_attr(coverage_nightly, coverage(off))] 144 | mod tests { 145 | use super::{RetryQueue, Task, Worker}; 146 | use crate::bee::stock::EchoWorker; 147 | use crate::bee::{TaskId, TaskMeta}; 148 | use std::{thread, time::Duration}; 149 | 150 | type TestWorker = EchoWorker; 151 | const DELAY: u64 = Duration::from_secs(1).as_nanos() as u64; 152 | 153 | impl RetryQueue { 154 | fn len(&self) -> usize { 155 | unsafe { self.inner.get().as_ref().unwrap().len() } 156 | } 157 | } 158 | 159 | impl Task { 160 | /// Creates a new `Task` with the given `task_id`. 
161 | fn with_attempt(task_id: TaskId, input: W::Input, attempt: u8) -> Self { 162 | Self { 163 | input, 164 | meta: TaskMeta::with_attempt(task_id, attempt), 165 | outcome_tx: None, 166 | } 167 | } 168 | } 169 | 170 | #[test] 171 | fn test_works() { 172 | let queue = RetryQueue::::new(DELAY); 173 | 174 | let task1 = Task::with_attempt(1, 1, 1); 175 | let task2 = Task::with_attempt(2, 2, 2); 176 | let task3 = Task::with_attempt(3, 3, 3); 177 | 178 | queue.try_push(task1.clone()).unwrap(); 179 | queue.try_push(task2.clone()).unwrap(); 180 | queue.try_push(task3.clone()).unwrap(); 181 | 182 | assert_eq!(queue.len(), 3); 183 | assert_eq!(queue.try_pop(), None); 184 | 185 | thread::sleep(Duration::from_secs(1)); 186 | assert_eq!(queue.try_pop(), Some(task1)); 187 | assert_eq!(queue.len(), 2); 188 | 189 | thread::sleep(Duration::from_secs(1)); 190 | assert_eq!(queue.try_pop(), Some(task2)); 191 | assert_eq!(queue.len(), 1); 192 | 193 | thread::sleep(Duration::from_secs(2)); 194 | assert_eq!(queue.try_pop(), Some(task3)); 195 | assert_eq!(queue.len(), 0); 196 | 197 | assert_eq!(queue.try_pop(), None); 198 | } 199 | 200 | #[test] 201 | fn test_into_vec() { 202 | let queue = RetryQueue::::new(DELAY); 203 | 204 | let task1 = Task::with_attempt(1, 1, 1); 205 | let task2 = Task::with_attempt(2, 2, 2); 206 | let task3 = Task::with_attempt(3, 3, 3); 207 | 208 | queue.try_push(task1.clone()).unwrap(); 209 | queue.try_push(task2.clone()).unwrap(); 210 | queue.try_push(task3.clone()).unwrap(); 211 | 212 | let mut v = Vec::new(); 213 | queue.drain_into(&mut v); 214 | v.sort(); 215 | 216 | assert_eq!(v, vec![task1, task2, task3]); 217 | } 218 | } 219 | -------------------------------------------------------------------------------- /src/hive/inner/queue/status.rs: -------------------------------------------------------------------------------- 1 | use crate::atomic::{Atomic, AtomicU8}; 2 | 3 | const OPEN: u8 = 0; 4 | const CLOSED_PUSH: u8 = 1; 5 | const CLOSED_POP: u8 = 2; 6 | 7 | /// Represents the status of a task queue. 8 | /// 9 | /// This is a simple state machine 10 | /// OPEN -> CLOSED_PUSH -> CLOSED_POP 11 | /// |________________________^ 12 | pub struct Status(AtomicU8); 13 | 14 | impl Status { 15 | /// Returns `true` if the queue status is `CLOSED_PUSH` or `CLOSED_POP`. 16 | pub fn is_closed(&self) -> bool { 17 | self.0.get() > OPEN 18 | } 19 | 20 | /// Returns `true` if the queue can accept new tasks. 21 | pub fn can_push(&self) -> bool { 22 | self.0.get() < CLOSED_PUSH 23 | } 24 | 25 | /// Returns `true` if the queue can remove tasks. 26 | pub fn can_pop(&self) -> bool { 27 | self.0.get() < CLOSED_POP 28 | } 29 | 30 | /// Sets the queue status to `CLOSED_PUSH` if `urgent` is `false`, or `CLOSED_POP` if `urgent` 31 | /// is `true`. 32 | pub fn set(&self, urgent: bool) { 33 | // TODO: this update should be done with `fetch_max` 34 | let new_status = if urgent { CLOSED_POP } else { CLOSED_PUSH }; 35 | if new_status > self.0.get() { 36 | self.0.set(new_status); 37 | } 38 | } 39 | } 40 | 41 | impl Default for Status { 42 | fn default() -> Self { 43 | Self(AtomicU8::new(OPEN)) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/hive/inner/task.rs: -------------------------------------------------------------------------------- 1 | use super::Task; 2 | use crate::bee::{TaskId, TaskMeta, Worker}; 3 | use crate::hive::{Outcome, OutcomeSender}; 4 | 5 | /// The type of input to a task for a given `Worker` type. 
This changes depending on the features 6 | /// that are enabled. 7 | pub use task_impl::TaskInput; 8 | 9 | impl Task { 10 | /// Returns the ID of the task. 11 | #[inline] 12 | pub fn id(&self) -> TaskId { 13 | self.meta.id() 14 | } 15 | 16 | /// Returns a reference to the task metadata. 17 | #[inline] 18 | pub fn meta(&self) -> &TaskMeta { 19 | &self.meta 20 | } 21 | 22 | /// Consumes this `Task` and returns its input, metadata, and outcome sender. 23 | pub fn into_parts(self) -> (W::Input, TaskMeta, Option>) { 24 | (self.input, self.meta, self.outcome_tx) 25 | } 26 | 27 | /// Consumes this `Task` and returns an `Outcome::Unprocessed` outcome with the input and ID, 28 | /// and the outcome sender. 29 | pub fn into_unprocessed(self) -> (Outcome, Option>) { 30 | let outcome = Outcome::Unprocessed { 31 | input: self.input, 32 | task_id: self.meta.id(), 33 | }; 34 | (outcome, self.outcome_tx) 35 | } 36 | 37 | /// Creates a new `Task` with the given metadata, and increments the attempt number. 38 | #[cfg(feature = "retry")] 39 | pub fn next_retry_attempt( 40 | input: W::Input, 41 | mut meta: TaskMeta, 42 | outcome_tx: Option>, 43 | ) -> Self { 44 | meta.inc_attempt(); 45 | Self { 46 | input, 47 | meta, 48 | outcome_tx, 49 | } 50 | } 51 | } 52 | 53 | #[cfg(not(feature = "local-batch"))] 54 | mod task_impl { 55 | use super::Task; 56 | use crate::bee::{TaskId, TaskMeta, Worker}; 57 | use crate::hive::OutcomeSender; 58 | 59 | pub type TaskInput = ::Input; 60 | 61 | impl Task { 62 | /// Creates a new `Task` with the given `task_id`. 63 | pub fn new( 64 | task_id: TaskId, 65 | input: TaskInput, 66 | outcome_tx: Option>, 67 | ) -> Self { 68 | Task { 69 | input, 70 | meta: TaskMeta::new(task_id), 71 | outcome_tx, 72 | } 73 | } 74 | } 75 | } 76 | 77 | #[cfg(feature = "local-batch")] 78 | mod task_impl { 79 | use super::Task; 80 | use crate::bee::{TaskId, TaskMeta, Worker}; 81 | use crate::hive::{Outcome, OutcomeSender, Weighted}; 82 | 83 | pub type TaskInput = Weighted<::Input>; 84 | 85 | impl Task { 86 | /// Creates a new `Task` with the given `task_id`. 87 | pub fn new( 88 | task_id: TaskId, 89 | input: TaskInput, 90 | outcome_tx: Option>, 91 | ) -> Self { 92 | let (input, weight) = input.into_parts(); 93 | Task { 94 | input, 95 | meta: TaskMeta::with_weight(task_id, weight), 96 | outcome_tx, 97 | } 98 | } 99 | 100 | /// Consumes this `Task` and returns a `Outcome::WeightLimitExceeded` outcome with the 101 | /// input, weight, and ID, and the outcome sender. 
102 | pub fn into_overweight(self) -> (Outcome, Option>) { 103 | let outcome = Outcome::WeightLimitExceeded { 104 | input: self.input, 105 | weight: self.meta.weight(), 106 | task_id: self.meta.id(), 107 | }; 108 | (outcome, self.outcome_tx) 109 | } 110 | } 111 | } 112 | 113 | impl> Clone for Task { 114 | fn clone(&self) -> Self { 115 | Self { 116 | input: self.input.clone(), 117 | meta: self.meta.clone(), 118 | outcome_tx: self.outcome_tx.clone(), 119 | } 120 | } 121 | } 122 | 123 | impl PartialEq for Task { 124 | fn eq(&self, other: &Self) -> bool { 125 | self.meta.id() == other.meta.id() 126 | } 127 | } 128 | 129 | impl Eq for Task {} 130 | 131 | impl PartialOrd for Task { 132 | fn partial_cmp(&self, other: &Self) -> Option { 133 | Some(self.cmp(other)) 134 | } 135 | } 136 | 137 | impl Ord for Task { 138 | fn cmp(&self, other: &Self) -> std::cmp::Ordering { 139 | self.meta.id().cmp(&other.meta.id()) 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /src/hive/mock.rs: -------------------------------------------------------------------------------- 1 | //! Utilities for testing `Worker`s. 2 | use super::{Outcome, Task, TaskInput}; 3 | use crate::bee::{Context, LocalContext, TaskId, Worker}; 4 | use std::cell::RefCell; 5 | 6 | /// A struct used for testing `Worker`s in a mock environment without needing to create a `Hive`. 7 | #[derive(Debug)] 8 | pub struct MockTaskRunner { 9 | worker: RefCell, 10 | task_id: RefCell, 11 | } 12 | 13 | impl MockTaskRunner { 14 | /// Creates a new `MockTaskRunner` with a starting task ID of 0. 15 | pub fn new(worker: W, first_task_id: TaskId) -> Self { 16 | Self { 17 | worker: RefCell::new(worker), 18 | task_id: RefCell::new(first_task_id), 19 | } 20 | } 21 | 22 | /// Applies the given `worker` to the given `input`. 23 | /// 24 | /// The task ID is automatically incremented and used to create the `Context`. 25 | /// 26 | /// Returns the `Outcome` from executing the task. 
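///
/// # Example
///
/// A sketch using the stock `EchoWorker`, which returns its input unchanged
/// (the `beekeeper::...` import paths are assumed):
///
/// ```ignore
/// use beekeeper::bee::stock::EchoWorker;
/// use beekeeper::hive::{MockTaskRunner, Outcome};
///
/// let runner = MockTaskRunner::<EchoWorker<usize>>::default();
/// let outcome = runner.apply(42usize);
/// assert!(matches!(
///     outcome,
///     Outcome::Success { value: 42, task_id: 0 }
/// ));
/// ```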
27 | pub fn apply>>(&self, input: I) -> Outcome { 28 | let task_id = self.next_task_id(); 29 | let local = MockLocalContext(self); 30 | let task: Task = Task::new(task_id, input.into(), None); 31 | let (input, task_meta, _) = task.into_parts(); 32 | let ctx = Context::new(task_meta, Some(&local)); 33 | let result = self.worker.borrow_mut().apply(input, &ctx); 34 | let (task_meta, subtask_ids) = ctx.into_parts(); 35 | Outcome::from_worker_result(result, task_meta, subtask_ids) 36 | } 37 | 38 | fn next_task_id(&self) -> TaskId { 39 | let mut task_id_counter = self.task_id.borrow_mut(); 40 | let task_id = *task_id_counter; 41 | *task_id_counter += 1; 42 | task_id 43 | } 44 | } 45 | 46 | impl From for MockTaskRunner { 47 | fn from(value: W) -> Self { 48 | Self::new(value, 0) 49 | } 50 | } 51 | 52 | impl Default for MockTaskRunner { 53 | fn default() -> Self { 54 | Self::from(W::default()) 55 | } 56 | } 57 | 58 | #[derive(Debug)] 59 | struct MockLocalContext<'a, W: Worker>(&'a MockTaskRunner); 60 | 61 | impl LocalContext for MockLocalContext<'_, W> 62 | where 63 | W: Worker, 64 | I: Into>, 65 | { 66 | fn should_cancel_tasks(&self) -> bool { 67 | false 68 | } 69 | 70 | fn submit_task(&self, _: I) -> TaskId { 71 | self.0.next_task_id() 72 | } 73 | 74 | #[cfg(test)] 75 | fn thread_index(&self) -> usize { 76 | 0 77 | } 78 | } 79 | 80 | #[cfg(test)] 81 | #[cfg_attr(coverage_nightly, coverage(off))] 82 | mod tests { 83 | use std::vec; 84 | 85 | use super::MockTaskRunner; 86 | use crate::bee::{Context, Worker, WorkerResult}; 87 | use crate::hive::Outcome; 88 | 89 | #[derive(Debug, Default)] 90 | struct TestWorker; 91 | 92 | impl Worker for TestWorker { 93 | type Input = usize; 94 | type Output = usize; 95 | type Error = (); 96 | 97 | fn apply(&mut self, input: Self::Input, ctx: &Context) -> WorkerResult { 98 | if !ctx.is_cancelled() { 99 | for i in 1..=3 { 100 | ctx.submit(input + i).unwrap(); 101 | } 102 | } 103 | Ok(input) 104 | } 105 | } 106 | 107 | #[test] 108 | fn test_works() { 109 | let runner = MockTaskRunner::::default(); 110 | let outcome = runner.apply(42usize); 111 | assert!(matches!( 112 | outcome, 113 | Outcome::SuccessWithSubtasks { 114 | value: 42, 115 | task_id: 0, 116 | .. 117 | } 118 | )); 119 | assert_eq!(outcome.subtask_ids(), Some(&vec![1, 2, 3])) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /src/hive/outcome/batch.rs: -------------------------------------------------------------------------------- 1 | use super::{DerefOutcomes, Outcome, OwnedOutcomes}; 2 | use crate::bee::{TaskId, Worker}; 3 | use derive_more::Debug; 4 | use std::any; 5 | use std::collections::HashMap; 6 | use std::ops::{Deref, DerefMut}; 7 | 8 | /// A batch of `Outcome`s. 
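///
/// # Example
///
/// A sketch building a batch directly from a few `Outcome`s (assumes the stock
/// `EchoWorker` and that `OutcomeStore` is in scope for the query methods):
///
/// ```ignore
/// use beekeeper::bee::stock::EchoWorker;
/// use beekeeper::hive::{Outcome, OutcomeBatch, OutcomeStore};
///
/// let batch = OutcomeBatch::from(vec![
///     Outcome::<EchoWorker<usize>>::Success { value: 1, task_id: 1 },
///     Outcome::Unprocessed { input: 2, task_id: 2 },
/// ]);
/// assert!(batch.has_unprocessed());
/// ```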
9 | #[derive(Debug)] 10 | #[debug("OutcomeBatch<{}>", any::type_name::())] 11 | pub struct OutcomeBatch(HashMap>); 12 | 13 | impl OutcomeBatch { 14 | pub(crate) fn new(outcomes: HashMap>) -> Self { 15 | Self(outcomes) 16 | } 17 | } 18 | 19 | impl>> From for OutcomeBatch { 20 | fn from(value: I) -> Self { 21 | OutcomeBatch::new( 22 | value 23 | .into_iter() 24 | .map(|outcome| (*outcome.task_id(), outcome)) 25 | .collect(), 26 | ) 27 | } 28 | } 29 | 30 | impl OwnedOutcomes for OutcomeBatch { 31 | #[inline] 32 | fn outcomes(self) -> HashMap> { 33 | self.0 34 | } 35 | 36 | #[inline] 37 | fn outcomes_ref(&self) -> &HashMap> { 38 | &self.0 39 | } 40 | } 41 | 42 | impl DerefOutcomes for OutcomeBatch { 43 | #[inline] 44 | fn outcomes_deref(&self) -> impl Deref>> { 45 | &self.0 46 | } 47 | 48 | #[inline] 49 | fn outcomes_deref_mut(&mut self) -> impl DerefMut>> { 50 | &mut self.0 51 | } 52 | } 53 | 54 | /// Functions only used in testing. 55 | #[cfg(test)] 56 | #[cfg_attr(coverage_nightly, coverage(off))] 57 | impl OutcomeBatch { 58 | pub(crate) fn empty() -> Self { 59 | OutcomeBatch::new(HashMap::new()) 60 | } 61 | 62 | pub(crate) fn insert(&mut self, outcome: Outcome) { 63 | self.0.insert(*outcome.task_id(), outcome); 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /src/hive/outcome/mod.rs: -------------------------------------------------------------------------------- 1 | mod batch; 2 | mod r#impl; 3 | mod iter; 4 | mod queue; 5 | mod store; 6 | 7 | pub use self::batch::OutcomeBatch; 8 | pub use self::iter::OutcomeIteratorExt; 9 | pub use self::queue::OutcomeQueue; 10 | pub use self::store::OutcomeStore; 11 | 12 | pub(super) use self::store::{DerefOutcomes, OwnedOutcomes}; 13 | 14 | use crate::bee::{TaskId, Worker}; 15 | use crate::panic::Panic; 16 | use derive_more::Debug; 17 | 18 | /// The possible outcomes of a task execution. 19 | /// 20 | /// Each outcome includes the task ID of the task that produced it. Tasks that submitted 21 | /// subtasks (via [`crate::bee::Context::submit`]) produce `Outcome` variants that have 22 | /// `subtask_ids`. 23 | /// 24 | /// Note that `Outcome`s can only be compared or ordered with other `Outcome`s produced by the same 25 | /// `Hive`, because comparison/ordering is completely based on the task ID. 26 | #[derive(Debug)] 27 | pub enum Outcome { 28 | /// The task was executed successfully. 29 | Success { 30 | #[debug(skip)] 31 | value: W::Output, 32 | task_id: TaskId, 33 | }, 34 | /// The task was executed successfully, and it also submitted one or more subtask_ids to the 35 | /// `Hive`. 36 | SuccessWithSubtasks { 37 | #[debug(skip)] 38 | value: W::Output, 39 | task_id: TaskId, 40 | subtask_ids: Vec, 41 | }, 42 | /// The task failed with an error that was not retryable. The input value that caused the 43 | /// failure is provided if possible. 44 | Failure { 45 | #[debug(skip)] 46 | input: Option, 47 | error: W::Error, 48 | task_id: TaskId, 49 | }, 50 | /// The task failed with an error that was not retryable, but it submitted one or more subtask_ids 51 | /// before failing. The input value that caused the failure is provided if possible. 52 | FailureWithSubtasks { 53 | #[debug(skip)] 54 | input: Option, 55 | error: W::Error, 56 | task_id: TaskId, 57 | subtask_ids: Vec, 58 | }, 59 | /// The task was not executed before the Hive was dropped, or processing of the task was 60 | /// interrupted (e.g., by `suspend`ing the `Hive`). 
61 | Unprocessed { 62 | #[debug(skip)] 63 | input: W::Input, 64 | task_id: TaskId, 65 | }, 66 | /// The task was not executed before the Hive was dropped, or processing of the task was 67 | /// interrupted (e.g., by `suspend`ing the `Hive`), but it first submitted one or more subtasks. 68 | UnprocessedWithSubtasks { 69 | #[debug(skip)] 70 | input: W::Input, 71 | task_id: TaskId, 72 | subtask_ids: Vec<TaskId>, 73 | }, 74 | /// The task with the given task_id was not found in the `Hive` or iterator from which it was 75 | /// being requested. 76 | Missing { task_id: TaskId }, 77 | /// The task panicked. The input value that caused the panic is provided if possible. 78 | Panic { 79 | #[debug(skip)] 80 | input: Option<W::Input>, 81 | payload: Panic, 82 | task_id: TaskId, 83 | }, 84 | /// The task panicked, but it submitted one or more subtasks before panicking. The input value 85 | /// that caused the panic is provided if possible. 86 | PanicWithSubtasks { 87 | #[debug(skip)] 88 | input: Option<W::Input>, 89 | payload: Panic, 90 | task_id: TaskId, 91 | subtask_ids: Vec<TaskId>, 92 | }, 93 | /// The task's weight was larger than the configured limit for the `Hive`. 94 | #[cfg(feature = "local-batch")] 95 | WeightLimitExceeded { 96 | #[debug(skip)] 97 | input: W::Input, 98 | weight: u32, 99 | task_id: TaskId, 100 | }, 101 | /// The task failed after retrying the maximum number of times. 102 | #[cfg(feature = "retry")] 103 | MaxRetriesAttempted { 104 | #[debug(skip)] 105 | input: W::Input, 106 | error: W::Error, 107 | task_id: TaskId, 108 | }, 109 | } 110 | -------------------------------------------------------------------------------- /src/hive/outcome/queue.rs: -------------------------------------------------------------------------------- 1 | use super::{DerefOutcomes, Outcome}; 2 | use crate::bee::{TaskId, Worker}; 3 | use crossbeam_queue::SegQueue; 4 | use parking_lot::Mutex; 5 | use std::collections::HashMap; 6 | use std::ops::{Deref, DerefMut}; 7 | 8 | /// Data structure that supports queuing `Outcome`s from multiple threads (without locking) and 9 | /// fetching from a single thread (which requires draining the queue into a map that is behind a 10 | /// mutex). 11 | /// 12 | /// TODO: test vs using a 13 | /// [`SkipMap`](https://docs.rs/crossbeam-skiplist/latest/crossbeam_skiplist/struct.SkipMap.html) or 14 | /// [`DashMap`](https://docs.rs/dashmap/latest/dashmap/struct.DashMap.html) 15 | pub struct OutcomeQueue<W: Worker> { 16 | queue: SegQueue<Outcome<W>>, 17 | outcomes: Mutex<HashMap<TaskId, Outcome<W>>>, 18 | } 19 | 20 | impl<W: Worker> OutcomeQueue<W> { 21 | /// Adds an `outcome` to the queue. 22 | pub fn push(&self, outcome: Outcome<W>) { 23 | self.queue.push(outcome); 24 | } 25 | 26 | /// Flushes the queue into the map of outcomes and returns a mutable reference to the map. 27 | pub fn get_mut(&self) -> impl DerefMut<Target = HashMap<TaskId, Outcome<W>>> { 28 | let mut outcomes = self.outcomes.lock(); 29 | drain_into(&self.queue, &mut outcomes); 30 | outcomes 31 | } 32 | 33 | /// Consumes this `OutcomeQueue`, drains the queue, and returns the outcomes as a map.
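///
/// # Examples
///
/// A minimal sketch, assuming `EchoWorker` from `beekeeper::bee::stock` and that
/// `OutcomeQueue` is re-exported from `beekeeper::hive` (hence `ignore`):
///
/// ```ignore
/// use beekeeper::bee::stock::EchoWorker;
/// use beekeeper::hive::{Outcome, OutcomeQueue};
///
/// let queue = OutcomeQueue::<EchoWorker<usize>>::default();
/// // `push` never blocks; outcomes are buffered in a lock-free queue.
/// queue.push(Outcome::Success { value: 7, task_id: 0 });
/// // Draining consumes the queue and yields the outcomes keyed by task ID.
/// let outcomes = queue.into_inner();
/// assert_eq!(outcomes.len(), 1);
/// ```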
34 | pub fn into_inner(self) -> HashMap<TaskId, Outcome<W>> { 35 | let mut outcomes = self.outcomes.into_inner(); 36 | drain_into(&self.queue, &mut outcomes); 37 | outcomes 38 | } 39 | } 40 | 41 | #[inline] 42 | fn drain_into<W: Worker>(queue: &SegQueue<Outcome<W>>, outcomes: &mut HashMap<TaskId, Outcome<W>>) { 43 | while let Some(outcome) = queue.pop() { 44 | outcomes.insert(*outcome.task_id(), outcome); 45 | } 46 | } 47 | 48 | impl<W: Worker> Default for OutcomeQueue<W> { 49 | fn default() -> Self { 50 | Self { 51 | queue: Default::default(), 52 | outcomes: Default::default(), 53 | } 54 | } 55 | } 56 | 57 | impl<W: Worker> DerefOutcomes<W> for OutcomeQueue<W> { 58 | fn outcomes_deref(&self) -> impl Deref<Target = HashMap<TaskId, Outcome<W>>> { 59 | self.get_mut() 60 | } 61 | 62 | fn outcomes_deref_mut(&mut self) -> impl DerefMut<Target = HashMap<TaskId, Outcome<W>>> { 63 | self.get_mut() 64 | } 65 | } 66 | 67 | #[cfg(test)] 68 | #[cfg_attr(coverage_nightly, coverage(off))] 69 | mod tests { 70 | use super::*; 71 | use crate::bee::stock::EchoWorker; 72 | use crate::hive::OutcomeStore; 73 | 74 | #[test] 75 | fn test_works() { 76 | let queue = OutcomeQueue::<EchoWorker<usize>>::default(); 77 | queue.push(Outcome::Success { 78 | value: 42, 79 | task_id: 1, 80 | }); 81 | queue.push(Outcome::Unprocessed { 82 | input: 43, 83 | task_id: 2, 84 | }); 85 | queue.push(Outcome::Failure { 86 | input: Some(44), 87 | error: (), 88 | task_id: 3, 89 | }); 90 | assert_eq!(queue.count(), (1, 1, 1)); 91 | queue.push(Outcome::Missing { task_id: 4 }); 92 | let outcomes = queue.into_inner(); 93 | assert_eq!(outcomes.len(), 4); 94 | assert_eq!( 95 | outcomes[&1], 96 | Outcome::Success { 97 | value: 42, 98 | task_id: 1 99 | } 100 | ); 101 | assert_eq!( 102 | outcomes[&2], 103 | Outcome::Unprocessed { 104 | input: 43, 105 | task_id: 2 106 | } 107 | ); 108 | assert_eq!( 109 | outcomes[&3], 110 | Outcome::Failure { 111 | input: Some(44), 112 | error: (), 113 | task_id: 3 114 | } 115 | ); 116 | assert_eq!(outcomes[&4], Outcome::Missing { task_id: 4 }) 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /src/hive/sentinel.rs: -------------------------------------------------------------------------------- 1 | use super::{Shared, TaskQueues}; 2 | use crate::bee::{Queen, Worker}; 3 | use std::io::Error as SpawnError; 4 | use std::sync::Arc; 5 | use std::thread::{self, JoinHandle}; 6 | 7 | /// Sentinel for a worker thread. Until the sentinel is cancelled, it will respawn the worker 8 | /// thread if it panics. 9 | pub struct Sentinel<W, Q, T, F> 10 | where 11 | W: Worker, 12 | Q: Queen<Kind = W>, 13 | T: TaskQueues<W>, 14 | F: Fn(usize, &Arc<Shared<W, Q, T>>) -> Result<JoinHandle<()>, SpawnError> + 'static, 15 | { 16 | /// The index of the worker thread 17 | thread_index: usize, 18 | /// The shared data to pass to the new worker thread when respawning 19 | shared: Arc<Shared<W, Q, T>>, 20 | /// Whether sentinel is active 21 | active: bool, 22 | /// The function that will be called to respawn the worker thread 23 | respawn_fn: F, 24 | } 25 | 26 | impl<W, Q, T, F> Sentinel<W, Q, T, F> 27 | where 28 | W: Worker, 29 | Q: Queen<Kind = W>, 30 | T: TaskQueues<W>, 31 | F: Fn(usize, &Arc<Shared<W, Q, T>>) -> Result<JoinHandle<()>, SpawnError> + 'static, 32 | { 33 | pub fn new(thread_index: usize, shared: Arc<Shared<W, Q, T>>, respawn_fn: F) -> Self { 34 | Self { 35 | thread_index, 36 | shared, 37 | active: true, 38 | respawn_fn, 39 | } 40 | } 41 | 42 | /// Cancel and destroy this sentinel.
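///
/// After `cancel`, the sentinel is consumed with `active` set to `false`, so dropping it
/// will neither finish the current task nor respawn the worker thread.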
43 | pub fn cancel(mut self) { 44 | self.active = false; 45 | } 46 | } 47 | 48 | impl<W, Q, T, F> Drop for Sentinel<W, Q, T, F> 49 | where 50 | W: Worker, 51 | Q: Queen<Kind = W>, 52 | T: TaskQueues<W>, 53 | F: Fn(usize, &Arc<Shared<W, Q, T>>) -> Result<JoinHandle<()>, SpawnError> + 'static, 54 | { 55 | fn drop(&mut self) { 56 | if self.active { 57 | // if the sentinel is active, that means the thread panicked during task execution, so 58 | // we have to finish the task here before respawning 59 | self.shared.finish_task(thread::panicking()); 60 | // only respawn if the sentinel is active and the hive has not been poisoned 61 | if !self.shared.is_poisoned() { 62 | // can't do anything with the previous JoinHandle 63 | let _ = self 64 | .shared 65 | .respawn_thread(self.thread_index, |thread_index| { 66 | (self.respawn_fn)(thread_index, &self.shared) 67 | }); 68 | } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /src/hive/util.rs: -------------------------------------------------------------------------------- 1 | //! Internal utilities for the `hive` module. 2 | use crossbeam_utils::Backoff; 3 | use std::sync::Arc; 4 | use std::time::{Duration, Instant}; 5 | 6 | const MAX_WAIT: Duration = Duration::from_secs(10); 7 | 8 | /// Utility function to loop (with exponential backoff) waiting for other references to `arc` to 9 | /// drop so it can be unwrapped into its inner value. 10 | /// 11 | /// If `arc` cannot be unwrapped within a certain amount of time (with an exponentially increasing 12 | /// gap between each iteration), `arc` is returned as an error. 13 | pub fn unwrap_arc<T>(mut arc: Arc<T>) -> Result<T, Arc<T>> { 14 | // wait for worker threads to drop, then take ownership of the shared data and convert it 15 | // into a Husk 16 | let mut backoff = None::<Backoff>; 17 | let mut start = None::<Instant>; 18 | loop { 19 | arc = match std::sync::Arc::try_unwrap(arc) { 20 | Ok(inner) => { 21 | return Ok(inner); 22 | } 23 | Err(arc) if start.is_none() => { 24 | let _ = start.insert(Instant::now()); 25 | arc 26 | } 27 | Err(arc) if Instant::now() - start.unwrap() > MAX_WAIT => return Err(arc), 28 | Err(arc) => { 29 | backoff.get_or_insert_with(Backoff::new).spin(); 30 | arc 31 | } 32 | }; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /src/hive/weighted.rs: -------------------------------------------------------------------------------- 1 | //! Weighted value used for task submission with the `local-batch` feature. 2 | use num::ToPrimitive; 3 | use std::ops::Deref; 4 | 5 | /// Wraps a value of type `T` and an associated weight. 6 | #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] 7 | pub struct Weighted<T> { 8 | value: T, 9 | weight: u32, 10 | } 11 | 12 | impl<T> Weighted<T> { 13 | /// Creates a new `Weighted` instance with the given value and weight. 14 | pub fn new<P: ToPrimitive>(value: T, weight: P) -> Self { 15 | Self { 16 | value, 17 | weight: weight.to_u32().unwrap(), 18 | } 19 | } 20 | 21 | /// Creates a new `Weighted` instance with the given value and weight obtained from calling the 22 | /// given function on `value`. 23 | pub fn from_fn<F>(value: T, f: F) -> Self 24 | where 25 | F: FnOnce(&T) -> u32, 26 | { 27 | let weight = f(&value); 28 | Self::new(value, weight) 29 | } 30 | 31 | /// Creates a new `Weighted` instance with the given value and weight obtained by converting 32 | /// the value into a `u32`.
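///
/// # Examples
///
/// A small sketch, assuming `Weighted` is re-exported from `beekeeper::hive`
/// (hence `ignore`):
///
/// ```ignore
/// use beekeeper::hive::Weighted;
///
/// // The value itself is converted to `u32` and used as the weight.
/// let w = Weighted::from_identity(7u32);
/// assert_eq!(*w, 7);
/// assert_eq!(w.weight(), 7);
/// ```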
33 | pub fn from_identity(value: T) -> Self 34 | where 35 | T: ToPrimitive + Clone, 36 | { 37 | let weight = value.clone().to_u32().unwrap(); 38 | Self::new(value, weight) 39 | } 40 | 41 | /// Returns the weight associated with this `Weighted` value. 42 | pub fn weight(&self) -> u32 { 43 | self.weight 44 | } 45 | 46 | /// Returns the value and weight as a tuple. 47 | pub fn into_parts(self) -> (T, u32) { 48 | (self.value, self.weight) 49 | } 50 | } 51 | 52 | impl<T> Deref for Weighted<T> { 53 | type Target = T; 54 | 55 | fn deref(&self) -> &Self::Target { 56 | &self.value 57 | } 58 | } 59 | 60 | impl<T> From<T> for Weighted<T> { 61 | fn from(value: T) -> Self { 62 | Self::new(value, 0) 63 | } 64 | } 65 | 66 | impl<T, P: ToPrimitive> From<(T, P)> for Weighted<T> { 67 | fn from((value, weight): (T, P)) -> Self { 68 | Self::new(value, weight) 69 | } 70 | } 71 | 72 | /// Extends `IntoIterator` to add methods to convert any iterator into an iterator over `Weighted` 73 | /// items. 74 | pub trait WeightedIteratorExt: IntoIterator + Sized { 75 | /// Converts this iterator over (T, P) items into an iterator over `Weighted` items with 76 | /// weights set to `P::to_u32()`. 77 | fn into_weighted<T, P>(self) -> impl Iterator<Item = Weighted<T>> 78 | where 79 | P: ToPrimitive, 80 | Self: IntoIterator<Item = (T, P)>, 81 | { 82 | self.into_iter() 83 | .map(|(value, weight)| Weighted::new(value, weight)) 84 | } 85 | 86 | /// Converts this iterator into an iterator over `Weighted` with weights set to 0. 87 | fn into_default_weighted(self) -> impl Iterator<Item = Weighted<Self::Item>> { 88 | self.into_iter().map(Into::into) 89 | } 90 | 91 | /// Converts this iterator into an iterator over `Weighted` with weights set to 92 | /// `weight`. 93 | fn into_const_weighted(self, weight: u32) -> impl Iterator<Item = Weighted<Self::Item>> { 94 | self.into_iter() 95 | .map(move |item| Weighted::new(item, weight)) 96 | } 97 | 98 | /// Converts this iterator into an iterator over `Weighted` with weights set to 99 | /// `item.clone().to_u32()`. 100 | fn into_identity_weighted(self) -> impl Iterator<Item = Weighted<Self::Item>> 101 | where 102 | Self::Item: ToPrimitive + Clone, 103 | { 104 | self.into_iter().map(Weighted::from_identity) 105 | } 106 | 107 | /// Zips this iterator with `weights` and converts each tuple into a `Weighted` 108 | /// with the weight set to the corresponding value from `weights`. 109 | fn into_weighted_zip<W, P>(self, weights: W) -> impl Iterator<Item = Weighted<Self::Item>> 110 | where 111 | P: ToPrimitive + Clone + Default, 112 | W: IntoIterator<Item = P>, 113 | W::IntoIter: 'static, 114 | { 115 | self.into_iter() 116 | .zip(weights.into_iter().chain(std::iter::repeat(P::default()))) 117 | .map(Into::into) 118 | } 119 | 120 | /// Converts this iterator into an iterator over `Weighted` with weights set to 121 | /// the result of calling `f` on each item. 122 | fn into_weighted_with<F>(self, f: F) -> impl Iterator<Item = Weighted<Self::Item>> 123 | where 124 | F: Fn(&Self::Item) -> u32, 125 | { 126 | self.into_iter().map(move |item| { 127 | let weight = f(&item); 128 | Weighted::new(item, weight) 129 | }) 130 | } 131 | 132 | /// Converts this `ExactSizeIterator` over (T, P) items into an `ExactSizeIterator` over 133 | /// `Weighted` items with weights set to `P::to_u32()`. 134 | fn into_weighted_exact<T, P: ToPrimitive>(self) -> impl ExactSizeIterator<Item = Weighted<T>> 135 | where 136 | Self: IntoIterator<Item = (T, P)>, 137 | Self::IntoIter: ExactSizeIterator + 'static, 138 | { 139 | self.into_iter() 140 | .map(|(value, weight)| Weighted::new(value, weight)) 141 | } 142 | 143 | /// Converts this `ExactSizeIterator` into an `ExactSizeIterator` over `Weighted` 144 | /// with weights set to 0.
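///
/// For example (a sketch that assumes `WeightedIteratorExt` is re-exported from
/// `beekeeper::hive`, hence `ignore`):
///
/// ```ignore
/// use beekeeper::hive::WeightedIteratorExt;
///
/// // Every item receives the default weight of 0, and the iterator keeps its exact size.
/// let weights: Vec<u32> = (0..5)
///     .into_default_weighted_exact()
///     .map(|w| w.weight())
///     .collect();
/// assert_eq!(weights, vec![0; 5]);
/// ```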
145 | fn into_default_weighted_exact(self) -> impl ExactSizeIterator<Item = Weighted<Self::Item>> 146 | where 147 | Self::IntoIter: ExactSizeIterator + 'static, 148 | { 149 | self.into_iter().map(Into::into) 150 | } 151 | 152 | /// Converts this `ExactSizeIterator` into an `ExactSizeIterator` over `Weighted` 153 | /// with weights set to `weight`. 154 | fn into_const_weighted_exact( 155 | self, 156 | weight: u32, 157 | ) -> impl ExactSizeIterator<Item = Weighted<Self::Item>> 158 | where 159 | Self::IntoIter: ExactSizeIterator + 'static, 160 | { 161 | self.into_iter() 162 | .map(move |item| Weighted::new(item, weight)) 163 | } 164 | 165 | /// Converts this `ExactSizeIterator` into an `ExactSizeIterator` over `Weighted` 166 | /// with weights set to `item.clone().to_u32()`. 167 | fn into_identity_weighted_exact(self) -> impl ExactSizeIterator<Item = Weighted<Self::Item>> 168 | where 169 | Self::Item: ToPrimitive + Clone, 170 | Self::IntoIter: ExactSizeIterator + 'static, 171 | { 172 | self.into_iter().map(Weighted::from_identity) 173 | } 174 | 175 | /// Converts this `ExactSizeIterator` into an `ExactSizeIterator` over `Weighted` 176 | /// with weights set to the result of calling `f` on each item. 177 | fn into_weighted_exact_with<F>( 178 | self, 179 | f: F, 180 | ) -> impl ExactSizeIterator<Item = Weighted<Self::Item>> 181 | where 182 | Self::IntoIter: ExactSizeIterator + 'static, 183 | F: Fn(&Self::Item) -> u32, 184 | { 185 | self.into_iter().map(move |item| { 186 | let weight = f(&item); 187 | Weighted::new(item, weight) 188 | }) 189 | } 190 | } 191 | 192 | impl<T: IntoIterator + Sized> WeightedIteratorExt for T {} 193 | 194 | #[cfg(test)] 195 | #[cfg_attr(coverage_nightly, coverage(off))] 196 | mod tests { 197 | use super::*; 198 | 199 | #[test] 200 | fn test_new() { 201 | let weighted = Weighted::new(42, 10); 202 | assert_eq!(*weighted, 42); 203 | assert_eq!(weighted.weight(), 10); 204 | assert_eq!(weighted.into_parts(), (42, 10)); 205 | } 206 | 207 | #[test] 208 | fn test_from_fn() { 209 | let weighted = Weighted::from_fn(42, |x| x * 2); 210 | assert_eq!(*weighted, 42); 211 | assert_eq!(weighted.weight(), 84); 212 | } 213 | 214 | #[test] 215 | fn test_from_identity() { 216 | let weighted = Weighted::from_identity(42); 217 | assert_eq!(*weighted, 42); 218 | assert_eq!(weighted.weight(), 42); 219 | } 220 | 221 | #[test] 222 | fn test_from_unweighted() { 223 | let weighted = Weighted::from(42); 224 | assert_eq!(*weighted, 42); 225 | assert_eq!(weighted.weight(), 0); 226 | } 227 | 228 | #[test] 229 | fn test_from_tuple() { 230 | let weighted: Weighted<i32> = Weighted::from((42, 10)); 231 | assert_eq!(*weighted, 42); 232 | assert_eq!(weighted.weight(), 10); 233 | assert_eq!(weighted.into_parts(), (42, 10)); 234 | } 235 | 236 | #[test] 237 | fn test_into_weighted() { 238 | (0..10) 239 | .map(|i| (i, i)) 240 | .into_weighted() 241 | .for_each(|weighted| assert_eq!(weighted.weight(), weighted.value)); 242 | } 243 | 244 | #[test] 245 | fn test_into_default_weighted() { 246 | (0..10) 247 | .into_default_weighted() 248 | .for_each(|weighted| assert_eq!(weighted.weight(), 0)); 249 | } 250 | 251 | #[test] 252 | fn test_into_identity_weighted() { 253 | (0..10) 254 | .into_identity_weighted() 255 | .for_each(|weighted| assert_eq!(weighted.weight(), weighted.value)); 256 | } 257 | 258 | #[test] 259 | fn test_into_const_weighted() { 260 | (0..10) 261 | .into_const_weighted(5) 262 | .for_each(|weighted| assert_eq!(weighted.weight(), 5)); 263 | } 264 | 265 | #[test] 266 | fn test_into_weighted_zip() { 267 | (0..10) 268 | .into_weighted_zip(10..20) 269 | .for_each(|weighted| assert_eq!(weighted.weight(), weighted.value + 10)); 270 | } 271 | 272 
| #[test] 273 | fn test_into_weighted_with() { 274 | (0..10) 275 | .into_weighted_with(|i| i * 2) 276 | .for_each(|weighted| assert_eq!(weighted.weight(), weighted.value * 2)); 277 | } 278 | 279 | #[test] 280 | fn test_into_weighted_exact() { 281 | (0..10) 282 | .map(|i| (i, i)) 283 | .into_weighted_exact() 284 | .for_each(|weighted| assert_eq!(weighted.weight(), weighted.value)); 285 | } 286 | 287 | #[test] 288 | fn test_into_default_weighted_exact() { 289 | (0..10) 290 | .into_default_weighted_exact() 291 | .for_each(|weighted| assert_eq!(weighted.weight(), 0)); 292 | } 293 | 294 | #[test] 295 | fn test_into_identity_weighted_exact() { 296 | (0..10) 297 | .into_identity_weighted_exact() 298 | .for_each(|weighted| assert_eq!(weighted.weight(), weighted.value)); 299 | } 300 | 301 | #[test] 302 | fn test_into_const_weighted_exact() { 303 | (0..10) 304 | .into_const_weighted_exact(5) 305 | .for_each(|weighted| assert_eq!(weighted.weight(), 5)); 306 | } 307 | 308 | #[test] 309 | fn test_into_weighted_exact_with() { 310 | (0..10) 311 | .into_weighted_exact_with(|i| i * 2) 312 | .for_each(|weighted| assert_eq!(weighted.weight(), weighted.value * 2)); 313 | } 314 | } 315 | -------------------------------------------------------------------------------- /src/panic.rs: -------------------------------------------------------------------------------- 1 | //! Data type that wraps a `panic` payload. 2 | use super::boxed::BoxedFnOnce; 3 | use derive_more::Debug; 4 | use std::any::Any; 5 | use std::fmt; 6 | use std::panic::AssertUnwindSafe; 7 | 8 | pub type PanicPayload = Box; 9 | 10 | /// Wraps a payload from a caught `panic` with an optional `detail`. 11 | #[derive(Debug)] 12 | pub struct Panic { 13 | #[debug("")] 14 | payload: PanicPayload, 15 | detail: Option, 16 | } 17 | 18 | impl Panic { 19 | /// Attempts to call the provided function `f` and catches any panic. Returns either the return 20 | /// value of the function or a `Panic` created from the panic payload and the provided `detail`. 21 | pub fn try_call O>(detail: Option, f: F) -> Result { 22 | std::panic::catch_unwind(AssertUnwindSafe(f)).map_err(|payload| Self { payload, detail }) 23 | } 24 | 25 | pub(crate) fn try_call_boxed + ?Sized>( 26 | detail: Option, 27 | f: Box, 28 | ) -> Result { 29 | std::panic::catch_unwind(AssertUnwindSafe(|| f.call_box())) 30 | .map_err(|payload| Self { payload, detail }) 31 | } 32 | 33 | /// Returns the payload of the panic. 34 | pub fn payload(&self) -> &PanicPayload { 35 | &self.payload 36 | } 37 | 38 | /// Returns the optional detail of the panic. 39 | pub fn detail(&self) -> Option<&T> { 40 | self.detail.as_ref() 41 | } 42 | 43 | /// Consumes this `Panic` and resumes unwinding the thread. 44 | pub fn resume(self) -> ! { 45 | std::panic::resume_unwind(self.payload) 46 | } 47 | } 48 | 49 | impl PartialEq for Panic { 50 | fn eq(&self, other: &Self) -> bool { 51 | (*self.payload).type_id() == (*other.payload).type_id() && self.detail == other.detail 52 | } 53 | } 54 | 55 | impl Eq for Panic {} 56 | 57 | #[cfg(test)] 58 | #[cfg_attr(coverage_nightly, coverage(off))] 59 | mod tests { 60 | use super::Panic; 61 | use std::fmt::Debug; 62 | 63 | impl Panic { 64 | /// Panics with `msg` and immediately catches it to create a new `Panic` instance for testing. 
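///
/// For example: `let p = Panic::<String>::new("boom", Some("ctx".to_string()));`.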
65 | pub fn new(msg: &str, detail: Option<T>) -> Self { 66 | let payload = std::panic::catch_unwind(|| panic!("{}", msg)) 67 | .err() 68 | .unwrap(); 69 | Self { payload, detail } 70 | } 71 | } 72 | 73 | #[test] 74 | fn test_catch_panic() { 75 | let result = Panic::try_call("test".into(), || panic!("panic!")); 76 | let panic = result.unwrap_err(); 77 | assert_eq!(*panic.detail().unwrap(), "test"); 78 | } 79 | 80 | #[test] 81 | #[should_panic] 82 | fn test_resume_panic() { 83 | let result = Panic::try_call("test".into(), || panic!("panic!")); 84 | let panic = result.unwrap_err(); 85 | panic.resume(); 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/util.rs: -------------------------------------------------------------------------------- 1 | //! Utility functions for simple use cases. 2 | //! 3 | //! In all cases, the number of threads is specified as a parameter, and the function takes care of 4 | //! creating the [`Hive`](crate::hive::Hive) (with channel-based task queues), submitting tasks, 5 | //! collecting results, and shutting down the `Hive` properly. 6 | use crate::bee::stock::{Caller, OnceCaller}; 7 | use crate::hive::{Builder, ChannelBuilder, Outcome, OutcomeBatch, TaskQueuesBuilder}; 8 | use std::fmt::Debug; 9 | 10 | /// Convenience function that creates a `Hive` with `num_threads` worker threads that execute the 11 | /// provided callable on the provided inputs and returns a `Vec` of the results. 12 | /// 13 | /// The provided function should not panic. For fallible operations, use one of the `try_map_*` 14 | /// functions. 15 | /// 16 | /// # Examples 17 | /// 18 | /// ``` 19 | /// # fn main() { 20 | /// let outputs: Vec<usize> = beekeeper::util::map(4, 2..9usize, |i| i + 1); 21 | /// assert_eq!(outputs.into_iter().sum::<usize>(), 42); 22 | /// # } 23 | /// ``` 24 | pub fn map<I, O, Inputs, F>(num_threads: usize, inputs: Inputs, f: F) -> Vec<O> 25 | where 26 | I: Send + Sync + 'static, 27 | O: Send + Sync + 'static, 28 | Inputs: IntoIterator<Item = I>, 29 | F: FnMut(I) -> O + Send + Sync + Clone + 'static, 30 | { 31 | ChannelBuilder::default() 32 | .num_threads(num_threads) 33 | .with_worker(Caller::from(f)) 34 | .build() 35 | .map(inputs) 36 | .map(Outcome::unwrap) 37 | .collect() 38 | } 39 | 40 | /// Convenience function that creates a `Hive` with `num_threads` worker threads that execute the 41 | /// provided callable on the provided inputs and returns a `Vec` of the results, or an error if any 42 | /// of the tasks failed.
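/// Failed tasks are not unwrapped: every result is returned as an `Outcome` in the
/// `OutcomeBatch`, so callers can inspect failures (e.g., with `OutcomeStore::has_failures`,
/// as in the example below).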
43 | /// 44 | /// # Examples 45 | /// 46 | /// ``` 47 | /// # use beekeeper::hive::OutcomeStore; 48 | /// # fn main() { 49 | /// let result = beekeeper::util::try_map(4, 0..10, |i| { 50 | /// if i == 5 { 51 | /// Err("No fives allowed!") 52 | /// } else { 53 | /// Ok(i * i) 54 | /// } 55 | /// }); 56 | /// assert!(result.has_failures()); 57 | /// # } 58 | /// ``` 59 | pub fn try_map( 60 | num_threads: usize, 61 | inputs: Inputs, 62 | f: F, 63 | ) -> OutcomeBatch> 64 | where 65 | I: Send + Sync + 'static, 66 | O: Send + Sync + 'static, 67 | E: Send + Sync + Debug + 'static, 68 | Inputs: IntoIterator, 69 | F: FnMut(I) -> Result + Send + Sync + Clone + 'static, 70 | { 71 | ChannelBuilder::default() 72 | .num_threads(num_threads) 73 | .with_worker(OnceCaller::from(f)) 74 | .build() 75 | .map(inputs) 76 | .into() 77 | } 78 | 79 | #[cfg(test)] 80 | #[cfg_attr(coverage_nightly, coverage(off))] 81 | mod tests { 82 | use crate::hive::{Outcome, OutcomeStore}; 83 | 84 | #[test] 85 | fn test_map() { 86 | let outputs = super::map(4, 0..100, |i| i + 1); 87 | assert_eq!(outputs, (1..=100).collect::>()); 88 | } 89 | 90 | #[test] 91 | fn test_try_map() { 92 | let result = super::try_map( 93 | 4, 94 | 0..100, 95 | |i| { 96 | if i == 50 { Err("Fiddy!") } else { Ok(i + 1) } 97 | }, 98 | ); 99 | assert!(result.has_failures()); 100 | assert_eq!(1, result.num_failures()); 101 | assert!(matches!( 102 | result.iter_failures().next().unwrap(), 103 | Outcome::Failure { .. } 104 | )); 105 | assert_eq!(99, result.num_successes()); 106 | assert!(result.ok_or_unwrap_errors(true).is_err()); 107 | } 108 | } 109 | 110 | #[cfg(feature = "retry")] 111 | pub use retry::try_map_retryable; 112 | 113 | #[cfg(feature = "retry")] 114 | mod retry { 115 | use crate::bee::stock::RetryCaller; 116 | use crate::bee::{ApplyError, Context}; 117 | use crate::hive::{Builder, ChannelBuilder, OutcomeBatch, TaskQueuesBuilder}; 118 | use std::fmt::Debug; 119 | 120 | /// Convenience function that creates a `Hive` with `num_threads` worker threads that execute the 121 | /// provided callable on the provided inputs and returns a `Vec` of the results, or an error if any 122 | /// of the tasks failed. 123 | /// 124 | /// A task that fails with an `ApplyError::Retryable` error will be retried up to `max_retries` 125 | /// times. 
126 | /// 127 | /// # Examples 128 | /// 129 | /// ``` 130 | /// use beekeeper::bee::ApplyError; 131 | /// use beekeeper::bee::stock::RetryCaller; 132 | /// use beekeeper::hive::OutcomeStore; 133 | /// 134 | /// # fn main() { 135 | /// let result = beekeeper::util::try_map_retryable::(4, 3, 0..10, |i, _| if i == 5 { 136 | /// Err(ApplyError::Fatal { input: Some(i), error: "No fives allowed!".into() }) 137 | /// } else if i == 7 { 138 | /// Err(ApplyError::Retryable { input: i, error: "Re-roll a 7".into() }) 139 | /// } else { 140 | /// Ok(i * i) 141 | /// }); 142 | /// assert!(result.has_failures()); 143 | /// # } 144 | /// ``` 145 | pub fn try_map_retryable( 146 | num_threads: usize, 147 | max_retries: u8, 148 | inputs: Inputs, 149 | f: F, 150 | ) -> OutcomeBatch> 151 | where 152 | I: Send + Sync + 'static, 153 | O: Send + Sync + 'static, 154 | E: Send + Sync + Debug + 'static, 155 | Inputs: IntoIterator, 156 | F: FnMut(I, &Context) -> Result> + Send + Sync + Clone + 'static, 157 | { 158 | ChannelBuilder::default() 159 | .num_threads(num_threads) 160 | .max_retries(max_retries) 161 | .with_worker(RetryCaller::from(f)) 162 | .build() 163 | .map(inputs) 164 | .into() 165 | } 166 | 167 | #[cfg(test)] 168 | #[cfg_attr(coverage_nightly, coverage(off))] 169 | mod tests { 170 | use crate::bee::ApplyError; 171 | use crate::hive::{Outcome, OutcomeStore}; 172 | 173 | #[test] 174 | fn test_try_map_retryable() { 175 | let result = super::try_map_retryable(4, 3, 0..100, |i, ctx| { 176 | if i != 50 { 177 | Ok(i + 1) 178 | } else if ctx.attempt() == 3 { 179 | Ok(500) 180 | } else { 181 | Err(ApplyError::Retryable { 182 | input: 50, 183 | error: format!("Fiddy {}", ctx.attempt()), 184 | }) 185 | } 186 | }); 187 | assert!(!result.has_failures()); 188 | } 189 | 190 | #[test] 191 | fn test_try_map_retyrable_fail() { 192 | let result = super::try_map_retryable(4, 3, 0..100, |i, ctx| { 193 | if i != 50 { 194 | Ok(i + 1) 195 | } else { 196 | Err(ApplyError::Retryable { 197 | input: 50, 198 | error: format!("Fiddy {}", ctx.attempt()), 199 | }) 200 | } 201 | }); 202 | assert!(result.has_failures()); 203 | assert!(result.num_failures() == 1); 204 | assert!(matches!( 205 | result.iter_failures().next().unwrap(), 206 | Outcome::MaxRetriesAttempted { .. } 207 | )) 208 | } 209 | } 210 | } 211 | --------------------------------------------------------------------------------