├── benchmarks
│   ├── src
│   │   └── lib.rs
│   ├── Cargo.toml
│   └── benches
│       └── pool.rs
├── .gitignore
├── .github
│   ├── dependabot.yml
│   ├── workflows
│   │   ├── release-management.yaml
│   │   ├── release.yml
│   │   ├── sponsors.yml
│   │   └── ci.yml
│   └── release-drafter.yml
├── .editorconfig
├── rustfmt.toml
├── src
│   ├── wakers.rs
│   ├── lib.rs
│   ├── error.rs
│   ├── common.rs
│   ├── worker.rs
│   ├── task.rs
│   └── pool.rs
├── Cargo.toml
├── LICENSE
├── README.md
└── tests
    └── tests.rs
/benchmarks/src/lib.rs:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | Cargo.lock
3 | 
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |   - package-ecosystem: cargo
4 |     directory: "/"
5 |     schedule:
6 |       interval: daily
7 |       time: "11:00"
8 |     open-pull-requests-limit: 10
9 | 
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | [*]
2 | indent_style = space
3 | indent_size = 4
4 | charset = utf-8
5 | trim_trailing_whitespace = true
6 | insert_final_newline = true
7 | 
8 | [*.{yaml,yml}]
9 | indent_size = 2
10 | 
--------------------------------------------------------------------------------
/rustfmt.toml:
--------------------------------------------------------------------------------
1 | edition = "2018"
2 | imports_layout = "HorizontalVertical"
3 | merge_imports = true
4 | overflow_delimited_expr = true
5 | struct_lit_single_line = false
6 | use_field_init_shorthand = true
7 | version = "Two"
8 | 
--------------------------------------------------------------------------------
/.github/workflows/release-management.yaml:
--------------------------------------------------------------------------------
1 | name: release management
2 | on:
3 |   push:
4 |     branches: [master]
5 | 
6 | jobs:
7 |   update-draft-release:
8 |     runs-on: ubuntu-latest
9 |     steps:
10 |       - uses: toolmantim/release-drafter@v5
11 |         env:
12 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
13 | 
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: release
2 | on:
3 |   release:
4 |     types: [published]
5 | 
6 | jobs:
7 |   publish:
8 |     runs-on: ubuntu-latest
9 |     steps:
10 |       - uses: actions/checkout@v2
11 | 
12 |       - name: Publish to crates.io
13 |         run: cargo publish --token "${CARGO_TOKEN}" --no-verify
14 |         env:
15 |           CARGO_TOKEN: ${{ secrets.CARGO_TOKEN }}
16 | 
--------------------------------------------------------------------------------
/benchmarks/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "threadfin-benchmarks"
3 | version = "0.0.0"
4 | authors = ["Stephen M. Coakley "]
5 | license = "MIT"
6 | edition = "2018"
7 | 
8 | [dependencies.threadfin]
9 | path = ".."
10 | 
11 | [dev-dependencies]
12 | criterion = "0.3"
13 | num_cpus = "1"
14 | rusty_pool = "0.7"
15 | threadpool = "1"
16 | 
17 | [[bench]]
18 | name = "pool"
19 | harness = false
20 | 
--------------------------------------------------------------------------------
/.github/release-drafter.yml:
--------------------------------------------------------------------------------
1 | categories:
2 |   - title: "Security"
3 |     label: security
4 |   - title: "Added"
5 |     labels:
6 |       - feature
7 |       - enhancement
8 |   - title: "Fixed"
9 |     label: bug
10 |   - title: "Dependency Updates"
11 |     label: dependencies
12 | change-template: '- $TITLE (#$NUMBER) @$AUTHOR'
13 | no-changes-template: '- No changes'
14 | template: |
15 |   ## Changed
16 | 
17 |   $CHANGES
18 | 
--------------------------------------------------------------------------------
/src/wakers.rs:
--------------------------------------------------------------------------------
1 | use std::{
2 |     task::Waker,
3 |     thread::{self, Thread},
4 | };
5 | 
6 | use once_cell::sync::Lazy;
7 | 
8 | /// Creates a dummy waker that does nothing.
9 | pub(crate) fn empty_waker() -> Waker {
10 |     static WAKER: Lazy<Waker> = Lazy::new(|| waker_fn::waker_fn(move || {}));
11 | 
12 |     WAKER.clone()
13 | }
14 | 
15 | /// Creates a waker that unparks the current thread.
16 | pub(crate) fn current_thread_waker() -> Waker {
17 |     thread_waker(thread::current())
18 | }
19 | 
20 | /// Creates a waker that unparks a thread.
21 | pub(crate) fn thread_waker(thread: Thread) -> Waker {
22 |     waker_fn::waker_fn(move || thread.unpark())
23 | }
24 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "threadfin"
3 | version = "0.1.2"
4 | description = "A thread pool for running multiple tasks on a configurable group of threads."
5 | authors = ["Stephen M. Coakley "]
6 | license = "MIT"
7 | keywords = ["threadpool", "thread", "pool", "parallel", "async"]
8 | categories = ["concurrency"]
9 | repository = "https://github.com/sagebind/threadfin"
10 | documentation = "https://docs.rs/threadfin/"
11 | readme = "README.md"
12 | edition = "2018"
13 | 
14 | [dependencies]
15 | crossbeam-channel = "0.5"
16 | num_cpus = "1"
17 | once_cell = ">=1.0, <=1.14"
18 | waker-fn = "1"
19 | 
20 | [dev-dependencies]
21 | futures-timer = "3"
22 | 
23 | [workspace]
24 | members = ["benchmarks"]
25 | 
--------------------------------------------------------------------------------
/.github/workflows/sponsors.yml:
--------------------------------------------------------------------------------
1 | name: Update Sponsors README
2 | on:
3 |   schedule:
4 |     - cron: '42 3 */2 * *'
5 | 
6 | jobs:
7 |   update:
8 |     runs-on: ubuntu-latest
9 |     steps:
10 |       - uses: actions/checkout@v2
11 | 
12 |       - uses: JamesIves/github-sponsors-readme-action@1.0.5
13 |         with:
14 |           token: ${{ secrets.SPONSORS_PAT }}
15 |           minimum: 1000
16 |           file: 'README.md'
17 |           template: >-
18 |             {{{ login }}}
19 | 
20 |       - uses: stefanzweifel/git-auto-commit-action@v4
21 |         with:
22 |           commit_message: Update sponsors list in README
23 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2021 Stephen M.
Coakley 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | on: 3 | push: 4 | branches: [master] 5 | pull_request: 6 | 7 | jobs: 8 | test: 9 | strategy: 10 | matrix: 11 | include: 12 | - runner: ubuntu-latest 13 | target: x86_64-unknown-linux-gnu 14 | - runner: macos-11 15 | target: x86_64-apple-darwin 16 | - runner: windows-latest 17 | target: x86_64-pc-windows-msvc 18 | runs-on: ${{ matrix.runner }} 19 | timeout-minutes: 10 20 | env: 21 | RUST_BACKTRACE: 1 22 | steps: 23 | - uses: actions/checkout@v2 24 | 25 | - uses: actions-rs/toolchain@v1 26 | with: 27 | profile: minimal 28 | toolchain: "1.46.0" 29 | target: ${{ matrix.target }} 30 | default: true 31 | 32 | - run: cargo test --target ${{ matrix.target }} 33 | 34 | cross-compile: 35 | strategy: 36 | matrix: 37 | include: 38 | - runner: ubuntu-latest 39 | target: armv5te-unknown-linux-gnueabi 40 | - runner: ubuntu-latest 41 | target: mipsel-unknown-linux-gnu 42 | runs-on: ${{ matrix.runner }} 43 | timeout-minutes: 10 44 | env: 45 | RUST_BACKTRACE: 1 46 | steps: 47 | - uses: actions/checkout@v2 48 | 49 | - uses: actions-rs/toolchain@v1 50 | with: 51 | profile: minimal 52 | toolchain: "1.46.0" 53 | target: ${{ matrix.target }} 54 | default: true 55 | 56 | - run: cargo build --target ${{ matrix.target }} 57 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! A thread pool for running multiple tasks on a configurable group of threads. 2 | //! 3 | //! Extra features: 4 | //! 5 | //! - Dynamic pool size based on load 6 | //! - Support for async tasks 7 | //! - Tasks return a handle which can be joined or awaited for the return value 8 | //! - Optional common process-wide thread pool 9 | //! 10 | //! ## Async support 11 | //! 12 | //! Threadfin supports asynchronous usage via futures, and allows you to mix and 13 | //! match both synchronous and asynchronous tasks within a single thread pool. 14 | //! 15 | //! ## Examples 16 | //! 17 | //! ``` 18 | //! // Create a new pool. 19 | //! let pool = threadfin::builder().size(8).build(); 20 | //! 21 | //! // Schedule some work. 22 | //! let compute_task = pool.execute(|| { 23 | //! // Some expensive computation 24 | //! 
2 + 2
25 | //! });
26 | //!
27 | //! // Do something in the meantime.
28 | //! println!("Waiting for result...");
29 | //!
30 | //! // Wait for the task to complete and get the result.
31 | //! let sum = compute_task.join();
32 | //! println!("Sum: 2 + 2 = {}", sum);
33 | //! ```
34 | 
35 | mod common;
36 | mod error;
37 | mod pool;
38 | mod task;
39 | mod wakers;
40 | mod worker;
41 | 
42 | pub use crate::{
43 |     common::*,
44 |     error::*,
45 |     pool::{Builder, PerCore, SizeConstraint, ThreadPool},
46 |     task::Task,
47 | };
48 | 
49 | /// Get a builder for creating a customized thread pool.
50 | ///
51 | /// A shorthand for [`ThreadPool::builder`].
52 | #[inline]
53 | pub fn builder() -> Builder {
54 |     ThreadPool::builder()
55 | }
56 | 
--------------------------------------------------------------------------------
/src/error.rs:
--------------------------------------------------------------------------------
1 | use std::{error::Error, fmt};
2 | 
3 | /// An error returned when a task could not be executed because a thread pool
4 | /// was full.
5 | ///
6 | /// Contains the original task that failed to be submitted. This allows you to
7 | /// try the submission again later or take some other action.
8 | pub struct PoolFullError<T>(pub(crate) T);
9 | 
10 | impl<T> PoolFullError<T> {
11 |     /// Extracts the inner task that could not be executed.
12 |     pub fn into_inner(self) -> T {
13 |         self.0
14 |     }
15 | }
16 | 
17 | impl<T> Error for PoolFullError<T> {}
18 | 
19 | impl<T> fmt::Debug for PoolFullError<T> {
20 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
21 |         f.write_str("PoolFullError(..)")
22 |     }
23 | }
24 | 
25 | impl<T> fmt::Display for PoolFullError<T> {
26 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
27 |         f.write_str("thread pool is full")
28 |     }
29 | }
30 | 
31 | /// An error returned when attempting to configure the common thread pool after
32 | /// it has already been initialized.
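///
/// # Examples
///
/// A minimal sketch of when this error appears, mirroring the crate's test
/// suite (the identity closure `|b| b` stands in for real configuration):
///
/// ```
/// threadfin::common(); // first use initializes the common pool
/// assert!(threadfin::configure_common(|b| b).is_err());
/// ```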
33 | pub struct CommonAlreadyInitializedError(());
34 | 
35 | impl CommonAlreadyInitializedError {
36 |     pub(crate) fn new() -> Self {
37 |         Self(())
38 |     }
39 | }
40 | 
41 | impl Error for CommonAlreadyInitializedError {}
42 | 
43 | impl fmt::Debug for CommonAlreadyInitializedError {
44 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
45 |         f.write_str("CommonAlreadyInitializedError")
46 |     }
47 | }
48 | 
49 | impl fmt::Display for CommonAlreadyInitializedError {
50 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
51 |         f.write_str("common thread pool already initialized")
52 |     }
53 | }
54 | 
--------------------------------------------------------------------------------
/benchmarks/benches/pool.rs:
--------------------------------------------------------------------------------
1 | use criterion::*;
2 | 
3 | fn criterion_benchmark(c: &mut Criterion) {
4 |     let threads = num_cpus::get().max(1);
5 | 
6 |     let tasks = 1000;
7 | 
8 |     let mut group = c.benchmark_group("pool");
9 |     group.sample_size(10);
10 | 
11 |     group.bench_function("threadfin", |b| {
12 |         b.iter_batched(
13 |             || threadfin::ThreadPool::builder().size(threads).build(),
14 |             |pool| {
15 |                 for _ in 0..tasks {
16 |                     pool.execute(|| {
17 |                         let _ = black_box(8 + 9);
18 |                     });
19 |                 }
20 | 
21 |                 pool.join();
22 |             },
23 |             BatchSize::LargeInput,
24 |         );
25 |     });
26 | 
27 |     group.bench_function("threadpool", |b| {
28 |         b.iter_batched(
29 |             || threadpool::ThreadPool::new(threads),
30 |             |pool| {
31 |                 for _ in 0..tasks {
32 |                     pool.execute(|| {
33 |                         let _ = black_box(8 + 9);
34 |                     });
35 |                 }
36 | 
37 |                 pool.join();
38 |             },
39 |             BatchSize::LargeInput,
40 |         );
41 |     });
42 | 
43 |     group.bench_function("rusty_pool", |b| {
44 |         b.iter_batched(
45 |             || rusty_pool::ThreadPool::new(threads, threads, std::time::Duration::ZERO),
46 |             |pool| {
47 |                 for _ in 0..tasks {
48 |                     pool.execute(|| {
49 |                         let _ = black_box(8 + 9);
50 |                     });
51 |                 }
52 | 
53 |                 pool.shutdown_join();
54 |             },
55 |             BatchSize::LargeInput,
56 |         );
57 |     });
58 | }
59 | 
60 | criterion_group!(benches, criterion_benchmark);
61 | criterion_main!(benches);
62 | 
--------------------------------------------------------------------------------
/src/common.rs:
--------------------------------------------------------------------------------
1 | use crate::{Builder, CommonAlreadyInitializedError, ThreadPool};
2 | use once_cell::sync::OnceCell;
3 | 
4 | static COMMON: OnceCell<ThreadPool> = OnceCell::new();
5 | 
6 | /// Get a shared reference to a common thread pool for the entire process.
7 | ///
8 | /// # Examples
9 | ///
10 | /// ```
11 | /// let result = threadfin::common().execute(|| 2 + 2).join();
12 | ///
13 | /// assert_eq!(result, 4);
14 | /// ```
15 | pub fn common() -> &'static ThreadPool {
16 |     COMMON.get_or_init(|| common_builder().build())
17 | }
18 | 
19 | /// Configure the common thread pool.
20 | ///
21 | /// This should be done near the start of your program before any other code
22 | /// uses the common pool, as this function will return an error if the common
23 | /// pool has already been initialized.
24 | ///
25 | /// Only programs should use this function! Libraries should not use this
26 | /// function, and should instead allow the running program to configure the
27 | /// common pool. If you need a customized pool in a library, use a separate
28 | /// pool instance.
29 | ///
30 | /// # Examples
31 | ///
32 | /// ```
33 | /// threadfin::configure_common(|builder| builder
34 | ///     .size(3)
35 | ///     .queue_limit(1024))
36 | /// .unwrap();
37 | ///
38 | /// assert_eq!(threadfin::common().threads(), 3);
39 | /// ```
40 | pub fn configure_common<F>(f: F) -> Result<(), CommonAlreadyInitializedError>
41 | where
42 |     F: FnOnce(Builder) -> Builder,
43 | {
44 |     let mut was_initialized = true;
45 | 
46 |     COMMON.get_or_init(|| {
47 |         was_initialized = false;
48 |         f(common_builder()).build()
49 |     });
50 | 
51 |     if was_initialized {
52 |         Err(CommonAlreadyInitializedError::new())
53 |     } else {
54 |         Ok(())
55 |     }
56 | }
57 | 
58 | fn common_builder() -> Builder {
59 |     Builder::default().name("common-pool")
60 | }
61 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Threadfin
2 | 
3 | A thread pool for running multiple tasks on a configurable group of threads.
4 | 
5 | [![Crates.io](https://img.shields.io/crates/v/threadfin.svg)](https://crates.io/crates/threadfin)
6 | [![Documentation](https://docs.rs/threadfin/badge.svg)](https://docs.rs/threadfin)
7 | [![License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
8 | [![Minimum supported Rust version](https://img.shields.io/badge/rustc-1.46+-yellow.svg)](#minimum-supported-rust-version)
9 | [![Build](https://github.com/sagebind/threadfin/workflows/ci/badge.svg)](https://github.com/sagebind/threadfin/actions)
10 | 
11 | Extra features:
12 | 
13 | - Dynamic pool size based on load
14 | - Support for async tasks
15 | - Tasks return a handle which can be joined or awaited for the return value
16 | - Optional common process-wide thread pool
17 | 
18 | ## Async support
19 | 
20 | Threadfin supports asynchronous usage via futures, and allows you to mix and match both synchronous and asynchronous tasks within a single thread pool.
21 | 
22 | ## Examples
23 | 
24 | ```rust
25 | // Create a new pool.
26 | let pool = threadfin::builder().size(8).build();
27 | 
28 | // Schedule some work.
29 | let compute_task = pool.execute(|| {
30 |     // Some expensive computation
31 |     2 + 2
32 | });
33 | 
34 | // Do something in the meantime.
35 | println!("Waiting for result...");
36 | 
37 | // Wait for the task to complete and get the result.
38 | let sum = compute_task.join();
39 | println!("Sum: 2 + 2 = {}", sum);
40 | ```
41 | 
42 | ## Installation
43 | 
44 | Install via Cargo by adding the following to your Cargo.toml file:
45 | 
46 | ```toml
47 | [dependencies]
48 | threadfin = "0.1"
49 | ```
50 | 
51 | ### Minimum supported Rust version
52 | 
53 | The minimum supported Rust version (or MSRV) for Threadfin is stable Rust 1.46 or greater, meaning we only guarantee that Threadfin will compile if you use a rustc version of at least 1.46. It might compile with older versions, but that could change at any time.
54 | 
55 | This version is explicitly tested in CI and may only be bumped in new minor versions. Any changes to the supported minimum version will be called out in the release notes.
56 | 
57 | ## Other libraries
58 | 
59 | - [threadpool](https://github.com/rust-threadpool/rust-threadpool)
60 | - [scoped_threadpool](https://github.com/kimundi/scoped-threadpool-rs)
61 | - [rusty_pool](https://github.com/robinfriedli/rusty_pool)
62 | - [rayon](https://github.com/rayon-rs/rayon)
63 | 
64 | ## Sponsors
65 | 
66 | Special thanks to sponsors of my open-source work!
67 | 68 | da-moon 69 | 70 | ## License 71 | 72 | Licensed under the MIT license. See the [LICENSE](LICENSE) file for details. 73 | -------------------------------------------------------------------------------- /tests/tests.rs: -------------------------------------------------------------------------------- 1 | use std::{panic::catch_unwind, thread, time::Duration}; 2 | 3 | use futures_timer::Delay; 4 | use threadfin::ThreadPool; 5 | 6 | fn single_thread() -> ThreadPool { 7 | ThreadPool::builder().size(0..1).build() 8 | } 9 | 10 | #[test] 11 | #[should_panic(expected = "thread pool name must not contain null bytes")] 12 | fn name_with_null_bytes_panics() { 13 | ThreadPool::builder().name("uh\0oh").build(); 14 | } 15 | 16 | #[test] 17 | #[allow(clippy::reversed_empty_ranges)] 18 | #[should_panic(expected = "thread pool minimum size cannot be larger than maximum size")] 19 | fn invalid_size_panics() { 20 | ThreadPool::builder().size(2..1); 21 | } 22 | 23 | #[test] 24 | #[should_panic(expected = "thread pool maximum size must be non-zero")] 25 | fn invalid_size_zero_panics() { 26 | ThreadPool::builder().size(0); 27 | } 28 | 29 | #[test] 30 | fn execute() { 31 | let pool = single_thread(); 32 | 33 | let result = pool.execute(|| 2 + 2).join(); 34 | 35 | assert_eq!(result, 4); 36 | } 37 | 38 | #[test] 39 | fn execute_future() { 40 | let pool = single_thread(); 41 | 42 | let result = pool.execute_future(async { 2 + 2 }).join(); 43 | 44 | assert_eq!(result, 4); 45 | } 46 | 47 | #[test] 48 | fn task_join_timeout() { 49 | let pool = single_thread(); 50 | 51 | let result = pool 52 | .execute(|| thread::sleep(Duration::from_secs(5))) 53 | .join_timeout(Duration::from_millis(10)); 54 | 55 | assert!(result.is_err()); 56 | } 57 | 58 | #[test] 59 | fn futures_that_yield_are_run_concurrently() { 60 | let pool = single_thread(); 61 | 62 | assert_eq!(pool.running_tasks(), 0); 63 | 64 | let first = pool 65 | .try_execute_future(Delay::new(Duration::from_millis(400))) 66 | .unwrap(); 67 | 68 | // Even though there's only one worker thread, it should become idle quickly 69 | // and start polling for more work, because a delay future yields 70 | // immediately and doesn't wake for a while. 71 | thread::sleep(Duration::from_millis(100)); 72 | 73 | assert_eq!(pool.running_tasks(), 1); 74 | 75 | let second = pool 76 | .try_execute_future(Delay::new(Duration::from_millis(200))) 77 | .unwrap(); 78 | 79 | thread::sleep(Duration::from_millis(100)); 80 | 81 | // Now both tasks are running, but there's still only 1 worker thread! 82 | assert_eq!(pool.running_tasks(), 2); 83 | assert_eq!(pool.threads(), 1); 84 | 85 | first.join(); 86 | second.join(); 87 | 88 | // Both tasks completed. 89 | assert_eq!(pool.completed_tasks(), 2); 90 | } 91 | 92 | #[test] 93 | fn try_execute_under_core_count() { 94 | let pool = ThreadPool::builder().size(1).build(); 95 | 96 | // Give some time for thread to start... 
97 | thread::sleep(Duration::from_millis(100)); 98 | assert_eq!(pool.threads(), 1); 99 | 100 | assert!(pool.try_execute(|| 2 + 2).is_ok()); 101 | } 102 | 103 | #[test] 104 | fn try_execute_over_core_count() { 105 | let pool = ThreadPool::builder().size(0..1).build(); 106 | 107 | assert!(pool.try_execute(|| 2 + 2).is_ok()); 108 | } 109 | 110 | #[test] 111 | fn try_execute_over_limit() { 112 | let pool = ThreadPool::builder().size(0..1).queue_limit(0).build(); 113 | 114 | assert!(pool.try_execute(|| 2 + 2).is_ok()); 115 | assert!(pool.try_execute(|| 2 + 2).is_err()); 116 | 117 | fn task() -> usize { 118 | 2 + 2 119 | } 120 | 121 | // The returned function in the error is identical to the function given. 122 | let error = pool.try_execute(task).unwrap_err(); 123 | assert_eq!(error.into_inner() as usize, task as usize); 124 | } 125 | 126 | #[test] 127 | fn name() { 128 | let pool = ThreadPool::builder().name("foo").build(); 129 | 130 | let name = pool 131 | .execute(|| thread::current().name().unwrap().to_owned()) 132 | .join(); 133 | 134 | assert_eq!(name, "foo"); 135 | } 136 | 137 | #[test] 138 | #[should_panic(expected = "oh no!")] 139 | fn panic_propagates_to_task() { 140 | let pool = single_thread(); 141 | 142 | pool.execute(|| panic!("oh no!")).join(); 143 | } 144 | 145 | #[test] 146 | fn panic_count() { 147 | let pool = single_thread(); 148 | assert_eq!(pool.panicked_tasks(), 0); 149 | 150 | let task = pool.execute(|| panic!("oh no!")); 151 | let _ = catch_unwind(move || { 152 | task.join(); 153 | }); 154 | 155 | assert_eq!(pool.panicked_tasks(), 1); 156 | } 157 | 158 | #[test] 159 | fn thread_count() { 160 | let pool = ThreadPool::builder().size(0..1).build(); 161 | 162 | assert_eq!(pool.threads(), 0); 163 | 164 | pool.execute(|| 2 + 2).join(); 165 | assert_eq!(pool.threads(), 1); 166 | 167 | let pool_with_starting_threads = ThreadPool::builder().size(1).build(); 168 | 169 | // Give some time for thread to start... 170 | thread::sleep(Duration::from_millis(50)); 171 | assert_eq!(pool_with_starting_threads.threads(), 1); 172 | } 173 | 174 | #[test] 175 | fn idle_shutdown() { 176 | let pool = ThreadPool::builder() 177 | .size(0..1) 178 | .keep_alive(Duration::from_millis(100)) 179 | .build(); 180 | assert_eq!(pool.threads(), 0, "pool starts out empty"); 181 | 182 | pool.execute(|| 2 + 2).join(); 183 | assert_eq!(pool.threads(), 1, "one thread was added"); 184 | 185 | thread::sleep(Duration::from_millis(200)); 186 | assert_eq!( 187 | pool.threads(), 188 | 0, 189 | "thread became idle and terminated after timeout" 190 | ); 191 | } 192 | 193 | #[test] 194 | fn join() { 195 | // Just a dumb test to make sure join doesn't do anything strange. 196 | ThreadPool::default().join(); 197 | } 198 | 199 | #[test] 200 | fn join_timeout_expiring() { 201 | let pool = ThreadPool::builder().size(1).build(); 202 | assert_eq!(pool.threads(), 1); 203 | 204 | // Schedule a slow task on the only thread. We have to keep the task 205 | // around, because dropping it could cancel the task. 206 | let _task = pool.execute(|| thread::sleep(Duration::from_millis(500))); 207 | 208 | // Joining should time out since there's one task still running longer 209 | // than our join timeout. 
210 |     assert!(!pool.join_timeout(Duration::from_millis(10)));
211 | }
212 | 
213 | #[test]
214 | fn configure_common_after_init_returns_error() {
215 |     threadfin::common(); // init
216 | 
217 |     assert!(threadfin::configure_common(|b| b).is_err());
218 | }
219 | 
--------------------------------------------------------------------------------
/src/worker.rs:
--------------------------------------------------------------------------------
1 | use std::{collections::HashMap, time::Duration};
2 | 
3 | use crossbeam_channel::{unbounded, Receiver, Select, Sender};
4 | 
5 | use crate::task::{Coroutine, RunResult};
6 | 
7 | /// A type which receives notifications from a worker.
8 | pub(crate) trait Listener {
9 |     fn on_task_started(&mut self) {}
10 | 
11 |     fn on_task_completed(&mut self, _panicked: bool) {}
12 | 
13 |     fn on_idle(&mut self) -> bool {
14 |         true
15 |     }
16 | }
17 | 
18 | /// A worker thread which belongs to a thread pool and executes tasks.
19 | pub(crate) struct Worker<L: Listener> {
20 |     keep_alive: Duration,
21 | 
22 |     concurrency_limit: usize,
23 | 
24 |     /// An initial task this worker should run before polling for new work.
25 |     initial_task: Option<Coroutine>,
26 | 
27 |     /// Pending tasks being run by this worker. Any task that yields without
28 |     /// being immediately complete is moved to this location to be polled again.
29 |     pending_tasks: HashMap<usize, Coroutine>,
30 | 
31 |     /// Queue of new tasks to run. The worker pulls more tasks from this queue
32 |     /// when idle.
33 |     queue: Receiver<Coroutine>,
34 |     immediate_queue: Receiver<Coroutine>,
35 | 
36 |     /// Channel used to receive notifications from wakers for pending tasks.
37 |     wake_notifications: (Sender<usize>, Receiver<usize>),
38 | 
39 |     /// Set to true when the worker is running and wants to consume more work.
40 |     active: bool,
41 | 
42 |     /// Receiver of various worker events.
43 |     listener: L,
44 | }
45 | 
46 | impl<L: Listener> Worker<L> {
47 |     /// Create a new worker.
48 |     pub(crate) fn new(
49 |         initial_task: Option<Coroutine>,
50 |         queue: Receiver<Coroutine>,
51 |         immediate_queue: Receiver<Coroutine>,
52 |         concurrency_limit: usize,
53 |         keep_alive: Duration,
54 |         listener: L,
55 |     ) -> Self {
56 |         Self {
57 |             keep_alive,
58 |             concurrency_limit,
59 |             initial_task,
60 |             pending_tasks: HashMap::new(),
61 |             queue,
62 |             immediate_queue,
63 |             wake_notifications: unbounded(),
64 |             active: false,
65 |             listener,
66 |         }
67 |     }
68 | 
69 |     /// Run the worker on the current thread until the work queue is closed.
70 |     pub(crate) fn run(mut self) {
71 |         self.active = true;
72 | 
73 |         if let Some(coroutine) = self.initial_task.take() {
74 |             self.run_now_or_reschedule(coroutine);
75 |         }
76 | 
77 |         // Main worker loop; keep running until the pool shuts down and pending
78 |         // tasks complete.
79 |         while self.active || !self.pending_tasks.is_empty() {
80 |             match self.poll_work() {
81 |                 PollResult::Work(coroutine) => self.run_now_or_reschedule(coroutine),
82 |                 PollResult::Wake(id) => self.run_pending_by_id(id),
83 |                 PollResult::ShutDown => self.active = false,
84 |                 PollResult::Timeout => {
85 |                     // If this worker doesn't have any pending tasks, then we can
86 |                     // potentially shut down the worker due to inactivity.
87 |                     if self.pending_tasks.is_empty() {
88 |                         // If the listener tells us we ought to shut down, then
89 |                         // do so.
90 |                         if self.listener.on_idle() {
91 |                             self.active = false;
92 |                         }
93 |                     }
94 |                 }
95 |             }
96 |         }
97 |     }
98 | 
99 |     /// Poll for the next work item the worker should work on.
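    ///
    /// New work, immediate handoffs, and waker notifications for pending
    /// tasks are all raced with a `Select`; whichever channel is ready first
    /// determines the `PollResult` that is returned.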
100 | fn poll_work(&mut self) -> PollResult { 101 | let mut queue_id = None; 102 | let mut immediate_queue_id = None; 103 | let mut wake_id = None; 104 | let mut select = Select::new(); 105 | 106 | // As long as we haven't reached our concurrency limit, poll for 107 | // additional work. 108 | if self.active && self.pending_tasks.len() < self.concurrency_limit { 109 | queue_id = Some(select.recv(&self.queue)); 110 | immediate_queue_id = Some(select.recv(&self.immediate_queue)); 111 | } 112 | 113 | // If we have pending tasks, poll for waker notifications as well. 114 | if !self.pending_tasks.is_empty() { 115 | wake_id = Some(select.recv(&self.wake_notifications.1)); 116 | } 117 | 118 | match select.select_timeout(self.keep_alive) { 119 | Ok(op) if Some(op.index()) == queue_id => { 120 | if let Ok(coroutine) = op.recv(&self.queue) { 121 | PollResult::Work(coroutine) 122 | } else { 123 | PollResult::ShutDown 124 | } 125 | } 126 | Ok(op) if Some(op.index()) == immediate_queue_id => { 127 | if let Ok(coroutine) = op.recv(&self.immediate_queue) { 128 | PollResult::Work(coroutine) 129 | } else { 130 | PollResult::ShutDown 131 | } 132 | } 133 | Ok(op) if Some(op.index()) == wake_id => { 134 | PollResult::Wake(op.recv(&self.wake_notifications.1).unwrap()) 135 | } 136 | Ok(_) => unreachable!(), 137 | Err(_) => PollResult::Timeout, 138 | } 139 | } 140 | 141 | fn run_now_or_reschedule(&mut self, mut coroutine: Coroutine) { 142 | // If it is possible for this task to yield, we need to prepare a new 143 | // waker to receive notifications with. 144 | if coroutine.might_yield() { 145 | let sender = self.wake_notifications.0.clone(); 146 | let coroutine_addr = coroutine.addr(); 147 | 148 | coroutine.set_waker(waker_fn::waker_fn(move || { 149 | let _ = sender.send(coroutine_addr); 150 | })); 151 | } 152 | 153 | self.listener.on_task_started(); 154 | 155 | if let RunResult::Complete { 156 | panicked, 157 | } = coroutine.run() 158 | { 159 | self.listener.on_task_completed(panicked); 160 | coroutine.complete(); 161 | } else { 162 | // This should never happen if the task promised not to yield! 163 | debug_assert!(coroutine.might_yield()); 164 | 165 | // Task yielded, so we'll need to reschedule the task to be polled 166 | // again when its waker is called. We do this by storing the future 167 | // in a collection local to this worker where we can retrieve it 168 | // again. 169 | // 170 | // The benefit of doing it this way instead of sending the future 171 | // back through the queue is that the future gets executed (almost) 172 | // immediately once it wakes instead of being put behind a queue of 173 | // _new_ tasks. 174 | self.pending_tasks.insert(coroutine.addr(), coroutine); 175 | } 176 | } 177 | 178 | fn run_pending_by_id(&mut self, id: usize) { 179 | if let Some(coroutine) = self.pending_tasks.get_mut(&id) { 180 | if let RunResult::Complete { 181 | panicked, 182 | } = coroutine.run() 183 | { 184 | self.listener.on_task_completed(panicked); 185 | 186 | // Task is complete, we can de-allocate it and complete it. 187 | self.pending_tasks.remove(&id).unwrap().complete(); 188 | } 189 | } 190 | } 191 | } 192 | 193 | enum PollResult { 194 | /// New work has arrived for this worker. 195 | Work(Coroutine), 196 | 197 | /// An existing pending task has woken. 198 | Wake(usize), 199 | 200 | /// No activity occurred within the time limit. 201 | Timeout, 202 | 203 | /// The thread pool has been shut down. 
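    /// (Observed as a disconnected work queue: the sending half has been
    /// dropped, so no new tasks can ever arrive.)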
204 | ShutDown, 205 | } 206 | -------------------------------------------------------------------------------- /src/task.rs: -------------------------------------------------------------------------------- 1 | //! Implementation of a task, as well as underlying primitives used to drive 2 | //! their execution. 3 | 4 | use std::{ 5 | any::Any, 6 | fmt, 7 | future::Future, 8 | panic::{catch_unwind, resume_unwind, AssertUnwindSafe}, 9 | pin::Pin, 10 | sync::{Arc, Mutex}, 11 | task::{Context, Poll, Waker}, 12 | thread, 13 | time::{Duration, Instant}, 14 | }; 15 | 16 | /// A type of future representing the result of a background computation in a 17 | /// thread pool. Tasks are returned when submitting a closure or future to a 18 | /// thread pool. 19 | /// 20 | /// Tasks implement [`Future`], so you can `.await` their completion 21 | /// asynchronously. Or, you can wait for their completion synchronously using 22 | /// the various [`join`](Task::join) methods provided. 23 | /// 24 | /// Dropping a task detaches the running task, but does not cancel it. The task 25 | /// will continue to run on the thread pool until completion, but there will no 26 | /// longer be any way to check its completion or to retrieve its returned value. 27 | /// 28 | /// # Examples 29 | /// 30 | /// Creating a task: 31 | /// 32 | /// ``` 33 | /// use threadfin::ThreadPool; 34 | /// 35 | /// let pool = ThreadPool::new(); 36 | /// 37 | /// let task = pool.execute(|| { 38 | /// // do some work 39 | /// }); 40 | /// ``` 41 | /// 42 | /// Blocking on a task: 43 | /// 44 | /// ``` 45 | /// use threadfin::ThreadPool; 46 | /// 47 | /// let pool = ThreadPool::new(); 48 | /// 49 | /// let task = pool.execute(|| { 50 | /// // some expensive computation 51 | /// 2 + 2 52 | /// }); 53 | /// 54 | /// // do something in the meantime 55 | /// 56 | /// // now block on the result 57 | /// let sum = task.join(); 58 | /// assert_eq!(sum, 4); 59 | /// ``` 60 | /// 61 | /// Awaiting a task asynchronously: 62 | /// 63 | /// ``` 64 | /// # threadfin::common().execute_future(async { 65 | /// use threadfin::ThreadPool; 66 | /// 67 | /// let pool = ThreadPool::new(); 68 | /// 69 | /// let task = pool.execute(|| { 70 | /// // some expensive, synchronous computation 71 | /// 2 + 2 72 | /// }); 73 | /// 74 | /// // do something in the meantime 75 | /// 76 | /// // now await on the result 77 | /// let sum = task.await; 78 | /// assert_eq!(sum, 4); 79 | /// # }).join(); 80 | /// ``` 81 | /// 82 | /// Detaching a task: 83 | /// 84 | /// ``` 85 | /// use std::sync::{Arc, atomic::{AtomicBool, Ordering}}; 86 | /// use std::thread::sleep; 87 | /// use std::time::Duration; 88 | /// use threadfin::ThreadPool; 89 | /// 90 | /// let pool = Arc::new(ThreadPool::new()); 91 | /// let completed = Arc::new(AtomicBool::from(false)); 92 | /// 93 | /// // Clone the shared values to be used inside the task. 94 | /// let pool_clone = pool.clone(); 95 | /// let completed_clone = completed.clone(); 96 | /// 97 | /// pool.execute(move || { 98 | /// let _inner_task = pool_clone.execute(move || { 99 | /// // Short delay simulating some work. 100 | /// sleep(Duration::from_millis(100)); 101 | /// 102 | /// // Set as complete. 103 | /// completed_clone.store(true, Ordering::SeqCst); 104 | /// }); 105 | /// 106 | /// // Inner task is detached, but will still complete. 107 | /// }); 108 | /// 109 | /// // Give the task some time to complete. 110 | /// sleep(Duration::from_millis(1000)); 111 | /// 112 | /// // Inner task completed even though it was detached. 
113 | /// assert_eq!(completed.load(Ordering::SeqCst), true);
114 | /// ```
115 | pub struct Task<T> {
116 |     inner: Arc<Mutex<Inner<T>>>,
117 | }
118 | 
119 | struct Inner<T> {
120 |     result: Option<thread::Result<T>>,
121 |     waker: Option<Waker>,
122 | }
123 | 
124 | impl<T> Task<T> {
125 |     /// Create a new task from a closure.
126 |     pub(crate) fn from_closure<F>(closure: F) -> (Self, Coroutine)
127 |     where
128 |         F: FnOnce() -> T + Send + 'static,
129 |         T: Send + 'static,
130 |     {
131 |         let task = Self::pending();
132 | 
133 |         let coroutine = Coroutine {
134 |             might_yield: false,
135 |             waker: crate::wakers::empty_waker(),
136 |             poller: Box::new(ClosurePoller {
137 |                 closure: Some(closure),
138 |                 result: None,
139 |                 task: task.inner.clone(),
140 |             }),
141 |         };
142 | 
143 |         (task, coroutine)
144 |     }
145 | 
146 |     /// Create a new asynchronous task from a future.
147 |     pub(crate) fn from_future<F>(future: F) -> (Self, Coroutine)
148 |     where
149 |         F: Future<Output = T> + Send + 'static,
150 |         T: Send + 'static,
151 |     {
152 |         let task = Self::pending();
153 | 
154 |         let coroutine = Coroutine {
155 |             might_yield: true,
156 |             waker: crate::wakers::empty_waker(),
157 |             poller: Box::new(FuturePoller {
158 |                 future,
159 |                 result: None,
160 |                 task: task.inner.clone(),
161 |             }),
162 |         };
163 | 
164 |         (task, coroutine)
165 |     }
166 | 
167 |     fn pending() -> Self {
168 |         Self {
169 |             inner: Arc::new(Mutex::new(Inner {
170 |                 result: None,
171 |                 waker: None,
172 |             })),
173 |         }
174 |     }
175 | 
176 |     /// Check if the task is done yet.
177 |     ///
178 |     /// If this method returns true, then calling [`join`](Task::join) will not
179 |     /// block.
180 |     pub fn is_done(&self) -> bool {
181 |         self.inner.lock().unwrap().result.is_some()
182 |     }
183 | 
184 |     /// Block the current thread until the task completes and return the value
185 |     /// the task produced.
186 |     ///
187 |     /// # Panics
188 |     ///
189 |     /// If the underlying task panics, the panic will propagate to this call.
190 |     pub fn join(self) -> T {
191 |         match self.join_catch() {
192 |             Ok(value) => value,
193 |             Err(e) => resume_unwind(e),
194 |         }
195 |     }
196 | 
197 |     fn join_catch(self) -> thread::Result<T> {
198 |         let mut inner = self.inner.lock().unwrap();
199 | 
200 |         if let Some(result) = inner.result.take() {
201 |             result
202 |         } else {
203 |             inner.waker = Some(crate::wakers::current_thread_waker());
204 |             drop(inner);
205 | 
206 |             loop {
207 |                 thread::park();
208 | 
209 |                 if let Some(result) = self.inner.lock().unwrap().result.take() {
210 |                     break result;
211 |                 }
212 |             }
213 |         }
214 |     }
215 | 
216 |     /// Block the current thread until the task completes or a timeout is
217 |     /// reached.
218 |     ///
219 |     /// # Panics
220 |     ///
221 |     /// If the underlying task panics, the panic will propagate to this call.
222 |     pub fn join_timeout(self, timeout: Duration) -> Result<T, Self> {
223 |         self.join_deadline(Instant::now() + timeout)
224 |     }
225 | 
226 |     /// Block the current thread until the task completes or a deadline is
227 |     /// reached.
228 |     ///
229 |     /// # Panics
230 |     ///
231 |     /// If the underlying task panics, the panic will propagate to this call.
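    ///
    /// # Examples
    ///
    /// A minimal sketch of waiting against an absolute deadline; on timeout
    /// the task is handed back as the `Err` value so it can be joined again:
    ///
    /// ```no_run
    /// use std::time::{Duration, Instant};
    ///
    /// let pool = threadfin::ThreadPool::new();
    /// let task = pool.execute(|| 2 + 2);
    ///
    /// match task.join_deadline(Instant::now() + Duration::from_secs(1)) {
    ///     Ok(sum) => assert_eq!(sum, 4),
    ///     Err(task) => println!("still running: {:?}", task),
    /// }
    /// ```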
232 |     pub fn join_deadline(self, deadline: Instant) -> Result<T, Self> {
233 |         match {
234 |             let mut inner = self.inner.lock().unwrap();
235 | 
236 |             if let Some(result) = inner.result.take() {
237 |                 result
238 |             } else {
239 |                 inner.waker = Some(crate::wakers::current_thread_waker());
240 |                 drop(inner);
241 | 
242 |                 loop {
243 |                     if let Some(timeout) = deadline.checked_duration_since(Instant::now()) {
244 |                         thread::park_timeout(timeout);
245 |                     } else {
246 |                         return Err(self);
247 |                     }
248 | 
249 |                     if let Some(result) = self.inner.lock().unwrap().result.take() {
250 |                         break result;
251 |                     }
252 |                 }
253 |             }
254 |         } {
255 |             Ok(value) => Ok(value),
256 |             Err(e) => resume_unwind(e),
257 |         }
258 |     }
259 | }
260 | 
261 | impl<T> Future for Task<T> {
262 |     type Output = T;
263 | 
264 |     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
265 |         let mut inner = self.inner.lock().unwrap();
266 | 
267 |         match inner.result.take() {
268 |             Some(Ok(value)) => Poll::Ready(value),
269 |             Some(Err(e)) => resume_unwind(e),
270 |             None => {
271 |                 inner.waker = Some(cx.waker().clone());
272 |                 Poll::Pending
273 |             }
274 |         }
275 |     }
276 | }
277 | 
278 | impl<T> fmt::Debug for Task<T> {
279 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
280 |         f.debug_struct("Task")
281 |             .field("done", &self.is_done())
282 |             .finish()
283 |     }
284 | }
285 | 
286 | /// The worker side of an allocated task, which provides methods for running the
287 | /// underlying future to completion.
288 | pub(crate) struct Coroutine {
289 |     might_yield: bool,
290 |     waker: Waker,
291 |     poller: Box<dyn CoroutinePoller>,
292 | }
293 | 
294 | impl Coroutine {
295 |     /// Determine whether this task might yield. This can be used for
296 |     /// optimizations if you know for certain a waker will never be used.
297 |     pub(crate) fn might_yield(&self) -> bool {
298 |         self.might_yield
299 |     }
300 | 
301 |     /// Get the unique memory address for this coroutine.
302 |     pub(crate) fn addr(&self) -> usize {
303 |         &*self.poller as *const dyn CoroutinePoller as *const () as usize
304 |     }
305 | 
306 |     /// Set the waker to use with this task.
307 |     pub(crate) fn set_waker(&mut self, waker: Waker) {
308 |         self.waker = waker;
309 |     }
310 | 
311 |     /// Run the coroutine until it yields or completes.
312 |     ///
313 |     /// Once this function returns `Complete` it should not be called again. Doing
314 |     /// so may panic, return weird results, or cause other problems.
315 |     pub(crate) fn run(&mut self) -> RunResult {
316 |         let mut cx = Context::from_waker(&self.waker);
317 |         self.poller.run(&mut cx)
318 |     }
319 | 
320 |     /// Complete the task with the final value produced by this coroutine and
321 |     /// notify any listeners on this task that the task's state has updated.
322 |     ///
323 |     /// Must not be called unless `run` has returned `Complete`. This method may
324 |     /// panic or cause other strange behavior otherwise.
325 |     ///
326 |     /// You must call this yourself when the task completes. It won't be called
327 |     /// automatically!
328 |     pub(crate) fn complete(mut self) {
329 |         self.poller.complete();
330 |     }
331 | 
332 |     /// Unwrap the original closure the coroutine was created from. Panics if
333 |     /// the coroutine was not created from a closure.
334 |     pub(crate) fn into_inner_closure<F, T>(self) -> F
335 |     where
336 |         F: FnOnce() -> T + Send + 'static,
337 |         T: Send + 'static,
338 |     {
339 |         self
340 |             .poller
341 |             .into_any()
342 |             .downcast::<ClosurePoller<F, T>>()
343 |             .unwrap()
344 |             .closure
345 |             .take()
346 |             .unwrap()
347 |     }
348 | 
349 |     /// Unwrap the original future the coroutine was created from.
Panics if the
350 |     /// coroutine was not created from a future.
351 |     pub(crate) fn into_inner_future<F, T>(self) -> F
352 |     where
353 |         F: Future<Output = T> + Send + 'static,
354 |         T: Send + 'static,
355 |     {
356 |         self
357 |             .poller
358 |             .into_any()
359 |             .downcast::<FuturePoller<F, T>>()
360 |             .unwrap()
361 |             .future
362 |     }
363 | }
364 | 
365 | #[derive(Clone, Copy, Debug, PartialEq, Eq)]
366 | pub(crate) enum RunResult {
367 |     /// The coroutine has yielded. You should call `run` on the coroutine again
368 |     /// once the waker associated with the coroutine is called.
369 |     Yield,
370 | 
371 |     /// The coroutine and its associated task have completed. You should call
372 |     /// [`Coroutine::complete`] to wake any consumers of the task to receive the
373 |     /// task result.
374 |     Complete { panicked: bool },
375 | }
376 | 
377 | /// Inner implementation of a coroutine. This trait is used to erase the return
378 | /// value from the coroutine type as well as to abstract over futures and
379 | /// synchronous closures. Bundling all the required operations into this trait
380 | /// also allows us to minimize the number of heap allocations per task.
381 | trait CoroutinePoller: Send + 'static {
382 |     fn run(&mut self, cx: &mut Context) -> RunResult;
383 | 
384 |     fn complete(&mut self);
385 | 
386 |     fn into_any(self: Box<Self>) -> Box<dyn Any>;
387 | }
388 | 
389 | struct ClosurePoller<F, T> {
390 |     closure: Option<F>,
391 |     result: Option<thread::Result<T>>,
392 |     task: Arc<Mutex<Inner<T>>>,
393 | }
394 | 
395 | impl<F, T> CoroutinePoller for ClosurePoller<F, T>
396 | where
397 |     F: FnOnce() -> T + Send + 'static,
398 |     T: Send + 'static,
399 | {
400 |     fn run(&mut self, _cx: &mut Context) -> RunResult {
401 |         let closure = self
402 |             .closure
403 |             .take()
404 |             .expect("closure already ran to completion");
405 |         let result = catch_unwind(AssertUnwindSafe(closure));
406 |         let panicked = result.is_err();
407 | 
408 |         self.result = Some(result);
409 | 
410 |         RunResult::Complete {
411 |             panicked,
412 |         }
413 |     }
414 | 
415 |     fn complete(&mut self) {
416 |         if let Some(result) = self.result.take() {
417 |             let mut task = self.task.lock().unwrap();
418 | 
419 |             task.result = Some(result);
420 | 
421 |             if let Some(waker) = task.waker.as_ref() {
422 |                 waker.wake_by_ref();
423 |             };
424 |         }
425 |     }
426 | 
427 |     fn into_any(self: Box<Self>) -> Box<dyn Any> {
428 |         self
429 |     }
430 | }
431 | 
432 | struct FuturePoller<F, T> {
433 |     future: F,
434 |     result: Option<thread::Result<T>>,
435 |     task: Arc<Mutex<Inner<T>>>,
436 | }
437 | 
438 | impl<F, T> CoroutinePoller for FuturePoller<F, T>
439 | where
440 |     F: Future<Output = T> + Send + 'static,
441 |     T: Send + 'static,
442 | {
443 |     fn run(&mut self, cx: &mut Context) -> RunResult {
444 |         // Safety: This struct is only ever used inside a box, so we know that
445 |         // neither self nor this future will move.
446 |         let future = unsafe { Pin::new_unchecked(&mut self.future) };
447 | 
448 |         match catch_unwind(AssertUnwindSafe(|| future.poll(cx))) {
449 |             Ok(Poll::Pending) => RunResult::Yield,
450 |             Ok(Poll::Ready(value)) => {
451 |                 self.result = Some(Ok(value));
452 | 
453 |                 RunResult::Complete {
454 |                     panicked: false,
455 |                 }
456 |             }
457 |             Err(e) => {
458 |                 self.result = Some(Err(e));
459 | 
460 |                 RunResult::Complete {
461 |                     panicked: true,
462 |                 }
463 |             }
464 |         }
465 |     }
466 | 
467 |     fn complete(&mut self) {
468 |         if let Some(result) = self.result.take() {
469 |             let mut task = self.task.lock().unwrap();
470 | 
471 |             task.result = Some(result);
472 | 
473 |             if let Some(waker) = task.waker.as_ref() {
474 |                 waker.wake_by_ref();
475 |             };
476 |         }
477 |     }
478 | 
479 |     fn into_any(self: Box<Self>) -> Box<dyn Any> {
480 |         self
481 |     }
482 | }
483 | 
--------------------------------------------------------------------------------
/src/pool.rs:
--------------------------------------------------------------------------------
1 | //! Implementation of the thread pool itself.
2 | 
3 | use std::{
4 |     fmt,
5 |     future::Future,
6 |     ops::{Range, RangeInclusive, RangeTo, RangeToInclusive},
7 |     sync::{
8 |         atomic::{AtomicUsize, Ordering},
9 |         Arc,
10 |         Condvar,
11 |         Mutex,
12 |     },
13 |     thread,
14 |     time::{Duration, Instant},
15 | };
16 | 
17 | use crossbeam_channel::{bounded, unbounded, Receiver, Sender};
18 | use once_cell::sync::Lazy;
19 | 
20 | use crate::{
21 |     error::PoolFullError,
22 |     task::{Coroutine, Task},
23 |     worker::{Listener, Worker},
24 | };
25 | 
26 | #[cfg(threadfin_has_atomic64)]
27 | type AtomicCounter = std::sync::atomic::AtomicU64;
28 | 
29 | #[cfg(not(threadfin_has_atomic64))]
30 | type AtomicCounter = std::sync::atomic::AtomicU32;
31 | 
32 | /// A value describing a size constraint for a thread pool.
33 | ///
34 | /// Any size constraint can be wrapped in [`PerCore`] to be made relative to the
35 | /// number of available CPU cores on the current system.
36 | ///
37 | /// See [`Builder::size`] for details.
38 | pub trait SizeConstraint {
39 |     /// Get the minimum number of threads to be in the thread pool.
40 |     fn min(&self) -> usize;
41 | 
42 |     /// Get the maximum number of threads to be in the thread pool.
43 |     fn max(&self) -> usize;
44 | }
45 | 
46 | impl SizeConstraint for usize {
47 |     fn min(&self) -> usize {
48 |         *self
49 |     }
50 | 
51 |     fn max(&self) -> usize {
52 |         *self
53 |     }
54 | }
55 | 
56 | impl SizeConstraint for Range<usize> {
57 |     fn min(&self) -> usize {
58 |         self.start
59 |     }
60 | 
61 |     fn max(&self) -> usize {
62 |         self.end
63 |     }
64 | }
65 | 
66 | impl SizeConstraint for RangeInclusive<usize> {
67 |     fn min(&self) -> usize {
68 |         *self.start()
69 |     }
70 | 
71 |     fn max(&self) -> usize {
72 |         *self.end()
73 |     }
74 | }
75 | 
76 | impl SizeConstraint for RangeTo<usize> {
77 |     fn min(&self) -> usize {
78 |         0
79 |     }
80 | 
81 |     fn max(&self) -> usize {
82 |         self.end
83 |     }
84 | }
85 | 
86 | impl SizeConstraint for RangeToInclusive<usize> {
87 |     fn min(&self) -> usize {
88 |         0
89 |     }
90 | 
91 |     fn max(&self) -> usize {
92 |         self.end
93 |     }
94 | }
95 | 
96 | /// Modifies a size constraint to be per available CPU core.
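///
/// The wrapped constraint's minimum and maximum are each multiplied by the
/// number of detected CPU cores (as reported by `num_cpus`, never less than
/// one).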
97 | ///
98 | /// # Examples
99 | ///
100 | /// ```
101 | /// # use threadfin::PerCore;
102 | /// // one thread per core
103 | /// let size = PerCore(1);
104 | ///
105 | /// // four threads per core
106 | /// let size = PerCore(4);
107 | ///
108 | /// // at least 1 thread per core and at most 2 threads per core
109 | /// let size = PerCore(1..2);
110 | /// ```
111 | pub struct PerCore<T>(pub T);
112 | 
113 | static CORE_COUNT: Lazy<usize> = Lazy::new(|| num_cpus::get().max(1));
114 | 
115 | impl<T: SizeConstraint> From<T> for PerCore<T> {
116 |     fn from(size: T) -> Self {
117 |         Self(size)
118 |     }
119 | }
120 | 
121 | impl<T: SizeConstraint> SizeConstraint for PerCore<T> {
122 |     fn min(&self) -> usize {
123 |         *CORE_COUNT * self.0.min()
124 |     }
125 | 
126 |     fn max(&self) -> usize {
127 |         *CORE_COUNT * self.0.max()
128 |     }
129 | }
130 | 
131 | /// A builder for constructing a customized [`ThreadPool`].
132 | ///
133 | /// # Examples
134 | ///
135 | /// ```
136 | /// let custom_pool = threadfin::builder()
137 | ///     .name("my-pool")
138 | ///     .size(2)
139 | ///     .build();
140 | /// ```
141 | #[derive(Debug)]
142 | pub struct Builder {
143 |     name: Option<String>,
144 |     size: Option<(usize, usize)>,
145 |     stack_size: Option<usize>,
146 |     queue_limit: Option<usize>,
147 |     worker_concurrency_limit: usize,
148 |     keep_alive: Duration,
149 | }
150 | 
151 | impl Default for Builder {
152 |     fn default() -> Self {
153 |         Self {
154 |             name: None,
155 |             size: None,
156 |             stack_size: None,
157 |             queue_limit: None,
158 |             worker_concurrency_limit: 16,
159 |             keep_alive: Duration::from_secs(60),
160 |         }
161 |     }
162 | }
163 | 
164 | impl Builder {
165 |     /// Set a custom thread name for threads spawned by this thread pool.
166 |     ///
167 |     /// # Panics
168 |     ///
169 |     /// Panics if the name contains null bytes (`\0`).
170 |     ///
171 |     /// # Examples
172 |     ///
173 |     /// ```
174 |     /// let pool = threadfin::builder().name("my-pool").build();
175 |     /// ```
176 |     pub fn name<T: Into<String>>(mut self, name: T) -> Self {
177 |         let name = name.into();
178 | 
179 |         if name.as_bytes().contains(&0) {
180 |             panic!("thread pool name must not contain null bytes");
181 |         }
182 | 
183 |         self.name = Some(name);
184 |         self
185 |     }
186 | 
187 |     /// Set the number of threads to be managed by this thread pool.
188 |     ///
189 |     /// If a `usize` is supplied, the pool will have a fixed number of threads.
190 |     /// If a range is supplied, the lower bound will be the core pool size while
191 |     /// the upper bound will be a maximum pool size the pool is allowed to burst
192 |     /// up to when the core threads are busy.
193 |     ///
194 |     /// Any size constraint can be wrapped in [`PerCore`] to be made relative to
195 |     /// the number of available CPU cores on the current system.
196 |     ///
197 |     /// If not set, a reasonable size will be selected based on the number of
198 |     /// CPU cores on the current system.
199 |     ///
200 |     /// # Examples
201 |     ///
202 |     /// ```
203 |     /// // Create a thread pool with exactly 2 threads.
204 |     /// let pool = threadfin::builder().size(2).build();
205 |     /// ```
206 |     ///
207 |     /// ```
208 |     /// // Create a thread pool that starts with no idle threads, but will
209 |     /// // spawn up to 4 threads lazily when there's work to be done.
210 |     /// let pool = threadfin::builder().size(0..4).build();
211 |     ///
212 |     /// // Or equivalently:
213 |     /// let pool = threadfin::builder().size(..4).build();
214 |     /// ```
215 |     ///
216 |     /// ```
217 |     /// use threadfin::PerCore;
218 |     ///
219 |     /// // Create a thread pool with two threads per core.
220 |     /// let pool = threadfin::builder().size(PerCore(2)).build();
221 |     /// ```
222 |     ///
223 |     /// # Panics
224 |     ///
225 |     /// Panics if an invalid range is supplied with a lower bound larger than
226 |     /// the upper bound, or if the upper bound is 0.
227 |     pub fn size<S: SizeConstraint>(mut self, size: S) -> Self {
228 |         let (min, max) = (size.min(), size.max());
229 | 
230 |         if min > max {
231 |             panic!("thread pool minimum size cannot be larger than maximum size");
232 |         }
233 | 
234 |         if max == 0 {
235 |             panic!("thread pool maximum size must be non-zero");
236 |         }
237 | 
238 |         self.size = Some((min, max));
239 |         self
240 |     }
241 | 
242 |     /// Set the size of the stack (in bytes) for threads in this thread pool.
243 |     ///
244 |     /// The actual stack size may be greater than this value if the platform
245 |     /// enforces a larger minimum stack size.
246 |     ///
247 |     /// If not specified, the stack size will be the default for new Rust
248 |     /// threads, currently 2 MiB. This can also be overridden by setting the
249 |     /// `RUST_MIN_STACK` environment variable if not specified in code.
250 |     ///
251 |     /// # Examples
252 |     ///
253 |     /// ```
254 |     /// // Worker threads will have a stack size of at least 32 KiB.
255 |     /// let pool = threadfin::builder().stack_size(32 * 1024).build();
256 |     /// ```
257 |     pub fn stack_size(mut self, size: usize) -> Self {
258 |         self.stack_size = Some(size);
259 |         self
260 |     }
261 | 
262 |     /// Set a maximum number of pending tasks allowed to be submitted before
263 |     /// blocking.
264 |     ///
265 |     /// If set to zero, queueing will be disabled and attempting to execute a
266 |     /// new task will block until an idle worker thread can immediately begin
267 |     /// executing the task or a new worker thread can be created to execute the
268 |     /// task.
269 |     ///
270 |     /// If not set, no limit is enforced.
271 |     pub fn queue_limit(mut self, limit: usize) -> Self {
272 |         self.queue_limit = Some(limit);
273 |         self
274 |     }
275 | 
276 |     /// Set a duration for how long to keep idle worker threads alive.
277 |     ///
278 |     /// If the pool has more than the minimum configured number of threads and
279 |     /// threads remain idle for more than this duration, they will be terminated
280 |     /// until the minimum thread count is reached.
281 |     pub fn keep_alive(mut self, duration: Duration) -> Self {
282 |         self.keep_alive = duration;
283 |         self
284 |     }
285 | 
286 |     /// Set a limit on the number of concurrent tasks that can be run by a
287 |     /// single worker thread.
288 |     ///
289 |     /// When executing asynchronous tasks, if the underlying future being
290 |     /// executed yields, that worker thread can begin working on new tasks
291 |     /// concurrently while waiting on the prior task to resume. This allows for
292 |     /// a primitive M:N scheduling model that supports running significantly
293 |     /// more futures concurrently than the number of threads in the thread pool.
294 |     ///
295 |     /// To prevent a worker thread from over-committing to too many tasks at
296 |     /// once (which could result in extra latency if a task wakes but its
297 |     /// assigned worker is too busy with other tasks), worker threads limit
298 |     /// themselves to a maximum number of concurrent tasks. This method allows
299 |     /// you to customize that limit.
300 |     ///
301 |     /// The default limit if not specified is 16.
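    ///
    /// # Examples
    ///
    /// A minimal sketch of raising the limit (the value 32 is arbitrary):
    ///
    /// ```
    /// let pool = threadfin::builder().worker_concurrency_limit(32).build();
    /// ```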
302 | pub fn worker_concurrency_limit(mut self, limit: usize) -> Self { 303 | self.worker_concurrency_limit = limit; 304 | self 305 | } 306 | 307 | /// Create a thread pool according to the configuration set with this 308 | /// builder. 309 | pub fn build(self) -> ThreadPool { 310 | let size = self.size.unwrap_or_else(|| { 311 | let size = PerCore(1..2); 312 | 313 | (size.min(), size.max()) 314 | }); 315 | 316 | let shared = Shared { 317 | min_threads: size.0, 318 | max_threads: size.1, 319 | thread_count: Default::default(), 320 | running_tasks_count: Default::default(), 321 | completed_tasks_count: Default::default(), 322 | panicked_tasks_count: Default::default(), 323 | keep_alive: self.keep_alive, 324 | shutdown_cvar: Condvar::new(), 325 | }; 326 | 327 | let pool = ThreadPool { 328 | thread_name: self.name, 329 | stack_size: self.stack_size, 330 | concurrency_limit: self.worker_concurrency_limit, 331 | queue: self.queue_limit.map(bounded).unwrap_or_else(unbounded), 332 | immediate_queue: bounded(0), 333 | shared: Arc::new(shared), 334 | }; 335 | 336 | for _ in 0..size.0 { 337 | let result = pool.spawn_thread(None); 338 | assert!(result.is_ok()); 339 | } 340 | 341 | pool 342 | } 343 | } 344 | 345 | /// A thread pool for running multiple tasks on a configurable group of threads. 346 | /// 347 | /// Thread pools can improve performance when executing a large number of 348 | /// concurrent tasks since the expensive overhead of spawning threads is 349 | /// minimized as threads are re-used for multiple tasks. Thread pools are also 350 | /// useful for controlling and limiting parallelism. 351 | /// 352 | /// Dropping the thread pool will prevent any further tasks from being scheduled 353 | /// on the pool and detaches all threads in the pool. If you want to block until 354 | /// all pending tasks have completed and the pool is entirely shut down, then 355 | /// use one of the available [`join`](ThreadPool::join) methods. 356 | /// 357 | /// # Pool size 358 | /// 359 | /// Every thread pool has a minimum and maximum number of worker threads that it 360 | /// will spawn for executing tasks. This range is known as the _pool size_, and 361 | /// affects pool behavior in the following ways: 362 | /// 363 | /// - **Minimum size**: A guaranteed number of threads that will always be 364 | /// created and maintained by the thread pool. Threads will be eagerly created 365 | /// to meet this minimum size when the pool is created, and at least this many 366 | /// threads will be kept running in the pool until the pool is shut down. 367 | /// - **Maximum size**: A limit on the number of additional threads to spawn to 368 | /// execute more work. 369 | /// 370 | /// # Queueing 371 | /// 372 | /// If a new or existing worker thread is unable to immediately start processing 373 | /// a submitted task, that task will be placed in a queue for worker threads to 374 | /// take from when they complete their current tasks. Queueing is only used when 375 | /// it is not possible to directly handoff a task to an existing thread and 376 | /// spawning a new thread would exceed the pool's configured maximum size. 377 | /// 378 | /// By default, thread pools are configured to use an _unbounded_ queue which 379 | /// can hold an unlimited number of pending tasks. This is a sensible default, 380 | /// but is not desirable in all use-cases and can be changed with 381 | /// [`Builder::queue_limit`]. 
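///
/// A minimal sketch of opting into a bounded queue (the limit of 100 is
/// arbitrary):
///
/// ```
/// let pool = threadfin::builder().queue_limit(100).build();
/// ```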
///
/// # Monitoring
///
/// Each pool instance provides methods for gathering various statistics on the
/// pool's usage, such as the current number of threads, tasks completed over
/// time, and queued tasks. While these methods provide the most up-to-date
/// numbers upon invocation, they should not be used for controlling program
/// behavior, since they can become immediately outdated due to the live nature
/// of the pool.
pub struct ThreadPool {
    thread_name: Option<String>,
    stack_size: Option<usize>,
    concurrency_limit: usize,
    queue: (Sender<Coroutine>, Receiver<Coroutine>),
    immediate_queue: (Sender<Coroutine>, Receiver<Coroutine>),
    shared: Arc<Shared>,
}

impl Default for ThreadPool {
    fn default() -> Self {
        Self::new()
    }
}

impl ThreadPool {
    /// Create a new thread pool with the default configuration.
    ///
    /// If you'd like to customize the thread pool's behavior then use
    /// [`ThreadPool::builder`].
    #[inline]
    pub fn new() -> Self {
        Self::builder().build()
    }

    /// Get a builder for creating a customized thread pool.
    #[inline]
    pub fn builder() -> Builder {
        Builder::default()
    }

    /// Get the number of threads currently running in the thread pool.
    pub fn threads(&self) -> usize {
        *self.shared.thread_count.lock().unwrap()
    }

    /// Get the number of tasks queued for execution, but not yet started.
    ///
    /// This number will always be less than or equal to the configured
    /// [`queue_limit`](Builder::queue_limit), if any.
    ///
    /// Note that the number returned may become immediately outdated after
    /// invocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::{thread::sleep, time::Duration};
    ///
    /// // Create a pool with just one thread.
    /// let pool = threadfin::builder().size(1).build();
    ///
    /// // Nothing is queued yet.
    /// assert_eq!(pool.queued_tasks(), 0);
    ///
    /// // Start a slow task.
    /// let task = pool.execute(|| {
    ///     sleep(Duration::from_millis(100));
    /// });
    ///
    /// // Wait a little for the task to start.
    /// sleep(Duration::from_millis(10));
    /// assert_eq!(pool.queued_tasks(), 0);
    ///
    /// // Enqueue some more tasks.
    /// let count = 4;
    /// for _ in 0..count {
    ///     pool.execute(|| {
    ///         // work to do
    ///     });
    /// }
    ///
    /// // The tasks should still be in the queue because the slow task is
    /// // running on the only thread.
    /// assert_eq!(pool.queued_tasks(), count);
    /// # pool.join();
    /// ```
    #[inline]
    pub fn queued_tasks(&self) -> usize {
        self.queue.0.len()
    }

    /// Get the number of tasks currently running.
    ///
    /// Note that the number returned may become immediately outdated after
    /// invocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::{thread::sleep, time::Duration};
    ///
    /// let pool = threadfin::ThreadPool::new();
    ///
    /// // Nothing is running yet.
    /// assert_eq!(pool.running_tasks(), 0);
    ///
    /// // Start a task.
    /// let task = pool.execute(|| {
    ///     sleep(Duration::from_millis(100));
    /// });
    ///
    /// // Wait a little for the task to start.
    /// sleep(Duration::from_millis(10));
    /// assert_eq!(pool.running_tasks(), 1);
    ///
    /// // Wait for the task to complete.
    /// task.join();
    /// assert_eq!(pool.running_tasks(), 0);
    /// ```
    #[inline]
    pub fn running_tasks(&self) -> usize {
        self.shared.running_tasks_count.load(Ordering::Relaxed)
    }

    /// Get the number of tasks completed (successfully or otherwise) by this
    /// pool since it was created.
    ///
    /// Note that the number returned may become immediately outdated after
    /// invocation.
    ///
    /// # Examples
    ///
    /// ```
    /// let pool = threadfin::ThreadPool::new();
    /// assert_eq!(pool.completed_tasks(), 0);
    ///
    /// pool.execute(|| 2 + 2).join();
    /// assert_eq!(pool.completed_tasks(), 1);
    ///
    /// pool.execute(|| 2 + 2).join();
    /// assert_eq!(pool.completed_tasks(), 2);
    /// ```
    #[inline]
    #[allow(clippy::useless_conversion)]
    pub fn completed_tasks(&self) -> u64 {
        self.shared.completed_tasks_count.load(Ordering::Relaxed).into()
    }

    /// Get the number of tasks that have panicked since the pool was created.
    ///
    /// Note that the number returned may become immediately outdated after
    /// invocation.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::{thread::sleep, time::Duration};
    ///
    /// let pool = threadfin::ThreadPool::new();
    /// assert_eq!(pool.panicked_tasks(), 0);
    ///
    /// let task = pool.execute(|| {
    ///     panic!("this task panics");
    /// });
    ///
    /// while !task.is_done() {
    ///     sleep(Duration::from_millis(100));
    /// }
    ///
    /// assert_eq!(pool.panicked_tasks(), 1);
    /// ```
    #[inline]
    #[allow(clippy::useless_conversion)]
    pub fn panicked_tasks(&self) -> u64 {
        self.shared.panicked_tasks_count.load(Ordering::SeqCst).into()
    }

    /// Submit a closure to be executed by the thread pool.
    ///
    /// If all worker threads are busy, but there are fewer threads than the
    /// configured maximum, an additional thread will be created and added to
    /// the pool to execute this task.
    ///
    /// If all worker threads are busy and the pool has reached the configured
    /// maximum number of threads, the task will be enqueued. If the queue is
    /// configured with a limit, this call will block until space becomes
    /// available in the queue.
    ///
    /// # Examples
    ///
    /// ```
    /// let pool = threadfin::ThreadPool::new();
    /// let task = pool.execute(|| {
    ///     2 + 2 // some expensive computation
    /// });
    ///
    /// // do something in the meantime
    ///
    /// // now wait for the result
    /// let sum = task.join();
    /// assert_eq!(sum, 4);
    /// ```
    pub fn execute<T, F>(&self, closure: F) -> Task<T>
    where
        T: Send + 'static,
        F: FnOnce() -> T + Send + 'static,
    {
        let (task, coroutine) = Task::from_closure(closure);

        self.execute_coroutine(coroutine);

        task
    }

    /// Submit a future to be executed by the thread pool.
    ///
    /// If all worker threads are busy, but there are fewer threads than the
    /// configured maximum, an additional thread will be created and added to
    /// the pool to execute this task.
    ///
    /// If all worker threads are busy and the pool has reached the configured
    /// maximum number of threads, the task will be enqueued. If the queue is
    /// configured with a limit, this call will block until space becomes
    /// available in the queue.
    ///
    /// # Thread locality
    ///
    /// While the given future must implement [`Send`] to be moved into a thread
    /// in the pool to be processed, once the future is assigned a thread it
    /// will stay assigned to that single thread until completion. This improves
    /// cache locality even across `.await` points in the future.
    ///
    /// ```
    /// let pool = threadfin::ThreadPool::new();
    /// let task = pool.execute_future(async {
    ///     2 + 2 // some asynchronous code
    /// });
    ///
    /// // do something in the meantime
    ///
    /// // now wait for the result
    /// let sum = task.join();
    /// assert_eq!(sum, 4);
    /// ```
    pub fn execute_future<T, F>(&self, future: F) -> Task<T>
    where
        T: Send + 'static,
        F: Future<Output = T> + Send + 'static,
    {
        let (task, coroutine) = Task::from_future(future);

        self.execute_coroutine(coroutine);

        task
    }

    /// Attempt to execute a closure on the thread pool without blocking.
    ///
    /// If the pool is at its max thread count and the task queue is full, the
    /// task is rejected and an error is returned. The original closure can be
    /// extracted from the error.
    ///
    /// # Examples
    ///
    /// One use for this method is implementing backpressure by executing a
    /// closure on the current thread if the pool is currently full.
    ///
    /// ```
    /// let pool = threadfin::ThreadPool::new();
    ///
    /// // Try to run a closure in the thread pool.
    /// let result = pool.try_execute(|| 2 + 2)
    ///     // If successfully submitted, block until the task completes.
    ///     .map(|task| task.join())
    ///     // If the pool was full, invoke the closure here and now.
    ///     .unwrap_or_else(|error| error.into_inner()());
    ///
    /// assert_eq!(result, 4);
    /// ```
    pub fn try_execute<T, F>(&self, closure: F) -> Result<Task<T>, PoolFullError<F>>
    where
        T: Send + 'static,
        F: FnOnce() -> T + Send + 'static,
    {
        let (task, coroutine) = Task::from_closure(closure);

        self.try_execute_coroutine(coroutine)
            .map(|_| task)
            .map_err(|coroutine| PoolFullError(coroutine.into_inner_closure()))
    }

    /// Attempt to execute a future on the thread pool without blocking.
    ///
    /// If the pool is at its max thread count and the task queue is full, the
    /// task is rejected and an error is returned. The original future can be
    /// extracted from the error.
    pub fn try_execute_future<T, F>(&self, future: F) -> Result<Task<T>, PoolFullError<F>>
    where
        T: Send + 'static,
        F: Future<Output = T> + Send + 'static,
    {
        let (task, coroutine) = Task::from_future(future);

        self.try_execute_coroutine(coroutine)
            .map(|_| task)
            .map_err(|coroutine| PoolFullError(coroutine.into_inner_future()))
    }

    fn execute_coroutine(&self, coroutine: Coroutine) {
        if let Err(coroutine) = self.try_execute_coroutine(coroutine) {
            // Cannot fail because we hold a reference to both the channel
            // sender and receiver, so it cannot be closed here.
            self.queue.0.send(coroutine).unwrap();
        }
    }

    fn try_execute_coroutine(&self, coroutine: Coroutine) -> Result<(), Coroutine> {
        // First, try to pass the coroutine to an idle worker currently polling
        // for work. This is the most favorable scenario for a task to begin
        // processing.
        if let Err(e) = self.immediate_queue.0.try_send(coroutine) {
            // An error means no workers are currently polling the queue.
            debug_assert!(!e.is_disconnected());

            // If possible, spawn an additional thread to handle the task.
            if let Err(e) = self.spawn_thread(Some(e.into_inner())) {
                // Finally, as a last resort, enqueue the task into the queue,
                // but only if it isn't full.
                if let Err(e) = self.queue.0.try_send(e.unwrap()) {
                    return Err(e.into_inner());
                }
            }
        }

        Ok(())
    }

    /// Shut down this thread pool and block until all existing tasks have
    /// completed and threads have stopped.
    pub fn join(self) {
        self.join_internal(None);
    }

    /// Shut down this thread pool and block until all existing tasks have
    /// completed and threads have stopped, or until the given timeout passes.
    ///
    /// Returns `true` if the thread pool shut down fully before the timeout.
    pub fn join_timeout(self, timeout: Duration) -> bool {
        self.join_deadline(Instant::now() + timeout)
    }

    /// Shut down this thread pool and block until all existing tasks have
    /// completed and threads have stopped, or until the given deadline passes.
    ///
    /// Returns `true` if the thread pool shut down fully before the deadline.
    pub fn join_deadline(self, deadline: Instant) -> bool {
        self.join_internal(Some(deadline))
    }

    fn join_internal(self, deadline: Option<Instant>) -> bool {
        // Closing this channel will interrupt any idle workers and signal to
        // all workers that the pool is shutting down.
        drop(self.queue.0);

        let mut thread_count = self.shared.thread_count.lock().unwrap();

        while *thread_count > 0 {
            // If a deadline is set, figure out how much time is remaining and
            // wait for that amount.
            if let Some(deadline) = deadline {
                if let Some(timeout) = deadline.checked_duration_since(Instant::now()) {
                    thread_count = self
                        .shared
                        .shutdown_cvar
                        .wait_timeout(thread_count, timeout)
                        .unwrap()
                        .0;
                } else {
                    return false;
                }
            }
            // If a deadline is not set, wait forever.
            else {
                thread_count = self.shared.shutdown_cvar.wait(thread_count).unwrap();
            }
        }

        true
    }

    /// Spawn an additional thread into the thread pool, if possible.
    ///
    /// If an initial task is given, it will be the first task the thread
    /// executes once ready for work.
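    ///
    /// Returns `Err` containing the given initial task, if any, when the pool
    /// has already reached its maximum size and no new thread can be spawned.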
    fn spawn_thread(&self, initial_task: Option<Coroutine>) -> Result<(), Option<Coroutine>> {
        struct WorkerListener {
            shared: Arc<Shared>,
        }

        impl Listener for WorkerListener {
            fn on_task_started(&mut self) {
                self.shared
                    .running_tasks_count
                    .fetch_add(1, Ordering::Relaxed);
            }

            fn on_task_completed(&mut self, panicked: bool) {
                self.shared
                    .running_tasks_count
                    .fetch_sub(1, Ordering::Relaxed);
                self.shared
                    .completed_tasks_count
                    .fetch_add(1, Ordering::Relaxed);

                if panicked {
                    self.shared
                        .panicked_tasks_count
                        .fetch_add(1, Ordering::SeqCst);
                }
            }

            fn on_idle(&mut self) -> bool {
                // Check if the worker should shut down by seeing if we are over
                // the minimum worker count.
                *self.shared.thread_count.lock().unwrap() > self.shared.min_threads
            }
        }

        impl Drop for WorkerListener {
            fn drop(&mut self) {
                if let Ok(mut count) = self.shared.thread_count.lock() {
                    *count = count.saturating_sub(1);
                    self.shared.shutdown_cvar.notify_all();
                }
            }
        }

        // Lock the thread count to prevent race conditions when determining
        // whether new threads can be created.
        let mut thread_count = self.shared.thread_count.lock().unwrap();

        // We've reached the configured limit for threads; do nothing.
        if *thread_count >= self.shared.max_threads {
            return Err(initial_task);
        }

        // Configure the thread based on the thread pool configuration.
        let mut builder = thread::Builder::new();

        if let Some(name) = self.thread_name.as_ref() {
            builder = builder.name(name.clone());
        }

        if let Some(size) = self.stack_size {
            builder = builder.stack_size(size);
        }

        *thread_count += 1;

        let worker = Worker::new(
            initial_task,
            self.queue.1.clone(),
            self.immediate_queue.1.clone(),
            self.concurrency_limit,
            self.shared.keep_alive,
            WorkerListener {
                shared: self.shared.clone(),
            },
        );

        // We can now safely unlock the thread count, since the worker struct
        // will decrement the count again if it is dropped.
        drop(thread_count);

        builder.spawn(move || worker.run()).unwrap();

        Ok(())
    }
}

impl fmt::Debug for ThreadPool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ThreadPool")
            .field("queued_tasks", &self.queued_tasks())
            .field("running_tasks", &self.running_tasks())
            .field("completed_tasks", &self.completed_tasks())
            .finish()
    }
}

/// Thread pool state shared by the owner and the worker threads.
struct Shared {
    min_threads: usize,
    max_threads: usize,
    thread_count: Mutex<usize>,
    running_tasks_count: AtomicUsize,
    completed_tasks_count: AtomicCounter,
    panicked_tasks_count: AtomicCounter,
    keep_alive: Duration,
    shutdown_cvar: Condvar,
}
--------------------------------------------------------------------------------