├── .github └── workflows │ └── ci.yml ├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── examples ├── basic.rs ├── block_forever.rs ├── over_max.rs ├── threaded.rs ├── try_acquire.rs └── unfair_scheduling.rs ├── helpers ├── Cargo.toml └── src │ └── lib.rs ├── src ├── lib.rs └── linked_list.rs └── tests ├── issue5.rs ├── limits.rs ├── test_core_movements.rs ├── test_drop.rs ├── test_fast_path.rs ├── test_idle.rs ├── test_overflow.rs ├── test_rate_limit_target.rs ├── test_threaded.rs └── test_try_acquire.rs /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | pull_request: {} 5 | push: 6 | branches: 7 | - main 8 | schedule: 9 | - cron: '3 9 * * 0' 10 | 11 | concurrency: 12 | group: ${{ github.workflow }}-${{ github.ref }} 13 | cancel-in-progress: true 14 | 15 | jobs: 16 | test: 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | rust: ['1.70', stable] 22 | steps: 23 | - uses: actions/checkout@v4 24 | - uses: dtolnay/rust-toolchain@master 25 | with: 26 | toolchain: ${{matrix.rust}} 27 | - run: cargo build 28 | - run: cargo build --features tracing 29 | - run: cargo test --all-targets --all-features 30 | if: matrix.rust == 'stable' 31 | - run: cargo test --doc 32 | if: matrix.rust == 'stable' 33 | 34 | clippy: 35 | runs-on: ubuntu-latest 36 | steps: 37 | - uses: actions/checkout@v4 38 | - uses: dtolnay/rust-toolchain@stable 39 | with: 40 | components: clippy 41 | - run: cargo clippy --workspace --all-features --all-targets -- -D warnings 42 | 43 | rustfmt: 44 | runs-on: ubuntu-latest 45 | steps: 46 | - uses: actions/checkout@v4 47 | - uses: dtolnay/rust-toolchain@stable 48 | with: 49 | components: rustfmt 50 | - run: cargo fmt --check --all 51 | 52 | miri: 53 | runs-on: ubuntu-latest 54 | steps: 55 | - uses: actions/checkout@v4 56 | - uses: dtolnay/rust-toolchain@nightly 57 | with: 58 | components: miri 59 | - run: cargo 
miri test --workspace 60 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | **/*.rs.bk 3 | /Cargo.lock -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "leaky-bucket" 3 | version = "1.1.2" 4 | authors = ["John-John Tedro "] 5 | edition = "2018" 6 | rust-version = "1.70" 7 | description = "A token-based rate limiter based on the leaky bucket algorithm." 8 | documentation = "https://docs.rs/leaky-bucket" 9 | readme = "README.md" 10 | homepage = "https://github.com/udoprog/leaky-bucket" 11 | repository = "https://github.com/udoprog/leaky-bucket" 12 | license = "MIT OR Apache-2.0" 13 | keywords = ["async", "futures", "ratelimit", "throttle", "tokenbucket"] 14 | categories = ["algorithms", "concurrency", "network-programming"] 15 | 16 | [features] 17 | default = [] 18 | tracing = ["dep:tracing"] 19 | 20 | [dependencies] 21 | parking_lot = "0.12.1" 22 | pin-project-lite = "0.2.14" 23 | tokio = { version = "1.28.1", features = ["time"] } 24 | tracing = { version = "0.1.37", default-features = false, features = ["attributes"], optional = true } 25 | 26 | [dev-dependencies] 27 | anyhow = "1.0.71" 28 | futures = "0.3.28" 29 | helpers = { path = "helpers" } 30 | pin-project = "1.0.12" 31 | tokio = { version = "1.28.1", features = ["macros", "rt-multi-thread", "rt", "time", "test-util"] } 32 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 John-John Tedro 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # leaky-bucket 2 | 3 | [github](https://github.com/udoprog/leaky-bucket) 4 | [crates.io](https://crates.io/crates/leaky-bucket) 5 | [docs.rs](https://docs.rs/leaky-bucket) 6 | [build status](https://github.com/udoprog/leaky-bucket/actions?query=branch%3Amain) 7 | 8 | A token-based rate limiter based on the [leaky bucket] algorithm. 9 | 10 | If the bucket overflows and goes over its max configured capacity, the task 11 | that tried to acquire the tokens will be suspended until the required number 12 | of tokens has been drained from the bucket. 13 | 14 | Since this crate uses timing facilities from tokio it has to be used within 15 | a Tokio runtime with the [`time` feature] enabled. 16 | 17 | This library has some neat features, which includes: 18 | 19 | **Not requiring a background task**. This is usually needed by token bucket 20 | rate limiters to drive progress. Instead, one of the waiting tasks 21 | temporarily assumes the role as coordinator (called the *core*). This 22 | reduces the amount of tasks needing to sleep, which can be a source of 23 | jitter for imprecise sleeping implementations and tight limiters. See below 24 | for more details. 25 | 26 | **Dropped tasks** release any resources they've reserved. So that 27 | constructing and cancellaing asynchronous tasks to not end up taking up wait 28 | slots it never uses which would be the case for cell-based rate limiters. 29 | 30 |
31 | 32 | ## Usage 33 | 34 | The core type is [`RateLimiter`], which allows for limiting the throughput 35 | of a section using its [`acquire`], [`try_acquire`], and [`acquire_one`] 36 | methods. 37 | 38 | The following is a simple example where we wrap requests through a HTTP 39 | `Client`, to ensure that we don't exceed a given limit: 40 | 41 | ```rust 42 | use leaky_bucket::RateLimiter; 43 | 44 | /// A blog client. 45 | pub struct BlogClient { 46 | limiter: RateLimiter, 47 | client: Client, 48 | } 49 | 50 | struct Post { 51 | // .. 52 | } 53 | 54 | impl BlogClient { 55 | /// Get all posts from the service. 56 | pub async fn get_posts(&self) -> Result> { 57 | self.request("posts").await 58 | } 59 | 60 | /// Perform a request against the service, limiting requests to abide by a rate limit. 61 | async fn request(&self, path: &str) -> Result 62 | where 63 | T: DeserializeOwned 64 | { 65 | // Before we start sending a request, we block on acquiring one token. 66 | self.limiter.acquire(1).await; 67 | self.client.request::(path).await 68 | } 69 | } 70 | ``` 71 | 72 |
73 | 74 | ## Implementation details 75 | 76 | Each rate limiter has two acquisition modes. A fast path and a slow path. 77 | The fast path is used if the desired number of tokens are readily available, 78 | and simply involves decrementing the number of tokens available in the 79 | shared pool. 80 | 81 | If the required number of tokens is not available, the task will be forced 82 | to be suspended until the next refill interval. Here one of the acquiring 83 | tasks will switch over to work as a *core*. This is known as *core 84 | switching*. 85 | 86 | ```rust 87 | use leaky_bucket::RateLimiter; 88 | use tokio::time::Duration; 89 | 90 | let limiter = RateLimiter::builder() 91 | .initial(10) 92 | .interval(Duration::from_millis(100)) 93 | .build(); 94 | 95 | // This is instantaneous since the rate limiter starts with 10 tokens to 96 | // spare. 97 | limiter.acquire(10).await; 98 | 99 | // This however needs to core switch and wait for a while until the desired 100 | // number of tokens is available. 101 | limiter.acquire(3).await; 102 | ``` 103 | 104 | The core is responsible for sleeping for the configured interval so that 105 | more tokens can be added. After which it ensures that any tasks that are 106 | waiting to acquire including itself are appropriately unsuspended. 107 | 108 | On-demand core switching is what allows this rate limiter implementation to 109 | work without a coordinating background thread. But we need to ensure that 110 | any asynchronous tasks that uses [`RateLimiter`] must either run an 111 | [`acquire`] call to completion, or be *cancelled* by being dropped. 112 | 113 | If none of these hold, the core might leak and be locked indefinitely 114 | preventing any future use of the rate limiter from making progress. This is 115 | similar to if you would lock an asynchronous [`Mutex`] but never drop its 116 | guard. 
117 | 118 | > You can run this example with: 119 | > 120 | > ```sh 121 | > cargo run --example block_forever 122 | > ``` 123 | 124 | ```rust 125 | use std::future::Future; 126 | use std::sync::Arc; 127 | use std::task::Context; 128 | 129 | use leaky_bucket::RateLimiter; 130 | 131 | struct Waker; 132 | 133 | let limiter = Arc::new(RateLimiter::builder().build()); 134 | 135 | let waker = Arc::new(Waker).into(); 136 | let mut cx = Context::from_waker(&waker); 137 | 138 | let mut a0 = Box::pin(limiter.acquire(1)); 139 | // Poll once to ensure that the core task is assigned. 140 | assert!(a0.as_mut().poll(&mut cx).is_pending()); 141 | assert!(a0.is_core()); 142 | 143 | // We leak the core task, preventing the rate limiter from making progress 144 | // by assigning new core tasks. 145 | std::mem::forget(a0); 146 | 147 | // Awaiting acquire here would block forever. 148 | // limiter.acquire(1).await; 149 | ``` 150 | 151 |
152 | 153 | ## Fairness 154 | 155 | By default [`RateLimiter`] uses a *fair* scheduler. This ensures that the 156 | core task makes progress even if there are many tasks waiting to acquire 157 | tokens. This might cause more core switching, increasing the total work 158 | needed. An unfair scheduler is expected to do a bit less work under 159 | contention. But without fair scheduling some tasks might end up taking 160 | longer to acquire than expected. 161 | 162 | Unfair rate limiters also have access to a fast path for acquiring tokens, 163 | which might further improve throughput. 164 | 165 | This behavior can be tweaked with the [`Builder::fair`] option. 166 | 167 | ```rust 168 | use leaky_bucket::RateLimiter; 169 | 170 | let limiter = RateLimiter::builder() 171 | .fair(false) 172 | .build(); 173 | ``` 174 | 175 | The `unfair-scheduling` example can showcase this phenomenon. 176 | 177 | ```sh 178 | cargo run --example unfair_scheduling 179 | ``` 180 | 181 | ```text 182 | # fair 183 | Max: 1011ms, Total: 1012ms 184 | Timings: 185 | 0: 101ms 186 | 1: 101ms 187 | 2: 101ms 188 | 3: 101ms 189 | 4: 101ms 190 | ... 191 | # unfair 192 | Max: 1014ms, Total: 1014ms 193 | Timings: 194 | 0: 1014ms 195 | 1: 101ms 196 | 2: 101ms 197 | 3: 101ms 198 | 4: 101ms 199 | ... 200 | ``` 201 | 202 | As can be seen above the first task in the *unfair* scheduler takes longer 203 | to run because it prioritises releasing other tasks waiting to acquire over 204 | itself. 
205 | 206 | [`acquire_one`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.RateLimiter.html#method.acquire_one 207 | [`acquire`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.RateLimiter.html#method.acquire 208 | [`Builder::fair`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.Builder.html#method.fair 209 | [`Mutex`]: https://docs.rs/tokio/1/tokio/sync/struct.Mutex.html 210 | [`RateLimiter`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.RateLimiter.html 211 | [`time` feature]: https://docs.rs/tokio/1/tokio/#feature-flags 212 | [`try_acquire`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.RateLimiter.html#method.try_acquire 213 | [leaky bucket]: https://en.wikipedia.org/wiki/Leaky_bucket 214 | -------------------------------------------------------------------------------- /examples/basic.rs: -------------------------------------------------------------------------------- 1 | use leaky_bucket::RateLimiter; 2 | use tokio::time::Instant; 3 | 4 | #[tokio::main] 5 | async fn main() { 6 | helpers::init_logging(); 7 | 8 | let limiter = RateLimiter::builder().max(10).initial(0).refill(5).build(); 9 | 10 | let start = Instant::now(); 11 | 12 | println!("Waiting for permit..."); 13 | 14 | // Should take ~400 ms to acquire in total. 
15 | let a = limiter.acquire(7); 16 | let b = limiter.acquire(3); 17 | let c = limiter.acquire(10); 18 | 19 | let ((), (), ()) = tokio::join!(a, b, c); 20 | 21 | println!("I made it in {:?}!", Instant::now().duration_since(start)); 22 | } 23 | -------------------------------------------------------------------------------- /examples/block_forever.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::sync::Arc; 3 | use std::task::Context; 4 | 5 | use leaky_bucket::RateLimiter; 6 | 7 | struct Waker; 8 | 9 | impl std::task::Wake for Waker { 10 | fn wake(self: Arc) {} 11 | } 12 | 13 | #[tokio::main] 14 | async fn main() { 15 | let limiter = Arc::new(RateLimiter::builder().build()); 16 | 17 | let waker = Arc::new(Waker).into(); 18 | let mut cx = Context::from_waker(&waker); 19 | 20 | let mut a0 = Box::pin(limiter.acquire(1)); 21 | // Poll once to ensure that the core task is assigned. 22 | assert!(a0.as_mut().poll(&mut cx).is_pending()); 23 | assert!(a0.is_core()); 24 | 25 | // We leak the core task, preventing the rate limiter from making progress 26 | // by assigning new core tasks. 27 | std::mem::forget(a0); 28 | 29 | println!("Blocking forever..."); 30 | limiter.acquire(1).await; 31 | } 32 | -------------------------------------------------------------------------------- /examples/over_max.rs: -------------------------------------------------------------------------------- 1 | use leaky_bucket::RateLimiter; 2 | use tokio::time::Instant; 3 | 4 | #[tokio::main] 5 | async fn main() { 6 | helpers::init_logging(); 7 | 8 | // Note: that despite max being 0, we can still acquire more than max tokens 9 | // because each acquisition is tracked separately. 10 | let limiter = RateLimiter::builder().max(0).initial(0).refill(5).build(); 11 | 12 | let start = Instant::now(); 13 | 14 | println!("Waiting for permit..."); 15 | 16 | // Should take ~400 ms to acquire in total. 
17 | let a = limiter.acquire(7); 18 | let b = limiter.acquire(3); 19 | let c = limiter.acquire(10); 20 | 21 | let ((), (), ()) = tokio::join!(a, b, c); 22 | 23 | println!("I made it in {:?}!", Instant::now().duration_since(start)); 24 | } 25 | -------------------------------------------------------------------------------- /examples/threaded.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use anyhow::Result; 4 | use tokio::time::Duration; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<()> { 8 | helpers::init_logging(); 9 | 10 | for iteration in 0..5 { 11 | let limiter = Arc::new( 12 | leaky_bucket::RateLimiter::builder() 13 | .initial(100) 14 | .refill(100) 15 | .interval(Duration::from_millis(200)) 16 | .max(100) 17 | .build(), 18 | ); 19 | 20 | let mut tasks = Vec::new(); 21 | let mut expected = Vec::new(); 22 | 23 | for n in 0..10 { 24 | let limiter = limiter.clone(); 25 | 26 | let task = tokio::spawn(async move { 27 | let mut locals = Vec::new(); 28 | 29 | for i in 0..10 { 30 | limiter.acquire(10).await; 31 | println!("tick: {}:{}:{}", iteration, n, i); 32 | locals.push((n, i)); 33 | } 34 | 35 | locals 36 | }); 37 | 38 | for i in 0..10 { 39 | expected.push((n, i)); 40 | } 41 | 42 | tasks.push(task); 43 | } 44 | 45 | let mut globals = Vec::new(); 46 | 47 | for t in tasks { 48 | globals.extend(t.await?); 49 | } 50 | 51 | globals.sort(); 52 | 53 | assert_eq!(expected, globals); 54 | } 55 | 56 | Ok(()) 57 | } 58 | -------------------------------------------------------------------------------- /examples/try_acquire.rs: -------------------------------------------------------------------------------- 1 | use leaky_bucket::RateLimiter; 2 | use tokio::time::Duration; 3 | 4 | #[tokio::main] 5 | async fn main() { 6 | let limiter = RateLimiter::builder().refill(1).initial(1).build(); 7 | 8 | assert!(limiter.try_acquire(1)); 9 | assert!(!limiter.try_acquire(1)); 10 | 11 | 
tokio::time::sleep(Duration::from_millis(200)).await; 12 | 13 | assert!(limiter.try_acquire(1)); 14 | assert!(limiter.try_acquire(1)); 15 | assert!(!limiter.try_acquire(1)); 16 | } 17 | -------------------------------------------------------------------------------- /examples/unfair_scheduling.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use anyhow::Result; 4 | use leaky_bucket::RateLimiter; 5 | use tokio::time::Instant; 6 | 7 | /// Grind a rate limiter. 8 | async fn grind(what: &str, limiter: &Arc) -> Result<()> { 9 | let mut tasks = Vec::new(); 10 | 11 | for _ in 0..1000 { 12 | let limiter = limiter.clone(); 13 | 14 | tasks.push(tokio::spawn(async move { 15 | let start = Instant::now(); 16 | limiter.acquire(1).await; 17 | Instant::now().saturating_duration_since(start).as_millis() as i64 18 | })); 19 | } 20 | 21 | let mut results = Vec::new(); 22 | 23 | let start = Instant::now(); 24 | 25 | for task in tasks { 26 | results.push(task.await?); 27 | } 28 | 29 | let total = Instant::now().saturating_duration_since(start).as_millis() as i64; 30 | 31 | let max = results.iter().max().unwrap(); 32 | 33 | println!("# {}", what); 34 | println! 
{ 35 | "Max: {}ms, Total: {}ms", 36 | max, total 37 | }; 38 | 39 | println!("Timings:"); 40 | 41 | for (i, n) in results.iter().enumerate().take(5) { 42 | println!(" {}: {}ms", i, n); 43 | } 44 | 45 | println!(" ..."); 46 | Ok(()) 47 | } 48 | 49 | #[tokio::main] 50 | async fn main() -> Result<()> { 51 | let fair = Arc::new(RateLimiter::builder().refill(100).build()); 52 | grind("fair", &fair).await?; 53 | 54 | let unfair = Arc::new(RateLimiter::builder().refill(100).fair(false).build()); 55 | grind("unfair", &unfair).await?; 56 | 57 | Ok(()) 58 | } 59 | -------------------------------------------------------------------------------- /helpers/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "helpers" 3 | version = "0.0.0" 4 | authors = ["John-John Tedro "] 5 | edition = "2018" 6 | publish = false 7 | 8 | [dependencies] 9 | tracing-subscriber = "0.2.18" 10 | -------------------------------------------------------------------------------- /helpers/src/lib.rs: -------------------------------------------------------------------------------- 1 | /// Initialize logging for a given example. 2 | pub fn init_logging() { 3 | use tracing_subscriber::prelude::*; 4 | 5 | tracing_subscriber::registry() 6 | .with(tracing_subscriber::EnvFilter::from_default_env()) 7 | .with( 8 | tracing_subscriber::fmt::layer() 9 | .with_target(false) 10 | .with_level(true) 11 | .compact(), 12 | ) 13 | .init(); 14 | } 15 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! [github](https://github.com/udoprog/leaky-bucket) 2 | //! [crates.io](https://crates.io/crates/leaky-bucket) 3 | //! [docs.rs](https://docs.rs/leaky-bucket) 4 | //! 5 | //! A token-based rate limiter based on the [leaky bucket] algorithm. 6 | //! 7 | //! 
If the bucket overflows and goes over its max configured capacity, the task 8 | //! that tried to acquire the tokens will be suspended until the required number 9 | //! of tokens has been drained from the bucket. 10 | //! 11 | //! Since this crate uses timing facilities from tokio it has to be used within 12 | //! a Tokio runtime with the [`time` feature] enabled. 13 | //! 14 | //! This library has some neat features, which includes: 15 | //! 16 | //! **Not requiring a background task**. This is usually needed by token bucket 17 | //! rate limiters to drive progress. Instead, one of the waiting tasks 18 | //! temporarily assumes the role as coordinator (called the *core*). This 19 | //! reduces the amount of tasks needing to sleep, which can be a source of 20 | //! jitter for imprecise sleeping implementations and tight limiters. See below 21 | //! for more details. 22 | //! 23 | //! **Dropped tasks** release any resources they've reserved. So that 24 | //! constructing and cancellaing asynchronous tasks to not end up taking up wait 25 | //! slots it never uses which would be the case for cell-based rate limiters. 26 | //! 27 | //!
28 | //! 29 | //! ## Usage 30 | //! 31 | //! The core type is [`RateLimiter`], which allows for limiting the throughput 32 | //! of a section using its [`acquire`], [`try_acquire`], and [`acquire_one`] 33 | //! methods. 34 | //! 35 | //! The following is a simple example where we wrap requests through a HTTP 36 | //! `Client`, to ensure that we don't exceed a given limit: 37 | //! 38 | //! ``` 39 | //! use leaky_bucket::RateLimiter; 40 | //! # struct Client; 41 | //! # impl Client { async fn request(&self, path: &str) -> Result { todo!() } } 42 | //! # trait DeserializeOwned {} 43 | //! # impl DeserializeOwned for Vec {} 44 | //! # type Result = core::result::Result; 45 | //! 46 | //! /// A blog client. 47 | //! pub struct BlogClient { 48 | //! limiter: RateLimiter, 49 | //! client: Client, 50 | //! } 51 | //! 52 | //! struct Post { 53 | //! // .. 54 | //! } 55 | //! 56 | //! impl BlogClient { 57 | //! /// Get all posts from the service. 58 | //! pub async fn get_posts(&self) -> Result> { 59 | //! self.request("posts").await 60 | //! } 61 | //! 62 | //! /// Perform a request against the service, limiting requests to abide by a rate limit. 63 | //! async fn request(&self, path: &str) -> Result 64 | //! where 65 | //! T: DeserializeOwned 66 | //! { 67 | //! // Before we start sending a request, we block on acquiring one token. 68 | //! self.limiter.acquire(1).await; 69 | //! self.client.request::(path).await 70 | //! } 71 | //! } 72 | //! ``` 73 | //! 74 | //!
75 | //! 76 | //! ## Implementation details 77 | //! 78 | //! Each rate limiter has two acquisition modes. A fast path and a slow path. 79 | //! The fast path is used if the desired number of tokens are readily available, 80 | //! and simply involves decrementing the number of tokens available in the 81 | //! shared pool. 82 | //! 83 | //! If the required number of tokens is not available, the task will be forced 84 | //! to be suspended until the next refill interval. Here one of the acquiring 85 | //! tasks will switch over to work as a *core*. This is known as *core 86 | //! switching*. 87 | //! 88 | //! ``` 89 | //! use leaky_bucket::RateLimiter; 90 | //! use tokio::time::Duration; 91 | //! 92 | //! # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() { 93 | //! let limiter = RateLimiter::builder() 94 | //! .initial(10) 95 | //! .interval(Duration::from_millis(100)) 96 | //! .build(); 97 | //! 98 | //! // This is instantaneous since the rate limiter starts with 10 tokens to 99 | //! // spare. 100 | //! limiter.acquire(10).await; 101 | //! 102 | //! // This however needs to core switch and wait for a while until the desired 103 | //! // number of tokens is available. 104 | //! limiter.acquire(3).await; 105 | //! # } 106 | //! ``` 107 | //! 108 | //! The core is responsible for sleeping for the configured interval so that 109 | //! more tokens can be added. After which it ensures that any tasks that are 110 | //! waiting to acquire including itself are appropriately unsuspended. 111 | //! 112 | //! On-demand core switching is what allows this rate limiter implementation to 113 | //! work without a coordinating background thread. But we need to ensure that 114 | //! any asynchronous tasks that uses [`RateLimiter`] must either run an 115 | //! [`acquire`] call to completion, or be *cancelled* by being dropped. 116 | //! 117 | //! If none of these hold, the core might leak and be locked indefinitely 118 | //! 
preventing any future use of the rate limiter from making progress. This is 119 | //! similar to if you would lock an asynchronous [`Mutex`] but never drop its 120 | //! guard. 121 | //! 122 | //! > You can run this example with: 123 | //! > 124 | //! > ```sh 125 | //! > cargo run --example block_forever 126 | //! > ``` 127 | //! 128 | //! ```no_run 129 | //! use std::future::Future; 130 | //! use std::sync::Arc; 131 | //! use std::task::Context; 132 | //! 133 | //! use leaky_bucket::RateLimiter; 134 | //! 135 | //! struct Waker; 136 | //! # impl std::task::Wake for Waker { fn wake(self: Arc) { } } 137 | //! 138 | //! # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() { 139 | //! let limiter = Arc::new(RateLimiter::builder().build()); 140 | //! 141 | //! let waker = Arc::new(Waker).into(); 142 | //! let mut cx = Context::from_waker(&waker); 143 | //! 144 | //! let mut a0 = Box::pin(limiter.acquire(1)); 145 | //! // Poll once to ensure that the core task is assigned. 146 | //! assert!(a0.as_mut().poll(&mut cx).is_pending()); 147 | //! assert!(a0.is_core()); 148 | //! 149 | //! // We leak the core task, preventing the rate limiter from making progress 150 | //! // by assigning new core tasks. 151 | //! std::mem::forget(a0); 152 | //! 153 | //! // Awaiting acquire here would block forever. 154 | //! // limiter.acquire(1).await; 155 | //! # } 156 | //! ``` 157 | //! 158 | //!
159 | //! 160 | //! ## Fairness 161 | //! 162 | //! By default [`RateLimiter`] uses a *fair* scheduler. This ensures that the 163 | //! core task makes progress even if there are many tasks waiting to acquire 164 | //! tokens. This might cause more core switching, increasing the total work 165 | //! needed. An unfair scheduler is expected to do a bit less work under 166 | //! contention. But without fair scheduling some tasks might end up taking 167 | //! longer to acquire than expected. 168 | //! 169 | //! Unfair rate limiters also have access to a fast path for acquiring tokens, 170 | //! which might further improve throughput. 171 | //! 172 | //! This behavior can be tweaked with the [`Builder::fair`] option. 173 | //! 174 | //! ``` 175 | //! use leaky_bucket::RateLimiter; 176 | //! 177 | //! let limiter = RateLimiter::builder() 178 | //! .fair(false) 179 | //! .build(); 180 | //! ``` 181 | //! 182 | //! The `unfair-scheduling` example can showcase this phenomenon. 183 | //! 184 | //! ```sh 185 | //! cargo run --example unfair_scheduling 186 | //! ``` 187 | //! 188 | //! ```text 189 | //! # fair 190 | //! Max: 1011ms, Total: 1012ms 191 | //! Timings: 192 | //! 0: 101ms 193 | //! 1: 101ms 194 | //! 2: 101ms 195 | //! 3: 101ms 196 | //! 4: 101ms 197 | //! ... 198 | //! # unfair 199 | //! Max: 1014ms, Total: 1014ms 200 | //! Timings: 201 | //! 0: 1014ms 202 | //! 1: 101ms 203 | //! 2: 101ms 204 | //! 3: 101ms 205 | //! 4: 101ms 206 | //! ... 207 | //! ``` 208 | //! 209 | //! As can be seen above the first task in the *unfair* scheduler takes longer 210 | //! to run because it prioritises releasing other tasks waiting to acquire over 211 | //! itself. 212 | //! 213 | //! [`acquire_one`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.RateLimiter.html#method.acquire_one 214 | //! [`acquire`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.RateLimiter.html#method.acquire 215 | //! 
[`Builder::fair`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.Builder.html#method.fair 216 | //! [`Mutex`]: https://docs.rs/tokio/1/tokio/sync/struct.Mutex.html 217 | //! [`RateLimiter`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.RateLimiter.html 218 | //! [`time` feature]: https://docs.rs/tokio/1/tokio/#feature-flags 219 | //! [`try_acquire`]: https://docs.rs/leaky-bucket/1/leaky_bucket/struct.RateLimiter.html#method.try_acquire 220 | //! [leaky bucket]: https://en.wikipedia.org/wiki/Leaky_bucket 221 | 222 | #![no_std] 223 | #![deny(missing_docs)] 224 | 225 | extern crate alloc; 226 | 227 | #[macro_use] 228 | extern crate std; 229 | 230 | use core::cell::UnsafeCell; 231 | use core::convert::TryFrom as _; 232 | use core::fmt; 233 | use core::future::Future; 234 | use core::mem::{self, ManuallyDrop}; 235 | use core::ops::{Deref, DerefMut}; 236 | use core::pin::Pin; 237 | use core::ptr; 238 | use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; 239 | use core::task::{Context, Poll, Waker}; 240 | 241 | use alloc::sync::Arc; 242 | 243 | use parking_lot::{Mutex, MutexGuard}; 244 | use pin_project_lite::pin_project; 245 | use tokio::time::{self, Duration, Instant}; 246 | 247 | #[cfg(feature = "tracing")] 248 | macro_rules! trace { 249 | ($($arg:tt)*) => { 250 | tracing::trace!($($arg)*) 251 | }; 252 | } 253 | 254 | #[cfg(not(feature = "tracing"))] 255 | macro_rules! trace { 256 | ($($arg:tt)*) => {}; 257 | } 258 | 259 | mod linked_list; 260 | use self::linked_list::{LinkedList, Node}; 261 | 262 | /// Default factor for how to calculate max refill value. 263 | const DEFAULT_REFILL_MAX_FACTOR: usize = 10; 264 | 265 | /// Interval to bump the shared mutex guard to allow other parts of the system 266 | /// to make progress. Processes which loop should use this number to determine 267 | /// how many times they should loop before calling [Guard::bump]
268 | /// 269 | /// If we do not respect this limit we might inadvertently end up starving other 270 | /// tasks from making progress so that they can unblock. 271 | const BUMP_LIMIT: usize = 16; 272 | 273 | /// The maximum supported balance. 274 | const MAX_BALANCE: usize = isize::MAX as usize; 275 | 276 | /// Marker trait which indicates that a type represents a unique held critical section. 277 | trait IsCritical {} 278 | impl IsCritical for Critical {} 279 | impl IsCritical for Guard<'_> {} 280 | 281 | /// Linked task state. 282 | struct Task { 283 | /// Remaining tokens that need to be satisfied. 284 | remaining: usize, 285 | /// If this node has been released or not. We make this an atomic to permit 286 | /// access to it without synchronization. 287 | complete: AtomicBool, 288 | /// The waker associated with the node. 289 | waker: Option, 290 | } 291 | 292 | impl Task { 293 | /// Construct a new task state with the given permits remaining. 294 | const fn new() -> Self { 295 | Self { 296 | remaining: 0, 297 | complete: AtomicBool::new(false), 298 | waker: None, 299 | } 300 | } 301 | 302 | /// Test if the current node is completed. 303 | fn is_completed(&self) -> bool { 304 | self.remaining == 0 305 | } 306 | 307 | /// Fill the current node from the given pool of tokens and modify it. 308 | fn fill(&mut self, current: &mut usize) { 309 | let removed = usize::min(self.remaining, *current); 310 | self.remaining -= removed; 311 | *current -= removed; 312 | } 313 | } 314 | 315 | /// A borrowed rate limiter. 316 | struct BorrowedRateLimiter<'a>(&'a RateLimiter); 317 | 318 | impl Deref for BorrowedRateLimiter<'_> { 319 | type Target = RateLimiter; 320 | 321 | #[inline] 322 | fn deref(&self) -> &RateLimiter { 323 | self.0 324 | } 325 | } 326 | 327 | struct Critical { 328 | /// Waiter list. 329 | waiters: LinkedList, 330 | /// The deadline for when more tokens can be added.
331 | deadline: Instant, 332 | } 333 | 334 | #[repr(transparent)] 335 | struct Guard<'a> { 336 | critical: MutexGuard<'a, Critical>, 337 | } 338 | 339 | impl Guard<'_> { 340 | #[inline] 341 | fn bump(this: &mut Guard<'_>) { 342 | MutexGuard::bump(&mut this.critical) 343 | } 344 | } 345 | 346 | impl Deref for Guard<'_> { 347 | type Target = Critical; 348 | 349 | #[inline] 350 | fn deref(&self) -> &Critical { 351 | &self.critical 352 | } 353 | } 354 | 355 | impl DerefMut for Guard<'_> { 356 | #[inline] 357 | fn deref_mut(&mut self) -> &mut Critical { 358 | &mut self.critical 359 | } 360 | } 361 | 362 | impl Critical { 363 | #[inline] 364 | fn push_task_front(&mut self, task: &mut Node) { 365 | // SAFETY: We both have mutable access to the node being pushed, and 366 | // mutable access to the critical section through `self`. So we know we 367 | // have exclusive tampering rights to the waiter queue. 368 | unsafe { 369 | self.waiters.push_front(task.into()); 370 | } 371 | } 372 | 373 | #[inline] 374 | fn push_task(&mut self, task: &mut Node) { 375 | // SAFETY: We both have mutable access to the node being pushed, and 376 | // mutable access to the critical section through `self`. So we know we 377 | // have exclusive tampering rights to the waiter queue. 378 | unsafe { 379 | self.waiters.push_back(task.into()); 380 | } 381 | } 382 | 383 | #[inline] 384 | fn remove_task(&mut self, task: &mut Node) { 385 | // SAFETY: We both have mutable access to the node being pushed, and 386 | // mutable access to the critical section through `self`. So we know we 387 | // have exclusive tampering rights to the waiter queue. 388 | unsafe { 389 | self.waiters.remove(task.into()); 390 | } 391 | } 392 | 393 | /// Release the current core. Beyond this point the current task may no 394 | /// longer interact exclusively with the core. 
395 | #[cfg_attr(feature = "tracing", tracing::instrument(skip(self), level = "trace"))] 396 | fn release(&mut self, state: &mut State<'_>) { 397 | trace!("releasing core"); 398 | state.available = true; 399 | 400 | // Find another task that might take over as core. Once it has acquired 401 | // core status it will have to make sure it is no longer linked into the 402 | // wait queue. 403 | unsafe { 404 | if let Some(node) = self.waiters.front() { 405 | trace!(node = ?node, "waking next core"); 406 | 407 | if let Some(ref waker) = node.as_ref().waker { 408 | waker.wake_by_ref(); 409 | } 410 | } 411 | } 412 | } 413 | } 414 | 415 | #[derive(Debug)] 416 | struct State<'a> { 417 | /// Original state. 418 | state: usize, 419 | /// If the core is available or not. 420 | available: bool, 421 | /// The balance. 422 | balance: usize, 423 | /// The rate limiter the state is associated with. 424 | lim: &'a RateLimiter, 425 | } 426 | 427 | impl<'a> State<'a> { 428 | fn try_fast_path(mut self, permits: usize) -> bool { 429 | let mut attempts = 0; 430 | 431 | // Fast path where we just try to nab any available permit without 432 | // locking. 433 | // 434 | // We do have to race against anyone else grabbing permits here when 435 | // storing the state back. 436 | while self.balance >= permits { 437 | // Abandon fast path if we've tried too many times. 438 | if attempts == BUMP_LIMIT { 439 | break; 440 | } 441 | 442 | self.balance -= permits; 443 | 444 | if let Err(new_state) = self.try_save() { 445 | self = new_state; 446 | attempts += 1; 447 | continue; 448 | } 449 | 450 | return true; 451 | } 452 | 453 | false 454 | } 455 | 456 | /// Add tokens and release any pending tasks. 
457 | #[cfg_attr( 458 | feature = "tracing", 459 | tracing::instrument(skip(self, critical, f), level = "trace") 460 | )] 461 | #[inline] 462 | fn add_tokens(&mut self, critical: &mut Guard<'_>, tokens: usize, f: F) -> O 463 | where 464 | F: FnOnce(&mut Guard<'_>, &mut State) -> O, 465 | { 466 | if tokens > 0 { 467 | debug_assert!( 468 | tokens <= MAX_BALANCE, 469 | "Additional tokens {} must be less than {}", 470 | tokens, 471 | MAX_BALANCE 472 | ); 473 | 474 | self.balance = (self.balance + tokens).min(self.lim.max); 475 | drain_wait_queue(critical, self); 476 | let output = f(critical, self); 477 | return output; 478 | } 479 | 480 | f(critical, self) 481 | } 482 | 483 | #[inline] 484 | fn decode(state: usize, lim: &'a RateLimiter) -> Self { 485 | State { 486 | state, 487 | available: state & 1 == 1, 488 | balance: state >> 1, 489 | lim, 490 | } 491 | } 492 | 493 | #[inline] 494 | fn encode(&self) -> usize { 495 | (self.balance << 1) | usize::from(self.available) 496 | } 497 | 498 | /// Try to save the state, but only succeed if it hasn't been modified. 499 | #[inline] 500 | fn try_save(self) -> Result<(), Self> { 501 | let this = ManuallyDrop::new(self); 502 | 503 | match this.lim.state.compare_exchange( 504 | this.state, 505 | this.encode(), 506 | Ordering::Release, 507 | Ordering::Relaxed, 508 | ) { 509 | Ok(_) => Ok(()), 510 | Err(state) => Err(State::decode(state, this.lim)), 511 | } 512 | } 513 | } 514 | 515 | impl Drop for State<'_> { 516 | #[inline] 517 | fn drop(&mut self) { 518 | self.lim.state.store(self.encode(), Ordering::Release); 519 | } 520 | } 521 | 522 | /// A token-bucket rate limiter. 523 | pub struct RateLimiter { 524 | /// Tokens to add every `per` duration. 525 | refill: usize, 526 | /// Interval in milliseconds to add tokens. 527 | interval: Duration, 528 | /// Max number of tokens associated with the rate limiter. 529 | max: usize, 530 | /// If the rate limiter is fair or not. 531 | fair: bool, 532 | /// The state of the rate limiter. 
533 | state: AtomicUsize, 534 | /// Critical state of the rate limiter. 535 | critical: Mutex, 536 | } 537 | 538 | impl RateLimiter { 539 | /// Construct a new [`Builder`] for a [`RateLimiter`]. 540 | /// 541 | /// # Examples 542 | /// 543 | /// ``` 544 | /// use leaky_bucket::RateLimiter; 545 | /// use tokio::time::Duration; 546 | /// 547 | /// let limiter = RateLimiter::builder() 548 | /// .initial(100) 549 | /// .refill(100) 550 | /// .max(1000) 551 | /// .interval(Duration::from_millis(250)) 552 | /// .fair(false) 553 | /// .build(); 554 | /// ``` 555 | pub fn builder() -> Builder { 556 | Builder::default() 557 | } 558 | 559 | /// Get the refill amount of this rate limiter as set through 560 | /// [`Builder::refill`]. 561 | /// 562 | /// # Examples 563 | /// 564 | /// ``` 565 | /// use leaky_bucket::RateLimiter; 566 | /// 567 | /// let limiter = RateLimiter::builder() 568 | /// .refill(1024) 569 | /// .build(); 570 | /// 571 | /// assert_eq!(limiter.refill(), 1024); 572 | /// ``` 573 | pub fn refill(&self) -> usize { 574 | self.refill 575 | } 576 | 577 | /// Get the refill interval of this rate limiter as set through 578 | /// [`Builder::interval`]. 579 | /// 580 | /// # Examples 581 | /// 582 | /// ``` 583 | /// use leaky_bucket::RateLimiter; 584 | /// use tokio::time::Duration; 585 | /// 586 | /// let limiter = RateLimiter::builder() 587 | /// .interval(Duration::from_millis(1000)) 588 | /// .build(); 589 | /// 590 | /// assert_eq!(limiter.interval(), Duration::from_millis(1000)); 591 | /// ``` 592 | pub fn interval(&self) -> Duration { 593 | self.interval 594 | } 595 | 596 | /// Get the max value of this rate limiter as set through [`Builder::max`]. 
597 | /// 598 | /// # Examples 599 | /// 600 | /// ``` 601 | /// use leaky_bucket::RateLimiter; 602 | /// 603 | /// let limiter = RateLimiter::builder() 604 | /// .max(1024) 605 | /// .build(); 606 | /// 607 | /// assert_eq!(limiter.max(), 1024); 608 | /// ``` 609 | pub fn max(&self) -> usize { 610 | self.max 611 | } 612 | 613 | /// Test if the current rate limiter is fair as specified through 614 | /// [`Builder::fair`]. 615 | /// 616 | /// # Examples 617 | /// 618 | /// ``` 619 | /// use leaky_bucket::RateLimiter; 620 | /// 621 | /// let limiter = RateLimiter::builder() 622 | /// .fair(true) 623 | /// .build(); 624 | /// 625 | /// assert_eq!(limiter.is_fair(), true); 626 | /// ``` 627 | pub fn is_fair(&self) -> bool { 628 | self.fair 629 | } 630 | 631 | /// Get the current token balance. 632 | /// 633 | /// This indicates how many tokens can be requested without blocking. 634 | /// 635 | /// # Examples 636 | /// 637 | /// ``` 638 | /// use leaky_bucket::RateLimiter; 639 | /// 640 | /// # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() { 641 | /// let limiter = RateLimiter::builder() 642 | /// .initial(100) 643 | /// .build(); 644 | /// 645 | /// assert_eq!(limiter.balance(), 100); 646 | /// limiter.acquire(10).await; 647 | /// assert_eq!(limiter.balance(), 90); 648 | /// # } 649 | /// ``` 650 | pub fn balance(&self) -> usize { 651 | self.state.load(Ordering::Acquire) >> 1 652 | } 653 | 654 | /// Acquire a single permit. 
655 | /// 656 | /// # Examples 657 | /// 658 | /// ``` 659 | /// use leaky_bucket::RateLimiter; 660 | /// 661 | /// # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() { 662 | /// let limiter = RateLimiter::builder() 663 | /// .initial(10) 664 | /// .build(); 665 | /// 666 | /// limiter.acquire_one().await; 667 | /// # } 668 | /// ``` 669 | pub fn acquire_one(&self) -> Acquire<'_> { 670 | self.acquire(1) 671 | } 672 | 673 | /// Acquire the given number of permits, suspending the current task until 674 | /// they are available. 675 | /// 676 | /// If zero permits are specified, this function never suspends the current 677 | /// task. 678 | /// 679 | /// # Examples 680 | /// 681 | /// ``` 682 | /// use leaky_bucket::RateLimiter; 683 | /// 684 | /// # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() { 685 | /// let limiter = RateLimiter::builder() 686 | /// .initial(10) 687 | /// .build(); 688 | /// 689 | /// limiter.acquire(10).await; 690 | /// # } 691 | /// ``` 692 | pub fn acquire(&self, permits: usize) -> Acquire<'_> { 693 | Acquire { 694 | inner: AcquireFut::new(BorrowedRateLimiter(self), permits), 695 | } 696 | } 697 | 698 | /// Try to acquire the given number of permits, returning `true` if the 699 | /// given number of permits were successfully acquired. 700 | /// 701 | /// If the scheduler is fair, and there are pending tasks waiting to acquire 702 | /// tokens this method will return `false`. 703 | /// 704 | /// If zero permits are specified, this method returns `true`. 
705 | /// 706 | /// # Examples 707 | /// 708 | /// ``` 709 | /// use leaky_bucket::RateLimiter; 710 | /// use tokio::time; 711 | /// 712 | /// # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() { 713 | /// let limiter = RateLimiter::builder().refill(1).initial(1).build(); 714 | /// 715 | /// assert!(limiter.try_acquire(1)); 716 | /// assert!(!limiter.try_acquire(1)); 717 | /// assert!(limiter.try_acquire(0)); 718 | /// 719 | /// time::sleep(limiter.interval() * 2).await; 720 | /// 721 | /// assert!(limiter.try_acquire(1)); 722 | /// assert!(limiter.try_acquire(1)); 723 | /// assert!(!limiter.try_acquire(1)); 724 | /// # } 725 | /// ``` 726 | pub fn try_acquire(&self, permits: usize) -> bool { 727 | if self.try_fast_path(permits) { 728 | return true; 729 | } 730 | 731 | let mut critical = self.lock(); 732 | 733 | // Reload the state while we are under the critical lock, this 734 | // ensures that the `available` flag is up-to-date since it is only 735 | // ever modified while holding the critical lock. 736 | let mut state = self.take(); 737 | 738 | // The core is *not* available, which also implies that there are tasks 739 | // ahead which are busy. 740 | if !state.available { 741 | return false; 742 | } 743 | 744 | let now = Instant::now(); 745 | 746 | // Here we try to assume core duty temporarily to see if we can 747 | // release a sufficient number of tokens to allow the current task 748 | // to proceed. 749 | if let Some((tokens, deadline)) = self.calculate_drain(critical.deadline, now) { 750 | state.balance = (state.balance + tokens).min(self.max); 751 | critical.deadline = deadline; 752 | } 753 | 754 | if state.balance >= permits { 755 | state.balance -= permits; 756 | return true; 757 | } 758 | 759 | false 760 | } 761 | 762 | /// Acquire a permit using an owned future. 763 | /// 764 | /// If zero permits are specified, this function never suspends the current 765 | /// task. 
766 | /// 767 | /// This requires the [`RateLimiter`] to be wrapped inside of an 768 | /// [`std::sync::Arc`] but will in contrast permit the acquire operation to 769 | /// be owned by another struct making it more suitable for embedding. 770 | /// 771 | /// # Examples 772 | /// 773 | /// ``` 774 | /// use leaky_bucket::RateLimiter; 775 | /// use std::sync::Arc; 776 | /// 777 | /// # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() { 778 | /// let limiter = Arc::new(RateLimiter::builder().initial(10).build()); 779 | /// 780 | /// limiter.acquire_owned(10).await; 781 | /// # } 782 | /// ``` 783 | /// 784 | /// Example when embedded into another future. This wouldn't be possible 785 | /// with [`RateLimiter::acquire`] since it would otherwise hold a reference 786 | /// to the corresponding [`RateLimiter`] instance. 787 | /// 788 | /// ``` 789 | /// use std::future::Future; 790 | /// use std::pin::Pin; 791 | /// use std::sync::Arc; 792 | /// use std::task::{Context, Poll}; 793 | /// 794 | /// use leaky_bucket::{AcquireOwned, RateLimiter}; 795 | /// use pin_project::pin_project; 796 | /// 797 | /// #[pin_project] 798 | /// struct MyFuture { 799 | /// limiter: Arc, 800 | /// #[pin] 801 | /// acquire: Option, 802 | /// } 803 | /// 804 | /// impl Future for MyFuture { 805 | /// type Output = (); 806 | /// 807 | /// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 808 | /// let mut this = self.project(); 809 | /// 810 | /// loop { 811 | /// if let Some(acquire) = this.acquire.as_mut().as_pin_mut() { 812 | /// futures::ready!(acquire.poll(cx)); 813 | /// return Poll::Ready(()); 814 | /// } 815 | /// 816 | /// this.acquire.set(Some(this.limiter.clone().acquire_owned(100))); 817 | /// } 818 | /// } 819 | /// } 820 | /// 821 | /// # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() { 822 | /// let limiter = Arc::new(RateLimiter::builder().initial(100).build()); 823 | /// 824 | /// let future = MyFuture { limiter,
acquire: None }; 825 | /// future.await; 826 | /// # } 827 | /// ``` 828 | pub fn acquire_owned(self: Arc, permits: usize) -> AcquireOwned { 829 | AcquireOwned { 830 | inner: AcquireFut::new(self, permits), 831 | } 832 | } 833 | 834 | /// Lock the critical section of the rate limiter and return the associated guard. 835 | fn lock(&self) -> Guard<'_> { 836 | Guard { 837 | critical: self.critical.lock(), 838 | } 839 | } 840 | 841 | /// Load the current state. 842 | fn load(&self) -> State<'_> { 843 | State::decode(self.state.load(Ordering::Acquire), self) 844 | } 845 | 846 | /// Take the current state, leaving the core state intact. 847 | fn take(&self) -> State<'_> { 848 | State::decode(self.state.swap(0, Ordering::Acquire), self) 849 | } 850 | 851 | /// Try to use fast path. 852 | fn try_fast_path(&self, permits: usize) -> bool { 853 | if permits == 0 { 854 | return true; 855 | } 856 | 857 | if self.fair { 858 | return false; 859 | } 860 | 861 | self.load().try_fast_path(permits) 862 | } 863 | 864 | /// Calculate refill amount. Returning a tuple of how much to fill and remaining 865 | /// duration to sleep until the next refill time if appropriate. 866 | /// 867 | /// The maximum number of additional tokens this method will ever return is 868 | /// limited to [`MAX_BALANCE`] to ensure that addition with an existing 869 | /// balance will never overflow. 870 | fn calculate_drain(&self, deadline: Instant, now: Instant) -> Option<(usize, Instant)> { 871 | if now < deadline { 872 | return None; 873 | } 874 | 875 | // Time elapsed in milliseconds since the last deadline. 
876 | let millis = self.interval.as_millis(); 877 | let since = now.saturating_duration_since(deadline).as_millis(); 878 | 879 | let periods = usize::try_from(since / millis + 1).unwrap_or(usize::MAX); 880 | 881 | let tokens = periods 882 | .checked_mul(self.refill) 883 | .unwrap_or(MAX_BALANCE) 884 | .min(MAX_BALANCE); 885 | 886 | let rem = u64::try_from(since % millis).unwrap_or(u64::MAX); 887 | 888 | // Calculated time remaining until the next deadline. 889 | let deadline = now 890 | + self 891 | .interval 892 | .saturating_sub(time::Duration::from_millis(rem)); 893 | 894 | Some((tokens, deadline)) 895 | } 896 | } 897 | 898 | impl fmt::Debug for RateLimiter { 899 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 900 | f.debug_struct("RateLimiter") 901 | .field("refill", &self.refill) 902 | .field("interval", &self.interval) 903 | .field("max", &self.max) 904 | .field("fair", &self.fair) 905 | .finish_non_exhaustive() 906 | } 907 | } 908 | 909 | /// Refill the wait queue with the given number of tokens. 910 | #[cfg_attr(feature = "tracing", tracing::instrument(skip_all, level = "trace"))] 911 | fn drain_wait_queue(critical: &mut Guard<'_>, state: &mut State<'_>) { 912 | trace!(?state, "releasing waiters"); 913 | 914 | let mut bump = 0; 915 | 916 | // SAFETY: we're holding the lock guard to all the waiters so we can be 917 | // sure that we have exclusive access to the wait queue. 918 | unsafe { 919 | while state.balance > 0 { 920 | let mut node = match critical.waiters.pop_back() { 921 | Some(node) => node, 922 | None => break, 923 | }; 924 | 925 | let n = node.as_mut(); 926 | n.fill(&mut state.balance); 927 | 928 | trace! 
{ 929 | ?state, 930 | remaining = n.remaining, 931 | "filled node", 932 | }; 933 | 934 | if !n.is_completed() { 935 | critical.waiters.push_back(node); 936 | break; 937 | } 938 | 939 | n.complete.store(true, Ordering::Release); 940 | 941 | if let Some(waker) = n.waker.take() { 942 | waker.wake(); 943 | } 944 | 945 | bump += 1; 946 | 947 | if bump == BUMP_LIMIT { 948 | Guard::bump(critical); 949 | bump = 0; 950 | } 951 | } 952 | } 953 | } 954 | 955 | // SAFETY: All the internals of acquire is thread safe and correctly 956 | // synchronized. The embedded waiter queue doesn't have anything inherently 957 | // unsafe in it. 958 | unsafe impl Send for RateLimiter {} 959 | unsafe impl Sync for RateLimiter {} 960 | 961 | /// A builder for a [`RateLimiter`]. 962 | pub struct Builder { 963 | /// The max number of tokens. 964 | max: Option, 965 | /// The initial count of tokens. 966 | initial: usize, 967 | /// Tokens to add every `per` duration. 968 | refill: usize, 969 | /// Interval to add tokens in milliseconds. 970 | interval: Duration, 971 | /// If the rate limiter is fair or not. 972 | fair: bool, 973 | } 974 | 975 | impl Builder { 976 | /// Configure the max number of tokens to use. 977 | /// 978 | /// If unspecified, this will default to be 10 times the [`refill`] or the 979 | /// [`initial`] value, whichever is largest. 980 | /// 981 | /// The maximum supported balance is limited to [`isize::MAX`]. 982 | /// 983 | /// # Examples 984 | /// 985 | /// ``` 986 | /// use leaky_bucket::RateLimiter; 987 | /// 988 | /// let limiter = RateLimiter::builder() 989 | /// .max(10_000) 990 | /// .build(); 991 | /// ``` 992 | /// 993 | /// [`refill`]: Builder::refill 994 | /// [`initial`]: Builder::initial 995 | pub fn max(&mut self, max: usize) -> &mut Self { 996 | self.max = Some(max); 997 | self 998 | } 999 | 1000 | /// Configure the initial number of tokens to configure. The default value 1001 | /// is `0`. 
1002 | /// 1003 | /// # Examples 1004 | /// 1005 | /// ``` 1006 | /// use leaky_bucket::RateLimiter; 1007 | /// 1008 | /// let limiter = RateLimiter::builder() 1009 | /// .initial(10) 1010 | /// .build(); 1011 | /// ``` 1012 | pub fn initial(&mut self, initial: usize) -> &mut Self { 1013 | self.initial = initial; 1014 | self 1015 | } 1016 | 1017 | /// Configure the time duration between which we add [`refill`] number to 1018 | /// the bucket rate limiter. 1019 | /// 1020 | /// This is 100ms by default. 1021 | /// 1022 | /// # Panics 1023 | /// 1024 | /// This panics if the provided interval does not fit within the millisecond 1025 | /// bounds of a [usize] or is zero. 1026 | /// 1027 | /// ```should_panic 1028 | /// use leaky_bucket::RateLimiter; 1029 | /// use tokio::time::Duration; 1030 | /// 1031 | /// let limiter = RateLimiter::builder() 1032 | /// .interval(Duration::from_secs(u64::MAX)) 1033 | /// .build(); 1034 | /// ``` 1035 | /// 1036 | /// ```should_panic 1037 | /// use leaky_bucket::RateLimiter; 1038 | /// use tokio::time::Duration; 1039 | /// 1040 | /// let limiter = RateLimiter::builder() 1041 | /// .interval(Duration::from_millis(0)) 1042 | /// .build(); 1043 | /// ``` 1044 | /// 1045 | /// # Examples 1046 | /// 1047 | /// ``` 1048 | /// use leaky_bucket::RateLimiter; 1049 | /// use tokio::time::Duration; 1050 | /// 1051 | /// let limiter = RateLimiter::builder() 1052 | /// .interval(Duration::from_millis(100)) 1053 | /// .build(); 1054 | /// ``` 1055 | /// 1056 | /// [`refill`]: Builder::refill 1057 | pub fn interval(&mut self, interval: Duration) -> &mut Self { 1058 | assert! { 1059 | interval.as_millis() != 0, 1060 | "interval must be non-zero", 1061 | }; 1062 | assert! { 1063 | u64::try_from(interval.as_millis()).is_ok(), 1064 | "interval must fit within a 64-bit integer" 1065 | }; 1066 | self.interval = interval; 1067 | self 1068 | } 1069 | 1070 | /// The number of tokens to add at each [`interval`] interval. The default 1071 | /// value is `1`. 
1072 | /// 1073 | /// # Panics 1074 | /// 1075 | /// Panics if a refill amount of `0` is specified. 1076 | /// 1077 | /// # Examples 1078 | /// 1079 | /// ``` 1080 | /// use leaky_bucket::RateLimiter; 1081 | /// 1082 | /// let limiter = RateLimiter::builder() 1083 | /// .refill(100) 1084 | /// .build(); 1085 | /// ``` 1086 | /// 1087 | /// [`interval`]: Builder::interval 1088 | pub fn refill(&mut self, refill: usize) -> &mut Self { 1089 | assert!(refill > 0, "refill amount cannot be zero"); 1090 | self.refill = refill; 1091 | self 1092 | } 1093 | 1094 | /// Configure the rate limiter to be fair. 1095 | /// 1096 | /// Fairness is enabled by default. 1097 | /// 1098 | /// Fairness ensures that tasks make progress in the order that they acquire 1099 | /// even when the rate limiter is under contention. An unfair scheduler 1100 | /// might have a higher total throughput. 1101 | /// 1102 | /// Fair scheduling also affects the behavior of 1103 | /// [`RateLimiter::try_acquire`] which will return `false` if there are any 1104 | /// pending tasks since they should be given priority. 1105 | /// 1106 | /// # Examples 1107 | /// 1108 | /// ``` 1109 | /// use leaky_bucket::RateLimiter; 1110 | /// 1111 | /// let limiter = RateLimiter::builder() 1112 | /// .refill(100) 1113 | /// .fair(false) 1114 | /// .build(); 1115 | /// ``` 1116 | pub fn fair(&mut self, fair: bool) -> &mut Self { 1117 | self.fair = fair; 1118 | self 1119 | } 1120 | 1121 | /// Construct a new [`RateLimiter`].
1122 | /// 1123 | /// # Examples 1124 | /// 1125 | /// ``` 1126 | /// use leaky_bucket::RateLimiter; 1127 | /// use tokio::time::Duration; 1128 | /// 1129 | /// let limiter = RateLimiter::builder() 1130 | /// .refill(100) 1131 | /// .interval(Duration::from_millis(200)) 1132 | /// .max(10_000) 1133 | /// .build(); 1134 | /// ``` 1135 | pub fn build(&self) -> RateLimiter { 1136 | let deadline = Instant::now() + self.interval; 1137 | 1138 | let initial = self.initial.min(MAX_BALANCE); 1139 | let refill = self.refill.min(MAX_BALANCE); 1140 | 1141 | let max = match self.max { 1142 | Some(max) => max.min(MAX_BALANCE), 1143 | None => refill 1144 | .max(initial) 1145 | .saturating_mul(DEFAULT_REFILL_MAX_FACTOR) 1146 | .min(MAX_BALANCE), 1147 | }; 1148 | 1149 | let initial = initial.min(max); 1150 | 1151 | RateLimiter { 1152 | refill, 1153 | interval: self.interval, 1154 | max, 1155 | fair: self.fair, 1156 | state: AtomicUsize::new((initial << 1) | 1), 1157 | critical: Mutex::new(Critical { 1158 | waiters: LinkedList::new(), 1159 | deadline, 1160 | }), 1161 | } 1162 | } 1163 | } 1164 | 1165 | /// Construct a new builder with default options. 1166 | /// 1167 | /// # Examples 1168 | /// 1169 | /// ``` 1170 | /// use leaky_bucket::Builder; 1171 | /// 1172 | /// let limiter = Builder::default().build(); 1173 | /// ``` 1174 | impl Default for Builder { 1175 | fn default() -> Self { 1176 | Self { 1177 | max: None, 1178 | initial: 0, 1179 | refill: 1, 1180 | interval: Duration::from_millis(100), 1181 | fair: true, 1182 | } 1183 | } 1184 | } 1185 | 1186 | /// The state of an acquire operation. 1187 | #[derive(Debug, Clone, Copy)] 1188 | enum AcquireFutState { 1189 | /// Initial unconfigured state. 1190 | Initial, 1191 | /// The acquire is waiting to be released by the core. 1192 | Waiting, 1193 | /// The operation is completed. 1194 | Complete, 1195 | /// The task is currently the core. 1196 | Core, 1197 | } 1198 | 1199 | /// Inner state and methods of the acquire. 
1200 | #[repr(transparent)] 1201 | struct AcquireFutInner { 1202 | /// Aliased task state. 1203 | node: UnsafeCell>, 1204 | } 1205 | 1206 | impl AcquireFutInner { 1207 | const fn new() -> AcquireFutInner { 1208 | AcquireFutInner { 1209 | node: UnsafeCell::new(Node::new(Task::new())), 1210 | } 1211 | } 1212 | 1213 | /// Access the completion flag. 1214 | pub fn complete(&self) -> &AtomicBool { 1215 | // SAFETY: This is always safe to access since it's atomic. 1216 | unsafe { &*ptr::addr_of!((*self.node.get()).complete) } 1217 | } 1218 | 1219 | /// Get the underlying task mutably. 1220 | /// 1221 | /// We prove that the caller does indeed have mutable access to the node by 1222 | /// passing in a mutable reference to the critical section. 1223 | #[inline] 1224 | pub fn get_task<'crit, C>( 1225 | self: Pin<&'crit mut Self>, 1226 | critical: &'crit mut C, 1227 | ) -> (&'crit mut C, &'crit mut Node) 1228 | where 1229 | C: IsCritical, 1230 | { 1231 | // SAFETY: Caller has exclusive access to the critical section, since 1232 | // it's passed in as a mutable argument. We can also ensure that none of 1233 | // the borrows outlive the provided closure. 1234 | unsafe { (critical, &mut *self.node.get()) } 1235 | } 1236 | 1237 | /// Update the waiting state for this acquisition task. This might require 1238 | /// that we update the associated waker. 
    #[cfg_attr(
        feature = "tracing",
        tracing::instrument(skip(self, critical, waker), level = "trace")
    )]
    fn update(self: Pin<&mut Self>, critical: &mut Guard<'_>, waker: &Waker) {
        let (critical, task) = self.get_task(critical);

        // Link the task into the wait queue if it is not already there.
        if !task.is_linked() {
            critical.push_task_front(task);
        }

        // Only clone the waker when it actually changed; `will_wake` lets us
        // skip the clone for repeat polls from the same task.
        let new_waker = match task.waker {
            None => true,
            Some(ref w) => !w.will_wake(waker),
        };

        if new_waker {
            trace!("updating waker");
            task.waker = Some(waker.clone());
        }
    }

    /// Ensure that the current core task is correctly linked up if needed.
    #[cfg_attr(
        feature = "tracing",
        tracing::instrument(skip(self, critical, lim), level = "trace")
    )]
    fn link_core(self: Pin<&mut Self>, critical: &mut Critical, lim: &RateLimiter) {
        let (critical, task) = self.get_task(critical);

        match (lim.fair, task.is_linked()) {
            (true, false) => {
                // Fair scheduling needs to ensure that the core is part of the wait
                // queue, and will be woken up in-order with other tasks.
                critical.push_task(task);
            }
            (false, true) => {
                // Unfair scheduling will not wake the core in order, so
                // don't bother having it linked.
                critical.remove_task(task);
            }
            _ => {}
        }
    }

    /// Release any remaining tokens which are associated with this particular task.
    fn release_remaining(
        self: Pin<&mut Self>,
        critical: &mut Guard<'_>,
        state: &mut State<'_>,
        permits: usize,
    ) {
        let (critical, task) = self.get_task(critical);

        // Unlink first so the wait queue never observes a task being dropped.
        if task.is_linked() {
            critical.remove_task(task);
        }

        // Hand back permits which we've acquired so far: the task originally
        // asked for `permits` and still needs `task.remaining`, so the
        // difference is what it already drew from the bucket.
        let release = permits.saturating_sub(task.remaining);
        state.add_tokens(critical, release, |_, _| ());
    }

    /// Drain the given number of tokens through the core. Returns `true` if the
    /// core has been completed.
    #[cfg_attr(
        feature = "tracing",
        tracing::instrument(skip(self, critical), level = "trace")
    )]
    fn drain_core(
        self: Pin<&mut Self>,
        critical: &mut Guard<'_>,
        state: &mut State<'_>,
        tokens: usize,
    ) -> bool {
        let completed = state.add_tokens(critical, tokens, |critical, state| {
            let (_, task) = self.get_task(critical);

            // If the limiter is not fair, we need to in addition to draining
            // remaining tokens from linked nodes, drain it from ourselves. We
            // fill the current holder of the core last (self). To ensure that
            // it stays around for as long as possible.
            if !state.lim.fair {
                task.fill(&mut state.balance);
            }

            task.is_completed()
        });

        if completed {
            // Everything was drained, including the current core (if
            // appropriate). So we can release it now.
            critical.release(state);
        }

        completed
    }

    /// Assume the current core and calculate how long we must sleep for in
    /// order to do it.
    ///
    /// # Safety
    ///
    /// This might link the current task into the task queue, so the caller must
    /// ensure that it is pinned.
    #[cfg_attr(
        feature = "tracing",
        tracing::instrument(skip(self, critical), level = "trace")
    )]
    fn assume_core(
        mut self: Pin<&mut Self>,
        critical: &mut Guard<'_>,
        state: &mut State<'_>,
        now: Instant,
    ) -> bool {
        self.as_mut().link_core(critical, state.lim);

        // Nothing is due yet; stay core and keep sleeping.
        let (tokens, deadline) = match state.lim.calculate_drain(critical.deadline, now) {
            Some(tokens) => tokens,
            None => return true,
        };

        // It is appropriate to update the deadline.
        critical.deadline = deadline;
        !self.drain_core(critical, state, tokens)
    }
}

pin_project! {
    /// The future associated with acquiring permits from a rate limiter using
    /// [`RateLimiter::acquire`].
    #[project(!Unpin)]
    pub struct Acquire<'a> {
        #[pin]
        inner: AcquireFut<&'a RateLimiter>,
    }
}

impl Acquire<'_> {
    /// Test if this acquire task is currently coordinating the rate limiter.
    ///
    /// # Examples
    ///
    /// ```
    /// use leaky_bucket::RateLimiter;
    /// use std::future::Future;
    /// use std::sync::Arc;
    /// use std::task::Context;
    ///
    /// struct Waker;
    /// # impl std::task::Wake for Waker { fn wake(self: Arc<Self>) { } }
    ///
    /// # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() {
    /// let limiter = RateLimiter::builder().build();
    ///
    /// let waker = Arc::new(Waker).into();
    /// let mut cx = Context::from_waker(&waker);
    ///
    /// let a1 = limiter.acquire(1);
    /// tokio::pin!(a1);
    ///
    /// assert!(!a1.is_core());
    /// assert!(a1.as_mut().poll(&mut cx).is_pending());
    /// assert!(a1.is_core());
    ///
    /// a1.as_mut().await;
    ///
    /// // After completion this is no longer a core.
    /// assert!(!a1.is_core());
    /// # }
    /// ```
    pub fn is_core(&self) -> bool {
        self.inner.is_core()
    }
}

impl Future for Acquire<'_> {
    type Output = ();

    #[inline]
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.project().inner.poll(cx)
    }
}

pin_project! {
    /// The future associated with acquiring permits from a rate limiter using
    /// [`RateLimiter::acquire_owned`].
    #[project(!Unpin)]
    pub struct AcquireOwned {
        #[pin]
        inner: AcquireFut<Arc<RateLimiter>>,
    }
}

impl AcquireOwned {
    /// Test if this acquire task is currently coordinating the rate limiter.
    ///
    /// # Examples
    ///
    /// ```
    /// use leaky_bucket::RateLimiter;
    /// use std::future::Future;
    /// use std::sync::Arc;
    /// use std::task::Context;
    ///
    /// struct Waker;
    /// # impl std::task::Wake for Waker { fn wake(self: Arc<Self>) { } }
    ///
    /// # #[tokio::main(flavor="current_thread", start_paused=true)] async fn main() {
    /// let limiter = Arc::new(RateLimiter::builder().build());
    ///
    /// let waker = Arc::new(Waker).into();
    /// let mut cx = Context::from_waker(&waker);
    ///
    /// let a1 = limiter.acquire_owned(1);
    /// tokio::pin!(a1);
    ///
    /// assert!(!a1.is_core());
    /// assert!(a1.as_mut().poll(&mut cx).is_pending());
    /// assert!(a1.is_core());
    ///
    /// a1.as_mut().await;
    ///
    /// // After completion this is no longer a core.
    /// assert!(!a1.is_core());
    /// # }
    /// ```
    pub fn is_core(&self) -> bool {
        self.inner.is_core()
    }
}

impl Future for AcquireOwned {
    type Output = ();

    #[inline]
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.project().inner.poll(cx)
    }
}

pin_project! {
    #[project(!Unpin)]
    #[project = AcquireFutProj]
    struct AcquireFut<T>
    where
        T: Deref<Target = RateLimiter>,
    {
        /// Handle to the rate limiter (borrowed or `Arc`-owned).
        lim: T,
        /// Total number of permits this future is acquiring.
        permits: usize,
        /// Current phase of the acquire state machine.
        state: AcquireFutState,
        /// Sleep timer used while this future acts as the core.
        #[pin]
        sleep: Option<time::Sleep>,
        /// Intrusively linked wait-queue node.
        #[pin]
        inner: AcquireFutInner,
    }

    impl<T> PinnedDrop for AcquireFut<T>
    where
        T: Deref<Target = RateLimiter>,
    {
        fn drop(this: Pin<&mut Self>) {
            let AcquireFutProj { lim, permits, state, inner, .. } = this.project();

            // Only a waiting or core future holds resources (a queue link
            // and possibly partially-acquired permits) that must be undone.
            let is_core = match *state {
                AcquireFutState::Waiting => false,
                AcquireFutState::Core => true,
                _ => return,
            };

            let mut critical = lim.lock();
            let mut s = lim.take();
            inner.release_remaining(&mut critical, &mut s, *permits);

            // Dropping the core must hand coordination back so another task
            // can take over.
            if is_core {
                critical.release(&mut s);
            }

            *state = AcquireFutState::Complete;
        }
    }
}

impl<T> AcquireFut<T>
where
    T: Deref<Target = RateLimiter>,
{
    #[inline]
    const fn new(lim: T, permits: usize) -> Self {
        Self {
            lim,
            permits,
            state: AcquireFutState::Initial,
            sleep: None,
            inner: AcquireFutInner::new(),
        }
    }

    /// Test if this future currently holds the core.
    fn is_core(&self) -> bool {
        matches!(&self.state, AcquireFutState::Core)
    }
}

// SAFETY: All the internals of acquire is thread safe and correctly
// synchronized. The embedded waiter queue doesn't have anything inherently
// unsafe in it.
unsafe impl<T> Send for AcquireFut<T> where T: Send + Deref<Target = RateLimiter> {}
unsafe impl<T> Sync for AcquireFut<T> where T: Sync + Deref<Target = RateLimiter> {}

impl<T> Future for AcquireFut<T>
where
    T: Deref<Target = RateLimiter>,
{
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let AcquireFutProj {
            lim,
            permits,
            state,
            mut sleep,
            inner: mut internal,
            ..
        } = self.project();

        // Hold onto the critical lock for core operations, but only acquire it
        // when strictly necessary.
        let mut critical;

        // Shared state.
        //
        // Once we are holding onto the critical lock, we take the entire state
        // to ensure that any fast-path negotiators do not observe any available
        // permits while potential core work is ongoing.
        let mut s;

        // Hold onto any call to `Instant::now` which we might perform, so we
        // don't have to get the current time multiple times.
        let outer_now;

        match *state {
            AcquireFutState::Complete => {
                return Poll::Ready(());
            }
            AcquireFutState::Initial => {
                // If the rate limiter is not fair, try to opportunistically
                // just acquire a permit through the known atomic state.
                //
                // This is known as the fast path, but requires acquire to race
                // against other tasks when storing the state back.
                if lim.try_fast_path(*permits) {
                    *state = AcquireFutState::Complete;
                    return Poll::Ready(());
                }

                critical = lim.lock();
                s = lim.take();

                let now = Instant::now();

                // If we've hit a deadline, calculate the number of tokens
                // to drain and perform it in line here. This is necessary
                // because the core isn't aware of how long we sleep between
                // each acquire, so we need to perform some of the drain
                // work here in order to avoid accruing a debt that needs to
                // be filled later in.
                //
                // If we didn't do this, and the process slept for a long
                // time, the next time a core is acquired it would be very
                // far removed from the expected deadline and has no idea
                // when permits were acquired, so it would over-eagerly
                // release a lot of acquires and accumulate permits.
                //
                // This is tested for in the `test_idle` suite of tests.
                let tokens =
                    if let Some((tokens, deadline)) = lim.calculate_drain(critical.deadline, now) {
                        trace!(tokens, "inline drain");
                        // We pre-emptively update the deadline of the core
                        // since it might bump, and we don't want other
                        // processes to observe that the deadline has been
                        // reached.
                        critical.deadline = deadline;
                        tokens
                    } else {
                        0
                    };

                let completed = s.add_tokens(&mut critical, tokens, |critical, s| {
                    let (_, task) = internal.as_mut().get_task(critical);
                    task.remaining = *permits;
                    task.fill(&mut s.balance);
                    task.is_completed()
                });

                if completed {
                    *state = AcquireFutState::Complete;
                    return Poll::Ready(());
                }

                // Try to take over as core. If we're unsuccessful we just
                // ensure that we're linked into the wait queue.
                if !mem::take(&mut s.available) {
                    internal.as_mut().update(&mut critical, cx.waker());
                    *state = AcquireFutState::Waiting;
                    return Poll::Pending;
                }

                // SAFETY: This is done in a pinned section, so we know that
                // the linked section stays alive for the duration of this
                // future due to pinning guarantees.
                internal.as_mut().link_core(&mut critical, lim);
                Guard::bump(&mut critical);
                *state = AcquireFutState::Core;
                outer_now = Some(now);
            }
            AcquireFutState::Waiting => {
                // If we are complete, then return as ready.
                //
                // This field is atomic, so we can safely read it under shared
                // access and do not require a lock.
                if internal.complete().load(Ordering::Acquire) {
                    *state = AcquireFutState::Complete;
                    return Poll::Ready(());
                }

                // Note: we need to operate under this lock to ensure that
                // the core acquired here (or elsewhere) observes that the
                // current task has been linked up.
                critical = lim.lock();
                s = lim.take();

                // Try to take over as core. If we're unsuccessful we
                // just ensure that we're linked into the wait queue.
                if !mem::take(&mut s.available) {
                    internal.update(&mut critical, cx.waker());
                    return Poll::Pending;
                }

                let now = Instant::now();

                // This is done in a pinned section, so we know that the linked
                // section stays alive for the duration of this future due to
                // pinning guarantees.
                if !internal.as_mut().assume_core(&mut critical, &mut s, now) {
                    // Marks as completed.
                    *state = AcquireFutState::Complete;
                    return Poll::Ready(());
                }

                Guard::bump(&mut critical);
                *state = AcquireFutState::Core;
                outer_now = Some(now);
            }
            AcquireFutState::Core => {
                critical = lim.lock();
                s = lim.take();
                outer_now = None;
            }
        }

        // From here on this future *is* the core: it sleeps until the
        // deadline and then drains refilled tokens to waiters.
        trace!(until = ?critical.deadline, "taking over core and sleeping");

        // Lazily construct (or retarget) the sleep timer for the current
        // deadline so repeat polls reuse the same timer entry.
        let mut sleep = match sleep.as_mut().as_pin_mut() {
            Some(mut sleep) => {
                if sleep.deadline() != critical.deadline {
                    sleep.as_mut().reset(critical.deadline);
                }

                sleep
            }
            None => {
                sleep.set(Some(time::sleep_until(critical.deadline)));
                sleep.as_mut().as_pin_mut().unwrap()
            }
        };

        if sleep.as_mut().poll(cx).is_pending() {
            return Poll::Pending;
        }

        critical.deadline = outer_now.unwrap_or_else(Instant::now) + lim.interval;

        if internal.drain_core(&mut critical, &mut s, lim.refill) {
            *state = AcquireFutState::Complete;
            return Poll::Ready(());
        }

        // Still more to drain on the next interval: immediately re-schedule
        // ourselves so the new sleep is polled.
        cx.waker().wake_by_ref();
        Poll::Pending
    }
}

#[cfg(test)]
mod tests {
    use super::{Acquire, AcquireOwned, RateLimiter};

    fn is_send<T: Send>() {}
    fn is_sync<T: Sync>() {}

    #[test]
    fn assert_send_sync() {
        is_send::<RateLimiter>();
        is_sync::<RateLimiter>();

        is_send::<AcquireOwned>();
        is_sync::<AcquireOwned>();

        is_send::<Acquire<'static>>();
        is_sync::<Acquire<'static>>();
    }
}
-------------------------------------------------------------------------------- /src/linked_list.rs: --------------------------------------------------------------------------------
//! An intrusive linked list of waiters.

use core::fmt;
use core::marker;
use core::ops;
use core::ptr;

pub struct Node<T> {
    /// The next node.
    next: Option<ptr::NonNull<Node<T>>>,
    /// The previous node.
    prev: Option<ptr::NonNull<Node<T>>>,
    /// If we are linked or not.
    linked: bool,
    /// The value inside of the node.
    value: T,
    /// Avoids noalias heuristics from kicking in on references to a `Node`
    /// struct.
    _pin: marker::PhantomPinned,
}

impl<T> Node<T> {
    /// Construct a new unlinked node.
    pub(crate) const fn new(value: T) -> Self {
        Self {
            next: None,
            prev: None,
            linked: false,
            value,
            _pin: marker::PhantomPinned,
        }
    }

    /// Test if this node is currently linked into a list.
    #[inline(always)]
    pub(crate) fn is_linked(&self) -> bool {
        self.linked
    }

    /// Set the next node.
    #[inline(always)]
    unsafe fn set_next(&mut self, node: Option<ptr::NonNull<Node<T>>>) {
        // Raw-pointer write through `addr_of_mut!` to avoid materializing a
        // reference to the whole (pinned, aliased) node.
        ptr::addr_of_mut!(self.next).write(node);
    }

    /// Take the next node.
    #[inline(always)]
    unsafe fn take_next(&mut self) -> Option<ptr::NonNull<Node<T>>> {
        ptr::addr_of_mut!(self.next).replace(None)
    }

    /// Set the previous node.
    #[inline(always)]
    unsafe fn set_prev(&mut self, node: Option<ptr::NonNull<Node<T>>>) {
        ptr::addr_of_mut!(self.prev).write(node);
    }

    /// Take the previous node.
    #[inline(always)]
    unsafe fn take_prev(&mut self) -> Option<ptr::NonNull<Node<T>>> {
        ptr::addr_of_mut!(self.prev).replace(None)
    }
}

impl<T> ops::Deref for Node<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

impl<T> ops::DerefMut for Node<T>
where
    T: Unpin,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.value
    }
}

/// An intrusive linked list.
///
/// Because of the intrusive nature of the list, the list itself can only assert
/// that you have shared or exclusive access to the underlying link structure by
/// requiring `&self` or `&mut self`. It cannot however ensure that the returned
/// node is available for the given access node since it might be stored and
/// used somewhere else so this must be externally synchronized.
///
/// In terms of access to the nodes processed by this list, you can correctly
/// dereference the returned pointers shared or exclusively depending on the
/// signature of the function used in this list. If it takes `&self`, you can
/// correctly use methods such as [ptr::NonNull::as_ref]. Conversely if it takes
/// `&mut self`, you can use methods such as [ptr::NonNull::as_mut].
pub struct LinkedList<T> {
    head: Option<ptr::NonNull<Node<T>>>,
    tail: Option<ptr::NonNull<Node<T>>>,
}

impl<T> LinkedList<T> {
    /// Construct a new empty list.
    pub(crate) const fn new() -> Self {
        Self {
            head: None,
            tail: None,
        }
    }

    /// Push to the front of the linked list.
    ///
    /// # Safety
    ///
    /// The soundness of manipulating the data in the list depends entirely on
    /// what was pushed. If you intend to mutate the data, you must push a
    /// pointer that is based out of something that was exclusively borrowed
    /// (example below).
    ///
    /// The caller also must ensure that the data pushed doesn't outlive its
    /// use.
    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace"))]
    pub(crate) unsafe fn push_front(&mut self, mut node: ptr::NonNull<Node<T>>) {
        trace!(head = ?self.head, tail = ?self.tail, node = ?node, "push_front");

        // The node must be fresh or previously unlinked.
        debug_assert!(node.as_ref().next.is_none());
        debug_assert!(node.as_ref().prev.is_none());
        debug_assert!(!node.as_ref().linked);

        if let Some(mut head) = self.head.take() {
            node.as_mut().set_next(Some(head));
            head.as_mut().set_prev(Some(node));
            self.head = Some(node);
        } else {
            // Empty list: the node becomes both head and tail.
            self.head = Some(node);
            self.tail = Some(node);
        }

        node.as_mut().linked = true;
    }

    /// Push to the back of the linked list.
    ///
    /// # Safety
    ///
    /// The soundness of manipulating the data in the list depends entirely on
    /// what was pushed. If you intend to mutate the data, you must push a
    /// pointer that is based out of something that was exclusively borrowed
    /// (example below).
    ///
    /// The caller also must ensure that the data pushed doesn't outlive its
    /// use.
    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace"))]
    pub(crate) unsafe fn push_back(&mut self, mut node: ptr::NonNull<Node<T>>) {
        trace!(head = ?self.head, tail = ?self.tail, node = ?node, "push_back");

        // The node must be fresh or previously unlinked.
        debug_assert!(node.as_ref().next.is_none());
        debug_assert!(node.as_ref().prev.is_none());
        debug_assert!(!node.as_ref().linked);

        if let Some(mut tail) = self.tail.take() {
            node.as_mut().set_prev(Some(tail));
            tail.as_mut().set_next(Some(node));
            self.tail = Some(node);
        } else {
            // Empty list: the node becomes both head and tail.
            self.head = Some(node);
            self.tail = Some(node);
        }

        node.as_mut().linked = true;
    }

    /// Pop the front element from the list. Only used by tests to exercise the
    /// link structure from both ends.
    #[cfg(test)]
    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace"))]
    unsafe fn pop_front(&mut self) -> Option<ptr::NonNull<Node<T>>> {
        trace!(head = ?self.head, tail = ?self.tail, "pop_front");

        let mut head = self.head?;
        debug_assert!(head.as_ref().linked);

        if let Some(mut next) = head.as_mut().take_next() {
            next.as_mut().set_prev(None);
            self.head = Some(next);
        } else {
            // Single element list: it must also have been the tail.
            debug_assert_eq!(self.tail, Some(head));
            self.head = None;
            self.tail = None;
        }

        debug_assert!(head.as_ref().prev.is_none());
        debug_assert!(head.as_ref().next.is_none());
        head.as_mut().linked = false;
        Some(head)
    }

    /// Pop the back element from the list.
    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace"))]
    pub(crate) unsafe fn pop_back(&mut self) -> Option<ptr::NonNull<Node<T>>> {
        trace!(head = ?self.head, tail = ?self.tail, "pop_back");

        let mut tail = self.tail?;
        debug_assert!(tail.as_ref().linked);

        if let Some(mut prev) = tail.as_mut().take_prev() {
            prev.as_mut().set_next(None);
            self.tail = Some(prev);
        } else {
            // Single element list: it must also have been the head.
            debug_assert_eq!(self.head, Some(tail));
            self.head = None;
            self.tail = None;
        }

        debug_assert!(tail.as_ref().prev.is_none());
        debug_assert!(tail.as_ref().next.is_none());
        tail.as_mut().linked = false;
        Some(tail)
    }

    /// Remove the specified node.
    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace"))]
    pub(crate) unsafe fn remove(&mut self, mut node: ptr::NonNull<Node<T>>) {
        trace!(head = ?self.head, tail = ?self.tail, node = ?node, "remove");

        debug_assert!(node.as_ref().linked);

        let next = node.as_mut().take_next();
        let prev = node.as_mut().take_prev();

        // Splice the neighbours together, updating head/tail when the removed
        // node sat at either end of the list.
        if let Some(mut next) = next {
            next.as_mut().set_prev(prev);
        } else {
            debug_assert_eq!(self.tail, Some(node));
            self.tail = prev;
        }

        if let Some(mut prev) = prev {
            prev.as_mut().set_next(next);
        } else {
            debug_assert_eq!(self.head, Some(node));
            self.head = next;
        }

        node.as_mut().linked = false;
    }

    /// Mutably get the front of the list.
    ///
    /// This returns a raw pointer which can correctly be mutably accessed since
    /// the signature of this method ensures exclusive access to the list.
    #[cfg_attr(feature = "tracing", tracing::instrument(level = "trace"))]
    pub(crate) unsafe fn front(&mut self) -> Option<ptr::NonNull<Node<T>>> {
        self.head
    }
}

impl<T> fmt::Debug for LinkedList<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("LinkedList")
            .field("head", &self.head)
            .field("tail", &self.tail)
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use std::ptr;

    use super::*;

    #[test]
    fn push_back() {
        let mut list = LinkedList::new();

        let mut a = Node::new(0);
        let mut b = Node::new(0);

        unsafe {
            list.push_back(ptr::NonNull::from(&mut a));
            list.push_back(ptr::NonNull::from(&mut b));

            // Each popped element receives a distinct power of two so the
            // final values encode the pop order.
            let mut n = 1;

            while let Some(mut last) = list.pop_back() {
                **last.as_mut() += n;
                n <<= 1;
            }
        }

        assert_eq!(*a, 2);
        assert_eq!(*b, 1);
    }

    #[test]
    fn push_front() {
        let mut list = LinkedList::new();

        let mut a = Node::new(0);
        let mut b = Node::new(0);

        unsafe {
            list.push_front(ptr::NonNull::from(&mut a));
            list.push_front(ptr::NonNull::from(&mut b));

            let mut n = 1;

            while let Some(mut last) = list.pop_back() {
                **last.as_mut() += n;
                n <<= 1;
            }
        }

        assert_eq!(*a, 1);
        assert_eq!(*b, 2);
    }

    #[test]
    fn remove() {
        let mut list = LinkedList::new();

        let mut a = Node::new(0);
        let mut b = Node::new(0);
        let mut c = Node::new(0);
        let mut d = Node::new(0);

        unsafe {
            list.push_back(ptr::NonNull::from(&mut a));
            list.push_back(ptr::NonNull::from(&mut b));
            list.push_back(ptr::NonNull::from(&mut c));
            list.push_back(ptr::NonNull::from(&mut d));

            // Remove one interior node and the tail; only `a` and `c` remain.
            list.remove(ptr::NonNull::from(&mut b));
            list.remove(ptr::NonNull::from(&mut d));

            let mut n = 1;

            while let Some(mut last) = list.pop_back() {
                **last.as_mut() += n;
                n <<= 1;
            }
        }

        assert_eq!(*a, 2);
        assert_eq!(*b, 0);
        assert_eq!(*c, 1);
        assert_eq!(*d, 0);
    }

    #[test]
    fn front_mut() {
        let mut list = LinkedList::new();

        let mut a = Node::new(0);
        let mut b = Node::new(1);

        unsafe {
            list.push_back(ptr::NonNull::from(&mut a));
            list.push_back(ptr::NonNull::from(&mut b));

            let mut n = 1;

            while let Some(mut node) = list.pop_front() {
                **node.as_mut() += n;
                n <<= 1;
            }

            assert!(list.front().is_none());
        }

        assert_eq!(*a, 1);
        assert_eq!(*b, 3);
    }

    #[test]
    fn pop_back() {
        let mut list = LinkedList::new();

        let mut a = Node::new(0);
        let mut b = Node::new(0);

        unsafe {
            list.push_back(ptr::NonNull::from(&mut a));
            list.push_back(ptr::NonNull::from(&mut b));

            let mut n = 1;

            while let Some(mut last) = list.pop_back() {
                **last.as_mut() += n;
                n <<= 1;
            }
        }

        assert_eq!(*a, 2);
        assert_eq!(*b, 1);
    }
}
-------------------------------------------------------------------------------- /tests/issue5.rs: --------------------------------------------------------------------------------
use leaky_bucket::RateLimiter;
use tokio::time::{Duration, Instant};

#[tokio::test(start_paused = true)]
async fn test_issue5_a() {
    let limiter = RateLimiter::builder()
        .refill(1)
        .interval(Duration::from_millis(100))
        .build();

    let begin = Instant::now();

    for _ in 0..10 {
        limiter.acquire_one().await;
    }

    // One permit per 100ms => 10 permits take exactly one second under the
    // paused tokio clock.
    let elapsed = Instant::now().duration_since(begin);
    assert_eq!(elapsed, Duration::from_secs(1));
}

#[tokio::test(start_paused = true)]
async fn test_issue5_b() {
    let limiter = RateLimiter::builder()
        .refill(1)
        .interval(Duration::from_secs(2))
        .build();

    let begin = Instant::now();

    for _ in 0..2 {
        limiter.acquire_one().await;
    }

    let elapsed = Instant::now().duration_since(begin);
    // once per 2 seconds => 4 seconds for 2 permits
    assert_eq!(elapsed, Duration::from_secs(4));
}
-------------------------------------------------------------------------------- /tests/limits.rs: --------------------------------------------------------------------------------
use leaky_bucket::RateLimiter;
use tokio::time::{Duration, Instant};

#[tokio::test(flavor = "current_thread", start_paused = true)]
async fn test_numerical_limits() {
    let limiter = RateLimiter::builder().refill(usize::MAX).initial(0).build();
    let start = Instant::now();

    limiter.acquire(usize::MAX).await;
    // Drain the remainder, this should not block.
    limiter.acquire(isize::MAX as usize - 1).await;

    // This takes 300ms because isize::MAX is one off from half of usize::MAX,
    // so we need to wait for three periods to satisfy usize::MAX.
    assert_eq!(
        Instant::now().duration_since(start),
        Duration::from_millis(300)
    );

    // This will block for 100ms to refill the bucket.
21 | limiter.acquire(1).await; 22 | assert_eq!( 23 | Instant::now().duration_since(start), 24 | Duration::from_millis(400) 25 | ); 26 | } 27 | -------------------------------------------------------------------------------- /tests/test_core_movements.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::sync::Arc; 3 | use std::task::{Context, Wake}; 4 | 5 | use leaky_bucket::RateLimiter; 6 | use tokio::time::Duration; 7 | 8 | struct Waker; 9 | 10 | impl Wake for Waker { 11 | fn wake(self: Arc) {} 12 | } 13 | 14 | #[tokio::test(start_paused = true)] 15 | async fn test_drop_core() { 16 | let limiter = RateLimiter::builder() 17 | .interval(Duration::from_millis(50)) 18 | .build(); 19 | 20 | let waker = Arc::new(Waker).into(); 21 | let mut cx = Context::from_waker(&waker); 22 | 23 | // Test that dropping a core task restores the ability to acquire new cores. 24 | let a1 = limiter.acquire(1); 25 | let b1 = limiter.acquire(1); 26 | let a2 = limiter.acquire(1); 27 | tokio::pin!(a1, b1, a2); 28 | 29 | assert!(!a1.is_core() && !b1.is_core()); 30 | assert!(a1.as_mut().poll(&mut cx).is_pending()); 31 | assert!(b1.as_mut().poll(&mut cx).is_pending()); 32 | assert!(a1.is_core() && !b1.is_core()); 33 | 34 | a1.set(limiter.acquire(0)); 35 | assert!(!a1.is_core()); 36 | assert!(a1.poll(&mut cx).is_ready()); 37 | 38 | assert!(!a2.is_core()); 39 | assert!(a2.as_mut().poll(&mut cx).is_pending()); 40 | assert!(a2.is_core()); 41 | a2.as_mut().await; 42 | assert!(!a2.is_core()); 43 | 44 | b1.await; 45 | } 46 | 47 | #[tokio::test(start_paused = true)] 48 | async fn test_core_move() { 49 | let limiter = RateLimiter::builder() 50 | .interval(Duration::from_millis(50)) 51 | .build(); 52 | 53 | let waker = Arc::new(Waker).into(); 54 | let mut cx = Context::from_waker(&waker); 55 | 56 | // Test that dropping a core task restores the ability to acquire new cores. 
57 | let a1 = limiter.acquire(1); 58 | let a2 = limiter.acquire(1); 59 | tokio::pin!(a1, a2); 60 | 61 | assert!(!a1.is_core()); 62 | assert!(a1.as_mut().poll(&mut cx).is_pending()); 63 | assert!(a1.is_core()); 64 | 65 | // a2 is not a core because a1 is already a core. 66 | assert!(!a2.is_core()); 67 | assert!(a2.as_mut().poll(&mut cx).is_pending()); 68 | assert!(!a2.is_core()); 69 | 70 | // drop the previous core and poll a2 again to become the new core. 71 | a1.set(limiter.acquire(1)); 72 | 73 | assert!(a2.as_mut().poll(&mut cx).is_pending()); 74 | assert!(a2.is_core()); 75 | 76 | let ((), ()) = tokio::join!(a1, a2); 77 | } 78 | -------------------------------------------------------------------------------- /tests/test_drop.rs: -------------------------------------------------------------------------------- 1 | use std::pin::pin; 2 | use std::sync::Arc; 3 | use tokio::time::{self, Duration, Instant}; 4 | 5 | use leaky_bucket::RateLimiter; 6 | use tokio::task::JoinSet; 7 | 8 | #[tokio::test(start_paused = true)] 9 | async fn test_drop() -> anyhow::Result<()> { 10 | let limiter = Arc::new( 11 | RateLimiter::builder() 12 | .initial(0) 13 | .refill(10) 14 | .interval(Duration::from_millis(50)) 15 | .max(100) 16 | .build(), 17 | ); 18 | 19 | let limiter = limiter.clone(); 20 | 21 | let mut task = pin!(Some(limiter.acquire(10000))); 22 | 23 | let mut join_set = JoinSet::new(); 24 | 25 | for _ in 0..10 { 26 | let limiter = limiter.clone(); 27 | 28 | join_set.spawn(async move { 29 | limiter.acquire(10).await; 30 | }); 31 | } 32 | 33 | tokio::select! { 34 | _ = time::sleep(Duration::from_millis(1000)) => { 35 | // Drop the task 36 | task.set(None); 37 | } 38 | _ = Option::as_pin_mut(task.as_mut()).unwrap() => { 39 | } 40 | // Should never complete, because we have a giant task waiting. 
41 | _ = join_set.join_next() => { 42 | } 43 | } 44 | 45 | let mut released = 0; 46 | 47 | let start = Instant::now(); 48 | 49 | while join_set.join_next().await.is_some() { 50 | released += 1; 51 | } 52 | 53 | assert!(Instant::now().duration_since(start).as_millis() <= 10); 54 | assert_eq!(released, 10); 55 | Ok(()) 56 | } 57 | -------------------------------------------------------------------------------- /tests/test_fast_path.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::pin; 3 | use std::sync::Arc; 4 | use std::task::Context; 5 | use std::thread; 6 | 7 | use leaky_bucket::RateLimiter; 8 | 9 | struct Waker; 10 | 11 | impl std::task::Wake for Waker { 12 | fn wake(self: Arc) {} 13 | } 14 | 15 | #[test] 16 | fn test_fast_paths() { 17 | let limiter = Arc::new( 18 | RateLimiter::builder() 19 | .max(10000) 20 | .initial(10000) 21 | .fair(false) 22 | .build(), 23 | ); 24 | 25 | let mut threads = Vec::new(); 26 | 27 | for _ in 0..100 { 28 | let limiter = limiter.clone(); 29 | 30 | threads.push(thread::spawn(move || { 31 | let waker = Arc::new(Waker).into(); 32 | let mut cx = Context::from_waker(&waker); 33 | 34 | for _ in 0..100 { 35 | let acquire = pin!(limiter.acquire(1)); 36 | assert!(acquire.poll(&mut cx).is_ready()); 37 | } 38 | })); 39 | } 40 | 41 | for thread in threads { 42 | thread.join().unwrap(); 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /tests/test_idle.rs: -------------------------------------------------------------------------------- 1 | //! Stolen from: 2 | //! 
https://github.com/Gelbpunkt/leaky-bucket-lite/blob/main/tests/test_ao_issue.rs 3 | 4 | use leaky_bucket::RateLimiter; 5 | use tokio::time::{self, Duration, Instant}; 6 | 7 | #[tokio::test(start_paused = true)] 8 | async fn test_idle_1() { 9 | let limiter = RateLimiter::builder() 10 | .refill(1) 11 | .interval(Duration::from_secs(2)) 12 | .max(5) 13 | .initial(5) 14 | .build(); 15 | 16 | time::sleep(Duration::from_millis(10000)).await; 17 | 18 | let start = Instant::now(); 19 | 20 | // These ones drain the available permits. 21 | for _ in 0..5 { 22 | limiter.acquire_one().await; 23 | } 24 | 25 | assert_eq!(Instant::now().duration_since(start), Duration::from_secs(0)); 26 | 27 | // These ones need to sleep for 2 seconds for each permit. 28 | for _ in 0..5 { 29 | limiter.acquire_one().await; 30 | } 31 | 32 | assert_eq!( 33 | Instant::now().duration_since(start), 34 | Duration::from_secs(10) 35 | ); 36 | } 37 | 38 | #[tokio::test(start_paused = true)] 39 | async fn test_idle_2() { 40 | let limiter = RateLimiter::builder() 41 | .refill(1) 42 | .interval(Duration::from_secs(2)) 43 | .max(5) 44 | .initial(5) 45 | .build(); 46 | 47 | time::sleep(Duration::from_millis(10000)).await; 48 | 49 | let start = Instant::now(); 50 | 51 | for _ in 0..5 { 52 | limiter.acquire_one().await; 53 | } 54 | 55 | assert_eq!(Instant::now().duration_since(start), Duration::from_secs(0)); 56 | // This one will have to wait for 2 seconds. 57 | limiter.acquire_one().await; 58 | assert_eq!(Instant::now().duration_since(start), Duration::from_secs(2)); 59 | } 60 | 61 | #[tokio::test(start_paused = true)] 62 | async fn test_idle_3() { 63 | // We expect 100 milliseconds to pass, because the first five permits 64 | // acquired falls within one time window, which after 100 milliseconds rolls 65 | // over into the next. 
66 | const EXPECTED: Duration = Duration::from_millis(100); 67 | 68 | let limiter = RateLimiter::builder() 69 | .refill(1) 70 | .interval(Duration::from_secs(2)) 71 | .max(5) 72 | .initial(5) 73 | .build(); 74 | 75 | limiter.acquire_one().await; 76 | time::sleep(Duration::from_millis(3900)).await; 77 | 78 | limiter.acquire_one().await; 79 | 80 | let start = Instant::now(); 81 | 82 | for _ in 0..4 { 83 | limiter.acquire_one().await; 84 | } 85 | 86 | limiter.acquire_one().await; 87 | 88 | let elapsed = Instant::now().duration_since(start); 89 | assert_eq!(elapsed, EXPECTED); 90 | } 91 | -------------------------------------------------------------------------------- /tests/test_overflow.rs: -------------------------------------------------------------------------------- 1 | //! Stolen from: 2 | //! https://github.com/Gelbpunkt/leaky-bucket-lite/blob/main/tests/test_overflow.rs 3 | 4 | use leaky_bucket::RateLimiter; 5 | use tokio::time::{Duration, Instant}; 6 | 7 | #[tokio::test(start_paused = true)] 8 | async fn test_overflow() { 9 | let limiter = RateLimiter::builder() 10 | .max(5) 11 | .initial(5) 12 | .refill(1) 13 | .interval(Duration::from_millis(100)) 14 | .build(); 15 | 16 | let begin = Instant::now(); 17 | 18 | for _ in 0..10 { 19 | limiter.acquire_one().await; 20 | } 21 | 22 | let elapsed = Instant::now().duration_since(begin); 23 | println!("Elapsed: {:?}", elapsed); 24 | assert!(elapsed.as_millis() >= 500 && elapsed.as_millis() <= 550); 25 | } 26 | 27 | #[tokio::test(start_paused = true)] 28 | async fn test_overflow_2() { 29 | let limiter = RateLimiter::builder() 30 | .max(5) 31 | .initial(5) 32 | .refill(1) 33 | .interval(Duration::from_millis(100)) 34 | .build(); 35 | 36 | let begin = Instant::now(); 37 | 38 | limiter.acquire(10).await; 39 | 40 | let elapsed = Instant::now().duration_since(begin); 41 | println!("Elapsed: {:?}", elapsed); 42 | assert!(elapsed.as_millis() >= 500 && elapsed.as_millis() <= 550); 43 | } 44 | 
-------------------------------------------------------------------------------- /tests/test_rate_limit_target.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | use std::sync::Arc; 3 | 4 | use leaky_bucket::RateLimiter; 5 | use tokio::time::{Duration, Instant}; 6 | 7 | /// Test that a bunch of threads spinning on a rate limiter refilling a 8 | /// reasonable amount of tokens at a slowish rate reaches the given target. 9 | #[tokio::test(start_paused = true)] 10 | async fn test_rate_limit_target() { 11 | const TARGET: usize = 1000; 12 | const INTERVALS: usize = 10; 13 | const DURATION: u64 = 2000; 14 | 15 | let limiter = RateLimiter::builder() 16 | .refill(TARGET / INTERVALS) 17 | .interval(Duration::from_millis(DURATION / INTERVALS as u64)) 18 | .build(); 19 | 20 | let limiter = Arc::new(limiter); 21 | let c = Arc::new(AtomicUsize::new(0)); 22 | 23 | let start = Instant::now(); 24 | 25 | let mut tasks = Vec::new(); 26 | 27 | for _ in 0..100 { 28 | let limiter = limiter.clone(); 29 | let c = c.clone(); 30 | 31 | tasks.push(tokio::spawn(async move { 32 | while c.fetch_add(1, Ordering::SeqCst) < TARGET { 33 | limiter.acquire_one().await; 34 | } 35 | })); 36 | } 37 | 38 | for t in tasks { 39 | t.await.unwrap(); 40 | } 41 | 42 | let duration = Instant::now().duration_since(start); 43 | assert_eq!(duration, Duration::from_secs(2)); 44 | } 45 | -------------------------------------------------------------------------------- /tests/test_threaded.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use leaky_bucket::RateLimiter; 4 | use tokio::time::Duration; 5 | 6 | #[tokio::test(start_paused = true)] 7 | async fn test_threaded() -> anyhow::Result<()> { 8 | let limiter = Arc::new( 9 | RateLimiter::builder() 10 | .initial(100) 11 | .refill(100) 12 | .interval(Duration::from_millis(50)) 13 | .max(100) 14 | .build(), 15 | ); 
16 | 17 | let mut tasks = Vec::new(); 18 | let mut expected = Vec::new(); 19 | 20 | for n in 0..10 { 21 | let limiter = limiter.clone(); 22 | 23 | let task = tokio::spawn(async move { 24 | let mut locals = Vec::new(); 25 | 26 | for i in 0..10 { 27 | limiter.acquire(10).await; 28 | locals.push((n, i)); 29 | } 30 | 31 | locals 32 | }); 33 | 34 | for i in 0..10 { 35 | expected.push((n, i)); 36 | } 37 | 38 | tasks.push(task); 39 | } 40 | 41 | let mut globals = Vec::new(); 42 | 43 | for t in tasks { 44 | globals.extend(t.await?); 45 | } 46 | 47 | globals.sort(); 48 | 49 | assert_eq!(expected, globals); 50 | Ok(()) 51 | } 52 | -------------------------------------------------------------------------------- /tests/test_try_acquire.rs: -------------------------------------------------------------------------------- 1 | use std::future::Future; 2 | use std::pin::pin; 3 | use std::sync::Arc; 4 | use std::task::Context; 5 | 6 | use leaky_bucket::RateLimiter; 7 | use tokio::time; 8 | 9 | struct Waker; 10 | 11 | impl std::task::Wake for Waker { 12 | fn wake(self: Arc) {} 13 | } 14 | 15 | #[tokio::test(flavor = "current_thread", start_paused = true)] 16 | async fn test_try_acquire() { 17 | let limiter = RateLimiter::builder().refill(1).initial(1).build(); 18 | 19 | assert!(limiter.try_acquire(1)); 20 | assert!(!limiter.try_acquire(1)); 21 | 22 | time::sleep(limiter.interval() * 2).await; 23 | 24 | assert!(limiter.try_acquire(1)); 25 | assert!(limiter.try_acquire(1)); 26 | assert!(!limiter.try_acquire(1)); 27 | } 28 | 29 | #[tokio::test(flavor = "current_thread", start_paused = true)] 30 | async fn test_try_acquire_contended() { 31 | let limiter = RateLimiter::builder().refill(2).initial(1).build(); 32 | 33 | let waker = Arc::new(Waker).into(); 34 | let mut cx = Context::from_waker(&waker); 35 | 36 | { 37 | let mut waiting = pin!(limiter.acquire(1)); 38 | // Task is not linked. 
39 | assert!(limiter.try_acquire(1)); 40 | assert!(waiting.as_mut().poll(&mut cx).is_pending()); 41 | // Task is now linked, so we cannot acquire. 42 | assert!(!limiter.try_acquire(1)); 43 | } 44 | 45 | time::sleep(limiter.interval()).await; 46 | 47 | assert!(limiter.try_acquire(2)); 48 | assert!(!limiter.try_acquire(1)); 49 | } 50 | 51 | #[tokio::test(flavor = "current_thread", start_paused = true)] 52 | async fn test_try_acquire_max() { 53 | let limiter = RateLimiter::builder().refill(100).initial(1).max(1).build(); 54 | time::sleep(limiter.interval()).await; 55 | assert!(!limiter.try_acquire(2)); 56 | } 57 | --------------------------------------------------------------------------------