├── .gitattributes ├── .github └── workflows │ └── build.yml ├── .gitignore ├── CHANGELOG.md ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── rustfmt.toml └── src ├── doc ├── Main.md ├── PageError ├── PageItems ├── PageTurner ├── PageTurnerFuture ├── PageTurner__into_pages ├── PageTurner__into_pages_ahead ├── PageTurner__into_pages_ahead_unordered ├── PageTurner__pages ├── PageTurner__pages_ahead ├── PageTurner__pages_ahead_unordered ├── PageTurner__turn_page ├── PagesStream ├── PagesStream__items ├── TurnedPageResult └── prelude ├── internal ├── itertools.rs ├── mod.rs ├── pages.rs ├── pages_ahead.rs └── pages_ahead_unordered.rs ├── lib.rs ├── local ├── mod.rs └── tests.rs ├── mt ├── mod.rs └── tests.rs └── test_utils.rs /.gitattributes: -------------------------------------------------------------------------------- 1 | *.rs diff=rust 2 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | env: 10 | CARGO_TERM_COLOR: always 11 | 12 | jobs: 13 | build: 14 | 15 | runs-on: ubuntu-latest 16 | 17 | steps: 18 | - uses: actions/checkout@v3 19 | - name: Check 20 | run: cargo clippy --all-features --verbose 21 | - name: Run tests 22 | run: cargo test --verbose 23 | - name: Check format 24 | run: cargo fmt --check 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # macOS 2 | .DS_Store 3 | 4 | # Generated by Cargo 5 | # will have compiled files and executables 6 | target/ 7 | 8 | # These are backup files generated by rustfmt 9 | **/*.rs.bk 10 | 11 | # ignore clion folder 12 | .idea/ 13 | 14 | # Ignore for library 15 | Cargo.lock 16 | 
-------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | - **1.0.0:** 2 | - Replace `#[async_trait]` with partially stabilized `async trait` using 3 | RPITIT. Set MSRV to 1.75. 4 | - `PageTurner` doesn't require to return `Vec` anymore. `type 5 | PageItem` is renamed into `type PageItems` and a user can specify a full 6 | return type with it like `type PageItems = HashMap>`; 7 | - `PagesStream` is now a `Stream` extension trait and not a separate type. 8 | - Add extra `PageItems`, `PageError`, `PageTurnerFuture` type aliases, 9 | rename `PageTurnerOutput` into `TurnedPageResult`. 10 | - Implement more optimal sliding window request scheduling strategy for 11 | `pages_ahead` and `pages_ahead_unordered` streams. Details: 12 | . `into_pages_ahead` and 13 | `into_pages_ahead_unordered` now require `Clone` explicitly and don't use 14 | `Arc` under the hood. 15 | - Internal refactorings, module restructurings, and a huge simplification 16 | of internal streams implementation. All copy-paste is gone! 17 | - Introduce different page turner flavors with relaxed constraints behind 18 | feature flags for use in singlethreaded environments. `local` doesn't 19 | require anything to be `Send` and `mutable` allows to mutate client 20 | during querying. Bring back `async_trait` version of page turner behind 21 | the `dynamic` feature flag. 22 | - Extra tests that check that everything has correct constraints and is 23 | send/object safe where required. 24 | - README, CHANGELOG and documentation overhauls. 25 | 26 | - **0.8.2:** 27 | - Fix typo in docs. 28 | 29 | - **0.8.1:** 30 | - Bugfix in internal chunking iterator that yielded empty chunks for 31 | `chunk_size = 1` in previous version. 
(0.8.0 yanked) 32 | 33 | - **0.8.0:** 34 | - Introduce [`RequestAhead`] and [`PageTurner::pages_ahead`], 35 | [`PageTurner::pages_ahead_unordered`] for concurrent page querying 36 | 37 | - **0.7.0:** 38 | - Hotfix lifetime bounds in [`PagesStream`] for `T` and `E`. (0.6.0 yanked) 39 | 40 | - **0.6.0:** 41 | - Initial public release 42 | 43 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "page-turner" 3 | version = "1.0.0" 4 | authors = ["a1akris "] 5 | edition = "2021" 6 | license = "MIT OR Apache-2.0" 7 | rust-version = "1.75" 8 | 9 | description = "A generic abstraction of APIs with pagination" 10 | readme = "README.md" 11 | repository = "https://github.com/a1akris/page-turner" 12 | documentation = "https://docs.rs/page-turner" 13 | keywords = ["pagination", "paginated", "pageturner", "pages", "page-turner"] 14 | categories = ["rust-patterns", "asynchronous", "web-programming", "network-programming", "concurrency"] 15 | 16 | exclude = [ 17 | ".github/*" 18 | ] 19 | 20 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 21 | 22 | [features] 23 | default = ["mt"] 24 | local = [] 25 | mt = [] 26 | mutable = ["local"] 27 | dynamic = ["mt", "async-trait"] 28 | 29 | [dependencies] 30 | async-trait = { version = "0.1.77", optional = true } 31 | futures = { version = "0.3.30", default-features = false, features = ["std"] } 32 | 33 | [dev-dependencies] 34 | tokio = { version = "1", features = ["test-util", "macros", "rt-multi-thread"] } 35 | 36 | [package.metadata.docs.rs] 37 | all-features = true 38 | rustdoc-args = ["--cfg", "docsrs"] 39 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | 
http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 
175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any 2 | person obtaining a copy of this software and associated 3 | documentation files (the "Software"), to deal in the 4 | Software without restriction, including without 5 | limitation the rights to use, copy, modify, merge, 6 | publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software 8 | is furnished to do so, subject to the following 9 | conditions: 10 | 11 | The above copyright notice and this permission notice 12 | shall be included in all copies or substantial portions 13 | of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 16 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 17 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 18 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 19 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 22 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | DEALINGS IN THE SOFTWARE. 
24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Build](https://github.com/a1akris/page-turner/actions/workflows/build.yml/badge.svg) 2 | 3 | # page-turner 4 | 5 | A generic abstraction of paginated APIs 6 | 7 | ## In a nutshell 8 | 9 | If you have a paginated request implement a page turner trait for this request 10 | on your client: 11 | 12 | ```rust 13 | use page_turner::prelude::*; 14 | 15 | impl PageTurner for ApiClient { 16 | type PageItems = Vec; 17 | type PageError = ApiError; 18 | 19 | async fn turn_page(&self, request: GetReviewsRequest) -> TurnedPageResult { 20 | let response = self.execute(request).await?; 21 | 22 | let turned_page = match response.next_page_token { 23 | Some(token) => TurnedPage::next(response.reviews, GetReviewsRequest::from(token)), 24 | None => TurnedPage::last(response.reviews), 25 | }; 26 | 27 | Ok(turned_page) 28 | } 29 | } 30 | ``` 31 | 32 | The page turner then provides stream-based APIs that allow data to be queried 33 | as if pagination does not exist: 34 | 35 | ```rust 36 | use page_turner::prelude::*; 37 | use futures::{StreamExt, TryStreamExt}; 38 | 39 | async fn first_n_positive_reviews(client: &ApiClient, request: GetReviewsRequest, count: usize) -> ApiResult> { 40 | client 41 | .pages(request) 42 | .items() 43 | .try_filter(|review| std::future::ready(review.is_positive())) 44 | .take(count) 45 | .await 46 | } 47 | 48 | ``` 49 | 50 | Both cursor and non-cursor pagination patterns are supported and for the latter 51 | one you can enable concurrent querying by implementing the `RequestAhead` 52 | trait: 53 | 54 | ```rust 55 | pub struct GetCommentsRequest { 56 | pub page_id: PageId, 57 | } 58 | 59 | impl RequestAhead for GetCommentsRequest { 60 | fn next_request(&self) -> Self { 61 | Self { 62 | page_id: self.page_id + 1, 63 | } 64 | } 65 | } 66 | ``` 67 | 68 | Now you can use 
`pages_ahead`/`pages_ahead_unordered` family of methods to 69 | request multiple pages concurrently using a quite optimal [sliding 70 | window](https://docs.rs/page-turner/1.0.0/page_turner/mt/trait.PageTurner.html#method.pages_ahead) 71 | request scheduling under the hood: 72 | 73 | ```rust 74 | use page_turner::prelude::*; 75 | use futures::TryStreamExt; 76 | 77 | async fn retrieve_user_comments(username: &str) -> ResponseResult> { 78 | let client = ForumClient::new(); 79 | 80 | client.pages_ahead(4, Limit::None, GetCommentsRequest { page_id: 1 }) 81 | .items() 82 | .try_filter(|comment| std::future::ready(comment.author == username)) 83 | .try_collect() 84 | .await 85 | 86 | } 87 | ``` 88 | 89 | The example above schedules requests for 4 pages simultaneously and then issues 90 | a request as soon as you receive a response concurrently awaiting for 4 91 | responses all the time while you're processing results. 92 | 93 | 94 | ## v1.0.0 release 95 | 96 | The `v1.0.0` uses features like RPITIT stabilized in Rust 1.75, so MSRV for 97 | `v1.0.0` is `1.75.0`. If you can't afford to upgrade to Rust `1.75` use `0.8.2` 98 | version of the crate. It's quite similar and supports Rust versions the 99 | `async_trait` crate supports. 100 | 101 | See [docs](https://docs.rs/page-turner) for details about new supported 102 | features. 103 | 104 | See [CHANGELOG.md](CHANGELOG.md) for full changes history. 105 | 106 | 107 | ### Migration to v1.0.0 from older versions 108 | 109 | There are several major breaking changes in v1.0.0, here are instructions how 110 | to quickly adopt them: 111 | 112 | 1. No more `#[async_trait]` by default. Remove `#[async_trait]` from your page 113 | turner impls and everything should work. If you for some reason rely on `dyn 114 | PageTurner` then enable feature `dynamic` and use 115 | `page_turner::dynamic::prelude::*` instead of the `page_turner::prelude::*`. 116 | 117 | 1. 
New page turners don't enforce you to return `Vec` anymore, now 118 | you can return whatever you like(`HashMap` is a one example of a popular 119 | alternative). To quickly make your code compile retaining the old behavior 120 | replace `type PageItem = YourItem;` with `type PageItems = Vec;`. 121 | Note that `s` in `PageItems` :) 122 | 123 | 1. `PageTurnerOutput` was renamed into `TurnedPageResult` but it is the same 124 | type alias so a simple global search&replace should do the trick. 125 | 126 | 1. `into_pages_ahead` and `into_pages_ahead_unordered` methods now require 127 | implementors to be clonable. Previously, they used `Arc` under the hood, but 128 | now it's up to you. Most likely your clients are already cheaply clonable 129 | but if not then the quickest way to fix `doesn't implement Clone` errors is 130 | to wrap your clients into `Arc` like 131 | `Arc::new(client).into_pages_ahead(..)`. 132 | 133 | 134 | ##### License 135 | 136 | Licensed under either of [Apache License, Version 2.0](LICENSE-APACHE) or [MIT 137 | license](LICENSE-MIT) at your option. 138 | 139 | Unless you explicitly state otherwise, any contribution intentionally submitted 140 | for inclusion in this crate by you, as defined in the Apache-2.0 license, shall 141 | be dual licensed as above, without any additional terms or conditions. 142 | -------------------------------------------------------------------------------- /rustfmt.toml: -------------------------------------------------------------------------------- 1 | newline_style = "Unix" 2 | -------------------------------------------------------------------------------- /src/doc/Main.md: -------------------------------------------------------------------------------- 1 | # A generic abstraction of paginated APIs 2 | 3 | Imagine, you need to use the following API to find the most upvoted comment 4 | under a blog post. 
5 | 6 | ```rust 7 | struct GetCommentsRequest { 8 | blog_post_id: BlogPostId, 9 | page_number: u32, 10 | } 11 | 12 | struct GetCommentsResponse { 13 | comments: Vec, 14 | more_comments_available: bool, 15 | } 16 | 17 | # struct Comment { 18 | # upvotes: u32, 19 | # text: String, 20 | # } 21 | # 22 | # type BlogPostId = u32; 23 | ``` 24 | 25 | In order to do that you will need to write a hairy loop that checks the 26 | `more_comments_available` flag, increments `page_number`, and updates a 27 | variable that stores the resulting value. This crate helps to abstract away any 28 | paginated API and allows you to work with such APIs uniformly with the help of 29 | async streams. Both cursor and non-cursor paginations are supported. All you 30 | need to do is to implement the [`PageTurner`] trait for the client that sends 31 | `GetCommentsRequest`. 32 | 33 | In [`PageTurner`] you specify what data you fetch and what errors may occur for 34 | a particular request, then you implement the `turn_page` method where you 35 | describe how to query a single page and how to prepare a request for the next 36 | page. 
37 | 38 | ```rust 39 | use page_turner::prelude::*; 40 | 41 | impl PageTurner for BlogClient { 42 | type PageItems = Vec; 43 | type PageError = BlogClientError; 44 | 45 | async fn turn_page(&self, mut request: GetCommentsRequest) -> TurnedPageResult { 46 | let response = self.get_comments(request.clone()).await?; 47 | 48 | if response.more_comments_available { 49 | request.page_number += 1; 50 | Ok(TurnedPage::next(response.comments, request)) 51 | } else { 52 | Ok(TurnedPage::last(response.comments)) 53 | } 54 | } 55 | } 56 | 57 | # struct BlogClient {} 58 | # 59 | # impl BlogClient { 60 | # async fn get_comments(&self, req: GetCommentsRequest) -> Result { 61 | # todo!() 62 | # } 63 | # } 64 | # 65 | # struct Comment {} 66 | # struct BlogClientError {} 67 | # type BlogPostId = u32; 68 | # 69 | # #[derive(Clone)] 70 | # struct GetCommentsRequest { 71 | # blog_post_id: BlogPostId, 72 | # page_number: u32, 73 | # } 74 | # 75 | # struct GetCommentsResponse { 76 | # comments: Vec, 77 | # more_comments_available: bool, 78 | # } 79 | ``` 80 | 81 | [`PageTurner`] then provides default implementations for [`PageTurner::pages`] 82 | and [`PageTurner::into_pages`] methods that you can use to get a stream of 83 | pages and, optionally, to turn it into a stream of page items if you need. 
Now 84 | we can use our client to find the most upvoted comment like that: 85 | 86 | ```rust 87 | # type BlogPostId = u32; 88 | # 89 | # #[derive(Clone)] 90 | # struct GetCommentsRequest { 91 | # blog_post_id: BlogPostId, 92 | # page_number: u32, 93 | # } 94 | # 95 | # struct GetCommentsResponse { 96 | # comments: Vec, 97 | # more_comments_available: bool, 98 | # } 99 | # 100 | # struct Comment { 101 | # upvotes: u32, 102 | # text: String, 103 | # } 104 | # 105 | # struct BlogClient {} 106 | # 107 | # impl BlogClient { 108 | # fn new() -> Self { 109 | # Self {} 110 | # } 111 | # 112 | # async fn get_comments(&self, req: GetCommentsRequest) -> Result { 113 | # Ok(GetCommentsResponse { 114 | # comments: vec![ 115 | # Comment { 116 | # text: "First".to_owned(), 117 | # upvotes: 0, 118 | # }, 119 | # Comment { 120 | # text: "Second".to_owned(), 121 | # upvotes: 2, 122 | # }, 123 | # Comment { 124 | # text: "Yeet".to_owned(), 125 | # upvotes: 5, 126 | # } 127 | # ], 128 | # more_comments_available: false, 129 | # }) 130 | # } 131 | # } 132 | # 133 | # #[derive(Debug)] 134 | # struct BlogClientError {} 135 | # 136 | # use page_turner::prelude::*; 137 | # 138 | # impl PageTurner for BlogClient { 139 | # type PageItems = Vec; 140 | # type PageError = BlogClientError; 141 | # 142 | # async fn turn_page(&self, mut request: GetCommentsRequest) -> TurnedPageResult { 143 | # let response = self.get_comments(request.clone()).await?; 144 | # 145 | # if response.more_comments_available { 146 | # request.page_number += 1; 147 | # Ok(TurnedPage::next(response.comments, request)) 148 | # } else { 149 | # Ok(TurnedPage::last(response.comments)) 150 | # } 151 | # } 152 | # } 153 | # 154 | # 155 | # use futures::TryStreamExt; 156 | # 157 | # #[tokio::main(flavor = "current_thread")] 158 | # async fn main() -> Result<(), BlogClientError> { 159 | # let blog_post_id = 1337; 160 | let client = BlogClient::new(); 161 | 162 | let most_upvoted_comment = client 163 | .pages(GetCommentsRequest { 
blog_post_id, page_number: 1 }) 164 | .items() 165 | .try_fold(None::, |most_upvoted, next_comment| async move { 166 | match most_upvoted { 167 | Some(comment) if next_comment.upvotes > comment.upvotes => Ok(Some(next_comment)), 168 | current @ Some(_) => Ok(current), 169 | None => Ok(Some(next_comment)), 170 | } 171 | }) 172 | .await? 173 | .unwrap(); 174 | 175 | assert_eq!(most_upvoted_comment.text, "Yeet"); 176 | assert_eq!(most_upvoted_comment.upvotes, 5); 177 | 178 | // Or we can process the whole pages if needed 179 | 180 | let mut comment_pages = std::pin::pin!(client.pages(GetCommentsRequest { blog_post_id, page_number: 1 })); 181 | 182 | while let Some(comment_page) = comment_pages.try_next().await? { 183 | detect_spam(comment_page); 184 | } 185 | 186 | # Ok(()) 187 | # } 188 | # 189 | # fn detect_spam(page: Vec) -> bool { 190 | # false 191 | # } 192 | ``` 193 | 194 | Notice, that with this kind of the API we don't require any data from response 195 | to construct the next valid request. We can take an advantage on such APIs by 196 | implementing the [`RequestAhead`] trait on a request type. For requests that 197 | implement [`RequestAhead`] [`PageTurner`] provides additional methods - 198 | [`PageTurner::pages_ahead`] and [`PageTurner::pages_ahead_unordered`]. These 199 | methods allow to query multiple pages concurrently using an optimal [sliding 200 | window](PageTurner::pages_ahead) request scheduling. 
201 | 202 | ```rust 203 | # type BlogPostId = u32; 204 | # 205 | # #[derive(Clone)] 206 | # struct GetCommentsRequest { 207 | # blog_post_id: BlogPostId, 208 | # page_number: u32, 209 | # } 210 | # 211 | # struct GetCommentsResponse { 212 | # comments: Vec, 213 | # more_comments_available: bool, 214 | # } 215 | # 216 | # struct Comment { 217 | # upvotes: u32, 218 | # text: String, 219 | # } 220 | # 221 | # struct BlogClient {} 222 | # 223 | # impl BlogClient { 224 | # fn new() -> Self { 225 | # Self {} 226 | # } 227 | # 228 | # async fn get_comments(&self, req: GetCommentsRequest) -> Result { 229 | # Ok(GetCommentsResponse { 230 | # comments: vec![ 231 | # Comment { 232 | # text: "First".to_owned(), 233 | # upvotes: 0, 234 | # }, 235 | # Comment { 236 | # text: "Second".to_owned(), 237 | # upvotes: 2, 238 | # }, 239 | # Comment { 240 | # text: "Yeet".to_owned(), 241 | # upvotes: 5, 242 | # } 243 | # ], 244 | # more_comments_available: false, 245 | # }) 246 | # } 247 | # } 248 | # 249 | # #[derive(Debug)] 250 | # struct BlogClientError {} 251 | # 252 | # use page_turner::prelude::*; 253 | # 254 | # impl PageTurner for BlogClient { 255 | # type PageItems = Vec; 256 | # type PageError = BlogClientError; 257 | # 258 | # async fn turn_page(&self, mut request: GetCommentsRequest) -> TurnedPageResult { 259 | # let response = self.get_comments(request.clone()).await?; 260 | # 261 | # if response.more_comments_available { 262 | # request.page_number += 1; 263 | # Ok(TurnedPage::next(response.comments, request)) 264 | # } else { 265 | # Ok(TurnedPage::last(response.comments)) 266 | # } 267 | # } 268 | # } 269 | # 270 | # 271 | # use futures::TryStreamExt; 272 | # 273 | # #[tokio::main(flavor = "current_thread")] 274 | # async fn main() -> Result<(), BlogClientError> { 275 | # let blog_post_id = 1337; 276 | impl RequestAhead for GetCommentsRequest { 277 | fn next_request(&self) -> Self { 278 | Self { 279 | blog_post_id: self.blog_post_id, 280 | page_number: self.page_number + 
1, 281 | } 282 | } 283 | } 284 | 285 | let client = BlogClient::new(); 286 | 287 | // Now instead of querying pages one by one we make 4 concurrent requests 288 | // for multiple pages under the hood but besides using a different PageTurner 289 | // method nothing changes in the user code. 290 | let most_upvoted_comment = client 291 | .pages_ahead(4, Limit::None, GetCommentsRequest { blog_post_id, page_number: 1 }) 292 | .items() 293 | .try_fold(None::, |most_upvoted, next_comment| async move { 294 | match most_upvoted { 295 | Some(comment) if next_comment.upvotes > comment.upvotes => Ok(Some(next_comment)), 296 | current @ Some(_) => Ok(current), 297 | None => Ok(Some(next_comment)), 298 | } 299 | }) 300 | .await? 301 | .unwrap(); 302 | 303 | assert_eq!(most_upvoted_comment.text, "Yeet"); 304 | assert_eq!(most_upvoted_comment.upvotes, 5); 305 | 306 | // In the example above the order of pages being returned corresponds to the order 307 | // of requests which means the stream is blocked until the first page is ready 308 | // even if the second and the third pages are already received. For this use case 309 | // we don't really care about the order of the comments so we can use 310 | // pages_ahead_unordered to unblock the stream as soon as we receive a response to 311 | // any of the concurrent requests. 312 | let most_upvoted_comment = client 313 | .pages_ahead_unordered(4, Limit::None, GetCommentsRequest { blog_post_id, page_number: 1 }) 314 | .items() 315 | .try_fold(None::, |most_upvoted, next_comment| async move { 316 | match most_upvoted { 317 | Some(comment) if next_comment.upvotes > comment.upvotes => Ok(Some(next_comment)), 318 | current @ Some(_) => Ok(current), 319 | None => Ok(Some(next_comment)), 320 | } 321 | }) 322 | .await? 
323 | .unwrap(); 324 | 325 | assert_eq!(most_upvoted_comment.text, "Yeet"); 326 | assert_eq!(most_upvoted_comment.upvotes, 5); 327 | # Ok(()) 328 | # } 329 | ``` 330 | 331 | ## Page turner flavors and crate features 332 | 333 | There are multiple flavors of page turners suitable for different contexts and 334 | each flavor is available behind a feature flag. By default the `mt` feature is 335 | enabled which provides you [`crate::mt::PageTurner`] that works with 336 | multithreaded executors. The crate root `prelude` just reexports the 337 | `crate::mt::prelude::*`, and types at the crate root are reexported from 338 | `crate::mt`. Overall there are the following page turner flavors and 339 | corresponding features available: 340 | 341 | - [local](crate::local): Provides a less constrained [`crate::local::PageTurner`] suitable 342 | for singlethreaded executors. Use `page_turner::local::prelude::*` to work 343 | with it. 344 | - [mutable](crate::mutable): A [`crate::mutable::PageTurner`] is like `local` but even allows your 345 | client to mutate during the request execution. Use 346 | `page_turner::mutable::prelude::*` to work with it. 347 | - [mt](crate::mt): A [`crate::mt::PageTurner`] for multithreaded executors. It's 348 | reexported by default but if you enable additional features in the same 349 | project it is recommended to use `page_turner::mt::prelude::*` to distinguish 350 | between different flavors of page turners. 351 | - [dynamic](crate::dynamic): An object safe [`crate::dynamic::PageTurner`] that requires 352 | `async_trait` to be implemented and can be used as an object with dynamic 353 | dispatch. 
354 | 355 | -------------------------------------------------------------------------------- /src/doc/PageError: -------------------------------------------------------------------------------- 1 | A type alias helpful to resolve [`PageTurner::PageError`] in generic contexts 2 | -------------------------------------------------------------------------------- /src/doc/PageItems: -------------------------------------------------------------------------------- 1 | A type alias helpful to resolve [`PageTurner::PageItems`] in generic contexts 2 | -------------------------------------------------------------------------------- /src/doc/PageTurner: -------------------------------------------------------------------------------- 1 | The trait is supposed to be implemented on API clients. 2 | You need to specify the [`PageTurner::PageItems`]s to 3 | return and [`PageTurner::PageError`]s that may occur for 4 | a particular request. Then you should implement the 5 | [`PageTurner::turn_page`] method to describe how to 6 | query a single page and how to prepare a request for the 7 | next page. After that default [`PageTurner::pages`] and 8 | [`PageTurner::into_pages`] methods become available to 9 | provide a stream based querying API. 10 | -------------------------------------------------------------------------------- /src/doc/PageTurnerFuture: -------------------------------------------------------------------------------- 1 | A type alias that names the [`PageTurner::turn_page`] future if it needs to be stored somewhere. As of Rust 1.75 we must use a dyn object to store `impl Future` futures. 2 | -------------------------------------------------------------------------------- /src/doc/PageTurner__into_pages: -------------------------------------------------------------------------------- 1 | Returns an owned [`PagesStream`]. 2 | 3 | In certain situations you can't use streams with borrows. 
For example, you 4 | can't use a stream with a borrow if a stream should outlive a client in APIs 5 | like this one: 6 | 7 | ```ignore 8 | fn get_stuff(params: Params) -> impl Stream { 9 | // This client is not needed anywhere else and is cheap to create. 10 | let client = StuffClient::new(); 11 | client.pages(GetStuff::from_params(params)) 12 | } 13 | ``` 14 | 15 | The client gets dropped after the `.pages` call but the stream we're 16 | returning needs an internal reference to the client in order to perform 17 | the querying. This situation can be fixed by simply using `into_pages` variant 18 | instead: 19 | 20 | 21 | ```ignore 22 | fn get_stuff(params: Params) -> impl Stream { 23 | let client = StuffClient::new(params); 24 | client.into_pages(GetStuff::from_params(params)) 25 | } 26 | ``` 27 | 28 | Now the client is consumed into the stream to be used internally. 29 | -------------------------------------------------------------------------------- /src/doc/PageTurner__into_pages_ahead: -------------------------------------------------------------------------------- 1 | This method exists for the same reason described in [`PageTurner::into_pages`]. See [`PageTurner::pages_ahead`] for the description. 2 | -------------------------------------------------------------------------------- /src/doc/PageTurner__into_pages_ahead_unordered: -------------------------------------------------------------------------------- 1 | This method exists for the same reason described in [`PageTurner::into_pages`]. 2 | See [`PageTurner::pages_ahead_unordered`] for the behavior docs. 3 | -------------------------------------------------------------------------------- /src/doc/PageTurner__pages: -------------------------------------------------------------------------------- 1 | Returns a stream that queries pages one by one. 
For example, you 4 | can't use a stream with a borrow if a stream should outlive a client in APIs 5 | like this one: 6 | 7 | ```ignore 8 | fn get_stuff(params: Params) -> impl Stream { 9 | // This client is not needed anywhere else and is cheap to create. 10 | let client = StuffClient::new(); 11 | client.pages(GetStuff::from_params(params)) 12 | } 13 | ``` 14 | 15 | The client gets dropped after the `.pages` call but the stream we're 16 | returning needs an internal reference to the client in order to perform 17 | the querying. This situation can be fixed by simply using `into_pages` variant 18 | instead: 19 | 20 | 21 | ```ignore 22 | fn get_stuff(params: Params) -> impl Stream { 23 | let client = StuffClient::new(params); 24 | client.into_pages(GetStuff::from_params(params)) 25 | } 26 | ``` 27 | 28 | Now the client is consumed into the stream to be used internally. 29 | -------------------------------------------------------------------------------- /src/doc/PageTurner__into_pages_ahead: -------------------------------------------------------------------------------- 1 | This method exists for the same reason described in [`PageTurner::into_pages`]. See [`PageTurner::pages_ahead`] for the description. 2 | -------------------------------------------------------------------------------- /src/doc/PageTurner__into_pages_ahead_unordered: -------------------------------------------------------------------------------- 1 | This method exists for the same reason described in [`PageTurner::into_pages`]. 2 | See [`PageTurner::pages_ahead_unordered`] for the behavior docs. 3 | -------------------------------------------------------------------------------- /src/doc/PageTurner__pages: -------------------------------------------------------------------------------- 1 | Returns a stream that queries pages one by one. 
20 | 21 | To discover the end of the stream [`TurnedPage::next_request`] is being checked 22 | for the availability of the next request but the actual `next_request` is 23 | always taken from [`RequestAhead::next_request`] thus it's possible to get 24 | different results when using [`PageTurner::pages`] vs 25 | [`PageTurner::pages_ahead`] streams if you construct next requests in 26 | [`RequestAhead::next_request`] and in [`PageTurner::turn_page`] 27 | differently. This is considered to be a logical bug, by the contract 28 | streams must be identical and it's up to you to ensure that. 29 | 30 | # Errors 31 | 32 | If errors appear past the last existing page they're being discarded. 33 | Otherwise, the error for the first failed page is being returned and the stream 34 | ends after it. 35 | 36 | -------------------------------------------------------------------------------- /src/doc/PageTurner__pages_ahead_unordered: -------------------------------------------------------------------------------- 1 | Behaves like [`PageTurner::pages_ahead`] with the difference that pages are 2 | returned as soon as they become available in an arbitrary order. This has an 3 | important consequence though, this method postpones returning an error until 4 | all `request_pages_ahead` requests in a chunk are completed, and it awaits for 5 | all scheduled requests to complete after it finds the last existing page too, 6 | therefore, it's recommended to pick small `request_pages_ahead` amounts to make 7 | the method behave optimally. 8 | 9 | Be careful using `take` and similar stream combinators on this stream as its 10 | unordered nature will cause inconsistent results. Prefer [`Limit::Pages`] 11 | instead. 12 | 13 | # Errors 14 | 15 | If errors appear past the last existing page they're being discarded. 16 | Otherwise, the error for the first(in the requests generation order) failed 17 | page is being returned and the stream ends after it. 
18 | 19 | # Detailed explanation 20 | 21 | Unlike [`PageTurner::pages_ahead`] this method will return all successful 22 | responses in a chunk even after some error has occurred. It is also guaranteed 23 | that if multiple errors have occurred only the one for the earliest issued request 24 | will be returned no matter the actual request completion order. 25 | 26 | Successful case example: 27 | 28 | ```text 29 | Pages on resource: [1,2,3,4] 30 | Initial Requests for requests_ahead_count = 3: [[1,2,3]] 31 | 32 | Got response for 2nd page: [2+,[1,3,4]] 33 | Got response for 3rd page: [2+, 3+, [1,4,5]] 34 | Got error for 5th page: [2+, 3+, [1,4]], postponed(5) 35 | Got response for 4th page: [2+, 3+, 4+, [1] | 5* ] 36 | Got response for 1st page: [2+, 3+, 4+, 1+] 37 | 38 | 39 | Resulting stream: [2, 3, 4, 1] 40 | ``` 41 | 42 | In the example above when the first error occurs we stop scheduling new futures 43 | and wait for the already scheduled ones `[1, 4]` to complete postponing the 44 | error of the 5th page for later. Then we receive `4` and detect that it is the 45 | last page, therefore, the error for the 5th page is being discarded. After 46 | we've found the last page we simply wait until all scheduled futures are 47 | resolved and there is only a single one `[1]` that ends the stream upon 48 | completion. 
49 | 50 | Error case example: 51 | 52 | ```text 53 | Pages on resource: [1,2,3,4] 54 | Initial Requests for requests_ahead_count = 4: [[1,2,3,4]] 55 | 56 | Got response for 2nd page: [2+,[1,3,4,5]] 57 | Got error for 3rd page: [2+, 3+, [1,4,5]], postponed(3), 58 | Got error for 5th page: [2+, 3+, [1,4]], postponed(3), discarded(5) 59 | Got error for 1st page: [2+, 3+, [4]], postponed(1), discarded(3) 60 | Got response for 4th page: [2+, 3+, 4+, Err(1) | ] 61 | 62 | 63 | Resulting stream: [2, 3, 4, Err(1)] 64 | ``` 65 | 66 | In the example above when we receive an error for the 5th page we discard it 67 | immediately as we already have an error for the earlier 3rd request, then when 68 | the 1st request results in an error we replace the error of the 3rd page with 69 | it as the 1st request comes before. At the last step we receive the 4th page 70 | and detect that it is the last existing page, there are no more futures left in 71 | a chunk, and the error we have comes before the last existing page, therefore we 72 | include the error at the end of the stream. 73 | -------------------------------------------------------------------------------- /src/doc/PageTurner__turn_page: -------------------------------------------------------------------------------- 1 | Return either Ok([`TurnedPage`]) or Err([`PageTurner::PageError`]). Be careful 2 | not to return the same request in the [`TurnedPage::next_request`] or you will 3 | be stuck in the infinite loop. 4 | -------------------------------------------------------------------------------- /src/doc/PagesStream: -------------------------------------------------------------------------------- 1 | An extension of the [`futures::stream::Stream`] that provides some extra ergonomic methods specifically for [`PageTurner`] use cases. 
2 | -------------------------------------------------------------------------------- /src/doc/PagesStream__items: -------------------------------------------------------------------------------- 1 | Allows to iterate over individual page items instead of whole pages if 2 | `PageItems` is a container type(yields `String` if `PageItems = Vec`). 3 | -------------------------------------------------------------------------------- /src/doc/TurnedPageResult: -------------------------------------------------------------------------------- 1 | A successfully [`TurnedPage`] or a [`PageTurner::PageError`] 2 | -------------------------------------------------------------------------------- /src/doc/prelude: -------------------------------------------------------------------------------- 1 | Minimal reexports you need to work with a page turner. 2 | -------------------------------------------------------------------------------- /src/internal/itertools.rs: -------------------------------------------------------------------------------- 1 | use crate::{Limit, RequestAhead}; 2 | 3 | pub type RequestChunks = Chunks>; 4 | pub type EnumerableRequestChunks = Chunks>>; 5 | 6 | pub struct RequestIter { 7 | cur_request: Option, 8 | limit: Limit, 9 | counter: usize, 10 | } 11 | 12 | impl RequestIter { 13 | pub fn new(req: R, limit: Limit) -> Self { 14 | Self { 15 | cur_request: Some(req), 16 | limit, 17 | counter: 0, 18 | } 19 | } 20 | } 21 | 22 | impl Iterator for RequestIter 23 | where 24 | R: RequestAhead, 25 | { 26 | type Item = R; 27 | 28 | fn next(&mut self) -> Option { 29 | if let Limit::Pages(pages) = self.limit { 30 | if self.counter >= pages { 31 | return None; 32 | } 33 | } 34 | 35 | let next_request = self 36 | .cur_request 37 | .as_ref() 38 | .map(::next_request); 39 | 40 | let request_to_ret = self.cur_request.take(); 41 | 42 | self.cur_request = next_request; 43 | self.counter += 1; 44 | 45 | request_to_ret 46 | } 47 | } 48 | 49 | pub trait ChunksExt: Sized { 50 | fn 
chunks(self, chunk_size: usize) -> Chunks; 51 | } 52 | 53 | impl ChunksExt for I { 54 | fn chunks(self, chunk_size: usize) -> Chunks { 55 | Chunks::new(self, chunk_size) 56 | } 57 | } 58 | 59 | pub struct Chunks { 60 | iter: I, 61 | chunk_size: usize, 62 | } 63 | 64 | impl Chunks { 65 | pub fn new(iter: I, chunk_size: usize) -> Self { 66 | Self { iter, chunk_size } 67 | } 68 | } 69 | 70 | impl Chunks { 71 | pub fn next_chunk(&mut self) -> Option> { 72 | if self.chunk_size == 0 { 73 | None 74 | } else { 75 | self.iter.next().map(|first| Chunk::new(self, first)) 76 | } 77 | } 78 | 79 | pub fn next_item(&mut self) -> Option { 80 | self.iter.next() 81 | } 82 | } 83 | 84 | pub struct Chunk<'c, I: Iterator> { 85 | chunks: &'c mut Chunks, 86 | first: Option, 87 | yielded_count: usize, 88 | } 89 | 90 | impl<'c, I: Iterator> Chunk<'c, I> { 91 | pub fn new(chunks: &'c mut Chunks, first: I::Item) -> Self { 92 | Self { 93 | chunks, 94 | first: Some(first), 95 | yielded_count: 0, 96 | } 97 | } 98 | } 99 | 100 | impl<'c, I: Iterator> Iterator for Chunk<'c, I> { 101 | type Item = I::Item; 102 | 103 | fn next(&mut self) -> Option { 104 | if self.yielded_count < self.chunks.chunk_size { 105 | self.yielded_count += 1; 106 | 107 | match self.first.take() { 108 | first @ Some(_) => first, 109 | None => self.chunks.iter.next(), 110 | } 111 | } else { 112 | None 113 | } 114 | } 115 | } 116 | 117 | #[cfg(test)] 118 | mod tests { 119 | use super::*; 120 | 121 | struct DumbRequest { 122 | page: usize, 123 | } 124 | 125 | impl Default for DumbRequest { 126 | fn default() -> Self { 127 | Self { page: 1 } 128 | } 129 | } 130 | 131 | impl RequestAhead for DumbRequest { 132 | fn next_request(&self) -> Self { 133 | Self { 134 | page: self.page + 1, 135 | } 136 | } 137 | } 138 | 139 | #[test] 140 | fn request_iter() { 141 | let last = RequestIter::new(DumbRequest::default(), Limit::None) 142 | .take(20) 143 | .last(); 144 | 145 | assert_eq!(last.map(|req| req.page), Some(20)); 146 | 147 | let 
last = RequestIter::new(DumbRequest::default(), Limit::Pages(8)) 148 | .take(20) 149 | .last(); 150 | 151 | assert_eq!(last.map(|req| req.page), Some(8)); 152 | 153 | let last = RequestIter::new(DumbRequest::default(), Limit::Pages(0)) 154 | .take(20) 155 | .last(); 156 | 157 | assert_eq!(last.map(|req| req.page), None); 158 | } 159 | 160 | #[test] 161 | fn chunks_ext() { 162 | let mut chunks = RequestIter::new(DumbRequest::default(), Limit::Pages(20)).chunks(4); 163 | let mut chunks_count = 0; 164 | while let Some(chunk) = chunks.next_chunk() { 165 | let requests: Vec<_> = chunk.collect(); 166 | 167 | assert_eq!(requests.len(), 4); 168 | assert_eq!(requests.last().unwrap().page % 4, 0); 169 | 170 | chunks_count += 1; 171 | } 172 | 173 | assert_eq!(chunks_count, 5); 174 | 175 | let mut chunks = RequestIter::new(DumbRequest::default(), Limit::Pages(13)).chunks(4); 176 | 177 | let mut chunks_count = 0; 178 | while let Some(chunk) = chunks.next_chunk() { 179 | let requests: Vec<_> = chunk.collect(); 180 | 181 | if chunks_count != 3 { 182 | assert_eq!(requests.len(), 4); 183 | assert_eq!(requests.last().unwrap().page % 4, 0); 184 | } else { 185 | assert_eq!(requests.len(), 1); 186 | assert_eq!(requests.last().unwrap().page, 13); 187 | } 188 | 189 | chunks_count += 1; 190 | } 191 | 192 | assert_eq!(chunks_count, 4); 193 | 194 | let mut chunks = RequestIter::new(DumbRequest::default(), Limit::Pages(20)).chunks(1); 195 | let mut chunks_count = 0; 196 | while let Some(chunk) = chunks.next_chunk() { 197 | let requests: Vec<_> = chunk.collect(); 198 | 199 | assert_eq!(requests.len(), 1); 200 | assert_eq!(requests.last().unwrap().page - 1, chunks_count); 201 | 202 | chunks_count += 1; 203 | } 204 | 205 | assert_eq!(chunks_count, 20); 206 | 207 | let mut chunks = RequestIter::new(DumbRequest::default(), Limit::None).chunks(0); 208 | assert!(chunks.next_chunk().is_none()) 209 | } 210 | } 211 | -------------------------------------------------------------------------------- 
/src/internal/mod.rs: -------------------------------------------------------------------------------- 1 | //! Module with page turner methods implementation. 2 | //! 3 | //! Most of the logic is put inside macro_rules! because the crate provides different types of page 4 | //! turners and all of them have slightly different signatures even though function bodies and trait 5 | //! impls are the same for them. 6 | //! 7 | //! WARNING: All macros here are magical! They implicitly expect some defined/imported types and 8 | //! type aliases in the calling contexts and you must read their bodies in order to understand what 9 | //! arguments they accept. The idea is by calling a macro in a context with specific imports/type 10 | //! aliases you generate specific for those types code. This was done as a compromise between 11 | //! public API readability and a code verbosity, there are just too many types involved to hide them 12 | //! inside macros or to bother with some macro syntax to enumerate them all. 13 | //! 14 | //! It turned out that every page turner requires everything from this module to be fully 15 | //! implemented so it's ok to abuse glob imports(`use internal::*;`) in page turner modules. 
16 | 17 | pub mod itertools; 18 | pub mod pages; 19 | pub mod pages_ahead; 20 | pub mod pages_ahead_unordered; 21 | 22 | pub use itertools::*; 23 | pub use pages::PagesState; 24 | 25 | pub(crate) use pages::request_next_page_decl; 26 | pub(crate) use pages_ahead::{pages_ahead_state_def, request_pages_ahead_decl}; 27 | pub(crate) use pages_ahead_unordered::{ 28 | pages_ahead_unordered_state_def, request_pages_ahead_unordered_decl, 29 | }; 30 | -------------------------------------------------------------------------------- /src/internal/pages.rs: -------------------------------------------------------------------------------- 1 | pub struct PagesState { 2 | pub page_turner: P, 3 | pub next_request: Option, 4 | } 5 | 6 | impl PagesState { 7 | pub fn new(page_turner: P, request: R) -> Self { 8 | Self { 9 | page_turner, 10 | next_request: Some(request), 11 | } 12 | } 13 | } 14 | 15 | macro_rules! request_next_page_decl { 16 | ($($extra_bounds:tt)*) => { 17 | async fn request_next_page( 18 | mut state: crate::internal::pages::PagesState, 19 | ) -> Result, crate::internal::pages::PagesState)>, PageError> 20 | where 21 | P: PageTurner, 22 | $($extra_bounds)* 23 | { 24 | let request = match state.next_request { 25 | Some(request) => request, 26 | None => return Ok(None), 27 | }; 28 | 29 | let TurnedPage { 30 | items, 31 | next_request, 32 | } = state.page_turner.turn_page(request).await?; 33 | 34 | state.next_request = next_request; 35 | Ok(Some((items, state))) 36 | } 37 | }; 38 | } 39 | 40 | pub(crate) use request_next_page_decl; 41 | -------------------------------------------------------------------------------- /src/internal/pages_ahead.rs: -------------------------------------------------------------------------------- 1 | macro_rules! 
pages_ahead_state_def { 2 | ($($extra_bounds:tt)*) => { 3 | struct PagesAheadState<'p, P, R> 4 | where 5 | P: 'p + PageTurner, 6 | $($extra_bounds)* 7 | { 8 | page_turner: P, 9 | requests: RequestChunks, 10 | in_progress: FuturesOrdered>, 11 | last_page_queried: bool, 12 | } 13 | 14 | impl<'p, P, R> PagesAheadState<'p, P, R> 15 | where 16 | P: 'p + PageTurner, 17 | R: 'p + RequestAhead, 18 | $($extra_bounds)* 19 | { 20 | pub fn new(page_turner: P, request: R, chunk_size: usize, limit: Limit) -> Self { 21 | let requests = RequestIter::new(request, limit).chunks(chunk_size); 22 | Self { 23 | page_turner, 24 | requests, 25 | in_progress: FuturesOrdered::new(), 26 | last_page_queried: false, 27 | } 28 | } 29 | } 30 | }; 31 | } 32 | 33 | macro_rules! request_pages_ahead_decl { 34 | ($($extra_bounds:tt)*) => { 35 | async fn request_pages_ahead<'p, P, R>( 36 | mut state: Box>, 37 | ) -> Result, Box>)>, PageError> 38 | where 39 | P: 'p + Clone + PageTurner, 40 | R: 'p + RequestAhead, 41 | $($extra_bounds)* 42 | { 43 | if state.last_page_queried { 44 | return Ok(None); 45 | } 46 | 47 | if state.in_progress.is_empty() { 48 | match state.requests.next_chunk() { 49 | // If chunk is some then there is at least 1 request inside 50 | Some(chunk) => { 51 | for req in chunk { 52 | let local_page_turner = state.page_turner.clone(); 53 | state.in_progress.push_back(Box::pin(async move { 54 | local_page_turner.turn_page(req).await 55 | })); 56 | } 57 | } 58 | None => { 59 | return Ok(None); 60 | } 61 | } 62 | } else { 63 | // At this point the first request succeeded. Lets push the next one from the next_chunk to proceed in 64 | // a sliding window maner. 65 | if let Some(req) = state.requests.next_item() { 66 | let local_page_turner = state.page_turner.clone(); 67 | state.in_progress.push_back(Box::pin( 68 | async move { local_page_turner.turn_page(req).await }, 69 | )) 70 | } 71 | } 72 | 73 | match state.in_progress.try_next().await? 
{ 74 | Some(TurnedPage { 75 | items, 76 | next_request, 77 | }) => { 78 | state.last_page_queried = next_request.is_none(); 79 | Ok(Some((items, state))) 80 | } 81 | None => { 82 | unreachable!( 83 | "BUG(page-turner): We ensured that the ordered futures queue is not empty right above" 84 | ) 85 | } 86 | } 87 | } 88 | 89 | }; 90 | } 91 | 92 | pub(crate) use pages_ahead_state_def; 93 | pub(crate) use request_pages_ahead_decl; 94 | -------------------------------------------------------------------------------- /src/internal/pages_ahead_unordered.rs: -------------------------------------------------------------------------------- 1 | macro_rules! pages_ahead_unordered_state_def { 2 | ($($extra_bounds:tt)*) => { 3 | struct PagesAheadUnorderedState<'p, P, R> 4 | where 5 | P: 'p + PageTurner, 6 | $($extra_bounds)* 7 | { 8 | page_turner: P, 9 | numbered_requests: EnumerableRequestChunks, 10 | in_progress: FuturesUnordered>, 11 | first_error: Option<(usize, PageError)>, 12 | last_page: Option, 13 | } 14 | 15 | impl<'p, P, R> PagesAheadUnorderedState<'p, P, R> 16 | where 17 | P: 'p + PageTurner, 18 | R: 'p + RequestAhead, 19 | $($extra_bounds)* 20 | { 21 | fn new(page_turner: P, request: R, chunk_size: usize, limit: Limit) -> Self { 22 | let numbered_requests = RequestIter::new(request, limit) 23 | .enumerate() 24 | .chunks(chunk_size); 25 | 26 | Self { 27 | page_turner, 28 | numbered_requests, 29 | in_progress: FuturesUnordered::new(), 30 | first_error: None, 31 | last_page: None, 32 | } 33 | } 34 | 35 | /// Updates the error so that an error with the least `new_err_num` remains while other ones 36 | /// get discarded 37 | fn update_err(&mut self, new_err_num: usize, new_err: PageError) { 38 | match &self.first_error { 39 | Some((old_err_num, _)) if new_err_num < *old_err_num => { 40 | self.first_error = Some((new_err_num, new_err)); 41 | } 42 | Some(_) => {} 43 | None => self.first_error = Some((new_err_num, new_err)), 44 | } 45 | } 46 | } 47 | }; 48 | } 49 | 50 | 
macro_rules! request_pages_ahead_unordered_decl { 51 | ($($extra_bounds:tt)*) => { 52 | async fn request_pages_ahead_unordered<'p, P, R>( 53 | mut state: Box>, 54 | ) -> Result, Box>)>, PageError> 55 | where 56 | P: 'p + Clone + PageTurner, 57 | R: 'p + RequestAhead, 58 | $($extra_bounds)* 59 | { 60 | // This and nested loops are required to discard all errors except the error for the first failed request without yielding them to the user. 61 | loop { 62 | // Once we're in this branch no code below will be executed 63 | if let Some(last_page_num) = state.last_page { 64 | while let Some((num, result)) = state.in_progress.next().await { 65 | match result { 66 | Ok(turned_page) => return Ok(Some((turned_page.items, state))), 67 | Err(new_err) => { 68 | state.update_err(num, new_err); 69 | } 70 | } 71 | } 72 | 73 | match state.first_error.take() { 74 | Some((err_num, err)) if err_num <= last_page_num => { 75 | return Err(err); 76 | } 77 | // If an error occured past the last existing page it will be discarded at this 78 | // point 79 | _ => { 80 | return Ok(None); 81 | } 82 | } 83 | } 84 | 85 | // Once we're in this branch no code below will be executed 86 | while state.first_error.is_some() { 87 | match state.in_progress.next().await { 88 | Some((num, result)) => match result { 89 | Ok(TurnedPage { 90 | items, 91 | next_request, 92 | }) => { 93 | if next_request.is_none() { 94 | state.last_page = Some(num); 95 | } 96 | 97 | return Ok(Some((items, state))); 98 | } 99 | Err(new_err) => state.update_err(num, new_err), 100 | }, 101 | // If at least one of `requests_ahead_count` futures returned an error and 102 | // we haven't found the last page in other responses - return the first error 103 | None => return Err(state.first_error.unwrap().1), 104 | } 105 | } 106 | 107 | // Schedule 108 | if state.in_progress.is_empty() { 109 | // Initial schedule of the first futures chunk 110 | match state.numbered_requests.next_chunk() { 111 | // If chunk is some then there is at 
least 1 request inside 112 | Some(chunk) => { 113 | for req in chunk { 114 | let local_page_turner = state.page_turner.clone(); 115 | state.in_progress.push(Box::pin(async move { 116 | (req.0, local_page_turner.turn_page(req.1).await) 117 | })); 118 | } 119 | } 120 | None => { 121 | return Ok(None); 122 | } 123 | } 124 | } else { 125 | // At this point one of the first requests succeeded. Lets push the next one from the next_chunk to proceed in 126 | // a sliding window maner. 127 | if let Some(req) = state.numbered_requests.next_item() { 128 | let local_page_turner = state.page_turner.clone(); 129 | state.in_progress.push(Box::pin(async move { 130 | (req.0, local_page_turner.turn_page(req.1).await) 131 | })) 132 | } 133 | } 134 | 135 | match state.in_progress.next().await { 136 | Some((num, result)) => match result { 137 | Ok(TurnedPage { 138 | items, 139 | next_request, 140 | }) => { 141 | if next_request.is_none() { 142 | state.last_page = Some(num); 143 | } 144 | 145 | return Ok(Some((items, state))); 146 | } 147 | // Don't return an error immediately, continue the loop to find the one for the 148 | // first failed page instead, or to discard an error if it occured past the last existing page 149 | Err(new_err) => state.update_err(num, new_err), 150 | }, 151 | None => { 152 | unreachable!( 153 | "BUG(page-turner): We ensured that the unordered futures queue is not empty right above") 154 | } 155 | } 156 | } 157 | } 158 | }; 159 | } 160 | 161 | pub(crate) use pages_ahead_unordered_state_def; 162 | pub(crate) use request_pages_ahead_unordered_decl; 163 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![cfg_attr(docsrs, feature(doc_cfg))] 2 | #![doc = include_str!("doc/Main.md")] 3 | 4 | #[cfg(feature = "local")] 5 | #[cfg_attr(docsrs, doc(cfg(feature = "local")))] 6 | pub mod local; 7 | 8 | #[cfg(feature = "mt")] 9 | pub mod mt; 10 
| 11 | #[cfg(feature = "mutable")] 12 | #[cfg_attr(docsrs, doc(cfg(feature = "mutable")))] 13 | pub use local::mutable; 14 | 15 | #[cfg(feature = "dynamic")] 16 | #[cfg_attr(docsrs, doc(cfg(feature = "dynamic")))] 17 | pub use mt::dynamic; 18 | 19 | // `mt` is enabled by default so prelude reexports the mt::prelude. Users will need to specify a 20 | // prelude module manually like `page_turner::local::prelude*` if they want to use other flavours 21 | // of page turner. 22 | #[cfg(feature = "mt")] 23 | #[doc = include_str!("doc/prelude")] 24 | pub mod prelude { 25 | pub use crate::mt::prelude::*; 26 | } 27 | 28 | // `mt` is enabled by default so it's reexported into the root. 29 | #[cfg(feature = "mt")] 30 | pub use crate::mt::*; 31 | 32 | /// A struct that combines items queried for the current page and an optional request to query the 33 | /// next page. If `next_request` is `None` `PageTurner` stops querying pages. 34 | /// 35 | /// [`TurnedPage::next`] and [`TurnedPage::last`] constructors can be used for convenience. 36 | pub struct TurnedPage { 37 | pub items: I, 38 | pub next_request: Option, 39 | } 40 | 41 | impl TurnedPage { 42 | pub fn new(items: I, next_request: Option) -> Self { 43 | Self { 44 | items, 45 | next_request, 46 | } 47 | } 48 | 49 | pub fn next(items: I, next_request: R) -> Self { 50 | Self { 51 | items, 52 | next_request: Some(next_request), 53 | } 54 | } 55 | 56 | pub fn last(items: I) -> Self { 57 | Self { 58 | items, 59 | next_request: None, 60 | } 61 | } 62 | } 63 | 64 | /// If a request for the next page doesn't require any data from the response and can be made out 65 | /// of the request for the current page implement this trait to enable `pages_ahead`, 66 | /// `pages_ahead_unordered` families of methods that query pages concurrently. 
67 | /// 68 | /// # Caveats 69 | /// 70 | /// - Ensure that page turner's `turn_page` returns [`TurnedPage::last`] at some point or that you 71 | /// always use [`Limit::Pages`] in `*pages_ahead*` methods, otherwise `*pages_ahead*` streams will 72 | /// always end with errors. 73 | /// 74 | /// - Ensure that page turner's `turn_page` produces equivalent next requests that query the same 75 | /// data so that `*pages_ahead*` streams and `pages` stream yield the same results. 76 | pub trait RequestAhead { 77 | fn next_request(&self) -> Self; 78 | } 79 | 80 | /// If you use `pages_ahead` or `pages_ahead_unordered` families of methods and you know in advance 81 | /// how many pages you need to query, specify [`Limit::Pages`] to prevent redundant querying past 82 | /// the last existing page from being executed. 83 | #[allow(dead_code)] 84 | #[derive(Default, Debug, Clone, Copy, PartialEq, Eq)] 85 | pub enum Limit { 86 | #[default] 87 | None, 88 | Pages(usize), 89 | } 90 | 91 | mod internal; 92 | 93 | #[cfg(test)] 94 | mod test_utils; 95 | -------------------------------------------------------------------------------- /src/local/mod.rs: -------------------------------------------------------------------------------- 1 | //! A page turner suitable for singlethreaded executors. See [`mutable`] for a version that 2 | //! allows to use &mut self in methods 3 | 4 | use crate::internal::*; 5 | use futures::{ 6 | stream::{self, FuturesOrdered, FuturesUnordered}, 7 | Stream, StreamExt, TryStreamExt, 8 | }; 9 | use std::{future::Future, pin::Pin}; 10 | 11 | pub use crate::{Limit, RequestAhead, TurnedPage}; 12 | #[doc = include_str!("../doc/prelude")] 13 | pub mod prelude { 14 | pub use super::{Limit, PageTurner, PagesStream, RequestAhead, TurnedPage, TurnedPageResult}; 15 | } 16 | 17 | #[doc = include_str!("../doc/PageItems")] 18 | pub type PageItems =

    <P as PageTurner<R>>::PageItems;
19 | #[doc = include_str!("../doc/PageError")]
20 | pub type PageError<P, R> =

>::PageError; 21 | #[doc = include_str!("../doc/TurnedPageResult")] 22 | pub type TurnedPageResult = Result, R>, PageError>; 23 | #[doc = include_str!("../doc/PageTurnerFuture")] 24 | pub type PageTurnerFuture<'a, P, R> = Pin>>>; 25 | 26 | type NumberedRequestFuture<'a, P, R> = 27 | Pin)>>>; 28 | 29 | /// This is one of the less constrained page turners which produces `?Send`(may be Send) futures 30 | /// and streams that should run on single threaded executors. Occasionally, it might also work with 31 | /// multithreaded executors but it's not recommended to abuse that if you write a maintainable 32 | /// code. 33 | /// 34 | #[doc = include_str!("../doc/PageTurner")] 35 | pub trait PageTurner: Sized { 36 | type PageItems; 37 | type PageError; 38 | 39 | #[doc = include_str!("../doc/PageTurner__turn_page")] 40 | fn turn_page(&self, request: R) -> impl Future>; 41 | 42 | #[doc = include_str!("../doc/PageTurner__pages")] 43 | fn pages<'s>(&self, request: R) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 44 | where 45 | R: 's, 46 | { 47 | stream::try_unfold(PagesState::new(self, request), request_next_page) 48 | } 49 | 50 | #[doc = include_str!("../doc/PageTurner__into_pages")] 51 | fn into_pages<'s>(self, request: R) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 52 | where 53 | Self: 's, 54 | R: 's, 55 | { 56 | stream::try_unfold(PagesState::new(self, request), request_next_page) 57 | } 58 | 59 | #[doc = include_str!("../doc/PageTurner__pages_ahead")] 60 | fn pages_ahead<'s>( 61 | &'s self, 62 | requests_ahead_count: usize, 63 | limit: Limit, 64 | request: R, 65 | ) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 66 | where 67 | R: 's + RequestAhead, 68 | { 69 | stream::try_unfold( 70 | Box::new(PagesAheadState::new( 71 | self, 72 | request, 73 | requests_ahead_count, 74 | limit, 75 | )), 76 | request_pages_ahead, 77 | ) 78 | } 79 | 80 | #[doc = include_str!("../doc/PageTurner__into_pages_ahead")] 81 | fn into_pages_ahead<'s>( 82 | 
self, 83 | requests_ahead_count: usize, 84 | limit: Limit, 85 | request: R, 86 | ) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 87 | where 88 | Self: 's + Clone, 89 | R: 's + RequestAhead, 90 | { 91 | stream::try_unfold( 92 | Box::new(PagesAheadState::new( 93 | self, 94 | request, 95 | requests_ahead_count, 96 | limit, 97 | )), 98 | request_pages_ahead, 99 | ) 100 | } 101 | 102 | #[doc = include_str!("../doc/PageTurner__pages_ahead_unordered")] 103 | fn pages_ahead_unordered<'s>( 104 | &'s self, 105 | requests_ahead_count: usize, 106 | limit: Limit, 107 | request: R, 108 | ) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 109 | where 110 | R: 's + RequestAhead, 111 | { 112 | stream::try_unfold( 113 | Box::new(PagesAheadUnorderedState::new( 114 | self, 115 | request, 116 | requests_ahead_count, 117 | limit, 118 | )), 119 | request_pages_ahead_unordered, 120 | ) 121 | } 122 | 123 | #[doc = include_str!("../doc/PageTurner__into_pages_ahead_unordered")] 124 | fn into_pages_ahead_unordered<'s>( 125 | self, 126 | requests_ahead_count: usize, 127 | limit: Limit, 128 | request: R, 129 | ) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 130 | where 131 | Self: 's + Clone, 132 | R: 's + RequestAhead, 133 | { 134 | stream::try_unfold( 135 | Box::new(PagesAheadUnorderedState::new( 136 | self, 137 | request, 138 | requests_ahead_count, 139 | limit, 140 | )), 141 | request_pages_ahead_unordered, 142 | ) 143 | } 144 | } 145 | 146 | impl PageTurner for D 147 | where 148 | D: std::ops::Deref, 149 | P: PageTurner, 150 | { 151 | type PageItems = PageItems; 152 | type PageError = PageError; 153 | 154 | async fn turn_page(&self, request: R) -> TurnedPageResult { 155 | self.deref().turn_page(request).await 156 | } 157 | } 158 | 159 | #[doc = include_str!("../doc/PagesStream")] 160 | pub trait PagesStream<'a, T, E>: Stream> { 161 | #[doc = include_str!("../doc/PagesStream__items")] 162 | fn items(self) -> impl 'a + Stream::Item, E>> 163 | where 164 | 
Self: 'a, 165 | T: IntoIterator; 166 | } 167 | 168 | impl<'a, S, T, E> PagesStream<'a, T, E> for S 169 | where 170 | S: Stream>, 171 | { 172 | fn items(self) -> impl 'a + Stream::Item, E>> 173 | where 174 | Self: 'a, 175 | T: IntoIterator, 176 | { 177 | self.map_ok(|items| stream::iter(items.into_iter().map(Ok))) 178 | .try_flatten() 179 | } 180 | } 181 | 182 | pages_ahead_state_def!(); 183 | pages_ahead_unordered_state_def!(); 184 | 185 | request_next_page_decl!(); 186 | request_pages_ahead_decl!(); 187 | request_pages_ahead_unordered_decl!(); 188 | 189 | #[cfg(feature = "mutable")] 190 | #[cfg_attr(docsrs, doc(cfg(feature = "mutable")))] 191 | pub mod mutable { 192 | //! Provides a page turner which takes `&mut self` instead of `&self` if you don't want to bother 193 | //! with interior mutability in single threaded contexts. 194 | 195 | use crate::internal::*; 196 | use futures::stream; 197 | use std::{future::Future, pin::Pin}; 198 | 199 | pub use super::PagesStream; 200 | pub use crate::{Limit, RequestAhead, TurnedPage}; 201 | #[doc = include_str!("../doc/prelude")] 202 | pub mod prelude { 203 | pub use super::{ 204 | Limit, PageTurner, PagesStream, RequestAhead, TurnedPage, TurnedPageResult, 205 | }; 206 | } 207 | 208 | #[doc = include_str!("../doc/PageItems")] 209 | pub type PageItems =

    <P as PageTurner<R>>::PageItems;
210 | #[doc = include_str!("../doc/PageError")]
211 | pub type PageError<P, R> =

>::PageError; 212 | #[doc = include_str!("../doc/TurnedPageResult")] 213 | pub type TurnedPageResult = Result, R>, PageError>; 214 | #[doc = include_str!("../doc/PageTurnerFuture")] 215 | pub type PageTurnerFuture<'a, P, R> = 216 | Pin>>>; 217 | 218 | /// The least constrained page turner that allows an implementor to mutate during request 219 | /// execution and, therefore, doesn't provide the `pages_ahead` family of methods as it's 220 | /// invalid to hold multiple `&mut self` references concurrently. For uses in single threaded 221 | /// contexts when you don't want to bother with interior mutability of the implementor. 222 | /// 223 | #[doc = include_str!("../doc/PageTurner")] 224 | pub trait PageTurner: Sized { 225 | type PageItems; 226 | type PageError; 227 | 228 | #[doc = include_str!("../doc/PageTurner__turn_page")] 229 | fn turn_page(&mut self, request: R) -> impl Future>; 230 | 231 | #[doc = include_str!("../doc/PageTurner__pages")] 232 | fn pages<'s>( 233 | &'s mut self, 234 | request: R, 235 | ) -> impl PagesStream<'s, PageItems, PageError> 236 | where 237 | R: 's, 238 | { 239 | stream::try_unfold(PagesState::new(self, request), request_next_page) 240 | } 241 | 242 | #[doc = include_str!("../doc/PageTurner__into_pages")] 243 | fn into_pages<'s>( 244 | self, 245 | request: R, 246 | ) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 247 | where 248 | Self: 's, 249 | R: 's, 250 | { 251 | stream::try_unfold(PagesState::new(self, request), request_next_page) 252 | } 253 | } 254 | 255 | impl PageTurner for &mut P 256 | where 257 | P: PageTurner, 258 | { 259 | type PageItems = PageItems; 260 | type PageError = PageError; 261 | 262 | fn turn_page(&mut self, request: R) -> impl Future> { 263 | P::turn_page(self, request) 264 | } 265 | } 266 | 267 | impl PageTurner for Box

268 | where 269 | P: PageTurner, 270 | { 271 | type PageItems = PageItems; 272 | type PageError = PageError; 273 | 274 | fn turn_page(&mut self, request: R) -> impl Future> { 275 | P::turn_page(self.as_mut(), request) 276 | } 277 | } 278 | 279 | request_next_page_decl!(); 280 | } 281 | 282 | #[cfg(test)] 283 | mod tests; 284 | -------------------------------------------------------------------------------- /src/local/tests.rs: -------------------------------------------------------------------------------- 1 | use crate::local::{prelude::*, PageError, PageItems}; 2 | use crate::test_utils::*; 3 | use futures::TryStreamExt; 4 | 5 | #[tokio::test(flavor = "current_thread")] 6 | async fn pages() { 7 | pages_base_test!().await; 8 | generic_pages_usage(NumbersClient::new(17, 3), GetNumbersQuery::default()).await; 9 | } 10 | 11 | #[tokio::test(flavor = "current_thread")] 12 | async fn pages_ahead() { 13 | pages_ahead_base_test!().await; 14 | generic_pages_ahead_usage(BlogClient::new(49), GetContentRequest { page: 0 }).await; 15 | } 16 | 17 | #[tokio::test(flavor = "current_thread")] 18 | async fn pages_ahead_unordered() { 19 | pages_ahead_unordered_base_test!().await; 20 | generic_pages_ahead_unordered_usage(BlogClient::new(49), GetContentRequest { page: 0 }).await; 21 | } 22 | 23 | page_turner_impls!(); 24 | 25 | async fn generic_pages_usage(p: P, req: R) 26 | where 27 | P: PageTurner, 28 | PageItems: IntoIterator, 29 | PageError: std::fmt::Debug, 30 | { 31 | let pages_stream = p.pages(req); 32 | generic_pages_stream_usage(pages_stream).await; 33 | } 34 | 35 | async fn generic_pages_ahead_usage(p: P, req: R) 36 | where 37 | P: PageTurner, 38 | R: RequestAhead, 39 | PageItems: IntoIterator, 40 | PageError: std::fmt::Debug, 41 | { 42 | let pages_stream = p.pages_ahead(2, Limit::None, req); 43 | generic_pages_stream_usage(pages_stream).await; 44 | } 45 | 46 | async fn generic_pages_ahead_unordered_usage(p: P, req: R) 47 | where 48 | P: PageTurner, 49 | R: RequestAhead, 50 
| PageItems: IntoIterator, 51 | PageError: std::fmt::Debug, 52 | { 53 | let pages_stream = p.pages_ahead(2, Limit::None, req); 54 | generic_pages_stream_usage(pages_stream).await; 55 | } 56 | 57 | async fn generic_pages_stream_usage<'p, T, E>(s: impl 'p + PagesStream<'p, T, E>) 58 | where 59 | T: IntoIterator, 60 | E: std::fmt::Debug, 61 | { 62 | std::pin::pin!(s.items()).try_next().await.unwrap(); 63 | } 64 | 65 | #[cfg(feature = "mutable")] 66 | mod mutable { 67 | use crate::mutable::{prelude::*, PageError, PageItems}; 68 | use crate::test_utils::*; 69 | use futures::TryStreamExt; 70 | 71 | #[tokio::test(flavor = "current_thread")] 72 | async fn pages() { 73 | pages_base_test!(mut).await; 74 | generic_pages_usage(NumbersClient::new(19, 5), GetNumbersQuery::default()).await; 75 | } 76 | 77 | page_turner_impls!(mut); 78 | 79 | async fn generic_pages_usage(mut p: P, req: R) 80 | where 81 | P: PageTurner, 82 | PageItems: IntoIterator, 83 | PageError: std::fmt::Debug, 84 | { 85 | let pages_stream = p.pages(req); 86 | generic_pages_stream_usage(pages_stream).await; 87 | } 88 | 89 | async fn generic_pages_stream_usage<'p, T, E>(s: impl 'p + PagesStream<'p, T, E>) 90 | where 91 | T: IntoIterator, 92 | E: std::fmt::Debug, 93 | { 94 | std::pin::pin!(s.items()).try_next().await.unwrap(); 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /src/mt/mod.rs: -------------------------------------------------------------------------------- 1 | //! A page turner suitable for multithreaded executors. This is what you need in most cases. See 2 | //! [`dynamic`] if you also need `dyn PageTurner` objects for some reason. 
3 | 4 | use crate::internal::*; 5 | use futures::stream::{self, FuturesOrdered, FuturesUnordered, Stream, StreamExt, TryStreamExt}; 6 | use std::{future::Future, pin::Pin}; 7 | 8 | pub use crate::{Limit, RequestAhead, TurnedPage}; 9 | #[doc = include_str!("../doc/prelude")] 10 | pub mod prelude { 11 | pub use super::{Limit, PageTurner, PagesStream, RequestAhead, TurnedPage, TurnedPageResult}; 12 | } 13 | 14 | #[doc = include_str!("../doc/PageItems")] 15 | pub type PageItems =

    <P as PageTurner<R>>::PageItems;
16 | #[doc = include_str!("../doc/PageError")]
17 | pub type PageError<P, R> =

>::PageError; 18 | #[doc = include_str!("../doc/TurnedPageResult")] 19 | pub type TurnedPageResult = Result, R>, PageError>; 20 | #[doc = include_str!("../doc/PageTurnerFuture")] 21 | pub type PageTurnerFuture<'a, P, R> = 22 | Pin>>>; 23 | 24 | type NumberedRequestFuture<'a, P, R> = 25 | Pin)>>>; 26 | 27 | /// A page turner suitable for use in multithreaded contexts 28 | /// 29 | #[doc = include_str!("../doc/PageTurner")] 30 | pub trait PageTurner: Sized + Send + Sync 31 | where 32 | R: Send, 33 | { 34 | type PageItems: Send; 35 | type PageError: Send; 36 | 37 | #[doc = include_str!("../doc/PageTurner__turn_page")] 38 | fn turn_page(&self, request: R) -> impl Send + Future>; 39 | 40 | #[doc = include_str!("../doc/PageTurner__pages")] 41 | fn pages<'s>(&'s self, request: R) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 42 | where 43 | R: 's, 44 | { 45 | stream::try_unfold(PagesState::new(self, request), request_next_page) 46 | } 47 | 48 | #[doc = include_str!("../doc/PageTurner__into_pages")] 49 | fn into_pages<'s>(self, request: R) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 50 | where 51 | R: 's, 52 | Self: 's, 53 | { 54 | stream::try_unfold(PagesState::new(self, request), request_next_page) 55 | } 56 | 57 | #[doc = include_str!("../doc/PageTurner__pages_ahead")] 58 | fn pages_ahead<'s>( 59 | &'s self, 60 | requests_ahead_count: usize, 61 | limit: Limit, 62 | request: R, 63 | ) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 64 | where 65 | R: 's + RequestAhead, 66 | { 67 | stream::try_unfold( 68 | Box::new(PagesAheadState::new( 69 | self, 70 | request, 71 | requests_ahead_count, 72 | limit, 73 | )), 74 | request_pages_ahead, 75 | ) 76 | } 77 | 78 | #[doc = include_str!("../doc/PageTurner__into_pages_ahead")] 79 | fn into_pages_ahead<'s>( 80 | self, 81 | requests_ahead_count: usize, 82 | limit: Limit, 83 | request: R, 84 | ) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 85 | where 86 | R: 's + RequestAhead, 87 | 
Self: 's + Clone, 88 | { 89 | stream::try_unfold( 90 | Box::new(PagesAheadState::new( 91 | self, 92 | request, 93 | requests_ahead_count, 94 | limit, 95 | )), 96 | request_pages_ahead, 97 | ) 98 | } 99 | 100 | #[doc = include_str!("../doc/PageTurner__pages_ahead_unordered")] 101 | fn pages_ahead_unordered<'s>( 102 | &'s self, 103 | requests_ahead_count: usize, 104 | limit: Limit, 105 | request: R, 106 | ) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 107 | where 108 | R: 's + RequestAhead, 109 | { 110 | stream::try_unfold( 111 | Box::new(PagesAheadUnorderedState::new( 112 | self, 113 | request, 114 | requests_ahead_count, 115 | limit, 116 | )), 117 | request_pages_ahead_unordered, 118 | ) 119 | } 120 | 121 | #[doc = include_str!("../doc/PageTurner__into_pages_ahead_unordered")] 122 | fn into_pages_ahead_unordered<'s>( 123 | self, 124 | requests_ahead_count: usize, 125 | limit: Limit, 126 | request: R, 127 | ) -> impl PagesStream<'s, Self::PageItems, Self::PageError> 128 | where 129 | Self: 's + Clone, 130 | R: 's + RequestAhead, 131 | { 132 | stream::try_unfold( 133 | Box::new(PagesAheadUnorderedState::new( 134 | self, 135 | request, 136 | requests_ahead_count, 137 | limit, 138 | )), 139 | request_pages_ahead_unordered, 140 | ) 141 | } 142 | } 143 | 144 | impl PageTurner for D 145 | where 146 | D: Send + Sync + std::ops::Deref, 147 | P: PageTurner, 148 | R: Send, 149 | { 150 | type PageItems = PageItems; 151 | type PageError = PageError; 152 | 153 | async fn turn_page(&self, request: R) -> TurnedPageResult { 154 | self.deref().turn_page(request).await 155 | } 156 | } 157 | 158 | #[doc = include_str!("../doc/PagesStream")] 159 | pub trait PagesStream<'a, T, E>: Send + Stream> 160 | where 161 | T: Send, 162 | E: Send, 163 | { 164 | #[doc = include_str!("../doc/PagesStream__items")] 165 | fn items(self) -> impl 'a + Send + Stream::Item, E>> 166 | where 167 | Self: 'a, 168 | T: IntoIterator, 169 | ::Item: Send, 170 | ::IntoIter: Send; 171 | } 172 | 173 | 
impl<'a, S, T, E> PagesStream<'a, T, E> for S 174 | where 175 | T: Send, 176 | E: Send, 177 | S: Send + Stream>, 178 | { 179 | fn items(self) -> impl 'a + Send + Stream::Item, E>> 180 | where 181 | Self: 'a, 182 | T: IntoIterator, 183 | ::Item: Send, 184 | ::IntoIter: Send, 185 | { 186 | self.map_ok(|items| stream::iter(items.into_iter().map(Ok))) 187 | .try_flatten() 188 | } 189 | } 190 | 191 | pages_ahead_state_def!(R: Send); 192 | pages_ahead_unordered_state_def!(R: Send); 193 | 194 | request_next_page_decl!(R: Send); 195 | request_pages_ahead_decl!(R: Send); 196 | request_pages_ahead_unordered_decl!(R: Send); 197 | 198 | #[cfg(feature = "dynamic")] 199 | #[cfg_attr(docsrs, doc(cfg(feature = "dynamic")))] 200 | pub mod dynamic { 201 | //! A page turner that can be used as a `dyn` object and which yields concrete boxed types 202 | 203 | use crate::internal::*; 204 | use async_trait::async_trait; 205 | use futures::stream::{ 206 | self, BoxStream, FuturesOrdered, FuturesUnordered, Stream, StreamExt, TryStreamExt, 207 | }; 208 | use std::{future::Future, pin::Pin}; 209 | 210 | pub use super::PagesStream; 211 | pub use crate::{Limit, RequestAhead, TurnedPage}; 212 | #[doc = include_str!("../doc/prelude")] 213 | pub mod prelude { 214 | pub use super::{ 215 | BoxedPagesStream, Limit, PageTurner, PagesStream, RequestAhead, TurnedPage, 216 | TurnedPageResult, 217 | }; 218 | } 219 | 220 | #[doc = include_str!("../doc/PageItems")] 221 | pub type PageItems =

    <P as PageTurner<R>>::PageItems;
222 | #[doc = include_str!("../doc/PageError")]
223 | pub type PageError<P, R> =

>::PageError; 224 | #[doc = include_str!("../doc/TurnedPageResult")] 225 | pub type TurnedPageResult = Result, R>, PageError>; 226 | #[doc = include_str!("../doc/PageTurnerFuture")] 227 | pub type PageTurnerFuture<'a, P, R> = 228 | Pin>>>; 229 | 230 | type NumberedRequestFuture<'a, P, R> = 231 | Pin)>>>; 232 | 233 | /// A page turner which yields dynamic objects. All methods are object safe and can be used 234 | /// with dynamic dispatch. Requires `#[async_trait]` to be implemented 235 | /// 236 | #[doc = include_str!("../doc/PageTurner")] 237 | #[async_trait] 238 | pub trait PageTurner: Send + Sync 239 | where 240 | R: 'static + Send, 241 | { 242 | type PageItems: 'static + Send; 243 | type PageError: 'static + Send; 244 | 245 | #[doc = include_str!("../doc/PageTurner__turn_page")] 246 | async fn turn_page(&self, request: R) -> TurnedPageResult; 247 | 248 | #[doc = include_str!("../doc/PageTurner__pages")] 249 | fn pages(&self, request: R) -> BoxedPagesStream<'_, Self::PageItems, Self::PageError> { 250 | BoxedPagesStream( 251 | stream::try_unfold(PagesState::new(self, request), request_next_page).boxed(), 252 | ) 253 | } 254 | 255 | #[doc = include_str!("../doc/PageTurner__into_pages")] 256 | fn into_pages<'s>( 257 | self, 258 | request: R, 259 | ) -> BoxedPagesStream<'s, Self::PageItems, Self::PageError> 260 | where 261 | Self: 's + Sized, 262 | { 263 | BoxedPagesStream( 264 | stream::try_unfold(PagesState::new(self, request), request_next_page).boxed(), 265 | ) 266 | } 267 | 268 | #[doc = include_str!("../doc/PageTurner__pages_ahead")] 269 | fn pages_ahead<'s>( 270 | &'s self, 271 | requests_ahead_count: usize, 272 | limit: Limit, 273 | request: R, 274 | ) -> BoxedPagesStream<'s, Self::PageItems, Self::PageError> 275 | where 276 | R: 's + RequestAhead, 277 | { 278 | BoxedPagesStream( 279 | stream::try_unfold( 280 | Box::new(PagesAheadState::new( 281 | self, 282 | request, 283 | requests_ahead_count, 284 | limit, 285 | )), 286 | request_pages_ahead, 287 | ) 288 | 
.boxed(), 289 | ) 290 | } 291 | 292 | #[doc = include_str!("../doc/PageTurner__into_pages_ahead")] 293 | fn into_pages_ahead<'s>( 294 | self, 295 | requests_ahead_count: usize, 296 | limit: Limit, 297 | request: R, 298 | ) -> BoxedPagesStream<'s, Self::PageItems, Self::PageError> 299 | where 300 | Self: 's + Clone + Sized, 301 | R: RequestAhead, 302 | { 303 | BoxedPagesStream( 304 | stream::try_unfold( 305 | Box::new(PagesAheadState::new( 306 | self, 307 | request, 308 | requests_ahead_count, 309 | limit, 310 | )), 311 | request_pages_ahead, 312 | ) 313 | .boxed(), 314 | ) 315 | } 316 | 317 | #[doc = include_str!("../doc/PageTurner__pages_ahead_unordered")] 318 | fn pages_ahead_unordered<'s>( 319 | &'s self, 320 | requests_ahead_count: usize, 321 | limit: Limit, 322 | request: R, 323 | ) -> BoxedPagesStream<'s, Self::PageItems, Self::PageError> 324 | where 325 | R: 's + RequestAhead, 326 | { 327 | BoxedPagesStream( 328 | stream::try_unfold( 329 | Box::new(PagesAheadUnorderedState::new( 330 | self, 331 | request, 332 | requests_ahead_count, 333 | limit, 334 | )), 335 | request_pages_ahead_unordered, 336 | ) 337 | .boxed(), 338 | ) 339 | } 340 | 341 | #[doc = include_str!("../doc/PageTurner__into_pages_ahead_unordered")] 342 | fn into_pages_ahead_unordered<'s>( 343 | self, 344 | requests_ahead_count: usize, 345 | limit: Limit, 346 | request: R, 347 | ) -> BoxedPagesStream<'s, Self::PageItems, Self::PageError> 348 | where 349 | Self: 's + Clone + Sized, 350 | R: RequestAhead, 351 | { 352 | BoxedPagesStream( 353 | stream::try_unfold( 354 | Box::new(PagesAheadUnorderedState::new( 355 | self, 356 | request, 357 | requests_ahead_count, 358 | limit, 359 | )), 360 | request_pages_ahead_unordered, 361 | ) 362 | .boxed(), 363 | ) 364 | } 365 | } 366 | 367 | #[async_trait] 368 | impl PageTurner for D 369 | where 370 | D: Send + Sync + std::ops::Deref, 371 | P: ?Sized + PageTurner, 372 | R: 'static + Send, 373 | { 374 | type PageItems = PageItems; 375 | type PageError = 
PageError; 376 | 377 | async fn turn_page(&self, request: R) -> TurnedPageResult { 378 | self.deref().turn_page(request).await 379 | } 380 | } 381 | 382 | /// A boxed version of a pages stream to satisfy object safety requirements 383 | /// of [`PageTurner`] 384 | pub struct BoxedPagesStream<'a, T, E>(BoxStream<'a, Result>); 385 | 386 | impl<'a, T, E> Stream for BoxedPagesStream<'a, T, E> 387 | where 388 | T: 'static + Send, 389 | E: 'static + Send, 390 | { 391 | type Item = Result; 392 | 393 | fn poll_next( 394 | mut self: std::pin::Pin<&mut Self>, 395 | cx: &mut std::task::Context<'_>, 396 | ) -> std::task::Poll> { 397 | self.0.poll_next_unpin(cx) 398 | } 399 | } 400 | 401 | pages_ahead_state_def!(R: 'static + Send); 402 | pages_ahead_unordered_state_def!(R: 'static + Send); 403 | 404 | request_next_page_decl!(R: 'static + Send); 405 | request_pages_ahead_decl!(R: 'static + Send); 406 | request_pages_ahead_unordered_decl!(R: 'static + Send); 407 | } 408 | 409 | #[cfg(test)] 410 | mod tests; 411 | -------------------------------------------------------------------------------- /src/mt/tests.rs: -------------------------------------------------------------------------------- 1 | use crate::mt::{prelude::*, PageError, PageItems}; 2 | use crate::test_utils::*; 3 | use futures::TryStreamExt; 4 | 5 | #[tokio::test(flavor = "multi_thread", worker_threads = 2)] 6 | async fn pages() { 7 | pages_base_test!().await; 8 | generic_pages_usage(NumbersClient::new(48, 7), GetNumbersQuery::default()).await; 9 | } 10 | 11 | #[tokio::test(flavor = "multi_thread")] 12 | async fn pages_ahead() { 13 | pages_ahead_base_test!().await; 14 | generic_pages_ahead_usage(BlogClient::new(48), GetContentRequest { page: 0 }).await; 15 | } 16 | 17 | #[tokio::test(flavor = "multi_thread")] 18 | async fn pages_ahead_unordered() { 19 | pages_ahead_unordered_base_test!().await; 20 | generic_pages_ahead_unordered_usage(BlogClient::new(48), GetContentRequest { page: 0 }).await; 21 | } 22 | 23 | 
page_turner_impls!(); 24 | 25 | async fn generic_pages_usage(p: P, req: R) 26 | where 27 | P: PageTurner, 28 | R: Clone + Send, 29 | PageItems: IntoIterator, 30 | as IntoIterator>::IntoIter: Send, 31 | as IntoIterator>::Item: Send, 32 | PageError: std::fmt::Debug, 33 | { 34 | is_send(p.turn_page(req.clone())); 35 | 36 | let pages_stream = is_send(p.pages(req.clone())); 37 | generic_pages_stream_usage(pages_stream).await; 38 | } 39 | 40 | async fn generic_pages_ahead_usage(p: P, req: R) 41 | where 42 | P: PageTurner, 43 | PageItems: IntoIterator, 44 | as IntoIterator>::IntoIter: Send, 45 | as IntoIterator>::Item: Send, 46 | R: RequestAhead + Clone + Send, 47 | PageError: std::fmt::Debug, 48 | { 49 | is_send(p.turn_page(req.clone())); 50 | 51 | let pages_stream = is_send(p.pages_ahead(4, Limit::None, req.clone())); 52 | generic_pages_stream_usage(pages_stream).await; 53 | } 54 | 55 | async fn generic_pages_ahead_unordered_usage<'p, P, R>(p: P, req: R) 56 | where 57 | P: PageTurner, 58 | R: RequestAhead + Clone + Send, 59 | PageItems: IntoIterator, 60 | as IntoIterator>::IntoIter: Send, 61 | as IntoIterator>::Item: Send, 62 | PageError: std::fmt::Debug, 63 | { 64 | is_send(p.turn_page(req.clone())); 65 | 66 | let pages_stream = is_send(p.pages_ahead_unordered(4, Limit::None, req)); 67 | generic_pages_stream_usage(pages_stream).await; 68 | } 69 | 70 | async fn generic_pages_stream_usage<'p, T, E>(s: impl 'p + PagesStream<'p, T, E>) 71 | where 72 | T: Send + IntoIterator, 73 | E: std::fmt::Debug + Send, 74 | ::Item: Send, 75 | ::IntoIter: Send, 76 | { 77 | std::pin::pin!(is_send(s.items())).try_next().await.unwrap(); 78 | } 79 | 80 | fn is_send(t: T) -> T { 81 | t 82 | } 83 | 84 | #[cfg(feature = "dynamic")] 85 | mod dynamic { 86 | use super::is_send; 87 | use crate::dynamic::prelude::*; 88 | use crate::test_utils::*; 89 | use async_trait::async_trait; 90 | use futures::TryStreamExt; 91 | use std::sync::Arc; 92 | 93 | #[tokio::test(flavor = "multi_thread", 
worker_threads = 2)] 94 | async fn pages() { 95 | pages_base_test!().await; 96 | dyn_pages_usage(Arc::new(BlogClient::new(42))).await; 97 | } 98 | 99 | #[tokio::test(flavor = "multi_thread")] 100 | async fn pages_ahead() { 101 | pages_ahead_base_test!().await; 102 | dyn_pages_ahead_usage(Arc::new(BlogClient::new(42))).await; 103 | } 104 | 105 | #[tokio::test(flavor = "multi_thread")] 106 | async fn pages_ahead_unordered() { 107 | pages_ahead_unordered_base_test!().await; 108 | dyn_pages_ahead_unordered_usage(Arc::new(BlogClient::new(42))).await; 109 | } 110 | 111 | page_turner_impls!(async_trait); 112 | 113 | async fn dyn_pages_usage( 114 | p: Arc, PageError = String>>, 115 | ) { 116 | is_send(p.turn_page(GetContentRequest { page: 0 })); 117 | 118 | let pages_stream = is_send(p.pages(GetContentRequest { page: 0 })); 119 | generic_pages_stream_usage(pages_stream).await; 120 | } 121 | 122 | async fn dyn_pages_ahead_usage( 123 | p: Arc, PageError = String>>, 124 | ) { 125 | is_send(p.turn_page(GetContentRequest { page: 0 })); 126 | 127 | let pages_stream = is_send(p.pages_ahead(3, Limit::None, GetContentRequest { page: 0 })); 128 | generic_pages_stream_usage(pages_stream).await; 129 | } 130 | 131 | async fn dyn_pages_ahead_unordered_usage( 132 | p: Arc, PageError = String>>, 133 | ) { 134 | is_send(p.turn_page(GetContentRequest { page: 0 })); 135 | 136 | let pages_stream = 137 | is_send(p.pages_ahead_unordered(2, Limit::None, GetContentRequest { page: 0 })); 138 | 139 | generic_pages_stream_usage(pages_stream).await; 140 | } 141 | 142 | async fn generic_pages_stream_usage<'p, T, E>(s: impl 'p + PagesStream<'p, T, E>) 143 | where 144 | T: Send + IntoIterator, 145 | E: std::fmt::Debug + Send, 146 | ::Item: Send, 147 | ::IntoIter: Send, 148 | { 149 | std::pin::pin!(is_send(s.items())).try_next().await.unwrap(); 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /src/test_utils.rs: 
-------------------------------------------------------------------------------- 1 | #[derive(Debug)] 2 | pub struct NumbersClient { 3 | pub numbers: Vec, 4 | pub page_size: usize, 5 | } 6 | 7 | #[derive(Default, Clone)] 8 | pub struct GetNumbersQuery { 9 | pub key: usize, 10 | } 11 | 12 | impl NumbersClient { 13 | pub fn new(last_number: usize, page_size: usize) -> Self { 14 | NumbersClient { 15 | numbers: (1..=last_number).collect(), 16 | page_size, 17 | } 18 | } 19 | } 20 | 21 | pub struct BlogClient { 22 | content: Vec>, 23 | } 24 | 25 | impl Clone for BlogClient { 26 | fn clone(&self) -> Self { 27 | panic!("BlogClient Clone MUST NOT BE TRIGGERED"); 28 | } 29 | } 30 | 31 | #[derive(Debug, Clone, Copy, PartialEq, Eq)] 32 | pub struct BlogRecord(pub usize); 33 | 34 | #[derive(Debug, Clone)] 35 | pub struct GetContentRequest { 36 | pub page: usize, 37 | } 38 | 39 | impl RequestAhead for GetContentRequest { 40 | fn next_request(&self) -> Self { 41 | Self { 42 | page: self.page + 1, 43 | } 44 | } 45 | } 46 | 47 | pub struct GetContentResponse { 48 | pub record: BlogRecord, 49 | pub next_page: Option, 50 | } 51 | 52 | impl BlogClient { 53 | pub fn new(amount: usize) -> Self { 54 | Self { 55 | content: (0..amount).map(BlogRecord).map(Ok).collect(), 56 | } 57 | } 58 | 59 | pub async fn get_content(&self, req: GetContentRequest) -> Result { 60 | let record = self 61 | .content 62 | .get(req.page) 63 | .ok_or("The page is out of bound")? 64 | .clone()?; 65 | 66 | let next_page = (req.page + 1 < self.content.len()).then_some(req.page + 1); 67 | Ok(GetContentResponse { record, next_page }) 68 | } 69 | 70 | pub fn set_error(&mut self, pos: usize) { 71 | self.set_error_with_msg(pos, "Custom error"); 72 | } 73 | 74 | pub fn set_error_with_msg(&mut self, pos: usize, msg: &'static str) { 75 | self.content[pos] = Err(msg.into()) 76 | } 77 | } 78 | 79 | macro_rules! 
numbers_client_page_turner_impl { 80 | (@types) => { 81 | type PageItems = Vec; 82 | type PageError = (); 83 | }; 84 | (@body, $client:ident, $query:ident) => {{ 85 | let index = $query.key; 86 | 87 | let response: Vec<_> = $client.numbers[index..] 88 | .iter() 89 | .copied() 90 | .take($client.page_size) 91 | .collect(); 92 | 93 | if index + $client.page_size < $client.numbers.len() { 94 | Ok(TurnedPage::next( 95 | response, 96 | GetNumbersQuery { 97 | key: index + $client.page_size, 98 | }, 99 | )) 100 | } else { 101 | Ok(TurnedPage::last(response)) 102 | } 103 | }}; 104 | (async_trait) => { 105 | #[async_trait] 106 | impl PageTurner for NumbersClient { 107 | numbers_client_page_turner_impl!(@types); 108 | 109 | async fn turn_page( 110 | &self, 111 | request: GetNumbersQuery, 112 | ) -> TurnedPageResult { 113 | numbers_client_page_turner_impl!(@body, self, request) 114 | } 115 | } 116 | }; 117 | ($($mutability:tt)*) => { 118 | impl PageTurner for NumbersClient { 119 | numbers_client_page_turner_impl!(@types); 120 | 121 | async fn turn_page( 122 | &$($mutability)* self, 123 | request: GetNumbersQuery, 124 | ) -> TurnedPageResult { 125 | numbers_client_page_turner_impl!(@body, self, request) 126 | } 127 | } 128 | }; 129 | } 130 | 131 | macro_rules! 
numbers_client_pages_base_test {
    // Shared body for the `pages`/`into_pages` happy-path tests of `NumbersClient`.
    // `$mutability` lets callers instantiate the test for both the `&self` and the
    // `&mut self` (mutable flavor) page turners: pass nothing or `mut`.
    ($($mutability:tt)*) => {
        async {
            // Case 1: page_size == item count -> exactly one full page.
            let $($mutability)* client = NumbersClient::new(30, 30);
            let expected: Vec<_> = (1..=30).collect();

            let pages: Vec<_> = client
                .pages(GetNumbersQuery::default())
                .try_collect()
                .await
                .unwrap();

            assert_eq!(pages.len(), 1, "There should be only one page");
            assert_eq!(pages[0].len(), 30, "The page must contain 30 items");

            let output: Vec<_> = client
                .into_pages(GetNumbersQuery::default())
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(
                output, expected,
                "After paginated query with page_size = 30"
            );

            // Case 2: page_size divides the item count evenly -> 3 full pages.
            let $($mutability)* client = NumbersClient::new(30, 10);

            let pages: Vec<_> = client
                .pages(GetNumbersQuery::default())
                .try_collect()
                .await
                .unwrap();

            assert_eq!(pages.len(), 3, "There should be 3 pages");

            for page in pages {
                assert_eq!(page.len(), 10, "Each page must contain 10 elements")
            }

            let output: Vec<_> = client
                .into_pages(GetNumbersQuery::default())
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(
                output, expected,
                "After paginated query with page_size = 10"
            );

            // Case 3: page_size doesn't divide evenly -> a shorter trailing page.
            let $($mutability)* client = NumbersClient::new(30, 19);

            let pages: Vec<_> = client
                .pages(GetNumbersQuery::default())
                .try_collect()
                .await
                .unwrap();

            assert_eq!(pages.len(), 2, "There should be 2 pages");

            assert_eq!(pages[0].len(), 19, "The first page must contain 19 items");
            assert_eq!(pages[1].len(), 11, "The second page must contain 11 items");

            let output: Vec<_> = client
                .into_pages(GetNumbersQuery::default())
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(
                output, expected,
                "After paginated query with page_size = 19"
            );

            // `into_pages` consumes the client, so the resulting stream owns it
            // and can be returned from a function without borrowing issues.
            // NOTE(review): the generic arguments of this return type were
            // mangled in extraction; `Item = Result<Vec<usize>, ()>` is
            // reconstructed from the surviving `, ()>>` tail and the numeric
            // pages asserted above -- confirm against `NumbersClient`'s
            // `PageItems`/`PageError` associated types.
            fn consumed_numbers_client(
            ) -> impl futures::stream::Stream<Item = Result<Vec<usize>, ()>> {
                let client = NumbersClient::new(48, 13);
                client.into_pages(GetNumbersQuery::default())
            }

            // 48 items at 13 per page -> 4 pages (3 full + 1 short).
            let pages: Vec<_> = consumed_numbers_client().try_collect().await.unwrap();
            assert_eq!(pages.len(), 4, "Consumed numbers client must produce 4 pages");
        }
    };
}

// Generates a `PageTurner` implementation for `BlogClient`. The `@types` and
// `@body` rules are internal helpers that hold the parts shared by every
// flavor, so the public arms contain no copy-paste.
// NOTE(review): the generic argument lists below were stripped by extraction
// and have been reconstructed (`Vec<BlogRecord>`, `PageTurner<GetContentRequest>`,
// `TurnedPageResult<Self, GetContentRequest>`) from the record/request types
// used in the body -- verify against the trait definitions in this crate.
macro_rules! blogs_client_page_turner_impl {
    // Associated types shared by all generated impls.
    (@types) => {
        type PageItems = Vec<BlogRecord>;
        type PageError = String;
    };
    // Shared `turn_page` body: fetch one record, then either schedule the next
    // page request or mark this page as the last one.
    (@body, $self:ident, $query:ident) => {{
        let response = $self.get_content($query).await?;

        match response.next_page {
            Some(page) => Ok(TurnedPage::next(
                vec![response.record],
                GetContentRequest { page },
            )),
            None => Ok(TurnedPage::last(vec![response.record])),
        }
    }};
    // `dynamic` feature flavor: object-safe impl via `#[async_trait]`.
    // This arm must stay above the catch-all `tt` matcher below.
    (async_trait) => {
        #[async_trait]
        impl PageTurner<GetContentRequest> for BlogClient {
            blogs_client_page_turner_impl!(@types);

            async fn turn_page(
                &self,
                req: GetContentRequest,
            ) -> TurnedPageResult<Self, GetContentRequest> {
                blogs_client_page_turner_impl!(@body, self, req)
            }
        }
    };
    // Default RPITIT flavor; `$mutability` toggles `&self` vs `&mut self`.
    ($($mutability:tt)*) => {
        impl PageTurner<GetContentRequest> for BlogClient {
            blogs_client_page_turner_impl!(@types);

            async fn turn_page(
                &$($mutability)* self,
                req: GetContentRequest,
            ) -> TurnedPageResult<Self, GetContentRequest> {
                blogs_client_page_turner_impl!(@body, self, req)
            }
        }
    };
}

macro_rules!
blogs_client_pages_base_test {
    // Shared body for the sequential `pages` stream error test of `BlogClient`.
    // `$modifier` is accepted only for signature parity with the other base
    // tests; it is not used inside this body.
    ($($modifier:tt)*) => {
        async {
            let mut blog = BlogClient::new(41);
            // Fail on the very first page request.
            blog.set_error(0);

            let mut stream = std::pin::pin!(blog.pages(GetContentRequest { page: 0 }).items());

            let item = stream.try_next().await;
            assert_eq!(item, Err("Custom error".to_owned()));

            // After yielding an error the stream must be fused.
            let item = stream.try_next().await;
            assert_eq!(item, Ok(None), "pages stream must end after an error");
        }
    };
}

// Shared body for `pages_ahead`/`into_pages_ahead` tests: result ordering,
// page limiting, zero corner cases, and termination after an error.
macro_rules! blogs_client_pages_ahead_base_test {
    ($($modifier:tt)*) => {
        async {
            let blog = BlogClient::new(33);

            // Basic case
            let results: Vec<_> = blog
                .pages_ahead(5, Limit::None, GetContentRequest { page: 0 })
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(results.len(), 33);

            // Concurrent prefetching must not change the order of results.
            for (ix, res) in results.into_iter().enumerate() {
                assert_eq!(res.0, ix);
            }

            // The `into_*` flavor consumes the client, so share it via `Arc`
            // to reuse it across the remaining cases.
            let blog = std::sync::Arc::new(blog);

            // Pages limiting
            let results: Vec<_> = blog
                .clone()
                .into_pages_ahead(11, Limit::Pages(22), GetContentRequest { page: 0 })
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(results.len(), 22);
            assert_eq!(results.last().unwrap(), &BlogRecord(21));

            // Zero corner case
            let results: Vec<_> = blog
                .pages_ahead(0, Limit::None, GetContentRequest { page: 0 })
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(results.len(), 0);

            let results: Vec<_> = blog
                .clone()
                .into_pages_ahead(5, Limit::Pages(0), GetContentRequest { page: 0 })
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(results.len(), 0);

            // Error case: the second page request (index 1) fails.
            let mut blog = std::sync::Arc::into_inner(blog).unwrap();
            blog.set_error(1);

            let mut stream = std::pin::pin!(blog
                .pages_ahead(4, Limit::None, GetContentRequest { page: 0 })
                .items());

            let item = stream.try_next().await;
            assert_eq!(item.unwrap().unwrap(), BlogRecord(0));

            let item = stream.try_next().await;
            assert_eq!(item, Err("Custom error".to_owned()));

            let item = stream.try_next().await;
            assert_eq!(item, Ok(None), "pages_ahead stream must end after an error");
        }
    };
}

// Shared body for `pages_ahead_unordered`/`into_pages_ahead_unordered` tests.
macro_rules! blogs_client_pages_ahead_unordered_base_test {
    ($($modifier:tt)*) => {
        async {
            // Because in tests our futures resolve immediately, `pages_ahead_unordered`
            // yields results in scheduling order and is equivalent to `pages_ahead` in
            // all aspects except error handling.
            let blog = BlogClient::new(33);

            // Basic case
            let results: Vec<_> = blog
                .pages_ahead_unordered(5, Limit::None, GetContentRequest { page: 0 })
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(results.len(), 33);

            for (ix, res) in results.into_iter().enumerate() {
                assert_eq!(res.0, ix);
            }

            let blog = std::sync::Arc::new(blog);
            // Pages limiting
            let results: Vec<_> = blog
                .clone()
                .into_pages_ahead_unordered(11, Limit::Pages(22), GetContentRequest { page: 0 })
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(results.len(), 22);
            assert_eq!(results.last().unwrap(), &BlogRecord(21));

            // Zero corner case
            let results: Vec<_> = blog
                .pages_ahead_unordered(0, Limit::None, GetContentRequest { page: 0 })
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(results.len(), 0);

            let results: Vec<_> = blog
                .clone()
                .into_pages_ahead_unordered(5, Limit::Pages(0), GetContentRequest { page: 0 })
                .items()
                .try_collect()
                .await
                .unwrap();

            assert_eq!(results.len(), 0);

            let mut blog = std::sync::Arc::into_inner(blog).unwrap();
            // Error case: pages 1..=3 fail with distinct messages so the test
            // can observe which error surfaces first.
            blog.set_error_with_msg(1, "1");
            blog.set_error_with_msg(2, "2");
            blog.set_error_with_msg(3, "3");

            let mut stream = std::pin::pin!(blog
                .pages_ahead_unordered(5, Limit::None, GetContentRequest { page: 0 })
                .items());

            // Successful pages 0 and 4 arrive before any queued error is reported.
            let item = stream.try_next().await;
            assert_eq!(item.unwrap().unwrap(), BlogRecord(0));

            let item = stream.try_next().await;
            assert_eq!(item.unwrap().unwrap(), BlogRecord(4));

            // This comes from a sliding window shift!
            let item = stream.try_next().await;
            assert_eq!(item.unwrap().unwrap(), BlogRecord(5));

            let item = stream.try_next().await;
            assert_eq!(item, Err("1".to_owned()));

            let item = stream.try_next().await;
            assert_eq!(
                item,
                Ok(None),
                "pages_ahead_unordered stream must end after an error"
            );
        }
    };
}

// Generates `PageTurner` impls for both test clients with one invocation.
macro_rules! page_turner_impls {
    ($($modifier:tt)*) => {
        numbers_client_page_turner_impl!($($modifier)*);
        blogs_client_page_turner_impl!($($modifier)*);
    }
}

// Aggregated base test: runs the sequential `pages` tests for both clients.
macro_rules! pages_base_test {
    ($($modifier:tt)*) => { async {
        numbers_client_pages_base_test!($($modifier)*).await;
        blogs_client_pages_base_test!($($modifier)*).await;
    }};
}

// Aggregated base test for the ordered look-ahead stream.
macro_rules! pages_ahead_base_test {
    ($($modifier:tt)*) => { async {
        blogs_client_pages_ahead_base_test!($($modifier)*).await;
    }};
}

macro_rules!
pages_ahead_unordered_base_test {
    // Aggregated base test for the unordered look-ahead stream.
    ($($modifier:tt)*) => { async {
        blogs_client_pages_ahead_unordered_base_test!($($modifier)*).await;
    }};
}

// Re-export the test macros at crate visibility so sibling test modules can
// import and invoke them with their flavor-specific modifiers.
pub(crate) use blogs_client_page_turner_impl;
pub(crate) use blogs_client_pages_ahead_base_test;
pub(crate) use blogs_client_pages_ahead_unordered_base_test;
pub(crate) use blogs_client_pages_base_test;
pub(crate) use numbers_client_page_turner_impl;
pub(crate) use numbers_client_pages_base_test;
pub(crate) use page_turner_impls;
pub(crate) use pages_ahead_base_test;
pub(crate) use pages_ahead_unordered_base_test;
pub(crate) use pages_base_test;

// NOTE(review): presumably required by code in this module that relies on the
// `RequestAhead` bound of the look-ahead streams -- confirm it is still used.
use super::RequestAhead;