├── .github ├── FUNDING.yml └── workflows │ └── rust.yml ├── .gitignore ├── Cargo.toml ├── LICENSE-APACHE ├── LICENSE-MIT ├── README.md ├── examples ├── async_graphql.rs ├── cached.rs ├── juniper.rs └── non_cached.rs ├── src ├── batch_fn.rs ├── cached.rs ├── lib.rs ├── non_cached.rs └── runtime.rs └── tests ├── cached.rs ├── generic.rs └── non_cached.rs /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: cksac 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 14 | -------------------------------------------------------------------------------- /.github/workflows/rust.yml: -------------------------------------------------------------------------------- 1 | name: Rust 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v2 16 | - name: Build 17 | run: cargo build --verbose 18 | - name: Run tests 19 | run: cargo test --verbose 20 | - name: Run tests tokio 21 | run: cargo test --verbose --features runtime-tokio --no-default-features 22 | 23 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "dataloader" 3 | version = "0.18.0" 4 | edition = "2018" 5 | authors = ["cksac "] 6 | description = "Rust implementation of Facebook's DataLoader using async-await." 7 | keywords = ["batcher", "dataloader", "cache"] 8 | categories = ["asynchronous", "caching"] 9 | license = "MIT OR Apache-2.0" 10 | readme = "README.md" 11 | repository = "https://github.com/cksac/dataloader-rs" 12 | homepage = "https://github.com/cksac/dataloader-rs" 13 | documentation = "https://docs.rs/dataloader" 14 | 15 | [badges] 16 | travis-ci = { repository = "cksac/dataloader-rs" } 17 | 18 | [features] 19 | default = ["runtime-async-std"] 20 | runtime-async-std = [ 21 | "async-std", 22 | ] 23 | runtime-tokio = [ 24 | "tokio" 25 | ] 26 | 27 | [dependencies] 28 | async-std = { version = "1", optional = true } 29 | tokio = { version = "1", features = [ "sync", "rt" ], optional = true } 30 | 31 | [dev-dependencies] 32 | futures = "0.3" 33 | fake = { version = "3", features = ["derive"] } 34 | rand = "0.8" 35 | juniper = "0.16" 36 | async-graphql = { version = "7", default-features = false } 37 | serde_json = "1" 38 | 39 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright (c) 2017 cksac 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 cksac 2 | 3 | Permission is hereby granted, free of charge, to any 4 | person obtaining a copy of this software and associated 5 | documentation files (the "Software"), to deal in the 6 | Software without restriction, including without 7 | limitation the rights to use, copy, modify, merge, 8 | publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software 10 | is furnished to do so, subject to the following 11 | conditions: 12 | 13 | The above copyright notice and this permission notice 14 | shall be included in all copies or substantial portions 15 | of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF 18 | ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 19 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 20 | PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT 21 | SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 22 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 23 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR 24 | IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 25 | DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Dataloader 2 | 3 | ![Rust](https://github.com/cksac/dataloader-rs/workflows/Rust/badge.svg) 4 | [![Crates.io](https://img.shields.io/crates/v/dataloader.svg)](https://crates.io/crates/dataloader) 5 | 6 | Rust implementation of [Facebook's DataLoader](https://github.com/facebook/dataloader) using async-await. 7 | 8 | [Documentation](https://docs.rs/dataloader) 9 | 10 | ## Features 11 | * [x] Batching load requests with caching 12 | * [x] Batching load requests without caching 13 | 14 | ## Usage 15 | ### Switching runtime, by using cargo features 16 | - `runtime-async-std` (default), to use the [async-std](https://async.rs) runtime 17 | - dataloader = "0.18" 18 | - `runtime-tokio` to use the [Tokio](https://tokio.rs) runtime 19 | - dataloader = { version = "0.18", default-features = false, features = ["runtime-tokio"]} 20 | 21 | 22 | ### Add to your `Cargo.toml`: 23 | ```toml 24 | [dependencies] 25 | dataloader = "0.18" 26 | futures = "0.3" 27 | ``` 28 | 29 | ### Example: 30 | ```rust 31 | use dataloader::cached::Loader; 32 | use dataloader::BatchFn; 33 | use futures::executor::block_on; 34 | use futures::future::ready; 35 | use std::collections::HashMap; 36 | use std::thread; 37 | 38 | struct MyLoadFn; 39 | 40 | impl BatchFn<usize, usize> for MyLoadFn { 41 | async fn load(&mut self, keys: &[usize]) -> HashMap<usize, usize> { 42 | println!("BatchFn load keys {:?}", keys); 43 | let ret = keys.iter() 44 | .map(|v| (v.clone(), v.clone())) 45 | .collect::<HashMap<usize, usize>>(); 46 | ready(ret).await 47 | } 48 | } 49 | 50 | fn main() { 51 | let mut i = 0; 52 |
while i < 2 { 53 | let a = MyLoadFn; 54 | let loader = Loader::new(a).with_max_batch_size(4); 55 | 56 | let l1 = loader.clone(); 57 | let h1 = thread::spawn(move || { 58 | let r1 = l1.load(1); 59 | let r2 = l1.load(2); 60 | let r3 = l1.load(3); 61 | 62 | let r4 = l1.load_many(vec![2, 3, 4, 5, 6, 7, 8]); 63 | let f = futures::future::join4(r1, r2, r3, r4); 64 | println!("{:?}", block_on(f)); 65 | }); 66 | 67 | let l2 = loader.clone(); 68 | let h2 = thread::spawn(move || { 69 | let r1 = l2.load(1); 70 | let r2 = l2.load(2); 71 | let r3 = l2.load(3); 72 | let r4 = l2.load(4); 73 | let f = futures::future::join4(r1, r2, r3, r4); 74 | println!("{:?}", block_on(f)); 75 | }); 76 | 77 | h1.join().unwrap(); 78 | h2.join().unwrap(); 79 | i += 1; 80 | } 81 | } 82 | ``` 83 | 84 | # LICENSE 85 | 86 | This project is licensed under either of 87 | 88 | * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or 89 | http://www.apache.org/licenses/LICENSE-2.0) 90 | * MIT license ([LICENSE-MIT](LICENSE-MIT) or 91 | http://opensource.org/licenses/MIT) 92 | 93 | at your option. 
-------------------------------------------------------------------------------- /examples/async_graphql.rs: -------------------------------------------------------------------------------- 1 | use async_graphql::{Context, EmptyMutation, EmptySubscription, Schema}; 2 | use dataloader::cached::Loader; 3 | use dataloader::BatchFn; 4 | use fake::faker::company::en::CompanyName; 5 | use fake::faker::name::en::Name; 6 | use fake::{Dummy, Fake, Faker}; 7 | use futures::executor::block_on; 8 | use std::collections::HashMap; 9 | use std::future::ready; 10 | 11 | pub struct CultBatcher; 12 | 13 | impl BatchFn for CultBatcher { 14 | async fn load(&mut self, keys: &[i32]) -> HashMap { 15 | println!("load cult by batch {:?}", keys); 16 | let ret = keys 17 | .iter() 18 | .map(|k| { 19 | let mut cult: Cult = Faker.fake(); 20 | cult.id = k.clone(); 21 | (k.clone(), cult) 22 | }) 23 | .collect(); 24 | 25 | ready(ret).await 26 | } 27 | } 28 | 29 | #[derive(Clone)] 30 | pub struct AppContext { 31 | cult_loader: Loader, 32 | } 33 | 34 | impl AppContext { 35 | pub fn new() -> AppContext { 36 | AppContext { 37 | cult_loader: Loader::new(CultBatcher), 38 | } 39 | } 40 | } 41 | 42 | struct Query; 43 | 44 | #[async_graphql::Object] 45 | impl Query { 46 | async fn persons(&self, _ctx: &Context<'_>) -> Vec { 47 | let persons = fake::vec![Person; 10..20]; 48 | persons 49 | } 50 | 51 | async fn cult(&self, ctx: &Context<'_>, id: i32) -> Cult { 52 | ctx.data_unchecked::() 53 | .cult_loader 54 | .load(id) 55 | .await 56 | } 57 | } 58 | 59 | #[derive(Debug, Clone, Dummy)] 60 | pub struct Person { 61 | #[dummy(faker = "1..999")] 62 | pub id: i32, 63 | #[dummy(faker = "Name()")] 64 | pub name: String, 65 | #[dummy(faker = "1..999")] 66 | pub cult: i32, 67 | } 68 | 69 | #[async_graphql::Object] 70 | impl Person { 71 | async fn id(&self) -> i32 { 72 | self.id 73 | } 74 | 75 | async fn name(&self) -> &str { 76 | self.name.as_str() 77 | } 78 | 79 | async fn cult(&self, ctx: &Context<'_>) -> Cult { 80 
| ctx.data_unchecked::() 81 | .cult_loader 82 | .load(self.cult) 83 | .await 84 | } 85 | 86 | async fn cult_by_id(&self, ctx: &Context<'_>, id: i32) -> Cult { 87 | ctx.data_unchecked::() 88 | .cult_loader 89 | .load(id) 90 | .await 91 | } 92 | } 93 | 94 | #[derive(Debug, Clone, Dummy)] 95 | pub struct Cult { 96 | #[dummy(faker = "1..999")] 97 | pub id: i32, 98 | #[dummy(faker = "CompanyName()")] 99 | pub name: String, 100 | } 101 | 102 | #[async_graphql::Object] 103 | impl Cult { 104 | async fn id(&self) -> i32 { 105 | self.id 106 | } 107 | 108 | async fn name(&self) -> &str { 109 | self.name.as_str() 110 | } 111 | } 112 | 113 | fn main() { 114 | let schema = Schema::build(Query, EmptyMutation, EmptySubscription) 115 | .data(AppContext::new()) 116 | .finish(); 117 | let q = r#" 118 | query { 119 | c1: cult(id: 1) { 120 | id 121 | name 122 | } 123 | c2: cult(id: 2) { 124 | id 125 | name 126 | } 127 | c3: cult(id: 3) { 128 | id 129 | name 130 | } 131 | persons { 132 | id 133 | name 134 | cult { 135 | id 136 | name 137 | } 138 | c1: cultById(id: 4) { 139 | id 140 | name 141 | } 142 | c2: cultById(id: 5) { 143 | id 144 | name 145 | } 146 | c3: cultById(id: 6) { 147 | id 148 | name 149 | } 150 | } 151 | }"#; 152 | let f = schema.execute(q); 153 | let _r = block_on(f); 154 | } 155 | -------------------------------------------------------------------------------- /examples/cached.rs: -------------------------------------------------------------------------------- 1 | use dataloader::cached::Loader; 2 | use dataloader::BatchFn; 3 | use futures::executor::block_on; 4 | use futures::future::ready; 5 | use std::collections::HashMap; 6 | use std::thread; 7 | 8 | struct MyLoadFn; 9 | 10 | impl BatchFn for MyLoadFn { 11 | async fn load(&mut self, keys: &[usize]) -> HashMap { 12 | println!("BatchFn load keys {:?}", keys); 13 | let ret = keys 14 | .iter() 15 | .map(|v| (v.clone(), v.clone())) 16 | .collect::>(); 17 | ready(ret).await 18 | } 19 | } 20 | 21 | fn main() { 22 | let 
mut i = 0; 23 | while i < 2 { 24 | let a = MyLoadFn; 25 | let loader = Loader::new(a).with_max_batch_size(4); 26 | 27 | let l1 = loader.clone(); 28 | let h1 = thread::spawn(move || { 29 | let r1 = l1.load(1); 30 | let r2 = l1.load(2); 31 | let r3 = l1.load(3); 32 | 33 | let r4 = l1.load_many(vec![2, 3, 4, 5, 6, 7, 8]); 34 | let f = futures::future::join4(r1, r2, r3, r4); 35 | println!("{:?}", block_on(f)); 36 | }); 37 | 38 | let l2 = loader.clone(); 39 | let h2 = thread::spawn(move || { 40 | let r1 = l2.load(1); 41 | let r2 = l2.load(2); 42 | let r3 = l2.load(3); 43 | let r4 = l2.load(4); 44 | let f = futures::future::join4(r1, r2, r3, r4); 45 | println!("{:?}", block_on(f)); 46 | }); 47 | 48 | h1.join().unwrap(); 49 | h2.join().unwrap(); 50 | i += 1; 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /examples/juniper.rs: -------------------------------------------------------------------------------- 1 | use dataloader::cached::Loader; 2 | use dataloader::BatchFn; 3 | use fake::faker::company::en::CompanyName; 4 | use fake::faker::name::en::Name; 5 | use fake::{Dummy, Fake, Faker}; 6 | use futures::executor::block_on; 7 | use juniper::{self, EmptyMutation, EmptySubscription, FieldResult, Variables}; 8 | use std::collections::HashMap; 9 | use std::future::ready; 10 | 11 | pub struct CultBatcher; 12 | 13 | impl BatchFn for CultBatcher { 14 | async fn load(&mut self, keys: &[i32]) -> HashMap { 15 | println!("load cult by batch {:?}", keys); 16 | let ret = keys 17 | .iter() 18 | .map(|k| { 19 | let mut cult: Cult = Faker.fake(); 20 | cult.id = k.clone(); 21 | (k.clone(), cult) 22 | }) 23 | .collect(); 24 | ready(ret).await 25 | } 26 | } 27 | 28 | #[derive(Clone)] 29 | pub struct AppContext { 30 | cult_loader: Loader, 31 | } 32 | 33 | impl AppContext { 34 | pub fn new() -> AppContext { 35 | AppContext { 36 | cult_loader: Loader::new(CultBatcher), 37 | } 38 | } 39 | } 40 | 41 | impl juniper::Context for AppContext {} 42 
| 43 | struct Query; 44 | 45 | #[juniper::graphql_object(Context = AppContext)] 46 | impl Query { 47 | async fn persons(_context: &AppContext) -> FieldResult> { 48 | let persons = fake::vec![Person; 10..20]; 49 | Ok(persons) 50 | } 51 | 52 | async fn cult(&self, id: i32, ctx: &AppContext) -> Cult { 53 | ctx.cult_loader.load(id).await 54 | } 55 | } 56 | 57 | type Schema = 58 | juniper::RootNode<'static, Query, EmptyMutation, EmptySubscription>; 59 | 60 | #[derive(Debug, Clone, Dummy)] 61 | pub struct Person { 62 | #[dummy(faker = "1..999")] 63 | pub id: i32, 64 | #[dummy(faker = "Name()")] 65 | pub name: String, 66 | #[dummy(faker = "1..999")] 67 | pub cult: i32, 68 | } 69 | 70 | #[juniper::graphql_object(Context = AppContext)] 71 | impl Person { 72 | pub fn id(&self) -> i32 { 73 | self.id 74 | } 75 | 76 | pub fn name(&self) -> &str { 77 | self.name.as_str() 78 | } 79 | 80 | pub async fn cult(&self, ctx: &AppContext) -> FieldResult> { 81 | let fut = ctx.cult_loader.load(self.cult); 82 | Ok(Some(fut.await)) 83 | } 84 | 85 | pub async fn cult_by_id(&self, id: i32, ctx: &AppContext) -> Cult { 86 | ctx.cult_loader.load(id).await 87 | } 88 | } 89 | 90 | #[derive(Debug, Clone, Dummy)] 91 | pub struct Cult { 92 | #[dummy(faker = "1..999")] 93 | pub id: i32, 94 | #[dummy(faker = "CompanyName()")] 95 | pub name: String, 96 | } 97 | 98 | #[juniper::graphql_object(Context = AppContext)] 99 | impl Cult { 100 | pub fn id(&self) -> i32 { 101 | self.id 102 | } 103 | 104 | pub fn name(&self) -> &str { 105 | self.name.as_str() 106 | } 107 | } 108 | 109 | fn main() { 110 | let ctx = AppContext::new(); 111 | let schema = Schema::new(Query, EmptyMutation::new(), EmptySubscription::new()); 112 | let vars = Variables::new(); 113 | let q = r#" 114 | query { 115 | c1: cult(id: 1) { 116 | id 117 | name 118 | } 119 | c2: cult(id: 2) { 120 | id 121 | name 122 | } 123 | c3: cult(id: 3) { 124 | id 125 | name 126 | } 127 | persons { 128 | id 129 | name 130 | cult { 131 | id 132 | name 133 | } 
134 | c1: cultById(id: 4) { 135 | id 136 | name 137 | } 138 | c2: cultById(id: 5) { 139 | id 140 | name 141 | } 142 | c3: cultById(id: 6) { 143 | id 144 | name 145 | } 146 | } 147 | }"#; 148 | let f = juniper::execute(q, None, &schema, &vars, &ctx); 149 | let (_res, _errors) = block_on(f).unwrap(); 150 | } 151 | -------------------------------------------------------------------------------- /examples/non_cached.rs: -------------------------------------------------------------------------------- 1 | use dataloader::non_cached::Loader; 2 | use dataloader::BatchFn; 3 | use futures::executor::block_on; 4 | use std::collections::HashMap; 5 | use std::future::ready; 6 | use std::thread; 7 | 8 | struct MyLoadFn; 9 | 10 | impl BatchFn for MyLoadFn { 11 | async fn load(&mut self, keys: &[usize]) -> HashMap { 12 | println!("BatchFn load keys {:?}", keys); 13 | let ret = keys 14 | .iter() 15 | .map(|v| (v.clone(), v.clone())) 16 | .collect::>(); 17 | ready(ret).await 18 | } 19 | } 20 | 21 | fn main() { 22 | let mut i = 0; 23 | while i < 2 { 24 | let a = MyLoadFn; 25 | let loader = Loader::new(a).with_max_batch_size(4); 26 | 27 | let l1 = loader.clone(); 28 | let h1 = thread::spawn(move || { 29 | let r1 = l1.load(1); 30 | let r2 = l1.load(2); 31 | let r3 = l1.load(3); 32 | 33 | let r4 = l1.load_many(vec![2, 3, 4, 5, 6, 7, 8]); 34 | let f = futures::future::join4(r1, r2, r3, r4); 35 | println!("{:?}", block_on(f)); 36 | }); 37 | 38 | let l2 = loader.clone(); 39 | let h2 = thread::spawn(move || { 40 | let r1 = l2.load(1); 41 | let r2 = l2.load(2); 42 | let r3 = l2.load(3); 43 | let r4 = l2.load(4); 44 | let f = futures::future::join4(r1, r2, r3, r4); 45 | println!("{:?}", block_on(f)); 46 | }); 47 | 48 | h1.join().unwrap(); 49 | h2.join().unwrap(); 50 | i += 1; 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/batch_fn.rs: -------------------------------------------------------------------------------- 1 | use 
std::collections::HashMap; 2 | 3 | pub trait BatchFn { 4 | fn load(&mut self, keys: &[K]) -> impl std::future::Future>; 5 | } 6 | -------------------------------------------------------------------------------- /src/cached.rs: -------------------------------------------------------------------------------- 1 | use crate::runtime::{Arc, Mutex}; 2 | use crate::{yield_fn, BatchFn, WaitForWorkFn}; 3 | use std::collections::{HashMap, HashSet}; 4 | use std::fmt::Debug; 5 | use std::hash::{BuildHasher, Hash}; 6 | use std::io::{Error, ErrorKind}; 7 | use std::iter::IntoIterator; 8 | 9 | pub trait Cache { 10 | type Key; 11 | type Val; 12 | fn get(&mut self, key: &Self::Key) -> Option<&Self::Val>; 13 | fn insert(&mut self, key: Self::Key, val: Self::Val); 14 | fn remove(&mut self, key: &Self::Key) -> Option; 15 | fn clear(&mut self); 16 | } 17 | 18 | impl Cache for HashMap 19 | where 20 | K: Eq + Hash, 21 | { 22 | type Key = K; 23 | type Val = V; 24 | 25 | #[inline] 26 | fn get(&mut self, key: &K) -> Option<&V> { 27 | HashMap::get(self, key) 28 | } 29 | 30 | #[inline] 31 | fn insert(&mut self, key: K, val: V) { 32 | HashMap::insert(self, key, val); 33 | } 34 | 35 | #[inline] 36 | fn remove(&mut self, key: &K) -> Option { 37 | HashMap::remove(self, key) 38 | } 39 | 40 | #[inline] 41 | fn clear(&mut self) { 42 | HashMap::clear(self) 43 | } 44 | } 45 | 46 | struct State> 47 | where 48 | C: Cache, 49 | { 50 | completed: C, 51 | pending: HashSet, 52 | } 53 | 54 | impl State 55 | where 56 | C: Cache, 57 | { 58 | fn with_cache(cache: C) -> Self { 59 | State { 60 | completed: cache, 61 | pending: HashSet::new(), 62 | } 63 | } 64 | } 65 | 66 | pub struct Loader> 67 | where 68 | K: Eq + Hash + Clone, 69 | V: Clone, 70 | F: BatchFn, 71 | C: Cache, 72 | { 73 | state: Arc>>, 74 | load_fn: Arc>, 75 | wait_for_work_fn: Arc, 76 | max_batch_size: usize, 77 | } 78 | 79 | impl Clone for Loader 80 | where 81 | K: Eq + Hash + Clone, 82 | V: Clone, 83 | F: BatchFn, 84 | C: Cache, 85 | { 86 | fn 
clone(&self) -> Self { 87 | Loader { 88 | state: self.state.clone(), 89 | max_batch_size: self.max_batch_size, 90 | load_fn: self.load_fn.clone(), 91 | wait_for_work_fn: self.wait_for_work_fn.clone(), 92 | } 93 | } 94 | } 95 | 96 | #[allow(clippy::implicit_hasher)] 97 | impl Loader> 98 | where 99 | K: Eq + Hash + Clone + Debug, 100 | V: Clone, 101 | F: BatchFn, 102 | { 103 | pub fn new(load_fn: F) -> Loader> { 104 | Loader::with_cache(load_fn, HashMap::new()) 105 | } 106 | } 107 | 108 | impl Loader 109 | where 110 | K: Eq + Hash + Clone + Debug, 111 | V: Clone, 112 | F: BatchFn, 113 | C: Cache, 114 | { 115 | pub fn with_cache(load_fn: F, cache: C) -> Loader { 116 | Loader { 117 | state: Arc::new(Mutex::new(State::with_cache(cache))), 118 | load_fn: Arc::new(Mutex::new(load_fn)), 119 | max_batch_size: 200, 120 | wait_for_work_fn: Arc::new(yield_fn(10)), 121 | } 122 | } 123 | 124 | pub fn with_max_batch_size(mut self, max_batch_size: usize) -> Self { 125 | self.max_batch_size = max_batch_size; 126 | self 127 | } 128 | 129 | pub fn with_yield_count(mut self, yield_count: usize) -> Self { 130 | self.wait_for_work_fn = Arc::new(yield_fn(yield_count)); 131 | self 132 | } 133 | 134 | /// Replaces the yielding for work behavior with an arbitrary future. Rather than yielding 135 | /// the runtime repeatedly this will generate and `.await` a future of your choice. 136 | /// ***This is incompatible with*** [`Self::with_yield_count()`]. 
137 | pub fn with_custom_wait_for_work(mut self, wait_for_work_fn: impl WaitForWorkFn) -> Self { 138 | self.wait_for_work_fn = Arc::new(wait_for_work_fn); 139 | self 140 | } 141 | 142 | pub fn max_batch_size(&self) -> usize { 143 | self.max_batch_size 144 | } 145 | 146 | pub async fn try_load(&self, key: K) -> Result { 147 | let mut state = self.state.lock().await; 148 | if let Some(v) = state.completed.get(&key) { 149 | return Ok((*v).clone()); 150 | } 151 | 152 | if !state.pending.contains(&key) { 153 | state.pending.insert(key.clone()); 154 | if state.pending.len() >= self.max_batch_size { 155 | let keys = state.pending.drain().collect::>(); 156 | let mut load_fn = self.load_fn.lock().await; 157 | let load_ret = load_fn.load(keys.as_ref()).await; 158 | drop(load_fn); 159 | for (k, v) in load_ret.into_iter() { 160 | state.completed.insert(k, v); 161 | } 162 | return state.completed.get(&key).cloned().ok_or(Error::new( 163 | ErrorKind::NotFound, 164 | format!("could not lookup result for given key: {:?}", key), 165 | )); 166 | } 167 | } 168 | drop(state); 169 | 170 | (self.wait_for_work_fn)().await; 171 | 172 | let mut state = self.state.lock().await; 173 | if let Some(v) = state.completed.get(&key) { 174 | return Ok((*v).clone()); 175 | } 176 | 177 | if !state.pending.is_empty() { 178 | let keys = state.pending.drain().collect::>(); 179 | let mut load_fn = self.load_fn.lock().await; 180 | let load_ret = load_fn.load(keys.as_ref()).await; 181 | drop(load_fn); 182 | for (k, v) in load_ret.into_iter() { 183 | state.completed.insert(k, v); 184 | } 185 | } 186 | 187 | state.completed.get(&key).cloned().ok_or(Error::new( 188 | ErrorKind::NotFound, 189 | format!("could not lookup result for given key: {:?}", key), 190 | )) 191 | } 192 | 193 | pub async fn load(&self, key: K) -> V { 194 | self.try_load(key).await.unwrap_or_else(|e| panic!("{}", e)) 195 | } 196 | 197 | pub async fn try_load_many(&self, keys: Vec) -> Result, Error> { 198 | let mut state = 
self.state.lock().await; 199 | let mut ret = HashMap::new(); 200 | let mut rest = Vec::new(); 201 | for key in keys.into_iter() { 202 | if let Some(v) = state.completed.get(&key).cloned() { 203 | ret.insert(key, v); 204 | continue; 205 | } 206 | if !state.pending.contains(&key) { 207 | state.pending.insert(key.clone()); 208 | if state.pending.len() >= self.max_batch_size { 209 | let keys = state.pending.drain().collect::>(); 210 | let mut load_fn = self.load_fn.lock().await; 211 | let load_ret = load_fn.load(keys.as_ref()).await; 212 | drop(load_fn); 213 | for (k, v) in load_ret.into_iter() { 214 | state.completed.insert(k, v); 215 | } 216 | } 217 | } 218 | rest.push(key); 219 | } 220 | drop(state); 221 | 222 | (self.wait_for_work_fn)().await; 223 | 224 | if !rest.is_empty() { 225 | let mut state = self.state.lock().await; 226 | if !state.pending.is_empty() { 227 | let keys = state.pending.drain().collect::>(); 228 | let mut load_fn = self.load_fn.lock().await; 229 | let load_ret = load_fn.load(keys.as_ref()).await; 230 | drop(load_fn); 231 | for (k, v) in load_ret.into_iter() { 232 | state.completed.insert(k, v); 233 | } 234 | } 235 | 236 | for key in rest.into_iter() { 237 | let v = state.completed.get(&key).cloned().ok_or(Error::new( 238 | ErrorKind::NotFound, 239 | format!("could not lookup result for given key: {:?}", key), 240 | ))?; 241 | 242 | ret.insert(key, v); 243 | } 244 | } 245 | 246 | Ok(ret) 247 | } 248 | 249 | pub async fn load_many(&self, keys: Vec) -> HashMap { 250 | self.try_load_many(keys) 251 | .await 252 | .unwrap_or_else(|e| panic!("{}", e)) 253 | } 254 | 255 | pub async fn prime(&self, key: K, val: V) { 256 | let mut state = self.state.lock().await; 257 | state.completed.insert(key, val); 258 | } 259 | 260 | pub async fn prime_many(&self, values: impl IntoIterator) { 261 | let mut state = self.state.lock().await; 262 | for (k, v) in values.into_iter() { 263 | state.completed.insert(k, v); 264 | } 265 | } 266 | 267 | pub async fn clear(&self, 
key: K) { 268 | let mut state = self.state.lock().await; 269 | state.completed.remove(&key); 270 | } 271 | 272 | pub async fn clear_all(&self) { 273 | let mut state = self.state.lock().await; 274 | state.completed.clear() 275 | } 276 | } 277 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | mod batch_fn; 2 | pub mod cached; 3 | pub mod non_cached; 4 | mod runtime; 5 | 6 | pub use batch_fn::BatchFn; 7 | 8 | use std::{future::Future, pin::Pin}; 9 | 10 | /// A trait alias. Read as "a function which returns a pinned box containing a future" 11 | pub trait WaitForWorkFn: 12 | Fn() -> Pin + Send + Sync>> + Send + Sync + 'static 13 | { 14 | } 15 | 16 | impl WaitForWorkFn for T where 17 | T: Fn() -> Pin + Send + Sync>> + Send + Sync + 'static 18 | { 19 | } 20 | 21 | pub(crate) fn yield_fn(count: usize) -> impl WaitForWorkFn { 22 | move || { 23 | Box::pin(async move { 24 | // yield for other load to append request 25 | for _ in 0..count { 26 | runtime::yield_now().await; 27 | } 28 | }) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /src/non_cached.rs: -------------------------------------------------------------------------------- 1 | use crate::runtime::{Arc, Mutex}; 2 | use crate::{yield_fn, BatchFn, WaitForWorkFn}; 3 | use std::collections::{HashMap, HashSet}; 4 | use std::fmt::Debug; 5 | use std::hash::Hash; 6 | use std::io::{Error, ErrorKind}; 7 | 8 | type RequestId = usize; 9 | 10 | struct State { 11 | completed: HashMap, 12 | failed: HashMap, 13 | pending: HashMap, 14 | id_seq: RequestId, 15 | } 16 | 17 | impl State { 18 | fn new() -> Self { 19 | State { 20 | completed: HashMap::new(), 21 | failed: HashMap::new(), 22 | pending: HashMap::new(), 23 | id_seq: 0, 24 | } 25 | } 26 | fn next_request_id(&mut self) -> RequestId { 27 | self.id_seq = self.id_seq.wrapping_add(1); 28 | self.id_seq 
29 | } 30 | } 31 | 32 | pub struct Loader 33 | where 34 | K: Eq + Hash + Clone, 35 | V: Clone, 36 | F: BatchFn, 37 | { 38 | state: Arc>>, 39 | load_fn: Arc>, 40 | wait_for_work_fn: Arc, 41 | max_batch_size: usize, 42 | } 43 | 44 | impl Clone for Loader 45 | where 46 | K: Eq + Hash + Clone, 47 | V: Clone, 48 | F: BatchFn, 49 | { 50 | fn clone(&self) -> Self { 51 | Loader { 52 | state: self.state.clone(), 53 | load_fn: self.load_fn.clone(), 54 | max_batch_size: self.max_batch_size, 55 | wait_for_work_fn: self.wait_for_work_fn.clone(), 56 | } 57 | } 58 | } 59 | 60 | impl Loader 61 | where 62 | K: Eq + Hash + Clone + Debug, 63 | V: Clone, 64 | F: BatchFn, 65 | { 66 | pub fn new(load_fn: F) -> Loader { 67 | Loader { 68 | state: Arc::new(Mutex::new(State::new())), 69 | load_fn: Arc::new(Mutex::new(load_fn)), 70 | max_batch_size: 200, 71 | wait_for_work_fn: Arc::new(yield_fn(10)), 72 | } 73 | } 74 | 75 | pub fn with_max_batch_size(mut self, max_batch_size: usize) -> Self { 76 | self.max_batch_size = max_batch_size; 77 | self 78 | } 79 | 80 | pub fn with_yield_count(mut self, yield_count: usize) -> Self { 81 | self.wait_for_work_fn = Arc::new(yield_fn(yield_count)); 82 | self 83 | } 84 | 85 | /// Replaces the yielding for work behavior with an arbitrary future. Rather than yielding 86 | /// the runtime repeatedly this will generate and `.await` a future of your choice. 87 | /// ***This is incompatible with*** [`Self::with_yield_count()`]. 
88 | pub fn with_custom_wait_for_work(mut self, wait_for_work_fn: impl WaitForWorkFn) -> Self { 89 | self.wait_for_work_fn = Arc::new(wait_for_work_fn); 90 | self 91 | } 92 | 93 | pub fn max_batch_size(&self) -> usize { 94 | self.max_batch_size 95 | } 96 | 97 | pub async fn try_load(&self, key: K) -> Result { 98 | let mut state = self.state.lock().await; 99 | let request_id = state.next_request_id(); 100 | state.pending.insert(request_id, key); 101 | if state.pending.len() >= self.max_batch_size { 102 | let batch = state.pending.drain().collect::>(); 103 | let keys: Vec = batch 104 | .values() 105 | .cloned() 106 | .collect::>() 107 | .into_iter() 108 | .collect(); 109 | let mut load_fn = self.load_fn.lock().await; 110 | let load_ret = load_fn.load(keys.as_ref()).await; 111 | drop(load_fn); 112 | for (request_id, key) in batch.into_iter() { 113 | if load_ret 114 | .get(&key) 115 | .and_then(|v| state.completed.insert(request_id, v.clone())) 116 | .is_none() 117 | { 118 | state.failed.insert(request_id, key); 119 | } 120 | } 121 | return state.completed.remove(&request_id).ok_or_else(|| { 122 | Error::new( 123 | ErrorKind::NotFound, 124 | format!( 125 | "could not lookup result for given key: {:?}", 126 | state.failed.remove(&request_id).expect("failed") 127 | ), 128 | ) 129 | }); 130 | } 131 | drop(state); 132 | 133 | (self.wait_for_work_fn)().await; 134 | 135 | let mut state = self.state.lock().await; 136 | 137 | if !state.completed.contains_key(&request_id) { 138 | let batch = state.pending.drain().collect::>(); 139 | if !batch.is_empty() { 140 | let keys: Vec = batch 141 | .values() 142 | .cloned() 143 | .collect::>() 144 | .into_iter() 145 | .collect(); 146 | let mut load_fn = self.load_fn.lock().await; 147 | let load_ret = load_fn.load(keys.as_ref()).await; 148 | drop(load_fn); 149 | for (request_id, key) in batch.into_iter() { 150 | if load_ret 151 | .get(&key) 152 | .and_then(|v| state.completed.insert(request_id, v.clone())) 153 | .is_none() 154 | { 155 | 
state.failed.insert(request_id, key); 156 | } 157 | } 158 | } 159 | } 160 | state.completed.remove(&request_id).ok_or_else(|| { 161 | Error::new( 162 | ErrorKind::NotFound, 163 | format!( 164 | "could not lookup result for given key: {:?}", 165 | state.failed.remove(&request_id).expect("failed") 166 | ), 167 | ) 168 | }) 169 | } 170 | 171 | pub async fn load(&self, key: K) -> V { 172 | self.try_load(key).await.unwrap_or_else(|e| panic!("{}", e)) 173 | } 174 | 175 | pub async fn load_many(&self, keys: Vec) -> HashMap { 176 | self.try_load_many(keys) 177 | .await 178 | .unwrap_or_else(|e| panic!("{}", e)) 179 | } 180 | 181 | pub async fn try_load_many(&self, keys: Vec) -> Result, Error> { 182 | let mut state = self.state.lock().await; 183 | let mut ret = HashMap::new(); 184 | let mut requests = Vec::new(); 185 | for key in keys.into_iter() { 186 | let request_id = state.next_request_id(); 187 | requests.push((request_id, key.clone())); 188 | state.pending.insert(request_id, key); 189 | if state.pending.len() >= self.max_batch_size { 190 | let batch = state.pending.drain().collect::>(); 191 | let keys: Vec = batch 192 | .values() 193 | .cloned() 194 | .collect::>() 195 | .into_iter() 196 | .collect(); 197 | let mut load_fn = self.load_fn.lock().await; 198 | let load_ret = load_fn.load(keys.as_ref()).await; 199 | drop(load_fn); 200 | for (request_id, key) in batch.into_iter() { 201 | if load_ret 202 | .get(&key) 203 | .and_then(|v| state.completed.insert(request_id, v.clone())) 204 | .is_none() 205 | { 206 | state.failed.insert(request_id, key); 207 | } 208 | } 209 | } 210 | } 211 | 212 | drop(state); 213 | 214 | (self.wait_for_work_fn)().await; 215 | 216 | let mut state = self.state.lock().await; 217 | 218 | let mut rest = Vec::new(); 219 | for (request_id, key) in requests.into_iter() { 220 | if let Some(v) = state.completed.remove(&request_id) { 221 | ret.insert(key, v); 222 | } else { 223 | rest.push((request_id, key)); 224 | } 225 | } 226 | 227 | if 
!rest.is_empty() { 228 | let batch = state.pending.drain().collect::>(); 229 | if !batch.is_empty() { 230 | let keys: Vec = batch 231 | .values() 232 | .cloned() 233 | .collect::>() 234 | .into_iter() 235 | .collect(); 236 | let mut load_fn = self.load_fn.lock().await; 237 | let load_ret = load_fn.load(keys.as_ref()).await; 238 | drop(load_fn); 239 | for (request_id, key) in batch.into_iter() { 240 | if load_ret 241 | .get(&key) 242 | .and_then(|v| state.completed.insert(request_id, v.clone())) 243 | .is_none() 244 | { 245 | state.failed.insert(request_id, key); 246 | } 247 | } 248 | } 249 | for (request_id, key) in rest.into_iter() { 250 | let v = state.completed.remove(&request_id).ok_or_else(|| { 251 | Error::new( 252 | ErrorKind::NotFound, 253 | format!( 254 | "could not lookup result for given key: {:?}", 255 | state.failed.remove(&request_id).expect("failed") 256 | ), 257 | ) 258 | })?; 259 | 260 | ret.insert(key, v); 261 | } 262 | } 263 | 264 | Ok(ret) 265 | } 266 | } 267 | -------------------------------------------------------------------------------- /src/runtime.rs: -------------------------------------------------------------------------------- 1 | // runtime-async-std 2 | #[cfg(feature = "runtime-async-std")] 3 | pub type Arc = async_std::sync::Arc; 4 | 5 | #[cfg(feature = "runtime-async-std")] 6 | pub type Mutex = async_std::sync::Mutex; 7 | 8 | #[cfg(feature = "runtime-async-std")] 9 | pub use async_std::task::yield_now; 10 | 11 | // runtime-tokio 12 | #[cfg(feature = "runtime-tokio")] 13 | pub type Arc = std::sync::Arc; 14 | 15 | #[cfg(feature = "runtime-tokio")] 16 | pub type Mutex = tokio::sync::Mutex; 17 | 18 | #[cfg(feature = "runtime-tokio")] 19 | pub use tokio::task::yield_now; 20 | -------------------------------------------------------------------------------- /tests/cached.rs: -------------------------------------------------------------------------------- 1 | use dataloader::cached::Loader; 2 | use dataloader::BatchFn; 3 | use 
futures::executor::block_on; 4 | use std::collections::{HashMap, HashSet}; 5 | use std::future::ready; 6 | use std::sync::{Arc, Mutex}; 7 | use std::{panic, thread}; 8 | 9 | struct MyLoadFn; 10 | 11 | impl BatchFn for MyLoadFn { 12 | async fn load(&mut self, keys: &[usize]) -> HashMap { 13 | let ret = keys 14 | .iter() 15 | .map(|v| (v.clone(), v.clone())) 16 | .collect::>(); 17 | ready(ret).await 18 | } 19 | } 20 | 21 | #[derive(Clone)] 22 | struct Object(usize); 23 | 24 | impl BatchFn for MyLoadFn { 25 | async fn load(&mut self, keys: &[usize]) -> HashMap { 26 | let ret = keys 27 | .iter() 28 | .map(|v| (v.clone(), Object(v.clone()))) 29 | .collect::>(); 30 | ready(ret).await 31 | } 32 | } 33 | 34 | #[test] 35 | fn assert_kinds() { 36 | fn _assert_send() {} 37 | fn _assert_sync() {} 38 | fn _assert_clone() {} 39 | _assert_send::>(); 40 | _assert_sync::>(); 41 | _assert_clone::>(); 42 | 43 | _assert_send::>(); 44 | _assert_sync::>(); 45 | _assert_clone::>(); 46 | } 47 | 48 | #[derive(Clone)] 49 | struct LoadFnWithHistory { 50 | loaded_keys: Arc>>, 51 | max_batch_loaded: Arc>, 52 | } 53 | 54 | impl BatchFn for LoadFnWithHistory { 55 | async fn load(&mut self, keys: &[usize]) -> HashMap { 56 | // println!("BatchFn load keys {:?}", keys); 57 | let mut loaded_keys = self.loaded_keys.lock().unwrap(); 58 | let mut max_batch_loaded = self.max_batch_loaded.lock().unwrap(); 59 | if keys.len() > *max_batch_loaded { 60 | *max_batch_loaded = keys.len(); 61 | } 62 | for k in keys { 63 | if loaded_keys.contains(k) { 64 | panic!("already loaded, loader should not request same key"); 65 | } 66 | } 67 | 68 | let ret = keys 69 | .iter() 70 | .map(|v| { 71 | loaded_keys.insert(v.clone()); 72 | (v.clone(), v.clone()) 73 | }) 74 | .collect::>(); 75 | ready(ret).await 76 | } 77 | } 78 | 79 | #[derive(Clone)] 80 | struct LoadFnForEmptyTest; 81 | 82 | impl BatchFn for LoadFnForEmptyTest { 83 | async fn load(&mut self, _keys: &[usize]) -> HashMap { 84 | ready(HashMap::new()).await 85 | } 
86 | } 87 | 88 | #[test] 89 | fn test_load() { 90 | let mut i = 0; 91 | while i < 1000 { 92 | let load_fn = LoadFnWithHistory { 93 | loaded_keys: Arc::new(Mutex::new(HashSet::new())), 94 | max_batch_loaded: Arc::new(Mutex::new(0)), 95 | }; 96 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 97 | 98 | let l1 = loader.clone(); 99 | let h1 = thread::spawn(move || { 100 | let r1 = l1.load(1); 101 | let r2 = l1.load(2); 102 | let r3 = l1.load(3); 103 | let r4 = l1.load_many(vec![2, 3, 4, 5, 6, 7, 8]); 104 | let f = futures::future::join4(r1, r2, r3, r4); 105 | let fv = block_on(f); 106 | 107 | let (_v1, _v2, _v3, v4) = fv; 108 | let mut v4_keys = v4.keys().cloned().collect::>(); 109 | v4_keys.sort(); 110 | assert_eq!(vec![2, 3, 4, 5, 6, 7, 8], v4_keys); 111 | }); 112 | 113 | let l2 = loader.clone(); 114 | let h2 = thread::spawn(move || { 115 | let r1 = l2.load(1); 116 | let r2 = l2.load(2); 117 | let r3 = l2.load(3); 118 | let r4 = l2.load(4); 119 | let f = futures::future::join4(r1, r2, r3, r4); 120 | let _fv = block_on(f); 121 | }); 122 | 123 | let l3 = loader.clone(); 124 | let h3 = thread::spawn(move || { 125 | let r1 = l3.load_many(vec![12, 13, 14, 1, 2, 3, 4]); 126 | let r2 = l3.load_many(vec![1, 2, 3, 4, 5, 6, 7]); 127 | let r3 = l3.load_many(vec![9, 10, 11, 12, 13, 14]); 128 | let f = futures::future::join3(r1, r2, r3); 129 | let fv = block_on(f); 130 | 131 | let (v1, v2, v3) = fv; 132 | let mut v1_keys = v1.keys().cloned().collect::>(); 133 | v1_keys.sort(); 134 | assert_eq!(vec![1, 2, 3, 4, 12, 13, 14], v1_keys); 135 | 136 | let mut v2_keys = v2.keys().cloned().collect::>(); 137 | v2_keys.sort(); 138 | assert_eq!(vec![1, 2, 3, 4, 5, 6, 7], v2_keys); 139 | 140 | let mut v3_keys = v3.keys().cloned().collect::>(); 141 | v3_keys.sort(); 142 | assert_eq!(vec![9, 10, 11, 12, 13, 14], v3_keys); 143 | }); 144 | 145 | h1.join().unwrap(); 146 | h2.join().unwrap(); 147 | h3.join().unwrap(); 148 | i += 1; 149 | 150 | let max_batch_size = 
loader.max_batch_size(); 151 | let max_batch_loaded = load_fn.max_batch_loaded.lock().unwrap(); 152 | assert!(*max_batch_loaded > 1); 153 | assert!( 154 | *max_batch_loaded <= max_batch_size, 155 | "max_batch_loaded({}) <= max_batch_size({})", 156 | *max_batch_loaded, 157 | max_batch_size 158 | ); 159 | } 160 | } 161 | 162 | #[test] 163 | #[should_panic(expected = "could not lookup result for given key: 1337")] 164 | fn test_load_unresolved_key() { 165 | let load_fn = LoadFnForEmptyTest; 166 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 167 | 168 | let h1 = thread::spawn(move || { 169 | let r1 = loader.load(1337); 170 | block_on(r1); 171 | }); 172 | 173 | let _ = h1.join().map_err(|e| panic::resume_unwind(e)); 174 | } 175 | 176 | #[test] 177 | fn test_try_load_unresolved_key() { 178 | let load_fn = LoadFnForEmptyTest; 179 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 180 | 181 | let h1 = thread::spawn(move || { 182 | let r1 = loader.try_load(1337); 183 | let fv = block_on(r1); 184 | 185 | assert!(fv.is_err()) 186 | }); 187 | 188 | let _ = h1.join().unwrap(); 189 | } 190 | 191 | #[test] 192 | fn test_try_load_unresolved_key_from_multiple_requests() { 193 | let load_fn = LoadFnForEmptyTest; 194 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 195 | 196 | let h1 = thread::spawn(move || { 197 | let r1 = loader.try_load(1337); 198 | let r2 = loader.try_load(1338); 199 | let (f1, f2) = block_on(futures::future::join(r1, r2)); 200 | 201 | assert!(f1.is_err()); 202 | assert!(f2.is_err()); 203 | }); 204 | 205 | let _ = h1.join().unwrap(); 206 | } 207 | 208 | #[test] 209 | fn test_try_load_unresolved_key_from_multiple_requests_beyond_max_batch_size() { 210 | let load_fn = LoadFnForEmptyTest; 211 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(2); 212 | 213 | let l1 = loader.clone(); 214 | let h1 = thread::spawn(move || { 215 | let r1 = l1.try_load(1337); 216 | let r2 = l1.try_load(1338); 217 | let 
r3 = l1.try_load(1339); 218 | 219 | let (f1, f2, f3) = block_on(futures::future::join3(r1, r2, r3)); 220 | assert!(f1.is_err()); 221 | assert!(f2.is_err()); 222 | assert!(f3.is_err()); 223 | }); 224 | 225 | let _ = h1.join().unwrap(); 226 | } 227 | 228 | #[test] 229 | fn test_load_many() { 230 | let mut i = 0; 231 | while i < 10 { 232 | let load_fn = LoadFnWithHistory { 233 | loaded_keys: Arc::new(Mutex::new(HashSet::new())), 234 | max_batch_loaded: Arc::new(Mutex::new(0)), 235 | }; 236 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 237 | 238 | let r = loader.load_many(vec![2, 3, 4, 5, 6, 7, 8]); 239 | let _fv = block_on(r); 240 | i += 1; 241 | 242 | let max_batch_size = loader.max_batch_size(); 243 | let max_batch_loaded = load_fn.max_batch_loaded.lock().unwrap(); 244 | assert!(*max_batch_loaded > 1); 245 | assert!( 246 | *max_batch_loaded <= max_batch_size, 247 | "max_batch_loaded({}) <= max_batch_size({})", 248 | *max_batch_loaded, 249 | max_batch_size 250 | ); 251 | } 252 | } 253 | -------------------------------------------------------------------------------- /tests/generic.rs: -------------------------------------------------------------------------------- 1 | use dataloader::cached::Loader; 2 | use dataloader::BatchFn; 3 | use futures::executor::block_on; 4 | use std::collections::HashMap; 5 | use std::future::ready; 6 | 7 | #[derive(Debug, Clone, Eq, PartialEq, Hash)] 8 | struct ObjectId(usize); 9 | 10 | trait Model { 11 | async fn load_many(keys: &[ObjectId]) -> HashMap> 12 | where 13 | Self: Sized; 14 | } 15 | 16 | #[derive(Debug, Clone)] 17 | struct MyModel; 18 | 19 | impl Model for MyModel { 20 | async fn load_many(keys: &[ObjectId]) -> HashMap> 21 | where 22 | Self: Sized, 23 | { 24 | let ret = keys.iter().map(|k| (k.clone(), Some(MyModel))).collect(); 25 | ready(ret).await 26 | } 27 | } 28 | 29 | pub struct ModelBatcher; 30 | 31 | impl BatchFn> for ModelBatcher 32 | where 33 | T: Model, 34 | { 35 | async fn load(&mut self, keys: 
&[ObjectId]) -> HashMap> { 36 | println!("load batch {:?}", keys); 37 | T::load_many(&keys).await 38 | } 39 | } 40 | 41 | #[test] 42 | fn test_generic() { 43 | let loader = Loader::new(ModelBatcher); 44 | let f = loader.load_many(vec![ObjectId(1), ObjectId(3), ObjectId(2)]); 45 | let my_model: HashMap> = block_on(f); 46 | println!("{:?}", my_model); 47 | } 48 | -------------------------------------------------------------------------------- /tests/non_cached.rs: -------------------------------------------------------------------------------- 1 | use dataloader::non_cached::Loader; 2 | use dataloader::BatchFn; 3 | use futures::executor::block_on; 4 | use std::collections::HashMap; 5 | use std::future::ready; 6 | use std::sync::{Arc, Mutex}; 7 | use std::{panic, thread}; 8 | 9 | struct MyLoadFn; 10 | 11 | impl BatchFn for MyLoadFn { 12 | async fn load(&mut self, keys: &[usize]) -> HashMap { 13 | let ret = keys 14 | .iter() 15 | .map(|v| (v.clone(), v.clone())) 16 | .collect::>(); 17 | ready(ret).await 18 | } 19 | } 20 | 21 | #[allow(dead_code)] 22 | #[derive(Clone)] 23 | struct Object(usize); 24 | 25 | impl BatchFn for MyLoadFn { 26 | async fn load(&mut self, keys: &[usize]) -> HashMap { 27 | let ret = keys 28 | .iter() 29 | .map(|v| (v.clone(), Object(v.clone()))) 30 | .collect::>(); 31 | ready(ret).await 32 | } 33 | } 34 | 35 | #[test] 36 | fn assert_kinds() { 37 | fn _assert_send() {} 38 | fn _assert_sync() {} 39 | fn _assert_clone() {} 40 | _assert_send::>(); 41 | _assert_sync::>(); 42 | _assert_clone::>(); 43 | 44 | _assert_send::>(); 45 | _assert_sync::>(); 46 | _assert_clone::>(); 47 | } 48 | 49 | #[derive(Clone)] 50 | struct LoadFnWithHistory { 51 | max_batch_loaded: Arc>, 52 | } 53 | 54 | impl BatchFn for LoadFnWithHistory { 55 | async fn load(&mut self, keys: &[usize]) -> HashMap { 56 | // println!("BatchFn load keys {:?}", keys); 57 | let mut max_batch_loaded = self.max_batch_loaded.lock().unwrap(); 58 | if keys.len() > *max_batch_loaded { 59 | 
*max_batch_loaded = keys.len(); 60 | } 61 | let ret = keys 62 | .iter() 63 | .map(|v| (v.clone(), v.clone())) 64 | .collect::>(); 65 | ready(ret).await 66 | } 67 | } 68 | 69 | #[derive(Clone)] 70 | struct LoadFnForEmptyTest; 71 | 72 | impl BatchFn for LoadFnForEmptyTest { 73 | async fn load(&mut self, _keys: &[usize]) -> HashMap { 74 | ready(HashMap::new()).await 75 | } 76 | } 77 | 78 | #[test] 79 | fn test_load() { 80 | let mut i = 0; 81 | while i < 1000 { 82 | let load_fn = LoadFnWithHistory { 83 | max_batch_loaded: Arc::new(Mutex::new(0)), 84 | }; 85 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 86 | 87 | let l1 = loader.clone(); 88 | let h1 = thread::spawn(move || { 89 | let r1 = l1.load(1); 90 | let r2 = l1.load(2); 91 | let r3 = l1.load(3); 92 | let r4 = l1.load_many(vec![2, 3, 4, 5, 6, 7, 8]); 93 | let f = futures::future::join4(r1, r2, r3, r4); 94 | let fv = block_on(f); 95 | let (_v1, _v2, _v3, v4) = fv; 96 | let mut v4_keys = v4.keys().cloned().collect::>(); 97 | v4_keys.sort(); 98 | assert_eq!(vec![2, 3, 4, 5, 6, 7, 8], v4_keys); 99 | }); 100 | 101 | let l2 = loader.clone(); 102 | let h2 = thread::spawn(move || { 103 | let r1 = l2.load(1); 104 | let r2 = l2.load(2); 105 | let r3 = l2.load(3); 106 | let r4 = l2.load(4); 107 | let f = futures::future::join4(r1, r2, r3, r4); 108 | let _fv = block_on(f); 109 | }); 110 | 111 | let l3 = loader.clone(); 112 | let h3 = thread::spawn(move || { 113 | let r1 = l3.load_many(vec![12, 13, 14, 1, 2, 3, 4]); 114 | let r2 = l3.load_many(vec![1, 2, 3, 4, 5, 6, 7]); 115 | let r3 = l3.load_many(vec![9, 10, 11, 12, 13, 14]); 116 | let f = futures::future::join3(r1, r2, r3); 117 | let fv = block_on(f); 118 | 119 | let (v1, v2, v3) = fv; 120 | let mut v1_keys = v1.keys().cloned().collect::>(); 121 | v1_keys.sort(); 122 | assert_eq!(vec![1, 2, 3, 4, 12, 13, 14], v1_keys); 123 | 124 | let mut v2_keys = v2.keys().cloned().collect::>(); 125 | v2_keys.sort(); 126 | assert_eq!(vec![1, 2, 3, 4, 5, 6, 7], 
v2_keys); 127 | 128 | let mut v3_keys = v3.keys().cloned().collect::>(); 129 | v3_keys.sort(); 130 | assert_eq!(vec![9, 10, 11, 12, 13, 14], v3_keys); 131 | }); 132 | 133 | h1.join().unwrap(); 134 | h2.join().unwrap(); 135 | h3.join().unwrap(); 136 | i += 1; 137 | 138 | let max_batch_size = loader.max_batch_size(); 139 | let max_batch_loaded = load_fn.max_batch_loaded.lock().unwrap(); 140 | assert!(*max_batch_loaded > 1); 141 | assert!( 142 | *max_batch_loaded <= max_batch_size, 143 | "max_batch_loaded({}) <= max_batch_size({})", 144 | *max_batch_loaded, 145 | max_batch_size 146 | ); 147 | } 148 | } 149 | 150 | #[test] 151 | #[should_panic(expected = "could not lookup result for given key: 1337")] 152 | fn test_load_unresolved_key() { 153 | let load_fn = LoadFnForEmptyTest; 154 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 155 | 156 | let h1 = thread::spawn(move || { 157 | let r1 = loader.load(1337); 158 | block_on(r1); 159 | }); 160 | 161 | let _ = h1.join().map_err(|e| panic::resume_unwind(e)); 162 | } 163 | 164 | #[test] 165 | fn test_load_safe_unresolved_key() { 166 | let load_fn = LoadFnForEmptyTest; 167 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 168 | 169 | let h1 = thread::spawn(move || { 170 | let r1 = loader.try_load(1337); 171 | let fv = block_on(r1); 172 | 173 | assert!(fv.is_err()) 174 | }); 175 | 176 | let _ = h1.join().unwrap(); 177 | } 178 | 179 | #[test] 180 | fn test_try_load_unresolved_key_multiple_requests() { 181 | let load_fn = LoadFnForEmptyTest; 182 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 183 | 184 | let l1 = loader.clone(); 185 | let h1 = thread::spawn(move || { 186 | let r1 = l1.try_load(1337); 187 | let r2 = l1.try_load(1338); 188 | 189 | let (f1, f2) = block_on(futures::future::join(r1, r2)); 190 | assert!(f1.is_err()); 191 | assert!(f2.is_err()); 192 | }); 193 | 194 | let _ = h1.join().unwrap(); 195 | } 196 | 197 | #[test] 198 | fn 
test_try_load_unresolved_key_multiple_requests_beyond_max_batch_size() { 199 | let load_fn = LoadFnForEmptyTest; 200 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(2); 201 | 202 | let l1 = loader.clone(); 203 | let h1 = thread::spawn(move || { 204 | let r1 = l1.try_load(1337); 205 | let r2 = l1.try_load(1338); 206 | let r3 = l1.try_load(1339); 207 | 208 | let (f1, f2, f3) = block_on(futures::future::join3(r1, r2, r3)); 209 | assert!(f1.is_err()); 210 | assert!(f2.is_err()); 211 | assert!(f3.is_err()); 212 | }); 213 | 214 | let _ = h1.join().unwrap(); 215 | } 216 | 217 | #[test] 218 | fn test_load_many() { 219 | let mut i = 0; 220 | while i < 10 { 221 | let load_fn = LoadFnWithHistory { 222 | max_batch_loaded: Arc::new(Mutex::new(0)), 223 | }; 224 | let loader = Loader::new(load_fn.clone()).with_max_batch_size(4); 225 | let r = loader.load_many(vec![2, 3, 4, 5, 6, 7, 8]); 226 | let _fv = block_on(r); 227 | i += 1; 228 | 229 | let max_batch_size = loader.max_batch_size(); 230 | let max_batch_loaded = load_fn.max_batch_loaded.lock().unwrap(); 231 | assert!(*max_batch_loaded > 1); 232 | assert!( 233 | *max_batch_loaded <= max_batch_size, 234 | "max_batch_loaded({}) <= max_batch_size({})", 235 | *max_batch_loaded, 236 | max_batch_size 237 | ); 238 | } 239 | } 240 | --------------------------------------------------------------------------------