├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── doc └── 16-pages-resources-buffered.svg └── examples ├── 01-pages-hello-ok.rs ├── 02-pages-stream-1-fail.rs ├── 03-pages-stream-2-fail.rs ├── 04-pages-stream-3-fail.rs ├── 05-pages-stream-4-ok.rs ├── 06-pages-stream-5-fail.rs ├── 07-pages-stream-6-fail.rs ├── 08-pages-stream-7-ok.rs ├── 09-pages-buffered-1-fail.rs ├── 10-pages-buffered-2-ok.rs ├── 11-pages-take-then-buffered-ok.rs ├── 12-pages-buffer_unordered-then-take-ok.rs ├── 13-pages-take-then-buffer_unordered-ok.rs ├── 14-pages-ids-ok.rs ├── 15-pages-ids-n-items-ok.rs ├── 16-pages-resources-ok.rs ├── 17-pages-resources-expanded-ok.rs ├── 18-pages-resources-map-insteadof-then-fail.rs ├── 19-pages-resources-map-insteadof-then-expanded-fail.rs ├── 20-pages-resources-then-insteadof-map-fail.rs ├── 21-pages-resources-then-insteadof-map-expanded-fail.rs ├── 22-ui-hello-1-fail.rs ├── 23-ui-hello-2-ok.rs ├── 24-ui-get_data-ok.rs ├── 25-ui-buffered-1-fail.rs ├── 26-ui-buffered-2-ok.rs ├── 27-ui-no-cancel-1-ok.rs ├── 28-ui-no-cancel-2-fail.rs ├── 29-ui-cancel-buffered-1-fail.rs ├── 30-ui-cancel-buffered-2-ok.rs ├── 31-ui-cancel-buffer_unordered-ok.rs ├── 32-ui-cancel-buffered-3-fail.rs ├── 33-ui-cancel-combinator-1-fail.rs ├── 34-ui-cancel-combinator-2-fail.rs ├── 35-ui-cancel-combinator-3-fail.rs ├── 36-ui-cancel-combinator-4-ok.rs ├── 37-ui-cancel-buffered-4-ok.rs └── plot.rs /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "async-examples" 3 | version = "0.1.0" 4 | authors = ["G. Endignoux "] 5 | edition = "2018" 6 | 7 | [dependencies] 8 | futures = "0.3.13" 9 | lazy_static = "1.4.0" 10 | rand = "0.8.3" 11 | tokio = { version = "1.4.0", features = ["macros", "rt-multi-thread", "time"] } 12 | 13 | # To plot the results 14 | plotters = "0.3.0" 15 | regex = "1" 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Guillaume Endignoux 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Rust asynchronous examples 2 | 3 | This repository contains examples of `async` programming in Rust, with the [tokio](https://crates.io/crates/tokio) and [futures](https://crates.io/crates/futures) libraries. 4 | They show how to compose [streams](https://docs.rs/futures/0.3.13/futures/stream/trait.Stream.html), in particular buffering and cancellation, to illustrate the following series of blog posts. 5 | 6 | - [Asynchronous streams in Rust (part 1) - Futures, buffering and mysterious compilation error messages](https://gendignoux.com/blog/2021/04/01/rust-async-streams-futures-part1.html) 7 | - [Asynchronous streams in Rust (part 2) - Cancelling expired requests](https://gendignoux.com/blog/2021/04/08/rust-async-streams-futures-part2.html) 8 | 9 | You can run each example with `cargo run --example <name>`, and I've tested them with Rust 1.50.0. 10 | 11 | ``` 12 | $ cargo --version 13 | cargo 1.50.0 (f04e7fab7 2021-02-04) 14 | ``` 15 | 16 | Example files ending in `-ok.rs` should compile and run, whereas those ending in `-fail.rs` illustrate a compilation error. 17 | 18 | The `plot` example takes the output of the other examples and displays a timeline of the asynchronous requests, exported in SVG format. 19 | It is built with the [regex](https://crates.io/crates/regex) and [plotters](https://crates.io/crates/plotters) libraries. 20 | 21 | ![Plot example](doc/16-pages-resources-buffered.svg) 22 | -------------------------------------------------------------------------------- /doc/16-pages-resources-buffered.svg: -------------------------------------------------------------------------------- [SVG timeline plot produced by the `plot` example: one span per asynchronous request, covering get_page(0) through get_page(4) and the corresponding fetch_resource calls (0-4, 10-14, 20-24, 30-34, 40-44); the SVG markup is not reproduced here.] -------------------------------------------------------------------------------- /examples/01-pages-hello-ok.rs: -------------------------------------------------------------------------------- 1 | use lazy_static::lazy_static; 2 | use rand::distributions::{Distribution, Uniform}; 3 | use std::time::Duration; 4 | use tokio::time::{sleep, Instant}; 5 | 6 | lazy_static!
{ 7 | static ref START_TIME: Instant = Instant::now(); 8 | } 9 | 10 | #[tokio::main] 11 | async fn main() { 12 | let page = get_page(42).await; 13 | println!("Page #42: {:?}", page); 14 | } 15 | 16 | async fn get_page(i: usize) -> Vec<usize> { 17 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 18 | println!( 19 | "[{}] # get_page({}) will complete in {} ms", 20 | START_TIME.elapsed().as_millis(), 21 | i, 22 | millis 23 | ); 24 | 25 | sleep(Duration::from_millis(millis)).await; 26 | println!( 27 | "[{}] # get_page({}) completed", 28 | START_TIME.elapsed().as_millis(), 29 | i 30 | ); 31 | 32 | (10 * i..10 * (i + 1)).collect() 33 | } 34 | -------------------------------------------------------------------------------- /examples/02-pages-stream-1-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!("First 10 pages:\n{:?}", get_n_pages(10).await); 14 | } 15 | 16 | async fn get_n_pages(n: usize) -> Vec<Vec<usize>> { 17 | get_pages().take(n).collect().await 18 | } 19 | 20 | fn get_pages() -> impl Stream<Item = Vec<usize>> { 21 | stream::iter(0..).map(|i| get_page(i)) 22 | } 23 | 24 | async fn get_page(i: usize) -> Vec<usize> { 25 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 26 | println!( 27 | "[{}] # get_page({}) will complete in {} ms", 28 | START_TIME.elapsed().as_millis(), 29 | i, 30 | millis 31 | ); 32 | 33 | sleep(Duration::from_millis(millis)).await; 34 | println!( 35 | "[{}] # get_page({}) completed", 36 | START_TIME.elapsed().as_millis(), 37 | i 38 | ); 39 | 40 | (10 * i..10 * (i + 1)).collect() 41 | } 42 | -------------------------------------------------------------------------------- /examples/03-pages-stream-2-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static!
{ 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!("First 10 pages:\n{:?}", get_n_pages(10).await); 14 | } 15 | 16 | async fn get_n_pages(n: usize) -> Vec<Vec<usize>> { 17 | get_pages().take(n).collect().await 18 | } 19 | 20 | fn get_pages() -> impl Stream<Item = Vec<usize>> { 21 | stream::iter(0..).map(|i| async { get_page(i) }) 22 | } 23 | 24 | async fn get_page(i: usize) -> Vec<usize> { 25 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 26 | println!( 27 | "[{}] # get_page({}) will complete in {} ms", 28 | START_TIME.elapsed().as_millis(), 29 | i, 30 | millis 31 | ); 32 | 33 | sleep(Duration::from_millis(millis)).await; 34 | println!( 35 | "[{}] # get_page({}) completed", 36 | START_TIME.elapsed().as_millis(), 37 | i 38 | ); 39 | 40 | (10 * i..10 * (i + 1)).collect() 41 | } 42 | -------------------------------------------------------------------------------- /examples/04-pages-stream-3-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!("First 10 pages:\n{:?}", get_n_pages(10).await); 14 | } 15 | 16 | async fn get_n_pages(n: usize) -> Vec<Vec<usize>> { 17 | get_pages().take(n).collect().await 18 | } 19 | 20 | fn get_pages() -> impl Stream<Item = Vec<usize>> { 21 | stream::iter(0..).map(|i| async { get_page(i).await }) 22 | } 23 | 24 | async fn get_page(i: usize) -> Vec<usize> { 25 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 26 | println!( 27 | "[{}] # get_page({}) will complete in {} ms", 28 | START_TIME.elapsed().as_millis(), 29 | i, 30 | millis 31 | ); 32 | 33 | sleep(Duration::from_millis(millis)).await; 34 | println!( 35 | "[{}] # get_page({}) completed", 36 | START_TIME.elapsed().as_millis(), 37 | i 38 | ); 39 | 40 | (10 * i..10 * (i + 1)).collect() 41 | } 42 | -------------------------------------------------------------------------------- /examples/05-pages-stream-4-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static!
{ 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!("First 10 pages:\n{:?}", get_n_pages(10).await); 14 | } 15 | 16 | async fn get_n_pages(n: usize) -> Vec> { 17 | get_pages().take(n).collect().await 18 | } 19 | 20 | fn get_pages() -> impl Stream> { 21 | stream::iter(0..).then(|i| get_page(i)) 22 | } 23 | 24 | async fn get_page(i: usize) -> Vec { 25 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 26 | println!( 27 | "[{}] # get_page({}) will complete in {} ms", 28 | START_TIME.elapsed().as_millis(), 29 | i, 30 | millis 31 | ); 32 | 33 | sleep(Duration::from_millis(millis)).await; 34 | println!( 35 | "[{}] # get_page({}) completed", 36 | START_TIME.elapsed().as_millis(), 37 | i 38 | ); 39 | 40 | (10 * i..10 * (i + 1)).collect() 41 | } 42 | -------------------------------------------------------------------------------- /examples/06-pages-stream-5-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!("First 10 pages:\n{:?}", get_n_pages(10).await); 14 | } 15 | 16 | async fn get_n_pages(n: usize) -> Vec> { 17 | get_pages().take(n).collect().await 18 | } 19 | 20 | fn get_pages() -> impl Stream> { 21 | stream::iter(0..).then(|i| async { get_page(i) }) 22 | } 23 | 24 | async fn get_page(i: usize) -> Vec { 25 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 26 | println!( 27 | "[{}] # get_page({}) will complete in {} ms", 28 | START_TIME.elapsed().as_millis(), 29 | i, 30 | millis 31 | ); 32 | 33 | sleep(Duration::from_millis(millis)).await; 34 | println!( 35 | "[{}] # get_page({}) completed", 36 | START_TIME.elapsed().as_millis(), 37 | i 38 | ); 39 | 40 | (10 * i..10 * (i + 1)).collect() 41 | } 42 | -------------------------------------------------------------------------------- /examples/07-pages-stream-6-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! 
{ 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!("First 10 pages:\n{:?}", get_n_pages(10).await); 14 | } 15 | 16 | async fn get_n_pages(n: usize) -> Vec> { 17 | get_pages().take(n).collect().await 18 | } 19 | 20 | fn get_pages() -> impl Stream> { 21 | stream::iter(0..).then(|i| async { get_page(i).await }) 22 | } 23 | 24 | async fn get_page(i: usize) -> Vec { 25 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 26 | println!( 27 | "[{}] # get_page({}) will complete in {} ms", 28 | START_TIME.elapsed().as_millis(), 29 | i, 30 | millis 31 | ); 32 | 33 | sleep(Duration::from_millis(millis)).await; 34 | println!( 35 | "[{}] # get_page({}) completed", 36 | START_TIME.elapsed().as_millis(), 37 | i 38 | ); 39 | 40 | (10 * i..10 * (i + 1)).collect() 41 | } 42 | -------------------------------------------------------------------------------- /examples/08-pages-stream-7-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!("First 10 pages:\n{:?}", get_n_pages(10).await); 14 | } 15 | 16 | async fn get_n_pages(n: usize) -> Vec> { 17 | get_pages().take(n).collect().await 18 | } 19 | 20 | fn get_pages() -> impl Stream> { 21 | stream::iter(0..).then(|i| async move { get_page(i).await }) 22 | } 23 | 24 | async fn get_page(i: usize) -> Vec { 25 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 26 | println!( 27 | "[{}] # get_page({}) will complete in {} ms", 28 | START_TIME.elapsed().as_millis(), 29 | i, 30 | millis 31 | ); 32 | 33 | sleep(Duration::from_millis(millis)).await; 34 | println!( 35 | "[{}] # get_page({}) completed", 36 | START_TIME.elapsed().as_millis(), 37 | i 38 | ); 39 | 40 | (10 * i..10 * (i + 1)).collect() 41 | } 42 | -------------------------------------------------------------------------------- /examples/09-pages-buffered-1-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! 
{ 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "First 10 pages, buffered by 5:\n{:?}", 15 | get_n_pages_buffered(10, 5).await 16 | ); 17 | } 18 | 19 | async fn get_n_pages_buffered(n: usize, buf_factor: usize) -> Vec> { 20 | get_pages_buffered(buf_factor).take(n).collect().await 21 | } 22 | 23 | fn get_pages_buffered(buf_factor: usize) -> impl Stream> { 24 | get_pages().buffered(buf_factor) 25 | } 26 | 27 | fn get_pages() -> impl Stream> { 28 | stream::iter(0..).then(|i| get_page(i)) 29 | } 30 | 31 | async fn get_page(i: usize) -> Vec { 32 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 33 | println!( 34 | "[{}] # get_page({}) will complete in {} ms", 35 | START_TIME.elapsed().as_millis(), 36 | i, 37 | millis 38 | ); 39 | 40 | sleep(Duration::from_millis(millis)).await; 41 | println!( 42 | "[{}] # get_page({}) completed", 43 | START_TIME.elapsed().as_millis(), 44 | i 45 | ); 46 | 47 | (10 * i..10 * (i + 1)).collect() 48 | } 49 | -------------------------------------------------------------------------------- /examples/10-pages-buffered-2-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Future, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "First 10 pages, buffered by 5:\n{:?}", 15 | get_n_pages_buffered(10, 5).await 16 | ); 17 | } 18 | 19 | async fn get_n_pages_buffered(n: usize, buf_factor: usize) -> Vec> { 20 | get_pages_buffered(buf_factor).take(n).collect().await 21 | } 22 | 23 | fn get_pages_buffered(buf_factor: usize) -> impl Stream> { 24 | get_pages_futures().buffered(buf_factor) 25 | } 26 | 27 | fn get_pages_futures() -> impl Stream>> { 28 | stream::iter(0..).map(|i| get_page(i)) 29 | } 30 | 31 | async fn get_page(i: usize) -> Vec { 32 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 33 | println!( 34 | "[{}] # get_page({}) will complete in {} ms", 35 | START_TIME.elapsed().as_millis(), 36 | i, 37 | millis 38 | ); 39 | 40 | sleep(Duration::from_millis(millis)).await; 41 | println!( 42 | "[{}] # get_page({}) completed", 43 | START_TIME.elapsed().as_millis(), 44 | i 45 | ); 46 | 47 | (10 * i..10 * (i + 1)).collect() 48 | } 49 | -------------------------------------------------------------------------------- /examples/11-pages-take-then-buffered-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Future, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! 
{ 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "First 10 pages, buffered by 5:\n{:?}", 15 | get_n_pages_buffered(10, 5).await 16 | ); 17 | } 18 | 19 | async fn get_n_pages_buffered(n: usize, buf_factor: usize) -> Vec> { 20 | get_pages_futures() 21 | .take(n) 22 | .buffered(buf_factor) 23 | .collect() 24 | .await 25 | } 26 | 27 | fn get_pages_futures() -> impl Stream>> { 28 | stream::iter(0..).map(|i| get_page(i)) 29 | } 30 | 31 | async fn get_page(i: usize) -> Vec { 32 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 33 | println!( 34 | "[{}] # get_page({}) will complete in {} ms", 35 | START_TIME.elapsed().as_millis(), 36 | i, 37 | millis 38 | ); 39 | 40 | sleep(Duration::from_millis(millis)).await; 41 | println!( 42 | "[{}] # get_page({}) completed", 43 | START_TIME.elapsed().as_millis(), 44 | i 45 | ); 46 | 47 | (10 * i..10 * (i + 1)).collect() 48 | } 49 | -------------------------------------------------------------------------------- /examples/12-pages-buffer_unordered-then-take-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Future, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "First 10 pages, buffer-unordered by 5:\n{:?}", 15 | get_n_pages_buffer_unordered(10, 5).await 16 | ); 17 | } 18 | 19 | async fn get_n_pages_buffer_unordered(n: usize, buf_factor: usize) -> Vec> { 20 | get_pages_futures() 21 | .buffer_unordered(buf_factor) 22 | .take(n) 23 | .collect() 24 | .await 25 | } 26 | 27 | fn get_pages_futures() -> impl Stream>> { 28 | stream::iter(0..).map(|i| get_page(i)) 29 | } 30 | 31 | async fn get_page(i: usize) -> Vec { 32 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 33 | println!( 34 | "[{}] # get_page({}) will complete in {} ms", 35 | START_TIME.elapsed().as_millis(), 36 | i, 37 | millis 38 | ); 39 | 40 | sleep(Duration::from_millis(millis)).await; 41 | println!( 42 | "[{}] # get_page({}) completed", 43 | START_TIME.elapsed().as_millis(), 44 | i 45 | ); 46 | 47 | (10 * i..10 * (i + 1)).collect() 48 | } 49 | -------------------------------------------------------------------------------- /examples/13-pages-take-then-buffer_unordered-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Future, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! 
{ 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "First 10 pages, buffer-unordered by 5:\n{:?}", 15 | get_n_pages_buffer_unordered(10, 5).await 16 | ); 17 | } 18 | 19 | async fn get_n_pages_buffer_unordered(n: usize, buf_factor: usize) -> Vec> { 20 | get_pages_futures() 21 | .take(n) 22 | .buffer_unordered(buf_factor) 23 | .collect() 24 | .await 25 | } 26 | 27 | fn get_pages_futures() -> impl Stream>> { 28 | stream::iter(0..).map(|i| get_page(i)) 29 | } 30 | 31 | async fn get_page(i: usize) -> Vec { 32 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 33 | println!( 34 | "[{}] # get_page({}) will complete in {} ms", 35 | START_TIME.elapsed().as_millis(), 36 | i, 37 | millis 38 | ); 39 | 40 | sleep(Duration::from_millis(millis)).await; 41 | println!( 42 | "[{}] # get_page({}) completed", 43 | START_TIME.elapsed().as_millis(), 44 | i 45 | ); 46 | 47 | (10 * i..10 * (i + 1)).collect() 48 | } 49 | -------------------------------------------------------------------------------- /examples/14-pages-ids-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Future, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "IDs from first 5 pages:\n{:?}", 15 | get_ids_n_pages(5).collect::>().await 16 | ); 17 | println!( 18 | "IDs from first 5 pages, buffered by 3:\n{:?}", 19 | get_ids_n_pages_buffered(5, 3).collect::>().await 20 | ); 21 | println!( 22 | "IDs from first 5 pages, buffer-unordered by 3:\n{:?}", 23 | get_ids_n_pages_buffer_unordered(5, 3) 24 | .collect::>() 25 | .await 26 | ); 27 | } 28 | 29 | fn get_ids_n_pages(n: usize) -> impl Stream { 30 | get_pages().take(n).flat_map(|page| stream::iter(page)) 31 | } 32 | 33 | fn get_ids_n_pages_buffered(n: usize, buf_factor: usize) -> impl Stream { 34 | get_pages_futures() 35 | .take(n) 36 | .buffered(buf_factor) 37 | .flat_map(|page| stream::iter(page)) 38 | } 39 | 40 | fn get_ids_n_pages_buffer_unordered(n: usize, buf_factor: usize) -> impl Stream { 41 | get_pages_futures() 42 | .take(n) 43 | .buffer_unordered(buf_factor) 44 | .flat_map(|page| stream::iter(page)) 45 | } 46 | 47 | fn get_pages() -> impl Stream> { 48 | stream::iter(0..).then(|i| get_page(i)) 49 | } 50 | 51 | fn get_pages_futures() -> impl Stream>> { 52 | stream::iter(0..).map(|i| get_page(i)) 53 | } 54 | 55 | async fn get_page(i: usize) -> Vec { 56 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 57 | println!( 58 | "[{}] # get_page({}) will complete in {} ms", 59 | START_TIME.elapsed().as_millis(), 60 | i, 61 | millis 62 | ); 63 | 64 | sleep(Duration::from_millis(millis)).await; 65 | println!( 66 | "[{}] # get_page({}) completed", 67 | START_TIME.elapsed().as_millis(), 68 | i 69 | ); 70 | 71 | (10 * i..10 * i + 5).collect() 72 | } 73 | -------------------------------------------------------------------------------- /examples/15-pages-ids-n-items-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Future, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 
7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "IDs from first 25 items, buffered by 3 pages:\n{:?}", 15 | get_ids_n_items_buffered(25, 3).collect::>().await 16 | ); 17 | } 18 | 19 | fn get_ids_n_items_buffered(n: usize, buf_factor: usize) -> impl Stream { 20 | get_pages_futures() 21 | .buffered(buf_factor) 22 | .flat_map(|page| stream::iter(page)) 23 | .take(n) 24 | } 25 | 26 | fn get_pages_futures() -> impl Stream>> { 27 | stream::iter(0..).map(|i| get_page(i)) 28 | } 29 | 30 | async fn get_page(i: usize) -> Vec { 31 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 32 | println!( 33 | "[{}] # get_page({}) will complete in {} ms", 34 | START_TIME.elapsed().as_millis(), 35 | i, 36 | millis 37 | ); 38 | 39 | sleep(Duration::from_millis(millis)).await; 40 | println!( 41 | "[{}] # get_page({}) completed", 42 | START_TIME.elapsed().as_millis(), 43 | i 44 | ); 45 | 46 | (10 * i..10 * i + 5).collect() 47 | } 48 | -------------------------------------------------------------------------------- /examples/16-pages-resources-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Future, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "Resources from first 5 pages:\n{:?}", 15 | collect_resources_n_pages(5).await 16 | ); 17 | println!( 18 | "Resources from first 5 pages, buffered by 3:\n{:?}", 19 | collect_resources_n_pages_buffered(5, 3).await 20 | ); 21 | println!( 22 | "Resources from first 5 pages, buffer-unordered by 3:\n{:?}", 23 | collect_resources_n_pages_buffer_unordered(5, 3).await 24 | ); 25 | } 26 | 27 | async fn collect_resources_n_pages(n: usize) -> Vec { 28 | get_ids_n_pages(n) 29 | .then(|id| fetch_resource(id)) 30 | .collect() 31 | .await 32 | } 33 | 34 | async fn collect_resources_n_pages_buffered(n: usize, buf_factor: usize) -> Vec { 35 | get_ids_n_pages_buffered(n, buf_factor) 36 | .map(|id| fetch_resource(id)) 37 | .buffered(buf_factor) 38 | .collect() 39 | .await 40 | } 41 | 42 | async fn collect_resources_n_pages_buffer_unordered(n: usize, buf_factor: usize) -> Vec { 43 | get_ids_n_pages_buffer_unordered(n, buf_factor) 44 | .map(|id| fetch_resource(id)) 45 | .buffer_unordered(buf_factor) 46 | .collect() 47 | .await 48 | } 49 | 50 | fn get_ids_n_pages(n: usize) -> impl Stream { 51 | get_pages().take(n).flat_map(|page| stream::iter(page)) 52 | } 53 | 54 | fn get_ids_n_pages_buffered(n: usize, buf_factor: usize) -> impl Stream { 55 | get_pages_futures() 56 | .take(n) 57 | .buffered(buf_factor) 58 | .flat_map(|page| stream::iter(page)) 59 | } 60 | 61 | fn get_ids_n_pages_buffer_unordered(n: usize, buf_factor: usize) -> impl Stream { 62 | get_pages_futures() 63 | .take(n) 64 | .buffer_unordered(buf_factor) 65 | .flat_map(|page| stream::iter(page)) 66 | } 67 | 68 | fn get_pages() -> impl Stream> { 69 | stream::iter(0..).then(|i| get_page(i)) 70 | } 71 | 72 | fn get_pages_futures() -> impl Stream>> { 73 | stream::iter(0..).map(|i| get_page(i)) 74 | } 75 | 76 | async fn get_page(i: usize) -> Vec { 77 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 78 | println!( 79 | "[{}] # get_page({}) will complete in {} ms", 80 | 
START_TIME.elapsed().as_millis(), 81 | i, 82 | millis 83 | ); 84 | 85 | sleep(Duration::from_millis(millis)).await; 86 | println!( 87 | "[{}] # get_page({}) completed", 88 | START_TIME.elapsed().as_millis(), 89 | i 90 | ); 91 | 92 | (10 * i..10 * i + 5).collect() 93 | } 94 | 95 | #[derive(Clone, Copy)] 96 | struct Resource(usize); 97 | 98 | impl std::fmt::Debug for Resource { 99 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 100 | f.write_fmt(format_args!("r:{}", self.0)) 101 | } 102 | } 103 | 104 | async fn fetch_resource(i: usize) -> Resource { 105 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 106 | println!( 107 | "[{}] ## fetch_resource({}) will complete in {} ms", 108 | START_TIME.elapsed().as_millis(), 109 | i, 110 | millis 111 | ); 112 | 113 | sleep(Duration::from_millis(millis)).await; 114 | println!( 115 | "[{}] ## fetch_resource({}) completed", 116 | START_TIME.elapsed().as_millis(), 117 | i 118 | ); 119 | Resource(i) 120 | } 121 | -------------------------------------------------------------------------------- /examples/17-pages-resources-expanded-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "Resources from first 5 pages:\n{:?}", 15 | collect_resources_n_pages(5).await 16 | ); 17 | println!( 18 | "Resources from first 5 pages, buffered by 3:\n{:?}", 19 | collect_resources_n_pages_buffered(5, 3).await 20 | ); 21 | println!( 22 | "Resources from first 5 pages, buffer-unordered by 3:\n{:?}", 23 | collect_resources_n_pages_buffer_unordered(5, 3).await 24 | ); 25 | } 26 | 27 | async fn collect_resources_n_pages(n: usize) -> Vec { 28 | stream::iter(0..) 29 | .then(|i| get_page(i)) 30 | .take(n) 31 | .flat_map(|page| stream::iter(page)) 32 | .then(|id| fetch_resource(id)) 33 | .collect() 34 | .await 35 | } 36 | 37 | async fn collect_resources_n_pages_buffered(n: usize, buf_factor: usize) -> Vec { 38 | stream::iter(0..) 39 | .map(|i| get_page(i)) 40 | .take(n) 41 | .buffered(buf_factor) 42 | .flat_map(|page| stream::iter(page)) 43 | .map(|id| fetch_resource(id)) 44 | .buffered(buf_factor) 45 | .collect() 46 | .await 47 | } 48 | 49 | async fn collect_resources_n_pages_buffer_unordered(n: usize, buf_factor: usize) -> Vec { 50 | stream::iter(0..) 
51 | .map(|i| get_page(i)) 52 | .take(n) 53 | .buffer_unordered(buf_factor) 54 | .flat_map(|page| stream::iter(page)) 55 | .map(|id| fetch_resource(id)) 56 | .buffer_unordered(buf_factor) 57 | .collect() 58 | .await 59 | } 60 | 61 | async fn get_page(i: usize) -> Vec { 62 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 63 | println!( 64 | "[{}] # get_page({}) will complete in {} ms", 65 | START_TIME.elapsed().as_millis(), 66 | i, 67 | millis 68 | ); 69 | 70 | sleep(Duration::from_millis(millis)).await; 71 | println!( 72 | "[{}] # get_page({}) completed", 73 | START_TIME.elapsed().as_millis(), 74 | i 75 | ); 76 | 77 | (10 * i..10 * i + 5).collect() 78 | } 79 | 80 | #[derive(Clone, Copy)] 81 | struct Resource(usize); 82 | 83 | impl std::fmt::Debug for Resource { 84 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 85 | f.write_fmt(format_args!("r:{}", self.0)) 86 | } 87 | } 88 | 89 | async fn fetch_resource(i: usize) -> Resource { 90 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 91 | println!( 92 | "[{}] ## fetch_resource({}) will complete in {} ms", 93 | START_TIME.elapsed().as_millis(), 94 | i, 95 | millis 96 | ); 97 | 98 | sleep(Duration::from_millis(millis)).await; 99 | println!( 100 | "[{}] ## fetch_resource({}) completed", 101 | START_TIME.elapsed().as_millis(), 102 | i 103 | ); 104 | Resource(i) 105 | } 106 | -------------------------------------------------------------------------------- /examples/18-pages-resources-map-insteadof-then-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! 
{ 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "Resources from first 5 pages:\n{:?}", 15 | collect_resources_n_pages(5).await 16 | ); 17 | } 18 | 19 | async fn collect_resources_n_pages(n: usize) -> Vec { 20 | get_ids_n_pages(n) 21 | .map(|id| fetch_resource(id)) 22 | .collect() 23 | .await 24 | } 25 | 26 | fn get_ids_n_pages(n: usize) -> impl Stream { 27 | get_pages().take(n).flat_map(|page| stream::iter(page)) 28 | } 29 | 30 | fn get_pages() -> impl Stream> { 31 | stream::iter(0..).then(|i| get_page(i)) 32 | } 33 | 34 | async fn get_page(i: usize) -> Vec { 35 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 36 | println!( 37 | "[{}] # get_page({}) will complete in {} ms", 38 | START_TIME.elapsed().as_millis(), 39 | i, 40 | millis 41 | ); 42 | 43 | sleep(Duration::from_millis(millis)).await; 44 | println!( 45 | "[{}] # get_page({}) completed", 46 | START_TIME.elapsed().as_millis(), 47 | i 48 | ); 49 | 50 | (10 * i..10 * i + 5).collect() 51 | } 52 | 53 | #[derive(Clone, Copy)] 54 | struct Resource(usize); 55 | 56 | impl std::fmt::Debug for Resource { 57 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 58 | f.write_fmt(format_args!("r:{}", self.0)) 59 | } 60 | } 61 | 62 | async fn fetch_resource(i: usize) -> Resource { 63 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 64 | println!( 65 | "[{}] ## fetch_resource({}) will complete in {} ms", 66 | START_TIME.elapsed().as_millis(), 67 | i, 68 | millis 69 | ); 70 | 71 | sleep(Duration::from_millis(millis)).await; 72 | println!( 73 | "[{}] ## fetch_resource({}) completed", 74 | START_TIME.elapsed().as_millis(), 75 | i 76 | ); 77 | Resource(i) 78 | } 79 | -------------------------------------------------------------------------------- /examples/19-pages-resources-map-insteadof-then-expanded-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "Resources from first 5 pages:\n{:?}", 15 | collect_resources_n_pages(5).await 16 | ); 17 | } 18 | 19 | async fn collect_resources_n_pages(n: usize) -> Vec { 20 | stream::iter(0..) 
21 | .then(|i| get_page(i)) 22 | .take(n) 23 | .flat_map(|page| stream::iter(page)) 24 | .map(|id| fetch_resource(id)) 25 | .collect() 26 | .await 27 | } 28 | 29 | async fn get_page(i: usize) -> Vec { 30 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 31 | println!( 32 | "[{}] # get_page({}) will complete in {} ms", 33 | START_TIME.elapsed().as_millis(), 34 | i, 35 | millis 36 | ); 37 | 38 | sleep(Duration::from_millis(millis)).await; 39 | println!( 40 | "[{}] # get_page({}) completed", 41 | START_TIME.elapsed().as_millis(), 42 | i 43 | ); 44 | 45 | (10 * i..10 * i + 5).collect() 46 | } 47 | 48 | #[derive(Clone, Copy)] 49 | struct Resource(usize); 50 | 51 | impl std::fmt::Debug for Resource { 52 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 53 | f.write_fmt(format_args!("r:{}", self.0)) 54 | } 55 | } 56 | 57 | async fn fetch_resource(i: usize) -> Resource { 58 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 59 | println!( 60 | "[{}] ## fetch_resource({}) will complete in {} ms", 61 | START_TIME.elapsed().as_millis(), 62 | i, 63 | millis 64 | ); 65 | 66 | sleep(Duration::from_millis(millis)).await; 67 | println!( 68 | "[{}] ## fetch_resource({}) completed", 69 | START_TIME.elapsed().as_millis(), 70 | i 71 | ); 72 | Resource(i) 73 | } 74 | -------------------------------------------------------------------------------- /examples/20-pages-resources-then-insteadof-map-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, Future, Stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "Resources from first 5 pages, buffered by 3:\n{:?}", 15 | collect_resources_n_pages_buffered(5, 3).await 16 | ); 17 | } 18 | 19 | async fn collect_resources_n_pages_buffered(n: usize, buf_factor: usize) -> Vec { 20 | get_ids_n_pages_buffered(n, buf_factor) 21 | .then(|id| fetch_resource(id)) 22 | .buffered(buf_factor) 23 | .collect() 24 | .await 25 | } 26 | 27 | fn get_ids_n_pages_buffered(n: usize, buf_factor: usize) -> impl Stream { 28 | get_pages_futures() 29 | .take(n) 30 | .buffered(buf_factor) 31 | .flat_map(|page| stream::iter(page)) 32 | } 33 | 34 | fn get_pages_futures() -> impl Stream>> { 35 | stream::iter(0..).map(|i| get_page(i)) 36 | } 37 | 38 | async fn get_page(i: usize) -> Vec { 39 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 40 | println!( 41 | "[{}] # get_page({}) will complete in {} ms", 42 | START_TIME.elapsed().as_millis(), 43 | i, 44 | millis 45 | ); 46 | 47 | sleep(Duration::from_millis(millis)).await; 48 | println!( 49 | "[{}] # get_page({}) completed", 50 | START_TIME.elapsed().as_millis(), 51 | i 52 | ); 53 | 54 | (10 * i..10 * i + 5).collect() 55 | } 56 | 57 | #[derive(Clone, Copy)] 58 | struct Resource(usize); 59 | 60 | impl std::fmt::Debug for Resource { 61 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 62 | f.write_fmt(format_args!("r:{}", self.0)) 63 | } 64 | } 65 | 66 | async fn fetch_resource(i: usize) -> Resource { 67 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 68 | println!( 69 | "[{}] ## fetch_resource({}) will complete in {} ms", 70 | START_TIME.elapsed().as_millis(), 71 | i, 72 | millis 73 | ); 74 | 75 | 
sleep(Duration::from_millis(millis)).await; 76 | println!( 77 | "[{}] ## fetch_resource({}) completed", 78 | START_TIME.elapsed().as_millis(), 79 | i 80 | ); 81 | Resource(i) 82 | } 83 | -------------------------------------------------------------------------------- /examples/21-pages-resources-then-insteadof-map-expanded-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::{stream, StreamExt}; 2 | use lazy_static::lazy_static; 3 | use rand::distributions::{Distribution, Uniform}; 4 | use std::time::Duration; 5 | use tokio::time::{sleep, Instant}; 6 | 7 | lazy_static! { 8 | static ref START_TIME: Instant = Instant::now(); 9 | } 10 | 11 | #[tokio::main] 12 | async fn main() { 13 | println!( 14 | "Resources from first 5 pages, buffered by 3:\n{:?}", 15 | collect_resources_n_pages_buffered(5, 3).await 16 | ); 17 | } 18 | 19 | async fn collect_resources_n_pages_buffered(n: usize, buf_factor: usize) -> Vec { 20 | stream::iter(0..) 21 | .map(|i| get_page(i)) 22 | .take(n) 23 | .buffered(buf_factor) 24 | .flat_map(|page| stream::iter(page)) 25 | .then(|id| fetch_resource(id)) 26 | .buffered(buf_factor) 27 | .collect() 28 | .await 29 | } 30 | 31 | async fn get_page(i: usize) -> Vec { 32 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 33 | println!( 34 | "[{}] # get_page({}) will complete in {} ms", 35 | START_TIME.elapsed().as_millis(), 36 | i, 37 | millis 38 | ); 39 | 40 | sleep(Duration::from_millis(millis)).await; 41 | println!( 42 | "[{}] # get_page({}) completed", 43 | START_TIME.elapsed().as_millis(), 44 | i 45 | ); 46 | 47 | (10 * i..10 * i + 5).collect() 48 | } 49 | 50 | #[derive(Clone, Copy)] 51 | struct Resource(usize); 52 | 53 | impl std::fmt::Debug for Resource { 54 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 55 | f.write_fmt(format_args!("r:{}", self.0)) 56 | } 57 | } 58 | 59 | async fn fetch_resource(i: usize) -> Resource { 60 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 61 | println!( 62 | "[{}] ## fetch_resource({}) will complete in {} ms", 63 | START_TIME.elapsed().as_millis(), 64 | i, 65 | millis 66 | ); 67 | 68 | sleep(Duration::from_millis(millis)).await; 69 | println!( 70 | "[{}] ## fetch_resource({}) completed", 71 | START_TIME.elapsed().as_millis(), 72 | i 73 | ); 74 | Resource(i) 75 | } 76 | -------------------------------------------------------------------------------- /examples/22-ui-hello-1-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use tokio::spawn; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Box> { 8 | println!("Send 10 queries"); 9 | send_receive(10).await?; 10 | Ok(()) 11 | } 12 | 13 | async fn send_receive(n: usize) -> Result<(), Box> { 14 | let (tx, rx) = unbounded(); 15 | 16 | let send = spawn(async move { 17 | send_task(tx, n).await; 18 | }); 19 | 20 | let receive = spawn(async move { 21 | receive_task(rx).await; 22 | }); 23 | 24 | let (send_res, receive_res) = join!(send, receive); 25 | send_res?; 26 | receive_res?; 27 | Ok(()) 28 | } 29 | 30 | async fn send_task(tx: UnboundedSender, n: usize) { 31 | for i in 0..n { 32 | tx.unbounded_send(i).unwrap(); 33 | } 34 | } 35 | 36 | async fn receive_task(rx: UnboundedReceiver) { 37 | rx.for_each(|i| println!("# query({})", i)).await; 38 | } 39 | 
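The failing example above (`22-ui-hello-1-fail.rs`) and the fixed one below (`23-ui-hello-2-ok.rs`) differ only in the closure passed to `StreamExt::for_each`, which has to return a future, while a bare `println!` returns `()`. A minimal self-contained sketch of that difference (a hypothetical illustration, not one of the numbered examples; it reuses the same `unbounded` channel and tokio runtime as the examples):

```rust
use futures::channel::mpsc::unbounded;
use futures::stream::StreamExt;

#[tokio::main]
async fn main() {
    let (tx, rx) = unbounded::<usize>();
    for i in 0..3 {
        tx.unbounded_send(i).unwrap();
    }
    // Close the sender so the stream ends and for_each can finish.
    drop(tx);

    // Does not compile: the closure returns `()`, but for_each expects a Future.
    // rx.for_each(|i| println!("# query({})", i)).await;

    // Compiles: the async block is a Future<Output = ()>, polled once per item.
    rx.for_each(|i| async move { println!("# query({})", i) }).await;
}
```

The only change from the failing version is the `async move` wrapper around the closure body.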
-------------------------------------------------------------------------------- /examples/23-ui-hello-2-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use tokio::spawn; 5 | 6 | #[tokio::main] 7 | async fn main() -> Result<(), Box> { 8 | println!("Send 10 queries"); 9 | send_receive(10).await?; 10 | Ok(()) 11 | } 12 | 13 | async fn send_receive(n: usize) -> Result<(), Box> { 14 | let (tx, rx) = unbounded(); 15 | 16 | let send = spawn(async move { 17 | send_task(tx, n).await; 18 | }); 19 | 20 | let receive = spawn(async move { 21 | receive_task(rx).await; 22 | }); 23 | 24 | let (send_res, receive_res) = join!(send, receive); 25 | send_res?; 26 | receive_res?; 27 | Ok(()) 28 | } 29 | 30 | async fn send_task(tx: UnboundedSender, n: usize) { 31 | for i in 0..n { 32 | tx.unbounded_send(i).unwrap(); 33 | } 34 | } 35 | 36 | async fn receive_task(rx: UnboundedReceiver) { 37 | rx.for_each(|i| async move { println!("# query({})", i) }) 38 | .await; 39 | } 40 | -------------------------------------------------------------------------------- /examples/24-ui-get_data-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::time::Duration; 7 | use tokio::spawn; 8 | use tokio::time::{sleep, Instant}; 9 | 10 | lazy_static! { 11 | static ref START_TIME: Instant = Instant::now(); 12 | } 13 | 14 | #[tokio::main] 15 | async fn main() -> Result<(), Box> { 16 | println!("Resolve 10 queries"); 17 | send_receive_queries(10).await?; 18 | Ok(()) 19 | } 20 | 21 | async fn send_receive_queries(n: usize) -> Result<(), Box> { 22 | let (tx, rx) = unbounded(); 23 | 24 | let send = spawn(async move { 25 | send_task(tx, n).await; 26 | }); 27 | 28 | let receive = spawn(async move { 29 | receive_task_queries(rx).await; 30 | }); 31 | 32 | let (send_res, receive_res) = join!(send, receive); 33 | send_res?; 34 | receive_res?; 35 | Ok(()) 36 | } 37 | 38 | async fn send_task(tx: UnboundedSender, n: usize) { 39 | for i in 0..n { 40 | tx.unbounded_send(i).unwrap(); 41 | } 42 | } 43 | 44 | async fn receive_task_queries(rx: UnboundedReceiver) { 45 | rx.for_each(|i| async move { 46 | let data = get_data(i).await; 47 | println!("## data = {:?}", data); 48 | }) 49 | .await; 50 | } 51 | 52 | #[derive(Clone, Copy)] 53 | struct Data(usize); 54 | 55 | impl std::fmt::Debug for Data { 56 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 57 | f.write_fmt(format_args!("d:{}", self.0)) 58 | } 59 | } 60 | 61 | async fn get_data(i: usize) -> Data { 62 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 63 | println!( 64 | "[{}] ## get_data({}) will complete in {} ms", 65 | START_TIME.elapsed().as_millis(), 66 | i, 67 | millis 68 | ); 69 | 70 | sleep(Duration::from_millis(millis)).await; 71 | println!( 72 | "[{}] ## get_data({}) completed", 73 | START_TIME.elapsed().as_millis(), 74 | i 75 | ); 76 | Data(i) 77 | } 78 | -------------------------------------------------------------------------------- /examples/25-ui-buffered-1-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, 
UnboundedReceiver, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::time::Duration; 7 | use tokio::spawn; 8 | use tokio::time::{sleep, Instant}; 9 | 10 | lazy_static! { 11 | static ref START_TIME: Instant = Instant::now(); 12 | } 13 | 14 | #[tokio::main] 15 | async fn main() -> Result<(), Box> { 16 | println!("Resolve 10 queries, buffered by 5"); 17 | send_receive_queries_buffered(10, 5).await?; 18 | Ok(()) 19 | } 20 | 21 | async fn send_receive_queries_buffered( 22 | n: usize, 23 | buf_factor: usize, 24 | ) -> Result<(), Box> { 25 | let (tx, rx) = unbounded(); 26 | 27 | let send = spawn(async move { 28 | send_task(tx, n).await; 29 | }); 30 | 31 | let receive = spawn(async move { 32 | receive_task_queries_buffered(rx, buf_factor).await; 33 | }); 34 | 35 | let (send_res, receive_res) = join!(send, receive); 36 | send_res?; 37 | receive_res?; 38 | Ok(()) 39 | } 40 | 41 | async fn send_task(tx: UnboundedSender, n: usize) { 42 | for i in 0..n { 43 | tx.unbounded_send(i).unwrap(); 44 | } 45 | } 46 | 47 | async fn receive_task_queries_buffered(rx: UnboundedReceiver, buf_factor: usize) { 48 | rx.buffered(buf_factor) 49 | .for_each(|i| async move { 50 | let data = get_data(i).await; 51 | println!("## data = {:?}", data); 52 | }) 53 | .await; 54 | } 55 | 56 | #[derive(Clone, Copy)] 57 | struct Data(usize); 58 | 59 | impl std::fmt::Debug for Data { 60 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 61 | f.write_fmt(format_args!("d:{}", self.0)) 62 | } 63 | } 64 | 65 | async fn get_data(i: usize) -> Data { 66 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 67 | println!( 68 | "[{}] ## get_data({}) will complete in {} ms", 69 | START_TIME.elapsed().as_millis(), 70 | i, 71 | millis 72 | ); 73 | 74 | sleep(Duration::from_millis(millis)).await; 75 | println!( 76 | "[{}] ## get_data({}) completed", 77 | START_TIME.elapsed().as_millis(), 78 | i 79 | ); 80 | Data(i) 81 | } 82 | -------------------------------------------------------------------------------- /examples/26-ui-buffered-2-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::time::Duration; 7 | use tokio::spawn; 8 | use tokio::time::{sleep, Instant}; 9 | 10 | lazy_static! 
{ 11 | static ref START_TIME: Instant = Instant::now(); 12 | } 13 | 14 | #[tokio::main] 15 | async fn main() -> Result<(), Box> { 16 | println!("Resolve 10 queries, buffered by 5"); 17 | send_receive_queries_buffered(10, 5).await?; 18 | Ok(()) 19 | } 20 | 21 | async fn send_receive_queries_buffered( 22 | n: usize, 23 | buf_factor: usize, 24 | ) -> Result<(), Box> { 25 | let (tx, rx) = unbounded(); 26 | 27 | let send = spawn(async move { 28 | send_task(tx, n).await; 29 | }); 30 | 31 | let receive = spawn(async move { 32 | receive_task_queries_buffered(rx, buf_factor).await; 33 | }); 34 | 35 | let (send_res, receive_res) = join!(send, receive); 36 | send_res?; 37 | receive_res?; 38 | Ok(()) 39 | } 40 | 41 | async fn send_task(tx: UnboundedSender, n: usize) { 42 | for i in 0..n { 43 | tx.unbounded_send(i).unwrap(); 44 | } 45 | } 46 | 47 | async fn receive_task_queries_buffered(rx: UnboundedReceiver, buf_factor: usize) { 48 | rx.map(|i| get_data(i)) 49 | .buffered(buf_factor) 50 | .for_each(|data| async move { 51 | println!("## data = {:?}", data); 52 | }) 53 | .await; 54 | } 55 | 56 | #[derive(Clone, Copy)] 57 | struct Data(usize); 58 | 59 | impl std::fmt::Debug for Data { 60 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 61 | f.write_fmt(format_args!("d:{}", self.0)) 62 | } 63 | } 64 | 65 | async fn get_data(i: usize) -> Data { 66 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 67 | println!( 68 | "[{}] ## get_data({}) will complete in {} ms", 69 | START_TIME.elapsed().as_millis(), 70 | i, 71 | millis 72 | ); 73 | 74 | sleep(Duration::from_millis(millis)).await; 75 | println!( 76 | "[{}] ## get_data({}) completed", 77 | START_TIME.elapsed().as_millis(), 78 | i 79 | ); 80 | Data(i) 81 | } 82 | -------------------------------------------------------------------------------- /examples/27-ui-no-cancel-1-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! 
{ 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Don't cancel 25 queries, buffered by 3"); 20 | congested_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn congested_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_observing(rx, buf_factor, &valid_reader, &counter_writer).await; 39 | }); 40 | 41 | let (send_res, receive_res) = join!(send, receive); 42 | send_res?; 43 | receive_res?; 44 | 45 | counter.print(); 46 | Ok(()) 47 | } 48 | 49 | async fn send_task_tracking_validity( 50 | tx: UnboundedSender, 51 | n: usize, 52 | valid_writer: ValidRange, 53 | ) { 54 | for i in 0..n { 55 | let range = 10 * i..10 * i + 5; 56 | valid_writer.set(range.clone()); 57 | for j in range { 58 | println!("## unbounded_send({})", j); 59 | tx.unbounded_send(j).unwrap(); 60 | } 61 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 62 | println!("## sleep({}) for {} ms", i, millis); 63 | 64 | let duration = Duration::from_millis(millis); 65 | sleep(duration).await; 66 | println!("## sleep({}) completed", i); 67 | } 68 | } 69 | 70 | async fn receive_task_observing( 71 | rx: UnboundedReceiver, 72 | buf_factor: usize, 73 | valid_reader: &ValidRange, 74 | counter_writer: &Arc, 75 | ) { 76 | rx.map(|i| get_data(i)) 77 | .buffered(buf_factor) 78 | .for_each(|data| async move { 79 | let is_valid = valid_reader.is_valid(data.0); 80 | counter_writer.increment(is_valid); 81 | println!( 82 | "## data = {:?} ({})", 83 | data, 84 | if is_valid { "valid" } else { "expired" } 85 | ); 86 | }) 87 | .await; 88 | } 89 | 90 | #[derive(Clone)] 91 | struct ValidRange { 92 | range: Arc>>, 93 | } 94 | 95 | impl ValidRange { 96 | fn new() -> (ValidRange, ValidRange) { 97 | let writer = Arc::new(RwLock::new(0..0)); 98 | let reader = writer.clone(); 99 | (ValidRange { range: writer }, ValidRange { range: reader }) 100 | } 101 | 102 | fn set(&self, range: Range) { 103 | *self.range.write().unwrap() = range; 104 | } 105 | 106 | fn is_valid(&self, x: usize) -> bool { 107 | self.range.read().unwrap().contains(&x) 108 | } 109 | } 110 | 111 | struct ValidCounter { 112 | valid: AtomicUsize, 113 | expired: AtomicUsize, 114 | } 115 | 116 | impl ValidCounter { 117 | fn new() -> ValidCounter { 118 | ValidCounter { 119 | valid: AtomicUsize::new(0), 120 | expired: AtomicUsize::new(0), 121 | } 122 | } 123 | 124 | fn increment(&self, is_valid: bool) { 125 | if is_valid { 126 | self.valid.fetch_add(1, Ordering::SeqCst); 127 | } else { 128 | self.expired.fetch_add(1, Ordering::SeqCst); 129 | } 130 | } 131 | 132 | fn print(&self) { 133 | let valid = self.valid.load(Ordering::SeqCst); 134 | let expired = self.expired.load(Ordering::SeqCst); 135 | 136 | println!( 137 | "Made {} queries, {} results were still valid, {} expired", 138 | valid + expired, 139 | valid, 140 | expired 141 | ); 142 | } 143 | } 144 | 145 | #[derive(Clone, Copy)] 146 | struct Data(usize); 147 | 148 | impl std::fmt::Debug for Data { 149 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 150 | f.write_fmt(format_args!("d:{}", self.0)) 151 | } 152 | } 153 
| 154 | async fn get_data(i: usize) -> Data { 155 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 156 | println!( 157 | "[{}] ## get_data({}) will complete in {} ms", 158 | START_TIME.elapsed().as_millis(), 159 | i, 160 | millis 161 | ); 162 | 163 | sleep(Duration::from_millis(millis)).await; 164 | println!( 165 | "[{}] ## get_data({}) completed", 166 | START_TIME.elapsed().as_millis(), 167 | i 168 | ); 169 | Data(i) 170 | } 171 | -------------------------------------------------------------------------------- /examples/28-ui-no-cancel-2-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! { 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Don't cancel 25 queries, buffered by 3"); 20 | congested_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn congested_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_observing(rx, buf_factor, valid_reader, &counter_writer).await; 39 | }); 40 | 41 | let (send_res, receive_res) = join!(send, receive); 42 | send_res?; 43 | receive_res?; 44 | 45 | counter.print(); 46 | Ok(()) 47 | } 48 | 49 | async fn send_task_tracking_validity( 50 | tx: UnboundedSender, 51 | n: usize, 52 | valid_writer: ValidRange, 53 | ) { 54 | for i in 0..n { 55 | let range = 10 * i..10 * i + 5; 56 | valid_writer.set(range.clone()); 57 | for j in range { 58 | println!("## unbounded_send({})", j); 59 | tx.unbounded_send(j).unwrap(); 60 | } 61 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 62 | println!("## sleep({}) for {} ms", i, millis); 63 | 64 | let duration = Duration::from_millis(millis); 65 | sleep(duration).await; 66 | println!("## sleep({}) completed", i); 67 | } 68 | } 69 | 70 | async fn receive_task_observing( 71 | rx: UnboundedReceiver, 72 | buf_factor: usize, 73 | valid_reader: ValidRange, 74 | counter_writer: &Arc, 75 | ) { 76 | rx.map(|i| get_data(i)) 77 | .buffered(buf_factor) 78 | .for_each(|data| async move { 79 | let is_valid = valid_reader.is_valid(data.0); 80 | counter_writer.increment(is_valid); 81 | println!( 82 | "## data = {:?} ({})", 83 | data, 84 | if is_valid { "valid" } else { "expired" } 85 | ); 86 | }) 87 | .await; 88 | } 89 | 90 | #[derive(Clone)] 91 | struct ValidRange { 92 | range: Arc>>, 93 | } 94 | 95 | impl ValidRange { 96 | fn new() -> (ValidRange, ValidRange) { 97 | let writer = Arc::new(RwLock::new(0..0)); 98 | let reader = writer.clone(); 99 | (ValidRange { range: writer }, ValidRange { range: reader }) 100 | } 101 | 102 | fn set(&self, range: Range) { 103 | *self.range.write().unwrap() = range; 104 | } 105 | 106 | fn 
is_valid(&self, x: usize) -> bool { 107 | self.range.read().unwrap().contains(&x) 108 | } 109 | } 110 | 111 | struct ValidCounter { 112 | valid: AtomicUsize, 113 | expired: AtomicUsize, 114 | } 115 | 116 | impl ValidCounter { 117 | fn new() -> ValidCounter { 118 | ValidCounter { 119 | valid: AtomicUsize::new(0), 120 | expired: AtomicUsize::new(0), 121 | } 122 | } 123 | 124 | fn increment(&self, is_valid: bool) { 125 | if is_valid { 126 | self.valid.fetch_add(1, Ordering::SeqCst); 127 | } else { 128 | self.expired.fetch_add(1, Ordering::SeqCst); 129 | } 130 | } 131 | 132 | fn print(&self) { 133 | let valid = self.valid.load(Ordering::SeqCst); 134 | let expired = self.expired.load(Ordering::SeqCst); 135 | 136 | println!( 137 | "Made {} queries, {} results were still valid, {} expired", 138 | valid + expired, 139 | valid, 140 | expired 141 | ); 142 | } 143 | } 144 | 145 | #[derive(Clone, Copy)] 146 | struct Data(usize); 147 | 148 | impl std::fmt::Debug for Data { 149 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 150 | f.write_fmt(format_args!("d:{}", self.0)) 151 | } 152 | } 153 | 154 | async fn get_data(i: usize) -> Data { 155 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 156 | println!( 157 | "[{}] ## get_data({}) will complete in {} ms", 158 | START_TIME.elapsed().as_millis(), 159 | i, 160 | millis 161 | ); 162 | 163 | sleep(Duration::from_millis(millis)).await; 164 | println!( 165 | "[{}] ## get_data({}) completed", 166 | START_TIME.elapsed().as_millis(), 167 | i 168 | ); 169 | Data(i) 170 | } 171 | -------------------------------------------------------------------------------- /examples/29-ui-cancel-buffered-1-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! 
{ 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Cancel 25 queries, buffered by 3"); 20 | cancel_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn cancel_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_buffered_cancelling(rx, buf_factor, &valid_reader, &counter_writer).await; 39 | }); 40 | 41 | let (send_res, receive_res) = join!(send, receive); 42 | send_res?; 43 | receive_res?; 44 | 45 | counter.print(); 46 | Ok(()) 47 | } 48 | 49 | async fn send_task_tracking_validity( 50 | tx: UnboundedSender, 51 | n: usize, 52 | valid_writer: ValidRange, 53 | ) { 54 | for i in 0..n { 55 | let range = 10 * i..10 * i + 5; 56 | valid_writer.set(range.clone()); 57 | for j in range { 58 | println!("## unbounded_send({})", j); 59 | tx.unbounded_send(j).unwrap(); 60 | } 61 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 62 | println!("## sleep({}) for {} ms", i, millis); 63 | 64 | let duration = Duration::from_millis(millis); 65 | sleep(duration).await; 66 | println!("## sleep({}) completed", i); 67 | } 68 | } 69 | 70 | async fn receive_task_buffered_cancelling( 71 | rx: UnboundedReceiver, 72 | buf_factor: usize, 73 | valid_reader: &ValidRange, 74 | counter_writer: &Arc, 75 | ) { 76 | rx.filter(|i| async move { 77 | let is_valid = valid_reader.is_valid(*i); 78 | println!("## filter({}) = {}", i, is_valid); 79 | is_valid 80 | }) 81 | .map(|i| get_data(i)) 82 | .buffered(buf_factor) 83 | .for_each(|data| async move { 84 | let is_valid = valid_reader.is_valid(data.0); 85 | counter_writer.increment(is_valid); 86 | println!( 87 | "## data = {:?} ({})", 88 | data, 89 | if is_valid { "valid" } else { "expired" } 90 | ); 91 | }) 92 | .await; 93 | } 94 | 95 | #[derive(Clone)] 96 | struct ValidRange { 97 | range: Arc>>, 98 | } 99 | 100 | impl ValidRange { 101 | fn new() -> (ValidRange, ValidRange) { 102 | let writer = Arc::new(RwLock::new(0..0)); 103 | let reader = writer.clone(); 104 | (ValidRange { range: writer }, ValidRange { range: reader }) 105 | } 106 | 107 | fn set(&self, range: Range) { 108 | *self.range.write().unwrap() = range; 109 | } 110 | 111 | fn is_valid(&self, x: usize) -> bool { 112 | self.range.read().unwrap().contains(&x) 113 | } 114 | } 115 | 116 | struct ValidCounter { 117 | valid: AtomicUsize, 118 | expired: AtomicUsize, 119 | } 120 | 121 | impl ValidCounter { 122 | fn new() -> ValidCounter { 123 | ValidCounter { 124 | valid: AtomicUsize::new(0), 125 | expired: AtomicUsize::new(0), 126 | } 127 | } 128 | 129 | fn increment(&self, is_valid: bool) { 130 | if is_valid { 131 | self.valid.fetch_add(1, Ordering::SeqCst); 132 | } else { 133 | self.expired.fetch_add(1, Ordering::SeqCst); 134 | } 135 | } 136 | 137 | fn print(&self) { 138 | let valid = self.valid.load(Ordering::SeqCst); 139 | let expired = self.expired.load(Ordering::SeqCst); 140 | 141 | println!( 142 | "Made {} queries, {} results were still valid, {} expired", 143 | valid + expired, 144 | valid, 145 | expired 146 | ); 147 | } 148 | } 149 | 150 | #[derive(Clone, Copy)] 151 | struct Data(usize); 152 | 153 | impl 
std::fmt::Debug for Data { 154 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 155 | f.write_fmt(format_args!("d:{}", self.0)) 156 | } 157 | } 158 | 159 | async fn get_data(i: usize) -> Data { 160 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 161 | println!( 162 | "[{}] ## get_data({}) will complete in {} ms", 163 | START_TIME.elapsed().as_millis(), 164 | i, 165 | millis 166 | ); 167 | 168 | sleep(Duration::from_millis(millis)).await; 169 | println!( 170 | "[{}] ## get_data({}) completed", 171 | START_TIME.elapsed().as_millis(), 172 | i 173 | ); 174 | Data(i) 175 | } 176 | -------------------------------------------------------------------------------- /examples/30-ui-cancel-buffered-2-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::stream::StreamExt; 3 | use futures::{future, join}; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! { 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Cancel 25 queries, buffered by 3"); 20 | cancel_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn cancel_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_buffered_cancelling(rx, buf_factor, &valid_reader, &counter_writer).await; 39 | }); 40 | 41 | let (send_res, receive_res) = join!(send, receive); 42 | send_res?; 43 | receive_res?; 44 | 45 | counter.print(); 46 | Ok(()) 47 | } 48 | 49 | async fn send_task_tracking_validity( 50 | tx: UnboundedSender, 51 | n: usize, 52 | valid_writer: ValidRange, 53 | ) { 54 | for i in 0..n { 55 | let range = 10 * i..10 * i + 5; 56 | valid_writer.set(range.clone()); 57 | for j in range { 58 | println!("## unbounded_send({})", j); 59 | tx.unbounded_send(j).unwrap(); 60 | } 61 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 62 | println!("## sleep({}) for {} ms", i, millis); 63 | 64 | let duration = Duration::from_millis(millis); 65 | sleep(duration).await; 66 | println!("## sleep({}) completed", i); 67 | } 68 | } 69 | 70 | async fn receive_task_buffered_cancelling( 71 | rx: UnboundedReceiver, 72 | buf_factor: usize, 73 | valid_reader: &ValidRange, 74 | counter_writer: &Arc, 75 | ) { 76 | rx.filter(|i| { 77 | let is_valid = valid_reader.is_valid(*i); 78 | println!("## filter({}) = {}", i, is_valid); 79 | future::ready(is_valid) 80 | }) 81 | .map(|i| get_data(i)) 82 | .buffered(buf_factor) 83 | .for_each(|data| async move { 84 | let is_valid = valid_reader.is_valid(data.0); 85 | counter_writer.increment(is_valid); 86 | println!( 87 | "## data = {:?} ({})", 88 | data, 89 | if is_valid { "valid" } else { "expired" } 90 | ); 91 | }) 92 | .await; 93 | } 94 | 95 | #[derive(Clone)] 96 | struct ValidRange { 97 | range: Arc>>, 98 | } 
99 | 100 | impl ValidRange { 101 | fn new() -> (ValidRange, ValidRange) { 102 | let writer = Arc::new(RwLock::new(0..0)); 103 | let reader = writer.clone(); 104 | (ValidRange { range: writer }, ValidRange { range: reader }) 105 | } 106 | 107 | fn set(&self, range: Range) { 108 | *self.range.write().unwrap() = range; 109 | } 110 | 111 | fn is_valid(&self, x: usize) -> bool { 112 | self.range.read().unwrap().contains(&x) 113 | } 114 | } 115 | 116 | struct ValidCounter { 117 | valid: AtomicUsize, 118 | expired: AtomicUsize, 119 | } 120 | 121 | impl ValidCounter { 122 | fn new() -> ValidCounter { 123 | ValidCounter { 124 | valid: AtomicUsize::new(0), 125 | expired: AtomicUsize::new(0), 126 | } 127 | } 128 | 129 | fn increment(&self, is_valid: bool) { 130 | if is_valid { 131 | self.valid.fetch_add(1, Ordering::SeqCst); 132 | } else { 133 | self.expired.fetch_add(1, Ordering::SeqCst); 134 | } 135 | } 136 | 137 | fn print(&self) { 138 | let valid = self.valid.load(Ordering::SeqCst); 139 | let expired = self.expired.load(Ordering::SeqCst); 140 | 141 | println!( 142 | "Made {} queries, {} results were still valid, {} expired", 143 | valid + expired, 144 | valid, 145 | expired 146 | ); 147 | } 148 | } 149 | 150 | #[derive(Clone, Copy)] 151 | struct Data(usize); 152 | 153 | impl std::fmt::Debug for Data { 154 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 155 | f.write_fmt(format_args!("d:{}", self.0)) 156 | } 157 | } 158 | 159 | async fn get_data(i: usize) -> Data { 160 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 161 | println!( 162 | "[{}] ## get_data({}) will complete in {} ms", 163 | START_TIME.elapsed().as_millis(), 164 | i, 165 | millis 166 | ); 167 | 168 | sleep(Duration::from_millis(millis)).await; 169 | println!( 170 | "[{}] ## get_data({}) completed", 171 | START_TIME.elapsed().as_millis(), 172 | i 173 | ); 174 | Data(i) 175 | } 176 | -------------------------------------------------------------------------------- /examples/31-ui-cancel-buffer_unordered-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::stream::StreamExt; 3 | use futures::{future, join}; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! 
{ 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Cancel 25 queries, buffer-unordered by 3"); 20 | cancel_queries_buffer_unordered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn cancel_queries_buffer_unordered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_buffer_unordered_cancelling(rx, buf_factor, &valid_reader, &counter_writer) 39 | .await; 40 | }); 41 | 42 | let (send_res, receive_res) = join!(send, receive); 43 | send_res?; 44 | receive_res?; 45 | 46 | counter.print(); 47 | Ok(()) 48 | } 49 | 50 | async fn send_task_tracking_validity( 51 | tx: UnboundedSender, 52 | n: usize, 53 | valid_writer: ValidRange, 54 | ) { 55 | for i in 0..n { 56 | let range = 10 * i..10 * i + 5; 57 | valid_writer.set(range.clone()); 58 | for j in range { 59 | println!("## unbounded_send({})", j); 60 | tx.unbounded_send(j).unwrap(); 61 | } 62 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 63 | println!("## sleep({}) for {} ms", i, millis); 64 | 65 | let duration = Duration::from_millis(millis); 66 | sleep(duration).await; 67 | println!("## sleep({}) completed", i); 68 | } 69 | } 70 | 71 | async fn receive_task_buffer_unordered_cancelling( 72 | rx: UnboundedReceiver, 73 | buf_factor: usize, 74 | valid_reader: &ValidRange, 75 | counter_writer: &Arc, 76 | ) { 77 | rx.filter(|i| { 78 | let is_valid = valid_reader.is_valid(*i); 79 | println!("## filter({}) = {}", i, is_valid); 80 | future::ready(is_valid) 81 | }) 82 | .map(|i| get_data(i)) 83 | .buffer_unordered(buf_factor) 84 | .for_each(|data| async move { 85 | let is_valid = valid_reader.is_valid(data.0); 86 | counter_writer.increment(is_valid); 87 | println!( 88 | "## data = {:?} ({})", 89 | data, 90 | if is_valid { "valid" } else { "expired" } 91 | ); 92 | }) 93 | .await; 94 | } 95 | 96 | #[derive(Clone)] 97 | struct ValidRange { 98 | range: Arc>>, 99 | } 100 | 101 | impl ValidRange { 102 | fn new() -> (ValidRange, ValidRange) { 103 | let writer = Arc::new(RwLock::new(0..0)); 104 | let reader = writer.clone(); 105 | (ValidRange { range: writer }, ValidRange { range: reader }) 106 | } 107 | 108 | fn set(&self, range: Range) { 109 | *self.range.write().unwrap() = range; 110 | } 111 | 112 | fn is_valid(&self, x: usize) -> bool { 113 | self.range.read().unwrap().contains(&x) 114 | } 115 | } 116 | 117 | struct ValidCounter { 118 | valid: AtomicUsize, 119 | expired: AtomicUsize, 120 | } 121 | 122 | impl ValidCounter { 123 | fn new() -> ValidCounter { 124 | ValidCounter { 125 | valid: AtomicUsize::new(0), 126 | expired: AtomicUsize::new(0), 127 | } 128 | } 129 | 130 | fn increment(&self, is_valid: bool) { 131 | if is_valid { 132 | self.valid.fetch_add(1, Ordering::SeqCst); 133 | } else { 134 | self.expired.fetch_add(1, Ordering::SeqCst); 135 | } 136 | } 137 | 138 | fn print(&self) { 139 | let valid = self.valid.load(Ordering::SeqCst); 140 | let expired = self.expired.load(Ordering::SeqCst); 141 | 142 | println!( 143 | "Made {} queries, {} results were still valid, {} expired", 144 | valid + expired, 145 | valid, 146 | expired 147 | ); 148 | } 149 | } 150 | 151 | #[derive(Clone, 
Copy)] 152 | struct Data(usize); 153 | 154 | impl std::fmt::Debug for Data { 155 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 156 | f.write_fmt(format_args!("d:{}", self.0)) 157 | } 158 | } 159 | 160 | async fn get_data(i: usize) -> Data { 161 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 162 | println!( 163 | "[{}] ## get_data({}) will complete in {} ms", 164 | START_TIME.elapsed().as_millis(), 165 | i, 166 | millis 167 | ); 168 | 169 | sleep(Duration::from_millis(millis)).await; 170 | println!( 171 | "[{}] ## get_data({}) completed", 172 | START_TIME.elapsed().as_millis(), 173 | i 174 | ); 175 | Data(i) 176 | } 177 | -------------------------------------------------------------------------------- /examples/32-ui-cancel-buffered-3-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! { 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Cancel 25 queries, buffered by 3"); 20 | cancel_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn cancel_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | let valid_reader = &valid_reader; 39 | let counter_writer = &counter_writer; 40 | rx.filter(|i| async move { 41 | let is_valid = valid_reader.is_valid(*i); 42 | println!("## filter({}) = {}", i, is_valid); 43 | is_valid 44 | }) 45 | .map(|i| get_data(i)) 46 | .buffered(buf_factor) 47 | .for_each(|data| async move { 48 | let is_valid = valid_reader.is_valid(data.0); 49 | counter_writer.increment(is_valid); 50 | println!( 51 | "## data = {:?} ({})", 52 | data, 53 | if is_valid { "valid" } else { "expired" } 54 | ); 55 | }) 56 | .await; 57 | }); 58 | 59 | let (send_res, receive_res) = join!(send, receive); 60 | send_res?; 61 | receive_res?; 62 | 63 | counter.print(); 64 | Ok(()) 65 | } 66 | 67 | async fn send_task_tracking_validity( 68 | tx: UnboundedSender, 69 | n: usize, 70 | valid_writer: ValidRange, 71 | ) { 72 | for i in 0..n { 73 | let range = 10 * i..10 * i + 5; 74 | valid_writer.set(range.clone()); 75 | for j in range { 76 | println!("## unbounded_send({})", j); 77 | tx.unbounded_send(j).unwrap(); 78 | } 79 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 80 | println!("## sleep({}) for {} ms", i, millis); 81 | 82 | let duration = Duration::from_millis(millis); 83 | sleep(duration).await; 84 | println!("## sleep({}) completed", i); 85 | } 86 | } 87 | 88 | #[derive(Clone)] 89 | struct ValidRange { 90 | range: Arc>>, 91 | } 92 | 93 | impl ValidRange { 94 | fn new() -> (ValidRange, ValidRange) { 95 | let writer = Arc::new(RwLock::new(0..0)); 96 | let reader = writer.clone(); 97 | (ValidRange { 
range: writer }, ValidRange { range: reader }) 98 | } 99 | 100 | fn set(&self, range: Range) { 101 | *self.range.write().unwrap() = range; 102 | } 103 | 104 | fn is_valid(&self, x: usize) -> bool { 105 | self.range.read().unwrap().contains(&x) 106 | } 107 | } 108 | 109 | struct ValidCounter { 110 | valid: AtomicUsize, 111 | expired: AtomicUsize, 112 | } 113 | 114 | impl ValidCounter { 115 | fn new() -> ValidCounter { 116 | ValidCounter { 117 | valid: AtomicUsize::new(0), 118 | expired: AtomicUsize::new(0), 119 | } 120 | } 121 | 122 | fn increment(&self, is_valid: bool) { 123 | if is_valid { 124 | self.valid.fetch_add(1, Ordering::SeqCst); 125 | } else { 126 | self.expired.fetch_add(1, Ordering::SeqCst); 127 | } 128 | } 129 | 130 | fn print(&self) { 131 | let valid = self.valid.load(Ordering::SeqCst); 132 | let expired = self.expired.load(Ordering::SeqCst); 133 | 134 | println!( 135 | "Made {} queries, {} results were still valid, {} expired", 136 | valid + expired, 137 | valid, 138 | expired 139 | ); 140 | } 141 | } 142 | 143 | #[derive(Clone, Copy)] 144 | struct Data(usize); 145 | 146 | impl std::fmt::Debug for Data { 147 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 148 | f.write_fmt(format_args!("d:{}", self.0)) 149 | } 150 | } 151 | 152 | async fn get_data(i: usize) -> Data { 153 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 154 | println!( 155 | "[{}] ## get_data({}) will complete in {} ms", 156 | START_TIME.elapsed().as_millis(), 157 | i, 158 | millis 159 | ); 160 | 161 | sleep(Duration::from_millis(millis)).await; 162 | println!( 163 | "[{}] ## get_data({}) completed", 164 | START_TIME.elapsed().as_millis(), 165 | i 166 | ); 167 | Data(i) 168 | } 169 | -------------------------------------------------------------------------------- /examples/33-ui-cancel-combinator-1-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedSender}; 2 | use futures::stream::{Stream, StreamExt}; 3 | use futures::{future, join}; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! 
{ 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Cancel 25 queries, buffered by 3"); 20 | cancel_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn cancel_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_buffered( 39 | cancel(rx, &valid_reader), 40 | buf_factor, 41 | &valid_reader, 42 | &counter_writer, 43 | ) 44 | .await; 45 | }); 46 | 47 | let (send_res, receive_res) = join!(send, receive); 48 | send_res?; 49 | receive_res?; 50 | 51 | counter.print(); 52 | Ok(()) 53 | } 54 | 55 | fn cancel>( 56 | stream: S, 57 | valid_range: &ValidRange, 58 | ) -> impl Stream { 59 | stream.filter(|i| { 60 | let is_valid = valid_range.is_valid(*i); 61 | println!("## filter({}) = {}", i, is_valid); 62 | future::ready(is_valid) 63 | }) 64 | } 65 | 66 | async fn send_task_tracking_validity( 67 | tx: UnboundedSender, 68 | n: usize, 69 | valid_writer: ValidRange, 70 | ) { 71 | for i in 0..n { 72 | let range = 10 * i..10 * i + 5; 73 | valid_writer.set(range.clone()); 74 | for j in range { 75 | println!("## unbounded_send({})", j); 76 | tx.unbounded_send(j).unwrap(); 77 | } 78 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 79 | println!("## sleep({}) for {} ms", i, millis); 80 | 81 | let duration = Duration::from_millis(millis); 82 | sleep(duration).await; 83 | println!("## sleep({}) completed", i); 84 | } 85 | } 86 | 87 | async fn receive_task_buffered( 88 | rx: impl Stream, 89 | buf_factor: usize, 90 | valid_reader: &ValidRange, 91 | counter_writer: &Arc, 92 | ) { 93 | rx.map(|i| get_data(i)) 94 | .buffered(buf_factor) 95 | .for_each(|data| async move { 96 | let is_valid = valid_reader.is_valid(data.0); 97 | counter_writer.increment(is_valid); 98 | println!( 99 | "## data = {:?} ({})", 100 | data, 101 | if is_valid { "valid" } else { "expired" } 102 | ); 103 | }) 104 | .await; 105 | } 106 | 107 | #[derive(Clone)] 108 | struct ValidRange { 109 | range: Arc>>, 110 | } 111 | 112 | impl ValidRange { 113 | fn new() -> (ValidRange, ValidRange) { 114 | let writer = Arc::new(RwLock::new(0..0)); 115 | let reader = writer.clone(); 116 | (ValidRange { range: writer }, ValidRange { range: reader }) 117 | } 118 | 119 | fn set(&self, range: Range) { 120 | *self.range.write().unwrap() = range; 121 | } 122 | 123 | fn is_valid(&self, x: usize) -> bool { 124 | self.range.read().unwrap().contains(&x) 125 | } 126 | } 127 | 128 | struct ValidCounter { 129 | valid: AtomicUsize, 130 | expired: AtomicUsize, 131 | } 132 | 133 | impl ValidCounter { 134 | fn new() -> ValidCounter { 135 | ValidCounter { 136 | valid: AtomicUsize::new(0), 137 | expired: AtomicUsize::new(0), 138 | } 139 | } 140 | 141 | fn increment(&self, is_valid: bool) { 142 | if is_valid { 143 | self.valid.fetch_add(1, Ordering::SeqCst); 144 | } else { 145 | self.expired.fetch_add(1, Ordering::SeqCst); 146 | } 147 | } 148 | 149 | fn print(&self) { 150 | let valid = self.valid.load(Ordering::SeqCst); 151 | let expired = self.expired.load(Ordering::SeqCst); 152 | 153 | println!( 154 | "Made {} queries, {} results were still valid, {} expired", 155 | valid 
+ expired, 156 | valid, 157 | expired 158 | ); 159 | } 160 | } 161 | 162 | #[derive(Clone, Copy)] 163 | struct Data(usize); 164 | 165 | impl std::fmt::Debug for Data { 166 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 167 | f.write_fmt(format_args!("d:{}", self.0)) 168 | } 169 | } 170 | 171 | async fn get_data(i: usize) -> Data { 172 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 173 | println!( 174 | "[{}] ## get_data({}) will complete in {} ms", 175 | START_TIME.elapsed().as_millis(), 176 | i, 177 | millis 178 | ); 179 | 180 | sleep(Duration::from_millis(millis)).await; 181 | println!( 182 | "[{}] ## get_data({}) completed", 183 | START_TIME.elapsed().as_millis(), 184 | i 185 | ); 186 | Data(i) 187 | } 188 | -------------------------------------------------------------------------------- /examples/34-ui-cancel-combinator-2-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedSender}; 2 | use futures::stream::{Stream, StreamExt}; 3 | use futures::{future, join}; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! { 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Cancel 25 queries, buffered by 3"); 20 | cancel_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn cancel_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_buffered( 39 | cancel(rx, &valid_reader), 40 | buf_factor, 41 | &valid_reader, 42 | &counter_writer, 43 | ) 44 | .await; 45 | }); 46 | 47 | let (send_res, receive_res) = join!(send, receive); 48 | send_res?; 49 | receive_res?; 50 | 51 | counter.print(); 52 | Ok(()) 53 | } 54 | 55 | fn cancel>( 56 | stream: S, 57 | valid_range: &ValidRange, 58 | ) -> impl Stream + '_ { 59 | stream.filter(|i| { 60 | let is_valid = valid_range.is_valid(*i); 61 | println!("## filter({}) = {}", i, is_valid); 62 | future::ready(is_valid) 63 | }) 64 | } 65 | 66 | async fn send_task_tracking_validity( 67 | tx: UnboundedSender, 68 | n: usize, 69 | valid_writer: ValidRange, 70 | ) { 71 | for i in 0..n { 72 | let range = 10 * i..10 * i + 5; 73 | valid_writer.set(range.clone()); 74 | for j in range { 75 | println!("## unbounded_send({})", j); 76 | tx.unbounded_send(j).unwrap(); 77 | } 78 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 79 | println!("## sleep({}) for {} ms", i, millis); 80 | 81 | let duration = Duration::from_millis(millis); 82 | sleep(duration).await; 83 | println!("## sleep({}) completed", i); 84 | } 85 | } 86 | 87 | async fn receive_task_buffered( 88 | rx: impl Stream, 89 | buf_factor: usize, 90 | valid_reader: &ValidRange, 91 | counter_writer: &Arc, 92 | ) { 93 | rx.map(|i| get_data(i)) 94 | .buffered(buf_factor) 95 | .for_each(|data| async move { 96 | let is_valid = 
valid_reader.is_valid(data.0); 97 | counter_writer.increment(is_valid); 98 | println!( 99 | "## data = {:?} ({})", 100 | data, 101 | if is_valid { "valid" } else { "expired" } 102 | ); 103 | }) 104 | .await; 105 | } 106 | 107 | #[derive(Clone)] 108 | struct ValidRange { 109 | range: Arc>>, 110 | } 111 | 112 | impl ValidRange { 113 | fn new() -> (ValidRange, ValidRange) { 114 | let writer = Arc::new(RwLock::new(0..0)); 115 | let reader = writer.clone(); 116 | (ValidRange { range: writer }, ValidRange { range: reader }) 117 | } 118 | 119 | fn set(&self, range: Range) { 120 | *self.range.write().unwrap() = range; 121 | } 122 | 123 | fn is_valid(&self, x: usize) -> bool { 124 | self.range.read().unwrap().contains(&x) 125 | } 126 | } 127 | 128 | struct ValidCounter { 129 | valid: AtomicUsize, 130 | expired: AtomicUsize, 131 | } 132 | 133 | impl ValidCounter { 134 | fn new() -> ValidCounter { 135 | ValidCounter { 136 | valid: AtomicUsize::new(0), 137 | expired: AtomicUsize::new(0), 138 | } 139 | } 140 | 141 | fn increment(&self, is_valid: bool) { 142 | if is_valid { 143 | self.valid.fetch_add(1, Ordering::SeqCst); 144 | } else { 145 | self.expired.fetch_add(1, Ordering::SeqCst); 146 | } 147 | } 148 | 149 | fn print(&self) { 150 | let valid = self.valid.load(Ordering::SeqCst); 151 | let expired = self.expired.load(Ordering::SeqCst); 152 | 153 | println!( 154 | "Made {} queries, {} results were still valid, {} expired", 155 | valid + expired, 156 | valid, 157 | expired 158 | ); 159 | } 160 | } 161 | 162 | #[derive(Clone, Copy)] 163 | struct Data(usize); 164 | 165 | impl std::fmt::Debug for Data { 166 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 167 | f.write_fmt(format_args!("d:{}", self.0)) 168 | } 169 | } 170 | 171 | async fn get_data(i: usize) -> Data { 172 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 173 | println!( 174 | "[{}] ## get_data({}) will complete in {} ms", 175 | START_TIME.elapsed().as_millis(), 176 | i, 177 | millis 178 | ); 179 | 180 | sleep(Duration::from_millis(millis)).await; 181 | println!( 182 | "[{}] ## get_data({}) completed", 183 | START_TIME.elapsed().as_millis(), 184 | i 185 | ); 186 | Data(i) 187 | } 188 | -------------------------------------------------------------------------------- /examples/35-ui-cancel-combinator-3-fail.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedSender}; 2 | use futures::stream::{Stream, StreamExt}; 3 | use futures::{future, join}; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! 
{ 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Cancel 25 queries, buffered by 3"); 20 | cancel_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn cancel_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_buffered( 39 | cancel(rx, &valid_reader), 40 | buf_factor, 41 | &valid_reader, 42 | &counter_writer, 43 | ) 44 | .await; 45 | }); 46 | 47 | let (send_res, receive_res) = join!(send, receive); 48 | send_res?; 49 | receive_res?; 50 | 51 | counter.print(); 52 | Ok(()) 53 | } 54 | 55 | fn cancel<'a, S: Stream + 'a>( 56 | stream: S, 57 | valid_range: &'a ValidRange, 58 | ) -> impl Stream + 'a { 59 | stream.filter(|i| { 60 | let is_valid = valid_range.is_valid(*i); 61 | println!("## filter({}) = {}", i, is_valid); 62 | future::ready(is_valid) 63 | }) 64 | } 65 | 66 | async fn send_task_tracking_validity( 67 | tx: UnboundedSender, 68 | n: usize, 69 | valid_writer: ValidRange, 70 | ) { 71 | for i in 0..n { 72 | let range = 10 * i..10 * i + 5; 73 | valid_writer.set(range.clone()); 74 | for j in range { 75 | println!("## unbounded_send({})", j); 76 | tx.unbounded_send(j).unwrap(); 77 | } 78 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 79 | println!("## sleep({}) for {} ms", i, millis); 80 | 81 | let duration = Duration::from_millis(millis); 82 | sleep(duration).await; 83 | println!("## sleep({}) completed", i); 84 | } 85 | } 86 | 87 | async fn receive_task_buffered( 88 | rx: impl Stream, 89 | buf_factor: usize, 90 | valid_reader: &ValidRange, 91 | counter_writer: &Arc, 92 | ) { 93 | rx.map(|i| get_data(i)) 94 | .buffered(buf_factor) 95 | .for_each(|data| async move { 96 | let is_valid = valid_reader.is_valid(data.0); 97 | counter_writer.increment(is_valid); 98 | println!( 99 | "## data = {:?} ({})", 100 | data, 101 | if is_valid { "valid" } else { "expired" } 102 | ); 103 | }) 104 | .await; 105 | } 106 | 107 | #[derive(Clone)] 108 | struct ValidRange { 109 | range: Arc>>, 110 | } 111 | 112 | impl ValidRange { 113 | fn new() -> (ValidRange, ValidRange) { 114 | let writer = Arc::new(RwLock::new(0..0)); 115 | let reader = writer.clone(); 116 | (ValidRange { range: writer }, ValidRange { range: reader }) 117 | } 118 | 119 | fn set(&self, range: Range) { 120 | *self.range.write().unwrap() = range; 121 | } 122 | 123 | fn is_valid(&self, x: usize) -> bool { 124 | self.range.read().unwrap().contains(&x) 125 | } 126 | } 127 | 128 | struct ValidCounter { 129 | valid: AtomicUsize, 130 | expired: AtomicUsize, 131 | } 132 | 133 | impl ValidCounter { 134 | fn new() -> ValidCounter { 135 | ValidCounter { 136 | valid: AtomicUsize::new(0), 137 | expired: AtomicUsize::new(0), 138 | } 139 | } 140 | 141 | fn increment(&self, is_valid: bool) { 142 | if is_valid { 143 | self.valid.fetch_add(1, Ordering::SeqCst); 144 | } else { 145 | self.expired.fetch_add(1, Ordering::SeqCst); 146 | } 147 | } 148 | 149 | fn print(&self) { 150 | let valid = self.valid.load(Ordering::SeqCst); 151 | let expired = self.expired.load(Ordering::SeqCst); 152 | 153 | println!( 154 | "Made {} queries, {} results were still 
valid, {} expired", 155 | valid + expired, 156 | valid, 157 | expired 158 | ); 159 | } 160 | } 161 | 162 | #[derive(Clone, Copy)] 163 | struct Data(usize); 164 | 165 | impl std::fmt::Debug for Data { 166 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 167 | f.write_fmt(format_args!("d:{}", self.0)) 168 | } 169 | } 170 | 171 | async fn get_data(i: usize) -> Data { 172 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 173 | println!( 174 | "[{}] ## get_data({}) will complete in {} ms", 175 | START_TIME.elapsed().as_millis(), 176 | i, 177 | millis 178 | ); 179 | 180 | sleep(Duration::from_millis(millis)).await; 181 | println!( 182 | "[{}] ## get_data({}) completed", 183 | START_TIME.elapsed().as_millis(), 184 | i 185 | ); 186 | Data(i) 187 | } 188 | -------------------------------------------------------------------------------- /examples/36-ui-cancel-combinator-4-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedSender}; 2 | use futures::stream::{Stream, StreamExt}; 3 | use futures::{future, join}; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! { 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Cancel 25 queries, buffered by 3"); 20 | cancel_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn cancel_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_buffered( 39 | cancel(rx, &valid_reader), 40 | buf_factor, 41 | &valid_reader, 42 | &counter_writer, 43 | ) 44 | .await; 45 | }); 46 | 47 | let (send_res, receive_res) = join!(send, receive); 48 | send_res?; 49 | receive_res?; 50 | 51 | counter.print(); 52 | Ok(()) 53 | } 54 | 55 | fn cancel<'a, S: Stream + 'a>( 56 | stream: S, 57 | valid_range: &'a ValidRange, 58 | ) -> impl Stream + 'a { 59 | stream.filter(move |i| { 60 | let is_valid = valid_range.is_valid(*i); 61 | println!("## filter({}) = {}", i, is_valid); 62 | future::ready(is_valid) 63 | }) 64 | } 65 | 66 | async fn send_task_tracking_validity( 67 | tx: UnboundedSender, 68 | n: usize, 69 | valid_writer: ValidRange, 70 | ) { 71 | for i in 0..n { 72 | let range = 10 * i..10 * i + 5; 73 | valid_writer.set(range.clone()); 74 | for j in range { 75 | println!("## unbounded_send({})", j); 76 | tx.unbounded_send(j).unwrap(); 77 | } 78 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 79 | println!("## sleep({}) for {} ms", i, millis); 80 | 81 | let duration = Duration::from_millis(millis); 82 | sleep(duration).await; 83 | println!("## sleep({}) completed", i); 84 | } 85 | } 86 | 87 | async fn receive_task_buffered( 88 | rx: impl Stream, 89 | buf_factor: usize, 90 | valid_reader: &ValidRange, 91 | counter_writer: &Arc, 92 | ) { 93 | rx.map(|i| get_data(i)) 94 | .buffered(buf_factor) 95 | 
.for_each(|data| async move { 96 | let is_valid = valid_reader.is_valid(data.0); 97 | counter_writer.increment(is_valid); 98 | println!( 99 | "## data = {:?} ({})", 100 | data, 101 | if is_valid { "valid" } else { "expired" } 102 | ); 103 | }) 104 | .await; 105 | } 106 | 107 | #[derive(Clone)] 108 | struct ValidRange { 109 | range: Arc>>, 110 | } 111 | 112 | impl ValidRange { 113 | fn new() -> (ValidRange, ValidRange) { 114 | let writer = Arc::new(RwLock::new(0..0)); 115 | let reader = writer.clone(); 116 | (ValidRange { range: writer }, ValidRange { range: reader }) 117 | } 118 | 119 | fn set(&self, range: Range) { 120 | *self.range.write().unwrap() = range; 121 | } 122 | 123 | fn is_valid(&self, x: usize) -> bool { 124 | self.range.read().unwrap().contains(&x) 125 | } 126 | } 127 | 128 | struct ValidCounter { 129 | valid: AtomicUsize, 130 | expired: AtomicUsize, 131 | } 132 | 133 | impl ValidCounter { 134 | fn new() -> ValidCounter { 135 | ValidCounter { 136 | valid: AtomicUsize::new(0), 137 | expired: AtomicUsize::new(0), 138 | } 139 | } 140 | 141 | fn increment(&self, is_valid: bool) { 142 | if is_valid { 143 | self.valid.fetch_add(1, Ordering::SeqCst); 144 | } else { 145 | self.expired.fetch_add(1, Ordering::SeqCst); 146 | } 147 | } 148 | 149 | fn print(&self) { 150 | let valid = self.valid.load(Ordering::SeqCst); 151 | let expired = self.expired.load(Ordering::SeqCst); 152 | 153 | println!( 154 | "Made {} queries, {} results were still valid, {} expired", 155 | valid + expired, 156 | valid, 157 | expired 158 | ); 159 | } 160 | } 161 | 162 | #[derive(Clone, Copy)] 163 | struct Data(usize); 164 | 165 | impl std::fmt::Debug for Data { 166 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 167 | f.write_fmt(format_args!("d:{}", self.0)) 168 | } 169 | } 170 | 171 | async fn get_data(i: usize) -> Data { 172 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 173 | println!( 174 | "[{}] ## get_data({}) will complete in {} ms", 175 | START_TIME.elapsed().as_millis(), 176 | i, 177 | millis 178 | ); 179 | 180 | sleep(Duration::from_millis(millis)).await; 181 | println!( 182 | "[{}] ## get_data({}) completed", 183 | START_TIME.elapsed().as_millis(), 184 | i 185 | ); 186 | Data(i) 187 | } 188 | -------------------------------------------------------------------------------- /examples/37-ui-cancel-buffered-4-ok.rs: -------------------------------------------------------------------------------- 1 | use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender}; 2 | use futures::join; 3 | use futures::stream::StreamExt; 4 | use lazy_static::lazy_static; 5 | use rand::distributions::{Distribution, Uniform}; 6 | use std::ops::Range; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::sync::{Arc, RwLock}; 9 | use std::time::Duration; 10 | use tokio::spawn; 11 | use tokio::time::{sleep, Instant}; 12 | 13 | lazy_static! 
{ 14 | static ref START_TIME: Instant = Instant::now(); 15 | } 16 | 17 | #[tokio::main] 18 | async fn main() -> Result<(), Box> { 19 | println!("Cancel 25 queries, buffered by 3"); 20 | cancel_queries_buffered(5, 3).await?; 21 | Ok(()) 22 | } 23 | 24 | async fn cancel_queries_buffered( 25 | n: usize, 26 | buf_factor: usize, 27 | ) -> Result<(), Box> { 28 | let (tx, rx) = unbounded(); 29 | let (valid_writer, valid_reader) = ValidRange::new(); 30 | let counter = Arc::new(ValidCounter::new()); 31 | 32 | let send = spawn(async move { 33 | send_task_tracking_validity(tx, n, valid_writer).await; 34 | }); 35 | 36 | let counter_writer = counter.clone(); 37 | let receive = spawn(async move { 38 | receive_task_buffered_cancelling(rx, buf_factor, &valid_reader, &counter_writer).await; 39 | }); 40 | 41 | let (send_res, receive_res) = join!(send, receive); 42 | send_res?; 43 | receive_res?; 44 | 45 | counter.print(); 46 | Ok(()) 47 | } 48 | 49 | async fn send_task_tracking_validity( 50 | tx: UnboundedSender, 51 | n: usize, 52 | valid_writer: ValidRange, 53 | ) { 54 | for i in 0..n { 55 | let range = 10 * i..10 * i + 5; 56 | valid_writer.set(range.clone()); 57 | for j in range { 58 | println!("## unbounded_send({})", j); 59 | tx.unbounded_send(j).unwrap(); 60 | } 61 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 62 | println!("## sleep({}) for {} ms", i, millis); 63 | 64 | let duration = Duration::from_millis(millis); 65 | sleep(duration).await; 66 | println!("## sleep({}) completed", i); 67 | } 68 | } 69 | 70 | async fn receive_task_buffered_cancelling( 71 | rx: UnboundedReceiver, 72 | buf_factor: usize, 73 | valid_reader: &ValidRange, 74 | counter_writer: &Arc, 75 | ) { 76 | rx.filter(|i| { 77 | let is_valid = valid_reader.is_valid(*i); 78 | println!("## filter({}) = {}", i, is_valid); 79 | async move { is_valid } 80 | }) 81 | .map(|i| get_data(i)) 82 | .buffered(buf_factor) 83 | .for_each(|data| async move { 84 | let is_valid = valid_reader.is_valid(data.0); 85 | counter_writer.increment(is_valid); 86 | println!( 87 | "## data = {:?} ({})", 88 | data, 89 | if is_valid { "valid" } else { "expired" } 90 | ); 91 | }) 92 | .await; 93 | } 94 | 95 | #[derive(Clone)] 96 | struct ValidRange { 97 | range: Arc>>, 98 | } 99 | 100 | impl ValidRange { 101 | fn new() -> (ValidRange, ValidRange) { 102 | let writer = Arc::new(RwLock::new(0..0)); 103 | let reader = writer.clone(); 104 | (ValidRange { range: writer }, ValidRange { range: reader }) 105 | } 106 | 107 | fn set(&self, range: Range) { 108 | *self.range.write().unwrap() = range; 109 | } 110 | 111 | fn is_valid(&self, x: usize) -> bool { 112 | self.range.read().unwrap().contains(&x) 113 | } 114 | } 115 | 116 | struct ValidCounter { 117 | valid: AtomicUsize, 118 | expired: AtomicUsize, 119 | } 120 | 121 | impl ValidCounter { 122 | fn new() -> ValidCounter { 123 | ValidCounter { 124 | valid: AtomicUsize::new(0), 125 | expired: AtomicUsize::new(0), 126 | } 127 | } 128 | 129 | fn increment(&self, is_valid: bool) { 130 | if is_valid { 131 | self.valid.fetch_add(1, Ordering::SeqCst); 132 | } else { 133 | self.expired.fetch_add(1, Ordering::SeqCst); 134 | } 135 | } 136 | 137 | fn print(&self) { 138 | let valid = self.valid.load(Ordering::SeqCst); 139 | let expired = self.expired.load(Ordering::SeqCst); 140 | 141 | println!( 142 | "Made {} queries, {} results were still valid, {} expired", 143 | valid + expired, 144 | valid, 145 | expired 146 | ); 147 | } 148 | } 149 | 150 | #[derive(Clone, Copy)] 151 | struct Data(usize); 152 | 153 | impl 
std::fmt::Debug for Data { 154 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 155 | f.write_fmt(format_args!("d:{}", self.0)) 156 | } 157 | } 158 | 159 | async fn get_data(i: usize) -> Data { 160 | let millis = Uniform::from(0..10).sample(&mut rand::thread_rng()); 161 | println!( 162 | "[{}] ## get_data({}) will complete in {} ms", 163 | START_TIME.elapsed().as_millis(), 164 | i, 165 | millis 166 | ); 167 | 168 | sleep(Duration::from_millis(millis)).await; 169 | println!( 170 | "[{}] ## get_data({}) completed", 171 | START_TIME.elapsed().as_millis(), 172 | i 173 | ); 174 | Data(i) 175 | } 176 | -------------------------------------------------------------------------------- /examples/plot.rs: -------------------------------------------------------------------------------- 1 | use plotters::coord::Shift; 2 | use plotters::prelude::*; 3 | use regex::Regex; 4 | use std::io::BufRead; 5 | 6 | const BAR_HEIGHT: i32 = 18; 7 | const BAR_WIDTH: i32 = 5; 8 | 9 | fn main() { 10 | let bars = parse(std::io::stdin().lock()); 11 | 12 | let width = bars 13 | .iter() 14 | .map(|bar| bar.begin + bar.length) 15 | .max() 16 | .unwrap_or(0) as u32 17 | * BAR_WIDTH as u32 18 | + 200; 19 | let height = bars.len() as u32 * BAR_HEIGHT as u32 + 5; 20 | 21 | println!("Drawing area: {}, {}", width, height); 22 | 23 | let drawing_area = SVGBackend::new("plot.svg", (width, height)).into_drawing_area(); 24 | draw(drawing_area, &bars); 25 | } 26 | 27 | fn draw(drawing_area: DrawingArea, bars: &[Bar]) { 28 | let text_style = TextStyle::from(("monospace", BAR_HEIGHT).into_font()).color(&BLACK); 29 | 30 | for (i, bar) in bars.iter().enumerate() { 31 | let i = i as i32; 32 | let rect = [ 33 | (BAR_WIDTH * bar.begin, BAR_HEIGHT * i), 34 | ( 35 | BAR_WIDTH * (bar.begin + bar.length) + 2, 36 | BAR_HEIGHT * (i + 1), 37 | ), 38 | ]; 39 | drawing_area 40 | .draw(&Rectangle::new( 41 | rect, 42 | ShapeStyle { 43 | color: bar.color.to_rgba(), 44 | filled: true, 45 | stroke_width: 0, 46 | }, 47 | )) 48 | .unwrap(); 49 | drawing_area 50 | .draw_text( 51 | &format!("{}({})", bar.label, bar.id), 52 | &text_style, 53 | (BAR_WIDTH * bar.begin, BAR_HEIGHT * i), 54 | ) 55 | .unwrap(); 56 | } 57 | } 58 | 59 | fn parse(input: impl BufRead) -> Vec { 60 | let re_fetch = 61 | Regex::new(r"^\[(\d+)\] \#{1,2} ([a-z_]+)\((\d+)\) will complete in (\d+) ms$").unwrap(); 62 | let re_data = Regex::new(r"^\#\# data = d:(\d+) \(([a-z]+)\)$").unwrap(); 63 | 64 | let mut bars = Vec::new(); 65 | for line in input.lines() { 66 | let line = line.unwrap(); 67 | if let Some(caps) = re_fetch.captures(&line) { 68 | println!("Line matches fetching: {}", line); 69 | let label = caps[2].to_owned(); 70 | let color = if label == "get_page" || label == "get_data" { 71 | RGBColor(0xA0, 0xC0, 0xFF) 72 | } else if label == "fetch_resource" { 73 | RGBColor(0xA0, 0xFF, 0xC0) 74 | } else { 75 | RGBColor(0xC0, 0xC0, 0xC0) 76 | }; 77 | bars.push(Bar { 78 | begin: caps[1].parse().unwrap(), 79 | length: caps[4].parse().unwrap(), 80 | label, 81 | id: caps[3].parse().unwrap(), 82 | color, 83 | }); 84 | } 85 | if let Some(caps) = re_data.captures(&line) { 86 | println!("Line matches data: {}", line); 87 | let id: usize = caps[1].parse().unwrap(); 88 | let status = &caps[2]; 89 | for bar in bars.iter_mut() { 90 | if bar.label == "get_data" && bar.id == id { 91 | if status == "valid" { 92 | bar.color = GREEN; 93 | } else { 94 | bar.color = RED; 95 | } 96 | } 97 | } 98 | } 99 | } 100 | 101 | // Normalize for the starting time. 
102 | let start = bars.iter().map(|bar| bar.begin).min().unwrap_or(0); 103 | for bar in bars.iter_mut() { 104 | bar.begin -= start; 105 | } 106 | 107 | bars 108 | } 109 | 110 | struct Bar { 111 | begin: i32, 112 | length: i32, 113 | label: String, 114 | id: usize, 115 | color: RGBColor, 116 | } 117 | --------------------------------------------------------------------------------
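
The difference between the failing and working `filter` calls above (compare `29-ui-cancel-buffered-1-fail.rs` with `30-ui-cancel-buffered-2-ok.rs`) can be isolated in a much smaller program. The sketch below is not taken from the repository; `threshold` and `threshold_ref` are made-up names standing in for `valid_reader`, and it only depends on the `futures` and `tokio` crates already listed in `Cargo.toml`.

```rust
use futures::future;
use futures::stream::{self, StreamExt};

#[tokio::main]
async fn main() {
    let threshold: usize = 3;
    let threshold_ref = &threshold;

    // Compiles: the predicate runs synchronously inside the closure, and the
    // returned `future::Ready<bool>` stores no reference to the item.
    let kept: Vec<usize> = stream::iter(0..6usize)
        .filter(|i| future::ready(*i < *threshold_ref))
        .collect()
        .await;
    println!("kept = {:?}", kept);

    // The rejected variant, analogous to 29-ui-cancel-buffered-1-fail.rs, is:
    //     .filter(|i| async move { *i < *threshold_ref })
    // The async block captures `i: &usize`, a reference that only lives for
    // the duration of the closure call, yet it would have to be stored inside
    // the future that `filter` keeps polling afterwards, so the compiler
    // refuses it.
}
```

`37-ui-cancel-buffered-4-ok.rs` resolves the same problem a third way: it evaluates `is_valid` before building the future and returns `async move { is_valid }`, so the block only captures an already-computed `bool`.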