├── .gitignore ├── bulk.yaml ├── src ├── wrapper_types.rs ├── log.rs ├── sleep.rs ├── lib.rs ├── errors.rs ├── error.rs ├── listen_ext.rs ├── byte_stream.rs └── backpressure.rs ├── .github └── workflows │ ├── publish.yml │ └── test.yml ├── Cargo.toml ├── README.md ├── examples ├── infinite_accept.rs ├── backpressure_wrapper.rs ├── token_reference.rs ├── metric.rs └── tcp_and_unix.rs ├── LICENSE-MIT ├── vagga.yaml ├── tests ├── log.rs └── backpressure.rs └── LICENSE-APACHE /.gitignore: -------------------------------------------------------------------------------- 1 | /Cargo.lock 2 | /.vagga 3 | /target 4 | /tmp 5 | -------------------------------------------------------------------------------- /bulk.yaml: -------------------------------------------------------------------------------- 1 | minimum-bulk: v0.4.5 2 | 3 | versions: 4 | 5 | - file: Cargo.toml 6 | block-start: ^\[package\] 7 | block-end: ^\[.*\] 8 | regex: ^version\s*=\s*"(\S+)" 9 | -------------------------------------------------------------------------------- /src/wrapper_types.rs: -------------------------------------------------------------------------------- 1 | //! This module exports all the public wrapper types that library uses 2 | //! 3 | //! Usually we don't need to import these types, but they have to be public. 4 | pub use crate::log::LogWarnings; 5 | pub use crate::sleep::HandleErrors; 6 | pub use crate::error::ErrorHint; 7 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | tags: 4 | - v* 5 | 6 | name: CI 7 | 8 | jobs: 9 | build_and_test: 10 | name: Publish a release 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@master 14 | - uses: actions-rs/toolchain@v1 15 | with: 16 | profile: minimal 17 | toolchain: stable 18 | default: true 19 | - uses: actions-rs/cargo@v1 20 | with: 21 | command: test 22 | - uses: actions-rs/cargo@v1 23 | with: 24 | command: publish 25 | args: --verbose --token=${{ secrets.CARGO_TOKEN }} 26 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "async-listen" 3 | description = """ 4 | Various helpers for writing production-ready servers in 5 | rust using async-std. 6 | """ 7 | license = "MIT/Apache-2.0" 8 | readme = "README.md" 9 | keywords = ["async", "server", "backpressure", "accept", "listen"] 10 | categories = ["network-programming"] 11 | homepage = "https://github.com/tailhook/async-listen" 12 | documentation = "https://docs.rs/async-listen" 13 | version = "0.2.1" 14 | authors = ["Paul Colomiets "] 15 | edition = "2018" 16 | 17 | [dependencies] 18 | async-std = "1.4" 19 | 20 | [dev-dependencies] 21 | rand = "0.7.2" 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Async Listen 2 | 3 | The crate contains various helpers for writing production-ready servers in 4 | rust using [async-std](https://async.rs/). 
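A minimal accept loop built from the crate's `ListenExt` adapters might look like this (a sketch along the lines of `examples/infinite_accept.rs`; the address and sleep interval are just placeholders):

```rust
use std::time::Duration;

use async_std::net::TcpListener;
use async_std::prelude::*;
use async_std::task;

use async_listen::{ListenExt, error_hint};

fn main() -> std::io::Result<()> {
    task::block_on(async {
        let listener = TcpListener::bind("localhost:8080").await?;
        let mut incoming = listener.incoming()
            // log non-transient accept errors with a hint on how to fix them
            .log_warnings(|e| eprintln!("Accept error: {}. {}", e, error_hint(&e)))
            // sleep on such errors instead of spinning in a tight accept loop
            .handle_errors(Duration::from_millis(500));
        while let Some(stream) = incoming.next().await {
            // `stream` is a plain TcpStream here; spawn a task per connection
            task::spawn(async move { drop(stream) });
        }
        Ok(())
    })
}
```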
5 | 6 | [Docs](https://docs.rs/async-listen/) | 7 | [Github](https://github.com/tailhook/async-listen/) | 8 | [Crate](https://crates.io/crates/async-listen) 9 | 10 | Features: 11 | * Processing of errors in accept loop 12 | * Limiting number of incomming connections 13 | 14 | 15 | ## License 16 | 17 | Licensed under either of 18 | 19 | * Apache License, Version 2.0, 20 | (./LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0) 21 | * MIT license (./LICENSE-MIT or http://opensource.org/licenses/MIT) 22 | at your option. 23 | 24 | ### Contribution 25 | 26 | Unless you explicitly state otherwise, any contribution intentionally 27 | submitted for inclusion in the work by you, as defined in the Apache-2.0 28 | license, shall be dual licensed as above, without any additional terms or 29 | conditions. 30 | 31 | -------------------------------------------------------------------------------- /examples/infinite_accept.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use std::time::Duration; 3 | 4 | use async_std::task; 5 | use async_std::net::{TcpListener, TcpStream}; 6 | use async_std::prelude::*; 7 | 8 | use async_listen::{ListenExt, error_hint}; 9 | 10 | 11 | fn main() -> Result<(), Box> { 12 | task::block_on(async { 13 | let listener = TcpListener::bind("localhost:8080").await?; 14 | eprintln!("Accepting connections on localhost:8080"); 15 | let mut incoming = listener.incoming() 16 | .log_warnings(|e| { 17 | eprintln!("Accept error: {}. Paused listener for 0.5s. {}", 18 | e, error_hint(&e)) 19 | }) 20 | .handle_errors(Duration::from_millis(500)); 21 | while let Some(stream) = incoming.next().await { 22 | task::spawn(connection_loop(stream)); 23 | } 24 | Ok(()) 25 | }) 26 | } 27 | 28 | async fn connection_loop(_stream: TcpStream) { 29 | task::sleep(Duration::from_secs(10)).await; 30 | } 31 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 The async-listen Developers 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 
20 | -------------------------------------------------------------------------------- /examples/backpressure_wrapper.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use std::time::Duration; 3 | 4 | use async_std::task; 5 | use async_std::net::TcpListener; 6 | use async_std::prelude::*; 7 | 8 | use async_listen::{ListenExt, ByteStream, backpressure, error_hint}; 9 | 10 | 11 | fn main() -> Result<(), Box> { 12 | task::block_on(async { 13 | let (_, bp) = backpressure::new(10); 14 | let listener = TcpListener::bind("localhost:8080").await?; 15 | eprintln!("Accepting connections on localhost:8080"); 16 | let mut incoming = listener.incoming() 17 | .log_warnings(|e| { 18 | eprintln!("Accept error: {}. Paused listener for 0.5s. {}", 19 | e, error_hint(&e)) 20 | }) 21 | .handle_errors(Duration::from_millis(500)) 22 | .backpressure_wrapper(bp); 23 | while let Some(stream) = incoming.next().await { 24 | task::spawn(connection_loop(stream)); 25 | } 26 | Ok(()) 27 | }) 28 | } 29 | 30 | async fn connection_loop(_stream: ByteStream) { 31 | task::sleep(Duration::from_secs(10)).await; 32 | } 33 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | on: [push] 2 | 3 | name: CI 4 | 5 | jobs: 6 | test_linux: 7 | name: Test on Linux 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@master 11 | - uses: actions-rs/toolchain@v1 12 | with: 13 | profile: minimal 14 | toolchain: stable 15 | default: true 16 | - uses: actions-rs/cargo@v1 17 | with: 18 | command: test 19 | 20 | test_macos: 21 | name: Test on MacOS 22 | runs-on: macos-latest 23 | steps: 24 | - uses: actions/checkout@master 25 | - uses: actions-rs/toolchain@v1 26 | with: 27 | profile: minimal 28 | toolchain: stable 29 | default: true 30 | - uses: actions-rs/cargo@v1 31 | with: 32 | command: test 33 | 34 | test_windows: 35 | name: Test on Windows 36 | runs-on: windows-latest 37 | steps: 38 | - uses: actions/checkout@master 39 | - uses: actions-rs/toolchain@v1 40 | with: 41 | profile: minimal 42 | toolchain: stable 43 | default: true 44 | - uses: actions-rs/cargo@v1 45 | with: 46 | command: test 47 | -------------------------------------------------------------------------------- /vagga.yaml: -------------------------------------------------------------------------------- 1 | commands: 2 | 3 | make: !Command 4 | description: Build the library 5 | container: ubuntu 6 | run: [cargo, build] 7 | 8 | cargo: !Command 9 | description: Run arbitrary cargo command 10 | symlink-name: cargo 11 | container: ubuntu 12 | run: [cargo] 13 | 14 | test: !Command 15 | description: Run tests 16 | container: ubuntu 17 | run: [cargo, test] 18 | 19 | _bulk: !Command 20 | description: Run `bulk` command (for version bookkeeping) 21 | container: ubuntu 22 | run: [bulk] 23 | 24 | containers: 25 | 26 | ubuntu: 27 | setup: 28 | - !Ubuntu xenial 29 | - !Install [ca-certificates, git, build-essential, vim] 30 | 31 | - !TarInstall 32 | url: "https://static.rust-lang.org/dist/rust-1.40.0-x86_64-unknown-linux-gnu.tar.gz" 33 | script: "./install.sh --prefix=/usr \ 34 | --components=rustc,rust-std-x86_64-unknown-linux-gnu,cargo" 35 | - &bulk !Tar 36 | url: "https://github.com/tailhook/bulk/releases/download/v0.4.10/bulk-v0.4.10.tar.gz" 37 | sha256: 481513f8a0306a9857d045497fb5b50b50a51e9ff748909ecf7d2bda1de275ab 38 | path: / 39 | 40 | environ: 41 | HOME: 
/work/target 42 | RUST_BACKTRACE: 1 43 | -------------------------------------------------------------------------------- /examples/token_reference.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use std::time::Duration; 3 | 4 | use async_std::task; 5 | use async_std::net::{TcpListener, TcpStream}; 6 | use async_std::prelude::*; 7 | 8 | use async_listen::{ListenExt, backpressure::{self, Token}, error_hint}; 9 | 10 | 11 | fn main() -> Result<(), Box> { 12 | let (tok_gen, throttle) = backpressure::new(10); 13 | task::block_on(async { 14 | let listener = TcpListener::bind("localhost:8080").await?; 15 | let mut incoming = listener 16 | .incoming() 17 | .log_warnings(|e| { 18 | eprintln!("Accept error: {}. Paused listener for 0.5s. {}", 19 | e, error_hint(&e)) 20 | }) 21 | .handle_errors(Duration::from_millis(500)) // 1 22 | .apply_backpressure(throttle); 23 | while let Some(stream) = incoming.next().await { // 2 24 | let token = tok_gen.token(); 25 | task::spawn(async move { 26 | connection_loop(&token, stream).await 27 | }); 28 | } 29 | Ok(()) 30 | }) 31 | } 32 | 33 | async fn connection_loop(_token: &Token, _stream: TcpStream) { 34 | task::sleep(Duration::from_secs(10)).await; 35 | } 36 | -------------------------------------------------------------------------------- /examples/metric.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | use std::time::Duration; 3 | 4 | use async_std::task; 5 | use async_std::net::TcpListener; 6 | use async_std::prelude::*; 7 | 8 | use rand::{thread_rng, Rng}; 9 | use async_listen::{ListenExt, ByteStream, backpressure, error_hint}; 10 | 11 | 12 | fn main() -> Result<(), Box> { 13 | let (metrics, bp) = backpressure::new(10); 14 | let metrics1 = metrics.clone(); 15 | task::spawn(async move { 16 | loop { 17 | task::sleep(Duration::from_secs(10)).await; 18 | println!("Currently connected {}", metrics1.get_active_tokens()); 19 | } 20 | }); 21 | task::block_on(async move { 22 | let listener = TcpListener::bind("localhost:8080").await?; 23 | eprintln!("Accepting connections on localhost:8080"); 24 | let mut incoming = listener.incoming() 25 | .log_warnings(|e| { 26 | eprintln!("Accept error: {}. Paused listener for 0.5s. 
{}", 27 | e, error_hint(&e)) 28 | }) 29 | .handle_errors(Duration::from_millis(500)) 30 | .backpressure_wrapper(bp); 31 | while let Some(stream) = incoming.next().await { 32 | task::spawn(connection_loop(stream, metrics.clone())); 33 | } 34 | Ok(()) 35 | }) 36 | } 37 | async fn connection_loop(mut stream: ByteStream, metrics: backpressure::Sender) 38 | { 39 | let delay = Duration::from_millis(thread_rng().gen_range(100, 5000)); 40 | task::sleep(delay).await; 41 | let text = format!("Connections {}\n", metrics.get_active_tokens()); 42 | stream.write_all(text.as_bytes()).await 43 | .map_err(|e| eprintln!("Write error: {}", e)).ok(); 44 | } 45 | -------------------------------------------------------------------------------- /tests/log.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | 3 | use async_std::stream::{from_iter, Stream, StreamExt}; 4 | use async_std::task; 5 | 6 | use async_listen::{ListenExt, error_hint}; 7 | 8 | fn collect(mut stream: S) -> Vec { 9 | task::block_on(async { 10 | let mut result = Vec::new(); 11 | while let Some(item) = stream.next().await { 12 | result.push(item); 13 | } 14 | result 15 | }) 16 | } 17 | 18 | #[test] 19 | fn test_log() { 20 | let s = from_iter(vec![ 21 | Ok(1u32), 22 | Err(io::ErrorKind::ConnectionReset.into()), 23 | Ok(2), 24 | Err(io::ErrorKind::Other.into()), 25 | Ok(3), 26 | ]); 27 | let mut visited = false; 28 | let stream = s.log_warnings(|e| { 29 | assert_eq!(e.kind(), io::ErrorKind::Other); 30 | visited = true; 31 | }); 32 | let result = collect(stream); 33 | assert_eq!(result.len(), 5); 34 | assert!(visited); 35 | } 36 | 37 | #[test] 38 | #[cfg(target_os="linux")] // other OSs may have different error code or text 39 | fn test_hint() { 40 | let e = io::Error::from_raw_os_error(24); 41 | assert_eq!( 42 | format!("Error: {}. {}", e, error_hint(&e)), 43 | "Error: Too many open files (os error 24). \ 44 | Increase per-process open file limit \ 45 | https://bit.ly/async-err#EMFILE"); 46 | let e = io::Error::from_raw_os_error(23); 47 | assert_eq!( 48 | format!("Error: {}. {}", e, error_hint(&e)), 49 | "Error: Too many open files in system (os error 23). \ 50 | Increase system open file limit \ 51 | https://bit.ly/async-err#ENFILE"); 52 | let e = io::ErrorKind::Other.into(); 53 | assert_eq!( 54 | format!("Error: {}. {}", e, error_hint(&e)), 55 | "Error: other os error. "); 56 | } 57 | -------------------------------------------------------------------------------- /src/log.rs: -------------------------------------------------------------------------------- 1 | use std::io; 2 | use std::fmt; 3 | use std::pin::Pin; 4 | 5 | use async_std::stream::Stream; 6 | use async_std::task::{Poll, Context}; 7 | 8 | use crate::is_transient_error; 9 | 10 | /// A stream adapter that logs errors which aren't transient 11 | /// 12 | /// See 13 | /// [`ListenExt::log_warnings`](../trait.ListenExt.html#method.log_warnings) 14 | /// for more info. 15 | pub struct LogWarnings { 16 | stream: S, 17 | logger: F, 18 | } 19 | 20 | impl fmt::Debug for LogWarnings { 21 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 22 | f.debug_struct("LogWarnings") 23 | .field("stream", &self.stream) 24 | .finish() 25 | } 26 | } 27 | 28 | impl Unpin for LogWarnings {} 29 | 30 | impl LogWarnings { 31 | pub(crate) fn new(stream: S, f: F) -> LogWarnings { 32 | LogWarnings { 33 | stream, 34 | logger: f, 35 | } 36 | } 37 | 38 | /// Acquires a reference to the underlying stream that this adapter is 39 | /// pulling from. 
40 | pub fn get_ref(&self) -> &S { 41 | &self.stream 42 | } 43 | 44 | /// Acquires a mutable reference to the underlying stream that this 45 | /// adapter is pulling from. 46 | pub fn get_mut(&mut self) -> &mut S { 47 | &mut self.stream 48 | } 49 | 50 | /// Consumes this adapter, returning the underlying stream. 51 | pub fn into_inner(self) -> S { 52 | self.stream 53 | } 54 | } 55 | 56 | impl Stream for LogWarnings 57 | where S: Stream> + Unpin, 58 | F: FnMut(&io::Error), 59 | { 60 | type Item = Result; 61 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) 62 | -> Poll> 63 | { 64 | let res = Pin::new(&mut self.stream).poll_next(cx); 65 | match &res { 66 | Poll::Ready(Some(Err(e))) if !is_transient_error(e) 67 | => (self.get_mut().logger)(e), 68 | _ => {} 69 | }; 70 | return res; 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /examples/tcp_and_unix.rs: -------------------------------------------------------------------------------- 1 | use std::env::args; 2 | use std::error::Error; 3 | use std::fs::remove_file; 4 | use std::io; 5 | use std::time::Duration; 6 | 7 | use async_std::task; 8 | use async_std::net::TcpListener; 9 | use async_std::prelude::*; 10 | 11 | use async_listen::{ListenExt, ByteStream, backpressure, error_hint}; 12 | 13 | fn main() -> Result<(), Box> { 14 | let (_, bp) = backpressure::new(10); 15 | #[cfg(unix)] { 16 | use async_std::os::unix::net::UnixListener; 17 | 18 | if args().any(|x| x == "--unix") { 19 | remove_file("./example.sock").ok(); 20 | return task::block_on(async move { 21 | let listener = UnixListener::bind("./example.sock").await?; 22 | eprintln!("Accepting connections on ./example.sock"); 23 | let mut incoming = listener.incoming() 24 | .log_warnings(log_error) 25 | .handle_errors(Duration::from_millis(500)) 26 | .backpressure_wrapper(bp); 27 | while let Some(stream) = incoming.next().await { 28 | task::spawn(connection_loop(stream)); 29 | } 30 | Ok(()) 31 | }); 32 | } 33 | } 34 | task::block_on(async move { 35 | let listener = TcpListener::bind("localhost:8080").await?; 36 | eprintln!("Accepting connections on localhost:8080"); 37 | let mut incoming = listener.incoming() 38 | .log_warnings(log_error) 39 | .handle_errors(Duration::from_millis(500)) 40 | .backpressure_wrapper(bp); 41 | while let Some(stream) = incoming.next().await { 42 | task::spawn(async { 43 | if let Err(e) = connection_loop(stream).await { 44 | eprintln!("Error: {}", e); 45 | } 46 | }); 47 | } 48 | Ok(()) 49 | }) 50 | } 51 | 52 | async fn connection_loop(mut stream: ByteStream) -> Result<(), io::Error> { 53 | println!("Connected from {}", stream.peer_addr()?); 54 | task::sleep(Duration::from_secs(5)).await; 55 | stream.write_all("hello\n".as_bytes()).await?; 56 | Ok(()) 57 | } 58 | 59 | fn log_error(e: &io::Error) { 60 | eprintln!("Accept error: {}. Paused for 0.5s. {}", e, error_hint(&e)); 61 | } 62 | -------------------------------------------------------------------------------- /src/sleep.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::io; 3 | use std::pin::Pin; 4 | use std::time::Duration; 5 | 6 | use async_std::future::Future; 7 | use async_std::stream::Stream; 8 | use async_std::task::{sleep, Context, Poll}; 9 | 10 | use crate::is_transient_error; 11 | 12 | /// A stream adapter that retries on error 13 | /// 14 | /// See 15 | /// [`ListenExt::sleep_on_error`](../trait.ListenExt.html#method.sleep_on_error) 16 | /// for more info. 
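///
/// A minimal usage sketch (this adapter is normally created through the
/// `handle_errors` method of the `ListenExt` trait; the sleep interval below
/// is an arbitrary placeholder):
///
/// ```no_run
/// # use std::time::Duration;
/// # use async_std::net::TcpListener;
/// # use async_std::prelude::*;
/// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
/// use async_listen::ListenExt;
///
/// let listener = TcpListener::bind("127.0.0.1:0").await?;
/// let mut incoming = listener.incoming()
///     .handle_errors(Duration::from_millis(500));
/// while let Some(_stream) = incoming.next().await {
///     // each item is a plain `TcpStream`; errors were handled by the adapter
/// }
/// # Ok(()) }) }
/// ```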
17 | pub struct HandleErrors { 18 | stream: S, 19 | sleep_on_warning: Duration, 20 | timeout: Option + 'static + Send>>>, 21 | } 22 | 23 | impl fmt::Debug for HandleErrors { 24 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 25 | f.debug_struct("HandleErrors") 26 | .field("stream", &self.stream) 27 | .field("sleep_on_warning", &self.sleep_on_warning) 28 | .finish() 29 | } 30 | } 31 | 32 | impl Unpin for HandleErrors {} 33 | 34 | impl HandleErrors { 35 | pub(crate) fn new(stream: S, sleep_on_warning: Duration) 36 | -> HandleErrors 37 | { 38 | HandleErrors { stream, sleep_on_warning, timeout: None } 39 | } 40 | 41 | /// Acquires a mutable reference to the underlying stream that this 42 | /// adapter is pulling from. 43 | pub fn get_mut(&mut self) -> &mut S { 44 | &mut self.stream 45 | } 46 | 47 | /// Consumes this adapter, returning the underlying stream. 48 | pub fn into_inner(self) -> S { 49 | self.stream 50 | } 51 | } 52 | 53 | impl Stream for HandleErrors 54 | where S: Stream> + Unpin, 55 | { 56 | type Item = I; 57 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) 58 | -> Poll> 59 | { 60 | if let Some(ref mut to) = self.timeout { 61 | match to.as_mut().poll(cx) { 62 | Poll::Ready(_) => {} 63 | Poll::Pending => return Poll::Pending, 64 | } 65 | } 66 | self.timeout = None; 67 | loop { 68 | match Pin::new(&mut self.stream).poll_next(cx) { 69 | Poll::Pending => return Poll::Pending, 70 | Poll::Ready(Some(Ok(v))) => return Poll::Ready(Some(v)), 71 | Poll::Ready(None) => return Poll::Ready(None), 72 | Poll::Ready(Some(Err(ref e))) 73 | if is_transient_error(e) => continue, 74 | Poll::Ready(Some(Err(_))) => { 75 | let mut timeout = Box::pin(sleep(self.sleep_on_warning)); 76 | match timeout.as_mut().poll(cx) { 77 | Poll::Pending => { 78 | self.timeout = Some(timeout); 79 | return Poll::Pending; 80 | } 81 | Poll::Ready(()) => continue, 82 | } 83 | } 84 | } 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! # Async Listen 2 | //! 3 | //! The crate contains various helpers for writing production-ready servers in 4 | //! rust using [async-std](https://async.rs/). 5 | //! 6 | //! [Docs](https://docs.rs/async-listen/) | 7 | //! [Github](https://github.com/tailhook/async-listen/) | 8 | //! [Crate](https://crates.io/crates/async-listen) 9 | //! 10 | //! # Utilities 11 | //! * [ListenExt](trait.ListenExt.html) -- extension trait for stream of 12 | //! accepted sockets, provides useful conbinators for a stream 13 | //! * [error_hint](fn.error_hint.html) -- shows end-user hints no how to fix 14 | //! [the most imporant errors](errors/index.html) 15 | //! 16 | //! # Low-Level Utilities 17 | //! 18 | //! * [is_transient_error](fn.is_transient_error.html) -- determines if the 19 | //! error returned from `accept()` can be ignored 20 | //! 21 | //! # Example 22 | //! 23 | //! Here is a quite elaborate example that demonstrates: 24 | //! * Backpressure (limit on the number of simultaneous connections) 25 | //! * Error handling 26 | //! * Unification of Tcp and Unix sockets 27 | //! 28 | //! ```no_run 29 | //! use std::env::args; 30 | //! use std::error::Error; 31 | //! use std::fs::remove_file; 32 | //! use std::io; 33 | //! use std::time::Duration; 34 | //! 35 | //! use async_std::task; 36 | //! use async_std::net::TcpListener; 37 | //! use async_std::prelude::*; 38 | //! 39 | //! 
use async_listen::{ListenExt, ByteStream, backpressure, error_hint}; 40 | //! 41 | //! 42 | //! fn main() -> Result<(), Box> { 43 | //! let (_, bp) = backpressure::new(10); 44 | //! #[cfg(unix)] { 45 | //! use async_std::os::unix::net::UnixListener; 46 | //! 47 | //! if args().any(|x| x == "--unix") { 48 | //! remove_file("./example.sock").ok(); 49 | //! return task::block_on(async { 50 | //! let listener = UnixListener::bind("./example.sock").await?; 51 | //! eprintln!("Accepting connections on ./example.sock"); 52 | //! let mut incoming = listener.incoming() 53 | //! .log_warnings(log_accept_error) 54 | //! .handle_errors(Duration::from_millis(500)) 55 | //! .backpressure_wrapper(bp); 56 | //! while let Some(stream) = incoming.next().await { 57 | //! task::spawn(connection_loop(stream)); 58 | //! } 59 | //! Ok(()) 60 | //! }); 61 | //! } 62 | //! } 63 | //! task::block_on(async { 64 | //! let listener = TcpListener::bind("localhost:8080").await?; 65 | //! eprintln!("Accepting connections on localhost:8080"); 66 | //! let mut incoming = listener.incoming() 67 | //! .log_warnings(log_accept_error) 68 | //! .handle_errors(Duration::from_millis(500)) 69 | //! .backpressure_wrapper(bp); 70 | //! while let Some(stream) = incoming.next().await { 71 | //! task::spawn(async { 72 | //! if let Err(e) = connection_loop(stream).await { 73 | //! eprintln!("Error: {}", e); 74 | //! } 75 | //! }); 76 | //! } 77 | //! Ok(()) 78 | //! }) 79 | //! } 80 | //! 81 | //! async fn connection_loop(mut stream: ByteStream) -> Result<(), io::Error> { 82 | //! println!("Connected from {}", stream.peer_addr()?); 83 | //! task::sleep(Duration::from_secs(5)).await; 84 | //! stream.write_all("hello\n".as_bytes()).await?; 85 | //! Ok(()) 86 | //! } 87 | //! 88 | //! fn log_accept_error(e: &io::Error) { 89 | //! eprintln!("Accept error: {}. Sleeping 0.5s. {}", e, error_hint(&e)); 90 | //! } 91 | //! 
``` 92 | #![warn(missing_debug_implementations)] 93 | #![warn(missing_docs)] 94 | #![forbid(unsafe_code)] 95 | 96 | mod error; 97 | mod listen_ext; 98 | mod log; 99 | mod sleep; 100 | mod byte_stream; 101 | pub mod backpressure; 102 | pub mod wrapper_types; 103 | pub mod errors; 104 | 105 | pub use byte_stream::{ByteStream, PeerAddr}; 106 | pub use error::{is_transient_error, error_hint}; 107 | pub use listen_ext::ListenExt; 108 | -------------------------------------------------------------------------------- /tests/backpressure.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | use std::sync::atomic::{AtomicUsize, Ordering}; 3 | use std::time::Duration; 4 | 5 | use rand::{Rng, thread_rng}; 6 | 7 | use async_std::stream::{from_iter, Stream, StreamExt}; 8 | use async_std::task; 9 | 10 | use async_listen::{ListenExt, backpressure}; 11 | 12 | fn collect(mut stream: S) -> Vec { 13 | task::block_on(async { 14 | let mut result = Vec::new(); 15 | while let Some(item) = stream.next().await { 16 | result.push(item); 17 | } 18 | result 19 | }) 20 | } 21 | 22 | fn random_delay() -> Duration { 23 | Duration::from_millis(thread_rng().gen_range(1, 200)) 24 | } 25 | 26 | #[test] 27 | fn test_backpressure() { 28 | let current = Arc::new(AtomicUsize::new(0)); 29 | let top = Arc::new(AtomicUsize::new(0)); 30 | let tasks = collect( 31 | from_iter(0..100) 32 | .backpressure(10) 33 | .map(|(token, _index)| { 34 | let current = current.clone(); 35 | let top = top.clone(); 36 | task::spawn(async move { 37 | let size = current.fetch_add(1, Ordering::Acquire) + 1; 38 | assert!(size <= 10); 39 | if size == 10 { 40 | top.fetch_add(1, Ordering::Relaxed); 41 | } 42 | task::sleep(random_delay()).await; 43 | current.fetch_sub(1, Ordering::Acquire); 44 | drop(token); 45 | }) 46 | })); 47 | for item in tasks { 48 | task::block_on(item); 49 | }; 50 | let top = top.load(Ordering::SeqCst); 51 | println!("Top capacity {}", top); 52 | assert!(30 < top && top <= 91); 53 | } 54 | 55 | #[test] 56 | fn test_apply_backpressure() { 57 | let current = Arc::new(AtomicUsize::new(0)); 58 | let top = Arc::new(AtomicUsize::new(0)); 59 | let (tx, rx) = backpressure::new(10); 60 | let tasks = collect( 61 | from_iter(0..100) 62 | .apply_backpressure(rx) 63 | .map(|_index| { 64 | let token = tx.token(); 65 | let current = current.clone(); 66 | let top = top.clone(); 67 | task::spawn(async move { 68 | let size = current.fetch_add(1, Ordering::Acquire) + 1; 69 | assert!(size <= 10); 70 | if size == 10 { 71 | top.fetch_add(1, Ordering::Relaxed); 72 | } 73 | task::sleep(random_delay()).await; 74 | current.fetch_sub(1, Ordering::Acquire); 75 | drop(token); 76 | }) 77 | })); 78 | for item in tasks { 79 | task::block_on(item); 80 | }; 81 | let top = top.load(Ordering::SeqCst); 82 | println!("Top capacity {}", top); 83 | assert!(30 < top && top <= 91); 84 | } 85 | 86 | #[test] 87 | fn test_change_limit() { 88 | let current = Arc::new(AtomicUsize::new(0)); 89 | let top = Arc::new(AtomicUsize::new(0)); 90 | let (tx, rx) = backpressure::new(10); 91 | let tasks = collect( 92 | from_iter(0..100) 93 | .apply_backpressure(rx) 94 | .map(|index| { 95 | let tx = tx.clone(); 96 | let token = tx.token(); 97 | let current = current.clone(); 98 | let top = top.clone(); 99 | task::spawn(async move { 100 | let size = current.fetch_add(1, Ordering::Acquire) + 1; 101 | assert!(size <= 20); 102 | if size == 20 { 103 | top.fetch_add(1, Ordering::Relaxed); 104 | } 105 | task::sleep(random_delay()).await; 106 
| if index == 20 { 107 | tx.set_limit(20); 108 | } else if index == 60 { 109 | tx.set_limit(5); 110 | } 111 | current.fetch_sub(1, Ordering::Acquire); 112 | drop(token); 113 | }) 114 | })); 115 | for item in tasks { 116 | task::block_on(item); 117 | }; 118 | let top = top.load(Ordering::SeqCst); 119 | println!("Top capacity {}", top); 120 | assert!(5 < top && top <= 81); 121 | } 122 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | //! # Documentation of the Error Hints 2 | //! 3 | //! This page is the destination of the links shown by 4 | //! [`error_hint`](../fn.error_hint.html). 5 | //! 6 | //! List of errors having a hint: 7 | //! 8 | //! * [Too many open files](#EMFILE) / [EMFILE](#EMFILE) 9 | //! * [Too many open files in system](#ENFILE) / [ENFILE](#ENFILE) 10 | //! 11 | //! 12 | //! # Too Many Open Files 13 | //! 14 | //! | Posix Name | EMFILE | 15 | //! |---|---| 16 | //! | Message | `Too many open files (os error 24)` | 17 | //! | Hint | `Increase per-process open file limit` | 18 | //! | Link | `https://bit.ly/async-err#EMFILE` | 19 | //! 20 | //! ## Common Causes 21 | //! 22 | //! 1. File descriptor limit for the process is too low 23 | //! 2. Limit of number of simultaneous connections is either too high or 24 | //! unimplemented 25 | //! 26 | //! The (2) can be fixed by applying [`backpressure`] abstraction from this 27 | //! crate. 28 | //! 29 | //! The rest of this section discusses how to change file decriptor limit. 30 | //! 31 | //! [`backpressure`]: ../backpressure/fn.new.html 32 | //! 33 | //! ## Choosing a Limit 34 | //! 35 | //! There is no one good strategy. But here are few hints: 36 | //! 37 | //! 1. It must be lower than limit on simultaneous connections. 38 | //! 2. Sometimes it's several times lower, like if you need to open a file 39 | //! or to open a backend connection for each client, it should be 2x lower 40 | //! plus some offset. 41 | //! 3. Measure an average memory used by each client and divide memory 42 | //! available by that value. 43 | //! 44 | //! We use `10000` as an example value later in the text. 45 | //! 46 | //! ## Linux 47 | //! 48 | //! Either use `ulimit -n` in the same shell: 49 | //! ```console 50 | //! $ ulimit -n 10000 51 | //! $ ./your_app 52 | //! ``` 53 | //! 54 | //! If you get the error, you need superuser privileges: 55 | //! ```console 56 | //! $ ulimit -n 10000 57 | //! ulimit: value exceeds hard limit 58 | //! $ sudo -s 59 | //! # ulimit -n 10000 60 | //! # su your_user 61 | //! $ ./your_app 62 | //! ``` 63 | //! 64 | //! On most systems there is `/etc/security/limits.conf` to make persistent 65 | //! changes: 66 | //! ```text 67 | //! your_user hard nofile 10000 68 | //! your_user sort nofile 10000 69 | //! ``` 70 | //! 71 | //! Run ``su your_user`` to apply changes to your current shell without reboot. 72 | //! 73 | //! [More information](https://duckduckgo.com/?q=Increase+per-process+open+file+limit+linux) 74 | //! 75 | //! ## Docker 76 | //! 77 | //! Docker uses limit of `65535` by default. To increase it add a parameter: 78 | //! ```shell 79 | //! docker run --ulimit nofile=10000:10000 ... 80 | //! ``` 81 | //! 82 | //! ## MacOS 83 | //! 84 | //! On MacOS raising ulimit doesn't require permissions: 85 | //! ```console 86 | //! $ ulimit -n 10000 87 | //! $ ./your_app 88 | //! ``` 89 | //! 90 | //! 
[More information](https://duckduckgo.com/?q=Increase+per-process+open+file+limit+macos)
91 | //!
92 | //! # Too Many Open Files in System
93 | //!
94 | //! | Posix Name | ENFILE |
95 | //! |---|---|
96 | //! | Message | `Too many open files in system (os error 23)` |
97 | //! | Hint | `Increase system open file limit` |
98 | //! | Link | `https://bit.ly/async-err#ENFILE` |
99 | //!
100 | //! ## Common Causes
101 | //!
102 | //! 1. Per-process file descriptor limit is larger than the system one
103 | //! 2. File descriptor limit on the system is too small
104 | //! 3. Limit of number of simultaneous connections is either too high or
105 | //! unimplemented
106 | //!
107 | //! The (3) can be fixed by applying the [`backpressure`] abstraction from
108 | //! this crate.
109 | //!
110 | //! Changing (1) is described in the [section above](#EMFILE).
111 | //!
112 | //! The rest of this section discusses how to change the system file
113 | //! descriptor limit.
114 | //!
115 | //! ## Choosing a Limit
116 | //!
117 | //! Usually the system limit depends on the amount of memory and doesn't
118 | //! have to be increased. So be careful and consult your system docs for more info.
119 | //!
120 | //! ## Linux
121 | //!
122 | //! Checking the limit:
123 | //! ```console
124 | //! $ cat /proc/sys/fs/file-max
125 | //! 818354
126 | //! ```
127 | //!
128 | //! Setting a limit:
129 | //! ```console
130 | //! $ sudo sysctl fs.file-max=1500000
131 | //! ```
132 | //!
133 | //! This only works **until reboot**. To make the change persistent, add the
134 | //! following to `/etc/sysctl.conf` (or a file under `/etc/sysctl.d/`):
135 | //! ```config
136 | //! fs.file-max=1500000
137 | //! ```
138 | //!
139 | //!
140 | //! [More information](https://duckduckgo.com/?q=Increase+system+open+file+limit+linux)
141 | //!
142 | //! ## MacOS
143 | //!
144 | //! Checking the limit:
145 | //! ```shell
146 | //! launchctl limit maxfiles
147 | //! ```
148 | //!
149 | //! Setting a limit, **until reboot**:
150 | //! ```shell
151 | //! sudo sysctl -w kern.maxfiles=20480
152 | //! ```
153 | //!
154 | //! To make the permanent change, add the following to `/etc/sysctl.conf`:
155 | //! ```config
156 | //! kern.maxfiles=65536
157 | //! kern.maxfilesperproc=65536
158 | //! ```
159 | //!
160 | //! [More information](https://duckduckgo.com/?q=Increase+system+open+file+limit+macos)
161 | --------------------------------------------------------------------------------
/src/error.rs:
--------------------------------------------------------------------------------
1 | #![deny(meta_variable_misuse)]
2 |
3 | use std::fmt;
4 | use std::io;
5 |
6 | /// Error hint that can be formatted
7 | ///
8 | /// The structure implements `Display` and is usually used in logs like this:
9 | /// ```
10 | /// # use std::io;
11 | /// # let e: io::Error = io::ErrorKind::Other.into();
12 | /// use async_listen::error_hint;
13 | /// eprintln!("Error: {}. {}", e, error_hint(&e));
14 | /// ```
15 | ///
16 | /// See [error description](../errors/index.html) for a list of the errors that
17 | /// can return a hint.
18 | ///
19 | /// You can also apply a custom formatting (e.g. a different link) for the
20 | /// hint.
Just replace an `error_hint` function with something of your own: 21 | /// 22 | /// ``` 23 | /// # use std::io; 24 | /// fn error_hint(e: &io::Error) -> String { 25 | /// let hint = async_listen::error_hint(e); 26 | /// if hint.is_empty() { 27 | /// return String::new(); 28 | /// } else { 29 | /// return format!("{} http://example.org/my-server-errors#{}", 30 | /// hint.hint_text(), hint.link_hash()); 31 | /// } 32 | /// } 33 | /// ``` 34 | #[derive(Debug)] 35 | pub struct ErrorHint { 36 | error: Option 37 | } 38 | 39 | #[derive(Debug)] 40 | enum KnownError { 41 | Enfile, 42 | Emfile, 43 | } 44 | 45 | /// Returns true if the error is transient 46 | /// 47 | /// The transient error is defined here as an error after which we can continue 48 | /// accepting subsequent connections without the risk of a tight loop. 49 | /// 50 | /// For example, a per-connection error like `ConnectionReset` in `accept()` 51 | /// system call means then next connection might be ready to be accepted 52 | /// immediately. 53 | /// 54 | /// All other errors should incur a timeout before the next `accept()` is 55 | /// performed. The timeout is useful to handle resource exhaustion errors 56 | /// like ENFILE and EMFILE: file descriptor might be released after some time 57 | /// but the error will be the same if we continue to accept in a tight loop. 58 | /// 59 | /// This function is most likely should not be used directly, but rather 60 | /// through one of the following adapters: 61 | /// * [`log_warnings`](trait.ListenExt.html#method.log_warnings) 62 | /// * [`handle_errors`](trait.ListenExt.html#method.handle_errors) 63 | pub fn is_transient_error(e: &io::Error) -> bool { 64 | e.kind() == io::ErrorKind::ConnectionRefused || 65 | e.kind() == io::ErrorKind::ConnectionAborted || 66 | e.kind() == io::ErrorKind::ConnectionReset 67 | } 68 | 69 | macro_rules! error_match { 70 | ($value:expr => { 71 | $( 72 | ($n: pat | wasi: $wasi: pat | haiku: $haiku:pat) => $val: ident, 73 | )* 74 | }) => { 75 | match $value {$( 76 | #[cfg(any(target_env="wasi", target_os="wasi"))] 77 | Some($wasi) => Some($val), 78 | #[cfg(target_os="haiku")] 79 | Some($haiku) => Some($val), 80 | #[cfg(all( 81 | any(unix, windows, target_os="fuchsia"), 82 | not(any(target_env="wasi", target_os="wasi",target_os="haiku")) 83 | ))] 84 | Some($n) => Some($val), 85 | )* 86 | _ => None, 87 | } 88 | } 89 | } 90 | 91 | /// Returns a hint structure that can be formatter to the log output 92 | /// 93 | /// # Example 94 | /// ``` 95 | /// # use std::io; 96 | /// # let e: io::Error = io::ErrorKind::Other.into(); 97 | /// use async_listen::error_hint; 98 | /// eprintln!("Error: {}. {}", e, error_hint(&e)); 99 | /// ``` 100 | /// 101 | /// Error message might look like: 102 | /// ```text 103 | /// Error: Too many open files (os error 24). Increase per-process open file limit https://bit.ly/async-err#EMFILE 104 | /// ``` 105 | /// 106 | /// See [error description](errors/index.html) for a list of the errors that 107 | /// can return a hint. 
108 | /// 109 | /// See [`ErrorHint`] for more info on customizing the output 110 | /// 111 | /// [`ErrorHint`]: wrapper_types/struct.ErrorHint.html 112 | pub fn error_hint(e: &io::Error) -> ErrorHint { 113 | use KnownError::*; 114 | let error = error_match!(e.raw_os_error() => { 115 | (24 | wasi: 33 | haiku: -2147459062) => Emfile, 116 | (23 | wasi: 41 | haiku: -2147454970) => Enfile, 117 | }); 118 | return ErrorHint { error } 119 | } 120 | 121 | 122 | impl ErrorHint { 123 | /// Text of the hint 124 | /// 125 | /// Since the text is expected to be printed **after** the error message, 126 | /// it usually includes call to action, like: 127 | /// ```text 128 | /// Increase per-process open file limit 129 | /// ``` 130 | /// 131 | /// Usually the hint is good enough to use search engine to find the 132 | /// solution to the problem. But usually link is printed too. 133 | pub fn hint_text(&self) -> &'static str { 134 | use KnownError::*; 135 | match &self.error { 136 | None => "", 137 | Some(Emfile) => "Increase per-process open file limit", 138 | Some(Enfile) => "Increase system open file limit", 139 | } 140 | } 141 | 142 | /// The part of the link after the hash `#` sign 143 | /// 144 | /// To make a link prepend with the base URL: 145 | /// ``` 146 | /// # let h = async_listen::error_hint(&std::io::ErrorKind::Other.into()); 147 | /// println!("{}#{}", h.default_link_base(), h.link_hash()) 148 | /// ``` 149 | /// 150 | /// It's expected that implementation may customize base link. Mathing 151 | /// for the link hash is also well supported, for exaple if you want to 152 | /// change the link only for one of few errors. 153 | /// 154 | /// Link hashes are stable (we don't change them in future versions). 155 | pub fn link_hash(&self) -> &'static str { 156 | use KnownError::*; 157 | match &self.error { 158 | None => "", 159 | Some(Emfile) => "EMFILE", 160 | Some(Enfile) => "ENFILE", 161 | } 162 | } 163 | 164 | /// Returns current base link printed with the hint 165 | /// 166 | /// Current value is `https://bit.ly/async-err`. In future versions we 167 | /// might change the base if we find a better place to host the docs, but 168 | /// we don't take this decision lightly. 169 | /// 170 | /// To make a link, just append a hash part: 171 | /// ```no_run 172 | /// # let h = async_listen::error_hint(&std::io::ErrorKind::Other.into()); 173 | /// println!("{}#{}", h.default_link_base(), h.link_hash()) 174 | /// ``` 175 | pub fn default_link_base(&self) -> &'static str { 176 | return "https://bit.ly/async-err"; 177 | } 178 | 179 | /// Returns true if the hint is empty 180 | /// 181 | /// Even if there is no hint for the error (error code is unknown) 182 | /// the `error_hint` function returns the `ErrorHint` object which is 183 | /// empty when displayed. This is a convenience in most cases, but you 184 | /// have to check for `is_empty` when formatting your own hint. 
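///
/// For instance, a sketch of a custom formatter that keeps the default
/// behaviour but swaps the link for a single error (the wiki URL below is
/// only a placeholder):
///
/// ```
/// # use std::io;
/// use async_listen::error_hint;
///
/// fn my_hint(e: &io::Error) -> String {
///     let hint = error_hint(e);
///     if hint.is_empty() {
///         return String::new();
///     }
///     // keep the default link base except for EMFILE
///     let base = match hint.link_hash() {
///         "EMFILE" => "https://wiki.example.org/fd-limits",
///         _ => hint.default_link_base(),
///     };
///     format!("{} {}#{}", hint.hint_text(), base, hint.link_hash())
/// }
/// ```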
185 | pub fn is_empty(&self) -> bool {
186 | self.error.is_none()
187 | }
188 | }
189 |
190 | impl fmt::Display for ErrorHint {
191 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
192 | if self.error.is_none() {
193 | return Ok(())
194 | }
195 | write!(f, "{} {}#{}",
196 | self.hint_text(), self.default_link_base(), self.link_hash())
197 | }
198 | }
199 | --------------------------------------------------------------------------------
/src/listen_ext.rs:
--------------------------------------------------------------------------------
1 | use std::io;
2 | use std::time::Duration;
3 |
4 | use async_std::stream::Stream;
5 |
6 | use crate::log;
7 | use crate::sleep;
8 | use crate::backpressure::{self, Token};
9 | use crate::byte_stream::ByteStream;
10 |
11 |
12 | /// An extension trait that provides the necessary adapters for turning
13 | /// a stream of `accept()` events into a full-featured connection listener
14 | ///
15 | pub trait ListenExt: Stream {
16 | /// Log errors which aren't transient using a user-specified function
17 | ///
18 | /// The warning in this context is any error which isn't transient.
19 | /// There are no fatal errors (ones which don't allow the listener to
20 | /// proceed in the future) on any known platform so any error is
21 | /// considered a warning. See
22 | /// [`is_transient_error`](fn.is_transient_error.html) for more info.
23 | ///
24 | /// `stream.log_warnings(user_func)` is equivalent to:
25 | ///
26 | /// ```ignore
27 | /// stream.inspect(|res| res.map_err(|e| {
28 | /// if !is_transient_error(e) {
29 | /// user_func(e);
30 | /// }
31 | /// }))
32 | /// ```
33 | ///
34 | /// # Example
35 | ///
36 | /// ```no_run
37 | /// # use async_std::net::TcpListener;
38 | /// # use async_std::prelude::*;
39 | /// # fn main() -> std::io::Result<()> { async_std::task::block_on(async {
40 | /// #
41 | /// use async_listen::ListenExt;
42 | ///
43 | /// let listener = TcpListener::bind("127.0.0.1:0").await?;
44 | /// let mut incoming = listener.incoming()
45 | /// .log_warnings(|e| eprintln!("Listening error: {}", e));
46 | ///
47 | /// while let Some(stream) = incoming.next().await {
48 | /// // ...
49 | /// }
50 | /// #
51 | /// # Ok(()) }) }
52 | /// ```
53 | fn log_warnings<I, F>(self, f: F)
54 | -> log::LogWarnings<Self, F>
55 | where Self: Stream<Item = io::Result<I>> + Sized,
56 | F: FnMut(&io::Error),
57 | {
58 | log::LogWarnings::new(self, f)
59 | }
60 |
61 | /// Handle errors and return an infallible stream
62 | ///
63 | /// There are two types of errors:
64 | ///
65 | /// * [`transient`](fn.is_transient_error.html) which may be ignored
66 | /// * warnings which keep the socket in the accept queue after the error
67 | ///
68 | /// We ignore transient errors entirely, and sleep for `sleep_on_warning`
69 | /// on all other ones.
70 | ///
71 | /// One example of a warning is `EMFILE: too many open files`. In this
72 | /// case, if we sleep for some time, there is a chance that another
73 | /// connection or some file descriptor is closed in the meantime and we
74 | /// can accept another connection.
75 | ///
76 | /// Also in the case of warnings, it's usually a good idea to log them
77 | /// (e.g. so the file descriptor limit or the connection limit can be adjusted by the user).
78 | /// Use [`log_warnings`](#method.log_warnings) to do this.
79 | ///
80 | /// `while let Some(s) = stream.handle_errors(d).next().await {...}`
81 | /// is equivalent to:
82 | ///
83 | /// ```ignore
84 | /// while let Some(res) = stream.next().await
{ 85 | /// let s = match res { 86 | /// Ok(s) => s, 87 | /// Err(e) => { 88 | /// if !is_traisient_error(e) { 89 | /// task::sleep(d); 90 | /// } 91 | /// continue; 92 | /// } 93 | /// }; 94 | /// # ... 95 | /// } 96 | /// ``` 97 | /// 98 | /// # Example 99 | /// 100 | fn handle_errors(self, sleep_on_warning: Duration) 101 | -> sleep::HandleErrors 102 | where Self: Stream> + Sized, 103 | { 104 | sleep::HandleErrors::new(self, sleep_on_warning) 105 | } 106 | 107 | /// Apply a fixed backpressure to the the stream 108 | /// 109 | /// The output stream yields pairs of (token, stream). The token must 110 | /// be kept alive as long as connection is still alive. 111 | /// 112 | /// See [`backpressure_wrapper`](#method.backpressure_wrapper) method for 113 | /// a simple way of handling backpressure in a common case. 114 | /// 115 | /// `stream.backpressure(10)` is equivalent of: 116 | /// ```ignore 117 | /// let (tx, rx) = backpressure::new(10); 118 | /// stream 119 | /// .apply_backpressure(rx) 120 | /// .map(|conn| (tx.token(), conn)) 121 | /// ``` 122 | /// 123 | /// # Example 124 | /// 125 | /// ```no_run 126 | /// # use std::time::Duration; 127 | /// # use async_std::net::{TcpListener, TcpStream}; 128 | /// # use async_std::prelude::*; 129 | /// # use async_std::task; 130 | /// # fn main() -> std::io::Result<()> { task::block_on(async { 131 | /// # 132 | /// use async_listen::ListenExt; 133 | /// 134 | /// let listener = TcpListener::bind("127.0.0.1:0").await?; 135 | /// let mut incoming = listener.incoming() 136 | /// .handle_errors(Duration::from_millis(100)) 137 | /// .backpressure(100); 138 | /// 139 | /// while let Some((token, stream)) = incoming.next().await { 140 | /// task::spawn(async { 141 | /// connection_loop(stream).await; 142 | /// drop(token); 143 | /// }); 144 | /// } 145 | /// # async fn connection_loop(_stream: TcpStream) { 146 | /// # } 147 | /// # 148 | /// # Ok(()) }) } 149 | /// ``` 150 | /// 151 | /// *Note:* the `drop` there is not needed you can use either: 152 | /// 153 | /// * `let _token = token;` inside `async` block, or 154 | /// * `connection_loop(&token, stream)`, 155 | /// 156 | /// To achieve the same result. But `drop(token)` makes it explicit that 157 | /// token is dropped only at that point, which is an important property to 158 | /// achieve. 159 | fn backpressure(self, limit: usize) 160 | -> backpressure::BackpressureToken 161 | where Self: Stream + Sized, 162 | { 163 | let (_tx, rx) = backpressure::new(limit); 164 | return backpressure::BackpressureToken::new(self, rx); 165 | } 166 | 167 | /// Apply a backpressure object to a stream 168 | /// 169 | /// This method is different from [`backpressure`](#method.backpressure) in 170 | /// two ways: 171 | /// 172 | /// 1. It doesn't modify stream output 173 | /// 2. External backpressure object may be used to change limit at runtime 174 | /// 175 | /// With the greater power comes greater responsibility, though. Here are 176 | /// some things to remember when using the method: 177 | /// 178 | /// 1. You must create a token for each connection (see example). 179 | /// 2. Token *should* be created before yielding to a main loop, otherwise 180 | /// limit can be exhausted at times. 181 | /// 2. Token should be kept alive as long as the connection is alive. 182 | /// 183 | /// See [`backpressure_wrapper`](#method.backpressure_wrapper) method for 184 | /// a simple way of handling backpressure in a common case. 
185 | ///
186 | /// # Example
187 | ///
188 | /// ```no_run
189 | /// # use std::time::Duration;
190 | /// # use async_std::net::{TcpListener, TcpStream};
191 | /// # use async_std::prelude::*;
192 | /// # use async_std::task;
193 | /// # fn main() -> std::io::Result<()> { task::block_on(async {
194 | /// #
195 | /// use async_listen::ListenExt;
196 | /// use async_listen::backpressure;
197 | ///
198 | /// let listener = TcpListener::bind("127.0.0.1:0").await?;
199 | /// let (tx, rx) = backpressure::new(10);
200 | /// let mut incoming = listener.incoming()
201 | /// .handle_errors(Duration::from_millis(100))
202 | /// .apply_backpressure(rx);
203 | ///
204 | /// while let Some(stream) = incoming.next().await {
205 | /// let token = tx.token(); // should be created before spawn
206 | /// task::spawn(async {
207 | /// connection_loop(stream).await;
208 | /// drop(token); // should be dropped after
209 | /// });
210 | /// }
211 | /// # async fn connection_loop(_stream: TcpStream) {
212 | /// # }
213 | /// #
214 | /// # Ok(()) }) }
215 | /// ```
216 | ///
217 | /// *Note:* the `drop` there is not needed; you can use either:
218 | ///
219 | /// * `let _token = token;` inside the `async` block, or
220 | /// * `connection_loop(&token, stream)`,
221 | ///
222 | /// to achieve the same result. But `drop(token)` makes it explicit that the
223 | /// token is dropped only at that point, which is an important property to
224 | /// achieve. Also, don't create the token inside the async block, as that makes
225 | /// backpressure enforcement unreliable.
226 | fn apply_backpressure(self, backpressure: backpressure::Receiver)
227 | -> backpressure::Backpressure<Self>
228 | where Self: Stream + Sized,
229 | {
230 | return backpressure::Backpressure::new(self, backpressure);
231 | }
232 |
233 | /// Apply a backpressure object to a stream and yield `ByteStream`s
234 | ///
235 | /// This method simplifies backpressure handling by hiding the token
236 | /// inside the [`ByteStream`](struct.ByteStream.html) structure, so
237 | /// its lifetime is tied to the lifetime of the structure.
238 | ///
239 | /// The wrapper works for `TcpListener` and `UnixListener` and returns
240 | /// the same `ByteStream` structure for both of them. This helps working
241 | /// with both kinds of sockets in a uniform way.
242 | ///
243 | /// Wrapping streams might incur a tiny performance cost (although this cost
244 | /// is much smaller than the cost of the system calls involved in working with
245 | /// sockets). See [`backpressure`](#method.backpressure) and
246 | /// [`apply_backpressure`](#method.apply_backpressure) for a wrapper-less
247 | /// way of applying backpressure.
248 | /// 249 | /// # Example 250 | /// 251 | /// ```no_run 252 | /// # use std::time::Duration; 253 | /// # use async_std::net::{TcpListener, TcpStream}; 254 | /// # use async_std::prelude::*; 255 | /// # use async_std::task; 256 | /// # fn main() -> std::io::Result<()> { task::block_on(async { 257 | /// # 258 | /// use async_listen::{ListenExt, ByteStream, backpressure}; 259 | /// 260 | /// let listener = TcpListener::bind("127.0.0.1:0").await?; 261 | /// let (_, rx) = backpressure::new(10); 262 | /// let mut incoming = listener.incoming() 263 | /// .handle_errors(Duration::from_millis(100)) 264 | /// .backpressure_wrapper(rx); 265 | /// 266 | /// while let Some(stream) = incoming.next().await { 267 | /// task::spawn(connection_loop(stream)); 268 | /// } 269 | /// # async fn connection_loop(_stream: ByteStream) { 270 | /// # } 271 | /// # 272 | /// # Ok(()) }) } 273 | /// ``` 274 | /// 275 | /// # Notes 276 | /// 277 | /// The following examples are equivalent: 278 | /// 279 | /// ```ignore 280 | /// let (_, bp) = backpressure::new(100); 281 | /// stream.backpressure_wrapper(bp) 282 | /// ``` 283 | /// 284 | /// ```ignore 285 | /// let (tx, rx) = backpressure::new(100); 286 | /// stream.apply_backpressure(rx) 287 | /// .map(|stream| ByteStream::from((tx.token(), stream))) 288 | /// ``` 289 | /// 290 | /// ```ignore 291 | /// stream.backpressure(100) 292 | /// .map(ByteStream::from) 293 | /// ``` 294 | /// 295 | fn backpressure_wrapper(self, backpressure: backpressure::Receiver) 296 | -> backpressure::BackpressureWrapper 297 | where Self: Stream + Sized, 298 | ByteStream: From<(Token, I)>, 299 | { 300 | return backpressure::BackpressureWrapper::new(self, backpressure); 301 | } 302 | } 303 | 304 | impl ListenExt for T {} 305 | -------------------------------------------------------------------------------- /LICENSE-APACHE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 203 | -------------------------------------------------------------------------------- /src/byte_stream.rs: -------------------------------------------------------------------------------- 1 | use std::fmt; 2 | use std::io; 3 | use std::net::SocketAddr; 4 | use std::path::PathBuf; 5 | use std::pin::Pin; 6 | use std::task::{Poll, Context}; 7 | 8 | use async_std::io::{Read, Write, IoSlice, IoSliceMut}; 9 | use async_std::net::{TcpStream, Shutdown}; 10 | #[cfg(unix)] use async_std::os::unix::net::UnixStream; 11 | 12 | use crate::backpressure::Token; 13 | 14 | 15 | #[derive(Debug, Clone)] 16 | enum Stream { 17 | Tcp(TcpStream), 18 | #[cfg(unix)] 19 | Unix(UnixStream), 20 | } 21 | 22 | /// A peer address for either Tcp or Unix socket 23 | /// 24 | /// This enum is returned by 25 | /// [`ByteStream::peer_addr`](struct.ByteStream.html#method.peer_addr). 26 | /// 27 | /// 28 | /// The enum contains `Unix` option even on platforms that don't support 29 | /// unix sockets (Windows) to make code easier to write (less `#[cfg(unix)]` 30 | /// attributes all over the code). 31 | #[derive(Debug, Clone, PartialEq, Eq, Hash)] 32 | pub enum PeerAddr { 33 | /// The peer address is TCP socket address. 34 | Tcp(SocketAddr), 35 | /// The peer address is Unix socket path. `None` if socket is unnamed. 36 | Unix(Option), 37 | } 38 | 39 | /// A wrapper around TcpStream and UnixStream 40 | /// 41 | /// This structure is yielded by the stream created by 42 | /// [`ListenExt::backpressure_wrapper`](trait.ListenExt.html#method.backpressure_wrapper) 43 | /// 44 | /// This wrapper serves two purposes: 45 | /// 46 | /// 1. Holds backpressure token 47 | /// 2. Abstract away differences between TcpStream and UnixStream 48 | /// 49 | /// The structure implements AsyncRead and AsyncWrite so can be used for 50 | /// protocol implementation directly. 51 | /// 52 | /// # Notes on Cloning 53 | /// 54 | /// Cloning a `ByteStream` is a shallow clone, both resulting `ByteStream` 55 | /// structures hold the same backpressure token (and the same underlying OS socket). 56 | /// The backpressure slot will be freed (which means new connection can be accepted) 57 | /// when the last clone of `ByteStream` is dropped. 58 | #[derive(Debug, Clone)] 59 | pub struct ByteStream { 60 | stream: Stream, 61 | token: Option, 62 | } 63 | 64 | trait Assert: Read + Write + Send + Unpin + 'static { } 65 | impl Assert for ByteStream {} 66 | 67 | impl fmt::Display for PeerAddr { 68 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 69 | match self { 70 | PeerAddr::Tcp(s) => s.fmt(f), 71 | PeerAddr::Unix(None) => "".fmt(f), 72 | PeerAddr::Unix(Some(s)) => s.display().fmt(f), 73 | } 74 | } 75 | } 76 | 77 | impl ByteStream { 78 | /// Create a bytestream for a tcp socket 79 | pub fn new_tcp(token: Token, stream: TcpStream) -> ByteStream { 80 | ByteStream { 81 | stream: Stream::Tcp(stream), 82 | token: Some(token), 83 | } 84 | } 85 | 86 | /// Create a bytestream for a tcp socket (without token) 87 | /// 88 | /// This can be used with interfaces that require a `ByteStream` but 89 | /// aren't got from the listener that have backpressure applied. For 90 | /// example, if you have two listeners in the single app or even for 91 | /// client connections. 
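///
/// A minimal sketch (the address and use of a client connection are illustrative,
/// not part of the crate's examples):
///
/// ```ignore
/// let conn = TcpStream::connect("example.com:80").await?;
/// let stream = ByteStream::new_tcp_detached(conn);
/// ```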
92 | pub fn new_tcp_detached(stream: TcpStream) -> ByteStream { 93 | ByteStream { 94 | stream: Stream::Tcp(stream), 95 | token: None, 96 | } 97 | } 98 | 99 | /// Create a bytestream for a unix socket 100 | #[cfg(unix)] 101 | pub fn new_unix(token: Token, stream: UnixStream) -> ByteStream { 102 | ByteStream { 103 | stream: Stream::Unix(stream), 104 | token: Some(token), 105 | } 106 | } 107 | 108 | /// Create a bytestream for a unix socket (without token) 109 | /// 110 | /// This can be used with interfaces that require a `ByteStream` but 111 | /// were not obtained from a listener that has backpressure applied. For 112 | /// example, if you have two listeners in a single app, or even for 113 | /// client connections. 114 | #[cfg(unix)] 115 | pub fn new_unix_detached(stream: UnixStream) -> ByteStream { 116 | ByteStream { 117 | stream: Stream::Unix(stream), 118 | token: None, 119 | } 120 | } 121 | 122 | /// Returns the remote address that this stream is connected to. 123 | /// 124 | /// Note: even on non-unix platforms (Windows) 125 | /// [`PeerAddr`](enum.PeerAddr.html) still contains the `Unix` option so you 126 | /// don't have to use conditional compilation when matching. 127 | /// 128 | /// ## Examples 129 | /// 130 | /// ```ignore 131 | /// let peer = stream.peer_addr()?; 132 | /// match peer { 133 | ///     PeerAddr::Tcp(addr) => println!("TCP addr {}", addr), 134 | ///     PeerAddr::Unix(None) => println!("Unnamed unix socket"), 135 | ///     PeerAddr::Unix(Some(path)) => println!("Unix {}", path.display()), 136 | /// } 137 | /// ``` 138 | pub fn peer_addr(&self) -> io::Result<PeerAddr> { 139 | match &self.stream { 140 | Stream::Tcp(s) => s.peer_addr().map(PeerAddr::Tcp), 141 | #[cfg(unix)] 142 | Stream::Unix(s) => { 143 | s.peer_addr() 144 | .map(|a| a.as_pathname().map(|p| p.to_owned())) 145 | .map(PeerAddr::Unix) 146 | } 147 | } 148 | } 149 | 150 | /// Gets the value of the `TCP_NODELAY` option on this socket. 151 | /// 152 | /// For Unix sockets this function always returns true (Unix sockets 153 | /// always behave as if the option is enabled). 154 | /// 155 | /// For more information about this option, see [`set_nodelay`]. 156 | /// 157 | /// [`set_nodelay`]: #method.set_nodelay 158 | pub fn nodelay(&self) -> io::Result<bool> { 159 | match &self.stream { 160 | Stream::Tcp(s) => s.nodelay(), 161 | #[cfg(unix)] 162 | Stream::Unix(_) => Ok(true), 163 | } 164 | } 165 | 166 | /// Sets the value of the `TCP_NODELAY` option on this socket. 167 | /// 168 | /// If set, this option disables the Nagle algorithm. This means that 169 | /// segments are always sent as soon as possible, even if there is only a 170 | /// small amount of data. When not set, data is buffered until there is a 171 | /// sufficient amount to send out, thereby avoiding the frequent sending of 172 | /// small packets. 173 | /// 174 | /// For Unix sockets this function does nothing (Unix sockets always behave 175 | /// like the option is enabled, and there is no way to change that). 176 | pub fn set_nodelay(&self, nodelay: bool) -> io::Result<()> { 177 | match &self.stream { 178 | Stream::Tcp(s) => s.set_nodelay(nodelay), 179 | #[cfg(unix)] 180 | Stream::Unix(_) => Ok(()), 181 | } 182 | } 183 | 184 | /// Shuts down the read, write, or both halves of this connection. 185 | /// 186 | /// This function will cause all pending and future I/O calls on the 187 | /// specified portions to immediately return with an appropriate value 188 | /// (see the documentation of `Shutdown`).
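///
/// A minimal sketch (assuming the response has already been written and only
/// the write half needs closing):
///
/// ```ignore
/// use async_std::net::Shutdown;
///
/// stream.shutdown(Shutdown::Write)?;
/// ```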
189 | pub fn shutdown(&self, how: Shutdown) -> Result<(), io::Error> { 190 | match &self.stream { 191 | Stream::Tcp(s) => s.shutdown(how), 192 | #[cfg(unix)] 193 | Stream::Unix(s) => s.shutdown(how), 194 | } 195 | } 196 | } 197 | 198 | impl From<(Token, TcpStream)> for ByteStream { 199 | fn from((token, stream): (Token, TcpStream)) -> ByteStream { 200 | ByteStream::new_tcp(token, stream) 201 | } 202 | } 203 | 204 | #[cfg(unix)] 205 | impl From<(Token, UnixStream)> for ByteStream { 206 | fn from((token, stream): (Token, UnixStream)) -> ByteStream { 207 | ByteStream::new_unix(token, stream) 208 | } 209 | } 210 | 211 | impl Read for ByteStream { 212 | 213 | fn poll_read(self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) 214 | -> Poll> 215 | { 216 | match self.stream { 217 | Stream::Tcp(ref s) => { 218 | Pin::new(&mut &*s).poll_read(cx, buf) 219 | } 220 | #[cfg(unix)] 221 | Stream::Unix(ref s) => { 222 | Pin::new(&mut &*s).poll_read(cx, buf) 223 | } 224 | } 225 | } 226 | 227 | fn poll_read_vectored(self: Pin<&mut Self>, cx: &mut Context, 228 | bufs: &mut [IoSliceMut]) 229 | -> Poll> 230 | { 231 | match self.stream { 232 | Stream::Tcp(ref s) => { 233 | Pin::new(&mut &*s).poll_read_vectored(cx, bufs) 234 | } 235 | #[cfg(unix)] 236 | Stream::Unix(ref s) => { 237 | Pin::new(&mut &*s).poll_read_vectored(cx, bufs) 238 | } 239 | } 240 | } 241 | } 242 | 243 | impl Read for &ByteStream { 244 | fn poll_read(self: Pin<&mut Self>, cx: &mut Context, buf: &mut [u8]) 245 | -> Poll> 246 | { 247 | match self.stream { 248 | Stream::Tcp(ref s) => { 249 | Pin::new(&mut &*s).poll_read(cx, buf) 250 | } 251 | #[cfg(unix)] 252 | Stream::Unix(ref s) => { 253 | Pin::new(&mut &*s).poll_read(cx, buf) 254 | } 255 | } 256 | } 257 | fn poll_read_vectored(self: Pin<&mut Self>, cx: &mut Context, 258 | bufs: &mut [IoSliceMut]) 259 | -> Poll> 260 | { 261 | match self.stream { 262 | Stream::Tcp(ref s) => { 263 | Pin::new(&mut &*s).poll_read_vectored(cx, bufs) 264 | } 265 | #[cfg(unix)] 266 | Stream::Unix(ref s) => { 267 | Pin::new(&mut &*s).poll_read_vectored(cx, bufs) 268 | } 269 | } 270 | } 271 | } 272 | 273 | impl Write for ByteStream { 274 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) 275 | -> Poll> 276 | { 277 | match self.stream { 278 | Stream::Tcp(ref s) => { 279 | Pin::new(&mut &*s).poll_write(cx, buf) 280 | } 281 | #[cfg(unix)] 282 | Stream::Unix(ref s) => { 283 | Pin::new(&mut &*s).poll_write(cx, buf) 284 | } 285 | } 286 | } 287 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) 288 | -> Poll> 289 | { 290 | match self.stream { 291 | Stream::Tcp(ref s) => { 292 | Pin::new(&mut &*s).poll_flush(cx) 293 | } 294 | #[cfg(unix)] 295 | Stream::Unix(ref s) => { 296 | Pin::new(&mut &*s).poll_flush(cx) 297 | } 298 | } 299 | } 300 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context) 301 | -> Poll> 302 | { 303 | match self.stream { 304 | Stream::Tcp(ref s) => { 305 | Pin::new(&mut &*s).poll_close(cx) 306 | } 307 | #[cfg(unix)] 308 | Stream::Unix(ref s) => { 309 | Pin::new(&mut &*s).poll_close(cx) 310 | } 311 | } 312 | } 313 | fn poll_write_vectored(self: Pin<&mut Self>, cx: &mut Context, 314 | bufs: &[IoSlice]) 315 | -> Poll> 316 | { 317 | match self.stream { 318 | Stream::Tcp(ref s) => { 319 | Pin::new(&mut &*s).poll_write_vectored(cx, bufs) 320 | } 321 | #[cfg(unix)] 322 | Stream::Unix(ref s) => { 323 | Pin::new(&mut &*s).poll_write_vectored(cx, bufs) 324 | } 325 | } 326 | } 327 | } 328 | 329 | impl Write for &ByteStream { 330 | fn poll_write(self: Pin<&mut Self>, cx: &mut Context, buf: &[u8]) 
331 | -> Poll> 332 | { 333 | match self.stream { 334 | Stream::Tcp(ref s) => { 335 | Pin::new(&mut &*s).poll_write(cx, buf) 336 | } 337 | #[cfg(unix)] 338 | Stream::Unix(ref s) => { 339 | Pin::new(&mut &*s).poll_write(cx, buf) 340 | } 341 | } 342 | } 343 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) 344 | -> Poll> 345 | { 346 | match self.stream { 347 | Stream::Tcp(ref s) => { 348 | Pin::new(&mut &*s).poll_flush(cx) 349 | } 350 | #[cfg(unix)] 351 | Stream::Unix(ref s) => { 352 | Pin::new(&mut &*s).poll_flush(cx) 353 | } 354 | } 355 | } 356 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context) 357 | -> Poll> 358 | { 359 | match self.stream { 360 | Stream::Tcp(ref s) => { 361 | Pin::new(&mut &*s).poll_close(cx) 362 | } 363 | #[cfg(unix)] 364 | Stream::Unix(ref s) => { 365 | Pin::new(&mut &*s).poll_close(cx) 366 | } 367 | } 368 | } 369 | fn poll_write_vectored(self: Pin<&mut Self>, cx: &mut Context, 370 | bufs: &[IoSlice]) 371 | -> Poll> 372 | { 373 | match self.stream { 374 | Stream::Tcp(ref s) => { 375 | Pin::new(&mut &*s).poll_write_vectored(cx, bufs) 376 | } 377 | #[cfg(unix)] 378 | Stream::Unix(ref s) => { 379 | Pin::new(&mut &*s).poll_write_vectored(cx, bufs) 380 | } 381 | } 382 | } 383 | } 384 | -------------------------------------------------------------------------------- /src/backpressure.rs: -------------------------------------------------------------------------------- 1 | //! Backpressure handling structures 2 | //! 3 | //! The usual way to apply backpressure to a stream is using one of the 4 | //! [`ListenExt`](../trait.ListenExt.html) trait methods: 5 | //! * [`backpressure`](../trait.ListenExt.html#method.backpressure) 6 | //! * [`apply_backpressure`](../trait.ListenExt.html#method.apply_backpressure) 7 | //! * [`backpressure_wrapper`](../trait.ListenExt.html#method.backpressure_wrapper) 8 | //! 9 | //! Also take a look at [`backpressure::new`](fn.new.html) for the low-level 10 | //! interface. 11 | //! 12 | use std::fmt; 13 | use std::pin::Pin; 14 | use std::sync::atomic::{AtomicUsize, Ordering}; 15 | use std::sync::{Arc, Mutex, TryLockError}; 16 | 17 | use async_std::stream::Stream; 18 | use async_std::future::Future; 19 | use async_std::task::{Poll, Context, Waker}; 20 | 21 | use crate::byte_stream::ByteStream; 22 | 23 | 24 | struct Inner { 25 | active: AtomicUsize, 26 | limit: AtomicUsize, 27 | task: Mutex>, 28 | } 29 | 30 | /// A stream adapter that applies backpressure 31 | /// 32 | /// See 33 | /// [`ListenExt::backpressure`](../trait.ListenExt.html#method.backpressure) 34 | /// for more info. 35 | pub struct BackpressureToken(Backpressure); 36 | 37 | /// A stream adapter that applies backpressure and yields ByteStream 38 | /// 39 | /// See 40 | /// [`ListenExt::backpressure_wrapper`](../trait.ListenExt.html#method.backpressure_wrapper) 41 | /// for more info. 42 | pub struct BackpressureWrapper(Backpressure); 43 | 44 | /// A stream adapter that applies backpressure and yields a token 45 | /// 46 | /// See 47 | /// [`ListenExt::apply_backpressure`](../trait.ListenExt.html#method.apply_backpressure) 48 | /// for more info. 
49 | pub struct Backpressure<S> { 50 | stream: S, 51 | backpressure: Receiver, 52 | } 53 | 54 | /// The throttler of a stream 55 | /// 56 | /// See [`new`](fn.new.html) for more details 57 | pub struct Receiver { 58 | inner: Arc<Inner>, 59 | } 60 | 61 | /// Future that resolves when there are fewer than `limit` tokens alive 62 | pub struct HasCapacity<'a> { 63 | recv: &'a mut Receiver, 64 | } 65 | 66 | /// The handle that controls backpressure 67 | /// 68 | /// It can be used to create tokens, change the limit, and get metrics. 69 | /// 70 | /// See [`new`](fn.new.html) for more details 71 | #[derive(Clone)] 72 | pub struct Sender { 73 | inner: Arc<Inner>, 74 | } 75 | 76 | /// The token which holds onto a single resource item 77 | /// 78 | /// # Notes on Cloning 79 | /// 80 | /// After cloning a `Token`, *both* clones have to be dropped to make the 81 | /// backpressure slot available again. 82 | #[derive(Clone)] 83 | pub struct Token { 84 | inner: Arc<Inner>, 85 | } 86 | 87 | impl<S: Unpin> Unpin for Backpressure<S> {} 88 | impl<S: Unpin> Unpin for BackpressureToken<S> {} 89 | impl<S: Unpin> Unpin for BackpressureWrapper<S> {} 90 | 91 | impl Sender { 92 | /// Acquire a backpressure token 93 | /// 94 | /// The token holds one unit of resource 95 | /// 96 | /// *Note:* You can always acquire a token, even if the capacity limit is reached. 97 | pub fn token(&self) -> Token { 98 | self.inner.active.fetch_add(1, Ordering::SeqCst); 99 | Token { 100 | inner: self.inner.clone(), 101 | } 102 | } 103 | /// Change the limit for the number of connections 104 | /// 105 | /// If the limit is increased, it's applied immediately. If the limit is lowered, 106 | /// we can't drop connections, so the listening stream is paused until 107 | /// there are fewer than `new_limit` tokens alive (i.e. the first dropped 108 | /// tokens may not unblock the stream). 109 | pub fn set_limit(&self, new_limit: usize) { 110 | let old_limit = self.inner.limit.swap(new_limit, Ordering::SeqCst); 111 | if old_limit < new_limit { 112 | match self.inner.task.try_lock() { 113 | Ok(mut guard) => { 114 | guard.take().map(|w| w.wake()); 115 | } 116 | Err(TryLockError::WouldBlock) => { 117 | // This means another token is currently waking 118 | // up a Receiver, or the Receiver is currently running. 119 | // The Receiver will recheck values after releasing the Mutex. 120 | } 121 | Err(TryLockError::Poisoned(_)) => { 122 | unreachable!("backpressure lock should never be poisoned"); 123 | } 124 | } 125 | } 126 | } 127 | 128 | /// Returns the number of currently active tokens 129 | /// 130 | /// Can return a value larger than the limit if tokens are created manually. 131 | /// 132 | /// This can be used for metrics or debugging. You should not rely on 133 | /// this value being in sync. There is also no way to wake up when this 134 | /// value drops below the limit; see also 135 | /// [`has_capacity`](struct.Receiver.html#method.has_capacity). 136 | pub fn get_active_tokens(&self) -> usize { 137 | self.inner.active.load(Ordering::Relaxed) 138 | } 139 | } 140 | 141 | impl Receiver { 142 | /// Handy way to create a token in the backpressure wrappers 143 | fn token(&self) -> Token { 144 | self.inner.active.fetch_add(1, Ordering::SeqCst); 145 | Token { 146 | inner: self.inner.clone(), 147 | } 148 | } 149 | 150 | /// Returns a future which resolves when the current number of active tokens 151 | /// is less than the limit 152 | /// 153 | /// If you create tokens in a different task than the task that waits 154 | /// on `HasCapacity`, there is a race condition.
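///
/// A minimal sketch (assuming `tx` and `rx` were created by [`new`](fn.new.html)
/// and the token is created in the same task that waits):
///
/// ```ignore
/// rx.has_capacity().await;   // resolves once active tokens < limit
/// let token = tx.token();    // take a slot before spawning the handler
/// ```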
155 | pub fn has_capacity(&mut self) -> HasCapacity { 156 | HasCapacity { recv: self } 157 | } 158 | 159 | fn poll(&mut self, cx: &mut Context) -> Poll<()> { 160 | let limit = self.inner.limit.load(Ordering::Acquire); 161 | loop { 162 | let active = self.inner.active.load(Ordering::Acquire); 163 | if active < limit { 164 | return Poll::Ready(()); 165 | } 166 | match self.inner.task.try_lock() { 167 | Ok(mut guard) => { 168 | *guard = Some(cx.waker().clone()); 169 | break; 170 | } 171 | Err(TryLockError::WouldBlock) => { 172 | // This means another token is currently waking 173 | // up this receiver; retry. 174 | // 175 | // Note: this looks like a busy loop, but we don't have 176 | // anything long/slow behind the mutex, and it's only 177 | // executed when the limit is reached. 178 | continue; 179 | } 180 | Err(TryLockError::Poisoned(_)) => { 181 | unreachable!("backpressure lock should never be poisoned"); 182 | } 183 | } 184 | } 185 | // Reread the counter after the lock is released because 186 | // token Drop relies on that 187 | let active = self.inner.active.load(Ordering::Acquire); 188 | if active < limit { 189 | Poll::Ready(()) 190 | } else { 191 | Poll::Pending 192 | } 193 | } 194 | } 195 | 196 | impl Drop for Token { 197 | fn drop(&mut self) { 198 | // TODO(tailhook) we could use Acquire for old_ref, 199 | // but not sure how safe it is to compare it with the limit 200 | let old_ref = self.inner.active.fetch_sub(1, Ordering::SeqCst); 201 | let limit = self.inner.limit.load(Ordering::SeqCst); 202 | if old_ref == limit { 203 | match self.inner.task.try_lock() { 204 | Ok(mut guard) => { 205 | guard.take().map(|w| w.wake()); 206 | } 207 | Err(TryLockError::WouldBlock) => { 208 | // This means another token is currently waking up 209 | // a Receiver, or the Receiver is currently running. 210 | // The Receiver will recheck values after releasing the Mutex. 211 | } 212 | Err(TryLockError::Poisoned(_)) => { 213 | unreachable!("backpressure lock should never be poisoned"); 214 | } 215 | } 216 | } 217 | } 218 | } 219 | 220 | impl<S> BackpressureToken<S> { 221 | pub(crate) fn new(stream: S, backpressure: Receiver) 222 | -> BackpressureToken<S> 223 | { 224 | BackpressureToken(Backpressure::new(stream, backpressure)) 225 | } 226 | 227 | /// Acquires a reference to the underlying stream that this adapter is 228 | /// pulling from. 229 | pub fn get_ref(&self) -> &S { 230 | self.0.get_ref() 231 | } 232 | 233 | /// Acquires a mutable reference to the underlying stream that this 234 | /// adapter is pulling from. 235 | pub fn get_mut(&mut self) -> &mut S { 236 | self.0.get_mut() 237 | } 238 | 239 | /// Consumes this adapter, returning the underlying stream. 240 | pub fn into_inner(self) -> S { 241 | self.0.into_inner() 242 | } 243 | } 244 | 245 | impl<S> BackpressureWrapper<S> { 246 | pub(crate) fn new(stream: S, backpressure: Receiver) 247 | -> BackpressureWrapper<S> 248 | { 249 | BackpressureWrapper(Backpressure::new(stream, backpressure)) 250 | } 251 | 252 | /// Acquires a reference to the underlying stream that this adapter is 253 | /// pulling from. 254 | pub fn get_ref(&self) -> &S { 255 | self.0.get_ref() 256 | } 257 | 258 | /// Acquires a mutable reference to the underlying stream that this 259 | /// adapter is pulling from. 260 | pub fn get_mut(&mut self) -> &mut S { 261 | self.0.get_mut() 262 | } 263 | 264 | /// Consumes this adapter, returning the underlying stream.
265 | pub fn into_inner(self) -> S { 266 | self.0.into_inner() 267 | } 268 | } 269 | 270 | impl<S> Backpressure<S> { 271 | pub(crate) fn new(stream: S, backpressure: Receiver) -> Backpressure<S> { 272 | Backpressure { stream, backpressure } 273 | } 274 | 275 | /// Acquires a reference to the underlying stream that this adapter is 276 | /// pulling from. 277 | pub fn get_ref(&self) -> &S { 278 | &self.stream 279 | } 280 | 281 | /// Acquires a mutable reference to the underlying stream that this 282 | /// adapter is pulling from. 283 | pub fn get_mut(&mut self) -> &mut S { 284 | &mut self.stream 285 | } 286 | 287 | /// Consumes this adapter, returning the underlying stream. 288 | pub fn into_inner(self) -> S { 289 | self.stream 290 | } 291 | } 292 | 293 | /// Create a new pair of backpressure structures 294 | /// 295 | /// These structures are called [`Sender`](struct.Sender.html) 296 | /// and [`Receiver`](struct.Receiver.html), similar to channels. 297 | /// The `Receiver` should be used to throttle, either by applying 298 | /// it to a stream or by using it directly. The `Sender` is a way to create 299 | /// throttling tokens (the stream is paused while the number of active tokens >= limit), 300 | /// and to change the limit. 301 | /// 302 | /// See [`ListenExt`](../trait.ListenExt.html) for example usage. 303 | /// 304 | /// # Direct Use Example 305 | /// 306 | /// ```no_run 307 | /// # use std::time::Duration; 308 | /// # use async_std::net::{TcpListener, TcpStream}; 309 | /// # use async_std::prelude::*; 310 | /// # use async_std::task; 311 | /// # fn main() -> std::io::Result<()> { task::block_on(async { 312 | /// # 313 | /// use async_listen::ListenExt; 314 | /// use async_listen::backpressure; 315 | /// 316 | /// let listener = TcpListener::bind("127.0.0.1:0").await?; 317 | /// let (tx, mut rx) = backpressure::new(10); 318 | /// let mut incoming = listener.incoming() 319 | ///     .handle_errors(Duration::from_millis(100)); 320 | /// 321 | /// loop { 322 | ///     rx.has_capacity().await; 323 | ///     let conn = match incoming.next().await { 324 | ///         Some(conn) => conn, 325 | ///         None => break, 326 | ///     }; 327 | ///     let token = tx.token(); // should be created before spawn 328 | ///     task::spawn(async { 329 | ///         connection_loop(conn).await; 330 | ///         drop(token); // should be dropped after 331 | ///     }); 332 | /// } 333 | /// # async fn connection_loop(_stream: TcpStream) { 334 | /// # } 335 | /// # 336 | /// # Ok(()) }) } 337 | /// ``` 338 | /// 339 | pub fn new(initial_limit: usize) -> (Sender, Receiver) { 340 | let inner = Arc::new(Inner { 341 | limit: AtomicUsize::new(initial_limit), 342 | active: AtomicUsize::new(0), 343 | task: Mutex::new(None), 344 | }); 345 | return ( 346 | Sender { 347 | inner: inner.clone(), 348 | }, 349 | Receiver { 350 | inner: inner.clone(), 351 | }, 352 | ) 353 | } 354 | 355 | impl fmt::Debug for Token { 356 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 357 | debug("Token", &self.inner, f) 358 | } 359 | } 360 | 361 | impl fmt::Debug for Sender { 362 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 363 | debug("Sender", &self.inner, f) 364 | } 365 | } 366 | 367 | impl fmt::Debug for Receiver { 368 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 369 | debug("Receiver", &self.inner, f) 370 | } 371 | } 372 | 373 | impl<'a> fmt::Debug for HasCapacity<'a> { 374 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 375 | debug("HasCapacity", &self.recv.inner, f) 376 | } 377 | } 378 | 379 | fn debug(name: &str, inner: &Arc<Inner>, f: &mut fmt::Formatter) 380 | ->
fmt::Result 381 | { 382 | let active = inner.active.load(Ordering::Relaxed); 383 | let limit = inner.limit.load(Ordering::Relaxed); 384 | write!(f, "<{} {}/{}>", name, active, limit) 385 | } 386 | 387 | impl<S: fmt::Debug> fmt::Debug for Backpressure<S> { 388 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 389 | f.debug_struct("Backpressure") 390 | .field("stream", &self.stream) 391 | .field("backpressure", &self.backpressure) 392 | .finish() 393 | } 394 | } 395 | 396 | impl<S: fmt::Debug> fmt::Debug for BackpressureToken<S> { 397 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 398 | f.debug_struct("BackpressureToken") 399 | .field("stream", &self.0.stream) 400 | .field("backpressure", &self.0.backpressure) 401 | .finish() 402 | } 403 | } 404 | 405 | impl<S: fmt::Debug> fmt::Debug for BackpressureWrapper<S> { 406 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 407 | f.debug_struct("BackpressureWrapper") 408 | .field("stream", &self.0.stream) 409 | .field("backpressure", &self.0.backpressure) 410 | .finish() 411 | } 412 | } 413 | 414 | impl<S, I> Stream for Backpressure<S> 415 | where S: Stream<Item = I> + Unpin 416 | { 417 | type Item = I; 418 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) 419 | -> Poll<Option<Self::Item>> 420 | { 421 | match self.backpressure.poll(cx) { 422 | Poll::Pending => Poll::Pending, 423 | Poll::Ready(()) => Pin::new(&mut self.stream).poll_next(cx), 424 | } 425 | } 426 | } 427 | 428 | impl<'a> Future for HasCapacity<'a> { 429 | type Output = (); 430 | fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> { 431 | self.recv.poll(cx) 432 | } 433 | } 434 | 435 | impl<S, I> Stream for BackpressureToken<S> 436 | where S: Stream<Item = I> + Unpin 437 | { 438 | type Item = (Token, I); 439 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) 440 | -> Poll<Option<Self::Item>> 441 | { 442 | Pin::new(&mut self.0) 443 | .poll_next(cx) 444 | .map(|opt| opt.map(|conn| (self.0.backpressure.token(), conn))) 445 | } 446 | } 447 | 448 | impl<S, I> Stream for BackpressureWrapper<S> 449 | where S: Stream<Item = I> + Unpin, 450 | ByteStream: From<(Token, I)>, 451 | { 452 | type Item = ByteStream; 453 | fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) 454 | -> Poll<Option<Self::Item>> 455 | { 456 | Pin::new(&mut self.0) 457 | .poll_next(cx) 458 | .map(|opt| opt.map(|conn| { 459 | ByteStream::from((self.0.backpressure.token(), conn)) 460 | })) 461 | } 462 | } 463 | --------------------------------------------------------------------------------
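A short, hedged sketch of how the `Sender` half can be used operationally, e.g. to expose the number of in-flight connections for monitoring (the function name, reporting interval, and log message are illustrative, not part of the crate):

```rust
use std::time::Duration;

use async_std::task;
use async_listen::backpressure::Sender;

/// Periodically report how many backpressure tokens are currently alive.
fn spawn_connection_metric(tx: Sender) {
    task::spawn(async move {
        loop {
            task::sleep(Duration::from_secs(10)).await;
            eprintln!("active connections: {}", tx.get_active_tokens());
        }
    });
}
```

The same `Sender` can also raise or lower the limit at runtime with `set_limit`; lowering it only pauses the accept loop until enough tokens are dropped and never closes existing connections.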