├── .gitignore
├── Cargo.toml
├── LICENSE
├── README.md
├── azure-pipelines.yml
├── src
│   ├── error.rs
│   ├── lib.rs
│   ├── mediator.rs
│   ├── multiplex
│   │   ├── client.rs
│   │   ├── mod.rs
│   │   └── server.rs
│   ├── pipeline
│   │   ├── client.rs
│   │   ├── mod.rs
│   │   └── server.rs
│   └── wrappers.rs
└── tests
    ├── lib.rs
    ├── multiplex
    │   └── mod.rs
    └── pipeline
        ├── client.rs
        └── mod.rs

/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | **/*.rs.bk
3 | Cargo.lock
4 | 
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "tokio-tower"
3 | version = "0.7.0-rc4"
4 | edition = "2018"
5 | authors = ["Jon Gjengset <jon@thesquareplanet.com>"]
6 | 
7 | readme = "README.md"
8 | description = "Bridging the protocol gap between Tokio and Tower"
9 | repository = "https://github.com/tower-rs/tokio-tower.git"
10 | 
11 | keywords = ["tokio", "tower", "asynchronous", "protocol", "transport"]
12 | categories = ["asynchronous", "network-programming"]
13 | 
14 | license = "MIT"
15 | 
16 | [badges]
17 | azure-devops = { project = "tower-rs/tokio-tower", pipeline = "tokio-tower", build = "4" }
18 | 
19 | [features]
20 | log = ["tracing/log"]
21 | default = []
22 | 
23 | [dependencies]
24 | tower-service = "0.3"
25 | tower = { version = "0.4", features = ["load"] }
26 | futures-util = { version = "0.3", features = [ "sink" ] }
27 | futures-core = "0.3"
28 | futures-sink = "0.3"
29 | tokio = { version = "1.0", features = [ "sync", "rt" ] }
30 | crossbeam = "0.8"
31 | tracing = "0.1.2"
32 | pin-project = "1.0"
33 | 
34 | [dev-dependencies]
35 | tokio = { version = "1.0", features = [ "full" ] }
36 | serde = "1.0"
37 | serde_derive = "1.0"
38 | async-bincode = "0.6"
39 | slab = "0.4"
40 | tokio-test = "0.4"
41 | tower-test = "0.4"
42 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | 
3 | Copyright (c) 2018 Jon Gjengset
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![Crates.io](https://img.shields.io/crates/v/tokio-tower.svg)](https://crates.io/crates/tokio-tower)
2 | [![Documentation](https://docs.rs/tokio-tower/badge.svg)](https://docs.rs/tokio-tower/)
3 | [![Build Status](https://dev.azure.com/tower-rs/tokio-tower/_apis/build/status/tokio-tower?branchName=master)](https://dev.azure.com/tower-rs/tokio-tower/_build/latest?definitionId=4&branchName=master)
4 | [![Dependency status](https://deps.rs/repo/github/tower-rs/tokio-tower/status.svg)](https://deps.rs/repo/github/tower-rs/tokio-tower)
5 | 
6 | 
7 | This crate provides convenient wrappers to make
8 | [Tokio](https://tokio.rs) and [Tower](https://github.com/tower-rs/tower)
9 | work together. In particular, it provides:
10 | 
11 | - server binding wrappers that combine a `tower::Service` with a
12 |   transport that implements `Sink<Response>` and
13 |   `Stream<Item = Request>`.
14 | - client wrappers that implement `tower::Service` for transports that
15 |   implement `Sink<Request>` and `Stream<Item = Response>`.
16 | 
17 | Take a look at the [crate documentation](https://docs.rs/tokio-tower)
18 | for details.
19 | 
--------------------------------------------------------------------------------
/azure-pipelines.yml:
--------------------------------------------------------------------------------
1 | jobs:
2 |  - template: default.yml@templates
3 |    parameters:
4 |      minrust: 1.56
5 | 
6 | resources:
7 |   repositories:
8 |     - repository: templates
9 |       type: github
10 |       name: crate-ci/azure-pipelines
11 |       ref: refs/heads/v0.4
12 |       endpoint: tower-rs
13 | 
--------------------------------------------------------------------------------
/src/error.rs:
--------------------------------------------------------------------------------
1 | use futures_core::stream::TryStream;
2 | use futures_sink::Sink;
3 | use std::{error, fmt};
4 | 
5 | /// An error that occurred while servicing a request.
6 | #[non_exhaustive]
7 | pub enum Error<T, I>
8 | where
9 |     T: Sink<I> + TryStream,
10 | {
11 |     /// The underlying transport failed to send a request.
12 |     BrokenTransportSend(<T as Sink<I>>::Error),
13 | 
14 |     /// The underlying transport failed while attempting to receive a response.
15 |     ///
16 |     /// If `None`, the transport closed without error while there were pending requests.
17 |     BrokenTransportRecv(Option<<T as TryStream>::Error>),
18 | 
19 |     /// The internal pending data store has dropped the pending response.
20 |     Cancelled,
21 | 
22 |     /// Attempted to issue a `call` when no more requests can be in flight.
23 |     ///
24 |     /// See [`tower_service::Service::poll_ready`] and [`Client::with_limit`].
25 |     TransportFull,
26 | 
27 |     /// Attempted to issue a `call`, but the underlying transport has been closed.
28 |     ClientDropped,
29 | 
30 |     /// The server sent a response that the client was not expecting.
31 |     Desynchronized,
32 | 
33 |     /// The underlying transport task did not exit gracefully (either panic or cancellation).
34 |     /// Transport task panics can happen, for example, when the codec logic panics.
35 |     TransportDropped,
36 | }
37 | 
38 | impl<T, I> fmt::Display for Error<T, I>
39 | where
40 |     T: Sink<I> + TryStream,
41 |     <T as Sink<I>>::Error: fmt::Display,
42 |     <T as TryStream>::Error: fmt::Display,
43 | {
44 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
45 |         match *self {
46 |             Error::BrokenTransportSend(_) => f.pad("underlying transport failed to send a request"),
47 |             Error::BrokenTransportRecv(Some(_)) => {
48 |                 f.pad("underlying transport failed while attempting to receive a response")
49 |             }
50 |             Error::BrokenTransportRecv(None) => f.pad("transport closed with in-flight requests"),
51 |             Error::Cancelled => f.pad("request was cancelled internally"),
52 |             Error::TransportFull => f.pad("no more in-flight requests allowed"),
53 |             Error::ClientDropped => f.pad("Client was dropped"),
54 |             Error::Desynchronized => f.pad("server sent a response the client did not expect"),
55 |             Error::TransportDropped => {
56 |                 f.pad("underlying transport task exited unexpectedly (panic or cancellation)")
57 |             }
58 |         }
59 |     }
60 | }
61 | 
62 | impl<T, I> fmt::Debug for Error<T, I>
63 | where
64 |     T: Sink<I> + TryStream,
65 |     <T as Sink<I>>::Error: fmt::Debug,
66 |     <T as TryStream>::Error: fmt::Debug,
67 | {
68 |     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
69 |         match *self {
70 |             Error::BrokenTransportSend(ref se) => write!(f, "BrokenTransportSend({:?})", se),
71 |             Error::BrokenTransportRecv(Some(ref se)) => write!(f, "BrokenTransportRecv({:?})", se),
72 |             Error::BrokenTransportRecv(None) => f.pad("BrokenTransportRecv"),
73 |             Error::Cancelled => f.pad("Cancelled"),
74 |             Error::TransportFull => f.pad("TransportFull"),
75 |             Error::ClientDropped => f.pad("ClientDropped"),
76 |             Error::Desynchronized => f.pad("Desynchronized"),
77 |             Error::TransportDropped => f.pad("TransportDropped"),
78 |         }
79 |     }
80 | }
81 | 
82 | impl<T, I> error::Error for Error<T, I>
83 | where
84 |     T: Sink<I> + TryStream,
85 |     <T as Sink<I>>::Error: error::Error + 'static,
86 |     <T as TryStream>::Error: error::Error + 'static,
87 | {
88 |     fn source(&self) -> Option<&(dyn error::Error + 'static)> {
89 |         match *self {
90 |             Error::BrokenTransportSend(ref se) => Some(se),
91 |             Error::BrokenTransportRecv(Some(ref se)) => Some(se),
92 |             _ => None,
93 |         }
94 |     }
95 | }
96 | 
97 | impl<T, I> Error<T, I>
98 | where
99 |     T: Sink<I> + TryStream,
100 | {
101 |     pub(crate) fn from_sink_error(e: <T as Sink<I>>::Error) -> Self {
102 |         Error::BrokenTransportSend(e)
103 |     }
104 | 
105 |     pub(crate) fn from_stream_error(e: <T as TryStream>::Error) -> Self {
106 |         Error::BrokenTransportRecv(Some(e))
107 |     }
108 | }
109 | 
--------------------------------------------------------------------------------
/src/lib.rs:
--------------------------------------------------------------------------------
1 | //! This crate provides utilities for using protocols that follow certain common patterns on
2 | //! top of [Tokio](https://tokio.rs) and [Tower](https://github.com/tower-rs/tower).
3 | //!
4 | //! # Protocols
5 | //!
6 | //! At a high level, a protocol is a mechanism that lets you take a bunch of requests and turn them
7 | //! into responses. Tower provides the [`Service`](https://docs.rs/tower-service/) trait, which is
8 | //! an interface for mapping requests into responses, but it does not deal with how those requests
9 | //! are sent between clients and servers. Tokio, on the other hand, provides asynchronous
10 | //! communication primitives, but it does not deal with high-level abstractions like services. This
11 | //! crate attempts to bridge that gap.
12 | //!
13 | //! There are many types of protocols in the wild, but they generally come in two forms:
14 | //! *pipelining* and *multiplexing*.
A pipelining protocol sends requests and responses in-order 15 | //! between the consumer and provider of a service, and processes requests one at a time. A 16 | //! multiplexing protocol on the other hand constructs requests in such a way that they can be 17 | //! handled and responded to in *any* order while still allowing the client to know which response 18 | //! is for which request. Pipelining and multiplexing both have their advantages and disadvantages; 19 | //! see the module-level documentation for [`pipeline`] and [`multiplex`] for details. There is 20 | //! also good deal of discussion in [this StackOverflow 21 | //! answer](https://softwareengineering.stackexchange.com/a/325888/79642). 22 | //! 23 | //! # Transports 24 | //! 25 | //! A key part of any protocol is its transport, which is the way that it transmits requests and 26 | //! responses. In general, `tokio-tower` leaves the on-the-wire implementations of protocols to 27 | //! other crates (like [`tokio-codec`](https://docs.rs/tokio-codec/) or 28 | //! [`async-bincode`](https://docs.rs/async-bincode)) and instead operates at the level of 29 | //! [`Sink`](https://docs.rs/futures/0.1/futures/sink/trait.Sink.html)s and 30 | //! [`Stream`](https://docs.rs/futures/0.15/futures/stream/trait.Stream.html)s. 31 | //! 32 | //! At its core, `tokio-tower` wraps a type that is `Sink + Stream`. On the client side, the Sink 33 | //! is used to send requests, and the Stream is used to receive responses (from the server) to 34 | //! those requests. On the server side, the Stream is used to receive requests, and the Sink is 35 | //! used to send the responses. 36 | //! 37 | //! # Servers and clients 38 | //! 39 | //! This crate provides utilities that make writing both clients and servers easier. You'll find 40 | //! the client helper as `Client` in the protocol module you're working with (e.g., 41 | //! [`pipeline::Client`]), and the server helper as `Server` in the same place. 42 | //! 43 | //! # Example 44 | //! ```rust 45 | //! # use std::pin::Pin; 46 | //! # use std::boxed::Box; 47 | //! # use tokio::sync::mpsc; 48 | //! # use tokio::io::{AsyncWrite, AsyncRead}; 49 | //! # use futures_core::task::{Context, Poll}; 50 | //! # use futures_util::{never::Never, future::{poll_fn, ready, Ready}}; 51 | //! # use tokio_tower::pipeline; 52 | //! # use core::fmt::Debug; 53 | //! type StdError = Box; 54 | //! 55 | //! /// A transport implemented using a pair of `mpsc` channels. 56 | //! /// 57 | //! /// `mpsc::Sender` and `mpsc::Receiver` are both unidirectional. So, if we want to use `mpsc` 58 | //! /// to send requests and responses between a client and server, we need *two* channels, one 59 | //! /// that lets requests flow from the client to the server, and one that lets responses flow the 60 | //! /// other way. 61 | //! /// 62 | //! /// In this echo server example, requests and responses are both of type `T`, but for "real" 63 | //! /// services, the two types are usually different. 64 | //! struct ChannelTransport { 65 | //! rcv: mpsc::UnboundedReceiver, 66 | //! snd: mpsc::UnboundedSender, 67 | //! } 68 | //! 69 | //! impl futures_sink::Sink for ChannelTransport { 70 | //! type Error = StdError; 71 | //! 72 | //! fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 73 | //! Poll::Ready(Ok(())) 74 | //! } 75 | //! 76 | //! fn start_send(mut self: Pin<&mut Self>, item: T) -> Result<(), Self::Error> { 77 | //! // use map_err because `T` contained in `mpsc::SendError` may not be `Send + Sync`. 78 | //! 
self.snd.send(item).map_err(|e| e.to_string())?; 79 | //! Ok(()) 80 | //! } 81 | //! 82 | //! fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 83 | //! Poll::Ready(Ok(())) // no-op because all sends succeed immediately 84 | //! } 85 | //! 86 | //! fn poll_close( self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 87 | //! Poll::Ready(Ok(())) // no-op because channel is closed on drop and flush is no-op 88 | //! } 89 | //! } 90 | //! 91 | //! impl futures_util::stream::Stream for ChannelTransport { 92 | //! type Item = Result; 93 | //! 94 | //! fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { 95 | //! self.rcv.poll_recv(cx).map(|s| s.map(Ok)) 96 | //! } 97 | //! } 98 | //! 99 | //! /// A service that tokio-tower should serve over the transport. 100 | //! /// This one just echoes whatever it gets. 101 | //! struct Echo; 102 | //! 103 | //! impl tower_service::Service for Echo { 104 | //! type Response = T; 105 | //! type Error = Never; 106 | //! type Future = Ready>; 107 | //! 108 | //! fn poll_ready(&mut self, cx: &mut Context) -> Poll> { 109 | //! Poll::Ready(Ok(())) 110 | //! } 111 | //! 112 | //! fn call(&mut self, req: T) -> Self::Future { 113 | //! ready(Ok(req)) 114 | //! } 115 | //! } 116 | //! 117 | //! #[tokio::main] 118 | //! async fn main() { 119 | //! let (s1, r1) = mpsc::unbounded_channel(); 120 | //! let (s2, r2) = mpsc::unbounded_channel(); 121 | //! let pair1 = ChannelTransport{snd: s1, rcv: r2}; 122 | //! let pair2 = ChannelTransport{snd: s2, rcv: r1}; 123 | //! 124 | //! tokio::spawn(pipeline::Server::new(pair1, Echo)); 125 | //! let mut client = pipeline::Client::<_, tokio_tower::Error<_, _>, _>::new(pair2); 126 | //! 127 | //! use tower_service::Service; 128 | //! poll_fn(|cx| client.poll_ready(cx)).await; 129 | //! 130 | //! let msg = "Hello, tokio-tower"; 131 | //! let resp = client.call(String::from(msg)).await.expect("client call"); 132 | //! assert_eq!(resp, msg); 133 | //! } 134 | //! 135 | //! ``` 136 | #![warn( 137 | missing_docs, 138 | missing_debug_implementations, 139 | unreachable_pub, 140 | rust_2018_idioms 141 | )] 142 | #![allow(clippy::type_complexity)] 143 | 144 | const YIELD_EVERY: usize = 24; 145 | 146 | mod error; 147 | mod mediator; 148 | pub(crate) mod wrappers; 149 | pub use error::Error; 150 | 151 | use futures_core::{ 152 | future::Future, 153 | stream::TryStream, 154 | task::{Context, Poll}, 155 | }; 156 | use futures_sink::Sink; 157 | use tower_service::Service; 158 | 159 | /// Creates new `Transport` (i.e., `Sink + Stream`) instances. 160 | /// 161 | /// Acts as a transport factory. This is useful for cases where new `Sink + Stream` 162 | /// values must be produced. 163 | /// 164 | /// This is essentially a trait alias for a `Service` of `Sink + Stream`s. 165 | pub trait MakeTransport: self::sealed::Sealed { 166 | /// Items produced by the transport 167 | type Item; 168 | 169 | /// Errors produced when receiving from the transport 170 | type Error; 171 | 172 | /// Errors produced when sending to the transport 173 | type SinkError; 174 | 175 | /// The `Sink + Stream` implementation created by this factory 176 | type Transport: TryStream 177 | + Sink; 178 | 179 | /// Errors produced while building a transport. 180 | type MakeError; 181 | 182 | /// The future of the `Service` instance. 183 | type Future: Future>; 184 | 185 | /// Returns `Ready` when the factory is able to create more transports. 
186 | /// 187 | /// If the service is at capacity, then `NotReady` is returned and the task 188 | /// is notified when the service becomes ready again. This function is 189 | /// expected to be called while on a task. 190 | /// 191 | /// This is a **best effort** implementation. False positives are permitted. 192 | /// It is permitted for the service to return `Ready` from a `poll_ready` 193 | /// call and the next invocation of `make_transport` results in an error. 194 | fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll>; 195 | 196 | /// Create and return a new transport asynchronously. 197 | fn make_transport(&mut self, target: Target) -> Self::Future; 198 | } 199 | 200 | impl self::sealed::Sealed for M 201 | where 202 | M: Service, 203 | T: TryStream + Sink, 204 | { 205 | } 206 | 207 | impl MakeTransport for M 208 | where 209 | M: Service, 210 | T: TryStream + Sink, 211 | { 212 | type Item = ::Ok; 213 | type Error = ::Error; 214 | type SinkError = >::Error; 215 | type Transport = T; 216 | type MakeError = M::Error; 217 | type Future = M::Future; 218 | 219 | fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { 220 | Service::poll_ready(self, cx) 221 | } 222 | 223 | fn make_transport(&mut self, target: Target) -> Self::Future { 224 | Service::call(self, target) 225 | } 226 | } 227 | 228 | mod sealed { 229 | pub trait Sealed {} 230 | } 231 | 232 | pub mod multiplex; 233 | pub mod pipeline; 234 | -------------------------------------------------------------------------------- /src/mediator.rs: -------------------------------------------------------------------------------- 1 | use crossbeam::atomic::AtomicCell; 2 | use futures_util::task; 3 | use std::fmt; 4 | use std::sync::Arc; 5 | use std::task::{Context, Poll}; 6 | 7 | #[derive(Debug)] 8 | enum CellValue { 9 | /// The sender has left a value. 10 | Some(T), 11 | 12 | /// If the receiver sees this, the sender has disconnected. 13 | /// If the sender sees this, the receiver has disconnected. 14 | /// 15 | /// Will be `Some` if the sender sent a value that wasn't handled before it disconnected. 16 | Fin(Option), 17 | 18 | /// The sender has not left a value. 19 | None, 20 | } 21 | 22 | impl CellValue { 23 | fn is_none(&self) -> bool { 24 | matches!(self, CellValue::None) 25 | } 26 | } 27 | 28 | struct Mediator { 29 | value: AtomicCell>, 30 | tx_task: task::AtomicWaker, 31 | rx_task: task::AtomicWaker, 32 | } 33 | 34 | impl fmt::Debug for Mediator { 35 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 36 | f.debug_struct("Mediator") 37 | .field("tx_task", &self.tx_task) 38 | .field("rx_task", &self.rx_task) 39 | .finish() 40 | } 41 | } 42 | 43 | pub(crate) struct Receiver(Arc>); 44 | 45 | impl fmt::Debug for Receiver { 46 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 47 | f.debug_tuple("Receiver").field(&self.0).finish() 48 | } 49 | } 50 | 51 | pub(crate) struct Sender { 52 | inner: Arc>, 53 | checked_ready: bool, 54 | } 55 | 56 | impl fmt::Debug for Sender { 57 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 58 | f.debug_struct("Sender") 59 | .field("inner", &self.inner) 60 | .field("checked_ready", &self.checked_ready) 61 | .finish() 62 | } 63 | } 64 | 65 | impl Drop for Sender { 66 | fn drop(&mut self) { 67 | match self.inner.value.swap(CellValue::None) { 68 | CellValue::Some(t) => { 69 | self.inner.value.swap(CellValue::Fin(Some(t))); 70 | } 71 | CellValue::Fin(_) => { 72 | // receiver has gone away too -- all good. 
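                // (the swap above already left `None` in the cell; both
                // endpoints are gone at this point, so nothing can observe
                // the cell again and there is nothing to restore)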
73 | return; 74 | } 75 | CellValue::None => { 76 | self.inner.value.swap(CellValue::Fin(None)); 77 | } 78 | } 79 | self.inner.rx_task.wake(); 80 | } 81 | } 82 | 83 | pub(crate) fn new() -> (Sender, Receiver) { 84 | let m = Arc::new(Mediator { 85 | value: AtomicCell::new(CellValue::None), 86 | tx_task: task::AtomicWaker::new(), 87 | rx_task: task::AtomicWaker::new(), 88 | }); 89 | 90 | ( 91 | Sender { 92 | inner: m.clone(), 93 | checked_ready: false, 94 | }, 95 | Receiver(m), 96 | ) 97 | } 98 | 99 | #[derive(Clone, Debug, PartialEq, Eq)] 100 | pub(crate) enum TrySendError { 101 | Pending(T), 102 | Closed(T), 103 | } 104 | 105 | impl Sender { 106 | /// Returns true if there is a free slot for a client request. 107 | /// 108 | /// This method errors if the receiver has disconnected. 109 | pub(crate) fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { 110 | // register in case we can't send 111 | self.inner.tx_task.register(cx.waker()); 112 | match self.inner.value.swap(CellValue::None) { 113 | CellValue::Some(t) => { 114 | // whoops -- put it back 115 | self.inner.value.swap(CellValue::Some(t)); 116 | // notify in case the receiver just missed us 117 | self.inner.rx_task.wake(); 118 | Poll::Pending 119 | } 120 | CellValue::None => { 121 | self.checked_ready = true; 122 | Poll::Ready(Ok(())) 123 | } 124 | f @ CellValue::Fin(_) => { 125 | // the receiver must have gone away (since we can't have gone away) 126 | // put the Fin marker back for ourselves to see again later 127 | self.inner.value.swap(f); 128 | Poll::Ready(Err(())) 129 | } 130 | } 131 | } 132 | 133 | /// Attempts to place `t` in a free client request slot. 134 | /// 135 | /// This method returns `NotReady` if `is_ready` has not previously returned `true`. 136 | /// This method errors if the receiver has disconnected since `poll_ready`. 137 | pub(crate) fn try_send(&mut self, t: T) -> Result<(), TrySendError> { 138 | if !self.checked_ready { 139 | return Err(TrySendError::Pending(t)); 140 | } 141 | 142 | // we're supposed to _know_ that there is a slot here, 143 | // so no need to do a tx_task.register. 144 | match self.inner.value.swap(CellValue::Some(t)) { 145 | CellValue::None => {} 146 | CellValue::Some(_) => unreachable!("is_ready returned true, but slot occupied"), 147 | f @ CellValue::Fin(_) => { 148 | // the receiver must have gone away (since we can't have gone away) 149 | // put the Fin marker back for ourselves to see again later 150 | if let CellValue::Some(t) = self.inner.value.swap(f) { 151 | return Err(TrySendError::Closed(t)); 152 | } else { 153 | unreachable!("where did it go?"); 154 | } 155 | } 156 | } 157 | 158 | self.checked_ready = false; 159 | self.inner.rx_task.wake(); 160 | Ok(()) 161 | } 162 | } 163 | 164 | impl Receiver { 165 | /// Attempts to receive a value sent by the client. 166 | /// 167 | /// `Ready(None)` is returned if the client has disconnected. 
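    ///
    /// Internally, this swaps the shared slot out for `CellValue::None`: getting
    /// `Some(v)` back means the sender had left a value (we take it and wake the
    /// sender so it can enqueue another), while a `Fin` marker means the sender
    /// side has been dropped.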
168 | pub(crate) fn try_recv(&mut self, cx: &mut Context<'_>) -> Poll> { 169 | self.0.rx_task.register(cx.waker()); 170 | match self.0.value.swap(CellValue::None) { 171 | CellValue::Some(v) => { 172 | // let the sender know there's room now 173 | self.0.tx_task.wake(); 174 | Poll::Ready(Some(v)) 175 | } 176 | CellValue::Fin(Some(v)) => { 177 | // leave a None in there so we know to close after 178 | if cfg!(debug_assertions) { 179 | let old = self.0.value.swap(CellValue::Fin(None)); 180 | assert!(old.is_none()); 181 | } else { 182 | self.0.value.store(CellValue::Fin(None)); 183 | } 184 | Poll::Ready(Some(v)) 185 | } 186 | CellValue::Fin(None) => Poll::Ready(None), 187 | CellValue::None => Poll::Pending, 188 | } 189 | } 190 | } 191 | 192 | impl Drop for Receiver { 193 | fn drop(&mut self) { 194 | self.0.value.swap(CellValue::Fin(None)); 195 | } 196 | } 197 | 198 | #[cfg(test)] 199 | mod test { 200 | use super::*; 201 | 202 | #[test] 203 | fn basic() { 204 | let (tx, rx) = new::(); 205 | let mut tx = tokio_test::task::spawn(tx); 206 | let mut rx = tokio_test::task::spawn(rx); 207 | 208 | assert_eq!( 209 | tx.enter(|cx, mut tx| tx.poll_ready(cx)), 210 | Poll::Ready(Ok(())) 211 | ); 212 | assert!(!tx.is_woken()); 213 | assert!(!rx.is_woken()); 214 | assert_eq!(tx.enter(|_, mut tx| tx.try_send(42)), Ok(())); 215 | assert!(!tx.is_woken()); 216 | assert!(!rx.is_woken()); 217 | assert_eq!( 218 | rx.enter(|cx, mut rx| rx.try_recv(cx)), 219 | Poll::Ready(Some(42)) 220 | ); 221 | assert!(tx.is_woken()); 222 | assert!(!rx.is_woken()); 223 | 224 | assert_eq!( 225 | tx.enter(|cx, mut tx| tx.poll_ready(cx)), 226 | Poll::Ready(Ok(())) 227 | ); 228 | assert_eq!(tx.enter(|_, mut tx| tx.try_send(43)), Ok(())); 229 | assert!(rx.is_woken()); 230 | assert_eq!(tx.enter(|cx, mut tx| tx.poll_ready(cx)), Poll::Pending); 231 | assert_eq!( 232 | tx.enter(|_, mut tx| tx.try_send(44)), 233 | Err(TrySendError::Pending(44)) 234 | ); 235 | assert_eq!( 236 | rx.enter(|cx, mut rx| rx.try_recv(cx)), 237 | Poll::Ready(Some(43)) 238 | ); 239 | assert!(tx.is_woken()); // sender is notified 240 | assert_eq!( 241 | tx.enter(|cx, mut tx| tx.poll_ready(cx)), 242 | Poll::Ready(Ok(())) 243 | ); 244 | assert_eq!(tx.enter(|_, mut tx| tx.try_send(44)), Ok(())); 245 | assert!(rx.is_woken()); 246 | 247 | drop(tx); 248 | assert_eq!( 249 | rx.enter(|cx, mut rx| rx.try_recv(cx)), 250 | Poll::Ready(Some(44)) 251 | ); 252 | assert_eq!(rx.enter(|cx, mut rx| rx.try_recv(cx)), Poll::Ready(None)); 253 | } 254 | 255 | #[test] 256 | fn notified_on_empty_drop() { 257 | let (tx, rx) = new::(); 258 | let tx = tokio_test::task::spawn(tx); 259 | let mut rx = tokio_test::task::spawn(rx); 260 | 261 | assert_eq!(rx.enter(|cx, mut rx| rx.try_recv(cx)), Poll::Pending); 262 | assert!(!rx.is_woken()); 263 | drop(tx); 264 | assert!(rx.is_woken()); 265 | assert_eq!(rx.enter(|cx, mut rx| rx.try_recv(cx)), Poll::Ready(None)); 266 | } 267 | 268 | #[test] 269 | fn sender_sees_receiver_drop() { 270 | let (tx, rx) = new::(); 271 | let mut tx = tokio_test::task::spawn(tx); 272 | let rx = tokio_test::task::spawn(rx); 273 | 274 | assert_eq!( 275 | tx.enter(|cx, mut tx| tx.poll_ready(cx)), 276 | Poll::Ready(Ok(())) 277 | ); 278 | drop(rx); 279 | assert_eq!( 280 | tx.enter(|cx, mut tx| tx.poll_ready(cx)), 281 | Poll::Ready(Err(())) 282 | ); 283 | assert_eq!( 284 | tx.enter(|_, mut tx| tx.try_send(42)), 285 | Err(TrySendError::Closed(42)) 286 | ); 287 | } 288 | } 289 | -------------------------------------------------------------------------------- /src/multiplex/client.rs: 
--------------------------------------------------------------------------------
1 | use crate::mediator;
2 | use crate::mediator::TrySendError;
3 | use crate::wrappers::*;
4 | use crate::Error;
5 | use futures_core::{ready, stream::TryStream};
6 | use futures_sink::Sink;
7 | use pin_project::pin_project;
8 | use std::collections::VecDeque;
9 | use std::fmt;
10 | use std::future::Future;
11 | use std::marker::PhantomData;
12 | use std::pin::Pin;
13 | use std::task::{Context, Poll};
14 | use tower_service::Service;
15 | 
16 | // NOTE: this implementation could be more opinionated about request IDs by using a slab, but
17 | // instead, we allow the user to choose their own identifier format.
18 | 
19 | /// A transport capable of transporting tagged requests and responses must implement this
20 | /// interface in order to be used with a [`Client`].
21 | ///
22 | /// Note that we require self to be pinned here as `assign_tag` and `finish_tag` are called on the
23 | /// transport, which is already pinned so that we can use it as a `Stream + Sink`. It wouldn't be
24 | /// safe to then give out `&mut` to the transport without `Pin`, as that might move the transport.
25 | pub trait TagStore<Request, Response> {
26 |     /// The type used for tags.
27 |     type Tag;
28 | 
29 |     /// Assign a fresh tag to the given `Request`, and return that tag.
30 |     fn assign_tag(self: Pin<&mut Self>, r: &mut Request) -> Self::Tag;
31 | 
32 |     /// Retire and return the tag contained in the given `Response`.
33 |     fn finish_tag(self: Pin<&mut Self>, r: &Response) -> Self::Tag;
34 | }
35 | 
36 | /// A store used to track pending requests.
37 | ///
38 | /// Each request that is `sent` is passed to the local state used to track
39 | /// each pending request, and is expected to be able to recall that state
40 | /// through `completed` when a response later comes in with the same tag as
41 | /// the original request.
42 | pub trait PendingStore<Request, T>
43 | where
44 |     T: TryStream + Sink<Request> + TagStore<Request, <T as TryStream>::Ok>,
45 | {
46 |     /// Store the provided tag and pending request.
47 |     fn sent(self: Pin<&mut Self>, tag: T::Tag, pending: Pending<T::Ok>, transport: Pin<&mut T>);
48 | 
49 |     /// Retrieve the pending request associated with this tag.
50 |     ///
51 |     /// This method should return `Ok(Some(p))` where `p` is the [`Pending`]
52 |     /// that was passed to `sent` with `tag`. Implementors can choose
53 |     /// to ignore a given response, such as to support request cancellation,
54 |     /// by returning `Ok(None)` for a tag and dropping the corresponding
55 |     /// `Pending` type. Doing so will make the original request future resolve as `Err(Error::Cancelled)`.
56 |     ///
57 |     /// If `tag` is not recognized as belonging to an in-flight request, implementors
58 |     /// should return `Err(Error::Desynchronized)`.
59 |     fn completed(
60 |         self: Pin<&mut Self>,
61 |         tag: T::Tag,
62 |         transport: Pin<&mut T>,
63 |     ) -> Result<Option<Pending<T::Ok>>, Error<T, Request>>;
64 | 
65 |     /// Return the count of in-flight pending responses in the [`PendingStore`].
66 |     fn in_flight(&self, transport: &T) -> usize;
67 | }
68 | 
69 | /// A [`PendingStore`] implementation that uses a [`VecDeque`]
70 | /// to store pending requests.
71 | ///
72 | /// When the [`Client`] receives a response with a `Tag` that does not
73 | /// exist in the internal [`PendingStore`], this implementation will return
74 | /// an `Error::Desynchronized` error.
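/// 
/// Note that `completed` does a linear scan to find the matching tag, so
/// lookups are `O(n)` in the number of in-flight requests. That is cheap for
/// modest pipelining depths; heavily multiplexed workloads may prefer a
/// slab- or map-backed [`PendingStore`].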
75 | #[pin_project] 76 | pub struct VecDequePendingStore 77 | where 78 | T: TryStream + Sink + TagStore, 79 | { 80 | pending: VecDeque<(T::Tag, Pending)>, 81 | _pd: PhantomData, 82 | } 83 | 84 | impl Default for VecDequePendingStore 85 | where 86 | T: TryStream + Sink + TagStore, 87 | { 88 | fn default() -> Self { 89 | Self { 90 | pending: VecDeque::new(), 91 | _pd: PhantomData, 92 | } 93 | } 94 | } 95 | 96 | impl fmt::Debug for VecDequePendingStore 97 | where 98 | T: TryStream + Sink + TagStore, 99 | T::Tag: fmt::Debug, 100 | { 101 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 102 | f.debug_struct("VecDequePendingStore") 103 | .field("pending", &self.pending) 104 | .finish() 105 | } 106 | } 107 | 108 | impl PendingStore for VecDequePendingStore 109 | where 110 | T: TryStream + Sink + TagStore, 111 | T::Tag: Eq, 112 | { 113 | fn sent(self: Pin<&mut Self>, tag: T::Tag, pending: Pending, _transport: Pin<&mut T>) { 114 | let this = self.project(); 115 | this.pending.push_back((tag, pending)); 116 | } 117 | 118 | fn completed( 119 | self: Pin<&mut Self>, 120 | tag: T::Tag, 121 | _transport: Pin<&mut T>, 122 | ) -> Result>, Error> { 123 | let this = self.project(); 124 | 125 | let pending = this 126 | .pending 127 | .iter() 128 | .position(|(t, _)| t == &tag) 129 | .ok_or(Error::Desynchronized)?; 130 | 131 | // this request just finished, which means it's _probably_ near the front 132 | // (i.e., was issued a while ago). so, for the swap needed for efficient 133 | // remove, we want to swap with something else that is close to the front. 134 | let response = this.pending.swap_remove_front(pending).unwrap(); 135 | 136 | Ok(Some(response.1)) 137 | } 138 | 139 | fn in_flight(&self, _transport: &T) -> usize { 140 | self.pending.len() 141 | } 142 | } 143 | 144 | // ===== Client ===== 145 | 146 | /// This type provides an implementation of a Tower 147 | /// [`Service`](https://docs.rs/tokio-service/0.1/tokio_service/trait.Service.html) on top of a 148 | /// request-at-a-time protocol transport. In particular, it wraps a transport that implements 149 | /// `Sink` and `Stream` with the necessary bookkeeping to 150 | /// adhere to Tower's convenient `fn(Request) -> Future` API. 151 | pub struct Client> 152 | where 153 | T: Sink + TryStream, 154 | { 155 | mediator: mediator::Sender>, 156 | _error: PhantomData, 157 | } 158 | 159 | impl fmt::Debug for Client 160 | where 161 | T: Sink + TryStream, 162 | { 163 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 164 | f.debug_struct("Client") 165 | .field("mediator", &self.mediator) 166 | .finish() 167 | } 168 | } 169 | 170 | // ===== Pending ===== 171 | 172 | /// A type used to track in-flight requests. 173 | /// 174 | /// Each pending response has an associated `Tag` that is provided 175 | /// by the [`TagStore`], which is used to uniquely identify a request/response pair. 176 | pub struct Pending { 177 | tx: tokio::sync::oneshot::Sender>, 178 | span: tracing::Span, 179 | } 180 | 181 | impl fmt::Debug for Pending { 182 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 183 | f.debug_struct("Pending").field("span", &self.span).finish() 184 | } 185 | } 186 | 187 | // ===== Builder ===== 188 | 189 | /// The default service error handler. 190 | pub type DefaultOnServiceError = Box; 191 | 192 | /// Builder for [`Client`] this is used to configure the transport, pending store 193 | /// and service_handler. 
194 | /// 195 | /// # Defaults 196 | /// 197 | /// By default this builder only requires a transport and sets a default [`PendingStore`] 198 | /// and error handler. The default pending store is a [`VecDeque`] and the default 199 | /// error handler is just a closure that silently drops all errors. 200 | pub struct Builder< 201 | T, 202 | E, 203 | Request, 204 | F = DefaultOnServiceError, 205 | P = VecDequePendingStore, 206 | > { 207 | transport: T, 208 | on_service_error: F, 209 | pending_store: P, 210 | _pd: PhantomData, 211 | } 212 | 213 | impl Builder 214 | where 215 | T: Sink + TryStream + TagStore::Ok> + Send + 'static, 216 | P: PendingStore + Send + 'static, 217 | E: From>, 218 | E: 'static + Send, 219 | Request: 'static + Send, 220 | T::Ok: 'static + Send, 221 | T::Tag: Send, 222 | F: FnOnce(E) + Send + 'static, 223 | { 224 | fn new( 225 | transport: T, 226 | ) -> Builder, VecDequePendingStore> { 227 | Builder { 228 | transport, 229 | on_service_error: Box::new(|_| {}), 230 | pending_store: VecDequePendingStore::default(), 231 | _pd: PhantomData, 232 | } 233 | } 234 | 235 | /// Set the constructed client's [`PendingStore`]. 236 | pub fn pending_store(self, pending_store: P2) -> Builder { 237 | Builder { 238 | pending_store, 239 | on_service_error: self.on_service_error, 240 | transport: self.transport, 241 | _pd: PhantomData, 242 | } 243 | } 244 | 245 | /// Set the constructed client's service error handler. 246 | /// 247 | /// If the [`Client`] encounters an error, it passes that error to `on_service_error` 248 | /// before exiting. 249 | /// 250 | /// `on_service_error` will be run from within a `Drop` implementation when the transport task 251 | /// panics, so it will likely abort if it panics. 252 | pub fn on_service_error(self, on_service_error: F2) -> Builder 253 | where 254 | F: FnOnce(E) + Send + 'static, 255 | { 256 | Builder { 257 | on_service_error, 258 | pending_store: self.pending_store, 259 | transport: self.transport, 260 | _pd: PhantomData, 261 | } 262 | } 263 | 264 | /// Build a client based on the configured items on the builder. 265 | pub fn build(self) -> Client { 266 | Client::new_internal(self.transport, self.pending_store, self.on_service_error) 267 | } 268 | } 269 | 270 | impl fmt::Debug for Builder 271 | where 272 | T: fmt::Debug, 273 | P: fmt::Debug, 274 | { 275 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 276 | f.debug_struct("Builder") 277 | .field("transport", &self.transport) 278 | .field("pending_store", &self.pending_store) 279 | .finish() 280 | } 281 | } 282 | 283 | // ===== ClientInner ===== 284 | 285 | #[pin_project] 286 | struct ClientInner 287 | where 288 | T: Sink + TryStream + TagStore, 289 | P: PendingStore, 290 | { 291 | mediator: mediator::Receiver>, 292 | #[pin] 293 | pending: P, 294 | #[pin] 295 | transport: T, 296 | 297 | finish: bool, 298 | rx_only: bool, 299 | 300 | #[allow(unused)] 301 | error: PhantomData, 302 | } 303 | 304 | impl Client 305 | where 306 | T: Sink + TryStream + TagStore::Ok> + Send + 'static, 307 | E: From>, 308 | E: 'static + Send, 309 | Request: 'static + Send, 310 | T::Ok: 'static + Send, 311 | T::Tag: Eq + Send, 312 | { 313 | /// Construct a new [`Client`] over the given `transport`. 314 | /// 315 | /// If the Client errors, the error is dropped when `new` is used -- use `with_error_handler` 316 | /// to handle such an error explicitly. 317 | pub fn new(transport: T) -> Self { 318 | Self::builder(transport).build() 319 | } 320 | 321 | /// Create a new builder with the provided transport. 
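    ///
    /// A minimal usage sketch (`transport` is assumed to be some type
    /// implementing `Sink<MyRequest> + TryStream + TagStore`, and `MyRequest` is
    /// a placeholder request type; the pending store and error handler shown are
    /// just the defaults made explicit):
    ///
    /// ```rust,ignore
    /// let client = Client::<_, tokio_tower::Error<_, _>, MyRequest>::builder(transport)
    ///     .pending_store(VecDequePendingStore::default())
    ///     .on_service_error(|e| eprintln!("client error: {:?}", e))
    ///     .build();
    /// ```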
322 | pub fn builder(transport: T) -> Builder { 323 | Builder::<_, _, _, DefaultOnServiceError, VecDequePendingStore>::new( 324 | transport, 325 | ) 326 | } 327 | } 328 | 329 | /// Handles executing the service error handler in case awaiting the `ClientInner` Future panics. 330 | struct ClientInnerCleanup 331 | where 332 | T: Sink + TryStream, 333 | E: From>, 334 | F: FnOnce(E), 335 | { 336 | on_service_error: Option, 337 | _phantom_data: PhantomData<(Request, T, E)>, 338 | } 339 | 340 | impl Drop for ClientInnerCleanup 341 | where 342 | T: Sink + TryStream, 343 | E: From>, 344 | F: FnOnce(E), 345 | { 346 | fn drop(&mut self) { 347 | if let Some(handler) = self.on_service_error.take() { 348 | (handler)(E::from(Error::::TransportDropped)) 349 | } 350 | } 351 | } 352 | 353 | impl Client 354 | where 355 | T: Sink + TryStream + TagStore::Ok> + Send + 'static, 356 | P: PendingStore + Send + 'static, 357 | E: From>, 358 | E: 'static + Send, 359 | Request: 'static + Send, 360 | T::Ok: 'static + Send, 361 | T::Tag: Send, 362 | { 363 | fn new_internal(transport: T, pending: P, on_service_error: F) -> Self 364 | where 365 | F: FnOnce(E) + Send + 'static, 366 | { 367 | let (tx, rx) = mediator::new(); 368 | tokio::spawn({ 369 | let c = ClientInner { 370 | mediator: rx, 371 | transport, 372 | pending, 373 | error: PhantomData::, 374 | finish: false, 375 | rx_only: false, 376 | }; 377 | async move { 378 | let mut cleanup = ClientInnerCleanup { 379 | on_service_error: Some(on_service_error), 380 | _phantom_data: PhantomData::default(), 381 | }; 382 | 383 | let result = c.await; 384 | let error = cleanup.on_service_error.take().unwrap(); 385 | if let Err(e) = result { 386 | error(e); 387 | } 388 | } 389 | }); 390 | Client { 391 | mediator: tx, 392 | _error: PhantomData, 393 | } 394 | } 395 | } 396 | 397 | impl Future for ClientInner 398 | where 399 | T: Sink + TryStream + TagStore::Ok>, 400 | P: PendingStore, 401 | E: From>, 402 | E: 'static + Send, 403 | Request: 'static + Send, 404 | T::Ok: 'static + Send, 405 | { 406 | type Output = Result<(), E>; 407 | 408 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 409 | // go through the deref so we can do partial borrows 410 | let this = self.project(); 411 | 412 | // we never move transport, nor do we ever hand out &mut to it 413 | let mut transport: Pin<_> = this.transport; 414 | let mut pending: Pin<_> = this.pending; 415 | 416 | // track how many times we have iterated 417 | let mut i = 0; 418 | 419 | if !*this.finish { 420 | while let Poll::Ready(r) = transport.as_mut().poll_ready(cx) { 421 | if let Err(e) = r { 422 | return Poll::Ready(Err(E::from(Error::from_sink_error(e)))); 423 | } 424 | 425 | // send more requests if we have them 426 | match this.mediator.try_recv(cx) { 427 | Poll::Ready(Some(ClientRequest { 428 | mut req, 429 | span: _span, 430 | res, 431 | })) => { 432 | let id = transport.as_mut().assign_tag(&mut req); 433 | 434 | let guard = _span.enter(); 435 | tracing::trace!("request received by worker; sending to Sink"); 436 | 437 | transport 438 | .as_mut() 439 | .start_send(req) 440 | .map_err(Error::from_sink_error)?; 441 | tracing::trace!("request sent"); 442 | drop(guard); 443 | 444 | pending.as_mut().sent( 445 | id, 446 | Pending { 447 | tx: res, 448 | span: _span, 449 | }, 450 | transport.as_mut(), 451 | ); 452 | 453 | // if we have run for a while without yielding, yield so we can make progress 454 | i += 1; 455 | if i == crate::YIELD_EVERY { 456 | // we're forcing a yield, so need to ensure we get woken up again 457 | 
cx.waker().wake_by_ref(); 458 | // we still want to execute the code below the loop 459 | break; 460 | } 461 | } 462 | Poll::Ready(None) => { 463 | // XXX: should we "give up" the Sink::poll_ready here? 464 | *this.finish = true; 465 | break; 466 | } 467 | Poll::Pending => { 468 | // XXX: should we "give up" the Sink::poll_ready here? 469 | break; 470 | } 471 | } 472 | } 473 | } 474 | 475 | if pending.as_ref().in_flight(&transport) != 0 && !*this.rx_only { 476 | // flush out any stuff we've sent in the past 477 | // don't return on NotReady since we have to check for responses too 478 | if *this.finish { 479 | // we're closing up shop! 480 | // 481 | // poll_close() implies poll_flush() 482 | let r = transport 483 | .as_mut() 484 | .poll_close(cx) 485 | .map_err(Error::from_sink_error)?; 486 | 487 | if r.is_ready() { 488 | // now that close has completed, we should never send anything again 489 | // we only need to receive to make the in-flight requests complete 490 | *this.rx_only = true; 491 | } 492 | } else { 493 | let _ = transport 494 | .as_mut() 495 | .poll_flush(cx) 496 | .map_err(Error::from_sink_error)?; 497 | } 498 | } 499 | 500 | // and start looking for replies. 501 | // 502 | // note that we *could* have this just be a loop, but we don't want to poll the stream 503 | // if we know there's nothing for it to produce. 504 | while pending.as_ref().in_flight(&transport) != 0 { 505 | let poll_next = match transport.as_mut().try_poll_next(cx) { 506 | Poll::Pending => { 507 | // try_poll_next could mutate the pending store and actually change the number 508 | // of in_flight requests, so we check again if we have an inflight request or not 509 | if pending.as_ref().in_flight(&transport) == 0 { 510 | break; 511 | } 512 | return Poll::Pending; 513 | } 514 | Poll::Ready(x) => x, 515 | }; 516 | 517 | match poll_next.transpose().map_err(Error::from_stream_error)? { 518 | Some(r) => { 519 | let id = transport.as_mut().finish_tag(&r); 520 | 521 | let pending = if let Some(pending) = 522 | pending.as_mut().completed(id, transport.as_mut())? 523 | { 524 | pending 525 | } else { 526 | tracing::trace!( 527 | "response arrived but no associated pending tag; ignoring response" 528 | ); 529 | continue; 530 | }; 531 | 532 | tracing::trace!(parent: &pending.span, "response arrived; forwarding"); 533 | 534 | // ignore send failures 535 | // the client may just no longer care about the response 536 | let sender = pending.tx; 537 | let _ = sender.send(ClientResponse { 538 | response: r, 539 | span: pending.span, 540 | }); 541 | } 542 | None => { 543 | // the transport terminated while we were waiting for a response! 544 | // TODO: it'd be nice if we could return the transport here.. 545 | return Poll::Ready(Err(E::from(Error::BrokenTransportRecv(None)))); 546 | } 547 | } 548 | } 549 | 550 | if *this.finish && pending.as_ref().in_flight(&transport) == 0 { 551 | if *this.rx_only { 552 | // we have already closed the send side. 553 | } else { 554 | // we're completely done once close() finishes! 555 | ready!(transport.poll_close(cx)).map_err(Error::from_sink_error)?; 556 | } 557 | return Poll::Ready(Ok(())); 558 | } 559 | 560 | // to get here, we must have no requests in flight and have gotten a NotReady from 561 | // self.mediator.try_recv or self.transport.start_send. we *could* also have messages 562 | // waiting to be sent (transport.poll_complete), but if that's the case it must also have 563 | // returned NotReady. 
so, at this point, we know that all of our subtasks are either done 564 | // or have returned NotReady, so the right thing for us to do is return NotReady too! 565 | Poll::Pending 566 | } 567 | } 568 | 569 | impl Service for Client 570 | where 571 | T: Sink + TryStream + TagStore::Ok>, 572 | P: PendingStore, 573 | E: From>, 574 | E: 'static + Send, 575 | Request: 'static + Send, 576 | T: 'static, 577 | T::Ok: 'static + Send, 578 | { 579 | type Response = T::Ok; 580 | type Error = E; 581 | type Future = Pin> + Send>>; 582 | 583 | fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { 584 | Poll::Ready(ready!(self.mediator.poll_ready(cx)).map_err(|_| E::from(Error::ClientDropped))) 585 | } 586 | 587 | fn call(&mut self, req: Request) -> Self::Future { 588 | let (tx, rx) = tokio::sync::oneshot::channel(); 589 | let span = tracing::Span::current(); 590 | tracing::trace!("issuing request"); 591 | let req = ClientRequest { req, span, res: tx }; 592 | let r = self.mediator.try_send(req); 593 | Box::pin(async move { 594 | match r { 595 | Ok(()) => match rx.await { 596 | Ok(r) => { 597 | tracing::trace!(parent: &r.span, "response returned"); 598 | Ok(r.response) 599 | } 600 | Err(_) => Err(E::from(Error::Cancelled)), 601 | }, 602 | Err(TrySendError::Pending(_)) => Err(E::from(Error::TransportFull)), 603 | Err(TrySendError::Closed(_)) => Err(E::from(Error::ClientDropped)), 604 | } 605 | }) 606 | } 607 | } 608 | -------------------------------------------------------------------------------- /src/multiplex/mod.rs: -------------------------------------------------------------------------------- 1 | //! In a multiplexed protocol, the server responds to client requests in the order they complete. 2 | //! Request IDs ([`TagStore::Tag`]) are used to match up responses with the request that triggered 3 | //! them. This allows the server to process requests out-of-order, and eliminates the 4 | //! application-level head-of-line blocking that pipelined protocols suffer from. Example 5 | //! multiplexed protocols include SSH, HTTP/2, and AMQP. [This 6 | //! page](https://250bpm.com/blog:18/) has some further details about how multiplexing protocols 7 | //! operate. 8 | //! 9 | //! Note: multiplexing with the max number of in-flight requests set to 1 implies that for each 10 | //! request, the response must be received before sending another request on the same connection. 11 | 12 | use futures_core::stream::{Stream, TryStream}; 13 | use futures_sink::Sink; 14 | use pin_project::pin_project; 15 | use std::pin::Pin; 16 | use std::task::{Context, Poll}; 17 | 18 | /// Client bindings for a multiplexed protocol. 19 | pub mod client; 20 | pub use self::client::{Client, TagStore}; 21 | 22 | /// Server bindings for a multiplexed protocol. 23 | pub mod server; 24 | pub use self::server::Server; 25 | 26 | /// A convenience wrapper that lets you take separate transport and tag store types and use them as 27 | /// a single [`client::Transport`]. 28 | #[pin_project] 29 | #[derive(Debug)] 30 | pub struct MultiplexTransport { 31 | #[pin] 32 | transport: T, 33 | #[pin] 34 | tagger: S, 35 | } 36 | 37 | impl MultiplexTransport { 38 | /// Fuse together the given `transport` and `tagger` into a single `Transport`. 
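    ///
    /// A sketch of intended use (`framed` is assumed to be some `Sink + Stream`
    /// codec transport, and `SlabTagger` a hypothetical [`TagStore`]
    /// implementation):
    ///
    /// ```rust,ignore
    /// let transport = MultiplexTransport::new(framed, SlabTagger::default());
    /// let mut client = multiplex::Client::<_, tokio_tower::Error<_, _>, _>::new(transport);
    /// ```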
39 | pub fn new(transport: T, tagger: S) -> Self { 40 | MultiplexTransport { transport, tagger } 41 | } 42 | } 43 | 44 | impl Sink for MultiplexTransport 45 | where 46 | T: Sink, 47 | { 48 | type Error = >::Error; 49 | 50 | fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 51 | self.project().transport.poll_ready(cx) 52 | } 53 | fn start_send(self: Pin<&mut Self>, item: Request) -> Result<(), Self::Error> { 54 | self.project().transport.start_send(item) 55 | } 56 | fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 57 | self.project().transport.poll_flush(cx) 58 | } 59 | fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 60 | self.project().transport.poll_close(cx) 61 | } 62 | } 63 | 64 | impl Stream for MultiplexTransport 65 | where 66 | T: TryStream, 67 | { 68 | type Item = Result<::Ok, ::Error>; 69 | fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { 70 | self.project().transport.try_poll_next(cx) 71 | } 72 | } 73 | 74 | impl TagStore::Ok> for MultiplexTransport 75 | where 76 | T: Sink + TryStream, 77 | S: TagStore::Ok>, 78 | { 79 | type Tag = ::Ok>>::Tag; 80 | fn assign_tag(self: Pin<&mut Self>, req: &mut Request) -> Self::Tag { 81 | self.project().tagger.assign_tag(req) 82 | } 83 | fn finish_tag(self: Pin<&mut Self>, rsp: &::Ok) -> Self::Tag { 84 | self.project().tagger.finish_tag(rsp) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /src/multiplex/server.rs: -------------------------------------------------------------------------------- 1 | use futures_core::{ready, stream::TryStream}; 2 | use futures_sink::Sink; 3 | use futures_util::stream::FuturesUnordered; 4 | use pin_project::pin_project; 5 | use std::future::Future; 6 | use std::pin::Pin; 7 | use std::task::{Context, Poll}; 8 | use std::{error, fmt}; 9 | use tower_service::Service; 10 | 11 | /// This type provides an implementation of a Tower 12 | /// [`Service`](https://docs.rs/tokio-service/0.1/tokio_service/trait.Service.html) on top of a 13 | /// multiplexed protocol transport. In particular, it wraps a transport that implements 14 | /// `Sink` and `Stream` with the necessary bookkeeping to 15 | /// adhere to Tower's convenient `fn(Request) -> Future` API. 16 | #[pin_project] 17 | #[derive(Debug)] 18 | pub struct Server 19 | where 20 | T: Sink + TryStream, 21 | S: Service<::Ok>, 22 | { 23 | #[pin] 24 | pending: FuturesUnordered, 25 | #[pin] 26 | transport: T, 27 | service: S, 28 | 29 | in_flight: usize, 30 | finish: bool, 31 | } 32 | 33 | /// An error that occurred while servicing a request. 34 | pub enum Error 35 | where 36 | T: Sink + TryStream, 37 | S: Service<::Ok>, 38 | { 39 | /// The underlying transport failed to produce a request. 40 | BrokenTransportRecv(::Error), 41 | 42 | /// The underlying transport failed while attempting to send a response. 43 | BrokenTransportSend(>::Error), 44 | 45 | /// The underlying service failed to process a request. 
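    /// (When `S::Error` implements `std::error::Error`, the wrapped error is
    /// also reachable through [`std::error::Error::source`].)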
46 | Service(S::Error), 47 | } 48 | 49 | impl fmt::Display for Error 50 | where 51 | T: Sink + TryStream, 52 | S: Service<::Ok>, 53 | >::Error: fmt::Display, 54 | ::Error: fmt::Display, 55 | S::Error: fmt::Display, 56 | { 57 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 58 | match *self { 59 | Error::BrokenTransportRecv(_) => { 60 | f.pad("underlying transport failed to produce a request") 61 | } 62 | Error::BrokenTransportSend(_) => { 63 | f.pad("underlying transport failed while attempting to send a response") 64 | } 65 | Error::Service(_) => f.pad("underlying service failed to process a request"), 66 | } 67 | } 68 | } 69 | 70 | impl fmt::Debug for Error 71 | where 72 | T: Sink + TryStream, 73 | S: Service<::Ok>, 74 | >::Error: fmt::Debug, 75 | ::Error: fmt::Debug, 76 | S::Error: fmt::Debug, 77 | { 78 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 79 | match *self { 80 | Error::BrokenTransportRecv(ref se) => write!(f, "BrokenTransportRecv({:?})", se), 81 | Error::BrokenTransportSend(ref se) => write!(f, "BrokenTransportSend({:?})", se), 82 | Error::Service(ref se) => write!(f, "Service({:?})", se), 83 | } 84 | } 85 | } 86 | 87 | impl error::Error for Error 88 | where 89 | T: Sink + TryStream, 90 | S: Service<::Ok>, 91 | >::Error: error::Error + 'static, 92 | ::Error: error::Error + 'static, 93 | S::Error: error::Error + 'static, 94 | { 95 | fn source(&self) -> Option<&(dyn error::Error + 'static)> { 96 | match *self { 97 | Error::BrokenTransportSend(ref se) => Some(se), 98 | Error::BrokenTransportRecv(ref se) => Some(se), 99 | Error::Service(ref se) => Some(se), 100 | } 101 | } 102 | } 103 | 104 | impl Error 105 | where 106 | T: Sink + TryStream, 107 | S: Service<::Ok>, 108 | { 109 | fn from_sink_error(e: >::Error) -> Self { 110 | Error::BrokenTransportSend(e) 111 | } 112 | 113 | fn from_stream_error(e: ::Error) -> Self { 114 | Error::BrokenTransportRecv(e) 115 | } 116 | 117 | fn from_service_error(e: S::Error) -> Self { 118 | Error::Service(e) 119 | } 120 | } 121 | 122 | impl Server 123 | where 124 | T: Sink + TryStream, 125 | S: Service<::Ok>, 126 | { 127 | /// Construct a new [`Server`] over the given `transport` that services requests using the 128 | /// given `service`. 129 | /// 130 | /// Requests are passed to `Service::call` as they arrive, and responses are written back to 131 | /// the underlying `transport` in the order that they complete. If a later request completes 132 | /// before an earlier request, its response is still sent immediately. 133 | pub fn new(transport: T, service: S) -> Self { 134 | Server { 135 | pending: FuturesUnordered::new(), 136 | transport, 137 | service, 138 | in_flight: 0, 139 | finish: false, 140 | } 141 | } 142 | 143 | /* 144 | /// Manage incoming new transport instances using the given service constructor. 145 | /// 146 | /// For each transport that `incoming` yields, a new instance of `service` is created to 147 | /// manage requests on that transport. 
This is roughly equivalent to: 148 | /// 149 | /// ```rust,ignore 150 | /// incoming.map(|t| Server::multiplexed(t, service.new_service(), limit)) 151 | /// ``` 152 | pub fn serve_on( 153 | incoming: TS, 154 | service: SS, 155 | limit: Option, 156 | ) -> impl Stream 157 | where 158 | TS: Stream, 159 | SS: NewService, 160 | E: From, 161 | E: From, 162 | { 163 | incoming.map_err(E::from).and_then(move |transport| { 164 | service 165 | .new_service() 166 | .map_err(E::from) 167 | .map(move |s| Server::multiplexed(transport, s, limit)) 168 | }) 169 | } 170 | */ 171 | } 172 | 173 | impl Future for Server 174 | where 175 | T: Sink + TryStream, 176 | S: Service<::Ok>, 177 | { 178 | type Output = Result<(), Error>; 179 | 180 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 181 | let span = tracing::trace_span!("poll"); 182 | let _guard = span.enter(); 183 | tracing::trace!("poll"); 184 | 185 | // go through the deref so we can do partial borrows 186 | let this = self.project(); 187 | 188 | // we never move transport or pending, nor do we ever hand out &mut to it 189 | let mut transport: Pin<_> = this.transport; 190 | let mut pending: Pin<_> = this.pending; 191 | 192 | // track how many times we have iterated 193 | let mut i = 0; 194 | 195 | loop { 196 | // first, poll pending futures to see if any have produced responses 197 | // note that we only poll for completed service futures if we can send the response 198 | while let Poll::Ready(r) = transport.as_mut().poll_ready(cx) { 199 | if let Err(e) = r { 200 | return Poll::Ready(Err(Error::from_sink_error(e))); 201 | } 202 | 203 | tracing::trace!( 204 | in_flight = *this.in_flight, 205 | pending = pending.len(), 206 | "transport.ready" 207 | ); 208 | match pending.as_mut().try_poll_next(cx) { 209 | Poll::Ready(Some(Err(e))) => { 210 | return Poll::Ready(Err(Error::from_service_error(e))); 211 | } 212 | Poll::Ready(Some(Ok(rsp))) => { 213 | tracing::trace!("transport.start_send"); 214 | // try to send the response! 215 | transport 216 | .as_mut() 217 | .start_send(rsp) 218 | .map_err(Error::from_sink_error)?; 219 | *this.in_flight -= 1; 220 | } 221 | _ => { 222 | // XXX: should we "release" the poll_ready we got from the Sink? 223 | break; 224 | } 225 | } 226 | } 227 | 228 | // also try to make progress on sending 229 | tracing::trace!(finish = *this.finish, "transport.poll_flush"); 230 | if let Poll::Ready(()) = transport 231 | .as_mut() 232 | .poll_flush(cx) 233 | .map_err(Error::from_sink_error)? 234 | { 235 | if *this.finish && pending.as_mut().is_empty() { 236 | // there are no more requests 237 | // and we've finished all the work! 238 | return Poll::Ready(Ok(())); 239 | } 240 | } 241 | 242 | if *this.finish { 243 | // there's still work to be done, but there are no more requests 244 | // so no need to check the incoming transport 245 | return Poll::Pending; 246 | } 247 | 248 | // if we have run for a while without yielding, yield back so other tasks can run 249 | i += 1; 250 | if i == crate::YIELD_EVERY { 251 | // we're forcing a yield, so need to ensure we get woken up again 252 | tracing::trace!("forced yield"); 253 | cx.waker().wake_by_ref(); 254 | return Poll::Pending; 255 | } 256 | 257 | // is the service ready? 
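            // note that we check service readiness *before* pulling the next
            // request off the transport below: if the service is not ready, we
            // return without reading, which lets backpressure propagate to the
            // peer instead of buffering requests locally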
258 | tracing::trace!("service.poll_ready"); 259 | ready!(this.service.poll_ready(cx)).map_err(Error::from_service_error)?; 260 | 261 | tracing::trace!("transport.poll_next"); 262 | let rq = ready!(transport.as_mut().try_poll_next(cx)) 263 | .transpose() 264 | .map_err(Error::from_stream_error)?; 265 | if let Some(rq) = rq { 266 | // the service is ready, and we have another request! 267 | // you know what that means: 268 | pending.push(this.service.call(rq)); 269 | *this.in_flight += 1; 270 | } else { 271 | // there are no more requests coming 272 | // check one more time for responses, and then yield 273 | assert!(!*this.finish); 274 | *this.finish = true; 275 | } 276 | } 277 | } 278 | } 279 | -------------------------------------------------------------------------------- /src/pipeline/client.rs: -------------------------------------------------------------------------------- 1 | use crate::mediator; 2 | use crate::wrappers::*; 3 | use crate::Error; 4 | use futures_core::{ready, stream::TryStream}; 5 | use futures_sink::Sink; 6 | use pin_project::pin_project; 7 | use std::collections::VecDeque; 8 | use std::fmt; 9 | use std::future::Future; 10 | use std::marker::PhantomData; 11 | use std::pin::Pin; 12 | use std::sync::{atomic, Arc}; 13 | use std::task::{Context, Poll}; 14 | use tower_service::Service; 15 | 16 | // ===== Client ===== 17 | 18 | /// This type provides an implementation of a Tower 19 | /// [`Service`](https://docs.rs/tokio-service/0.1/tokio_service/trait.Service.html) on top of a 20 | /// request-at-a-time protocol transport. In particular, it wraps a transport that implements 21 | /// `Sink` and `Stream` with the necessary bookkeeping to 22 | /// adhere to Tower's convenient `fn(Request) -> Future` API. 23 | pub struct Client 24 | where 25 | T: Sink + TryStream, 26 | { 27 | mediator: mediator::Sender>, 28 | in_flight: Arc, 29 | _error: PhantomData, 30 | } 31 | 32 | impl fmt::Debug for Client 33 | where 34 | T: Sink + TryStream, 35 | { 36 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 37 | f.debug_struct("Client") 38 | .field("mediator", &self.mediator) 39 | .field("in_flight", &self.in_flight) 40 | .finish() 41 | } 42 | } 43 | 44 | // ===== ClientInner ===== 45 | 46 | struct Pending { 47 | tx: tokio::sync::oneshot::Sender>, 48 | span: tracing::Span, 49 | } 50 | 51 | #[pin_project] 52 | struct ClientInner 53 | where 54 | T: Sink + TryStream, 55 | { 56 | mediator: mediator::Receiver>, 57 | responses: VecDeque>, 58 | #[pin] 59 | transport: T, 60 | 61 | in_flight: Arc, 62 | finish: bool, 63 | rx_only: bool, 64 | 65 | #[allow(unused)] 66 | error: PhantomData, 67 | } 68 | 69 | impl Client 70 | where 71 | T: Sink + TryStream + Send + 'static, 72 | E: From>, 73 | E: 'static + Send, 74 | Request: 'static + Send, 75 | T::Ok: 'static + Send, 76 | { 77 | /// Construct a new [`Client`] over the given `transport`. 78 | /// 79 | /// If the Client errors, the error is dropped when `new` is used -- use `with_error_handler` 80 | /// to handle such an error explicitly. 81 | pub fn new(transport: T) -> Self where { 82 | Self::with_error_handler(transport, |_| {}) 83 | } 84 | 85 | /// Construct a new [`Client`] over the given `transport`. 86 | /// 87 | /// If the `Client` errors, its error is passed to `on_service_error`. 
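    ///
    /// A minimal sketch (`transport` is assumed to be a suitable
    /// `Sink + TryStream`, and `MyRequest` a placeholder request type):
    ///
    /// ```rust,ignore
    /// let client: Client<_, tokio_tower::Error<_, _>, MyRequest> =
    ///     Client::with_error_handler(transport, |e| eprintln!("client failed: {:?}", e));
    /// ```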
88 | pub fn with_error_handler(transport: T, on_service_error: F) -> Self 89 | where 90 | F: FnOnce(E) + Send + 'static, 91 | { 92 | let (tx, rx) = mediator::new(); 93 | let in_flight = Arc::new(atomic::AtomicUsize::new(0)); 94 | tokio::spawn({ 95 | let c = ClientInner { 96 | mediator: rx, 97 | responses: Default::default(), 98 | transport, 99 | in_flight: in_flight.clone(), 100 | error: PhantomData::, 101 | finish: false, 102 | rx_only: false, 103 | }; 104 | async move { 105 | if let Err(e) = c.await { 106 | on_service_error(e); 107 | } 108 | } 109 | }); 110 | Client { 111 | mediator: tx, 112 | in_flight, 113 | _error: PhantomData, 114 | } 115 | } 116 | } 117 | 118 | impl Future for ClientInner 119 | where 120 | T: Sink + TryStream, 121 | E: From>, 122 | E: 'static + Send, 123 | Request: 'static + Send, 124 | T::Ok: 'static + Send, 125 | { 126 | type Output = Result<(), E>; 127 | 128 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 129 | // go through the deref so we can do partial borrows 130 | let this = self.project(); 131 | 132 | // we never move transport, nor do we ever hand out &mut to it 133 | let mut transport: Pin<_> = this.transport; 134 | 135 | // track how many times we have iterated 136 | let mut i = 0; 137 | 138 | if !*this.finish { 139 | while let Poll::Ready(r) = transport.as_mut().poll_ready(cx) { 140 | if let Err(e) = r { 141 | return Poll::Ready(Err(E::from(Error::from_sink_error(e)))); 142 | } 143 | 144 | // send more requests if we have them 145 | match this.mediator.try_recv(cx) { 146 | Poll::Ready(Some(ClientRequest { 147 | req, 148 | span: _span, 149 | res, 150 | })) => { 151 | let guard = _span.enter(); 152 | tracing::trace!("request received by worker; sending to Sink"); 153 | 154 | transport 155 | .as_mut() 156 | .start_send(req) 157 | .map_err(Error::from_sink_error)?; 158 | tracing::trace!("request sent"); 159 | drop(guard); 160 | 161 | this.responses.push_back(Pending { 162 | tx: res, 163 | span: _span, 164 | }); 165 | this.in_flight.fetch_add(1, atomic::Ordering::AcqRel); 166 | 167 | // if we have run for a while without yielding, yield so we can make progress 168 | i += 1; 169 | if i == crate::YIELD_EVERY { 170 | // we're forcing a yield, so need to ensure we get woken up again 171 | cx.waker().wake_by_ref(); 172 | // we still want to execute the code below the loop 173 | break; 174 | } 175 | } 176 | Poll::Ready(None) => { 177 | // XXX: should we "give up" the Sink::poll_ready here? 178 | *this.finish = true; 179 | break; 180 | } 181 | Poll::Pending => { 182 | // XXX: should we "give up" the Sink::poll_ready here? 183 | break; 184 | } 185 | } 186 | } 187 | } 188 | 189 | if this.in_flight.load(atomic::Ordering::Acquire) != 0 && !*this.rx_only { 190 | // flush out any stuff we've sent in the past 191 | // don't return on NotReady since we have to check for responses too 192 | if *this.finish { 193 | // we're closing up shop! 194 | // 195 | // poll_close() implies poll_flush() 196 | let r = transport 197 | .as_mut() 198 | .poll_close(cx) 199 | .map_err(Error::from_sink_error)?; 200 | 201 | if r.is_ready() { 202 | // now that close has completed, we should never send anything again 203 | // we only need to receive to make the in-flight requests complete 204 | *this.rx_only = true; 205 | } 206 | } else { 207 | let _ = transport 208 | .as_mut() 209 | .poll_flush(cx) 210 | .map_err(Error::from_sink_error)?; 211 | } 212 | } 213 | 214 | // and start looking for replies. 
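// (replies are matched to pending requests strictly in FIFO order below; on a pipelined transport, the next frame we read always belongs to the oldest in-flight request)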
215 | // 216 | // note that we *could* have this just be a loop, but we don't want to poll the stream 217 | // if we know there's nothing for it to produce. 218 | while this.in_flight.load(atomic::Ordering::Acquire) != 0 { 219 | match ready!(transport.as_mut().try_poll_next(cx)) 220 | .transpose() 221 | .map_err(Error::from_stream_error)? 222 | { 223 | Some(r) => { 224 | // take the oldest pending request; if the oneshot send below fails, 225 | // the client just no longer cares about the response, so ignore that 226 | let pending = this.responses.pop_front().ok_or(Error::Desynchronized)?; 227 | tracing::trace!(parent: &pending.span, "response arrived; forwarding"); 228 | 229 | let sender = pending.tx; 230 | let _ = sender.send(ClientResponse { 231 | response: r, 232 | span: pending.span, 233 | }); 234 | this.in_flight.fetch_sub(1, atomic::Ordering::AcqRel); 235 | } 236 | None => { 237 | // the transport terminated while we were waiting for a response! 238 | // TODO: it'd be nice if we could return the transport here.. 239 | return Poll::Ready(Err(E::from(Error::BrokenTransportRecv(None)))); 240 | } 241 | } 242 | } 243 | 244 | if *this.finish && this.in_flight.load(atomic::Ordering::Acquire) == 0 { 245 | if *this.rx_only { 246 | // we have already closed the send side. 247 | } else { 248 | // we're completely done once close() finishes! 249 | ready!(transport.poll_close(cx)).map_err(Error::from_sink_error)?; 250 | } 251 | return Poll::Ready(Ok(())); 252 | } 253 | 254 | // to get here, we must have no requests in flight and have gotten Pending from 255 | // self.mediator.try_recv or self.transport.poll_ready. we *could* also have messages 256 | // waiting to be sent (transport.poll_flush), but if that's the case it must also have 257 | // returned Pending. so, at this point, we know that all of our subtasks are either done 258 | // or have returned Pending, so the right thing for us to do is return Pending too! 259 | Poll::Pending 260 | } 261 | } 262 | 263 | impl Service for Client 264 | where 265 | T: Sink + TryStream, 266 | E: From>, 267 | E: 'static + Send, 268 | Request: 'static + Send, 269 | T: 'static, 270 | T::Ok: 'static + Send, 271 | { 272 | type Response = T::Ok; 273 | type Error = E; 274 | type Future = Pin> + Send>>; 275 | 276 | fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { 277 | Poll::Ready(ready!(self.mediator.poll_ready(cx)).map_err(|_| E::from(Error::ClientDropped))) 278 | } 279 | 280 | fn call(&mut self, req: Request) -> Self::Future { 281 | let (tx, rx) = tokio::sync::oneshot::channel(); 282 | let span = tracing::Span::current(); 283 | tracing::trace!("issuing request"); 284 | let req = ClientRequest { req, span, res: tx }; 285 | let r = self.mediator.try_send(req); 286 | Box::pin(async move { 287 | match r { 288 | Ok(()) => match rx.await { 289 | Ok(r) => { 290 | tracing::trace!(parent: &r.span, "response returned"); 291 | Ok(r.response) 292 | } 293 | Err(_) => Err(E::from(Error::ClientDropped)), 294 | }, 295 | Err(_) => Err(E::from(Error::TransportFull)), 296 | } 297 | }) 298 | } 299 | } 300 | 301 | impl tower::load::Load for Client 302 | where 303 | T: Sink + TryStream, 304 | { 305 | type Metric = usize; 306 | 307 | fn load(&self) -> Self::Metric { 308 | self.in_flight.load(atomic::Ordering::Acquire) 309 | } 310 | } 311 | -------------------------------------------------------------------------------- /src/pipeline/mod.rs: -------------------------------------------------------------------------------- 1 | //! In a pipelined protocol, the server responds to client requests in the order they were sent.
2 | //! Many requests can be in flight at the same time, but no request sees a response until all 3 | //! previous requests have been satisfied. Pipelined protocols can experience head-of-line 4 | //! blocking wherein a slow-to-process request prevents any subsequent request from being 5 | //! processed, but are often easier to implement on the server side, and provide clearer request 6 | //! ordering semantics. Example pipelined protocols include HTTP/1.1, MySQL, and Redis. 7 | //! 8 | //! Note: pipelining with the max number of in-flight requests set to 1 implies that for each 9 | //! request, the response must be received before sending another request on the same connection. 10 | 11 | /// Client bindings for a pipelined protocol. 12 | pub mod client; 13 | pub use self::client::Client; 14 | 15 | /// Server bindings for a pipelined protocol. 16 | pub mod server; 17 | pub use self::server::Server; 18 | -------------------------------------------------------------------------------- /src/pipeline/server.rs: -------------------------------------------------------------------------------- 1 | use futures_core::{ready, stream::TryStream}; 2 | use futures_sink::Sink; 3 | use futures_util::stream::FuturesOrdered; 4 | use pin_project::pin_project; 5 | use std::future::Future; 6 | use std::pin::Pin; 7 | use std::task::{Context, Poll}; 8 | use std::{error, fmt}; 9 | use tower_service::Service; 10 | 11 | /// This type drives a Tower 12 | /// [`Service`](https://docs.rs/tokio-service/0.1/tokio_service/trait.Service.html) using a 13 | /// request-at-a-time protocol transport. In particular, it reads requests from a transport that 14 | /// implements `Sink` and `Stream`, passes each one to the wrapped service, and writes the 15 | /// responses back to the transport in the order the requests arrived. 16 | #[pin_project] 17 | #[derive(Debug)] 18 | pub struct Server 19 | where 20 | T: Sink + TryStream, 21 | S: Service<::Ok>, 22 | { 23 | #[pin] 24 | pending: FuturesOrdered, 25 | #[pin] 26 | transport: T, 27 | service: S, 28 | 29 | in_flight: usize, 30 | finish: bool, 31 | } 32 | 33 | /// An error that occurred while servicing a request. 34 | pub enum Error 35 | where 36 | T: Sink + TryStream, 37 | S: Service<::Ok>, 38 | { 39 | /// The underlying transport failed to produce a request. 40 | BrokenTransportRecv(::Error), 41 | 42 | /// The underlying transport failed while attempting to send a response. 43 | BrokenTransportSend(>::Error), 44 | 45 | /// The underlying service failed to process a request.
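/// The inner value is the service's own error type, which `source()` also exposes when that type implements `std::error::Error`.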
46 | Service(S::Error), 47 | } 48 | 49 | impl fmt::Display for Error 50 | where 51 | T: Sink + TryStream, 52 | S: Service<::Ok>, 53 | >::Error: fmt::Display, 54 | ::Error: fmt::Display, 55 | S::Error: fmt::Display, 56 | { 57 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 58 | match *self { 59 | Error::BrokenTransportRecv(_) => { 60 | f.pad("underlying transport failed to produce a request") 61 | } 62 | Error::BrokenTransportSend(_) => { 63 | f.pad("underlying transport failed while attempting to send a response") 64 | } 65 | Error::Service(_) => f.pad("underlying service failed to process a request"), 66 | } 67 | } 68 | } 69 | 70 | impl fmt::Debug for Error 71 | where 72 | T: Sink + TryStream, 73 | S: Service<::Ok>, 74 | >::Error: fmt::Debug, 75 | ::Error: fmt::Debug, 76 | S::Error: fmt::Debug, 77 | { 78 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 79 | match *self { 80 | Error::BrokenTransportRecv(ref se) => write!(f, "BrokenTransportRecv({:?})", se), 81 | Error::BrokenTransportSend(ref se) => write!(f, "BrokenTransportSend({:?})", se), 82 | Error::Service(ref se) => write!(f, "Service({:?})", se), 83 | } 84 | } 85 | } 86 | 87 | impl error::Error for Error 88 | where 89 | T: Sink + TryStream, 90 | S: Service<::Ok>, 91 | >::Error: error::Error + 'static, 92 | ::Error: error::Error + 'static, 93 | S::Error: error::Error + 'static, 94 | { 95 | fn source(&self) -> Option<&(dyn error::Error + 'static)> { 96 | match *self { 97 | Error::BrokenTransportSend(ref se) => Some(se), 98 | Error::BrokenTransportRecv(ref se) => Some(se), 99 | Error::Service(ref se) => Some(se), 100 | } 101 | } 102 | } 103 | 104 | impl Error 105 | where 106 | T: Sink + TryStream, 107 | S: Service<::Ok>, 108 | { 109 | fn from_sink_error(e: >::Error) -> Self { 110 | Error::BrokenTransportSend(e) 111 | } 112 | 113 | fn from_stream_error(e: ::Error) -> Self { 114 | Error::BrokenTransportRecv(e) 115 | } 116 | 117 | fn from_service_error(e: S::Error) -> Self { 118 | Error::Service(e) 119 | } 120 | } 121 | 122 | impl Server 123 | where 124 | T: Sink + TryStream, 125 | S: Service<::Ok>, 126 | { 127 | /// Construct a new [`Server`] over the given `transport` that services requests using the 128 | /// given `service`. 129 | /// 130 | /// Requests are passed to `Service::call` as they arrive, and responses are written back to 131 | /// the underlying `transport` in the order that the requests arrive. If a later request 132 | /// completes before an earlier request, its response will be buffered until the responses to 133 | /// all preceding requests have been sent. 134 | pub fn new(transport: T, service: S) -> Self { 135 | Server { 136 | pending: FuturesOrdered::new(), 137 | transport, 138 | service, 139 | in_flight: 0, 140 | finish: false, 141 | } 142 | } 143 | 144 | /* 145 | /// Manage incoming new transport instances using the given service constructor. 146 | /// 147 | /// For each transport that `incoming` yields, a new instance of `service` is created to 148 | /// manage requests on that transport.
This is roughly equivalent to: 149 | /// 150 | /// ```rust,ignore 151 | /// incoming.map(|t| Server::pipelined(t, service.new_service(), limit)) 152 | /// ``` 153 | pub fn serve_on( 154 | incoming: TS, 155 | service: SS, 156 | limit: Option, 157 | ) -> impl Stream 158 | where 159 | TS: Stream, 160 | SS: NewService, 161 | E: From, 162 | E: From, 163 | { 164 | incoming.map_err(E::from).and_then(move |transport| { 165 | service 166 | .new_service() 167 | .map_err(E::from) 168 | .map(move |s| Server::pipelined(transport, s, limit)) 169 | }) 170 | } 171 | */ 172 | } 173 | 174 | impl Future for Server 175 | where 176 | T: Sink + TryStream, 177 | S: Service<::Ok>, 178 | { 179 | type Output = Result<(), Error>; 180 | 181 | fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { 182 | let span = tracing::trace_span!("poll"); 183 | let _guard = span.enter(); 184 | tracing::trace!("poll"); 185 | 186 | // go through the deref so we can do partial borrows 187 | let this = self.project(); 188 | 189 | // we never move transport or pending, nor do we ever hand out &mut to it 190 | let mut transport: Pin<_> = this.transport; 191 | let mut pending: Pin<_> = this.pending; 192 | 193 | // track how many times we have iterated 194 | let mut i = 0; 195 | 196 | loop { 197 | // first, poll pending futures to see if any have produced responses 198 | // note that we only poll for completed service futures if we can send the response 199 | while let Poll::Ready(r) = transport.as_mut().poll_ready(cx) { 200 | if let Err(e) = r { 201 | return Poll::Ready(Err(Error::from_sink_error(e))); 202 | } 203 | 204 | tracing::trace!( 205 | in_flight = *this.in_flight, 206 | pending = pending.len(), 207 | "transport.ready" 208 | ); 209 | match pending.as_mut().try_poll_next(cx) { 210 | Poll::Ready(Some(Err(e))) => { 211 | return Poll::Ready(Err(Error::from_service_error(e))); 212 | } 213 | Poll::Ready(Some(Ok(rsp))) => { 214 | tracing::trace!("transport.start_send"); 215 | // try to send the response! 216 | transport 217 | .as_mut() 218 | .start_send(rsp) 219 | .map_err(Error::from_sink_error)?; 220 | *this.in_flight -= 1; 221 | } 222 | _ => { 223 | // XXX: should we "release" the poll_ready we got from the Sink? 224 | break; 225 | } 226 | } 227 | } 228 | 229 | // also try to make progress on sending 230 | tracing::trace!(finish = *this.finish, "transport.poll_flush"); 231 | if let Poll::Ready(()) = transport 232 | .as_mut() 233 | .poll_flush(cx) 234 | .map_err(Error::from_sink_error)? 235 | { 236 | if *this.finish && pending.as_mut().is_empty() { 237 | // there are no more requests 238 | // and we've finished all the work! 239 | return Poll::Ready(Ok(())); 240 | } 241 | } 242 | 243 | if *this.finish { 244 | // there's still work to be done, but there are no more requests 245 | // so no need to check the incoming transport 246 | return Poll::Pending; 247 | } 248 | 249 | // if we have run for a while without yielding, yield back so other tasks can run 250 | i += 1; 251 | if i == crate::YIELD_EVERY { 252 | // we're forcing a yield, so need to ensure we get woken up again 253 | tracing::trace!("forced yield"); 254 | cx.waker().wake_by_ref(); 255 | return Poll::Pending; 256 | } 257 | 258 | // is the service ready? 
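// (we check service readiness before reading the next request; an unready service leaves the request sitting in the transport, the `ready!` below returns Pending, and the peer sees backpressure rather than unbounded buffering)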
259 | tracing::trace!("service.poll_ready"); 260 | ready!(this.service.poll_ready(cx)).map_err(Error::from_service_error)?; 261 | 262 | tracing::trace!("transport.poll_next"); 263 | let rq = ready!(transport.as_mut().try_poll_next(cx)) 264 | .transpose() 265 | .map_err(Error::from_stream_error)?; 266 | if let Some(rq) = rq { 267 | // the service is ready, and we have another request! 268 | // you know what that means: 269 | pending.push_back(this.service.call(rq)); 270 | *this.in_flight += 1; 271 | } else { 272 | // there are no more requests coming 273 | // check one more time for responses, and then yield 274 | assert!(!*this.finish); 275 | *this.finish = true; 276 | } 277 | } 278 | } 279 | } 280 | -------------------------------------------------------------------------------- /src/wrappers.rs: -------------------------------------------------------------------------------- 1 | use futures_core::stream::TryStream; 2 | use futures_sink::Sink; 3 | 4 | pub(crate) struct ClientRequest 5 | where 6 | T: Sink + TryStream, 7 | { 8 | pub(crate) req: I, 9 | pub(crate) span: tracing::Span, 10 | pub(crate) res: tokio::sync::oneshot::Sender>, 11 | } 12 | 13 | pub(crate) struct ClientResponse { 14 | pub(crate) response: T, 15 | pub(crate) span: tracing::Span, 16 | } 17 | -------------------------------------------------------------------------------- /tests/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate serde_derive; 3 | 4 | use futures_util::future::poll_fn; 5 | use std::task::{Context, Poll}; 6 | use tower_service::Service; 7 | 8 | async fn ready, Request>(svc: &mut S) -> Result<(), S::Error> { 9 | poll_fn(|cx| svc.poll_ready(cx)).await 10 | } 11 | 12 | #[derive(Serialize, Deserialize)] 13 | pub struct Request { 14 | tag: usize, 15 | value: u32, 16 | } 17 | 18 | impl Request { 19 | pub fn new(val: u32) -> Self { 20 | Request { tag: 0, value: val } 21 | } 22 | 23 | pub fn check(&self, expected: u32) { 24 | assert_eq!(self.value, expected); 25 | } 26 | } 27 | 28 | #[derive(Serialize, Deserialize)] 29 | pub struct Response { 30 | tag: usize, 31 | value: u32, 32 | } 33 | 34 | impl From for Response { 35 | fn from(r: Request) -> Response { 36 | Response { 37 | tag: r.tag, 38 | value: r.value, 39 | } 40 | } 41 | } 42 | 43 | impl Response { 44 | pub fn check(&self, expected: u32) { 45 | assert_eq!(self.value, expected); 46 | } 47 | 48 | pub fn get_tag(&self) -> usize { 49 | self.tag 50 | } 51 | } 52 | 53 | impl Request { 54 | pub fn set_tag(&mut self, tag: usize) { 55 | self.tag = tag; 56 | } 57 | } 58 | 59 | struct PanicError; 60 | use std::fmt; 61 | impl From for PanicError 62 | where 63 | E: fmt::Debug, 64 | { 65 | fn from(e: E) -> Self { 66 | panic!("{:?}", e) 67 | } 68 | } 69 | 70 | fn unwrap(r: Result) -> T { 71 | if let Ok(t) = r { 72 | t 73 | } else { 74 | unreachable!(); 75 | } 76 | } 77 | 78 | struct EchoService; 79 | impl Service for EchoService { 80 | type Response = Response; 81 | type Error = (); 82 | type Future = futures_util::future::Ready>; 83 | 84 | fn poll_ready(&mut self, _: &mut Context) -> Poll> { 85 | Poll::Ready(Ok(())) 86 | } 87 | 88 | fn call(&mut self, r: Request) -> Self::Future { 89 | futures_util::future::ok(Response::from(r)) 90 | } 91 | } 92 | 93 | mod multiplex; 94 | mod pipeline; 95 | -------------------------------------------------------------------------------- /tests/multiplex/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ready, 
unwrap, EchoService, PanicError, Request, Response}; 2 | use async_bincode::AsyncBincodeStream; 3 | use futures_util::pin_mut; 4 | use slab::Slab; 5 | use std::pin::Pin; 6 | use tokio::net::{TcpListener, TcpStream}; 7 | use tokio_tower::multiplex::{ 8 | client::VecDequePendingStore, Client, MultiplexTransport, Server, TagStore, 9 | }; 10 | use tower_service::Service; 11 | use tower_test::mock; 12 | 13 | pub(crate) struct SlabStore(Slab<()>); 14 | 15 | impl TagStore for SlabStore { 16 | type Tag = usize; 17 | fn assign_tag(mut self: Pin<&mut Self>, request: &mut Request) -> usize { 18 | let tag = self.0.insert(()); 19 | request.set_tag(tag); 20 | tag 21 | } 22 | fn finish_tag(mut self: Pin<&mut Self>, response: &Response) -> usize { 23 | let tag = response.get_tag(); 24 | self.0.remove(tag); 25 | tag 26 | } 27 | } 28 | 29 | #[tokio::test] 30 | async fn integration() { 31 | let rx = TcpListener::bind("127.0.0.1:0").await.unwrap(); 32 | let addr = rx.local_addr().unwrap(); 33 | 34 | // connect 35 | let tx = TcpStream::connect(&addr).await.unwrap(); 36 | let tx = AsyncBincodeStream::from(tx).for_async(); 37 | let mut tx: Client<_, PanicError, _> = 38 | Client::builder(MultiplexTransport::new(tx, SlabStore(Slab::new()))).build(); 39 | 40 | // accept 41 | let (rx, _) = rx.accept().await.unwrap(); 42 | let rx = AsyncBincodeStream::from(rx).for_async(); 43 | let server = Server::new(rx, EchoService); 44 | 45 | tokio::spawn(async move { server.await.unwrap() }); 46 | 47 | unwrap(ready(&mut tx).await); 48 | let fut1 = tx.call(Request::new(1)); 49 | unwrap(ready(&mut tx).await); 50 | let fut2 = tx.call(Request::new(2)); 51 | unwrap(ready(&mut tx).await); 52 | let fut3 = tx.call(Request::new(3)); 53 | unwrap(fut1.await).check(1); 54 | unwrap(fut2.await).check(2); 55 | unwrap(fut3.await).check(3); 56 | } 57 | 58 | #[tokio::test] 59 | async fn racing_close() { 60 | let rx = TcpListener::bind("127.0.0.1:0").await.unwrap(); 61 | let addr = rx.local_addr().unwrap(); 62 | 63 | // connect 64 | let tx = TcpStream::connect(&addr).await.unwrap(); 65 | let tx = AsyncBincodeStream::from(tx).for_async(); 66 | let mut tx: Client<_, PanicError, _> = 67 | Client::builder(MultiplexTransport::new(tx, SlabStore(Slab::new()))) 68 | .pending_store(VecDequePendingStore::default()) 69 | .on_service_error(|_| {}) 70 | .build(); 71 | 72 | let (service, handle) = mock::pair::(); 73 | pin_mut!(handle); 74 | 75 | // accept 76 | let (rx, _) = rx.accept().await.unwrap(); 77 | let rx = AsyncBincodeStream::from(rx).for_async(); 78 | let server = Server::new(rx, service); 79 | 80 | tokio::spawn(async move { server.await.unwrap() }); 81 | 82 | // we now want to set up a situation where a request has been sent to the server, and then the 83 | // client goes away while the request is still outstanding. in this case, the connection to the 84 | // server will be shut down in the write direction, but not in the read direction. 85 | 86 | // send a couple of requests 87 | unwrap(ready(&mut tx).await); 88 | let fut1 = tx.call(Request::new(1)); 89 | unwrap(ready(&mut tx).await); 90 | let fut2 = tx.call(Request::new(2)); 91 | unwrap(ready(&mut tx).await); 92 | // drop client to indicate no more requests 93 | drop(tx); 94 | // respond to both requests one after the other 95 | // the response to the first should trigger the state machine to handle 96 | // a read after it has called poll_close on the transport.
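// (the tower_test mock handle surfaces each incoming request so the test can answer it explicitly via send_response)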
97 | let (req1, rsp1) = handle.as_mut().next_request().await.unwrap(); 98 | req1.check(1); 99 | rsp1.send_response(Response::from(req1)); 100 | unwrap(fut1.await).check(1); 101 | let (req2, rsp2) = handle.as_mut().next_request().await.unwrap(); 102 | req2.check(2); 103 | rsp2.send_response(Response::from(req2)); 104 | unwrap(fut2.await).check(2); 105 | } 106 | -------------------------------------------------------------------------------- /tests/pipeline/client.rs: -------------------------------------------------------------------------------- 1 | use crate::{ready, unwrap, PanicError, Request, Response}; 2 | use async_bincode::{AsyncBincodeReader, AsyncBincodeStream, AsyncBincodeWriter}; 3 | use futures_util::{sink::SinkExt, stream::StreamExt}; 4 | use tokio::net::{TcpListener, TcpStream}; 5 | use tokio_tower::pipeline::Client; 6 | use tower_service::Service; 7 | 8 | #[tokio::test] 9 | async fn it_works() { 10 | let rx = TcpListener::bind("127.0.0.1:0").await.unwrap(); 11 | let addr = rx.local_addr().unwrap(); 12 | 13 | // connect 14 | let tx = TcpStream::connect(&addr).await.unwrap(); 15 | let tx: AsyncBincodeStream<_, Response, _, _> = AsyncBincodeStream::from(tx).for_async(); 16 | let mut tx: Client<_, PanicError, _> = Client::new(tx); 17 | 18 | tokio::spawn(async move { 19 | loop { 20 | let (mut stream, _) = rx.accept().await.unwrap(); 21 | tokio::spawn(async move { 22 | let (r, w) = stream.split(); 23 | let mut r: AsyncBincodeReader<_, Request> = AsyncBincodeReader::from(r); 24 | let mut w: AsyncBincodeWriter<_, Response, _> = 25 | AsyncBincodeWriter::from(w).for_async(); 26 | loop { 27 | let req = r.next().await.unwrap().unwrap(); 28 | w.send(Response::from(req)).await.unwrap(); 29 | } 30 | }); 31 | } 32 | }); 33 | 34 | unwrap(ready(&mut tx).await); 35 | unwrap(tx.call(Request::new(1)).await).check(1); 36 | } 37 | -------------------------------------------------------------------------------- /tests/pipeline/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::{ready, unwrap, EchoService, PanicError, Request, Response}; 2 | use async_bincode::AsyncBincodeStream; 3 | use futures_util::pin_mut; 4 | use tokio::net::{TcpListener, TcpStream}; 5 | use tokio_tower::pipeline::{Client, Server}; 6 | use tower_service::Service; 7 | use tower_test::mock; 8 | 9 | mod client; 10 | 11 | #[tokio::test] 12 | async fn integration() { 13 | let rx = TcpListener::bind("127.0.0.1:0").await.unwrap(); 14 | let addr = rx.local_addr().unwrap(); 15 | 16 | // connect 17 | let tx = TcpStream::connect(&addr).await.unwrap(); 18 | let tx: AsyncBincodeStream<_, Response, _, _> = AsyncBincodeStream::from(tx).for_async(); 19 | let mut tx: Client<_, PanicError, _> = Client::new(tx); 20 | 21 | // accept 22 | let (rx, _) = rx.accept().await.unwrap(); 23 | let rx = AsyncBincodeStream::from(rx).for_async(); 24 | let server = Server::new(rx, EchoService); 25 | 26 | tokio::spawn(async move { server.await.unwrap() }); 27 | 28 | unwrap(ready(&mut tx).await); 29 | let fut1 = tx.call(Request::new(1)); 30 | unwrap(ready(&mut tx).await); 31 | let fut2 = tx.call(Request::new(2)); 32 | unwrap(ready(&mut tx).await); 33 | let fut3 = tx.call(Request::new(3)); 34 | unwrap(fut1.await).check(1); 35 | unwrap(fut2.await).check(2); 36 | unwrap(fut3.await).check(3); 37 | } 38 | 39 | #[tokio::test] 40 | async fn racing_close() { 41 | let rx = TcpListener::bind("127.0.0.1:0").await.unwrap(); 42 | let addr = rx.local_addr().unwrap(); 43 | 44 | // connect 45 | let tx = 
TcpStream::connect(&addr).await.unwrap(); 46 | let tx: AsyncBincodeStream<_, Response, _, _> = AsyncBincodeStream::from(tx).for_async(); 47 | let mut tx: Client<_, PanicError, _> = Client::new(tx); 48 | 49 | let (service, handle) = mock::pair::(); 50 | pin_mut!(handle); 51 | 52 | // accept 53 | let (rx, _) = rx.accept().await.unwrap(); 54 | let rx = AsyncBincodeStream::from(rx).for_async(); 55 | let server = Server::new(rx, service); 56 | 57 | tokio::spawn(async move { server.await.unwrap() }); 58 | 59 | // we now want to set up a situation where a request has been sent to the server, and then the 60 | // client goes away while the request is still outstanding. in this case, the connection to the 61 | // server will be shut down in the write direction, but not in the read direction. 62 | 63 | // send a couple of requests 64 | unwrap(ready(&mut tx).await); 65 | let fut1 = tx.call(Request::new(1)); 66 | unwrap(ready(&mut tx).await); 67 | let fut2 = tx.call(Request::new(2)); 68 | unwrap(ready(&mut tx).await); 69 | // drop client to indicate no more requests 70 | drop(tx); 71 | // respond to both requests one after the other 72 | // the response to the first should trigger the state machine to handle 73 | // a read after it has called poll_close on the transport. 74 | let (req1, rsp1) = handle.as_mut().next_request().await.unwrap(); 75 | req1.check(1); 76 | rsp1.send_response(Response::from(req1)); 77 | unwrap(fut1.await).check(1); 78 | let (req2, rsp2) = handle.as_mut().next_request().await.unwrap(); 79 | req2.check(2); 80 | rsp2.send_response(Response::from(req2)); 81 | unwrap(fut2.await).check(2); 82 | } 83 | --------------------------------------------------------------------------------