├── .gitignore
├── Cargo.toml
├── LICENSE
├── src
│   ├── sync.rs
│   ├── main.rs
│   ├── backend.rs
│   └── tcplb.rs
└── README.md

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
target

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "nucleon"
version = "0.0.1"
authors = ["Nicolas Le Manchet "]

[dependencies]
log = "0.3.1"
argparse = "0.2.1"
mio = "0.4.3"
redis = "0.5.1"
env_logger = "0.3.1"

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Copyright (c) 2015, Nicolas Le Manchet
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------
/src/sync.rs:
--------------------------------------------------------------------------------
extern crate redis;

use std::sync::{Arc, Mutex};
use std::thread;

use backend::{RoundRobinBackend, GetBackend};

pub fn create_sync_thread(backend: Arc<Mutex<RoundRobinBackend>>, redis_url: String) {
    thread::spawn(move || {
        let pubsub = subscribe_to_redis(&redis_url).unwrap();
        loop {
            let msg = pubsub.get_message().unwrap();
            handle_message(backend.clone(), msg).unwrap();
        }
    });
}

fn subscribe_to_redis(url: &str) -> redis::RedisResult<redis::PubSub> {
    let client = try!(redis::Client::open(url));
    let mut pubsub: redis::PubSub = try!(client.get_pubsub());
    try!(pubsub.subscribe("backend_add"));
    try!(pubsub.subscribe("backend_remove"));
    info!("Subscribed to Redis channels 'backend_add' and 'backend_remove'");
    Ok(pubsub)
}

fn handle_message(backend: Arc<Mutex<RoundRobinBackend>>,
                  msg: redis::Msg)
                  -> redis::RedisResult<()> {
    let channel = msg.get_channel_name();
    let payload: String = try!(msg.get_payload());
    debug!("New message on Redis channel {}: '{}'", channel, payload);

    match channel {
        "backend_add" => {
            let mut backend = backend.lock().unwrap();
            match backend.add(&payload) {
                Ok(_) => info!("Added new server {}", payload),
                _ => {}
            }
        }
        "backend_remove" => {
            let mut backend = backend.lock().unwrap();
            match backend.remove(&payload) {
                Ok(_) => info!("Removed server {}", payload),
                _ => {}
            }
        }
        _ => info!("Cannot parse Redis message"),
    }
    Ok(())
}

--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate argparse;
extern crate mio;

mod backend;
mod sync;
mod tcplb;

use std::sync::{Arc, Mutex};
use std::process::exit;
use std::env;

use argparse::{ArgumentParser, StoreTrue, Store, Collect};
use mio::*;

fn main() {
    let mut servers: Vec<String> = Vec::new();
    let mut bind = "127.0.0.1:8000".to_string();
    let mut redis_url = "redis://localhost".to_string();
    let mut disable_redis = false;
    let mut log_level = "info".to_string();

    {
        let mut ap = ArgumentParser::new();
        ap.set_description("Dynamic TCP load balancer");

        ap.refer(&mut servers).add_argument("server", Collect, "Servers to load balance");

        ap.refer(&mut bind).add_option(&["-b", "--bind"],
                                       Store,
                                       "Bind the load balancer to address:port (127.0.0.1:8000)");

        ap.refer(&mut redis_url).add_option(&["-r", "--redis"],
                                            Store,
                                            "URL of Redis database (redis://localhost)");

        ap.refer(&mut disable_redis).add_option(&["--no-redis"],
                                                StoreTrue,
                                                "Disable updates of backends through Redis");

        ap.refer(&mut log_level).add_option(&["-l", "--log"],
                                            Store,
                                            "Log level [debug, info, warn, error] (info)");

        ap.parse_args_or_exit();
    }

    env::set_var("RUST_LOG", log_level);

    env_logger::init().unwrap();

    if servers.is_empty() {
        println!("Need at least one server to load balance");
        exit(1);
    }

    let backend =
        Arc::new(Mutex::new(backend::RoundRobinBackend::new(servers).unwrap()));

    let mut proxy = tcplb::Proxy::new(&bind, backend.clone());
    let mut event_loop = EventLoop::new().unwrap();

    // Register interest in notifications of new connections
    event_loop.register_opt(&proxy.listen_sock,
                            Token(1),
                            EventSet::readable(),
                            PollOpt::edge())
              .unwrap();

    if !disable_redis {
        sync::create_sync_thread(backend.clone(), redis_url);
    }

    // Start handling events
    event_loop.run(&mut proxy).unwrap();
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Nucleon
=======

Nucleon is a dynamic TCP load balancer written in Rust. It can insert and
remove backend servers on the fly. To do that, it leverages the [Redis
Pub/Sub](http://redis.io/topics/pubsub) mechanism: adding a server to or
removing one from the cluster is as easy as publishing a message to Redis.

How to build it
---------------

All you need to build it is [Rust
1.3](https://doc.rust-lang.org/stable/book/installing-rust.html).

Just go into the repository and run:

    $ cargo build --release

Usage
-----

Nucleon can be used with or without a Redis database. When run without Redis,
it is not possible to add or remove load balanced servers without restarting
the process.

```
Usage:
  nucleon [OPTIONS] [SERVER ...]

Dynamic TCP load balancer

positional arguments:
  server                Servers to load balance

optional arguments:
  -h,--help             show this help message and exit
  -b,--bind BIND        Bind the load balancer to address:port (127.0.0.1:8000)
  -r,--redis REDIS      URL of Redis database (redis://localhost)
  --no-redis            Disable updates of backends through Redis
  -l,--log LOG          Log level [debug, info, warn, error] (info)
```

Imagine you have two web servers to load balance, and a local Redis. Run the
load balancer with:

    nucleon --bind 0.0.0.0:8000 10.0.0.1:80 10.0.0.2:80

Now imagine that you want to scale up your infrastructure by spawning a new web
server at 10.0.0.3. Just publish a message to the Redis channel `backend_add`:

    redis:6379> PUBLISH backend_add 10.0.0.3:80
    (integer) 1

You will see in the logs:

    INFO - Load balancing server V4(10.0.0.1:80)
    INFO - Load balancing server V4(10.0.0.2:80)
    INFO - Now listening on 0.0.0.0:8000
    INFO - Subscribed to Redis channels 'backend_add' and 'backend_remove'
    INFO - Added new server 10.0.0.3:80

If you decide that you do not need server 2 any longer:

    redis:6379> PUBLISH backend_remove 10.0.0.2:80
    (integer) 1
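The same messages can be published from application code. Here is a minimal
sketch using the `redis` crate that nucleon itself depends on (the Redis URL
and backend address are placeholders, adapt them to your deployment):

```
extern crate redis;

use redis::Commands;

fn main() {
    // Connect to the same Redis instance nucleon subscribes to.
    let client = redis::Client::open("redis://localhost").unwrap();
    let con = client.get_connection().unwrap();
    // Same effect as `PUBLISH backend_add 10.0.0.3:80` in redis-cli;
    // the reply is the number of subscribers that received the message.
    let receivers: i32 = con.publish("backend_add", "10.0.0.3:80").unwrap();
    println!("{} subscriber(s) notified", receivers);
}
```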
How does it perform?
--------------------

Surprisingly well. A quick comparison with HA Proxy in TCP mode, with a single
backend containing a single server, using iperf gives:

| Connections | HA Proxy     | Nucleon       |
| -----------:| ------------:| -------------:|
| 1           | 15.1 Gbits/s | 15.7 Gbits/s  |
| 10          | 13.5 Gbits/s | 11.3 Gbits/s  |
| 100         | 8.9 Gbits/s  | 10.5 Gbits/s  |

Keep in mind that this is a really simple test, far from what real life traffic
looks like. A real benchmark should also compare short lived connections with
long running ones, etc.

Licence
-------

BSD 2-Clause (see the LICENSE file)

--------------------------------------------------------------------------------
/src/backend.rs:
--------------------------------------------------------------------------------
use std::net::{SocketAddr, AddrParseError};
use std::str::FromStr;

pub trait GetBackend {
    fn get(&mut self) -> Option<SocketAddr>;
    fn add(&mut self, backend_str: &str) -> Result<(), AddrParseError>;
    fn remove(&mut self, backend_str: &str) -> Result<(), AddrParseError>;
}

pub struct RoundRobinBackend {
    backends: Vec<SocketAddr>,
    last_used: usize,
}

impl RoundRobinBackend {
    pub fn new(backends_str: Vec<String>) -> Result<RoundRobinBackend, AddrParseError> {
        let mut backends = Vec::new();
        for backend_str in backends_str {
            let backend_socket_addr: SocketAddr = try!(FromStr::from_str(&backend_str));
            backends.push(backend_socket_addr);
            info!("Load balancing server {:?}", backend_socket_addr);
        }
        Ok(RoundRobinBackend {
            backends: backends,
            last_used: 0,
        })
    }
}

impl GetBackend for RoundRobinBackend {
    fn get(&mut self) -> Option<SocketAddr> {
        if self.backends.is_empty() {
            return None;
        }
        self.last_used = (self.last_used + 1) % self.backends.len();
        self.backends.get(self.last_used).map(|b| b.clone())
    }

    fn add(&mut self, backend_str: &str) -> Result<(), AddrParseError> {
        let backend_socket_addr: SocketAddr = try!(FromStr::from_str(&backend_str));
        self.backends.push(backend_socket_addr);
        Ok(())
    }

    fn remove(&mut self, backend_str: &str) -> Result<(), AddrParseError> {
        let backend_socket_addr: SocketAddr = try!(FromStr::from_str(&backend_str));
        self.backends.retain(|&x| x != backend_socket_addr);
        Ok(())
    }
}


#[cfg(test)]
mod tests {
    use std::net::{SocketAddr, AddrParseError};
    use super::{RoundRobinBackend, GetBackend};

    #[test]
    fn test_rrb_backend() {
        let backends_str = vec!["127.0.0.1:6000".to_string(), "127.0.0.1:6001".to_string()];
        let mut rrb = RoundRobinBackend::new(backends_str).unwrap();
        assert_eq!(2, rrb.backends.len());

        let first_socket_addr = rrb.get().unwrap();
        let second_socket_addr = rrb.get().unwrap();
        let third_socket_addr = rrb.get().unwrap();
        let fourth_socket_addr = rrb.get().unwrap();
        assert_eq!(first_socket_addr, third_socket_addr);
        assert_eq!(second_socket_addr, fourth_socket_addr);
        assert!(first_socket_addr != second_socket_addr);
    }

    #[test]
    fn test_empty_rrb_backend() {
        let backends_str = vec![];
        let mut rrb = RoundRobinBackend::new(backends_str).unwrap();
        assert_eq!(0, rrb.backends.len());
        assert!(rrb.get().is_none());
    }

    #[test]
    fn test_add_to_rrb_backend() {
        let mut rrb = RoundRobinBackend::new(vec![]).unwrap();
        assert!(rrb.get().is_none());
        assert!(rrb.add("327.0.0.1:6000").is_err());
        assert!(rrb.get().is_none());
        assert!(rrb.add("127.0.0.1:6000").is_ok());
        assert!(rrb.get().is_some());
    }

    #[test]
    fn test_remove_from_rrb_backend() {
        let backends_str = vec!["127.0.0.1:6000".to_string(), "127.0.0.1:6001".to_string()];
        let mut rrb = RoundRobinBackend::new(backends_str).unwrap();
        assert!(rrb.remove("327.0.0.1:6000").is_err());
        assert_eq!(2, rrb.backends.len());
        assert!(rrb.remove("127.0.0.1:6000").is_ok());
        assert_eq!(1, rrb.backends.len());
        assert!(rrb.remove("127.0.0.1:6000").is_ok());
        assert_eq!(1, rrb.backends.len());
    }
}

--------------------------------------------------------------------------------
/src/tcplb.rs:
--------------------------------------------------------------------------------
extern crate mio;

use std::io;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::{Arc, Mutex};

use mio::*;
use mio::buf::ByteBuf;
use mio::tcp::{TcpListener, TcpStream};
use mio::util::Slab;

use std::collections::VecDeque;
use std::ops::Drop;

use backend::{RoundRobinBackend, GetBackend};

const BUFFER_SIZE: usize = 8192;
const MAX_BUFFERS_PER_CONNECTION: usize = 16;
const MAX_CONNECTIONS: usize = 512;
const CONNECT_TIMEOUT_MS: usize = 1000;

pub struct Proxy {
    // socket where incoming connections arrive
    pub listen_sock: TcpListener,

    // token of the listening socket
    token: Token,

    // backend containing the servers to proxy to
    backend: Arc<Mutex<RoundRobinBackend>>,

    // slab of Connections (front and back ends)
    connections: Slab<Connection>,

    // queue of tokens waiting to be read
    readable_tokens: VecDeque<Token>,
}

impl Proxy {
    /// Create an instance of our proxy
    pub fn new(listen_addr: &str, backend: Arc<Mutex<RoundRobinBackend>>) -> Proxy {
        let listen_addr: SocketAddr = FromStr::from_str(&listen_addr)
                                          .ok()
                                          .expect("Failed to parse listen host:port string");
        let listen_sock = TcpListener::bind(&listen_addr).unwrap();
        info!("Now listening on {}", &listen_addr);
        Proxy {
            listen_sock: listen_sock,
            token: Token(1),
            backend: backend,
            connections: Slab::new_starting_at(Token(2), MAX_CONNECTIONS),
            readable_tokens: VecDeque::with_capacity(MAX_CONNECTIONS),
        }
    }

    /// Put a token in the list of readable tokens
    ///
    /// Once a token is in the list, it is no longer interested in readable
    /// events from the event_loop.
    fn push_to_readable_tokens(&mut self, event_loop: &mut EventLoop<Proxy>, token: Token) {
        self.readable_tokens.push_back(token);
        self.connections[token].interest.remove(EventSet::readable());
        self.connections[token].reregister(event_loop).unwrap();
    }

    /// Read as much as we can from a token
    ///
    /// Stops when we have read everything the kernel had for us or when the
    /// other end's send queue is full.
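    /// Returns Ok(true) once the kernel buffer is drained, Ok(false) when we
    /// stopped early because the other end's send queue was full.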
    fn read_token(&mut self, event_loop: &mut EventLoop<Proxy>, token: Token) -> io::Result<bool> {
        let other_end_token = match self.connections[token].end_token {
            Some(other_end_token) => other_end_token,
            None => {
                warn!("Attempting to read a token with no other end");
                return Err(io::Error::new(io::ErrorKind::Other, "No other end"));
            }
        };
        let buffers_to_read = MAX_BUFFERS_PER_CONNECTION -
                              self.connections[other_end_token].send_queue.len();
        let (exhausted_kernel, messages) = try!(self.find_connection_by_token(token)
                                                    .read(buffers_to_read));
        // let's not tell we have something to write if we don't
        if messages.is_empty() {
            return Ok(exhausted_kernel);
        }

        self.connections[other_end_token]
            .send_messages(messages)
            .and_then(|_| {
                self.connections[other_end_token].interest.insert(EventSet::writable());
                self.connections[other_end_token].reregister(event_loop)
            })
            .unwrap_or_else(|e| {
                error!("Failed to queue message for {:?}: {:?}", other_end_token, e);
            });
        self.connections[token].reregister(event_loop).unwrap();
        Ok(exhausted_kernel)
    }

    /// Try to flush the list of readable tokens
    ///
    /// Loops on all readable tokens and removes them from the list once we
    /// have completely flushed the kernel buffer.
    fn flush_readable_tokens(&mut self, event_loop: &mut EventLoop<Proxy>) {
        for _ in 0..self.readable_tokens.len() {
            match self.readable_tokens.pop_front() {
                Some(token) => {
                    match self.read_token(event_loop, token) {
                        Ok(exhausted_kernel) => {
                            if exhausted_kernel {
                                self.connections[token].interest.insert(EventSet::readable());
                                self.connections[token].reregister(event_loop).unwrap();
                            } else {
                                self.readable_tokens.push_back(token);
                            }
                        }
                        Err(e) => {
                            warn!("Error while reading {:?}: {}", token, e);
                            self.terminate_connection(event_loop, token);
                        }
                    }
                }
                None => break,
            }
        }
    }

    /// Handle a write event from the event loop
    ///
    /// We assume that getting a write event means the TCP connection
    /// phase is finished.
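    /// (With a non-blocking connect, the socket becoming writable is the
    /// kernel's signal that the connection to the backend has completed.)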
    fn handle_write_event(&mut self, event_loop: &mut EventLoop<Proxy>, token: Token) {
        if !self.connections[token].connected {
            self.connections[token].connected = true;
            match self.connections[token].sock.peer_addr() {
                Ok(addr) => info!("Connected to backend server {}", addr),
                Err(_) => warn!("Connected to unknown backend server, this is odd"),
            }
        }
        self.flush_send_queue(event_loop, token);
    }

    /// Flush the send queue of a token
    ///
    /// Drops the connection if we sent everything and the other end is gone.
    fn flush_send_queue(&mut self, event_loop: &mut EventLoop<Proxy>, token: Token) {
        match self.connections[token].write() {
            Ok(flushed_everything) => {
                if flushed_everything {
                    self.connections[token].interest.remove(EventSet::writable());
                    self.connections[token].reregister(event_loop).unwrap();
                }
            }
            Err(_) => {
                error!("Could not write on {:?}, dropping send queue", token);
                self.connections[token].send_queue.clear();
            }
        }

        // Terminate connection if other end is gone and send queue is flushed
        if self.connections[token].send_queue.is_empty() &&
           self.connections[token].end_token.is_none() {
            self.terminate_connection(event_loop, token);
        }
    }

    /// Accept all pending connections
    fn accept(&mut self, event_loop: &mut EventLoop<Proxy>) {
        loop {
            if !self.accept_one(event_loop) {
                break;
            }
        }
    }

    /// Accept a single pending connection
    ///
    /// Once the connection is accepted, it creates a new connection to the
    /// backend and links both connections together.
    ///
    /// Returns true when a connection is accepted successfully,
    /// false when there is no more connection to accept or an error happened.
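    /// (accept() above loops on this until it returns false: with an
    /// edge-triggered listening socket the whole backlog must be drained on
    /// each readable event.)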
    fn accept_one(&mut self, event_loop: &mut EventLoop<Proxy>) -> bool {
        let client_sock = match self.listen_sock.accept() {
            Ok(s) => {
                match s {
                    Some(sock) => sock,
                    None => {
                        debug!("No more socket to accept on this event");
                        return false;
                    }
                }
            }
            Err(e) => {
                error!("Failed to accept new socket, {:?}", e);
                return false;
            }
        };
        match client_sock.peer_addr() {
            Ok(client_addr) => info!("New client connection from {}", client_addr),
            Err(_) => info!("New client connection from unknown source"),
        }
        let backend_sock = match self.connect_to_backend_server() {
            Ok(backend_sock) => backend_sock,
            Err(e) => {
                error!("Could not connect to backend: {}", e);
                return false;
            }
        };
        let client_token = self.create_connection_from_sock(client_sock, true, event_loop);
        let backend_token = self.create_connection_from_sock(backend_sock, false, event_loop);
        match client_token {
            Some(client_token) => {
                match backend_token {
                    Some(backend_token) => {
                        self.link_connections_together(client_token, backend_token, event_loop);
                        // Register a timeout on the backend server socket
                        let timeout = event_loop.timeout_ms(backend_token.as_usize(),
                                                            CONNECT_TIMEOUT_MS as u64)
                                                .unwrap();
                        self.connections[backend_token].timeout = Some(timeout);
                    }
                    None => {
                        error!("Cannot create backend Connection, dropping client");
                        self.connections.remove(client_token);
                        return false;
                    }
                }
            }
            None => {
                match backend_token {
                    Some(backend_token) => {
                        error!("Cannot create client Connection, dropping backend");
                        self.connections.remove(backend_token);
                        return false;
                    }
                    None => {
                        error!("Cannot create client nor backend Connection");
                        return false;
                    }
                }
            }
        }

        true
    }

    /// Create a new TCP socket to a backend server
    fn connect_to_backend_server(&mut self) -> io::Result<TcpStream> {
        let backend_socket_addr = self.backend.lock().unwrap().get().unwrap();
        TcpStream::connect(&backend_socket_addr)
    }

    /// Create a Connection instance from a socket
    fn create_connection_from_sock(&mut self,
                                   sock: TcpStream,
                                   already_connected: bool,
                                   event_loop: &mut EventLoop<Proxy>)
                                   -> Option<Token> {
        self.connections.insert_with(|token| {
            info!("Creating Connection with {:?}", token);
            let mut connection = Connection::new(sock, token, already_connected);
            connection.register(event_loop).unwrap();
            connection
        })
    }

    /// Link two Connections together
    ///
    /// This makes it easy to know to which Connection to send the data we
    /// receive on another Connection.
    /// A Connection is not interested in read events before being linked correctly.
    fn link_connections_together(&mut self,
                                 client_token: Token,
                                 backend_token: Token,
                                 event_loop: &mut EventLoop<Proxy>) {
        self.connections[client_token].end_token = Some(backend_token);
        self.connections[backend_token].end_token = Some(client_token);
        // Now that we have two Connections linked with each other
        // we can register to Read events.
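        // The backend side also asks for writable events right away so that
        // the pending non-blocking connect is reported once it completes.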
        self.connections[client_token].interest.insert(EventSet::readable());
        self.connections[backend_token].interest.insert(EventSet::readable());
        self.connections[backend_token].interest.insert(EventSet::writable());
        self.connections[client_token].reregister(event_loop).unwrap();
        self.connections[backend_token].reregister(event_loop).unwrap();
    }

    /// Terminate a connection as well as its other end
    ///
    /// Makes sure to flush all pending queues before dropping the other end
    /// of a connection.
    fn terminate_connection(&mut self, event_loop: &mut EventLoop<Proxy>, token: Token) {
        if !self.connections.contains(token) {
            warn!("Attempting to terminate an already gone connection");
            return;
        }
        match self.connections[token].end_token {
            Some(end_token) => {
                if self.connections[end_token].send_queue.is_empty() {
                    // Nothing to write on the other end, we can drop it
                    self.destroy_connection(event_loop, end_token);
                } else {
                    // We still need to write things to the other end,
                    // so just stop reading it and we will terminate it
                    // once we have flushed its send_queue.
                    // Todo: Is there a way to schedule a timeout?
                    self.connections[end_token].end_token = None;
                    self.connections[end_token].interest.remove(EventSet::readable());
                    self.connections[end_token].interest.insert(EventSet::writable());
                    self.connections[end_token].reregister(event_loop).unwrap();
                }
            }
            None => {}
        }
        self.destroy_connection(event_loop, token);
    }

    /// Clean the environment and drop a connection
    ///
    /// While terminate_connection() handles corner cases, this method does not, it
    /// only cleans and drops.
    fn destroy_connection(&mut self, event_loop: &mut EventLoop<Proxy>, token: Token) {
        match self.connections[token].timeout {
            Some(timeout) => {
                event_loop.clear_timeout(timeout);
                self.connections[token].timeout = None;
            }
            None => {}
        }
        self.connections[token].deregister(event_loop).unwrap();
        self.connections.remove(token);
    }

    /// Find a connection in the slab using the given token.
    fn find_connection_by_token<'a>(&'a mut self, token: Token) -> &'a mut Connection {
        &mut self.connections[token]
    }

    /// Try to connect to the next backend server
    ///
    /// Resets the timeout to prevent multiple timeouts from being set concurrently.
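    /// (Called both when the connect timeout fires and when the connect
    /// attempt fails with an error event.)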
    fn try_next_server(&mut self, event_loop: &mut EventLoop<Proxy>, token: Token) {
        match self.connections[token].timeout {
            Some(timeout) => {
                event_loop.clear_timeout(timeout);
                self.connections[token].timeout = None;
            }
            None => {}
        }
        self.connections[token].deregister(event_loop).unwrap();
        self.connections[token].sock = match self.connect_to_backend_server() {
            Ok(backend_sock) => backend_sock,
            Err(e) => {
                // Todo: drop connections
                error!("Could not connect to backend: {}", e);
                return;
            }
        };
        self.connections[token].register(event_loop).unwrap();
        // Register another timeout on the backend server socket
        // in case the new backend server is also unavailable
        let timeout = event_loop.timeout_ms(token.as_usize(), CONNECT_TIMEOUT_MS as u64)
                                .unwrap();
        self.connections[token].timeout = Some(timeout);
    }
}

impl Handler for Proxy {
    type Timeout = usize;
    type Message = ();

    /// Method called when a timeout fires
    ///
    /// Only used for backend server connection timeouts
    fn timeout(&mut self, event_loop: &mut EventLoop<Proxy>, timeout: usize) {
        let token = Token(timeout);
        if self.connections.contains(token) && !self.connections[token].connected {
            warn!("Connect to backend server timeout");
            self.try_next_server(event_loop, token);
        }
    }

    /// Method called when an event from the event loop is notified
    fn ready(&mut self, event_loop: &mut EventLoop<Proxy>, token: Token, events: EventSet) {
        debug!("events [{:?}] on {:?}", events, token);

        // we are only interested in read events from the listening token
        // so we can safely assume this is a read event
        if token == self.token {
            self.accept(event_loop);
            info!("Accepted connection(s), now {} Connections",
                  self.connections.count());
            return;
        }

        if !self.connections.contains(token) {
            warn!("Got an event on a gone token");
            return;
        }

        if events.is_error() {
            debug!("Got an error on {:?}", token);
            if !self.connections[token].connected {
                // Got an error while connecting to a backend server?
                // Let's try the next one.
                warn!("Connect to backend server failed");
                self.try_next_server(event_loop, token);
            } else {
                self.terminate_connection(event_loop, token);
            }
            return;
        }

        if events.is_writable() {
            debug!("Got a write event on {:?}", token);
            self.handle_write_event(event_loop, token);
        }

        if events.is_hup() && events.is_readable() {
            debug!("Got a read hang up on {:?}", token);
            // bypass the readable tokens queue, let's read until the kernel
            // is exhausted and drop the connection
            match self.read_token(event_loop, token) {
                Ok(_) => self.terminate_connection(event_loop, token),
                Err(_) => self.terminate_connection(event_loop, token),
            }
        } else if events.is_readable() {
            debug!("Got a read event on {:?}", token);
            self.push_to_readable_tokens(event_loop, token);
        } else if events.is_hup() {
            debug!("Got a hup event on {:?}", token);
        }

        self.flush_readable_tokens(event_loop);

        debug!("Finished loop with {} Connections and {} readable tokens",
               self.connections.count(),
               self.readable_tokens.len());
    }
}

struct Connection {
    // handle to the accepted socket
    sock: TcpStream,

    // token used to register with the event loop
    token: Token,

    // set of events we are interested in
    interest: EventSet,

    // messages waiting to be sent out to sock
    send_queue: VecDeque<ByteBuf>,

    // other end of the tunnel
    end_token: Option<Token>,

    // is the socket connected already or waiting for an answer?
    connected: bool,

    // stores the timeout set when connecting to a backend server
    timeout: Option<Timeout>,
}

impl Drop for Connection {
    fn drop(&mut self) {
        info!("Dropping Connection with {:?}", self.token);
    }
}

impl Connection {
    fn new(sock: TcpStream, token: Token, connected: bool) -> Connection {
        Connection {
            sock: sock,
            token: token,

            // new connections are only listening for a hang up event when
            // they are first created. We always want to make sure we are
            // listening for the hang up event. We will additionally listen
            // for readable and writable events later on.
            interest: EventSet::hup() | EventSet::error(),

            send_queue: VecDeque::with_capacity(MAX_BUFFERS_PER_CONNECTION),

            // When instantiated, a Connection does not yet have an other end
            end_token: None,

            connected: connected,

            timeout: None,
        }
    }

    /// Read buffers from the kernel
    ///
    /// Reads at most 'nb_buffers' buffers.
    ///
    /// Returns a tuple of (bool, Vec<ByteBuf>) wrapped in a Result.
    /// True if the kernel has no more data to give, false otherwise.
    /// The Vec contains the actual data read, in chunks of buffers.
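    /// (A partial read, i.e. nb_buffers reached before the kernel is drained,
    /// reports false so the caller knows more data is pending.)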
    fn read(&mut self, nb_buffers: usize) -> io::Result<(bool, Vec<ByteBuf>)> {
        let mut recv_vec: Vec<ByteBuf> = Vec::with_capacity(nb_buffers);
        let mut exhausted_kernel: bool = false;

        for _ in 0..nb_buffers {
            let mut recv_buf = ByteBuf::mut_with_capacity(BUFFER_SIZE);
            match self.sock.try_read_buf(&mut recv_buf) {
                // the socket receive buffer is empty, so let's move on
                // try_read_buf internally handles WouldBlock here too
                Ok(None) => {
                    debug!("CONN : we read 0 bytes, exhausted kernel");
                    exhausted_kernel = true;
                    break;
                }
                Ok(Some(n)) => {
                    debug!("CONN : we read {} bytes", n);
                    if n == 0 {
                        // Reading on a closed socket never gives Ok(None)...
                        // Todo: check why
                        exhausted_kernel = true;
                        break;
                    }
                    if n > 0 {
                        // flip changes our type from MutByteBuf to ByteBuf
                        recv_vec.push(recv_buf.flip());
                    }
                }
                Err(e) => {
                    error!("Failed to read buffer for {:?}, error: {}", self.token, e);
                    return Err(e);
                }
            }
        }

        Ok((exhausted_kernel, recv_vec))
    }

    /// Try to flush the whole send queue
    ///
    /// Returns true when everything is flushed, false otherwise
    fn write(&mut self) -> io::Result<bool> {
        while !self.send_queue.is_empty() {
            let wrote_everything = try!(self.write_one_buf());
            if !wrote_everything {
                // Kernel did not accept all our data, let's keep
                // interest on write events so we get notified when
                // kernel is ready to accept our data in the future.
                return Ok(false);
            }
        }

        Ok(true)
    }

    /// Write one buffer to the socket
    ///
    /// Returns true if the totality of the buffer was sent,
    /// false if the kernel did not accept everything.
    fn write_one_buf(&mut self) -> io::Result<bool> {
        self.send_queue
            .pop_front()
            .ok_or(io::Error::new(io::ErrorKind::Other, "Could not pop send queue"))
            .and_then(|mut buf| {
                match self.sock.try_write_buf(&mut buf) {
                    Ok(None) => {
                        debug!("client flushing buf; WouldBlock");

                        // put message back into the queue so we can try again
                        self.send_queue.push_front(buf);
                        Ok(false)
                    }
                    Ok(Some(n)) => {
                        debug!("CONN : we wrote {} bytes", n);
                        if buf.has_remaining() {
                            self.send_queue.push_front(buf);
                            Ok(false)
                        } else {
                            Ok(true)
                        }
                    }
                    Err(e) => {
                        error!("Failed to send buffer for {:?}, error: {}", self.token, e);
                        Err(e)
                    }
                }
            })
    }

    /// Queue outgoing messages to the other end
    ///
    /// This will cause the connection to register interest in write
    /// events with the event loop.
    fn send_messages(&mut self, messages: Vec<ByteBuf>) -> io::Result<()> {
        // Todo: use Vec.append() once it is available in stable Rust
        self.send_queue.extend(messages.into_iter());
        self.interest.insert(EventSet::writable());
        Ok(())
    }

    /// Register interest in read events with the event_loop
    ///
    /// This will let the event loop get notified on events happening on
    /// this connection.
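    /// (Registered with edge triggering and oneshot, so after every event a
    /// reregister is needed to keep receiving notifications.)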
    fn register(&mut self, event_loop: &mut EventLoop<Proxy>) -> io::Result<()> {
        event_loop.register_opt(&self.sock,
                                self.token,
                                self.interest,
                                PollOpt::edge() | PollOpt::oneshot())
                  .or_else(|e| {
                      error!("Failed to register {:?}, {:?}", self.token, e);
                      Err(e)
                  })
    }

    /// Re-register interest in events with the event_loop
    fn reregister(&mut self, event_loop: &mut EventLoop<Proxy>) -> io::Result<()> {
        event_loop.reregister(&self.sock,
                              self.token,
                              self.interest,
                              PollOpt::edge() | PollOpt::oneshot())
                  .or_else(|e| {
                      error!("Failed to reregister {:?}, {:?}", self.token, e);
                      Err(e)
                  })
    }

    /// De-register every interest in events for this connection
    fn deregister(&mut self, event_loop: &mut EventLoop<Proxy>) -> io::Result<()> {
        event_loop.deregister(&self.sock).or_else(|e| {
            error!("Failed to deregister {:?}, {:?}", self.token, e);
            Err(e)
        })
    }
}
--------------------------------------------------------------------------------