├── .gitignore ├── Cargo.toml ├── LICENSE ├── README.md ├── examples ├── basic_async.rs └── basic_sync.rs └── src ├── lib.rs └── proxy ├── async └── mod.rs ├── mod.rs └── sync └── mod.rs /.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | target 3 | *.bk.rs 4 | *.rs.bk 5 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "meter_proxy" 3 | version = "0.2.3" 4 | authors = ["gmazzeo "] 5 | 6 | # A short blurb about the package. This is not rendered in any format when 7 | # uploaded to crates.io (aka this is not markdown). 8 | description = "A TCP proxy that measures latency and throughput of a client-server application" 9 | 10 | repository = "https://github.com/dzobbe/rust-proxy" 11 | 12 | 13 | 14 | # This points to a file in the repository (relative to this `Cargo.toml`). The 15 | # contents of this file are stored and indexed in the registry. 16 | readme = "README.md" 17 | 18 | # This is a small list of keywords used to categorize and search for this 19 | # package. 20 | keywords = ["proxy", "forwarder"] 21 | 22 | 23 | # If a project is using a nonstandard license, then this key may be specified in 24 | # lieu of the above key and must point to a file relative to this manifest 25 | # (similar to the readme key). 
26 | license = "MIT" 27 | 28 | 29 | [dependencies] 30 | time = "0.1.35" 31 | docopt = "0.6" 32 | rustc-serialize = "0.3" 33 | libc = "0.2.0" 34 | lazy_static = "0.1.0" 35 | ansi_term = "0.9" 36 | futures = "0.1" 37 | futures-cpupool = "0.1" 38 | tokio-core = "0.1" 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 dzobbe 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # rust-proxy [![Crates.io](https://img.shields.io/crates/v/meter_proxy.svg)](https://crates.io/crates/meter_proxy) 2 | This is a TCP meter proxy implemented in Rust, which interposes between a client and a server and measures the latency and the throughput. Two versions are provided: synchronous and asynchronous. Further checks and tests need to be done. Suggestions are welcome ;) 3 | 4 | ## Requirements 5 | Of course, you will need Rust installed. If you haven't already, get it here: [rust-lang.org](https://www.rust-lang.org). You will also need [Cargo](https://crates.io) to compile easily. The rustc compiler version required is 1.15.0-nightly. 6 | 7 | ## Usage 8 | To use the proxy, add this to your `Cargo.toml`: 9 | 10 | ```toml 11 | [dependencies] 12 | meter_proxy = "0.2.3" 13 | ``` 14 | 15 | ## Example Meter Proxy Usage 16 | 17 | ```rust 18 | extern crate meter_proxy; 19 | use meter_proxy::proxy::sync::SyncMeterProxy; 20 | use std::thread; 21 | use std::time::Duration; 22 | 23 | fn main() { 24 | println!("Starting Proxy"); 25 | let meter_proxy=SyncMeterProxy::new("127.0.0.1".to_string(), 12347,"127.0.0.1".to_string(),12349); 26 | 27 | let meter_proxy_c=meter_proxy.clone(); 28 | let child_proxy = thread::spawn(move || { 29 | meter_proxy_c.start(); 30 | }); 31 | 32 | let mut n=0; 33 | let sleep_time=2000; 34 | while n<100{ 35 | n += 1; 36 | 37 | //Do something 38 | thread::sleep(Duration::from_millis(sleep_time)); 39 | 40 | println!("The measured latency 'till now: {:.3} ms",meter_proxy.get_latency_ms()); 41 | println!("The measured throughput 'till now: {:.3}",meter_proxy.get_num_bytes_rcvd() as f64/(n*sleep_time) as f64); 42 | } 43 | 44 | meter_proxy.stop_and_reset(); 45 | let _ = child_proxy.join(); 46 | 47 | } 48 | ``` 49 | 50 | 51 | ## License 52 | 53 | MIT © [Giovanni 
Mazzeo](https://github.com/dzobbe) 54 | 55 | -------------------------------------------------------------------------------- /examples/basic_async.rs: -------------------------------------------------------------------------------- 1 | extern crate meter_proxy; 2 | 3 | use meter_proxy::proxy::async::AsyncMeterProxy; 4 | use std::thread; 5 | use std::time::Duration; 6 | 7 | fn main() { 8 | println!("Starting Proxy"); 9 | let meter_proxy = AsyncMeterProxy::new("127.0.0.1".to_string(), 10 | 12347, 11 | "127.0.0.1".to_string(), 12 | 12349); 13 | 14 | let meter_proxy_c = meter_proxy.clone(); 15 | let child_proxy = thread::spawn(move || { 16 | meter_proxy_c.start(); 17 | }); 18 | 19 | let mut n = 0; 20 | let sleep_time = 2000; 21 | while n < 100 { 22 | n += 1; 23 | 24 | // Do something 25 | thread::sleep(Duration::from_millis(sleep_time)); 26 | 27 | println!("The measured latency 'till now: {:.3} ms", 28 | meter_proxy.get_latency()); 29 | println!("The measured throughput 'till now: {:.3}", 30 | meter_proxy.get_num_kbytes_rcvd() as f64 / (n * sleep_time) as f64); 31 | } 32 | 33 | meter_proxy.reset(); 34 | let _ = child_proxy.join(); 35 | 36 | } 37 | -------------------------------------------------------------------------------- /examples/basic_sync.rs: -------------------------------------------------------------------------------- 1 | extern crate meter_proxy; 2 | 3 | use meter_proxy::proxy::sync::SyncMeterProxy; 4 | use std::thread; 5 | use std::time::Duration; 6 | 7 | fn main() { 8 | println!("Starting Proxy"); 9 | let meter_proxy = SyncMeterProxy::new("127.0.0.1".to_string(), 10 | 12347, 11 | "127.0.0.1".to_string(), 12 | 12349); 13 | 14 | let meter_proxy_c = meter_proxy.clone(); 15 | let child_proxy = thread::spawn(move || { 16 | meter_proxy_c.start(); 17 | }); 18 | 19 | let mut n = 0; 20 | let sleep_time = 2000; 21 | while n < 100 { 22 | n += 1; 23 | 24 | // Do something 25 | thread::sleep(Duration::from_millis(sleep_time)); 26 | 27 | println!("The measured 
latency 'till now: {:.3} ms", 28 | meter_proxy.get_latency_ms()); 29 | println!("The measured throughput 'till now: {:.3}", 30 | meter_proxy.get_num_bytes_rcvd() as f64 / (n * sleep_time) as f64); 31 | } 32 | 33 | meter_proxy.stop_and_reset(); 34 | let _ = child_proxy.join(); 35 | 36 | } 37 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | /// Copyright 2016 Giovanni Mazzeo. See the COPYRIGHT 2 | /// file at the top-level directory of this distribution and at 3 | /// http://rust-lang.org/COPYRIGHT. 4 | /// 5 | /// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or 6 | /// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license 7 | /// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your 8 | /// option. This file may not be copied, modified, or distributed 9 | /// except according to those terms. 10 | /// /////////////////////////////////////////////////////////////////////////// 11 | #[macro_use] 12 | extern crate lazy_static; 13 | extern crate libc; 14 | extern crate time; 15 | extern crate ansi_term; 16 | #[macro_use] 17 | extern crate futures; 18 | #[macro_use] 19 | extern crate tokio_core; 20 | extern crate futures_cpupool; 21 | 22 | pub mod proxy; 23 | -------------------------------------------------------------------------------- /src/proxy/async/mod.rs: -------------------------------------------------------------------------------- 1 | use std::panic; 2 | use std::cell::RefCell; 3 | use std::rc::Rc; 4 | use std::env; 5 | use std::io::{self, Read, Write}; 6 | use std::net::{SocketAddr, IpAddr}; 7 | use std::net::Shutdown; 8 | use std::str; 9 | use std::time::Duration; 10 | use libc; 11 | use futures; 12 | use futures::{Future, Poll, Async}; 13 | use futures::stream::Stream; 14 | use futures_cpupool::CpuPool; 15 | use tokio_core::reactor::{Core, Handle, Timeout}; 16 | use tokio_core::net::{TcpStream, TcpListener}; 17 | use tokio_core::io::{Io, read_exact, write_all, Window}; 18 | use std::sync::{Arc, Mutex, RwLock}; 19 
| 20 | 21 | lazy_static! { 22 | static ref ERROR: Arc> = Arc::new(Mutex::new(false)); 23 | } 24 | 25 | #[derive(Clone, Debug)] 26 | pub struct AsyncMeterProxy { 27 | pub back_address: String, 28 | pub back_port: u16, 29 | pub front_address: String, 30 | pub front_port: u16, 31 | pub num_bytes: Arc>, 32 | pub num_resp: Arc>, 33 | } 34 | 35 | 36 | impl AsyncMeterProxy { 37 | pub fn new(b_addr: String, b_port: u16, f_addr: String, f_port: u16) -> AsyncMeterProxy { 38 | AsyncMeterProxy { 39 | back_address: b_addr, 40 | back_port: b_port, 41 | front_address: f_addr, 42 | front_port: f_port, 43 | num_bytes: Arc::new(Mutex::new(0.0)), 44 | num_resp: Arc::new(Mutex::new(0.0)), 45 | } 46 | } 47 | 48 | 49 | // Start the Proxy 50 | pub fn start(&self) { 51 | 52 | let rlim = libc::rlimit { 53 | rlim_cur: 4096, 54 | rlim_max: 4096, 55 | }; 56 | unsafe { 57 | libc::setrlimit(libc::RLIMIT_NOFILE, &rlim); 58 | } 59 | 60 | let mut core = Core::new().unwrap(); 61 | let mut lp = Core::new().unwrap(); 62 | let pool = CpuPool::new(4); 63 | let buffer = Rc::new(RefCell::new(vec![0; 64 * 1024])); 64 | let handle = lp.handle(); 65 | 66 | 67 | let f_addr_c = self.front_address.clone(); 68 | let b_addr_c = self.back_address.clone(); 69 | 70 | let front_address = 71 | (f_addr_c + ":" + &self.front_port.to_string()).parse::().unwrap(); 72 | let back_address = 73 | (b_addr_c + ":" + &self.back_port.to_string()).parse::().unwrap(); 74 | 75 | let listener = TcpListener::bind(&front_address, &handle).unwrap(); 76 | 77 | 78 | // Construct a future representing our server. This future processes all 79 | // incoming connections and spawns a new task for each client which will do 80 | // the proxy work. 
81 | let clients = listener.incoming().map(move |(socket, addr)| { 82 | (Client { 83 | buffer: buffer.clone(), 84 | pool: pool.clone(), 85 | handle: handle.clone(), 86 | num_bytes: self.num_bytes.clone(), 87 | num_resp: self.num_resp.clone(), 88 | } 89 | .serve(socket, back_address), 90 | addr) 91 | }); 92 | let handle = lp.handle(); 93 | let server = clients.for_each(|(client, addr)| { 94 | handle.spawn(client.then(move |res| { 95 | match res { 96 | Ok((a, b)) => println!("proxied {}/{} bytes for {}", a, b, addr), 97 | Err(e) => {; 98 | } 99 | } 100 | futures::finished(()) 101 | })); 102 | Ok(()) 103 | }); 104 | 105 | 106 | // Now that we've got our future ready to go, let's run it! 107 | // 108 | // This `run` method will return the resolution of the future itself, but 109 | // our `server` futures will resolve to `io::Result<()>`, so we just want to 110 | // assert that it didn't hit an error. 111 | lp.run(server).unwrap(); 112 | } 113 | 114 | 115 | /** 116 | Reset the proxy server counter 117 | **/ 118 | pub fn reset(&self) { 119 | { 120 | let mut n_bytes = self.num_bytes.lock().unwrap(); 121 | *n_bytes = 0.0; 122 | } 123 | { 124 | let mut n_resp = self.num_resp.lock().unwrap(); 125 | *n_resp = 0.0; 126 | } 127 | 128 | } 129 | 130 | 131 | pub fn get_num_kbytes_rcvd(&self) -> f64 { 132 | let n_bytes = self.num_bytes.lock().unwrap(); 133 | return *n_bytes as f64 / 1024.0f64; 134 | } 135 | 136 | pub fn get_latency(&self) -> f64 { 137 | let n_resp = self.num_resp.lock().unwrap(); 138 | return *n_resp as f64 / 1000000.0f64; 139 | } 140 | } 141 | 142 | // Data used to when processing a client to perform various operations over its 143 | // lifetime. 
144 | struct Client { 145 | buffer: Rc>>, 146 | pool: CpuPool, 147 | handle: Handle, 148 | num_bytes: Arc>, 149 | num_resp: Arc>, 150 | } 151 | 152 | impl Client { 153 | fn serve(self, 154 | front_socket: TcpStream, 155 | back_addr: SocketAddr) 156 | -> Box> { 157 | 158 | let pool = self.pool.clone(); 159 | 160 | 161 | // Now that we've got a socket address to connect to, let's actually 162 | // create a connection to that socket! 163 | // 164 | // To do this, we use our `handle` field, a handle to the event loop, to 165 | // issue a connection to the address we've figured out we're going to 166 | // connect to. Note that this `tcp_connect` method itself returns a 167 | // future resolving to a `TcpStream`, representing how long it takes to 168 | // initiate a TCP connection to the remote. 169 | 170 | 171 | let handle = self.handle.clone(); 172 | 173 | let pair = TcpStream::connect(&back_addr, &handle) 174 | .and_then(|back_socket| futures::lazy(|| Ok((back_socket, front_socket)))); 175 | 176 | 177 | let buffer = self.buffer.clone(); 178 | let n_bytes = self.num_bytes.clone(); 179 | let n_resp = self.num_resp.clone(); 180 | 181 | mybox(pair.and_then(|(back, front)| { 182 | let back = Rc::new(back); 183 | let front = Rc::new(front); 184 | 185 | let half1 = TransferFrontBack::new(back.clone(), 186 | front.clone(), 187 | buffer.clone(), 188 | n_bytes.clone(), 189 | n_resp.clone()); 190 | let half2 = TransferBackFront::new(front, back, buffer, n_bytes, n_resp); 191 | half1.join(half2) 192 | })) 193 | } 194 | } 195 | 196 | 197 | fn mybox(f: F) -> Box> { 198 | Box::new(f) 199 | } 200 | 201 | 202 | 203 | /// A future representing reading all data from one side of a proxy connection 204 | /// and writing it to another. 205 | /// 206 | /// This future, unlike the handshake performed above, is implemented via a 207 | /// custom implementation of the `Future` trait rather than with combinators. 
208 | /// This is intended to show off how the combinators are not all that can be 209 | /// done with futures, but rather more custom (or optimized) implementations can 210 | /// be implemented with just a trait impl! 211 | struct TransferBackFront { 212 | // The two I/O objects we'll be reading. 213 | reader: Rc, 214 | writer: Rc, 215 | 216 | // The shared global buffer that all connections on our server are using. 217 | buf: Rc>>, 218 | 219 | // The number of bytes we've written so far. 220 | amt: u64, 221 | num_bytes: Arc>, 222 | num_resp: Arc>, 223 | } 224 | 225 | impl TransferBackFront { 226 | fn new(reader: Rc, 227 | writer: Rc, 228 | buffer: Rc>>, 229 | n_bytes: Arc>, 230 | n_resp: Arc>) 231 | -> TransferBackFront { 232 | 233 | TransferBackFront { 234 | reader: reader, 235 | writer: writer, 236 | buf: buffer, 237 | amt: 0, 238 | num_bytes: n_bytes, 239 | num_resp: n_resp, 240 | } 241 | } 242 | } 243 | 244 | // Here we implement the `Future` trait for `Transfer` directly. This does not 245 | // use any combinators, and shows how you might implement it in custom 246 | // situations if needed. 247 | impl Future for TransferBackFront { 248 | // Our future resolves to the number of bytes transferred, or an I/O error 249 | // that happens during the connection, if any. 250 | type Item = u64; 251 | type Error = io::Error; 252 | 253 | 254 | fn poll(&mut self) -> Poll { 255 | let mut buffer = self.buf.borrow_mut(); 256 | 257 | 258 | // Here we loop over the two TCP halves, reading all data from one 259 | // connection and writing it to another. The crucial performance aspect 260 | // of this server, however, is that we wait until both the read half and 261 | // the write half are ready on the connection, allowing the buffer to 262 | // only be temporarily used in a small window for all connections. 
263 | loop { 264 | let read_ready = self.reader.poll_read().is_ready(); 265 | 266 | let write_ready = self.writer.poll_write().is_ready(); 267 | if !read_ready || !write_ready { 268 | return Ok(Async::NotReady); 269 | } 270 | 271 | 272 | let n = try_nb!((&*self.reader).read(&mut buffer)); 273 | if n == 0 { 274 | try!(self.writer.shutdown(Shutdown::Write)); 275 | return Ok(self.amt.into()); 276 | } 277 | 278 | self.amt += n as u64; 279 | 280 | 281 | // Unlike above, we don't handle `WouldBlock` specially, because 282 | // that would play into the logic mentioned above (tracking read 283 | // rates and write rates), so we just ferry along that error for 284 | // now. 285 | let m = try!((&*self.writer).write(&buffer[..n])); 286 | assert_eq!(n, m); 287 | } 288 | } 289 | } 290 | 291 | struct TransferFrontBack { 292 | // The two I/O objects we'll be reading. 293 | reader: Rc, 294 | writer: Rc, 295 | 296 | // The shared global buffer that all connections on our server are using. 297 | buf: Rc>>, 298 | 299 | // The number of bytes we've written so far. 300 | amt: u64, 301 | 302 | num_bytes: Arc>, 303 | num_resp: Arc>, 304 | } 305 | 306 | impl TransferFrontBack { 307 | fn new(reader: Rc, 308 | writer: Rc, 309 | buffer: Rc>>, 310 | n_bytes: Arc>, 311 | n_resp: Arc>) 312 | -> TransferFrontBack { 313 | TransferFrontBack { 314 | reader: reader, 315 | writer: writer, 316 | buf: buffer, 317 | amt: 0, 318 | num_bytes: n_bytes, 319 | num_resp: n_resp, 320 | } 321 | } 322 | } 323 | 324 | 325 | impl Future for TransferFrontBack { 326 | // Our future resolves to the number of bytes transferred, or an I/O error 327 | // that happens during the connection, if any. 328 | type Item = u64; 329 | type Error = io::Error; 330 | 331 | 332 | fn poll(&mut self) -> Poll { 333 | let mut buffer = self.buf.borrow_mut(); 334 | 335 | // Here we loop over the two TCP halves, reading all data from one 336 | // connection and writing it to another. 
The crucial performance aspect 337 | // of this server, however, is that we wait until both the read half and 338 | // the write half are ready on the connection, allowing the buffer to 339 | // only be temporarily used in a small window for all connections. 340 | loop { 341 | let read_ready = self.reader.poll_read().is_ready(); 342 | 343 | let write_ready = self.writer.poll_write().is_ready(); 344 | if !read_ready || !write_ready { 345 | return Ok(Async::NotReady); 346 | } 347 | 348 | 349 | let n = try_nb!((&*self.reader).read(&mut buffer)); 350 | if n == 0 { 351 | try!(self.writer.shutdown(Shutdown::Write)); 352 | return Ok(self.amt.into()); 353 | } 354 | 355 | self.amt += n as u64; 356 | 357 | 358 | { 359 | let mut n_bytes = self.num_bytes.lock().unwrap(); 360 | *n_bytes += n as f64; 361 | } 362 | 363 | { 364 | let mut n_resp = self.num_resp.lock().unwrap(); 365 | *n_resp += 1.0; 366 | } 367 | 368 | // Unlike above, we don't handle `WouldBlock` specially, because 369 | // that would play into the logic mentioned above (tracking read 370 | // rates and write rates), so we just ferry along that error for 371 | // now. 372 | let m = try!((&*self.writer).write(&buffer[..n])); 373 | assert_eq!(n, m); 374 | } 375 | } 376 | } 377 | 378 | fn other(desc: &str) -> io::Error { 379 | io::Error::new(io::ErrorKind::Other, desc) 380 | } 381 | -------------------------------------------------------------------------------- /src/proxy/mod.rs: -------------------------------------------------------------------------------- 1 | pub mod sync; 2 | pub mod async; 3 | -------------------------------------------------------------------------------- /src/proxy/sync/mod.rs: -------------------------------------------------------------------------------- 1 | /// Copyright 2016 Giovanni Mazzeo. See the COPYRIGHT 2 | /// file at the top-level directory of this distribution and at 3 | /// http://rust-lang.org/COPYRIGHT. 
4 | /// 5 | /// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or 6 | /// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license 7 | /// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your 8 | /// option. This file may not be copied, modified, or distributed 9 | /// except according to those terms. 10 | /// /////////////////////////////////////////////////////////////////////////// 11 | 12 | use time; 13 | use libc; 14 | use ansi_term::Colour::{Red, Yellow}; 15 | use std::net::{TcpListener, TcpStream, Shutdown, SocketAddr,IpAddr}; 16 | use std::sync::{Arc, Mutex, RwLock}; 17 | use std::sync::atomic::{AtomicBool, Ordering}; 18 | use std::{thread, str}; 19 | use std::time::Duration; 20 | use std::io::prelude::*; 21 | use libc::setrlimit; 22 | use std::collections::HashMap; 23 | use std::sync::mpsc::{channel, Sender, Receiver}; 24 | 25 | 26 | /** 27 | The MeterProxy is a proxy which interposes between two applications and measures 28 | their latency and throughput (KB/s). The convention is the following: 29 | 30 | SERVER <=====> (back) PROXY (front) <=====> CLIENT 31 | 32 | Therefore, the proxy will listen on the front-side for requests from a client and will 33 | forward the requests on the back-side to the server 34 | **/ 35 | 36 | 37 | //////////////////////////////////////////////////////////////////////// 38 | //////////////////////////////////////////////////////////////////////// 39 | /** 40 | Definition of a Shared Counter for the THROUGHPUT evaluation and a 41 | Time Vector for the LATENCY evaluation. Each thread will put its measurement. 
42 | **/ 43 | struct SharedCounter(Arc>); 44 | impl SharedCounter { 45 | fn new() -> Self { 46 | SharedCounter(Arc::new(Mutex::new(0))) 47 | } 48 | fn increment(&self, quantity: usize) { 49 | let mut counter = self.0.lock().unwrap(); 50 | *counter = *counter + quantity; 51 | } 52 | fn get(&self) -> usize { 53 | let counter = self.0.lock().unwrap(); 54 | *counter 55 | } 56 | 57 | fn reset(&self) { 58 | let mut counter = self.0.lock().unwrap(); 59 | *counter = 0; 60 | } 61 | } 62 | 63 | struct SharedTimeVec(Arc>>); 64 | impl SharedTimeVec { 65 | fn new() -> Self { 66 | SharedTimeVec(Arc::new(Mutex::new(Vec::new()))) 67 | } 68 | 69 | fn insert(&self, value: u64) { 70 | let mut time_vec = self.0.lock().unwrap(); 71 | time_vec.push(value); 72 | } 73 | 74 | fn get_avg_value(&self) -> f64 { 75 | let mut time_vec = self.0.lock().unwrap(); 76 | let sum: u64= time_vec.iter().sum(); 77 | return sum as f64/time_vec.len() as f64; 78 | } 79 | 80 | fn reset(&self) { 81 | let mut time_vec = self.0.lock().unwrap(); 82 | time_vec.clear(); 83 | } 84 | 85 | } 86 | 87 | lazy_static! 
{ 88 | static ref TIME_TABLE: SharedTimeVec = {SharedTimeVec::new()}; 89 | static ref NUM_BYTES : SharedCounter = {SharedCounter::new()}; 90 | 91 | static ref ERROR: Arc> = Arc::new(Mutex::new(false)); 92 | } 93 | 94 | //////////////////////////////////////////////////////////////////////// 95 | //////////////////////////////////////////////////////////////////////// 96 | 97 | 98 | #[derive(Clone)] 99 | pub struct SyncMeterProxy { 100 | pub back_address : String, 101 | pub back_port : u16, 102 | pub front_address: String, 103 | pub front_port : u16, 104 | pub reset_lock_flag: Arc>, 105 | } 106 | 107 | 108 | impl SyncMeterProxy { 109 | 110 | pub fn new(b_addr: String, b_port: u16, f_addr: String, f_port: u16) -> SyncMeterProxy { 111 | SyncMeterProxy { 112 | back_address : b_addr, 113 | back_port : b_port, 114 | front_address: f_addr, 115 | front_port : f_port, 116 | reset_lock_flag: Arc::new(RwLock::new(false)), 117 | } 118 | } 119 | 120 | 121 | //Start the Proxy 122 | pub fn start(&self) { 123 | // Increase file descriptor resources limits (this avoids the risk of exception: "Too many open files (os error 24)") 124 | let rlim = libc::rlimit { 125 | rlim_cur: 4096, 126 | rlim_max: 4096, 127 | }; 128 | unsafe { 129 | libc::setrlimit(libc::RLIMIT_NOFILE, &rlim); 130 | } 131 | 132 | let targ_addr: IpAddr = self.front_address.parse() 133 | .expect("Unable to parse FRONT-side Address"); 134 | 135 | //Start the proxy listening on the FRONT-side 136 | let acceptor = TcpListener::bind((targ_addr, self.front_port)).unwrap(); 137 | let mut children = vec![]; 138 | 139 | for stream in acceptor.incoming() { 140 | 141 | let reset_lock_flag_c = self.reset_lock_flag.clone(); 142 | let back_addr_c = self.clone().back_address; 143 | let back_port_c = self.back_port; 144 | 145 | if *reset_lock_flag_c.read().unwrap() == true { 146 | // Reset Flag raised: This is a way to interrupt from an external thread the ProxyServer 147 | break; 148 | } 149 | 150 | match stream { 151 | Err(e) => 
println!("Strange connection broken: {}", e), 152 | Ok(stream) => { 153 | children.push(thread::spawn(move || { 154 | // connection succeeded 155 | let mut stream_c = stream.try_clone().unwrap(); 156 | let stream_c2 = stream.try_clone().unwrap(); 157 | stream_c2.set_read_timeout(Some(Duration::new(3, 0))); 158 | 159 | //Start a pipe for every connection coming from the client 160 | SyncMeterProxy::start_pipe(stream_c, back_addr_c, back_port_c); 161 | drop(stream); 162 | 163 | })); 164 | 165 | } 166 | } 167 | } 168 | for child in children { 169 | // Wait for the thread to finish. Returns a result. 170 | let _ = child.join(); 171 | } 172 | drop(acceptor); 173 | return; 174 | } 175 | 176 | 177 | /** 178 | Stop the proxy server and clean resources 179 | **/ 180 | pub fn stop_and_reset(&self) { 181 | *self.reset_lock_flag.write().unwrap() = true; 182 | NUM_BYTES.reset(); 183 | TIME_TABLE.reset(); 184 | 185 | // Spurious connection needed to break the proxy server loop 186 | let targ_addr: IpAddr = self.front_address.parse() 187 | .expect("Unable to parse FRONT-side Address"); 188 | TcpStream::connect((targ_addr, self.front_port)); 189 | } 190 | 191 | 192 | pub fn get_num_bytes_rcvd(&self) -> usize { 193 | return NUM_BYTES.get(); 194 | } 195 | 196 | pub fn get_latency_ms(&self) -> f64 { 197 | return TIME_TABLE.get_avg_value() / 1000000.0f64; 198 | } 199 | 200 | fn start_pipe(front: TcpStream, target_addr: String, target_port: u16) { 201 | 202 | let targ_addr: IpAddr = target_addr.parse() 203 | .expect("Unable to parse BACK-side Address"); 204 | let mut back = match TcpStream::connect((targ_addr, target_port)) { 205 | Err(e) => { 206 | 207 | let mut err = ERROR.lock().unwrap(); 208 | if *err == false { 209 | println!("{} Unable to connect to the Target Application. 
Maybe a bad \ 210 | configuration: {}", 211 | Red.paint("*****ERROR***** --> "), 212 | e); 213 | }; 214 | *err = true; 215 | front.shutdown(Shutdown::Both); 216 | drop(front); 217 | return; 218 | } 219 | Ok(b) => b, 220 | }; 221 | 222 | 223 | 224 | let front_c = front.try_clone().unwrap(); 225 | let back_c = back.try_clone().unwrap(); 226 | 227 | let timedOut = Arc::new(AtomicBool::new(false)); 228 | let timedOut_c = timedOut.clone(); 229 | 230 | 231 | let latency_mutex: Arc> = Arc::new(Mutex::new(0)); 232 | let (tx, rx) = channel(); 233 | let latency_mutex_c = latency_mutex.clone(); 234 | 235 | 236 | thread::spawn(move || { 237 | SyncMeterProxy::keep_copying_bench_2_targ(front, back, timedOut, latency_mutex, tx); 238 | }); 239 | 240 | thread::spawn(move || { 241 | SyncMeterProxy::keep_copying_targ_2_bench(back_c, front_c, timedOut_c, latency_mutex_c, rx); 242 | }); 243 | 244 | 245 | } 246 | 247 | /** 248 | Pipe BACK(Server)<======FRONT(Client) 249 | **/ 250 | fn keep_copying_bench_2_targ(mut front: TcpStream, 251 | mut back: TcpStream, 252 | timedOut: Arc, 253 | time_mutex: Arc>, tx: Sender) { 254 | 255 | front.set_read_timeout(Some(Duration::new(1000, 0))); 256 | let mut buf = [0; 1024]; 257 | 258 | 259 | loop { 260 | 261 | let read = match front.read(&mut buf) { 262 | Err(ref err) => { 263 | let other = timedOut.swap(true, Ordering::AcqRel); 264 | if other { 265 | // the other side also timed-out / errored, so lets go 266 | drop(front); 267 | drop(back); 268 | return; 269 | } 270 | // normal errors, just stop 271 | front.shutdown(Shutdown::Both); 272 | back.shutdown(Shutdown::Both); 273 | // normal errors, just stop 274 | drop(front); 275 | drop(back); 276 | return; // normal errors, stop 277 | } 278 | Ok(r) => r, 279 | }; 280 | 281 | 282 | let mut start_time = time_mutex.lock().unwrap(); 283 | *start_time=time::precise_time_ns(); 284 | 285 | timedOut.store(false, Ordering::Release); 286 | match back.write(&buf[0..read]) { 287 | Err(..) 
=> { 288 | timedOut.store(true, Ordering::Release); 289 | // normal errors, just stop 290 | front.shutdown(Shutdown::Both); 291 | back.shutdown(Shutdown::Both); 292 | drop(front); 293 | drop(back); 294 | return; 295 | } 296 | Ok(..) => (), 297 | }; 298 | 299 | tx.send(1).unwrap(); 300 | } 301 | 302 | } 303 | 304 | /** 305 | Pipe BACK(Server)======>FRONT(Client) 306 | **/ 307 | fn keep_copying_targ_2_bench(mut back: TcpStream, 308 | mut front: TcpStream, 309 | timedOut: Arc, 310 | time_mutex: Arc>, rx: Receiver) { 311 | 312 | back.set_read_timeout(Some(Duration::new(1000, 0))); 313 | let mut buf = [0; 1024]; 314 | 315 | // SeqNumber for latency measuring 316 | let mut seq_number = 0; 317 | 318 | loop { 319 | 320 | let read = match back.read(&mut buf) { 321 | Err(ref err) => { 322 | let other = timedOut.swap(true, Ordering::AcqRel); 323 | if other { 324 | // the other side also timed-out / errored, so lets go 325 | drop(back); 326 | drop(front); 327 | return; 328 | } 329 | // normal errors, just stop 330 | front.shutdown(Shutdown::Both); 331 | back.shutdown(Shutdown::Both); 332 | drop(back); 333 | drop(front); 334 | 335 | return; // normal errors, stop 336 | } 337 | Ok(r) => r, 338 | }; 339 | 340 | match rx.try_recv(){ 341 | Ok(r) => { 342 | let res = *(time_mutex.lock().unwrap()); 343 | TIME_TABLE.insert(time::precise_time_ns()-res); 344 | }, 345 | RecvError => {}, 346 | }; 347 | 348 | // Increment the number of bytes read counter 349 | NUM_BYTES.increment(read); 350 | 351 | 352 | timedOut.store(false, Ordering::Release); 353 | match front.write(&buf[0..read]) { 354 | Err(..) => { 355 | timedOut.store(true, Ordering::Release); 356 | // normal errors, just stop 357 | front.shutdown(Shutdown::Both); 358 | back.shutdown(Shutdown::Both); 359 | drop(back); 360 | drop(front); 361 | return; 362 | } 363 | Ok(..) 
=> (), 364 | }; 365 | 366 | 367 | } 368 | 369 | drop(back); 370 | drop(front); 371 | 372 | 373 | } 374 | } 375 | 376 | --------------------------------------------------------------------------------