├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── README.md ├── examples ├── client.rs ├── server.rs └── stress.rs ├── sbahn.jpg ├── src ├── client.rs ├── constants.rs ├── handler.rs ├── lib.rs ├── message.rs ├── network.rs ├── storage.rs └── storage_node.rs └── tests ├── end_to_end.rs ├── lib.rs └── storage_node.rs /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - stable 4 | - beta 5 | - nightly 6 | matrix: 7 | allow_failures: 8 | - rust: nightly 9 | env: 10 | - RUST_BACKTRACE=1 11 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sbahn" 3 | version = "0.1.0" 4 | authors = ["Esteban Küber "] 5 | 6 | [dependencies] 7 | bincode = "0.4.0" 8 | env_logger = "0.3.2" 9 | eventual = "0.1.5" 10 | log = "0.3.4" 11 | rustc-serialize = "0.3.16" 12 | time = "0.1.34" 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Esteban Kuber 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # sbahn: Pure Rust Distributed Hash Table [![Build Status](https://travis-ci.org/estebank/sbahn.svg?branch=master)](https://travis-ci.org/estebank/sbahn) 2 | 3 | sbahn is a toy implementation of a [Distributed][1] [Hash][2] [Table][3]. 4 | 5 | The main driver behind it was to get acquainted with Rust, so some of the code 6 | will not be particularly elegant.
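
Requests are bincode-encoded structs sent over TCP to a handler node, which fans them out to the storage nodes of the appropriate shard. A minimal client sketch, adapted from `examples/client.rs` (it assumes a handler node from `examples/server.rs` is already listening on 127.0.0.1:1100):

```rust
extern crate eventual;
extern crate sbahn;

use eventual::*; // brings `.await()` for the returned Future into scope
use sbahn::client;
use sbahn::message;
use std::net::{Ipv4Addr, SocketAddrV4};

fn main() {
    // Address of a running handler node (see examples/server.rs).
    let handler = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1100);
    let client = client::Client::new(vec![handler]);

    let key = message::Key {
        dataset: vec![1, 2, 3],
        pkey: vec![4, 5, 6],
        lkey: vec![7, 8, 9],
    };
    let request = message::Request {
        action: message::Action::Write { key: key, content: vec![42] },
        consistency: message::Consistency::One,
    };
    // `send` returns an eventual::Future; `.await()` blocks until the handler replies.
    let response = client.send(&request).await().unwrap();
    println!("Response: {:?}", response);
}
```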
7 | 8 | ![Berlin Ringbahn Map](https://raw.github.com/estebank/sbahn/master/sbahn.jpg) 9 | 10 | [1]: https://en.wikipedia.org/wiki/Distributed_hash_table 11 | [2]: http://stackoverflow.com/questions/144360/simple-basic-explanation-of-a-distributed-hash-table-dht 12 | [3]: https://www.ietf.org/proceedings/65/slides/plenaryt-2.pdf 13 | -------------------------------------------------------------------------------- /examples/client.rs: -------------------------------------------------------------------------------- 1 | extern crate env_logger; 2 | extern crate eventual; 3 | #[macro_use] 4 | extern crate log; 5 | extern crate sbahn; 6 | 7 | use sbahn::client; 8 | use sbahn::message; 9 | use eventual::*; 10 | use std::net::{Ipv4Addr, SocketAddrV4}; 11 | 12 | 13 | fn main() { 14 | let _ = env_logger::init(); 15 | 16 | let target = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1100); 17 | let key = message::Key { 18 | dataset: vec![1, 2, 3], 19 | pkey: vec![4, 5, 6], 20 | lkey: vec![7, 8, 9], 21 | }; 22 | let key2 = message::Key { 23 | dataset: vec![1, 2, 3], 24 | pkey: vec![4, 5, 0], 25 | lkey: vec![7, 8, 9], 26 | }; 27 | let messages = vec![ 28 | message::Action::Write { 29 | key: key.to_owned(), 30 | content: vec![1, 2, 3], 31 | }, 32 | message::Action::Write { 33 | key: key.to_owned(), 34 | content: vec![4, 5, 6], 35 | }, 36 | message::Action::Read { 37 | key: key.to_owned(), 38 | }, 39 | message::Action::Read { 40 | key: key2.to_owned(), 41 | }, 42 | message::Action::Write { 43 | key: key2.to_owned(), 44 | content: vec![0; 2048], 45 | }, 46 | message::Action::Read { 47 | key: key2.to_owned(), 48 | }, 49 | message::Action::Delete { 50 | key: key2.to_owned(), 51 | }, 52 | message::Action::Read { 53 | key: key2.to_owned(), 54 | }, 55 | ]; 56 | 57 | let client = client::Client::new(vec![target]); 58 | 59 | for m in messages { 60 | let content = message::Request { 61 | action: m, 62 | consistency: message::Consistency::One, 63 | }; 64 | 65 | let r = client.send(&content).await().unwrap(); 66 | println!("Response: {:?}", r); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /examples/server.rs: -------------------------------------------------------------------------------- 1 | extern crate sbahn; 2 | #[macro_use] 3 | extern crate log; 4 | extern crate env_logger; 5 | 6 | use sbahn::handler; 7 | use sbahn::storage::HashMapBackend; 8 | use sbahn::storage_node::StorageNode; 9 | use std::net::{Ipv4Addr, SocketAddrV4}; 10 | use std::thread; 11 | 12 | fn main() { 13 | let _ = env_logger::init(); 14 | 15 | let shards: Vec> = vec![ 16 | vec![SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1024), SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1025), SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1026)], 17 | vec![SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1027), SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1028), SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1029)], 18 | vec![SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1030), SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1031), SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1032)], 19 | ]; 20 | 21 | let e = &shards; 22 | let z = e.clone(); 23 | 24 | thread::spawn(move || { 25 | let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1100); 26 | let _shards = &z.to_owned(); 27 | println!("Handler Node @ {:?}", &addr); 28 | let _ = handler::listen(&addr, &_shards); 29 | }); 30 | 31 | let y = &shards.clone(); 32 | let x = y.iter(); 33 | 34 | 35 | // Create SHARD_SIZE storage nodes. 
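// Every address in a shard gets its own StorageNode (backed by the in-memory
// HashMapBackend), each listening on its own port, so each inner Vec is
// effectively a set of replicas for that shard.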
36 | for (pos, addresses) in x.enumerate() { 37 | for addr in addresses { 38 | let addr = addr.to_owned(); 39 | let shard_count = shards.len(); 40 | thread::spawn(move || { 41 | println!("Storage Node {:?} @ {:?}", &pos, &addr); 42 | let mut sn: StorageNode= StorageNode::new(&addr, pos, shard_count); 43 | &sn.listen(); 44 | }); 45 | } 46 | } 47 | loop {} 48 | } 49 | -------------------------------------------------------------------------------- /examples/stress.rs: -------------------------------------------------------------------------------- 1 | extern crate env_logger; 2 | extern crate eventual; 3 | extern crate sbahn; 4 | 5 | use eventual::*; 6 | use sbahn::client; 7 | use sbahn::message::*; 8 | use sbahn::message; 9 | use sbahn::storage::HashMapBackend; 10 | use sbahn::storage_node::StorageNode; 11 | use std::net::{Ipv4Addr, SocketAddrV4}; 12 | use std::thread; 13 | use std::time::Duration; 14 | 15 | fn main() { 16 | let _ = env_logger::init(); 17 | let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1050); 18 | thread::spawn(move || { 19 | let pos = 0; 20 | let mut sn: StorageNode= StorageNode::new(&addr, pos, 1); 21 | &sn.listen(); 22 | }); 23 | thread::sleep(Duration::from_millis(500)); 24 | for i in 0..255 { 25 | let insert_key = message::Key { 26 | dataset: vec![1, 2, 3], 27 | pkey: vec![4, 5, 6], 28 | lkey: vec![7, 8, 9], 29 | }; 30 | { 31 | let content = message::InternodeRequest::Write { 32 | key: insert_key.to_owned(), 33 | value: Value::Value { 34 | content: vec![i], 35 | timestamp: 10000000, 36 | }, 37 | }; 38 | let r = client::Client::send_to_node(&addr, &content).await(); 39 | match r { 40 | Ok(r) => match r { 41 | InternodeResponse::WriteAck {key, timestamp} => { 42 | assert_eq!(key, insert_key); 43 | assert_eq!(timestamp, 10000000); 44 | }, 45 | _ => panic!(), 46 | }, 47 | Err(e) => println!("####{:?}", e), 48 | } 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /sbahn.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/estebank/sbahn/2abcbc5b3467f8eae488e39034a13217858a9f59/sbahn.jpg -------------------------------------------------------------------------------- /src/client.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | use std::io::prelude::*; 3 | use std::net::{SocketAddrV4, TcpStream}; 4 | use std::time::Duration; 5 | use eventual::*; 6 | use message::{Action, Buffer, Consistency, Error, Key, Request, Result, ResponseMessage}; 7 | use bincode::rustc_serialize::{encode, decode}; 8 | use rustc_serialize::{Encodable, Decodable}; 9 | use bincode::SizeLimit; 10 | 11 | /// An sbahn client. 
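///
/// A `Client` is constructed from the addresses of one or more handler nodes.
/// A small, illustrative construction (no connection is made until a request
/// is actually sent):
///
/// ```
/// use sbahn::client::Client;
/// use std::net::{Ipv4Addr, SocketAddrV4};
///
/// let handler = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 1100);
/// let client = Client::new(vec![handler]);
/// ```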
12 | pub struct Client { 13 | /// List of addresses to frontend request handlers 14 | pub handlers: Vec<SocketAddrV4>, 15 | pub read_timeout: Option<Duration>, 16 | pub write_timeout: Option<Duration>, 17 | pub consistency: Consistency, 18 | } 19 | 20 | pub type MessageResult = Result<ResponseMessage>; 21 | 22 | impl Client { 23 | pub fn new(handlers: Vec<SocketAddrV4>) -> Client { 24 | Client { 25 | handlers: handlers, 26 | read_timeout: Some(Duration::from_millis(300)), 27 | write_timeout: Some(Duration::from_millis(300)), 28 | consistency: Consistency::Latest, 29 | } 30 | } 31 | 32 | pub fn with_timeouts(handlers: Vec<SocketAddrV4>, 33 | read_timeout: Duration, 34 | write_timeout: Duration) 35 | -> Client { 36 | Client { 37 | handlers: handlers, 38 | read_timeout: Some(read_timeout), 39 | write_timeout: Some(write_timeout), 40 | consistency: Consistency::Latest, 41 | } 42 | } 43 | 44 | pub fn with_consistency(handlers: Vec<SocketAddrV4>, consistency: Consistency) -> Client { 45 | Client { 46 | handlers: handlers, 47 | read_timeout: Some(Duration::from_millis(300)), 48 | write_timeout: Some(Duration::from_millis(300)), 49 | consistency: consistency, 50 | } 51 | } 52 | 53 | pub fn insert(&self, key: &Key, value: &Buffer) -> Future<ResponseMessage, Error> { 54 | let content = Request { 55 | action: Action::Write { 56 | key: key.to_owned(), 57 | content: value.to_owned(), 58 | }, 59 | consistency: self.consistency.clone(), 60 | }; 61 | self.send(&content) 62 | } 63 | 64 | pub fn get(&self, key: &Key) -> Future<ResponseMessage, Error> { 65 | let content = Request { 66 | action: Action::Read { key: key.to_owned() }, 67 | consistency: self.consistency.clone(), 68 | }; 69 | self.send(&content) 70 | } 71 | 72 | pub fn send(&self, message: &Request) -> Future<ResponseMessage, Error> { 73 | let target = &self.handlers[0]; 74 | Self::send_to_node(target, &message) 75 | } 76 | 77 | /// Sends a message that can be binary encoded to the Storage Node at `target`. 78 | pub fn send_to_node<T, K>(target: &SocketAddrV4, message: &T) -> Future<K, Error> 79 | where T: Debug + Encodable, 80 | K: Debug + Decodable + Send 81 | { 82 | Self::send_to_node_with_timeout(target, message, None) 83 | } 84 | 85 | /// Sends a message that can be binary encoded to the Storage Node at `target`. 86 | pub fn send_to_node_with_timeout<T, K>(target: &SocketAddrV4, 87 | message: &T, 88 | timeout: Option<Duration>) 89 | -> Future<K, Error> 90 | where T: Debug + Encodable, 91 | K: Debug + Decodable + Send 92 | { 93 | debug!("sending message {:?} to node {:?}", message, target); 94 | match encode(&message, SizeLimit::Infinite) { 95 | Ok(content) => { 96 | Self::send_buffer(target, content, timeout).and_then(|x| { 97 | match decode(&x) { 98 | Ok(m) => Ok(m), 99 | Err(_) => Err(Error::DecodeError), 100 | } 101 | }) 102 | } 103 | Err(_) => Future::error(Error::EncodeError), 104 | } 105 | } 106 | 107 | /// Sends a binary encoded message to the Storage Node at `target`.
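///
/// A fresh TCP connection is opened per request; the buffer is written in
/// full and the reply is read until the node closes its end of the
/// connection (`read_to_end`), subject to the optional `timeout`.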
108 | pub fn send_buffer(target: &SocketAddrV4, 109 | message: Vec, 110 | timeout: Option) 111 | -> Future, Error> { 112 | let target = target.to_owned(); 113 | Future::lazy(move || { 114 | match TcpStream::connect(target) { 115 | Ok(stream) => { 116 | let _ = stream.set_read_timeout(timeout); 117 | let _ = stream.set_write_timeout(timeout); 118 | 119 | let mut stream = stream; 120 | if stream.write(&message).is_err() { 121 | return Err(Error::ConnectionError); 122 | } 123 | let mut val: Buffer = vec![]; 124 | if stream.read_to_end(&mut val).is_err() { 125 | return Err(Error::ConnectionError); 126 | } 127 | Ok(val) 128 | } 129 | Err(e) => { 130 | error!("{:?}", e); 131 | Err(Error::ConnectionError) 132 | } 133 | } 134 | }) 135 | } 136 | 137 | pub fn set_timeouts(&mut self, timeout: Duration) { 138 | self.read_timeout = Some(timeout); 139 | self.write_timeout = Some(timeout); 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /src/constants.rs: -------------------------------------------------------------------------------- 1 | pub const BUFFER_SIZE: usize = 1024; 2 | -------------------------------------------------------------------------------- /src/handler.rs: -------------------------------------------------------------------------------- 1 | use bincode::SizeLimit; 2 | use bincode::rustc_serialize::{encode, decode}; 3 | use client; 4 | use eventual::*; 5 | use message::*; 6 | use network::NetworkRead; 7 | use std::fmt::Debug; 8 | use std::io::Write; 9 | use std::net::{SocketAddrV4, TcpListener, TcpStream}; 10 | use std::thread; 11 | use std::time::Duration; 12 | use time; 13 | 14 | 15 | /// Current Unix timestamp 16 | fn get_now() -> u64 { 17 | let now = time::get_time(); 18 | let sec = now.sec; 19 | let nsec = now.nsec; 20 | // Consider using the Timespec directly instead. 21 | ((sec as u64) * 1_000_000) + (nsec as u64 / 1000) 22 | } 23 | 24 | /// Obtain one (any) valid response from all the shard responses. 25 | fn read_one(key: &Key, responses: Vec>) -> client::MessageResult { 26 | debug!("Reading one"); 27 | 28 | 29 | Ok(sequence(responses) 30 | .await() 31 | .unwrap() 32 | .iter() 33 | .next() 34 | .map(|&(ref m, _)| { 35 | ResponseMessage { 36 | message: m.clone().to_response(), 37 | consistency: Consistency::One, 38 | } 39 | }) 40 | .unwrap_or(ResponseMessage { 41 | message: Response::Error { 42 | key: key.to_owned(), 43 | message: "All the storage nodes replied with errors.".to_string(), 44 | }, 45 | consistency: Consistency::One, 46 | })) 47 | } 48 | 49 | /// Obtain the newest `Value` among those stored in this shard's `StorageNode`s. 
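///
/// A strict majority of the shard's replicas must answer (more than
/// `responses.len() / 2`); among the successful answers the one carrying the
/// highest timestamp wins. If the quorum is not reached, an error `Response`
/// is returned instead.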
50 | fn read_latest(key: &Key, 51 | responses: Vec<Future<InternodeResponse, Error>>) 52 | -> client::MessageResult { 53 | debug!("Reading latest"); 54 | let responses_needed = responses.len() / 2; 55 | let ((_, success_count), latest) = 56 | sequence(responses) 57 | .reduce(((0, 0), None), |last, r| { 58 | let ((max_timestamp, success_count), max_response) = last; 59 | debug!("Max timestamp so far: {:?}", max_timestamp); 60 | debug!("Max max_response so far: {:?}", max_response); 61 | match r.get_timestamp() { 62 | Some(ts) => { 63 | if ts >= max_timestamp { 64 | ((ts, success_count + 1), Some(r.clone())) 65 | } else { 66 | ((max_timestamp, success_count + 1), max_response) 67 | } 68 | } 69 | None => ((max_timestamp, success_count), max_response), 70 | } 71 | }) 72 | .await() 73 | .unwrap(); 74 | 75 | debug!("Quorum read final response: {:?}", latest); 76 | debug!("Nodes responded successfully: {:?}", success_count); 77 | debug!("Nodes needed for successful read: {:?}", responses_needed); 78 | 79 | if success_count <= responses_needed { 80 | info!("Not enough storage nodes succeeded: {:?} of at least {:?}", 81 | success_count, 82 | responses_needed); 83 | Ok(ResponseMessage { 84 | message: Response::Error { 85 | key: key.to_owned(), 86 | message: "Not enough storage nodes succeeded to give a response".to_string(), 87 | }, 88 | consistency: Consistency::Latest, 89 | }) 90 | } else { 91 | match latest { 92 | Some(m) => { 93 | Ok(ResponseMessage { 94 | message: m.to_response(), 95 | consistency: Consistency::Latest, 96 | }) 97 | } 98 | None => { 99 | Ok(ResponseMessage { 100 | message: Response::Error { 101 | key: key.to_owned(), 102 | message: "No storage node returned a valid value.".to_string(), 103 | }, 104 | consistency: Consistency::Latest, 105 | }) 106 | } 107 | } 108 | } 109 | } 110 | 111 | /// Read from all nodes for this `Key`'s shard, and use `consistency` to 112 | /// collate the `StorageNode`s' responses. 113 | fn read(shards: &Vec<SocketAddrV4>, key: &Key, consistency: &Consistency) -> client::MessageResult { 114 | debug!("Read {:?} with {:?} consistency.", key, consistency); 115 | let mut responses: Vec<Future<InternodeResponse, Error>> = vec![]; 116 | for shard in shards { 117 | let response = read_from_other_storage_node(&shard, &key); 118 | responses.push(response); 119 | } 120 | match consistency { 121 | &Consistency::One => read_one(key, responses), 122 | &Consistency::Latest => read_latest(key, responses), 123 | } 124 | } 125 | 126 | /// Write to all nodes for this `Key`'s shard, and use `consistency` to 127 | /// determine when to acknowledge the write to the client.
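///
/// The write is fanned out to every replica of the shard and acknowledged as
/// soon as a majority of them (`shards.len() / 2 + 1`) return a `WriteAck`;
/// otherwise an error `Response` is sent back.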
128 | fn write(shards: &Vec<SocketAddrV4>, 129 | key: &Key, 130 | value: &Value, 131 | consistency: &Consistency) 132 | -> client::MessageResult { 133 | let mut responses: Vec<Future<InternodeResponse, Error>> = vec![]; 134 | for shard in shards { 135 | let response = write_to_other_storage_node(&shard, &key, &value); 136 | debug!("Write response for {:?}, {:?} @ Shard {:?}: {:?}", 137 | key, 138 | value, 139 | shard, 140 | response); 141 | responses.push(response); 142 | } 143 | let mut message = Response::Error { 144 | key: key.to_owned(), 145 | message: "Quorum write could not be accomplished.".to_string(), 146 | }; 147 | 148 | let mut write_count = 0; 149 | for response_result in responses { 150 | match response_result.await() { 151 | Ok(response) => { 152 | match response { 153 | InternodeResponse::WriteAck {key, timestamp} => { 154 | write_count += 1; 155 | if write_count >= (shards.len() / 2) + 1 { 156 | debug!("Successful write to majority of shards for {:?}", key); 157 | message = Response::WriteAck { 158 | key: key, 159 | timestamp: timestamp, 160 | }; 161 | } 162 | } 163 | _ => (), 164 | } 165 | } 166 | Err(_) => (), 167 | } 168 | } 169 | let r = ResponseMessage { 170 | message: message, 171 | consistency: consistency.to_owned(), 172 | }; 173 | 174 | Ok(r) 175 | } 176 | 177 | 178 | fn read_from_other_storage_node(target: &SocketAddrV4, 179 | key: &Key) 180 | -> Future<InternodeResponse, Error> { 181 | debug!("Forwarding read request for {:?} to shard at {:?}.", 182 | key, 183 | target); 184 | let content = InternodeRequest::Read { key: key.to_owned() }; 185 | let timeout = Some(Duration::from_millis(300)); 186 | client::Client::send_to_node_with_timeout(target, &content, timeout) 187 | } 188 | 189 | fn write_to_other_storage_node(target: &SocketAddrV4, 190 | key: &Key, 191 | value: &Value) 192 | -> Future<InternodeResponse, Error> { 193 | debug!("Forwarding write request for {:?} to shard at {:?}.", 194 | key, 195 | target); 196 | let request = InternodeRequest::Write { 197 | key: key.to_owned(), 198 | value: value.to_owned(), 199 | }; 200 | 201 | let timeout = Some(Duration::from_millis(300)); 202 | client::Client::send_to_node_with_timeout(target, &request, timeout) 203 | } 204 | 205 | /// Perform a client's `Request` in the appropriate shard and respond to the 206 | /// client with a ResponseMessage. 207 | pub fn handle_client(stream: &mut TcpStream, shards: &Vec<Vec<SocketAddrV4>>) { 208 | let mut value: Buffer = vec![]; 209 | 210 | if stream.read_to_message_end(&mut value).is_err() { 211 | panic!("Couldn't read from stream."); 212 | } 213 | 214 | let request: Request = match decode(&value) { 215 | Ok(m) => m, 216 | Err(e) => panic!("Message decoding error! 
{:?}", e), 217 | }; 218 | 219 | debug!("Message received: {:?}", request); 220 | 221 | let timestamp = get_now(); 222 | let r = match request.action { 223 | Action::Read {key} => { 224 | let msg_shard = key.shard(shards.len()); 225 | read(&shards[msg_shard], &key, &request.consistency) 226 | } 227 | Action::Write {key, content} => { 228 | let value = Value::Value { 229 | content: content.to_owned(), 230 | timestamp: timestamp, 231 | }; 232 | let msg_shard = key.shard(shards.len()); 233 | write(&shards[msg_shard], &key, &value, &request.consistency) 234 | } 235 | Action::Delete {key} => { 236 | let value = Value::Tombstone { timestamp: timestamp }; 237 | let msg_shard = key.shard(shards.len()); 238 | write(&shards[msg_shard], &key, &value, &request.consistency) 239 | } 240 | }; 241 | debug!("Response to be sent: {:?}", r); 242 | let _ = match r { 243 | Ok(message) => { 244 | let encoded = encode(&message, SizeLimit::Infinite); 245 | match encoded { 246 | Ok(b) => { 247 | match stream.write(&b) { 248 | Ok(size) => debug!("Response sent (size: {})", size), 249 | Err(e) => error!("Error sending message: {:?}, {:?}", message, e), 250 | } 251 | } 252 | Err(e) => panic!("Message encoding error! {:?}", e), 253 | } 254 | } 255 | Err(e) => panic!("Communication error! {:?}", e), 256 | }; 257 | } 258 | 259 | 260 | trait ClientHandler where Self: Debug { 261 | fn handle(&mut self, shards: &Vec>); 262 | } 263 | 264 | /// An sbahn aware stream 265 | impl ClientHandler for TcpStream { 266 | fn handle(&mut self, shards: &Vec>) { 267 | debug!("Starting listener stream: {:?}", self); 268 | handle_client(self, &shards); 269 | } 270 | } 271 | 272 | /// Listen on `address` for incoming client requests, and perform them on the appropriate shards. 273 | pub fn listen(address: &SocketAddrV4, shards: &Vec>) -> Future<(), ()> { 274 | let address = address.to_owned(); 275 | let shards = shards.clone(); 276 | 277 | let read_timeout = Some(Duration::from_millis(300)); 278 | let write_timeout = Some(Duration::from_millis(300)); 279 | 280 | Future::spawn(move || { 281 | match TcpListener::bind(&address) { 282 | Ok(listener) => { 283 | // Accept connections and process them, spawning a new thread for each one. 
284 | for stream in listener.incoming() { 285 | let shards = shards.to_owned(); 286 | match stream { 287 | Ok(stream) => { 288 | let _ = stream.set_read_timeout(read_timeout); 289 | let _ = stream.set_write_timeout(write_timeout); 290 | thread::spawn(move || { 291 | // connection succeeded 292 | let mut stream = stream; 293 | stream.handle(&shards); 294 | }); 295 | } 296 | Err(e) => error!("Connection failed!: {:?}", e), 297 | } 298 | } 299 | } 300 | Err(e) => error!("Could not bind handler to {:?}: {:?}", address, e), 301 | } 302 | }) 303 | } 304 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate bincode; 2 | extern crate eventual; 3 | #[macro_use] 4 | extern crate log; 5 | extern crate rustc_serialize; 6 | extern crate time; 7 | 8 | pub mod client; 9 | pub mod constants; 10 | pub mod handler; 11 | pub mod message; 12 | pub mod network; 13 | pub mod storage; 14 | pub mod storage_node; 15 | -------------------------------------------------------------------------------- /src/message.rs: -------------------------------------------------------------------------------- 1 | use std::result; 2 | use std::hash::{Hash, SipHasher, Hasher}; 3 | 4 | 5 | pub type Buffer = Vec; 6 | 7 | 8 | /// Client request. 9 | #[derive(Debug, Hash, Clone, PartialEq, RustcEncodable, RustcDecodable)] 10 | pub struct Request { 11 | pub action: Action, 12 | pub consistency: Consistency, 13 | } 14 | 15 | /// `Request`'s possible actions to be performed by a `handler`. 16 | #[derive(Debug, Hash, Clone, PartialEq, RustcEncodable, RustcDecodable)] 17 | pub enum Action { 18 | /// Read the given `Key` and receive a `Response::Value`. 19 | Read { 20 | key: Key, 21 | }, 22 | /// Write the given `Value` for `Key` and receive a `Response::WriteAck`. 23 | Write { 24 | key: Key, 25 | content: Buffer, 26 | }, 27 | /// Delete the given `Key` and receive a `Response::WriteAck`. 28 | Delete { 29 | key: Key, 30 | }, 31 | } 32 | 33 | /// A `Request`'s `Response` message envelope. 34 | #[derive(Debug, Hash, Clone, PartialEq, RustcEncodable, RustcDecodable)] 35 | pub struct ResponseMessage { 36 | pub message: Response, 37 | pub consistency: Consistency, 38 | } 39 | 40 | /// A `Request`'s response. 41 | #[derive(Debug, Hash, Clone, PartialEq, RustcEncodable, RustcDecodable)] 42 | pub enum Response { 43 | /// An stored value, stored in the shard's `StorageNode`s, according to the 44 | /// required `Consistency`. 45 | Value { 46 | key: Key, 47 | value: Value, 48 | }, 49 | /// The `Request`ed write for a `Value` has been stored in the `Key`'s 50 | /// shard's `StorageNode`s, according to the required `Consistency`. 51 | WriteAck { 52 | key: Key, 53 | timestamp: u64, 54 | }, 55 | /// There was an error performing the operation on `Key`. 56 | Error { 57 | key: Key, 58 | message: String, 59 | }, 60 | } 61 | 62 | /// The `Key` used to lookup a given `Value`. 63 | #[derive(Debug, Hash, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] 64 | pub struct Key { 65 | pub dataset: Buffer, 66 | pub pkey: Buffer, 67 | pub lkey: Buffer, 68 | } 69 | 70 | impl Key { 71 | /// Return the ring hash for this `Key`. 72 | pub fn hash(&self) -> u64 { 73 | let mut s = SipHasher::new(); 74 | &self.pkey.hash(&mut s); 75 | s.finish() 76 | } 77 | 78 | /// Return the corresponding shard for this key, given a `shard_count` 79 | /// amount of shards. 
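///
/// A small illustration (with three shards every key maps to shard 0, 1 or 2):
///
/// ```
/// use sbahn::message::Key;
///
/// let key = Key { dataset: vec![1], pkey: vec![4, 5, 6], lkey: vec![7] };
/// assert!(key.shard(3) < 3);
/// ```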
80 | pub fn shard(&self, shard_count: usize) -> usize { 81 | (self.hash() % (shard_count as u64)) as usize 82 | } 83 | } 84 | 85 | /// Any of the possible stored values on a `StorageNode`. 86 | #[derive(Debug, Hash, Clone, PartialEq, RustcEncodable, RustcDecodable)] 87 | pub enum Value { 88 | /// There's no value stored for the given `Key` in the `StorageNode`s. 89 | None, 90 | /// The value stored for the given `Key` in the `StorageNode`s. 91 | Value { 92 | content: Buffer, 93 | timestamp: u64, 94 | }, 95 | /// A deleted value for the given `Key` in the `StorageNode`s. 96 | Tombstone { 97 | timestamp: u64, 98 | }, 99 | } 100 | 101 | /// Request operations performed by a `handler` to the `StorageNode`s. 102 | #[derive(Debug, Hash, Clone, PartialEq, RustcEncodable, RustcDecodable)] 103 | pub enum InternodeRequest { 104 | Read { 105 | key: Key, 106 | }, 107 | Write { 108 | key: Key, 109 | value: Value, 110 | }, 111 | } 112 | 113 | /// Request Response for a `handler` from a `StorageNode`. 114 | #[derive(Debug, Hash, Clone, PartialEq, RustcEncodable, RustcDecodable)] 115 | pub enum InternodeResponse { 116 | Value { 117 | key: Key, 118 | value: Value, 119 | }, 120 | WriteAck { 121 | key: Key, 122 | timestamp: u64, 123 | }, 124 | Error { 125 | key: Key, 126 | message: String, 127 | }, 128 | } 129 | 130 | impl InternodeResponse { 131 | /// If the `Value` has a timestamp, return it. 132 | pub fn get_timestamp(&self) -> Option { 133 | match self { 134 | &InternodeResponse::Value {ref value, ..} => { 135 | match *value { 136 | Value::None => None, 137 | Value::Value {timestamp, ..} => Some(timestamp), 138 | Value::Tombstone {timestamp} => Some(timestamp), 139 | } 140 | } 141 | &InternodeResponse::WriteAck {ref timestamp, ..} => Some(*timestamp), 142 | &InternodeResponse::Error {..} => None, 143 | } 144 | } 145 | 146 | /// Cast this `InternodeResponse` into a `handler` -> `Client` `Response`. 147 | pub fn to_response(self) -> Response { 148 | match self { 149 | InternodeResponse::Value {key, value} => { 150 | Response::Value { 151 | key: key, 152 | value: value, 153 | } 154 | } 155 | InternodeResponse::WriteAck {key, timestamp} => { 156 | Response::WriteAck { 157 | key: key, 158 | timestamp: timestamp, 159 | } 160 | } 161 | InternodeResponse::Error {key, message} => { 162 | Response::Error { 163 | key: key, 164 | message: message, 165 | } 166 | } 167 | } 168 | } 169 | } 170 | 171 | /// Consistency level for the `Request` or `Response`. Determines the conflict 172 | /// resolution when `StorageNode`s for a given shard diverge and the persistence 173 | /// assurance when writing. 174 | #[derive(Debug, Hash, Clone, PartialEq, RustcEncodable, RustcDecodable)] 175 | pub enum Consistency { 176 | /// Only wait for one `StorageNode` to successfully reply before responding. 177 | One, 178 | /// Wait for all `StorageNode`s to reply and send a `Response` with the 179 | /// newest `Value`. 180 | Latest, 181 | } 182 | 183 | #[derive(Debug, Hash, Clone, PartialEq)] 184 | pub enum Error { 185 | /// Error when binary encoding a message. 186 | EncodeError, 187 | /// Error when decoding a binary into a message. 188 | DecodeError, 189 | /// Connection error. 
190 | ConnectionError, 191 | } 192 | 193 | pub type Result<T> = result::Result<T, Error>; 194 | 195 | #[cfg(test)] 196 | mod tests { 197 | use super::*; 198 | 199 | #[test] 200 | fn key_hash() { 201 | let key = Key { 202 | dataset: vec![1], 203 | pkey: vec![1], 204 | lkey: vec![1], 205 | }; 206 | assert_eq!(8934463522374858327, key.hash()); 207 | assert_eq!(0, key.shard(1 as usize)); 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /src/network.rs: -------------------------------------------------------------------------------- 1 | use constants::BUFFER_SIZE; 2 | use std::io::Read; 3 | use std::io::Result; 4 | use std::net::TcpStream; 5 | 6 | pub trait NetworkRead { 7 | fn read_to_message_end(&mut self, buf: &mut Vec<u8>) -> Result<usize>; 8 | } 9 | 10 | impl NetworkRead for TcpStream { 11 | fn read_to_message_end(&mut self, buf: &mut Vec<u8>) -> Result<usize> { 12 | let mut total_size: usize = 0; 13 | loop { 14 | let mut fixed_buf = [0; BUFFER_SIZE]; 15 | info!("before read_to_end"); 16 | let bytes_read = try!(self.read(&mut fixed_buf)); 17 | buf.extend(fixed_buf[..bytes_read].iter()); 18 | info!("read {:?} bytes", bytes_read); 19 | total_size += bytes_read; 20 | if bytes_read != BUFFER_SIZE || bytes_read == 0 { 21 | break; 22 | } 23 | } 24 | Ok(total_size) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /src/storage.rs: -------------------------------------------------------------------------------- 1 | use std::collections::HashMap; 2 | use message::{Key, Value}; 3 | use std::sync::Mutex; 4 | use std::fmt::Debug; 5 | 6 | /// A generic storage backend for `StorageNode`s to use as persistence layer. 7 | pub trait StorageBackend where Self: Debug + Send + Sync { 8 | fn new() -> Self; 9 | /// Persist `value` under `key`. 10 | fn insert(&self, key: Key, value: Value); 11 | /// Get a `Value` for the given `key`. 12 | fn get(&self, key: &Key) -> Option<Value>; 13 | } 14 | 15 | /// A basic `HashMap` based backend for in-memory `StorageNode`s.
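///
/// The map lives behind a `Mutex`, so one backend can be shared by the
/// threads handling incoming connections. A small illustration:
///
/// ```
/// use sbahn::storage::{HashMapBackend, StorageBackend};
/// use sbahn::message::{Key, Value};
///
/// let backend = HashMapBackend::new();
/// let key = Key { dataset: vec![1], pkey: vec![2], lkey: vec![3] };
/// backend.insert(key.clone(), Value::Value { content: vec![9], timestamp: 1 });
/// assert!(backend.get(&key).is_some());
/// ```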
16 | #[derive(Debug)] 17 | pub struct HashMapBackend { 18 | hashmap: Mutex>, 19 | } 20 | 21 | impl StorageBackend for HashMapBackend { 22 | fn new() -> HashMapBackend { 23 | debug!("New HashMapBackend"); 24 | let hm = HashMap::new(); 25 | HashMapBackend { hashmap: Mutex::new(hm) } 26 | } 27 | 28 | fn insert(&self, key: Key, value: Value) { 29 | debug!("[HashMapBackend] Going to insert {:?}, {:?}", key, value); 30 | let lock = self.hashmap.lock(); 31 | let mut map = lock.unwrap(); 32 | map.insert(key, value.clone()); 33 | debug!("[HashMapBackend] inserted {:?}", value); 34 | } 35 | 36 | fn get(&self, key: &Key) -> Option { 37 | debug!("[HashMapBackend] Going to read {:?}", key); 38 | let lock = self.hashmap.lock(); 39 | let map = lock.unwrap(); 40 | let value = map.get(key); 41 | debug!("[HashMapBackend] Value read {:?}", value); 42 | match value { 43 | Some(x) => Some(x.to_owned()), 44 | None => None, 45 | } 46 | } 47 | } 48 | 49 | unsafe impl Sync for HashMapBackend {} 50 | -------------------------------------------------------------------------------- /src/storage_node.rs: -------------------------------------------------------------------------------- 1 | use bincode::SizeLimit; 2 | use bincode::rustc_serialize::{encode, decode}; 3 | use message::{Buffer, Key, Value, InternodeRequest, InternodeResponse}; 4 | use network::NetworkRead; 5 | use std::io::Write; 6 | use std::net::{SocketAddrV4, TcpListener, TcpStream}; 7 | use std::sync::Arc; 8 | use std::thread; 9 | use storage::StorageBackend; 10 | 11 | pub struct StorageNode { 12 | pub shard: usize, 13 | pub shard_count: usize, 14 | pub address: SocketAddrV4, 15 | pub map: Arc, 16 | } 17 | 18 | #[derive(Debug)] 19 | struct ClientHandler { 20 | stream: TcpStream, 21 | shard: usize, 22 | shard_count: usize, 23 | map: Arc, 24 | } 25 | 26 | impl ClientHandler { 27 | fn new(stream: TcpStream, 28 | map: Arc, 29 | shard_number: usize, 30 | shard_count: usize) 31 | -> ClientHandler { 32 | ClientHandler { 33 | stream: stream, 34 | shard: shard_number, 35 | shard_count: shard_count, 36 | map: map, 37 | } 38 | } 39 | 40 | pub fn handle_client(&mut self) { 41 | let mut value: Buffer = vec![]; 42 | if self.stream.read_to_message_end(&mut value).is_err() { 43 | panic!("Couldn't read from stream."); 44 | } 45 | 46 | let m: InternodeRequest = match decode(&value) { 47 | Ok(m) => m, 48 | Err(_) => panic!("decoding error!"), 49 | }; 50 | 51 | debug!("Message received: {:?}", m); 52 | let response: InternodeResponse = self.handle_message(m); 53 | let encoded = encode(&response, SizeLimit::Infinite); 54 | let _ = match encoded { 55 | Ok(b) => self.stream.write(&b), 56 | Err(_) => panic!("encoding error! 
{:?}", response), 57 | }; 58 | } 59 | 60 | pub fn handle_message(&mut self, message: InternodeRequest) -> InternodeResponse { 61 | match message { 62 | InternodeRequest::Read {key} => self.get(key), 63 | InternodeRequest::Write {key, value} => self.insert(key, value), 64 | } 65 | } 66 | 67 | fn get(&mut self, key: Key) -> InternodeResponse { 68 | debug!("Reading {:?}", key); 69 | let key_shard = key.shard(self.shard_count.clone()); 70 | let this_shard = self.shard; 71 | if key_shard == this_shard { 72 | 73 | debug!("get self {:?}", self); 74 | debug!("get self.map {:?}", self.map); 75 | let v = self.map.get(&key); 76 | debug!("get value {:?}", v); 77 | // let ref mut map: Backend = *match Arc::get_mut(&mut self.map); 78 | match v { 79 | Some(value) => { 80 | InternodeResponse::Value { 81 | key: key.to_owned(), 82 | value: value.to_owned(), 83 | } 84 | } 85 | None => { 86 | InternodeResponse::Value { 87 | key: key.to_owned(), 88 | value: Value::None, 89 | } 90 | } 91 | } 92 | } else { 93 | let error = format!("{:?} doesn't belong to this shard!", key); 94 | error!("{}", error); 95 | InternodeResponse::Error { 96 | key: key.to_owned(), 97 | message: error, 98 | } 99 | } 100 | } 101 | 102 | fn insert(&mut self, key: Key, value: Value) -> InternodeResponse { 103 | debug!("Writing {:?} -> {:?}", key, value); 104 | let key_shard = key.shard(self.shard_count.clone()); 105 | let this_shard = self.shard; 106 | if key_shard == this_shard { 107 | match value { 108 | Value::None => { 109 | let error = format!("Write operation at {:?} with None.This should have been \ 110 | a Tombstone", 111 | key); 112 | error!("{}", error); 113 | InternodeResponse::Error { 114 | key: key.to_owned(), 115 | message: error, 116 | } 117 | } 118 | Value::Value {timestamp, ..} | Value::Tombstone {timestamp} => { 119 | debug!("set self.map {:?}", self.map); 120 | debug!("key: {:?}", key); 121 | debug!("timestamp: {:?}", timestamp); 122 | let map = &self.map; 123 | map.insert(key.to_owned(), value.to_owned()); 124 | InternodeResponse::WriteAck { 125 | key: key.to_owned(), 126 | timestamp: timestamp.to_owned(), 127 | } 128 | } 129 | } 130 | } else { 131 | let error = format!("{:?} doesn't belong to this shard!", key); 132 | error!("{}", error); 133 | InternodeResponse::Error { 134 | key: key.to_owned(), 135 | message: error, 136 | } 137 | } 138 | } 139 | } 140 | 141 | impl StorageNode { 142 | pub fn new(local_address: &SocketAddrV4, 143 | shard_number: usize, 144 | shard_count: usize) 145 | -> StorageNode { 146 | let map = Arc::new(Backend::new()); 147 | StorageNode { 148 | shard: shard_number, 149 | shard_count: shard_count, 150 | address: local_address.to_owned(), 151 | map: map, 152 | } 153 | } 154 | 155 | pub fn listen(&mut self) { 156 | let listener = match TcpListener::bind(self.address) { 157 | Ok(l) => l, 158 | Err(e) => { 159 | panic!("Error binding this storage node @ {:?}: {:?}", 160 | self.address, 161 | e) 162 | } 163 | }; 164 | 165 | // accept connections and process them, spawning a new thread for each one 166 | for stream in listener.incoming() { 167 | match stream { 168 | Ok(stream) => { 169 | debug!("Starting listener stream: {:?}", stream); 170 | let shard = self.shard; 171 | let shard_count = self.shard_count; 172 | let map = self.map.clone(); 173 | thread::spawn(move || { 174 | let mut ch = ClientHandler::new(stream, map, shard, shard_count); 175 | // connection succeeded 176 | ch.handle_client(); 177 | }); 178 | } 179 | Err(e) => { 180 | error!("connection failed!: {:?}", e); 181 | } 182 | } 183 | 
debug!("Contents of shard {:?} @ {:?} map: {:?}", 184 | self.shard, 185 | self.address, 186 | self.map); 187 | } 188 | 189 | // close the socket server 190 | drop(listener); 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /tests/end_to_end.rs: -------------------------------------------------------------------------------- 1 | extern crate eventual; 2 | extern crate sbahn; 3 | 4 | use eventual::*; 5 | use sbahn::client; 6 | use sbahn::handler; 7 | use sbahn::message::*; 8 | use sbahn::storage::HashMapBackend; 9 | use sbahn::storage_node::StorageNode; 10 | use std::net::{Ipv4Addr, SocketAddrV4}; 11 | use std::thread; 12 | use std::time::Duration; 13 | 14 | 15 | static mut port: u16 = 1400; 16 | fn get_port() -> u16 { 17 | let p; 18 | unsafe { 19 | port += 1; 20 | p = port; 21 | } 22 | p 23 | } 24 | 25 | fn get_storage_node<'a>(pos: usize, shard_count: usize) -> SocketAddrV4 { 26 | let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), get_port()); 27 | let mut sn: StorageNode = StorageNode::new(&addr, pos, shard_count); 28 | thread::spawn(move || { 29 | &sn.listen(); 30 | }); 31 | thread::sleep(Duration::from_millis(100)); // Wait for storage node to start listening 32 | addr 33 | } 34 | 35 | #[test] 36 | fn end_to_end() { 37 | let mut shards: Vec> = vec![]; 38 | for i in 0..3 { 39 | let mut shard: Vec = vec![]; 40 | for _ in 0..3 { 41 | shard.push(get_storage_node(i, 3)); 42 | } 43 | shards.push(shard); 44 | } 45 | 46 | let e = &shards; 47 | let z = e.clone(); 48 | let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), get_port()); 49 | thread::spawn(move || { 50 | let _shards = &z.to_owned(); 51 | let _ = handler::listen(&addr, &_shards); 52 | }); 53 | thread::sleep(Duration::from_millis(100)); // Wait for handler node to start listening 54 | 55 | { 56 | let insert_key = Key { 57 | dataset: vec![1, 2, 3], 58 | pkey: vec![4, 5, 6], 59 | lkey: vec![7, 8, 9], 60 | }; 61 | let client = client::Client::new(vec![addr]); 62 | { 63 | let content = Request { 64 | action: Action::Write { 65 | key: insert_key.to_owned(), 66 | content: vec![1], 67 | }, 68 | consistency: Consistency::Latest, 69 | }; 70 | let r = client.send(&content).await().unwrap(); 71 | match r.message { 72 | Response::WriteAck {key, ..} => assert_eq!(key, insert_key), 73 | _ => panic!(), 74 | } 75 | } 76 | { 77 | let content = Request { 78 | action: Action::Read { 79 | key: insert_key.to_owned(), 80 | }, 81 | consistency: Consistency::Latest, 82 | }; 83 | let r = client.send(&content).await().unwrap(); 84 | match r.message { 85 | Response::Value {key, value} => { 86 | assert_eq!(key, insert_key); 87 | match value { 88 | Value::Value {content, ..} => assert_eq!(&content[..], &vec![1][..]), 89 | _ => panic!(), 90 | } 91 | }, 92 | _ => panic!(), 93 | } 94 | } 95 | { 96 | let content = Request { 97 | action: Action::Delete { 98 | key: insert_key.to_owned(), 99 | }, 100 | consistency: Consistency::Latest, 101 | }; 102 | let r = client.send(&content).await().unwrap(); 103 | match r.message { 104 | Response::WriteAck {key, ..} => assert_eq!(key, insert_key), 105 | _ => panic!(), 106 | } 107 | } 108 | { 109 | let content = Request { 110 | action: Action::Read { 111 | key: insert_key.to_owned(), 112 | }, 113 | consistency: Consistency::Latest, 114 | }; 115 | let r = client.send(&content).await().unwrap(); 116 | match r.message { 117 | Response::Value {key, value} => { 118 | assert_eq!(key, insert_key); 119 | match value { 120 | Value::Tombstone {..} => assert!(true), 121 | _ => 
panic!(), 122 | } 123 | }, 124 | _ => panic!(), 125 | } 126 | } 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /tests/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate sbahn; 2 | -------------------------------------------------------------------------------- /tests/storage_node.rs: -------------------------------------------------------------------------------- 1 | extern crate eventual; 2 | extern crate sbahn; 3 | 4 | use eventual::*; 5 | use sbahn::client; 6 | use sbahn::handler; 7 | use sbahn::message::*; 8 | use sbahn::storage::HashMapBackend; 9 | use sbahn::storage_node::StorageNode; 10 | use std::net::{Ipv4Addr, SocketAddrV4, TcpListener}; 11 | use std::thread; 12 | use std::time::Duration; 13 | 14 | // Milis to wait before trying to connect to any node. 15 | static DELAY: u64 = 100; 16 | 17 | static mut PORT: u16 = 1100; 18 | /// Obtain an open port 19 | fn get_port() -> u16 { 20 | let mut port = 0; 21 | loop { 22 | unsafe { 23 | PORT += 1; 24 | port = PORT; 25 | } 26 | let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), port); 27 | if TcpListener::bind(&addr).is_ok() { 28 | // Check wether the port is open, and only return it if it is. 29 | return port; 30 | } 31 | } 32 | } 33 | 34 | fn get_storage_node(pos: usize, shard_count: usize) -> SocketAddrV4 { 35 | let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), get_port()); 36 | let mut sn: StorageNode = StorageNode::new(&addr, pos, shard_count); 37 | thread::spawn(move || { 38 | &sn.listen(); 39 | }); 40 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 41 | addr 42 | } 43 | 44 | fn setup_handler_node(shards: &Vec>) -> SocketAddrV4 { 45 | let shards = shards.clone(); 46 | let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), get_port()); 47 | thread::spawn(move || { 48 | let _ = handler::listen(&addr, &shards); 49 | }); 50 | thread::sleep(Duration::from_millis(DELAY)); // Wait for handler node to start listening 51 | addr 52 | } 53 | 54 | /// Listen on `address` for incoming client requests, and do nothing. 55 | pub fn dead_node(address: &SocketAddrV4) -> Future<(), ()> { 56 | let address = address.to_owned(); 57 | 58 | let read_timeout = Some(Duration::from_millis(300)); 59 | let write_timeout = Some(Duration::from_millis(300)); 60 | 61 | Future::spawn(move || { 62 | match TcpListener::bind(&address) { 63 | Ok(listener) => { 64 | // Accept connections and process them, spawning a new thread for each one. 
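// The accepted connection is never read from or written to (the spawned
// thread just sleeps), so clients hit their read timeout. This is how the
// tests below simulate an unresponsive storage node.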
65 | for stream in listener.incoming() { 66 | match stream { 67 | Ok(stream) => { 68 | let _ = stream.set_read_timeout(read_timeout); 69 | let _ = stream.set_write_timeout(write_timeout); 70 | thread::spawn(|| { 71 | thread::sleep(Duration::from_millis(500)); 72 | }); 73 | } 74 | Err(e) => panic!("Connection failed!: {:?}", e), 75 | } 76 | } 77 | } 78 | Err(e) => panic!("Error while binding to {:?}: {:?}", address, e), 79 | } 80 | }) 81 | } 82 | 83 | fn get_dead_storage_node() -> SocketAddrV4 { 84 | let addr = SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), get_port()); 85 | thread::spawn(move || { 86 | let _ = dead_node(&addr); 87 | }); 88 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 89 | addr 90 | } 91 | 92 | fn setup_cluster() -> (SocketAddrV4, Vec>) { 93 | setup_bad_cluster(0) 94 | } 95 | 96 | fn setup_bad_cluster(bad_nodes: u32) -> (SocketAddrV4, Vec>) { 97 | let mut shards: Vec> = vec![]; 98 | for i in 0..3 { 99 | let mut shard: Vec = vec![]; 100 | for _ in 0..bad_nodes { 101 | shard.push(get_dead_storage_node()); 102 | } 103 | for _ in bad_nodes..3 { 104 | shard.push(get_storage_node(i, 3)); 105 | } 106 | shards.push(shard); 107 | } 108 | 109 | (setup_handler_node(&shards), shards) 110 | } 111 | 112 | fn write_to_storage_node(target: &SocketAddrV4, key: &Key, value: &Vec, timestamp: u64) { 113 | let request = InternodeRequest::Write { 114 | key: key.to_owned(), 115 | value: Value::Value { 116 | content: value.to_owned(), 117 | timestamp: timestamp, 118 | }, 119 | }; 120 | let r: Future = client::Client::send_to_node(target, &request); 121 | let _ = r.await(); 122 | } 123 | 124 | fn key_and_value() -> (Key, Vec) { 125 | let key = Key { 126 | dataset: vec![1, 2, 3], 127 | pkey: vec![4, 5, 6], 128 | lkey: vec![7, 8, 9], 129 | }; 130 | let value: Vec = vec![9, 8, 7]; 131 | (key, value) 132 | } 133 | 134 | #[test] 135 | fn read_what_you_insert() { 136 | let (handler_addr, _) = setup_cluster(); 137 | let (local_key, local_value) = key_and_value(); 138 | 139 | let client = client::Client::new(vec![handler_addr]); 140 | // Should succeed 141 | let r = client.insert(&local_key, &local_value); 142 | let r = r.await().unwrap(); 143 | match r.message { 144 | Response::WriteAck {key, ..} => assert_eq!(key, local_key), 145 | _ => assert!(false), 146 | } 147 | // Should succeed 148 | let r = client.get(&local_key); 149 | let r = r.await().unwrap(); 150 | match r.message { 151 | Response::Value {key, value} => { 152 | assert_eq!(key, local_key); 153 | match value { 154 | Value::Value {content, ..} => assert_eq!(content, local_value), 155 | _ => assert!(false), 156 | } 157 | }, 158 | _ => assert!(false), 159 | } 160 | } 161 | 162 | #[test] 163 | fn read_consistency_one_all_nodes_available() { 164 | let (handler_addr, shards) = setup_cluster(); 165 | let (local_key, local_value) = key_and_value(); 166 | 167 | for shard in shards { 168 | for node in shard { 169 | write_to_storage_node(&node, &local_key, &local_value, 100000); 170 | } 171 | } 172 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 173 | 174 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::One); 175 | 176 | // Should succeed 177 | let r = client.get(&local_key); 178 | let r = r.await().unwrap(); 179 | match r.message { 180 | Response::Value {key, value} => { 181 | assert_eq!(key, local_key); 182 | match value { 183 | Value::Value {content, ..} => assert_eq!(content, local_value), 184 | _ => assert!(false), 185 | } 186 | }, 
187 | _ => assert!(false), 188 | } 189 | } 190 | 191 | #[test] 192 | fn read_consistency_one_one_node_available() { 193 | let (handler_addr, shards) = setup_cluster(); 194 | let (local_key, local_value) = key_and_value(); 195 | 196 | // Write to only one node (local_key corresponds to shard 2). 197 | write_to_storage_node(&shards[2][0], &local_key, &local_value, 100000); 198 | 199 | thread::sleep(Duration::from_millis(DELAY*3)); // Wait for storage node to start listening 200 | 201 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::One); 202 | 203 | // Should succeed 204 | let r = client.get(&local_key); 205 | let r = r.await().unwrap(); 206 | match r.message { 207 | Response::Value {key, value} => { 208 | assert_eq!(key, local_key); 209 | match value { 210 | Value::Value {content, ..} => assert_eq!(content, local_value), 211 | _ => assert!(false), 212 | } 213 | }, 214 | _ => assert!(false), 215 | } 216 | } 217 | 218 | #[test] 219 | fn read_consistency_latest_all_same() { 220 | // Should succeed 221 | let (handler_addr, shards) = setup_cluster(); 222 | let (local_key, local_value) = key_and_value(); 223 | 224 | for shard in shards { 225 | for node in shard { 226 | write_to_storage_node(&node, &local_key, &local_value, 100000); 227 | } 228 | } 229 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 230 | 231 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::Latest); 232 | 233 | // Should succeed 234 | let r = client.get(&local_key); 235 | let r = r.await().unwrap(); 236 | match r.message { 237 | Response::Value {key, value} => { 238 | assert_eq!(key, local_key); 239 | match value { 240 | Value::Value {content, ..} => assert_eq!(content, local_value), 241 | _ => assert!(false), 242 | } 243 | }, 244 | _ => assert!(false), 245 | } 246 | } 247 | 248 | #[test] 249 | fn read_consistency_latest_all_different() { 250 | // Should succeed 251 | } 252 | 253 | #[test] 254 | fn read_consistency_latest_one_node_available() { 255 | let (handler_addr, shards) = setup_cluster(); 256 | let (local_key, local_value) = key_and_value(); 257 | 258 | // Write to only one node (local_key corresponds to shard 2). 
259 | write_to_storage_node(&shards[2][0], &local_key, &local_value, 100000); 260 | 261 | thread::sleep(Duration::from_millis(DELAY*3)); // Wait for storage node to start listening 262 | 263 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::Latest); 264 | 265 | // Should fail 266 | match client.get(&local_key).await() { 267 | Ok(r) => { 268 | match r.message { 269 | Response::Error {key, ..} => { 270 | assert_eq!(key, local_key); 271 | } 272 | _ => assert!(false), 273 | } 274 | } 275 | Err(_) => assert!(true), 276 | } 277 | } 278 | 279 | #[test] 280 | fn write_consistency_one_all_available() { 281 | let (handler_addr, _) = setup_cluster(); 282 | let (local_key, local_value) = key_and_value(); 283 | 284 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 285 | 286 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::Latest); 287 | 288 | // Should succeed 289 | let r = client.insert(&local_key, &local_value); 290 | let r = r.await().unwrap(); 291 | match r.message { 292 | Response::WriteAck {key, timestamp} => { 293 | assert_eq!(key, local_key); 294 | assert!(timestamp > 0); 295 | }, 296 | _ => assert!(false), 297 | } 298 | } 299 | 300 | #[test] 301 | fn write_consistency_one_none_available() { 302 | let (handler_addr, _) = setup_bad_cluster(3); 303 | let (local_key, local_value) = key_and_value(); 304 | 305 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 306 | 307 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::One); 308 | 309 | // Should fail 310 | match client.insert(&local_key, &local_value).await() { 311 | Ok(r) => { 312 | match r.message { 313 | Response::Error {key, message} => { 314 | assert_eq!(key, local_key); 315 | assert_eq!("Quorum write could not be accomplished.".to_string(), message); 316 | }, 317 | _ => assert!(false), 318 | } 319 | } 320 | Err(_) => assert!(true), 321 | } 322 | } 323 | 324 | #[test] 325 | fn write_consistency_latest_all_available() { 326 | let (handler_addr, _) = setup_cluster(); 327 | let (local_key, local_value) = key_and_value(); 328 | 329 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 330 | 331 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::Latest); 332 | 333 | // Should succeed 334 | let r = client.insert(&local_key, &local_value); 335 | let r = r.await().unwrap(); 336 | match r.message { 337 | Response::WriteAck {key, timestamp} => { 338 | assert_eq!(key, local_key); 339 | assert!(timestamp > 0); 340 | }, 341 | _ => assert!(false), 342 | } 343 | } 344 | 345 | #[test] 346 | fn write_consistency_latest_quorum_available() { 347 | let (handler_addr, _) = setup_bad_cluster(1); 348 | let (local_key, local_value) = key_and_value(); 349 | 350 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 351 | 352 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::Latest); 353 | 354 | // Should succeed 355 | let r = client.insert(&local_key, &local_value); 356 | let r = r.await().unwrap(); 357 | match r.message { 358 | Response::WriteAck {key, timestamp} => { 359 | assert_eq!(key, local_key); 360 | assert!(timestamp > 0); 361 | }, 362 | _ => assert!(false), 363 | } 364 | } 365 | 366 | #[test] 367 | fn write_consistency_latest_one_available() { 368 | let (handler_addr, _) = setup_bad_cluster(2); 369 | let (local_key, local_value) = key_and_value(); 370 | 
371 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 372 | 373 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::Latest); 374 | 375 | // Should fail 376 | match client.insert(&local_key, &local_value).await() { 377 | Ok(r) => { 378 | match r.message { 379 | Response::Error {key, message} => { 380 | assert_eq!(local_key, key); 381 | assert_eq!("Quorum write could not be accomplished.".to_string(), message); 382 | } 383 | _ => assert!(false), 384 | } 385 | } 386 | Err(_) => assert!(true), 387 | } 388 | } 389 | 390 | #[test] 391 | fn write_consistency_latest_none_available() { 392 | let (handler_addr, _) = setup_bad_cluster(3); 393 | let (local_key, local_value) = key_and_value(); 394 | 395 | thread::sleep(Duration::from_millis(DELAY)); // Wait for storage node to start listening 396 | 397 | let client = client::Client::with_consistency(vec![handler_addr], Consistency::Latest); 398 | 399 | // Should fail 400 | match client.insert(&local_key, &local_value).await() { 401 | Ok(r) => { 402 | match r.message { 403 | Response::Error {key, message} => { 404 | assert_eq!(local_key, key); 405 | assert_eq!("Quorum write could not be accomplished.".to_string(), message); 406 | } 407 | _ => assert!(false), 408 | } 409 | } 410 | Err(_) => assert!(true), 411 | } 412 | } 413 | 414 | #[test] 415 | fn single_node() { 416 | let addr = get_storage_node(0, 1); 417 | let (insert_key, _) = key_and_value(); 418 | 419 | { 420 | let content = InternodeRequest::Write { 421 | key: insert_key.to_owned(), 422 | value: Value::Value { 423 | content: vec![1], 424 | timestamp: 10000000, 425 | }, 426 | }; 427 | let addr = &addr.to_owned(); 428 | let r: Future = client::Client::send_to_node(addr, &content); 429 | match r.await().unwrap() { 430 | InternodeResponse::WriteAck {key, timestamp} => { 431 | assert_eq!(key, insert_key); 432 | assert_eq!(timestamp, 10000000); 433 | }, 434 | e => panic!("{:?}", e), 435 | } 436 | } 437 | { 438 | let content = InternodeRequest::Read { 439 | key: insert_key.to_owned(), 440 | }; 441 | let addr = &addr.to_owned(); 442 | let r: Future = client::Client::send_to_node(addr, &content); 443 | let r = r.await(); 444 | match r { 445 | Ok(r) => match r { 446 | InternodeResponse::Value {key, value} => { 447 | assert_eq!(key, insert_key); 448 | match value { 449 | Value::Value {content, timestamp} => { 450 | assert_eq!(&content[..], &[1][..]); 451 | assert_eq!(timestamp, 10000000); 452 | }, 453 | _ => panic!(), 454 | } 455 | }, 456 | _ => panic!(), 457 | }, 458 | Err(_) => panic!(), 459 | } 460 | } 461 | } 462 | --------------------------------------------------------------------------------