├── scripts ├── run-integration-tests.sh ├── fix-rust-dtrace-symbols.sh └── head-to-head-benchmark-runner.sh ├── synchrotron-test ├── Cargo.toml ├── src │ ├── daemons.rs │ └── main.rs └── Cargo.lock ├── .gitignore ├── ci ├── script.sh ├── before_deploy.sh └── install.sh ├── src ├── metrics │ └── utils.rs ├── util │ ├── untyped.rs │ ├── timed.rs │ ├── helpers.rs │ ├── mod.rs │ ├── container.rs │ └── batch.rs ├── protocol │ ├── mod.rs │ ├── errors.rs │ └── redis │ │ └── filtering.rs ├── service │ ├── mod.rs │ ├── errors.rs │ └── pipeline.rs ├── routing │ ├── mod.rs │ ├── fixed.rs │ ├── errors.rs │ └── shadow.rs ├── backend │ ├── hasher │ │ ├── fnv64a.rs │ │ ├── md5.rs │ │ └── mod.rs │ ├── distributor │ │ ├── modulo.rs │ │ ├── random.rs │ │ └── mod.rs │ ├── processor │ │ ├── errors.rs │ │ └── mod.rs │ ├── health.rs │ ├── errors.rs │ ├── message_queue.rs │ ├── pool.rs │ ├── mod.rs │ └── redis.rs ├── conf │ ├── mod.rs │ ├── backend_addr.rs │ └── config.rs ├── errors │ └── mod.rs ├── common.rs ├── main.rs └── listener.rs ├── .rustfmt.toml ├── Justfile ├── config └── synchrotron.json ├── LICENSE ├── .travis.yml ├── Cargo.toml ├── README.md └── CODE_OF_CONDUCT.md /scripts/run-integration-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Gotta have Synchrotron available. 4 | cargo build 5 | 6 | pushd synchrotron-test 7 | cargo test 8 | RESULT=$? 9 | popd 10 | exit $RESULT 11 | -------------------------------------------------------------------------------- /synchrotron-test/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "synchrotron-test" 3 | version = "0.1.0" 4 | authors = ["Toby Lawrence "] 5 | 6 | [dependencies] 7 | tempfile = "^3.0" 8 | redis = "^0.9" 9 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Profiling. 2 | .flamegraph 3 | out.stacks 4 | profile.svg 5 | 6 | # Normal Rust-y stuff. 7 | /target/ 8 | synchrotron-test/target 9 | **/*.rs.bk 10 | 11 | # Config stuff. 12 | config/synchrotron.dev* 13 | config/synchrotron.local* 14 | -------------------------------------------------------------------------------- /ci/script.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | 3 | main() { 4 | cross build --target $TARGET 5 | 6 | if [ ! 
-z $DISABLE_TESTS ]; then 7 | return 8 | fi 9 | 10 | cross test --target $TARGET 11 | scripts/run-integration-tests.sh 12 | } 13 | 14 | # we don't run the "test phase" when doing deploys 15 | if [ -z $TRAVIS_TAG ]; then 16 | main 17 | fi 18 | -------------------------------------------------------------------------------- /src/metrics/utils.rs: -------------------------------------------------------------------------------- 1 | use futures::prelude::*; 2 | 3 | pub struct Timed(pub u64, pub F); 4 | 5 | impl Future for Timed { 6 | type Item = (u64, F::Item); 7 | type Error = F::Error; 8 | 9 | fn poll(&mut self) -> Poll { 10 | let result = try_ready!(self.1.poll()); 11 | Ok(Async::Ready((self.0, result))) 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/util/untyped.rs: -------------------------------------------------------------------------------- 1 | use futures::prelude::*; 2 | 3 | pub struct Untyped { 4 | inner: F, 5 | } 6 | 7 | impl Untyped { 8 | pub fn new(inner: F) -> Self { Self { inner } } 9 | } 10 | 11 | impl Future for Untyped { 12 | type Error = (); 13 | type Item = (); 14 | 15 | fn poll(&mut self) -> Poll { self.inner.poll().map(|x| x.map(|_| ())).map_err(|_| ()) } 16 | } 17 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | max_width = 120 2 | wrap_comments = true 3 | comment_width = 120 4 | use_try_shorthand = true 5 | merge_imports = true 6 | reorder_imports = true 7 | reorder_modules = true 8 | fn_args_density = "Compressed" 9 | fn_single_line = true 10 | imports_indent = "Block" 11 | match_block_trailing_comma = true 12 | merge_derives = true 13 | force_multiline_blocks = true 14 | normalize_comments = true 15 | reorder_impl_items = true 16 | use_field_init_shorthand = true 17 | license_template_path = "LICENSE" 18 | -------------------------------------------------------------------------------- /src/util/timed.rs: -------------------------------------------------------------------------------- 1 | use futures::prelude::*; 2 | 3 | pub struct Timed { 4 | start: u64, 5 | inner: F, 6 | } 7 | 8 | impl Timed { 9 | pub fn new(inner: F, start: u64) -> Self { Timed { inner, start } } 10 | } 11 | 12 | impl Future for Timed { 13 | type Error = F::Error; 14 | type Item = (u64, F::Item); 15 | 16 | fn poll(&mut self) -> Poll { 17 | let result = try_ready!(self.inner.poll()); 18 | Ok(Async::Ready((self.start, result))) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /scripts/fix-rust-dtrace-symbols.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sed -i -- 's/\$LT\$/\</g' profile.svg 3 | sed -i -- 's/\$GT\$/\>/g' profile.svg 4 | sed -i -- 's/\$RF\$/\&/g' profile.svg 5 | sed -i -- 's/\$u20\$/ /g' profile.svg 6 | sed -i -- 's/\$u27\$/'"'"'/g' profile.svg 7 | sed -i -- 's/\$u5b\$/[/g' profile.svg 8 | sed -i -- 's/\$u5d\$/]/g' profile.svg 9 | sed -i -- 's/\$u7b\$/{/g' profile.svg 10 | sed -i -- 's/\$u7d\$/}/g' profile.svg 11 | sed -i -- 's/\$LP\$/(/g' profile.svg 12 | sed -i -- 's/\$RP\$/)/g' profile.svg 13 | sed -i -- 's/\$C\$/,/g' profile.svg 14 | sed -i -- 's/\.\./::/g' profile.svg 15 | -------------------------------------------------------------------------------- /ci/before_deploy.sh: -------------------------------------------------------------------------------- 1 | # This script takes care 
of building your crate and packaging it for release 2 | 3 | set -ex 4 | 5 | main() { 6 | local src=$(pwd) \ 7 | stage= 8 | 9 | case $TRAVIS_OS_NAME in 10 | linux) 11 | stage=$(mktemp -d) 12 | ;; 13 | osx) 14 | stage=$(mktemp -d -t tmp) 15 | ;; 16 | esac 17 | 18 | test -f Cargo.lock || cargo generate-lockfile 19 | 20 | cross rustc --bin synchrotron --target $TARGET --release -- -C lto 21 | 22 | cp target/$TARGET/release/synchroton $stage/ 23 | 24 | cd $stage 25 | tar czf $src/$CRATE_NAME-$TRAVIS_TAG-$TARGET.tar.gz * 26 | cd $src 27 | 28 | rm -rf $stage 29 | } 30 | 31 | main 32 | -------------------------------------------------------------------------------- /scripts/head-to-head-benchmark-runner.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Test against Synchrotron. 4 | for pipeline in 1 4 8 16 32 64 128; do 5 | for clients in 1 2 4 8 16; do 6 | sleep 1 7 | request_count=$((pipeline*clients*5000)) 8 | output=$(redis-benchmark -p 6380 -t get -n $request_count -c $clients -P $pipeline --csv) 9 | parsed=$(echo "$output" | awk -F'","' '{x=$2; gsub("\"","",x); print x}') 10 | echo "synchrotron $clients clients, $pipeline pipeline: $parsed" 11 | done 12 | done 13 | 14 | # Test against Twemproxy. 15 | for pipeline in 1 4 8 16 32 64 128; do 16 | for clients in 1 2 4 8 16; do 17 | sleep 1 18 | request_count=$((pipeline*clients*5000)) 19 | output=$(redis-benchmark -p 22121 -t get -n $request_count -c $clients -P $pipeline --csv) 20 | parsed=$(echo "$output" | awk -F'","' '{x=$2; gsub("\"","",x); print x}') 21 | echo "twemproxy $clients clients, $pipeline pipeline: $parsed" 22 | done 23 | done 24 | -------------------------------------------------------------------------------- /Justfile: -------------------------------------------------------------------------------- 1 | clean: 2 | cargo clean 3 | 4 | fmt: 5 | cargo fmt --all 6 | 7 | lint: 8 | cargo fmt --all -- --check 9 | 10 | build: 11 | cargo build 12 | 13 | build-release: 14 | cargo build --release 15 | 16 | test: 17 | cargo test 18 | 19 | integration-test: 20 | ./scripts/run-integration-tests.sh 21 | 22 | bench: 23 | cargo bench 24 | 25 | profile: build 26 | sudo dtrace -c './target/debug/synchrotron' -o out.stacks -n 'profile-997 /execname == "synchrotron"/ { @[ustack(100)] = count(); }' 27 | 28 | profile-release: build-release 29 | sudo dtrace -c './target/release/synchrotron' -o out.stacks -n 'profile-997 /execname == "synchrotron"/ { @[ustack(100)] = count(); }' 30 | 31 | profile-svg: 32 | test -d .flamegraph || git clone https://github.com/brendangregg/FlameGraph.git .flamegraph 33 | .flamegraph/stackcollapse.pl out.stacks | .flamegraph/flamegraph.pl > profile.svg 34 | scripts/fix-rust-dtrace-symbols.sh 35 | open profile.svg 36 | -------------------------------------------------------------------------------- /config/synchrotron.json: -------------------------------------------------------------------------------- 1 | { 2 | "logging": { 3 | "level": "info" 4 | }, 5 | "listeners": { 6 | "fixed": { 7 | "protocol": "redis", 8 | "address": "127.0.0.1:6380", 9 | "pools": { 10 | "default": { 11 | "addresses": ["127.0.0.1:6379"] 12 | } 13 | }, 14 | "routing": { 15 | "type": "fixed" 16 | } 17 | }, 18 | "fixed_spread": { 19 | "protocol": "redis", 20 | "address": "127.0.0.1:6381", 21 | "pools": { 22 | "default": { 23 | "addresses": ["127.0.0.1:6382", "127.0.0.1:6383"] 24 | } 25 | }, 26 | "routing": { 27 | "type": "fixed" 28 | } 29 | }, 30 | "shadow": { 31 | "protocol": "redis", 32 | 
"address": "127.0.0.1:6384", 33 | "pools": { 34 | "default": { 35 | "addresses": ["127.0.0.1:6385"] 36 | }, 37 | "shadow": { 38 | "addresses": ["127.0.0.1:6386"] 39 | } 40 | }, 41 | "routing": { 42 | "type": "shadow" 43 | } 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | -------------------------------------------------------------------------------- /src/protocol/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
20 | pub mod errors; 21 | pub mod redis; 22 | -------------------------------------------------------------------------------- /src/service/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | mod errors; 21 | mod pipeline; 22 | 23 | pub use self::{errors::PipelineError, pipeline::Pipeline}; 24 | -------------------------------------------------------------------------------- /src/routing/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
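//! Request routing strategies.
//!
//! `FixedRouter` hands every request straight to a single pool of backends,
//! while `ShadowRouter` additionally mirrors traffic to a "shadow" pool (see
//! the `shadow` listener in `config/synchrotron.json`); failures surface as
//! `RouterError`.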
20 | mod errors; 21 | pub use self::errors::RouterError; 22 | 23 | mod fixed; 24 | mod shadow; 25 | pub use self::{fixed::FixedRouter, shadow::ShadowRouter}; 26 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | services: docker 3 | 4 | env: 5 | global: 6 | - CRATE_NAME=synchrotron 7 | - REDIS_BIN=/usr/bin/redis-server 8 | 9 | matrix: 10 | include: 11 | - env: TARGET=aarch64-unknown-linux-gnu 12 | - env: TARGET=x86_64-unknown-linux-gnu 13 | - env: 14 | - TARGET=x86_64-apple-darwin 15 | - REDIS_BIN=/usr/local/bin/redis-server 16 | os: osx 17 | 18 | before_install: 19 | - set -e 20 | - rustup self update 21 | - rustup install nightly 22 | - rustup default nightly 23 | - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then brew install redis; fi 24 | 25 | install: 26 | - sh ci/install.sh 27 | - source ~/.cargo/env || true 28 | 29 | script: 30 | - bash ci/script.sh 31 | 32 | after_script: set +e 33 | 34 | before_deploy: 35 | - sh ci/before_deploy.sh 36 | 37 | deploy: 38 | api_key: 39 | secure: 40 | file_glob: true 41 | file: $CRATE_NAME-$TRAVIS_TAG-$TARGET.* 42 | on: 43 | condition: $TRAVIS_RUST_VERSION = nightly 44 | tags: true 45 | provider: releases 46 | skip_cleanup: true 47 | 48 | cache: cargo 49 | before_cache: 50 | # Travis can't cache files that are not readable by "others" 51 | - chmod -R a+r $HOME/.cargo 52 | 53 | branches: 54 | only: 55 | # release tags 56 | - /^v\d+\.\d+\.\d+.*$/ 57 | - master 58 | 59 | notifications: 60 | email: 61 | on_success: never 62 | -------------------------------------------------------------------------------- /ci/install.sh: -------------------------------------------------------------------------------- 1 | set -ex 2 | 3 | main() { 4 | local target= 5 | if [ $TRAVIS_OS_NAME = linux ]; then 6 | target=x86_64-unknown-linux-musl 7 | sort=sort 8 | else 9 | target=x86_64-apple-darwin 10 | sort=gsort # for `sort --sort-version`, from brew's coreutils. 11 | fi 12 | 13 | # Builds for iOS are done on OSX, but require the specific target to be 14 | # installed. 
15 | case $TARGET in 16 | aarch64-apple-ios) 17 | rustup target install aarch64-apple-ios 18 | ;; 19 | armv7-apple-ios) 20 | rustup target install armv7-apple-ios 21 | ;; 22 | armv7s-apple-ios) 23 | rustup target install armv7s-apple-ios 24 | ;; 25 | i386-apple-ios) 26 | rustup target install i386-apple-ios 27 | ;; 28 | x86_64-apple-ios) 29 | rustup target install x86_64-apple-ios 30 | ;; 31 | esac 32 | 33 | # This fetches latest stable release 34 | local tag=$(git ls-remote --tags --refs --exit-code https://github.com/japaric/cross \ 35 | | cut -d/ -f3 \ 36 | | grep -E '^v[0.1.0-9.]+$' \ 37 | | $sort --version-sort \ 38 | | tail -n1) 39 | curl -LSfs https://japaric.github.io/trust/install.sh | \ 40 | sh -s -- \ 41 | --force \ 42 | --git japaric/cross \ 43 | --tag $tag \ 44 | --target $target 45 | } 46 | 47 | main 48 | -------------------------------------------------------------------------------- /src/backend/hasher/fnv64a.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
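//! 64-bit FNV-1a key hashing, backed by the `pruefung` crate.
//!
//! Illustrative usage (a sketch only; in practice the hasher is constructed via
//! `configure_hasher("fnv1a_64")` in the parent module):
//!
//! ```ignore
//! let hasher = Fnv64aHasher::new();
//! let point: u64 = hasher.hash(b"some-key");
//! ```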
20 | use pruefung::fnv::fnv64::Fnv64a; 21 | use std::hash::Hasher; 22 | 23 | use super::KeyHasher; 24 | 25 | pub struct Fnv64aHasher; 26 | 27 | impl Fnv64aHasher { 28 | pub fn new() -> Fnv64aHasher { Fnv64aHasher {} } 29 | } 30 | 31 | impl KeyHasher for Fnv64aHasher { 32 | fn hash(&self, buf: &[u8]) -> u64 { 33 | let mut hasher = Fnv64a::default(); 34 | hasher.write(buf); 35 | hasher.finish() 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "synchrotron" 3 | edition = "2018" 4 | version = "0.1.0" 5 | authors = ["Toby Lawrence "] 6 | edition = "2018" 7 | 8 | [profile.release] 9 | debug = true 10 | lto = true 11 | opt-level = 3 12 | 13 | [profile.bench] 14 | debug = true 15 | opt-level = 3 16 | 17 | [dependencies] 18 | lazy_static = "^1.2" 19 | phf = { version = "^0.7", features = ["macros"] } 20 | derivative = "^1.0" 21 | log = { version = "^0.4", features = ["max_level_trace", "release_max_level_info"] } 22 | slog = "^2.4" 23 | slog-async = "^2.3" 24 | slog-scope = "^4.0" 25 | slog-stdlog = "^3.0" 26 | slog-term = "^2.4" 27 | serde = "^1.0" 28 | serde_derive = "^1.0" 29 | tokio = { version = "^0.1", features = ["io", "sync", "tcp", "timer"] } 30 | tokio-executor = "^0.1" 31 | tokio-io-pool = "^0.1" 32 | futures = "^0.1" 33 | net2 = "^0.2" 34 | libc = "^0.2" 35 | signal-hook = "^0.1" 36 | futures-turnstyle = "^3.0" 37 | bytes = "^0.4" 38 | btoi = "^0.4" 39 | itoa = "^0.4" 40 | rand = "^0.6" 41 | rust-crypto = "^0.2" 42 | pruefung = "^0.2" 43 | fnv = "^1.0" 44 | slab = "^0.4" 45 | tokio-evacuate = "^1.1" 46 | warp = "^0.1" 47 | tower = { git = "https://github.com/nuclearfurnace/tower" } 48 | tower-service = { git = "https://github.com/nuclearfurnace/tower" } 49 | tower-direct-service = { git = "https://github.com/nuclearfurnace/tower" } 50 | tower-buffer = { git = "https://github.com/nuclearfurnace/tower" } 51 | metrics = { path = "../metrics/metrics" } 52 | metrics-runtime = { path = "../metrics/metrics-runtime" } 53 | 54 | [dependencies.config] 55 | version = "^0.9" 56 | default-features = false 57 | features = ["json"] 58 | 59 | [dev-dependencies] 60 | spectral = "^0.6" 61 | matches = "^0.1" 62 | -------------------------------------------------------------------------------- /src/backend/hasher/md5.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crypto::{digest::Digest, md5::Md5}; 21 | 22 | use super::KeyHasher; 23 | 24 | pub struct MD5Hasher; 25 | 26 | impl MD5Hasher { 27 | pub fn new() -> MD5Hasher { MD5Hasher {} } 28 | } 29 | 30 | impl KeyHasher for MD5Hasher { 31 | fn hash(&self, buf: &[u8]) -> u64 { 32 | let mut hasher = Md5::new(); 33 | hasher.input(buf); 34 | 35 | let mut result = [0; 16]; 36 | hasher.result(&mut result); 37 | 38 | u64::from( 39 | (u32::from(result[3]) << 24) 40 | + (u32::from(result[2]) << 16) 41 | + (u32::from(result[1]) << 8) 42 | + u32::from(result[0]), 43 | ) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/backend/hasher/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | mod fnv64a; 21 | mod md5; 22 | pub use self::{fnv64a::Fnv64aHasher, md5::MD5Hasher}; 23 | use crate::errors::CreationError; 24 | 25 | /// Basic hashing capabilities. 26 | /// 27 | /// The hash output is a 64-bit integer so that it can be used with mapping hashed keys to specific 28 | /// backend servers by index. 
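///
/// For example (a sketch; `hasher` is any `KeyHasher` and `backends` is the
/// pool's list of healthy backends), a modulo-style distributor turns the
/// hashed point into a backend index like so:
///
/// ```ignore
/// let point = hasher.hash(b"user:1234");
/// let idx = (point % backends.len() as u64) as usize;
/// ```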
29 | pub trait KeyHasher { 30 | fn hash(&self, buf: &[u8]) -> u64; 31 | } 32 | 33 | pub fn configure_hasher(hash_type: &str) -> Result, CreationError> { 34 | match hash_type { 35 | "md5" => Ok(Box::new(MD5Hasher::new())), 36 | "fnv1a_64" => Ok(Box::new(Fnv64aHasher::new())), 37 | s => Err(CreationError::InvalidResource(format!("unknown hash type {}", s))), 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/util/helpers.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crate::protocol::errors::ProtocolError; 21 | use futures::prelude::*; 22 | use tokio::net::tcp::TcpStream; 23 | 24 | /// Wraps any future that does protocol operations and hands back a TCP stream. 25 | pub struct ProcessFuture { 26 | inner: Box + Send + 'static>, 27 | } 28 | 29 | impl ProcessFuture { 30 | pub fn new(inner: F) -> ProcessFuture 31 | where 32 | F: Future + Send + 'static, 33 | { 34 | ProcessFuture { inner: Box::new(inner) } 35 | } 36 | } 37 | 38 | impl Future for ProcessFuture { 39 | type Error = ProtocolError; 40 | type Item = TcpStream; 41 | 42 | fn poll(&mut self) -> Poll { self.inner.poll() } 43 | } 44 | -------------------------------------------------------------------------------- /src/conf/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use slog::Level; 21 | 22 | mod config; 23 | pub use self::config::{Configuration, ListenerConfiguration, LoggingConfiguration, PoolConfiguration}; 24 | 25 | mod backend_addr; 26 | pub use self::backend_addr::BackendAddress; 27 | 28 | pub trait LevelExt { 29 | fn from_str(_: &str) -> Level; 30 | } 31 | 32 | impl LevelExt for Level { 33 | fn from_str(raw: &str) -> Level { 34 | match raw.to_string().to_lowercase().as_str() { 35 | "trace" => Level::Trace, 36 | "debug" => Level::Debug, 37 | "info" => Level::Info, 38 | "warn" => Level::Warning, 39 | "error" => Level::Error, 40 | "crit" => Level::Critical, 41 | "critical" => Level::Critical, 42 | _ => Level::Debug, 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /src/backend/distributor/modulo.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use super::{BackendDescriptor, Distributor}; 21 | 22 | /// Provides a modulo'd distribution of requests. 
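///
/// For a hashed point `p` and `n` tracked backends, `choose` returns the
/// descriptor at position `p % n`, so the mapping only changes when the
/// backend list itself changes via `update`.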
23 | pub struct ModuloDistributor { 24 | backend_count: usize, 25 | backends: Vec, 26 | } 27 | 28 | impl ModuloDistributor { 29 | pub fn new() -> ModuloDistributor { 30 | ModuloDistributor { 31 | backend_count: 0, 32 | backends: Vec::new(), 33 | } 34 | } 35 | } 36 | 37 | impl Distributor for ModuloDistributor { 38 | fn update(&mut self, backends: Vec) { 39 | self.backends = backends; 40 | self.backend_count = self.backends.len(); 41 | } 42 | 43 | fn choose(&self, point: u64) -> usize { 44 | let idx = point as usize % self.backend_count; 45 | self.backends[idx].idx 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/backend/distributor/random.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use super::{BackendDescriptor, Distributor}; 21 | use rand::{thread_rng, Rng}; 22 | 23 | /// Provides a randomized distribution of requests. 
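///
/// The hashed point is ignored entirely: every call to `choose` picks one of
/// the tracked backends uniformly at random via `thread_rng`.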
24 | pub struct RandomDistributor { 25 | backend_count: usize, 26 | backends: Vec, 27 | } 28 | 29 | impl RandomDistributor { 30 | pub fn new() -> RandomDistributor { 31 | RandomDistributor { 32 | backend_count: 0, 33 | backends: Vec::new(), 34 | } 35 | } 36 | } 37 | 38 | impl Distributor for RandomDistributor { 39 | fn update(&mut self, backends: Vec) { 40 | self.backends = backends; 41 | self.backend_count = self.backends.len(); 42 | } 43 | 44 | fn choose(&self, _point: u64) -> usize { 45 | let mut rng = thread_rng(); 46 | let idx = rng.gen_range(0, self.backend_count); 47 | self.backends[idx].idx 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/errors/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use std::fmt; 21 | 22 | #[derive(Debug)] 23 | pub enum CreationError { 24 | /// An invalid parameter was supplied, usually sourced from configuration. 25 | InvalidParameter(String), 26 | 27 | /// An invalid resource was requested, usually a configuration value pointing to a non-existent 28 | /// type or function. 29 | InvalidResource(String), 30 | 31 | /// When a listener fails to get created during the launch/reload phase. 
32 | ListenerSpawnFailed, 33 | } 34 | 35 | impl fmt::Display for CreationError { 36 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 37 | match self { 38 | CreationError::InvalidParameter(param) => write!(f, "invalid parameter: {}", param.as_str()), 39 | CreationError::InvalidResource(s) => write!(f, "invalid resource: {}", s.as_str()), 40 | CreationError::ListenerSpawnFailed => write!(f, "listener spawn failed"), 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/backend/distributor/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | mod modulo; 21 | mod random; 22 | pub use self::{modulo::ModuloDistributor, random::RandomDistributor}; 23 | use crate::errors::CreationError; 24 | 25 | /// A placeholder for backends. This lets us avoid holding references to the actual backends. 26 | pub struct BackendDescriptor { 27 | pub idx: usize, 28 | pub identifier: String, 29 | pub healthy: bool, 30 | } 31 | 32 | /// Distributes items amongst a set of backends. 33 | pub trait Distributor { 34 | fn update(&mut self, backends: Vec); 35 | 36 | /// Chooses a backend based on the given point. 
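    ///
    /// The value returned is the `idx` field of the chosen `BackendDescriptor`,
    /// not the descriptor's position in the distributor's internal list.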
37 | fn choose(&self, point: u64) -> usize; 38 | } 39 | 40 | pub fn configure_distributor(dist_type: &str) -> Result, CreationError> { 41 | match dist_type { 42 | "random" => Ok(Box::new(RandomDistributor::new())), 43 | "modulo" => Ok(Box::new(ModuloDistributor::new())), 44 | s => { 45 | Err(CreationError::InvalidResource(format!( 46 | "unknown distributor type {}", 47 | s 48 | ))) 49 | }, 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/backend/processor/errors.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
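//! Error types for the backend processors: `FragmentError` covers failures while
//! breaking a request apart for the backends, and `DefragmentError` covers
//! failures while reassembling the corresponding response.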
20 | use std::{error, fmt, io}; 21 | 22 | #[derive(Debug)] 23 | pub enum ProcessorError { 24 | FragmentError(String), 25 | DefragmentError(String), 26 | } 27 | 28 | impl Into for ProcessorError { 29 | fn into(self) -> io::Error { 30 | let desc = match self { 31 | ProcessorError::FragmentError(s) => s, 32 | ProcessorError::DefragmentError(s) => s, 33 | }; 34 | 35 | io::Error::new(io::ErrorKind::Other, desc) 36 | } 37 | } 38 | 39 | impl error::Error for ProcessorError { 40 | fn description(&self) -> &str { 41 | match self { 42 | ProcessorError::FragmentError(s) => s.as_str(), 43 | ProcessorError::DefragmentError(s) => s.as_str(), 44 | } 45 | } 46 | 47 | fn cause(&self) -> Option<&error::Error> { None } 48 | } 49 | 50 | impl fmt::Display for ProcessorError { 51 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 52 | match self { 53 | ProcessorError::FragmentError(s) => write!(f, "fragment error: {}", s.as_str()), 54 | ProcessorError::DefragmentError(s) => write!(f, "defragment error: {}", s.as_str()), 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/conf/backend_addr.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use serde::de::{Deserialize, Deserializer, Error}; 21 | use std::{fmt, net::SocketAddr}; 22 | 23 | #[derive(Debug, Clone)] 24 | pub struct BackendAddress { 25 | pub address: SocketAddr, 26 | pub identifier: String, 27 | } 28 | 29 | impl fmt::Display for BackendAddress { 30 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}({})", self.address, self.identifier) } 31 | } 32 | 33 | impl<'de> Deserialize<'de> for BackendAddress { 34 | fn deserialize(deserializer: D) -> Result 35 | where 36 | D: Deserializer<'de>, 37 | { 38 | let s = String::deserialize(deserializer)?; 39 | let mut parts = s.split(" "); 40 | 41 | let address = parts 42 | .next() 43 | .ok_or(D::Error::custom("missing address"))? 
44 | .parse::() 45 | .map_err(D::Error::custom)?; 46 | let identifier = parts 47 | .next() 48 | .map(|s| s.to_string()) 49 | .unwrap_or_else(|| address.to_string().clone()); 50 | 51 | if parts.next() != None { 52 | return Err(D::Error::custom("unexpected element")); 53 | } 54 | 55 | Ok(BackendAddress { address, identifier }) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/routing/fixed.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crate::{ 21 | backend::processor::Processor, 22 | common::{AssignedRequests, EnqueuedRequest, EnqueuedRequests, Message}, 23 | }; 24 | use futures::prelude::*; 25 | use tower_service::Service; 26 | 27 | #[derive(Clone)] 28 | pub struct FixedRouter 29 | where 30 | P: Processor + Clone + Send + 'static, 31 | P::Message: Message + Send, 32 | S: Service> + Clone, 33 | { 34 | processor: P, 35 | inner: S, 36 | } 37 | 38 | impl FixedRouter 39 | where 40 | P: Processor + Clone + Send + 'static, 41 | P::Message: Message + Send, 42 | S: Service> + Clone, 43 | { 44 | pub fn new(processor: P, inner: S) -> FixedRouter { FixedRouter { processor, inner } } 45 | } 46 | 47 | impl Service> for FixedRouter 48 | where 49 | P: Processor + Clone + Send + 'static, 50 | P::Message: Message + Send, 51 | S: Service> + Clone, 52 | { 53 | type Error = S::Error; 54 | type Future = S::Future; 55 | type Response = S::Response; 56 | 57 | fn poll_ready(&mut self) -> Poll<(), Self::Error> { self.inner.poll_ready() } 58 | 59 | fn call(&mut self, req: AssignedRequests) -> Self::Future { 60 | let transformed = req.into_iter().map(|(id, msg)| EnqueuedRequest::new(id, msg)).collect(); 61 | self.inner.call(transformed) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # synchrotron 2 | 3 | [![conduct-badge][]][conduct] [![travis-badge][]][travis] [![release-badge][]][releases] [![license-badge][]](#license) 4 | 5 | [conduct-badge]: https://img.shields.io/badge/%E2%9D%A4-code%20of%20conduct-blue.svg 6 | [travis-badge]: https://img.shields.io/travis/nuclearfurnace/synchrotron/master.svg 7 | [release-badge]: https://img.shields.io/github/release-date/nuclearfurnace/synchrotron.svg 8 
| [license-badge]: https://img.shields.io/badge/License-MIT-green.svg
9 | [conduct]: https://github.com/nuclearfurnace/synchrotron/blob/master/CODE_OF_CONDUCT.md
10 | [releases]: https://github.com/nuclearfurnace/synchrotron/releases
11 | [travis]: https://travis-ci.org/nuclearfurnace/synchrotron
12 |
13 | Synchrotron is a caching layer load balancer, in the spirit of [Twemproxy](https://github.com/twitter/twemproxy) and [mcrouter](https://github.com/facebook/mcrouter).
14 |
15 | # archived!
16 |
17 | After going another direction at work, this project is now archived! My hope is that some of the design can act as an inspiration, although given the heavy `futures@0.1` basis of the project, it may be less relevant in the async/await world than I think.
18 |
19 | # Why another one?
20 |
21 | There are a few things here:
22 | - I wanted to write a real piece of software in Rust, not just toy programs!
23 | - Twemproxy is basically deprecated
24 | - mcrouter is advanced but only supports memcached
25 |
26 | Essentially, this project aims to be a mix of Twemproxy and mcrouter: memcached _and_ Redis support, with advanced features like traffic shadowing, pool warm-up, and online reconfiguration... while being written in Rust: a systems programming language whose community, IMO, is second to none.
27 |
28 | # What's done?
29 |
30 | Here is a non-exhaustive checklist of what's done and what is a serious target:
31 |
32 | - [x] Redis support
33 | - [ ] memcached support
34 | - [x] Redis pipelining support
35 | - [x] basic connection multiplexing (M client conns over N server conns; configurable server connection limit)
36 | - [x] advanced connection multiplexing (server backoff after failure, timeout on backend operations, etc.)
37 | - [x] basic routing strategies (single pool, traffic shadowing)\*
38 | - [ ] advanced routing strategies (warm up [cold before warm], prefix routing, fallthrough, majority, fastest response)
39 | - [x] distribution (modulo vs ketama) and hashing (md5 vs sha vs fnv1a) support\*
40 | - [x] online reconfiguration
41 | - [x] metrics collection\*
42 | - [ ] TLS support
43 |
44 | \* - while the scaffolding is present, not every option is implemented yet, e.g. not all hash methods are available
45 |
46 | ## License
47 |
48 | Licensed under the MIT license ([LICENSE](LICENSE) or http://opensource.org/licenses/MIT)
49 |
--------------------------------------------------------------------------------
/src/routing/errors.rs:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2018 Nuclear Furnace
2 | //
3 | // Permission is hereby granted, free of charge, to any person obtaining a copy
4 | // of this software and associated documentation files (the "Software"), to deal
5 | // in the Software without restriction, including without limitation the rights
6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | // copies of the Software, and to permit persons to whom the Software is
8 | // furnished to do so, subject to the following conditions:
9 | //
10 | // The above copyright notice and this permission notice shall be included in all
11 | // copies or substantial portions of the Software.
12 | //
13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crate::backend::processor::ProcessorError; 21 | use std::{error, fmt, io}; 22 | 23 | #[derive(Debug)] 24 | pub enum RouterError { 25 | BadRequest(String), 26 | BadResponse(String), 27 | } 28 | 29 | impl RouterError { 30 | pub fn from_processor(e: &ProcessorError) -> Self { 31 | let desc = e.to_string(); 32 | match e { 33 | ProcessorError::FragmentError(_) => RouterError::BadRequest(desc), 34 | ProcessorError::DefragmentError(_) => RouterError::BadResponse(desc), 35 | } 36 | } 37 | } 38 | 39 | impl From for RouterError { 40 | fn from(e: ProcessorError) -> Self { RouterError::from_processor(&e) } 41 | } 42 | 43 | impl Into for RouterError { 44 | fn into(self) -> io::Error { 45 | let desc = match self { 46 | RouterError::BadRequest(s) => s, 47 | RouterError::BadResponse(s) => s, 48 | }; 49 | 50 | io::Error::new(io::ErrorKind::Other, desc) 51 | } 52 | } 53 | 54 | impl error::Error for RouterError { 55 | fn description(&self) -> &str { 56 | match self { 57 | RouterError::BadRequest(s) => s.as_str(), 58 | RouterError::BadResponse(s) => s.as_str(), 59 | } 60 | } 61 | 62 | fn cause(&self) -> Option<&error::Error> { None } 63 | } 64 | 65 | impl fmt::Display for RouterError { 66 | fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { 67 | match self { 68 | RouterError::BadRequest(s) => write!(f, "bad request: {}", s.as_str()), 69 | RouterError::BadResponse(s) => write!(f, "bad response: {}", s.as_str()), 70 | } 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /src/util/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
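//! Small future/stream utilities used throughout synchrotron: opportunistic
//! batching, tagging a future with a caller-supplied start marker, and erasing
//! a future's types down to `Item = ()` / `Error = ()`.
//!
//! Sketch of how the extension traits below compose (illustrative only):
//!
//! ```ignore
//! let batches = incoming.batch(128);    // yields Vec<_>, up to 128 items per poll
//! let timed = some_future.timed(start); // resolves to (start, output)
//! let erased = timed.untyped();         // Item = (), Error = ()
//! ```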
20 | use futures::{future::Future, stream::Stream}; 21 | 22 | mod batch; 23 | mod timed; 24 | mod untyped; 25 | pub use self::{batch::Batch, timed::Timed, untyped::Untyped}; 26 | 27 | mod helpers; 28 | pub use self::helpers::ProcessFuture; 29 | 30 | mod container; 31 | pub use self::container::IntegerMappedVec; 32 | 33 | impl StreamExt for T where T: Stream {} 34 | 35 | /// An extension trait for `Stream`s that provides necessary combinators specific to synchrotron. 36 | pub trait StreamExt: Stream { 37 | /// Converts this stream into a batched stream. 38 | /// 39 | /// Items from the underlying stream will be batched, up to `capacity`, and returned as a 40 | /// `Vec`. 41 | /// 42 | /// Unlike standard combinators that perform grouping, batches are collected opportunistically. 43 | /// The combinator will take up to `capacity` items from the underlying stream in a single 44 | /// `poll`, but will return the currently batched items -- if any have been batched since the 45 | /// last batch was emitted -- either when capacity is reached or the underlying stream reports 46 | /// that it is no longer ready. 47 | /// 48 | /// If the underlying stream signals that it is not ready, and no items have been batched, then 49 | /// the stream will emit nothing. 50 | fn batch(self, capacity: usize) -> Batch 51 | where 52 | Self: Sized, 53 | Self::Item: Sizable, 54 | { 55 | Batch::new(self, capacity) 56 | } 57 | } 58 | 59 | impl FutureExt for T where T: Future {} 60 | 61 | pub trait FutureExt: Future { 62 | fn timed(self, start: u64) -> Timed 63 | where 64 | Self: Sized, 65 | { 66 | Timed::new(self, start) 67 | } 68 | 69 | fn untyped(self) -> Untyped 70 | where 71 | Self: Sized, 72 | { 73 | Untyped::new(self) 74 | } 75 | } 76 | 77 | pub trait Sizable { 78 | fn size(&self) -> usize; 79 | } 80 | -------------------------------------------------------------------------------- /src/protocol/errors.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
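//! Protocol-level error types.
//!
//! `client_closed` lets callers tell a client that simply reset its connection
//! apart from other I/O failures; only `ConnectionReset` is treated as a clean
//! client-side close.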
20 | use std::{error, fmt, io}; 21 | use tokio::sync::oneshot; 22 | 23 | #[derive(Debug)] 24 | pub enum ProtocolError { 25 | IoError(io::Error), 26 | InvalidProtocol, 27 | BackendClosedPrematurely, 28 | } 29 | 30 | impl ProtocolError { 31 | pub fn client_closed(&self) -> bool { 32 | match self { 33 | ProtocolError::IoError(e) => { 34 | match e.kind() { 35 | io::ErrorKind::ConnectionReset => true, 36 | _ => false, 37 | } 38 | }, 39 | _ => false, 40 | } 41 | } 42 | } 43 | 44 | impl error::Error for ProtocolError { 45 | fn description(&self) -> &str { 46 | match *self { 47 | ProtocolError::IoError(ref e) => e.description(), 48 | ProtocolError::InvalidProtocol => "invalid protocol", 49 | ProtocolError::BackendClosedPrematurely => "backend closed prematurely", 50 | } 51 | } 52 | 53 | fn cause(&self) -> Option<&error::Error> { None } 54 | } 55 | 56 | impl fmt::Display for ProtocolError { 57 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 58 | match *self { 59 | ProtocolError::IoError(ref ie) => fmt::Display::fmt(ie, f), 60 | ProtocolError::InvalidProtocol => write!(f, "invalid protocol"), 61 | ProtocolError::BackendClosedPrematurely => write!(f, "backend closed prematurely"), 62 | } 63 | } 64 | } 65 | 66 | impl From for ProtocolError { 67 | fn from(e: io::Error) -> ProtocolError { ProtocolError::IoError(e) } 68 | } 69 | 70 | impl From for ProtocolError { 71 | fn from(_: oneshot::error::RecvError) -> ProtocolError { 72 | // It's not the most descriptive, but we really only get receiver errors when 73 | // we fail to finish a batch to respond back with either the value or the error 74 | // from the backend, so it sort of fits. 75 | ProtocolError::BackendClosedPrematurely 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /src/conf/config.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
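A small, hypothetical usage sketch for `ProtocolError::client_closed` above (the function and log target are illustrative, not from the crate): a reset client connection is routine and should not be reported as an error.

```rust
fn log_transport_error(e: &ProtocolError) {
    if e.client_closed() {
        // The client simply hung up (ConnectionReset); nothing to report.
        return;
    }

    error!("[transport] unexpected protocol error: {}", e);
}
```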
20 | use super::BackendAddress; 21 | use config::{Config, ConfigError, File}; 22 | use std::{collections::HashMap, env}; 23 | 24 | #[derive(Deserialize, Default, Clone, Debug)] 25 | pub struct Configuration { 26 | pub stats_addr: String, 27 | pub logging: LoggingConfiguration, 28 | pub listeners: HashMap, 29 | } 30 | 31 | #[derive(Deserialize, Default, Clone, Debug)] 32 | pub struct LoggingConfiguration { 33 | pub level: String, 34 | } 35 | 36 | #[derive(Deserialize, Default, Clone, Debug)] 37 | pub struct ListenerConfiguration { 38 | pub protocol: String, 39 | pub address: String, 40 | pub reload_timeout_ms: Option, 41 | pub pools: HashMap, 42 | pub routing: HashMap, 43 | } 44 | 45 | #[derive(Deserialize, Default, Clone, Debug)] 46 | pub struct PoolConfiguration { 47 | pub addresses: Vec, 48 | pub options: Option>, 49 | } 50 | 51 | impl Configuration { 52 | pub fn new() -> Result { 53 | let mut s = Config::new(); 54 | 55 | // TODO: the hierarchy stuff doesn't work IIRC. re-examine that and figure out if my 56 | // memory is just shit or if it actually doesn't work. 57 | 58 | // Set some defaults. 59 | s.set_default("logging.level", "info")?; 60 | // how tf do we make this work? 61 | // s.set_default("listeners", Vec::::new())?; 62 | s.set_default("stats_addr", "0.0.0.0:16161")?; 63 | 64 | // Now load in any configuration files we can find. 65 | s.merge(File::with_name("config/synchrotron").required(false))?; 66 | 67 | let env = env::var("ENV").unwrap_or_else(|_| "dev".into()); 68 | s.merge(File::with_name(&format!("config/synchrotron.{}", env)).required(false))?; 69 | s.merge(File::with_name("config/synchrotron.local").required(false))?; 70 | 71 | let conf_override = env::var("SYNC_CONFIG").ok(); 72 | if let Some(path) = conf_override { 73 | s.merge(File::with_name(path.as_str()).required(false))?; 74 | } 75 | 76 | s.try_into() 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # The Code of Conduct 2 | 3 | This document is based on the [Rust Code of Conduct](https://www.rust-lang.org/conduct.html) and outlines the standard of conduct which is both expected and enforced as part of this project. 4 | 5 | ## Conduct 6 | 7 | **Contact**: [toby@nuclearfurnace.com](mailto:toby@nuclearfurnace.com) 8 | 9 | * We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic. 10 | * Avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all. 11 | * Please be kind and courteous. There's no need to be mean or rude. 12 | * Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer. 13 | * Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. 14 | * We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term "harassment" as including the definition in the Citizen Code of Conduct; if you have any lack of clarity about what might be included in that concept, please read their definition. 
In particular, we don't tolerate behavior that excludes people in socially marginalized groups. 15 | * Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the repository Owners immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back. 16 | * Likewise any spamming, trolling, flaming, baiting or other attention-stealing behaviour is not welcome. 17 | 18 | ## Moderation 19 | 20 | These are the policies for upholding our community's standards of conduct. If you feel that a thread needs moderation, please use the contact information above, or mention @tobz in the thread. 21 | 22 | 1. Remarks that violate this Code of Conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.) 23 | 2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed. 24 | 25 | In the Rust community we strive to go the extra step to look out for each other. Don't just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely. 26 | 27 | And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust. 28 | -------------------------------------------------------------------------------- /src/service/errors.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
20 | use crate::backend::processor::ProcessorError; 21 | use futures::prelude::*; 22 | use std::fmt; 23 | use tower_service::Service; 24 | 25 | /// Error type for `Pipeline`. 26 | pub enum PipelineError 27 | where 28 | T: Sink + Stream, 29 | S: Service, 30 | { 31 | /// The underlying transport failed to produce a request. 32 | TransportReceive(::Error), 33 | 34 | /// The underlying transport failed while attempting to send a response. 35 | TransportSend(::SinkError), 36 | 37 | /// The underlying service failed to process a request. 38 | Service(S::Error), 39 | } 40 | 41 | impl fmt::Display for PipelineError 42 | where 43 | T: Sink + Stream, 44 | ::SinkError: fmt::Display, 45 | ::Error: fmt::Display, 46 | S: Service, 47 | >::Error: fmt::Display, 48 | { 49 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 50 | match *self { 51 | PipelineError::TransportReceive(ref se) => fmt::Display::fmt(se, f), 52 | PipelineError::TransportSend(ref se) => fmt::Display::fmt(se, f), 53 | PipelineError::Service(ref se) => fmt::Display::fmt(se, f), 54 | } 55 | } 56 | } 57 | 58 | impl fmt::Debug for PipelineError 59 | where 60 | T: Sink + Stream, 61 | ::SinkError: fmt::Debug, 62 | ::Error: fmt::Debug, 63 | S: Service, 64 | >::Error: fmt::Debug, 65 | { 66 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 67 | match *self { 68 | PipelineError::TransportReceive(ref se) => write!(f, "TransportRecv({:?})", se), 69 | PipelineError::TransportSend(ref se) => write!(f, "TransportSend({:?})", se), 70 | PipelineError::Service(ref se) => write!(f, "Service({:?})", se), 71 | } 72 | } 73 | } 74 | 75 | impl PipelineError 76 | where 77 | T: Sink + Stream, 78 | S: Service, 79 | { 80 | pub fn from_sink_error(e: ::SinkError) -> Self { PipelineError::TransportSend(e) } 81 | 82 | pub fn from_stream_error(e: ::Error) -> Self { PipelineError::TransportReceive(e) } 83 | 84 | pub fn from_service_error(e: >::Error) -> Self { PipelineError::Service(e) } 85 | } 86 | 87 | impl From for PipelineError 88 | where 89 | T: Sink + Stream, 90 | S: Service, 91 | { 92 | fn from(e: ProcessorError) -> PipelineError { e.into() } 93 | } 94 | -------------------------------------------------------------------------------- /src/backend/processor/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
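The three `PipelineError` variants above map one-to-one onto the places a pipelined connection can fail. The sketch below is hypothetical: it is not the crate's `Pipeline`, and it assumes `PipelineError` is generic over the transport `T` and service `S` exactly as its bounds suggest. It only shows where each constructor is meant to be used.

```rust
use futures::prelude::*;
use tower_service::Service;

fn pump_once<T, S>(transport: &mut T, service: &mut S) -> Result<(), PipelineError<T, S>>
where
    T: Sink + Stream,
    S: Service<<T as Stream>::Item>,
{
    // Failure to flush queued responses surfaces as TransportSend.
    transport
        .poll_complete()
        .map_err(PipelineError::from_sink_error)?;

    // Failure to read the next request surfaces as TransportReceive.
    if let Async::Ready(Some(request)) = transport.poll().map_err(PipelineError::from_stream_error)? {
        // When the returned future is eventually driven, its error would be
        // wrapped via from_service_error into the Service variant.
        let _pending = service.call(request);
    }

    Ok(())
}
```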
20 | mod errors; 21 | pub use self::errors::ProcessorError; 22 | 23 | use crate::{ 24 | backend::message_queue::MessageState, 25 | common::{EnqueuedRequests, Message}, 26 | protocol::errors::ProtocolError, 27 | util::ProcessFuture, 28 | }; 29 | use futures::future::{Either, FutureResult}; 30 | use std::{error::Error, net::SocketAddr}; 31 | use tokio::net::tcp::TcpStream; 32 | 33 | /// An existing or pending TcpStream. 34 | pub type TcpStreamFuture = Either, ProcessFuture>; 35 | 36 | /// Cache-specific logic for processing requests and interacting with backends. 37 | pub trait Processor 38 | where 39 | Self::Message: Message + Clone, 40 | { 41 | type Message; 42 | type Transport; 43 | 44 | /// Fragments a client's requests into, potentially, multiple subrequests. 45 | /// 46 | /// This allows multi-operation requests -- multi-key lookups, etc -- to be sharded to the 47 | /// correct backend server when routed. 48 | fn fragment_messages(&self, _: Vec) -> Result, ProcessorError>; 49 | 50 | /// Defragments a client's subrequests into a single request. 51 | /// 52 | /// This is used to do any coalesing necessary to assemble multiple subrequests -- generated by 53 | /// `fragment_messages` -- back into a cohesive response that the client will understand. 54 | fn defragment_messages(&self, _: Vec<(MessageState, Self::Message)>) -> Result; 55 | 56 | /// Converts the given error into a corresponding format that can be sent to the client. 57 | fn get_error_message(&self, _: Box) -> Self::Message; 58 | 59 | /// Converts the given error string into a corresponding format the can be sent to the client. 60 | fn get_error_message_str(&self, _: &str) -> Self::Message; 61 | 62 | /// Wraps the given TCP stream with a protocol-specific transport layer, allowing the caller to 63 | /// extract protocol-specific messages, as well as send them, via the `Stream` and `Sink` 64 | /// implementations. 65 | fn get_transport(&self, _: TcpStream) -> Self::Transport; 66 | 67 | /// Connects to the given address via TCP and performs any necessary processor-specific 68 | /// initialization. 69 | fn preconnect(&self, _: &SocketAddr, _: bool) -> ProcessFuture; 70 | 71 | /// Processes a batch of requests, running the necessary operations against the given TCP 72 | /// stream. 73 | fn process(&self, _: EnqueuedRequests, _: TcpStreamFuture) -> ProcessFuture; 74 | } 75 | -------------------------------------------------------------------------------- /src/backend/health.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crate::util::FutureExt; 21 | use futures::{future::ok, task, Future}; 22 | use std::time::{Duration, Instant}; 23 | use tokio::timer::Delay; 24 | 25 | pub struct BackendHealth { 26 | cooloff_enabled: bool, 27 | cooloff_period_ms: u64, 28 | error_limit: usize, 29 | error_count: usize, 30 | in_cooloff: bool, 31 | epoch: u64, 32 | cooloff_done_at: Instant, 33 | } 34 | 35 | impl BackendHealth { 36 | pub fn new(cooloff_enabled: bool, cooloff_period_ms: u64, error_limit: usize) -> BackendHealth { 37 | debug!( 38 | "[backend health] cooloff enabled: {}, cooloff period (ms): {}, error limit: {}", 39 | cooloff_enabled, cooloff_period_ms, error_limit 40 | ); 41 | 42 | BackendHealth { 43 | cooloff_enabled, 44 | cooloff_period_ms, 45 | error_limit, 46 | error_count: 0, 47 | in_cooloff: false, 48 | epoch: 0, 49 | cooloff_done_at: Instant::now(), 50 | } 51 | } 52 | 53 | pub fn is_healthy(&mut self) -> bool { 54 | if !self.cooloff_enabled || !self.in_cooloff { 55 | return true; 56 | } 57 | 58 | if self.cooloff_done_at < Instant::now() { 59 | self.error_count = 0; 60 | self.in_cooloff = false; 61 | self.epoch += 1; 62 | 63 | return true; 64 | } 65 | 66 | false 67 | } 68 | 69 | pub fn epoch(&self) -> u64 { self.epoch } 70 | 71 | pub fn increment_error(&mut self) { 72 | if !self.cooloff_enabled { 73 | return; 74 | } 75 | 76 | self.error_count += 1; 77 | 78 | // If we're over the error threshold, put ourselves into cooloff. 79 | if self.error_count >= self.error_limit && !self.in_cooloff { 80 | debug!("[health] error count over limit, setting cooloff"); 81 | self.in_cooloff = true; 82 | self.epoch += 1; 83 | self.fire_cooloff_check(); 84 | } 85 | } 86 | 87 | fn fire_cooloff_check(&mut self) { 88 | // Mark when our cooloff period should be lifted, and trigger a task notification to fire 89 | // once that deadline has passed: our health will be checked, and thus we can reenable 90 | // ourselves. 91 | let deadline = Instant::now() + Duration::from_millis(self.cooloff_period_ms); 92 | self.cooloff_done_at = deadline; 93 | 94 | let current_task = task::current(); 95 | let task = Delay::new(deadline) 96 | .then(move |_| { 97 | debug!("[health] resetting cooloff"); 98 | current_task.notify(); 99 | ok::<_, ()>(()) 100 | }) 101 | .untyped(); 102 | 103 | tokio::spawn(task); 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /src/backend/errors.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 
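The cooloff flow in `BackendHealth` above is driven entirely by the caller, so a short hypothetical sketch may help (function names and numbers are illustrative; note that tripping the error limit registers a tokio timer and notifies the current task, so this only works from inside a future running on a tokio runtime).

```rust
// Back off for 5 seconds after 3 consecutive errors.
fn new_health() -> BackendHealth {
    BackendHealth::new(true, 5_000, 3)
}

fn on_backend_error(health: &mut BackendHealth) {
    // The third consecutive error flips the backend into cooloff and bumps
    // the epoch; until the deadline passes, is_healthy() reports false.
    health.increment_error();
}

fn can_dispatch(health: &mut BackendHealth) -> bool {
    // Once the cooloff deadline has elapsed, this resets the error count,
    // bumps the epoch again, and the backend is usable once more.
    health.is_healthy()
}
```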
12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crate::{backend::processor::ProcessorError, protocol::errors::ProtocolError}; 21 | use std::{ 22 | error::{self, Error}, 23 | fmt, io, 24 | }; 25 | use tokio::sync::oneshot; 26 | 27 | #[derive(Debug)] 28 | pub enum BackendError { 29 | Internal(String), 30 | Protocol(ProtocolError), 31 | Io(io::Error), 32 | } 33 | 34 | #[derive(Debug)] 35 | pub enum PoolError { 36 | Internal(String), 37 | Backend(BackendError), 38 | } 39 | 40 | impl From for BackendError { 41 | fn from(e: ProcessorError) -> Self { 42 | let desc = e.to_string(); 43 | match e { 44 | ProcessorError::FragmentError(_) => BackendError::Internal(desc), 45 | ProcessorError::DefragmentError(_) => BackendError::Internal(desc), 46 | } 47 | } 48 | } 49 | 50 | impl From for BackendError { 51 | fn from(e: ProtocolError) -> Self { BackendError::Protocol(e) } 52 | } 53 | 54 | impl Into for BackendError { 55 | fn into(self) -> io::Error { 56 | match self { 57 | BackendError::Internal(s) => io::Error::new(io::ErrorKind::Other, s), 58 | BackendError::Protocol(e) => { 59 | match e { 60 | ProtocolError::IoError(ie) => ie, 61 | x => io::Error::new(io::ErrorKind::Other, x.description()), 62 | } 63 | }, 64 | BackendError::Io(e) => e, 65 | } 66 | } 67 | } 68 | 69 | impl From for BackendError { 70 | fn from(e: io::Error) -> BackendError { BackendError::Io(e) } 71 | } 72 | 73 | impl From for BackendError { 74 | fn from(_: oneshot::error::RecvError) -> BackendError { BackendError::Internal("receive failed".to_owned()) } 75 | } 76 | 77 | impl error::Error for BackendError { 78 | fn description(&self) -> &str { 79 | match self { 80 | BackendError::Internal(s) => s.as_str(), 81 | BackendError::Protocol(e) => e.description(), 82 | BackendError::Io(e) => e.description(), 83 | } 84 | } 85 | 86 | fn cause(&self) -> Option<&error::Error> { None } 87 | } 88 | 89 | impl fmt::Display for BackendError { 90 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 91 | use std::error::Error; 92 | 93 | match self { 94 | BackendError::Internal(s) => write!(f, "internal error: {}", s.as_str()), 95 | BackendError::Protocol(pe) => write!(f, "protocol: {}", pe), 96 | BackendError::Io(e) => write!(f, "internal error: {}", e.description()), 97 | } 98 | } 99 | } 100 | 101 | impl From for PoolError { 102 | fn from(e: BackendError) -> PoolError { PoolError::Backend(e) } 103 | } 104 | 105 | impl From for PoolError { 106 | fn from(_: oneshot::error::RecvError) -> PoolError { PoolError::Internal("receive failed".to_owned()) } 107 | } 108 | 109 | impl fmt::Display for PoolError { 110 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 111 | match self { 112 | PoolError::Internal(s) => write!(f, "internal error: {}", s.as_str()), 113 | PoolError::Backend(be) => write!(f, "backend: {}", be), 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /src/util/container.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 
| // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use std::{ 21 | iter::{Flatten, Iterator}, 22 | vec::IntoIter as VecIntoIter, 23 | }; 24 | 25 | type KeyValuePair = (usize, Vec); 26 | type FlattenedIterator = Flatten>>; 27 | 28 | pub struct IntegerMappedVec { 29 | items: Vec>>, 30 | size: u32, 31 | mask: usize, 32 | count: usize, 33 | } 34 | 35 | impl IntegerMappedVec { 36 | pub fn new() -> Self { 37 | let mut vec = IntegerMappedVec { 38 | items: Vec::new(), 39 | size: 0, 40 | count: 0, 41 | mask: 0, 42 | }; 43 | vec.expand(); 44 | vec 45 | } 46 | 47 | pub fn push(&mut self, key: usize, value: V) { 48 | let idx = self.calculate_index(key); 49 | let vals = &mut self.items[idx]; 50 | match vals.iter().position(|x| x.0 == key) { 51 | Some(vidx) => { 52 | let ivals = &mut vals[vidx].1; 53 | ivals.push(value); 54 | }, 55 | None => { 56 | let mut ivals = Vec::new(); 57 | ivals.push(value); 58 | vals.push((key, ivals)); 59 | self.count += 1; 60 | }, 61 | } 62 | 63 | if (self.count & 4) == 4 { 64 | self.ensure_load_rate(); 65 | } 66 | } 67 | 68 | #[inline] 69 | fn calculate_index(&self, key: usize) -> usize { 70 | let a = 11_400_714_819_323_198_549usize; 71 | let hash = a.wrapping_mul(key); 72 | (hash & self.mask) as usize 73 | } 74 | 75 | #[inline] 76 | fn limit(&self) -> usize { 2usize.pow(self.size) as usize } 77 | 78 | fn expand(&mut self) { 79 | self.size += 1; 80 | let new_limit = self.limit(); 81 | self.mask = (new_limit as usize) - 1; 82 | 83 | let mut vec = Vec::new(); 84 | vec.append(&mut self.items); 85 | 86 | for _ in 0..new_limit { 87 | // pretty sure we can optimize this to allocate the minimum required up front 88 | self.items.push(Vec::with_capacity(0)); 89 | } 90 | 91 | vec.drain(0..).for_each(|mut values| { 92 | values.drain(0..).for_each(|kv| { 93 | let idx = self.calculate_index(kv.0); 94 | let vals = &mut self.items[idx]; 95 | vals.push(kv); 96 | }); 97 | }); 98 | } 99 | 100 | fn ensure_load_rate(&mut self) { 101 | while ((self.count * 100) / self.items.len()) > 70 { 102 | self.expand(); 103 | } 104 | } 105 | } 106 | 107 | pub struct IntoIter { 108 | inner: FlattenedIterator>, 109 | } 110 | 111 | impl IntoIter { 112 | pub fn new(items: Vec>>) -> IntoIter { 113 | IntoIter { 114 | inner: items.into_iter().flatten(), 115 | } 116 | } 117 | } 118 | 119 | impl Iterator for IntoIter { 120 | type Item = (usize, Vec); 121 | 122 | #[inline] 123 | fn next(&mut self) -> Option<(usize, Vec)> { 
self.inner.next() } 124 | } 125 | 126 | impl IntoIterator for IntegerMappedVec { 127 | type IntoIter = IntoIter; 128 | type Item = (usize, Vec); 129 | 130 | fn into_iter(self) -> Self::IntoIter { IntoIter::new(self.items) } 131 | } 132 | -------------------------------------------------------------------------------- /src/common.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crate::util::Sizable; 21 | use bytes::BytesMut; 22 | use tokio::sync::oneshot::{channel, Receiver, Sender}; 23 | 24 | pub trait Message: Sizable { 25 | fn key(&self) -> &[u8]; 26 | fn is_inline(&self) -> bool; 27 | fn into_buf(self) -> BytesMut; 28 | } 29 | 30 | /// Message response types for a queued message. 31 | #[derive(Debug)] 32 | pub enum MessageResponse { 33 | /// The message ultimately "failed". This happens if a queued message is dropped before having 34 | /// a response sent for it, which may happen if an error occurs during the backend read, etc. 35 | Failed, 36 | 37 | /// The message was processed and a response was received. 38 | Complete(T), 39 | } 40 | 41 | // Core types. 42 | // 43 | // These define the transformation between raw messages that come in over the transport and the 44 | // interstitial types as they're batched, fragmented, etc. 
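//
// Concretely (as implemented below): each request is paired with a numeric id
// to form an AssignedRequest; wrapping it in an EnqueuedRequest hands the
// caller a PendingResponse -- the oneshot receiver -- which resolves to
// Complete(response) once the backend fulfills the request, or to Failed if
// the EnqueuedRequest is dropped first (see its Drop impl).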
45 | pub type AssignedRequest = (usize, T); 46 | pub type AssignedResponse = (usize, MessageResponse); 47 | pub type AssignedRequests = Vec>; 48 | pub type AssignedResponses = Vec>; 49 | 50 | pub type PendingResponse = Receiver>; 51 | pub type PendingResponses = Vec>; 52 | pub type EnqueuedRequests = Vec>; 53 | 54 | pub struct EnqueuedRequest { 55 | id: usize, 56 | request: Option, 57 | has_response: bool, 58 | done: bool, 59 | tx: Option>>, 60 | } 61 | 62 | impl EnqueuedRequest { 63 | pub fn new(id: usize, request: T) -> EnqueuedRequest { 64 | EnqueuedRequest { 65 | id, 66 | request: Some(request), 67 | tx: None, 68 | has_response: true, 69 | done: false, 70 | } 71 | } 72 | 73 | pub fn without_response(request: T) -> EnqueuedRequest { 74 | EnqueuedRequest { 75 | id: 0, 76 | request: Some(request), 77 | tx: None, 78 | has_response: false, 79 | done: true, 80 | } 81 | } 82 | 83 | pub fn key(&self) -> &[u8] { 84 | // Pass-through for `Message::key` because we really don't want to expose the 85 | // entire Message trait over ourselves, as one of the methods allows taking 86 | // the request by consuming self. 87 | self.request.as_ref().expect("tried to get key for empty request").key() 88 | } 89 | 90 | pub fn consume(&mut self) -> T { self.request.take().unwrap() } 91 | 92 | pub fn fulfill(&mut self, response: T) { 93 | if self.done { 94 | return; 95 | } 96 | 97 | let _ = self 98 | .tx 99 | .take() 100 | .expect("tried to send response to uninitialized receiver") 101 | .send((self.id, MessageResponse::Complete(response))); 102 | self.done = true; 103 | } 104 | 105 | pub fn get_response_rx(&mut self) -> Option> { 106 | if self.has_response { 107 | let (tx, rx) = channel(); 108 | self.tx = Some(tx); 109 | self.done = false; 110 | self.has_response = false; 111 | 112 | return Some(rx); 113 | } 114 | 115 | None 116 | } 117 | } 118 | 119 | impl Drop for EnqueuedRequest { 120 | fn drop(&mut self) { 121 | // The drop guard is used to make sure we always send back a response to the upper 122 | // layers even if a backend has an error that kills an entire batch of requests. 123 | if !self.done { 124 | let _ = self.tx.take().unwrap().send((self.id, MessageResponse::Failed)); 125 | } 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/protocol/redis/filtering.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use phf::phf_set; 21 | 22 | static VALID_COMMANDS: phf::Set<&'static str> = phf_set! { 23 | "DEL", 24 | "DUMP", 25 | "EXISTS", 26 | "EXPIRE", 27 | "EXPIREAT", 28 | "PERSIST", 29 | "PEXPIRE", 30 | "PEXPIREAT", 31 | "PTTL", 32 | "RESTORE", 33 | "SORT", 34 | "TTL", 35 | "TYPE", 36 | "APPEND", 37 | "BITCOUNT", 38 | "BITPOS", 39 | "DECR", 40 | "DECRBY", 41 | "GET", 42 | "GETBIT", 43 | "GETRANGE", 44 | "GETSET", 45 | "INCR", 46 | "INCRBY", 47 | "INCRBYFLOAT", 48 | "MGET", 49 | "MSET", 50 | "PSETEX", 51 | "SET", 52 | "SETBIT", 53 | "SETEX", 54 | "SETNX", 55 | "SETRANGE", 56 | "STRLEN", 57 | "HDEL", 58 | "HEXISTS", 59 | "HGET", 60 | "HGETALL", 61 | "HINCRBY", 62 | "HINCRBYFLOAT", 63 | "HKEYS", 64 | "HLEN", 65 | "HMGET", 66 | "HMSET", 67 | "HSET", 68 | "HSETNX", 69 | "HVALS", 70 | "HSCAN", 71 | "LINDEX", 72 | "LINSERT", 73 | "LLEN", 74 | "LPOP", 75 | "LPUSH", 76 | "LPUSHX", 77 | "LRANGE", 78 | "LREM", 79 | "LSET", 80 | "LTRIM", 81 | "RPOP", 82 | "RPOPLPUSH", 83 | "RPUSH", 84 | "RPUSHX", 85 | "SADD", 86 | "SCARD", 87 | "SDIFF", 88 | "SDIFFSTORE", 89 | "SINTER", 90 | "SINTERSTORE", 91 | "SISMEMBER", 92 | "SMEMBERS", 93 | "SMOVE", 94 | "SPOP", 95 | "SRANDMEMBER", 96 | "SREM", 97 | "SUNION", 98 | "SUNIONSTORE", 99 | "SSCAN", 100 | "ZADD", 101 | "ZCARD", 102 | "ZCOUNT", 103 | "ZINCRBY", 104 | "ZINTERSTORE", 105 | "ZLEXCOUNT", 106 | "ZRANGE", 107 | "ZRANGEBYLEX", 108 | "ZRANGEBYSCORE", 109 | "ZRANK", 110 | "ZREM", 111 | "ZREMRANGEBYLEX", 112 | "ZREMRANGEBYRANK", 113 | "ZREMRANGEBYSCORE", 114 | "ZREVRANGE", 115 | "ZREVRANGEBYSCORE", 116 | "ZREVRANK", 117 | "ZSCORE", 118 | "ZUNIONSTORE", 119 | "ZSCAN", 120 | "PFADD", 121 | "PFCOUNT", 122 | "PFMERGE", 123 | "EVAL", 124 | "EVALSHA", 125 | "PING", 126 | "QUIT", 127 | }; 128 | 129 | pub fn check_command_validity(cmd: &[u8]) -> bool { 130 | // This is goofy but redis only supports commands with ASCII characters, so we munge 131 | // these bytes to make sure that, if they were lowercase ASCII, they now become 132 | // uppercase ASCII... and we do it by hand instead of using str::to_uppercase because 133 | // this is 2x as fast. Really feels stupid to pay a constant perf penalty if we don't 134 | // really have to. Could probably unroll this to work on 8-byte chunks, 4-byte chunks, 135 | // etc, but that'd require full on pointers and this is good enough for now, I think. 
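// For example, b'g' (0x67) & 0b1101_1111 == b'G' (0x47), while bytes that are
// already uppercase pass through unchanged. Non-alphabetic bytes do get
// mangled by the mask, but since every entry in VALID_COMMANDS is plain
// uppercase ASCII, a mangled byte can never turn an invalid command into a
// valid one.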
136 | let mut c = cmd.to_owned(); 137 | let m = c.as_mut_slice(); 138 | 139 | let count = m.len(); 140 | let mut offset = 0; 141 | 142 | while offset < count { 143 | m[offset] = m[offset] & 0b11011111; 144 | offset += 1; 145 | } 146 | 147 | let as_str = unsafe { std::str::from_utf8_unchecked(m) }; 148 | VALID_COMMANDS.contains(as_str) 149 | } 150 | 151 | #[cfg(test)] 152 | mod tests { 153 | use super::*; 154 | use test::Bencher; 155 | 156 | #[test] 157 | fn ensure_valid_vs_invalid() { 158 | let valid_cmd_1 = "PFCOUNT"; 159 | let valid_cmd_2 = "hmset"; 160 | let invalid_cmd_1 = "INFO"; 161 | let invalid_cmd_2 = "rename"; 162 | 163 | assert!(check_command_validity(valid_cmd_1.as_bytes())); 164 | assert!(check_command_validity(valid_cmd_2.as_bytes())); 165 | assert!(!check_command_validity(invalid_cmd_1.as_bytes())); 166 | assert!(!check_command_validity(invalid_cmd_2.as_bytes())); 167 | } 168 | 169 | #[bench] 170 | fn bench_valid_lookup(b: &mut Bencher) { 171 | let valid_cmd = "PFCOUNT".as_bytes(); 172 | b.iter(|| check_command_validity(valid_cmd)); 173 | } 174 | 175 | #[bench] 176 | fn bench_invalid_lookup(b: &mut Bencher) { 177 | let invalid_cmd = "INFO".as_bytes(); 178 | b.iter(|| check_command_validity(invalid_cmd)); 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /src/util/batch.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use super::Sizable; 21 | use futures::{prelude::*, stream::Fuse}; 22 | use std::mem; 23 | 24 | /// An adapter for batching up items in a stream opportunistically. 25 | /// 26 | /// On each call to `poll`, the adapter will poll the underlying stream in a loop until the 27 | /// underlying stream reports that it is not ready. Any items returned during this loop will be 28 | /// stored and forwarded on either when the batch capacity is met or when the underlying stream 29 | /// signals that it has no available items. 
30 | #[derive(Debug)] 31 | #[must_use = "streams do nothing unless polled"] 32 | pub struct Batch 33 | where 34 | S: Stream, 35 | S::Item: Sizable, 36 | { 37 | items: Vec, 38 | size: usize, 39 | err: Option, 40 | stream: Fuse, 41 | } 42 | 43 | impl Batch 44 | where 45 | S: Stream, 46 | S::Item: Sizable, 47 | { 48 | pub fn new(s: S, capacity: usize) -> Batch { 49 | assert!(capacity > 0); 50 | 51 | Batch { 52 | items: Vec::with_capacity(capacity), 53 | size: 0, 54 | err: None, 55 | stream: s.fuse(), 56 | } 57 | } 58 | 59 | fn take(&mut self) -> (Vec, usize) { 60 | let cap = self.items.capacity(); 61 | let items = mem::replace(&mut self.items, Vec::with_capacity(cap)); 62 | let size = mem::replace(&mut self.size, 0); 63 | 64 | (items, size) 65 | } 66 | } 67 | 68 | impl Stream for Batch 69 | where 70 | S: Stream, 71 | S::Item: Sizable, 72 | { 73 | type Error = S::Error; 74 | type Item = (Vec, usize); 75 | 76 | fn poll(&mut self) -> Poll, Self::Error> { 77 | if let Some(err) = self.err.take() { 78 | return Err(err); 79 | } 80 | 81 | let cap = self.items.capacity(); 82 | loop { 83 | match self.stream.poll() { 84 | // If the underlying stream isn't ready any more, and we have items queued up, 85 | // simply return them to the caller and zero out our internal buffer. If we have 86 | // no items, then tell the caller we aren't ready. 87 | Ok(Async::NotReady) => { 88 | return if self.items.is_empty() { 89 | Ok(Async::NotReady) 90 | } else { 91 | Ok(Some(self.take()).into()) 92 | }; 93 | }, 94 | 95 | // If the underlying stream is ready and has items, buffer them until we hit our 96 | // capacity. 97 | // 98 | // Generally, the capacity should be high enough that we consume every 99 | // possible item available to us at the time of a given `poll`, maximixing the 100 | // batching effect. 101 | Ok(Async::Ready(Some(item))) => { 102 | let size = item.size(); 103 | self.items.push(item); 104 | self.size += size; 105 | if self.items.len() >= cap { 106 | return Ok(Some(self.take()).into()); 107 | } 108 | }, 109 | 110 | // Since the underlying stream ran out of values, return what we have buffered, if 111 | // we have anything at all. 112 | Ok(Async::Ready(None)) => { 113 | return if !self.items.is_empty() { 114 | Ok(Some(self.take()).into()) 115 | } else { 116 | Ok(Async::Ready(None)) 117 | }; 118 | }, 119 | 120 | // If we've got buffered items be sure to return them first, we'll defer our error 121 | // for later. 
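// (The stashed error is surfaced on the next call to `poll`, via the
// `self.err.take()` check at the top of this method.)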
122 | Err(e) => { 123 | if self.items.is_empty() { 124 | return Err(e); 125 | } else { 126 | self.err = Some(e); 127 | return Ok(Some(self.take()).into()); 128 | } 129 | }, 130 | } 131 | } 132 | } 133 | } 134 | 135 | impl Sink for Batch 136 | where 137 | S: Sink + Stream, 138 | ::Item: Sizable, 139 | { 140 | type SinkError = S::SinkError; 141 | type SinkItem = S::SinkItem; 142 | 143 | fn start_send(&mut self, item: S::SinkItem) -> StartSend { self.stream.start_send(item) } 144 | 145 | fn poll_complete(&mut self) -> Poll<(), S::SinkError> { self.stream.poll_complete() } 146 | 147 | fn close(&mut self) -> Poll<(), S::SinkError> { self.stream.close() } 148 | } 149 | -------------------------------------------------------------------------------- /src/routing/shadow.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
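For completeness, the other two `FutureExt` helpers from src/util/mod.rs (`timed` and `untyped`) are typically combined when spawning fire-and-forget work that should still be measured. The sketch below is hypothetical (the function and the commented-out metric call are stand-ins) and assumes `use crate::util::FutureExt;` plus the futures 0.1 prelude are in scope.

```rust
fn spawn_timed<F>(start_ns: u64, fut: F)
where
    F: Future + Send + 'static,
{
    let task = fut
        .timed(start_ns) // resolves to (start_ns, item) on success
        .map(|(start, _item)| {
            // e.g. record the elapsed time against a metric sink; elided here.
            let _ = start;
        })
        .untyped(); // erase Item/Error so the task can be handed to tokio

    tokio::spawn(task);
}
```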
20 | use crate::{ 21 | backend::processor::Processor, 22 | common::{AssignedRequests, EnqueuedRequest, EnqueuedRequests, Message}, 23 | }; 24 | use futures::{prelude::*, stream::futures_unordered::FuturesUnordered}; 25 | use std::marker::PhantomData; 26 | use tokio::sync::mpsc; 27 | use tower_service::Service; 28 | 29 | #[derive(Derivative)] 30 | #[derivative(Clone)] 31 | pub struct ShadowRouter 32 | where 33 | P: Processor + Clone + Send + 'static, 34 | P::Message: Message + Clone + Send, 35 | S: Service> + Clone, 36 | S::Future: Future + Send + 'static, 37 | { 38 | processor: P, 39 | default_inner: S, 40 | shadow_inner: S, 41 | noops: mpsc::UnboundedSender, 42 | } 43 | 44 | struct ShadowWorker 45 | where 46 | S: Service, 47 | { 48 | rx: mpsc::UnboundedReceiver, 49 | should_close: bool, 50 | inner: FuturesUnordered, 51 | _service: PhantomData, 52 | } 53 | 54 | impl ShadowWorker 55 | where 56 | S: Service, 57 | { 58 | pub fn new(rx: mpsc::UnboundedReceiver) -> ShadowWorker { 59 | ShadowWorker { 60 | rx, 61 | should_close: false, 62 | inner: FuturesUnordered::new(), 63 | _service: PhantomData, 64 | } 65 | } 66 | } 67 | 68 | impl Future for ShadowWorker 69 | where 70 | S: Service, 71 | { 72 | type Error = (); 73 | type Item = (); 74 | 75 | fn poll(&mut self) -> Poll { 76 | if !self.should_close { 77 | loop { 78 | match self.rx.poll() { 79 | Ok(Async::Ready(Some(fut))) => self.inner.push(fut), 80 | Ok(Async::Ready(None)) => { 81 | self.should_close = true; 82 | break; 83 | }, 84 | Ok(Async::NotReady) => break, 85 | Err(_) => { 86 | error!("shadow worker closed unexpectedly"); 87 | return Ok(Async::Ready(())); 88 | }, 89 | } 90 | } 91 | } 92 | 93 | // Just drive our inner futures; we don't care about their return value. 94 | loop { 95 | match self.inner.poll() { 96 | // These are successful results, so we just drop the value and keep on moving on. 97 | Ok(Async::Ready(Some(_))) => {}, 98 | // If we have no more futures to drive, and we've been instructed to close, it's 99 | // time to go. 100 | Ok(Async::Ready(None)) => { 101 | if self.should_close { 102 | return Ok(Async::Ready(())); 103 | } else { 104 | break; 105 | } 106 | }, 107 | Ok(Async::NotReady) => break, 108 | // We don't really care about errors per se, since it's the shadow pool. 109 | Err(_) => {}, 110 | } 111 | } 112 | 113 | Ok(Async::NotReady) 114 | } 115 | } 116 | 117 | impl ShadowRouter 118 | where 119 | P: Processor + Clone + Send + 'static, 120 | P::Message: Message + Clone + Send, 121 | S: Service> + Clone + Send + 'static, 122 | S::Future: Future + Send + 'static, 123 | { 124 | pub fn new(processor: P, default_inner: S, shadow_inner: S) -> ShadowRouter { 125 | let (tx, rx) = mpsc::unbounded_channel(); 126 | 127 | // Spin off a task that drives all of the shadow responses. 
128 | let shadow: ShadowWorker> = ShadowWorker::new(rx); 129 | tokio::spawn(shadow); 130 | 131 | ShadowRouter { 132 | processor, 133 | default_inner, 134 | shadow_inner, 135 | noops: tx, 136 | } 137 | } 138 | } 139 | 140 | impl Service> for ShadowRouter 141 | where 142 | P: Processor + Clone + Send + 'static, 143 | P::Message: Message + Clone + Send, 144 | S: Service> + Clone, 145 | S::Future: Future + Send + 'static, 146 | { 147 | type Error = S::Error; 148 | type Future = S::Future; 149 | type Response = S::Response; 150 | 151 | fn poll_ready(&mut self) -> Poll<(), Self::Error> { self.default_inner.poll_ready() } 152 | 153 | fn call(&mut self, req: AssignedRequests) -> Self::Future { 154 | let shadow_reqs = req 155 | .clone() 156 | .into_iter() 157 | .map(|(_, msg)| EnqueuedRequest::without_response(msg)) 158 | .collect(); 159 | 160 | let default_reqs = req.into_iter().map(|(id, msg)| EnqueuedRequest::new(id, msg)).collect(); 161 | 162 | let noop = self.shadow_inner.call(shadow_reqs); 163 | let _ = self.noops.try_send(noop); 164 | 165 | self.default_inner.call(default_reqs) 166 | } 167 | } 168 | -------------------------------------------------------------------------------- /synchrotron-test/src/daemons.rs: -------------------------------------------------------------------------------- 1 | use std::str; 2 | use std::env; 3 | use std::fs::File; 4 | use std::io::{Error, Write}; 5 | use std::process::{Command, Child, Stdio}; 6 | use tempfile::{Builder, TempDir}; 7 | use std::sync::atomic::{AtomicUsize, Ordering}; 8 | use std::thread; 9 | use std::time::Duration; 10 | 11 | static PORT_OFFSET: AtomicUsize = AtomicUsize::new(0); 12 | 13 | fn get_redis_config(stats_port: u16, listen1_port: u16, listen2_port: u16, redis1_port: u16, redis2_port: u16) -> String { 14 | format!(r#" 15 | {{ 16 | "stats_addr": "127.0.0.1:{stats_port}", 17 | "listeners": {{ 18 | "fixed": {{ 19 | "protocol": "redis", 20 | "address": "127.0.0.1:{listen1_port}", 21 | "pools": {{ 22 | "default": {{ 23 | "addresses": ["127.0.0.1:{redis1_port}", "127.0.0.1:{redis2_port}"], 24 | "options": {{ 25 | "cooloff_timeout_ms": "2000", 26 | "timeout_ms": "100" 27 | }} 28 | }} 29 | }}, 30 | "routing": {{ 31 | "type": "fixed" 32 | }} 33 | }}, 34 | "shadow": {{ 35 | "protocol": "redis", 36 | "address": "127.0.0.1:{listen2_port}", 37 | "pools": {{ 38 | "default": {{ 39 | "addresses": ["127.0.0.1:{redis1_port}"] 40 | }}, 41 | "shadow": {{ 42 | "addresses": ["127.0.0.1:{redis2_port}"] 43 | }} 44 | }}, 45 | "routing": {{ 46 | "type": "shadow" 47 | }} 48 | }} 49 | }} 50 | }} 51 | "#, stats_port = stats_port, listen1_port = listen1_port, listen2_port = listen2_port, redis1_port = redis1_port, redis2_port = redis2_port) 52 | } 53 | 54 | pub struct SynchrotronRunner { 55 | handle: Child, 56 | port: u16, 57 | fixed_conn_str: String, 58 | shadow_conn_str: String, 59 | conf_dir: Option, 60 | } 61 | 62 | impl SynchrotronRunner { 63 | pub fn new_redis(stats_port: u16, listen1_port: u16, listen2_port: u16, redis1_port: u16, redis2_port: u16) -> Result { 64 | let full_config = get_redis_config(stats_port, listen1_port, listen2_port, redis1_port, redis2_port); 65 | 66 | // Create our configuration file from the data we got. 
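// (Note: the file is written as `synchrotron.json` below, while SYNC_CONFIG is
// pointed at the extension-less `synchrotron` path; the `config` crate's
// File::with_name resolves the extension on its own, matching how
// src/conf/config.rs loads `config/synchrotron`.)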
67 | let conf_dir = Builder::new() 68 | .prefix("synchrotron-test-") 69 | .tempdir()?; 70 | 71 | let file_path = conf_dir.path().join("synchrotron"); 72 | let file_path_w_ext = conf_dir.path().join("synchrotron.json"); 73 | let mut conf_file = File::create(file_path_w_ext)?; 74 | conf_file.write(full_config.as_bytes())?; 75 | 76 | // Now try and launch Synchrotron. 77 | let handle = Command::new("../target/debug/synchrotron") 78 | .env("SYNC_CONFIG", file_path) 79 | .stdout(Stdio::null()) 80 | .stderr(Stdio::null()) 81 | .spawn()?; 82 | 83 | wait_until(|| check_synchrotron(listen1_port)); 84 | wait_until(|| check_synchrotron(listen2_port)); 85 | 86 | Ok(SynchrotronRunner { 87 | handle: handle, 88 | port: listen1_port, 89 | fixed_conn_str: format!("redis://127.0.0.1:{}", listen1_port), 90 | shadow_conn_str: format!("redis://127.0.0.1:{}", listen2_port), 91 | conf_dir: Some(conf_dir), 92 | }) 93 | } 94 | 95 | pub fn get_fixed_conn_str(&self) -> &str { 96 | self.fixed_conn_str.as_str() 97 | } 98 | 99 | pub fn get_shadow_conn_str(&self) -> &str { 100 | self.shadow_conn_str.as_str() 101 | } 102 | } 103 | 104 | impl Drop for SynchrotronRunner { 105 | fn drop(&mut self) { 106 | // If it panics, it panics. ¯\_(ツ)_/¯ 107 | self.handle.kill().unwrap(); 108 | self.conf_dir.take().unwrap().close().unwrap(); 109 | 110 | println!("Synchrotron ({}) killed!", self.port); 111 | } 112 | } 113 | 114 | pub struct RedisRunner { 115 | handle: Child, 116 | port: u16, 117 | conn_str: String, 118 | } 119 | 120 | impl RedisRunner { 121 | pub fn new(port: u16) -> Result { 122 | let redis_bin = match env::var("REDIS_BIN") { 123 | Ok(s) => s, 124 | Err(_) => "/usr/local/bin/redis-server".to_owned(), 125 | }; 126 | 127 | // Launch Redis on the specified port. 128 | let handle = Command::new(redis_bin) 129 | .arg("--port") 130 | .arg(port.to_string()) 131 | .stdout(Stdio::null()) 132 | .stderr(Stdio::null()) 133 | .spawn()?; 134 | 135 | // Wait for the instance to be ready. 136 | wait_until(|| check_redis(port)); 137 | 138 | Ok(RedisRunner { 139 | handle: handle, 140 | port: port, 141 | conn_str: format!("redis://127.0.0.1:{}", port), 142 | }) 143 | } 144 | 145 | pub fn get_conn_str(&self) -> &str { 146 | self.conn_str.as_str() 147 | } 148 | } 149 | 150 | impl Drop for RedisRunner { 151 | fn drop(&mut self) { 152 | // If it panics, it panics. 
¯\_(ツ)_/¯ 153 | self.handle.kill().unwrap(); 154 | 155 | println!("redis-server ({}) killed!", self.port); 156 | } 157 | } 158 | 159 | fn wait_until(f: F) 160 | where F: Fn() -> bool 161 | { 162 | let mut sleep_ms = 50; 163 | 164 | loop { 165 | let status = f(); 166 | if status { 167 | return; 168 | } 169 | 170 | thread::sleep(Duration::from_millis(sleep_ms)); 171 | 172 | if sleep_ms < 5000 { 173 | sleep_ms *= 2; 174 | } 175 | } 176 | } 177 | 178 | fn check_redis(port: u16) -> bool { 179 | let result = Command::new("redis-cli") 180 | .args(&["-h", "localhost", "-p", port.to_string().as_str(), "ping"]) 181 | .output() 182 | .expect("failed to run redis-cli"); 183 | 184 | match str::from_utf8(&result.stdout) { 185 | Ok(output) => match output == "PONG\n" { 186 | true => { 187 | println!("redis-server ({}) is running!", port); 188 | true 189 | }, 190 | false => { 191 | println!("redis-server ({}) not running yet.", port); 192 | false 193 | }, 194 | }, 195 | _ => { 196 | println!("redis-server ({}) not running yet.", port); 197 | false 198 | }, 199 | } 200 | } 201 | 202 | fn check_synchrotron(port: u16) -> bool { 203 | let result = Command::new("redis-cli") 204 | .args(&["-h", "localhost", "-p", port.to_string().as_str(), "ping"]) 205 | .output() 206 | .expect("failed to run redis-cli"); 207 | 208 | match str::from_utf8(&result.stdout) { 209 | Ok(output) => match output == "PONG\n" { 210 | true => { 211 | println!("Synchrotron ({}) is running!", port); 212 | true 213 | }, 214 | false => { 215 | println!("Synchrotron ({}) not running yet.", port); 216 | false 217 | }, 218 | }, 219 | _ => { 220 | println!("Synchrotron ({}) not running yet.", port); 221 | false 222 | }, 223 | } 224 | } 225 | 226 | pub fn get_redis_daemons() -> (SynchrotronRunner, RedisRunner, RedisRunner) { 227 | let offset = PORT_OFFSET.fetch_add(1, Ordering::SeqCst) as u16; 228 | 229 | let synchrotron_stats_port = 43000 + offset; 230 | let synchrotron_listen1_port = 44000 + offset; 231 | let synchrotron_listen2_port = 45000 + offset; 232 | let redis1_port = 46000 + offset; 233 | let redis2_port = 47000 + offset; 234 | 235 | let redis1 = RedisRunner::new(redis1_port).unwrap(); 236 | let redis2 = RedisRunner::new(redis2_port).unwrap(); 237 | let synchrotron = SynchrotronRunner::new_redis(synchrotron_stats_port, synchrotron_listen1_port, synchrotron_listen2_port, redis1_port, redis2_port).unwrap(); 238 | 239 | (synchrotron, redis1, redis2) 240 | } 241 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | #![feature(test)] 21 | #![feature(nll)] 22 | #![feature(never_type)] 23 | #![feature(proc_macro_hygiene)] 24 | #![recursion_limit = "1024"] 25 | 26 | #[macro_use] 27 | extern crate lazy_static; 28 | 29 | #[macro_use] 30 | extern crate derivative; 31 | 32 | #[macro_use] 33 | extern crate serde_derive; 34 | 35 | #[macro_use] 36 | extern crate futures; 37 | 38 | use crate::{ 39 | futures_turnstyle::{Turnstyle, Waiter}, 40 | signal_hook::iterator::Signals, 41 | libc::{SIGINT, SIGUSR1}, 42 | }; 43 | use futures::future::{lazy, ok}; 44 | use std::thread; 45 | 46 | extern crate tokio; 47 | use tokio::{ 48 | prelude::*, 49 | sync::{mpsc, oneshot}, 50 | }; 51 | 52 | #[macro_use] 53 | extern crate log; 54 | #[macro_use(slog_o)] 55 | extern crate slog; 56 | #[macro_use] 57 | extern crate metrics; 58 | 59 | use slog::Drain; 60 | 61 | #[cfg(test)] 62 | extern crate test; 63 | 64 | mod backend; 65 | mod common; 66 | mod conf; 67 | mod errors; 68 | mod listener; 69 | mod protocol; 70 | mod routing; 71 | mod service; 72 | mod util; 73 | 74 | use crate::{ 75 | conf::{Configuration, LevelExt}, 76 | errors::CreationError, 77 | util::FutureExt, 78 | }; 79 | use metrics_runtime::{ 80 | exporters::HttpExporter, recorders::PrometheusRecorder, Controller, Receiver, Sink as MetricSink, 81 | }; 82 | 83 | enum SupervisorCommand { 84 | Launch, 85 | Reload, 86 | Shutdown, 87 | } 88 | 89 | fn main() { 90 | // Set up our signal handling before anything else. 91 | let (mut supervisor_tx, supervisor_rx) = mpsc::unbounded_channel(); 92 | let signals = Signals::new(&[SIGINT, SIGUSR1]).expect("failed to register signal handlers"); 93 | thread::spawn(move || { 94 | // Do an initial send of the launch command to trigger actually spawning the listeners at 95 | // startup. 96 | let _ = supervisor_tx.try_send(SupervisorCommand::Launch); 97 | 98 | for signal in signals.forever() { 99 | info!("[core] signal received: {:?}", signal); 100 | 101 | match signal { 102 | libc::SIGUSR1 => { 103 | let _ = supervisor_tx.try_send(SupervisorCommand::Reload); 104 | }, 105 | libc::SIGINT => { 106 | let _ = supervisor_tx.try_send(SupervisorCommand::Shutdown); 107 | break; 108 | }, 109 | _ => {}, // we don't care about the rest 110 | } 111 | } 112 | }); 113 | 114 | let configuration = Configuration::new().expect("failed to parse configuration"); 115 | 116 | // Configure our logging. This gives us fully asynchronous logging to the terminal 117 | // which is also level filtered. As well, we've replaced the global std logger 118 | // and pulled in helper macros that correspond to the various logging levels. 119 | let decorator = slog_term::TermDecorator::new().build(); 120 | let drain = slog_term::FullFormat::new(decorator).build().fuse(); 121 | let drain = slog_async::Async::new(drain).build().fuse(); 122 | let logger = slog::Logger::root( 123 | slog::LevelFilter::new(drain, slog::Level::from_str(&configuration.logging.level)).fuse(), 124 | slog_o!("version" => env!("GIT_HASH")), 125 | ); 126 | 127 | let _scope_guard = slog_scope::set_global_logger(logger); 128 | slog_stdlog::init().unwrap(); 129 | info!("[core] logging configured"); 130 | 131 | // Configure our metrics. 
We want to do this pretty early on before anything actually tries to 132 | // record any metrics. 133 | let receiver = Receiver::builder().build().expect("failed to build metrics receiver"); 134 | let controller = receiver.get_controller(); 135 | let sink = receiver.get_sink(); 136 | receiver.install(); 137 | 138 | tokio_io_pool::run(lazy(move || { 139 | let (shutdown_tx, shutdown_rx) = oneshot::channel(); 140 | launch_metrics(configuration.stats_addr, controller, shutdown_rx); 141 | launch_supervisor(supervisor_rx, shutdown_tx, sink); 142 | 143 | info!("[core] synchrotron running"); 144 | 145 | ok(()) 146 | })) 147 | } 148 | 149 | fn launch_supervisor( 150 | supervisor_rx: mpsc::UnboundedReceiver, shutdown_tx: oneshot::Sender<()>, sink: MetricSink, 151 | ) { 152 | let turnstyle = Turnstyle::new(); 153 | let supervisor = supervisor_rx 154 | .map_err(|_| CreationError::ListenerSpawnFailed) 155 | .fold(turnstyle, move |ts, command| { 156 | match command { 157 | SupervisorCommand::Launch => { 158 | let (version, waiter) = ts.join(); 159 | launch_listeners(version, waiter, sink.clone())?; 160 | counter!("supervisor.configuration_loads", 1); 161 | }, 162 | SupervisorCommand::Reload => { 163 | let (version, waiter) = ts.join(); 164 | launch_listeners(version, waiter, sink.clone())?; 165 | ts.turn(); 166 | counter!("supervisor.configuration_loads", 1); 167 | }, 168 | SupervisorCommand::Shutdown => { 169 | ts.turn(); 170 | }, 171 | } 172 | 173 | Ok(ts) 174 | }) 175 | .then(move |result| { 176 | if let Err(e) = result { 177 | error!("[core supervisor] caught an error during launch/reload: {}", e); 178 | } 179 | 180 | shutdown_tx.send(()) 181 | }) 182 | .untyped(); 183 | 184 | tokio::spawn(supervisor); 185 | } 186 | 187 | fn launch_listeners(version: usize, close: Waiter, sink: MetricSink) -> Result<(), CreationError> { 188 | let configuration = Configuration::new().expect("failed to parse configuration"); 189 | let closer = close.shared(); 190 | let listeners = configuration 191 | .listeners 192 | .into_iter() 193 | .map(|(name, config)| { 194 | let close = closer.clone(); 195 | 196 | listener::from_config(version, name, config, close, sink.clone()) 197 | }) 198 | .collect::>(); 199 | 200 | let mut errors = Vec::new(); 201 | for listener in &listeners { 202 | let result = listener.as_ref(); 203 | if result.is_err() { 204 | let error = result.err().unwrap(); 205 | errors.push(error.to_string()); 206 | } 207 | } 208 | 209 | if !errors.is_empty() { 210 | error!("[core] encountered errors while spawning listeners:"); 211 | for error in errors { 212 | error!("[core] - {}", error); 213 | } 214 | 215 | return Err(CreationError::ListenerSpawnFailed); 216 | } 217 | 218 | // Launch all these listeners into the runtime. 
219 | for listener in listeners { 220 | tokio::spawn(listener.unwrap()); 221 | } 222 | 223 | Ok(()) 224 | } 225 | 226 | fn launch_metrics(stats_addr: String, controller: Controller, shutdown_rx: impl Future + Send + 'static) { 227 | let addr = stats_addr.parse().expect("failed to parse metrics listen address"); 228 | let exporter = HttpExporter::new(controller, PrometheusRecorder::new(), addr); 229 | let task = exporter.into_future().select2(shutdown_rx).untyped(); 230 | tokio::spawn(task); 231 | } 232 | -------------------------------------------------------------------------------- /src/service/pipeline.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crate::{ 21 | backend::{message_queue::MessageQueue, processor::Processor}, 22 | common::{AssignedRequests, AssignedResponse, Message}, 23 | service::PipelineError, 24 | util::{Batch, FutureExt, Timed}, 25 | }; 26 | use bytes::BytesMut; 27 | use futures::prelude::*; 28 | use metrics_runtime::{ 29 | data::{Counter, Histogram}, 30 | Sink as MetricSink, 31 | }; 32 | use std::collections::VecDeque; 33 | use tower_service::Service; 34 | 35 | /// Pipeline-capable service base. 36 | /// 37 | /// `Pipeline` can simultaenously drive a `Transport` and an underlying `Service`, 38 | /// opportunistically batching messages from the client transport and handing them off for 39 | /// processing while waiting to send back to the responses. 40 | pub struct Pipeline 41 | where 42 | T: Sink + Stream, 43 | S: Service>, 44 | S::Response: IntoIterator>, 45 | P: Processor, 46 | P::Message: Message + Clone, 47 | { 48 | responses: VecDeque>, 49 | transport: Batch, 50 | service: S, 51 | queue: MessageQueue
<P>
, 52 | 53 | send_buf: Option<(BytesMut, u64)>, 54 | finish: bool, 55 | 56 | sink: MetricSink, 57 | bytes_sent: Counter, 58 | bytes_received: Counter, 59 | messages_sent: Counter, 60 | messages_received: Counter, 61 | client_e2e: Histogram, 62 | } 63 | 64 | impl Pipeline 65 | where 66 | T: Sink + Stream, 67 | S: Service>, 68 | S::Response: IntoIterator>, 69 | P: Processor, 70 | P::Message: Message + Clone, 71 | { 72 | /// Creates a new `Pipeline`. 73 | pub fn new(transport: T, service: S, processor: P, mut sink: MetricSink) -> Self { 74 | let bytes_sent = sink.counter("bytes_sent"); 75 | let bytes_received = sink.counter("bytes_received"); 76 | let messages_sent = sink.counter("messages_sent"); 77 | let messages_received = sink.counter("messages_received"); 78 | let client_e2e = sink.histogram("client_e2e"); 79 | 80 | Pipeline { 81 | responses: VecDeque::new(), 82 | transport: Batch::new(transport, 128), 83 | service, 84 | queue: MessageQueue::new(processor), 85 | send_buf: None, 86 | finish: false, 87 | sink, 88 | bytes_sent, 89 | bytes_received, 90 | messages_sent, 91 | messages_received, 92 | client_e2e, 93 | } 94 | } 95 | } 96 | 97 | impl Future for Pipeline 98 | where 99 | T: Sink + Stream, 100 | S: Service>, 101 | S::Response: IntoIterator>, 102 | P: Processor, 103 | P::Message: Message + Clone, 104 | { 105 | type Error = PipelineError>; 106 | type Item = (); 107 | 108 | fn poll(&mut self) -> Poll { 109 | loop { 110 | // In order, drive the response futures we're waiting on. Keep pulling from the 111 | // front to keep things in order, and as soon as we hit something that isn't ready or 112 | // isn't ready to flush to the message queue. 113 | while let Some(mut f) = self.responses.pop_front() { 114 | match f.poll() { 115 | Ok(Async::Ready((start, rsp))) => { 116 | self.queue.fulfill(rsp); 117 | let end = self.sink.now(); 118 | self.client_e2e.record_timing(start, end); 119 | }, 120 | Ok(Async::NotReady) => { 121 | self.responses.push_front(f); 122 | break; 123 | }, 124 | Err(e) => { 125 | return Err(PipelineError::from_service_error(e)); 126 | }, 127 | } 128 | } 129 | 130 | // Now that we've polled and fulfilled any completed batches, see if we have a buffer 131 | // to send: first, we might be holding on to a buffer we got from the queue that 132 | // hasn't been sendable, or we might be trying to get a buffer to send period. 133 | if self.send_buf.is_some() { 134 | let (buf, count) = self.send_buf.take().expect("left over send buffer not available"); 135 | let buf_len = buf.len(); 136 | if let AsyncSink::NotReady(buf) = 137 | self.transport.start_send(buf).map_err(PipelineError::from_sink_error)? 138 | { 139 | self.send_buf = Some((buf, count)); 140 | return Ok(Async::NotReady); 141 | } 142 | 143 | self.messages_sent.record(count); 144 | self.bytes_sent.record(buf_len as u64); 145 | } 146 | 147 | let mut msgs_sent = 0; 148 | let mut bytes_sent = 0; 149 | 150 | while let Some((buf, count)) = self.queue.get_sendable_buf() { 151 | let buf_len = buf.len(); 152 | if let AsyncSink::NotReady(buf) = 153 | self.transport.start_send(buf).map_err(PipelineError::from_sink_error)? 154 | { 155 | self.send_buf = Some((buf, count)); 156 | self.messages_sent.record(msgs_sent); 157 | self.bytes_sent.record(bytes_sent as u64); 158 | return Ok(Async::NotReady); 159 | } 160 | 161 | msgs_sent += count; 162 | bytes_sent += buf_len; 163 | } 164 | 165 | self.messages_sent.record(msgs_sent); 166 | self.bytes_sent.record(bytes_sent as u64); 167 | 168 | // Drive our transport to flush any buffers we have. 
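// (With futures 0.1 sinks, `poll_complete` only returns `Async::Ready(())` once
// every buffer handed to `start_send` has actually been flushed through the
// transport, so readiness here really does mean "nothing left to write".)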
169 | if let Async::Ready(()) = self.transport.poll_complete().map_err(PipelineError::from_sink_error)? { 170 | // If we're finished and have nothing else to send, then we're done! 171 | if self.finish && self.responses.is_empty() { 172 | return Ok(Async::Ready(())); 173 | } 174 | } 175 | 176 | // Don't try and grab anything else from the transport if we're finished, we just need 177 | // to flush the rest of our responses and that's it. 178 | if self.finish { 179 | return Ok(Async::NotReady); 180 | } 181 | 182 | // Make sure the underlying service is ready to be called. 183 | try_ready!(self.service.poll_ready().map_err(PipelineError::from_service_error)); 184 | 185 | // See if we can pull a batch from the transport. 186 | match try_ready!(self.transport.poll().map_err(PipelineError::from_stream_error)) { 187 | Some((batch, batch_size)) => { 188 | self.messages_received.record(batch.len() as u64); 189 | self.bytes_received.record(batch_size as u64); 190 | let batch = self.queue.enqueue(batch)?; 191 | if !batch.is_empty() { 192 | let fut = self.service.call(batch); 193 | let start = self.sink.now(); 194 | self.responses.push_back(fut.timed(start)); 195 | } 196 | }, 197 | None => { 198 | // Our transport has signalled no more messages are going to come in, so mark 199 | // ourselves as finished so we can begin the closing process. 200 | assert!(!self.finish); 201 | self.finish = true; 202 | }, 203 | } 204 | } 205 | } 206 | } 207 | -------------------------------------------------------------------------------- /src/backend/message_queue.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crate::{ 21 | backend::processor::{Processor, ProcessorError}, 22 | common::{AssignedRequests, AssignedResponse, Message, MessageResponse}, 23 | }; 24 | use bytes::BytesMut; 25 | use slab::Slab; 26 | use std::collections::VecDeque; 27 | 28 | /// Message state of queued messages. 29 | #[derive(Debug, PartialEq)] 30 | pub enum MessageState { 31 | /// An unfragmented, standalone message. 32 | /// 33 | /// A filled variant of this state can be immediately sent off to the client. 34 | Standalone, 35 | 36 | /// An unfragmented, standalone message that is _also_ immediately available. 
37 | /// 38 | /// While normal messages have to be processed before a response, these messages are available 39 | /// to send as soon as they're enqueued. 40 | Inline, 41 | 42 | /// A fragmented message. 43 | /// 44 | /// This represents a discrete fragment of a parent message. The buffer represents arbitrary 45 | /// data that is used to identify the parent message. Given that fragments may not have the 46 | /// information any longer, we keep track of it in the message state. 47 | /// 48 | /// The integers provide the index of the given fragment and the overall count of fragments 49 | /// within the parent message. 50 | Fragmented(BytesMut, usize, usize), 51 | 52 | /// A streaming fragmented message. 53 | /// 54 | /// This represents a discrete fragment of a parent message. The key difference is that the 55 | /// parent message is "streamable." This is usually the case for get operations, where, as 56 | /// long as the fragments are in order, they can be streamed back to the client as they're 57 | /// available. This is in contrast to some other fragmented messages, where the response must 58 | /// be generated by the sum of all the parts. 59 | /// 60 | /// The optional buffer represents a header that can be sent before the actual fragment. This 61 | /// allows sending any response data that is needed to coalesce the fragments into a meaningful 62 | /// response to the client. 63 | /// 64 | /// The boolean marks whether this streaming fragment represents the end of the response to the 65 | /// client for a given input message. For example, if the client sent in a multi-get that 66 | /// asked for 10 keys, the 10th streaming fragment would be the "end" of the response. 67 | StreamingFragmented(Option, bool), 68 | } 69 | 70 | pub struct MessageQueue
<P>
71 | where 72 | P: Processor, 73 | { 74 | // Processor that provides fragmentation capabilities. 75 | processor: P, 76 | 77 | // Holds all message slots, and stores the slot IDs in order of the messages tied to them. 78 | slot_order: VecDeque<(usize, MessageState)>, 79 | slots: Slab>, 80 | } 81 | 82 | impl
<P> MessageQueue<P>
83 | where 84 | P: Processor, 85 | P::Message: Message + Clone, 86 | { 87 | pub fn new(processor: P) -> MessageQueue
<P>
{ 88 | MessageQueue { 89 | processor, 90 | slot_order: VecDeque::new(), 91 | slots: Slab::new(), 92 | } 93 | } 94 | 95 | fn is_slot_ready(&self, slot: usize) -> bool { 96 | match self.slot_order.get(slot) { 97 | None => false, 98 | Some((slot_id, _)) => { 99 | match self.slots.get(*slot_id) { 100 | Some(Some(_)) => true, 101 | _ => false, 102 | } 103 | }, 104 | } 105 | } 106 | 107 | fn get_next_response(&mut self) -> Result, ProcessorError> { 108 | // If we have an immediately available response aka a standalone message or streaming 109 | // fragment, just return it. 110 | let has_immediate = match self.slot_order.front() { 111 | None => return Ok(None), 112 | Some((slot_id, state)) => { 113 | match self.slots.get(*slot_id) { 114 | Some(_) => { 115 | match state { 116 | MessageState::Standalone 117 | | MessageState::Inline 118 | | MessageState::StreamingFragmented(_, _) => true, 119 | MessageState::Fragmented(_, _, _) => false, 120 | } 121 | }, 122 | None => return Ok(None), 123 | } 124 | }, 125 | }; 126 | 127 | if has_immediate { 128 | let (slot_id, state) = self.slot_order.pop_front().expect("failed to pop slot order"); 129 | let slot = self.slots.remove(slot_id).expect("failed to remove slot"); 130 | 131 | let (buf, count) = match state { 132 | MessageState::Standalone | MessageState::Inline => (slot.into_buf(), 1), 133 | MessageState::StreamingFragmented(header, is_last) => { 134 | let count = if is_last { 1 } else { 0 }; 135 | match header { 136 | Some(mut header_buf) => { 137 | header_buf.unsplit(slot.into_buf()); 138 | (header_buf, count) 139 | }, 140 | None => (slot.into_buf(), count), 141 | } 142 | }, 143 | _ => unreachable!(), 144 | }; 145 | 146 | return Ok(Some((buf, count))); 147 | } 148 | 149 | // Now we know that the next slot has been fulfilled, and that it's a fragmented message. 150 | // Let's peek at the slot to grab the fragment count, and then we can loop through to see 151 | // if all the fragments have completed and are ready to be coalesced. 152 | let fragment_count = match self.slot_order.front() { 153 | None => unreachable!(), 154 | Some((_, state)) => { 155 | match state { 156 | MessageState::Fragmented(_, _, count) => *count, 157 | _ => unreachable!(), 158 | } 159 | }, 160 | }; 161 | 162 | for index in 0..fragment_count { 163 | if !self.is_slot_ready(index) { 164 | return Ok(None); 165 | } 166 | } 167 | 168 | // We have all the slots filled and ready to coalesce. Pull out the fragments! 
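// (The fragments are popped in their original slot order, paired with their
// `MessageState::Fragmented` metadata, and handed to the processor so it can
// rebuild a single response buffer for the parent message.)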
169 | let mut fragments = Vec::new(); 170 | for _ in 0..fragment_count { 171 | let (slot_id, state) = self.slot_order.pop_front().expect("failed to pop fragment slot order"); 172 | let msg = self.slots.remove(slot_id).expect("failed to remove fragment slot"); 173 | fragments.push((state, msg)); 174 | } 175 | 176 | let msg = self.processor.defragment_messages(fragments)?; 177 | Ok(Some((msg.into_buf(), 1))) 178 | } 179 | 180 | pub fn enqueue(&mut self, msgs: Vec) -> Result, ProcessorError> { 181 | let fmsgs = self.processor.fragment_messages(msgs)?; 182 | 183 | let mut amsgs = Vec::new(); 184 | for (msg_state, msg) in fmsgs { 185 | if msg_state == MessageState::Inline { 186 | let slot_id = self.slots.insert(Some(msg)); 187 | self.slot_order.push_back((slot_id, msg_state)); 188 | } else { 189 | let slot_id = self.slots.insert(None); 190 | self.slot_order.push_back((slot_id, msg_state)); 191 | amsgs.push((slot_id, msg)); 192 | } 193 | } 194 | 195 | Ok(amsgs) 196 | } 197 | 198 | pub fn fulfill(&mut self, batch: I) 199 | where 200 | I: IntoIterator>, 201 | { 202 | for (slot, response) in batch.into_iter() { 203 | let slot = self.slots.get_mut(slot).unwrap(); 204 | match response { 205 | MessageResponse::Complete(msg) => { 206 | slot.replace(msg); 207 | }, 208 | MessageResponse::Failed => { 209 | let err = self.processor.get_error_message_str("failed to receive response"); 210 | slot.replace(err); 211 | }, 212 | } 213 | } 214 | } 215 | 216 | pub fn get_sendable_buf(&mut self) -> Option<(BytesMut, u64)> { 217 | if !self.is_slot_ready(0) { 218 | return None; 219 | } 220 | 221 | if let Ok(inner) = self.get_next_response() { 222 | inner 223 | } else { 224 | None 225 | } 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /src/backend/pool.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
20 | use super::{ 21 | distributor::{configure_distributor, Distributor}, 22 | hasher::{configure_hasher, KeyHasher}, 23 | }; 24 | use crate::{ 25 | backend::{processor::Processor, Backend, BackendError, PoolError, ResponseFuture}, 26 | common::{AssignedResponses, EnqueuedRequests, Message}, 27 | conf::PoolConfiguration, 28 | errors::CreationError, 29 | util::IntegerMappedVec, 30 | }; 31 | use futures::{ 32 | future::{join_all, JoinAll}, 33 | prelude::*, 34 | }; 35 | use metrics_runtime::Sink as MetricSink; 36 | use std::{collections::HashMap, marker::PhantomData}; 37 | use tower_direct_service::DirectService; 38 | 39 | type DistributorFutureSafe = Box; 40 | type KeyHasherFutureSafe = Box; 41 | 42 | pub struct BackendPool
<P>
43 | where 44 | P: Processor + Clone + Send + 'static, 45 | P::Message: Message + Send + 'static, 46 | { 47 | distributor: DistributorFutureSafe, 48 | key_hasher: KeyHasherFutureSafe, 49 | backends: Vec>, 50 | noreply: bool, 51 | epoch: u64, 52 | sink: MetricSink, 53 | } 54 | 55 | impl
<P> BackendPool<P>
56 | where 57 | P: Processor + Clone + Send + 'static, 58 | P::Message: Message + Send + 'static, 59 | { 60 | pub fn new( 61 | backends: Vec>, distributor: DistributorFutureSafe, key_hasher: KeyHasherFutureSafe, noreply: bool, 62 | sink: MetricSink, 63 | ) -> BackendPool
<P>
{ 64 | let mut pool = BackendPool { 65 | distributor, 66 | key_hasher, 67 | backends, 68 | noreply, 69 | epoch: 0, 70 | sink, 71 | }; 72 | pool.regenerate_distribution(); 73 | pool 74 | } 75 | 76 | pub fn regenerate_distribution(&mut self) { 77 | let descriptors = self 78 | .backends 79 | .iter_mut() 80 | .enumerate() 81 | .map(|(idx, backend)| { 82 | let mut descriptor = backend.get_descriptor(); 83 | descriptor.idx = idx; 84 | descriptor 85 | }) 86 | .filter(|backend| backend.healthy) 87 | .collect(); 88 | self.distributor.update(descriptors); 89 | self.sink.record_counter("distribution_updated", 1); 90 | } 91 | } 92 | 93 | impl
<P> DirectService<EnqueuedRequests<P::Message>> for BackendPool<P>
94 | where 95 | P: Processor + Clone + Send + 'static, 96 | P::Message: Message + Send + 'static, 97 | { 98 | type Error = PoolError; 99 | type Future = PoolResponse
<P>
; 100 | type Response = AssignedResponses; 101 | 102 | fn poll_ready(&mut self) -> Poll<(), Self::Error> { 103 | // Not every backend will be ready all the time, especially if they're knocked out of the 104 | // pool temporarily, but as long as one is ready, then we're ready. If any of them are in 105 | // a bad enough state to throw an error, though, then something is very wrong and we need 106 | // to bubble that up. 107 | let mut any_ready = false; 108 | let mut epoch = 0; 109 | for backend in &mut self.backends { 110 | match backend.poll_ready() { 111 | Ok(Async::Ready(_)) => any_ready = true, 112 | Ok(Async::NotReady) => {}, 113 | Err(e) => return Err(PoolError::Backend(e)), 114 | } 115 | 116 | epoch += backend.health().epoch(); 117 | } 118 | 119 | if !any_ready { 120 | return Ok(Async::NotReady); 121 | } 122 | 123 | if self.epoch != epoch { 124 | debug!("regenerating distribution"); 125 | self.regenerate_distribution(); 126 | self.epoch = epoch; 127 | } 128 | 129 | Ok(Async::Ready(())) 130 | } 131 | 132 | fn poll_service(&mut self) -> Poll<(), Self::Error> { 133 | for backend in &mut self.backends { 134 | // not clear if it actually makes sense to pre-emptively return notready without 135 | // driving all services.. poll_ready should cover the "am i knocked out of the pool 136 | // temporarily?" case but would this ever actually return notready when driving the 137 | // underlying service? unclear 138 | try_ready!(backend.poll_service()); 139 | } 140 | 141 | Ok(Async::Ready(())) 142 | } 143 | 144 | fn poll_close(&mut self) -> Poll<(), Self::Error> { 145 | for backend in &mut self.backends { 146 | try_ready!(backend.poll_close()); 147 | } 148 | 149 | Ok(Async::Ready(())) 150 | } 151 | 152 | fn call(&mut self, req: EnqueuedRequests) -> Self::Future { 153 | let mut futs = Vec::new(); 154 | let mut batches = IntegerMappedVec::new(); 155 | 156 | for msg in req { 157 | let msg_key = msg.key(); 158 | let msg_hashed = self.key_hasher.hash(msg_key); 159 | let backend_idx = self.distributor.choose(msg_hashed); 160 | 161 | batches.push(backend_idx, msg); 162 | } 163 | 164 | // make the batch calls to each relevant backend, and collect them 165 | for (backend_idx, batch) in batches { 166 | let fut = self.backends[backend_idx].call(batch); 167 | futs.push(fut); 168 | } 169 | 170 | PoolResponse::new(futs) 171 | } 172 | } 173 | 174 | pub struct BackendPoolBuilder
<P>
175 | where 176 | P: Processor + Clone + Send + 'static, 177 | P::Message: Message + Send + 'static, 178 | { 179 | processor: P, 180 | config: PoolConfiguration, 181 | noreply: bool, 182 | sink: MetricSink, 183 | } 184 | 185 | impl
<P> BackendPoolBuilder<P>
186 | where 187 | P: Processor + Clone + Send + 'static, 188 | P::Message: Message + Send + 'static, 189 | { 190 | pub fn new(name: String, processor: P, config: PoolConfiguration, mut sink: MetricSink) -> BackendPoolBuilder
<P>
{ 191 | sink.add_default_labels(&[("pool", name)]); 192 | 193 | BackendPoolBuilder { 194 | processor, 195 | config, 196 | noreply: false, 197 | sink, 198 | } 199 | } 200 | 201 | pub fn set_noreply(mut self, noreply: bool) -> Self { 202 | self.noreply = noreply; 203 | self 204 | } 205 | 206 | pub fn build(self) -> Result, CreationError> 207 | where 208 | P: Processor + Clone + Send + 'static, 209 | P::Message: Message + Send + 'static, 210 | { 211 | let mut options = self.config.options.unwrap_or_else(HashMap::new); 212 | let dist_type = options 213 | .entry("distribution".to_owned()) 214 | .or_insert_with(|| "modulo".to_owned()) 215 | .to_lowercase(); 216 | let distributor = configure_distributor(&dist_type)?; 217 | debug!("[listener] using distributor '{}'", dist_type); 218 | 219 | let hash_type = options 220 | .entry("hash".to_owned()) 221 | .or_insert_with(|| "fnv1a_64".to_owned()) 222 | .to_lowercase(); 223 | let hasher = configure_hasher(&hash_type)?; 224 | debug!("[listener] using hasher '{}'", hash_type); 225 | 226 | // Build all of our backends for this pool. 227 | let mut backends = Vec::new(); 228 | for address in &self.config.addresses { 229 | let backend = Backend::new( 230 | address.address, 231 | address.identifier.clone(), 232 | self.processor.clone(), 233 | options.clone(), 234 | self.noreply, 235 | self.sink.clone(), 236 | )?; 237 | backends.push(backend); 238 | } 239 | 240 | Ok(BackendPool::new(backends, distributor, hasher, self.noreply, self.sink)) 241 | } 242 | } 243 | 244 | pub struct PoolResponse
<P>
245 | where 246 | P: Processor + Send + 'static, 247 | P::Message: Message + Send + 'static, 248 | { 249 | responses: JoinAll>>, 250 | _processor: PhantomData
<P>
, 251 | } 252 | 253 | impl
<P> PoolResponse<P>
254 | where 255 | P: Processor + Send + 'static, 256 | P::Message: Message + Send + 'static, 257 | { 258 | pub fn new(responses: Vec>) -> PoolResponse
<P>
{ 259 | PoolResponse { 260 | responses: join_all(responses), 261 | _processor: PhantomData, 262 | } 263 | } 264 | } 265 | 266 | impl
<P> Future for PoolResponse<P>
267 | where 268 | P: Processor + Send + 'static, 269 | P::Message: Message + Send + 'static, 270 | { 271 | type Error = PoolError; 272 | type Item = AssignedResponses; 273 | 274 | fn poll(&mut self) -> Poll { 275 | let result = try_ready!(self.responses.poll()); 276 | let flattened = result.into_iter().flatten().collect::>(); 277 | Ok(Async::Ready(flattened)) 278 | } 279 | } 280 | -------------------------------------------------------------------------------- /synchrotron-test/src/main.rs: -------------------------------------------------------------------------------- 1 | extern crate redis; 2 | extern crate tempfile; 3 | 4 | mod daemons; 5 | 6 | fn main() { 7 | println!("This binary does nothing. Run `cargo test` on this to actually run the tests.") 8 | } 9 | 10 | #[cfg(test)] 11 | mod redis_tests { 12 | use std::thread; 13 | use std::time::Duration; 14 | use redis::cmd as redis_cmd; 15 | use redis::Client as RedisClient; 16 | use redis::{Commands, RedisResult, ErrorKind as RedisErrorKind}; 17 | use daemons::get_redis_daemons; 18 | 19 | #[test] 20 | fn test_set_get() { 21 | let (sd, _rd1, _rd2) = get_redis_daemons(); 22 | 23 | // A simple set and then get. 24 | let client = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 25 | let conn = client.get_connection().unwrap(); 26 | let _: () = conn.set("my_key", 42).unwrap(); 27 | let value: isize = conn.get("my_key").unwrap(); 28 | assert_eq!(value, 42); 29 | } 30 | 31 | #[test] 32 | fn test_mget() { 33 | let (sd, _rd1, _rd2) = get_redis_daemons(); 34 | 35 | // A simple set and then get. 36 | let client = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 37 | let conn = client.get_connection().unwrap(); 38 | let _: () = conn.set("key_one", 42).unwrap(); 39 | let _: () = conn.set("key_two", 43).unwrap(); 40 | let _: () = conn.set("key_three", 44).unwrap(); 41 | let value: Vec = conn.get(&["key_one", "key_two", "key_three"]).unwrap(); 42 | assert_eq!(value, vec![42, 43, 44]); 43 | } 44 | 45 | #[test] 46 | fn test_invalid_commands() { 47 | let (sd, _rd1, _rd2) = get_redis_daemons(); 48 | 49 | // Do a ping first. 50 | let client = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 51 | let conn = client.get_connection().unwrap(); 52 | let ping_cmd = redis_cmd("PING"); 53 | let ping_result: RedisResult = ping_cmd.query(&conn); 54 | assert!(ping_result.is_ok()); 55 | 56 | // Now do INFO which is not supported. 57 | let info_cmd = redis_cmd("INFO"); 58 | let info_result: RedisResult = info_cmd.query(&conn); 59 | assert!(info_result.is_err()); 60 | } 61 | 62 | #[test] 63 | fn test_case_insensitive_commands() { 64 | let (sd, _rd1, _rd2) = get_redis_daemons(); 65 | 66 | // Do a lowercase ping first. 67 | let client = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 68 | let conn = client.get_connection().unwrap(); 69 | let ping_cmd = redis_cmd("ping"); 70 | let ping_result: RedisResult = ping_cmd.query(&conn); 71 | assert!(ping_result.is_ok()); 72 | 73 | // Do an uppercase ping next. 74 | let ping_cmd2 = redis_cmd("PING"); 75 | let ping_result2: RedisResult = ping_cmd2.query(&conn); 76 | assert!(ping_result2.is_ok()); 77 | } 78 | 79 | #[test] 80 | fn test_null_key() { 81 | let (sd, _rd1, _rd2) = get_redis_daemons(); 82 | 83 | // A simple set and then get. 
84 | let client = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 85 | let conn = client.get_connection().unwrap(); 86 | 87 | let _: () = conn.set("", 19).unwrap(); 88 | let value: isize = conn.get("").unwrap(); 89 | assert_eq!(value, 19); 90 | 91 | let _: () = conn.set("", "").unwrap(); 92 | let value: String = conn.get("").unwrap(); 93 | assert_eq!(value, ""); 94 | 95 | let _: () = conn.set_multiple(&[("", "x"), ("d", "t")]).unwrap(); 96 | let value: String = conn.get("").unwrap(); 97 | assert_eq!(value, "x"); 98 | } 99 | 100 | #[test] 101 | fn test_linsert() { 102 | let (sd, _rd1, _rd2) = get_redis_daemons(); 103 | 104 | // A simple set and then get. 105 | let client = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 106 | let conn = client.get_connection().unwrap(); 107 | 108 | let _: () = conn.rpush("mylist", "Hello").unwrap(); 109 | let _: () = conn.rpush("mylist", "World").unwrap(); 110 | let _: () = conn.linsert_before("mylist", "World", "There").unwrap(); 111 | let value: Vec = conn.lrange("mylist", 0, -1).unwrap(); 112 | assert_eq!(value, ["Hello", "There", "World"]); 113 | } 114 | 115 | #[test] 116 | fn test_large_insert_times_out() { 117 | let (sd, _rd1, _rd2) = get_redis_daemons(); 118 | 119 | let client = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 120 | let conn = client.get_connection().unwrap(); 121 | 122 | let mut hash_fields = Vec::new(); 123 | for i in 0..500000 { 124 | let key = format!("k-{}", i); 125 | let value = format!("v-{}", i); 126 | 127 | hash_fields.push((key, value)); 128 | } 129 | 130 | let result: RedisResult<()> = conn.hset_multiple("large-hash", &hash_fields); 131 | match result { 132 | Ok(_) => panic!("should have been error after request timing out"), 133 | Err(inner_err) => assert_eq!(inner_err.kind(), RedisErrorKind::ResponseError), 134 | } 135 | } 136 | 137 | #[test] 138 | fn test_quit_drops_conn() { 139 | let (sd, _rd1, _rd2) = get_redis_daemons(); 140 | 141 | let client = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 142 | let conn = client.get_connection().unwrap(); 143 | 144 | let _: () = conn.set("", 19).unwrap(); 145 | let value: isize = conn.get("").unwrap(); 146 | assert_eq!(value, 19); 147 | 148 | // Now end the connection with QUIT. 149 | let _ = conn.send_packed_command(b"quit\r\n").unwrap(); 150 | let _ = conn.recv_response().unwrap(); 151 | 152 | // We still have our sending side open, so we can send the PING command, but trying to 153 | // receive the command should fail. 154 | let _ = conn.send_packed_command(b"ping\r\n").unwrap(); 155 | match conn.recv_response().err() { 156 | Some(e) => { 157 | println!("quit conn error: {:?}", e); 158 | assert!(e.is_connection_dropped() || e.kind() == RedisErrorKind::ResponseError) 159 | }, 160 | None => panic!("call after quit should yield error"), 161 | } 162 | } 163 | 164 | #[test] 165 | fn test_traffic_shadowing() { 166 | let (sd, rd1, rd2) = get_redis_daemons(); 167 | 168 | let client = RedisClient::open(sd.get_shadow_conn_str()).unwrap(); 169 | let conn = client.get_connection().unwrap(); 170 | 171 | // Set values directly on both Redis servers so we can distinguish between nodes when 172 | // we eventually go through Synchrotron. 
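// (Seeding the two nodes with different values lets the assertions below show
// that a single SET issued through the shadow listener reaches both the
// default pool and the shadow pool.)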
173 | let r1client = RedisClient::open(rd1.get_conn_str()).unwrap(); 174 | let r1conn = r1client.get_connection().unwrap(); 175 | 176 | let _: () = r1conn.set("two", 1).unwrap(); 177 | let value1: isize = r1conn.get("two").unwrap(); 178 | assert_eq!(value1, 1); 179 | 180 | let r2client = RedisClient::open(rd2.get_conn_str()).unwrap(); 181 | let r2conn = r2client.get_connection().unwrap(); 182 | 183 | let _: () = r2conn.set("two", 2).unwrap(); 184 | let value2: isize = r2conn.get("two").unwrap(); 185 | assert_eq!(value2, 2); 186 | 187 | // Now set the value through Synchrotron. 188 | let _: () = conn.set("two", 3).unwrap(); 189 | 190 | // Wait for a hot second just to make sure the shadow pool is hit. 191 | thread::sleep(Duration::from_millis(50)); 192 | 193 | // Both pools should have the same value now. 194 | let value3: isize = r1conn.get("two").unwrap(); 195 | assert_eq!(value3, 3); 196 | 197 | let value4: isize = r2conn.get("two").unwrap(); 198 | assert_eq!(value4, 3); 199 | 200 | // Do it through Synchrotron one more time to show the shadow backend isn't locked up or 201 | // anything. 202 | let _: () = conn.set("two", 4).unwrap(); 203 | 204 | // Wait for a hot second just to make sure the shadow pool is hit. 205 | thread::sleep(Duration::from_millis(50)); 206 | 207 | // Both pools should have the same value now. 208 | let value3: isize = r1conn.get("two").unwrap(); 209 | assert_eq!(value3, 4); 210 | 211 | let value4: isize = r2conn.get("two").unwrap(); 212 | assert_eq!(value4, 4); 213 | 214 | } 215 | 216 | #[test] 217 | fn test_backend_cooloff() { 218 | let (sd, rd1, rd2) = get_redis_daemons(); 219 | 220 | let client = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 221 | let conn = client.get_connection().unwrap(); 222 | 223 | // Set values directly on both Redis servers so we can distinguish between nodes when 224 | // we eventually go through Synchrotron. 225 | let r1client = RedisClient::open(rd1.get_conn_str()).unwrap(); 226 | let r1conn = r1client.get_connection().unwrap(); 227 | 228 | let _: () = r1conn.set("two", 1).unwrap(); 229 | let value1: isize = r1conn.get("two").unwrap(); 230 | assert_eq!(value1, 1); 231 | 232 | let r2client = RedisClient::open(rd2.get_conn_str()).unwrap(); 233 | let r2conn = r2client.get_connection().unwrap(); 234 | 235 | let _: () = r2conn.set("two", 2).unwrap(); 236 | let value2: isize = r2conn.get("two").unwrap(); 237 | assert_eq!(value2, 2); 238 | 239 | // Now grab the value through Synchrotron so we have our baseline value. 240 | let baseline: isize = conn.get("two").unwrap(); 241 | 242 | // Now kill whichever server was the one that the key routed to. 243 | if baseline == 1 { 244 | drop(rd1); 245 | } else { 246 | drop(rd2); 247 | } 248 | 249 | // Wait for a hot second just to make sure things are dead. 250ms should do it. 250 | thread::sleep(Duration::from_millis(250)); 251 | 252 | // Now, try to ask Synchrotron for the value, five times. Should be all errors. 253 | for _ in 0..5 { 254 | let iclient = RedisClient::open(sd.get_fixed_conn_str()).unwrap(); 255 | let iconn = iclient.get_connection().unwrap(); 256 | let result: RedisResult = iconn.get("two"); 257 | match result { 258 | Ok(_) => panic!("should have been error after killing redis node"), 259 | Err(inner_err) => assert_eq!(inner_err.kind(), RedisErrorKind::ResponseError), 260 | } 261 | } 262 | 263 | // Next one should work as it switches over to the new server. 
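// (With the default options, a backend is marked unhealthy after
// `cooloff_error_limit` (5) consecutive errors; the pool then regenerates its
// distribution without it, so the key re-routes to the surviving node.)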
264 | let failover: isize = conn.get("two").unwrap(); 265 | assert!(failover != baseline); 266 | 267 | // Now wait for the cooloff to expire. 268 | thread::sleep(Duration::from_millis(2500)); 269 | 270 | // And make sure we get errors again. We can't easily restart the killed Redis 271 | // daemon again but we'll know the cooloff happened if we get an error because 272 | // it means it tried the downed server again. 273 | let result: RedisResult = conn.get("two"); 274 | assert!(result.is_err()); 275 | } 276 | } 277 | -------------------------------------------------------------------------------- /src/listener.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 20 | use crate::{ 21 | backend::{ 22 | pool::{BackendPool, BackendPoolBuilder}, 23 | processor::Processor, 24 | redis::RedisProcessor, 25 | }, 26 | common::{AssignedRequests, AssignedResponse, EnqueuedRequests, Message}, 27 | conf::ListenerConfiguration, 28 | errors::CreationError, 29 | protocol::errors::ProtocolError, 30 | routing::{FixedRouter, ShadowRouter}, 31 | service::{Pipeline, PipelineError}, 32 | util::FutureExt, 33 | }; 34 | use bytes::BytesMut; 35 | use futures::{ 36 | future::{lazy, ok, Shared}, 37 | prelude::*, 38 | }; 39 | use futures_turnstyle::Waiter; 40 | use metrics_runtime::Sink as MetricSink; 41 | use net2::TcpBuilder; 42 | use std::{collections::HashMap, fmt::Display, net::SocketAddr}; 43 | use tokio::{io, net::TcpListener, reactor}; 44 | use tokio_evacuate::{Evacuate, Warden}; 45 | use tokio_executor::DefaultExecutor; 46 | use tower_buffer::{Buffer, DirectServiceRef}; 47 | use tower_service::Service; 48 | 49 | type GenericRuntimeFuture = Box + Send + 'static>; 50 | type BufferedPool = Buffer>, EnqueuedRequests>; 51 | 52 | /// Creates a listener from the given configuration. 53 | /// 54 | /// The listener will spawn a socket for accepting client connections, and when a client connects, 55 | /// spawn a task to process all of the messages from that client until the client disconnects or 56 | /// there is an unrecoverable connection/protocol error. 57 | pub fn from_config( 58 | version: usize, name: String, config: ListenerConfiguration, close: Shared, sink: MetricSink, 59 | ) -> Result { 60 | // Create the actual listener proper. 
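// (`get_listener` below binds with SO_REUSEADDR, and SO_REUSEPORT on Unix,
// which lets a reloaded listener generation bind the same address while the
// previous generation is still draining.)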
61 | let listen_address = config.address.clone(); 62 | let listener = get_listener(&listen_address).expect("failed to create the TCP listener"); 63 | 64 | // Now build our handler: this is what's actually going to do the real work. 65 | let protocol = config.protocol.to_lowercase(); 66 | let handler = match protocol.as_str() { 67 | "redis" => routing_from_config(name, config, listener, close.clone(), RedisProcessor::new(), sink), 68 | s => Err(CreationError::InvalidResource(format!("unknown cache protocol: {}", s))), 69 | }?; 70 | 71 | // Make sure our handlers close out when told. 72 | let listen_address2 = listen_address.clone(); 73 | let wrapped = lazy(move || { 74 | info!("[listener] starting listener '{}' (v{})", listen_address, version); 75 | ok(()) 76 | }) 77 | .and_then(|_| handler) 78 | .select2(close) 79 | .then(move |_| { 80 | info!("[listener] shutting down listener '{}' (v{})", listen_address2, version); 81 | ok(()) 82 | }); 83 | Ok(Box::new(wrapped)) 84 | } 85 | 86 | fn routing_from_config( 87 | name: String, config: ListenerConfiguration, listener: TcpListener, close: C, processor: P, sink: MetricSink, 88 | ) -> Result 89 | where 90 | P: Processor + Clone + Send + 'static, 91 | P::Message: Message + Clone + Send + 'static, 92 | P::Transport: 93 | Sink + Stream + Send, 94 | C: Future + Clone + Send + 'static, 95 | { 96 | let reload_timeout_ms = config.reload_timeout_ms.unwrap_or_else(|| 5000); 97 | 98 | // Build our evacuator and wrap it as shared. This lets us soft close everything. 99 | let (warden, evacuate) = Evacuate::new(close, reload_timeout_ms); 100 | let closer = evacuate.shared(); 101 | 102 | // Get our scoped metric sink. 103 | let mut sink = sink.clone(); 104 | sink.add_default_labels(&[("listener", name)]); 105 | 106 | // Extract all the configured pools and build a backend pool for them. 107 | let mut pools = HashMap::new(); 108 | let pool_configs = config.pools.clone(); 109 | for (pool_name, pool_config) in pool_configs { 110 | debug!( 111 | "[listener] configuring backend pool '{}' for address '{}'", 112 | &pool_name, 113 | config.address.clone() 114 | ); 115 | 116 | let pool = BackendPoolBuilder::new(pool_name.clone(), processor.clone(), pool_config, sink.clone()).build()?; 117 | let buffered_pool = Buffer::new_direct(pool, 32, &DefaultExecutor::current()).map_err(|_| { 118 | CreationError::InvalidResource(format!( 119 | "error while building pool '{}': failed to spawn task", 120 | pool_name 121 | )) 122 | })?; 123 | pools.insert(pool_name, buffered_pool); 124 | } 125 | 126 | // Figure out what sort of routing we're doing so we can grab the right handler. 127 | let mut routing = config.routing; 128 | let route_type = routing 129 | .entry("type".to_owned()) 130 | .or_insert_with(|| "fixed".to_owned()) 131 | .to_lowercase(); 132 | match route_type.as_str() { 133 | "fixed" => get_fixed_router(listener, pools, processor, warden, closer, sink), 134 | "shadow" => get_shadow_router(listener, pools, processor, warden, closer, sink), 135 | x => Err(CreationError::InvalidResource(format!("unknown route type '{}'", x))), 136 | } 137 | } 138 | 139 | fn get_fixed_router( 140 | listener: TcpListener, pools: HashMap>, processor: P, warden: Warden, close: C, 141 | sink: MetricSink, 142 | ) -> Result 143 | where 144 | P: Processor + Clone + Send + 'static, 145 | P::Message: Message + Clone + Send + 'static, 146 | P::Transport: 147 | Sink + Stream + Send, 148 | C: Future + Clone + Send + 'static, 149 | { 150 | // Construct an instance of our router. 
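// (A fixed router sends every request to the pool named "default"; the shadow
// variant below additionally requires a pool named "shadow" that receives a
// mirrored copy of the traffic.)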
151 | let default_pool = pools 152 | .get("default") 153 | .ok_or_else(|| CreationError::InvalidResource("no default pool configured for fixed router".to_string()))? 154 | .clone(); 155 | let router = FixedRouter::new(processor.clone(), default_pool); 156 | 157 | build_router_chain(listener, processor, router, warden, close, sink) 158 | } 159 | 160 | fn get_shadow_router( 161 | listener: TcpListener, pools: HashMap>, processor: P, warden: Warden, close: C, 162 | sink: MetricSink, 163 | ) -> Result 164 | where 165 | P: Processor + Clone + Send + 'static, 166 | P::Message: Message + Clone + Send + 'static, 167 | P::Transport: 168 | Sink + Stream + Send, 169 | C: Future + Clone + Send + 'static, 170 | { 171 | // Construct an instance of our router. 172 | let default_pool = pools 173 | .get("default") 174 | .ok_or_else(|| CreationError::InvalidResource("no default pool configured for shadow router".to_string()))? 175 | .clone(); 176 | 177 | let shadow_pool = pools 178 | .get("shadow") 179 | .ok_or_else(|| CreationError::InvalidResource("no shadow pool configured for shadow router".to_string()))? 180 | .clone(); 181 | 182 | let router = ShadowRouter::new(processor.clone(), default_pool, shadow_pool); 183 | 184 | build_router_chain(listener, processor, router, warden, close, sink) 185 | } 186 | 187 | fn build_router_chain( 188 | listener: TcpListener, processor: P, router: R, warden: Warden, close: C, mut sink: MetricSink, 189 | ) -> Result 190 | where 191 | P: Processor + Clone + Send + 'static, 192 | P::Message: Message + Clone + Send + 'static, 193 | P::Transport: 194 | Sink + Stream + Send, 195 | R: Service> + Clone + Send + 'static, 196 | R::Error: Display + Send + Sync, 197 | R::Response: IntoIterator> + Send, 198 | R::Future: Future + Send, 199 | C: Future + Clone + Send + 'static, 200 | { 201 | let close2 = close.clone(); 202 | let task = listener 203 | .incoming() 204 | .for_each(move |client| { 205 | warden.increment(); 206 | sink.record_counter("clients_connected", 1); 207 | 208 | let router = router.clone(); 209 | let processor = processor.clone(); 210 | let close = close.clone(); 211 | let warden2 = warden.clone(); 212 | let mut sink2 = sink.clone(); 213 | let client_addr = client.peer_addr().unwrap(); 214 | debug!("[client] {} connected", client_addr); 215 | 216 | let transport = processor.get_transport(client); 217 | let task = Pipeline::new(transport, router, processor, sink.clone()) 218 | .then(move |result| { 219 | match result { 220 | Ok(_) => { 221 | debug!("[client] {} disconnected", client_addr); 222 | }, 223 | Err(e) => { 224 | match e { 225 | // If we got a protocol error from a client, that's bad. Otherwise, 226 | // clients closing their connection is a normal thing. 
227 | PipelineError::TransportReceive(ie) => { 228 | if !ie.client_closed() { 229 | sink2.record_counter("client_errors", 1); 230 | error!("[client] transport error from {}: {}", client_addr, ie); 231 | } 232 | }, 233 | e => error!("[client] error from {}: {}", client_addr, e), 234 | } 235 | }, 236 | } 237 | 238 | warden2.decrement(); 239 | 240 | ok::<(), ()>(()) 241 | }) 242 | .select2(close); 243 | 244 | tokio::spawn(task.untyped()); 245 | 246 | ok(()) 247 | }) 248 | .map_err(|e| error!("[listener] caught error while accepting connections: {:?}", e)) 249 | .select2(close2); 250 | 251 | Ok(Box::new(task.untyped())) 252 | } 253 | 254 | fn get_listener(addr_str: &str) -> io::Result { 255 | let addr = addr_str.parse().unwrap(); 256 | let builder = match addr { 257 | SocketAddr::V4(_) => TcpBuilder::new_v4()?, 258 | SocketAddr::V6(_) => TcpBuilder::new_v6()?, 259 | }; 260 | configure_builder(&builder)?; 261 | builder.reuse_address(true)?; 262 | builder.bind(addr)?; 263 | builder 264 | .listen(1024) 265 | .and_then(|l| TcpListener::from_std(l, &reactor::Handle::default())) 266 | } 267 | 268 | #[cfg(unix)] 269 | fn configure_builder(builder: &TcpBuilder) -> io::Result<()> { 270 | use net2::unix::*; 271 | 272 | builder.reuse_port(true)?; 273 | Ok(()) 274 | } 275 | 276 | #[cfg(windows)] 277 | fn configure_builder(_builder: &TcpBuilder) -> io::Result<()> { Ok(()) } 278 | -------------------------------------------------------------------------------- /src/backend/mod.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
20 | pub mod distributor; 21 | mod errors; 22 | pub mod hasher; 23 | mod health; 24 | pub mod message_queue; 25 | pub mod pool; 26 | pub mod processor; 27 | pub mod redis; 28 | 29 | pub use self::errors::{BackendError, PoolError}; 30 | 31 | use crate::{ 32 | backend::{distributor::BackendDescriptor, health::BackendHealth, processor::Processor}, 33 | common::{AssignedResponses, EnqueuedRequests, Message, PendingResponses}, 34 | errors::CreationError, 35 | util::ProcessFuture, 36 | }; 37 | use futures::{ 38 | future::{join_all, ok, Either, JoinAll}, 39 | prelude::*, 40 | Poll, 41 | }; 42 | use metrics_runtime::{data::Counter, Sink as MetricSink}; 43 | use std::{ 44 | collections::{HashMap, VecDeque}, 45 | marker::PhantomData, 46 | net::SocketAddr, 47 | str::FromStr, 48 | time::Duration, 49 | }; 50 | use tokio::{ 51 | net::tcp::TcpStream, 52 | sync::oneshot, 53 | timer::{timeout::Error as TimeoutError, Timeout}, 54 | }; 55 | use tower_direct_service::DirectService; 56 | 57 | type MaybeTimeout = Either, Timeout>; 58 | 59 | pub struct NotTimeout 60 | where 61 | F: Future, 62 | { 63 | inner: F, 64 | } 65 | 66 | impl Future for NotTimeout 67 | where 68 | F: Future, 69 | { 70 | type Error = TimeoutError; 71 | type Item = F::Item; 72 | 73 | fn poll(&mut self) -> Poll { self.inner.poll().map_err(TimeoutError::inner) } 74 | } 75 | 76 | /// A backend connection. 77 | /// 78 | /// This represents a one-to-one mapping with a TCP connection to the given backend server. This 79 | /// connection will independently poll the work queue for the backend and run requests when 80 | /// available. 81 | /// 82 | /// If a backend connection encounters an error, it will terminate and notify its backend 83 | /// supervisor, so that it can be replaced. 84 | pub struct BackendConnection
<P>
85 | where 86 | P: Processor + Send + 'static, 87 | P::Message: Message + Clone + Send + 'static, 88 | { 89 | processor: P, 90 | address: SocketAddr, 91 | timeout_ms: u64, 92 | noreply: bool, 93 | 94 | stream: Option, 95 | current: Option>, 96 | pending: VecDeque>, 97 | pending_len: usize, 98 | 99 | connects: Counter, 100 | } 101 | 102 | impl
<P> BackendConnection<P>
103 | where 104 | P: Processor + Send + 'static, 105 | P::Message: Message + Clone + Send + 'static, 106 | { 107 | pub fn new( 108 | address: SocketAddr, processor: P, timeout_ms: u64, noreply: bool, mut sink: MetricSink, 109 | ) -> BackendConnection
<P>
{ 110 | BackendConnection { 111 | processor, 112 | address, 113 | timeout_ms, 114 | noreply, 115 | stream: None, 116 | current: None, 117 | pending: VecDeque::new(), 118 | pending_len: 0, 119 | connects: sink.counter("connects"), 120 | } 121 | } 122 | 123 | pub fn enqueue(&mut self, batch: EnqueuedRequests) { 124 | self.pending_len += batch.len(); 125 | self.pending.push_back(batch); 126 | } 127 | } 128 | 129 | impl
<P> DirectService<EnqueuedRequests<P::Message>> for BackendConnection<P>
130 | where 131 | P: Processor + Send + 'static, 132 | P::Message: Message + Clone + Send + 'static, 133 | { 134 | type Error = BackendError; 135 | type Future = ResponseFuture; 136 | type Response = AssignedResponses; 137 | 138 | fn poll_ready(&mut self) -> Poll<(), Self::Error> { Ok(Async::Ready(())) } 139 | 140 | fn poll_service(&mut self) -> Poll<(), Self::Error> { 141 | loop { 142 | // First, check if we have an operation running. If we do, poll it to drive it towards 143 | // completion. If it's done, we'll reclaim the socket and then fallthrough to trying to 144 | // find another piece of work to run. 145 | if let Some(task) = self.current.as_mut() { 146 | match task.poll() { 147 | Ok(Async::Ready(stream)) => { 148 | // The operation finished, and gave us the connection back. 149 | self.stream = Some(stream); 150 | self.current = None; 151 | }, 152 | Ok(Async::NotReady) => return Ok(Async::NotReady), 153 | Err(e) => { 154 | // If we caught any sort of error, it means this batch has implicitly 155 | // failed. Some callers may have gotten the data they requested, depending 156 | // on what step in the process failed. All requests are protected by a 157 | // drop guard that fulfills the response channel if it hasn't been 158 | // fulfilled yet, so that we can at least hand back an error saying that 159 | // something broke internally. 160 | self.current = None; 161 | 162 | // If this is specifically an inner error, and not a timeout, then the 163 | // connection to the backend is also likely compromised, so we'll drop that 164 | // as well, giving us a new connection when we go to process our next 165 | // batch. 166 | if e.is_inner() { 167 | self.stream = None; 168 | return Err(e.into_inner().unwrap().into()); 169 | } 170 | }, 171 | } 172 | } 173 | 174 | // If we're here, we have no current operation to drive, so see if anything is in our work 175 | // queue that we can grab. 176 | let mut batch: Option> = None; 177 | loop { 178 | if let Some(batch2) = batch.as_ref() { 179 | if batch2.len() > 256 { 180 | break; 181 | } 182 | } 183 | 184 | match self.pending.pop_front() { 185 | Some(batch2) => { 186 | if let Some(batch3) = batch.as_mut() { 187 | batch3.extend(batch2); 188 | } else { 189 | batch = Some(batch2); 190 | } 191 | }, 192 | None => break, 193 | } 194 | } 195 | 196 | match batch { 197 | Some(batch) => { 198 | self.pending_len -= batch.len(); 199 | 200 | // Get our stream, which we either already have or we'll just get a future for. 201 | let stream = match self.stream.take() { 202 | Some(stream) => Either::A(ok(stream)), 203 | None => { 204 | self.connects.record(1); 205 | Either::B(self.processor.preconnect(&self.address, self.noreply)) 206 | }, 207 | }; 208 | 209 | // Get the response future from the processor. 210 | let inner = self.processor.process(batch, stream); 211 | 212 | // Wrap it up to handle any configured timeouts. 
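// (A configured timeout of 0 disables the deadline entirely: `NotTimeout` only
// adapts the error type so both `Either` arms yield a `TimeoutError`, while any
// other value wraps the batch future in a tokio `Timeout` of that many
// milliseconds.)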
213 | let work = if self.timeout_ms == 0 { 214 | Either::A(NotTimeout { inner }) 215 | } else { 216 | Either::B(Timeout::new(inner, Duration::from_millis(self.timeout_ms))) 217 | }; 218 | 219 | self.current = Some(work); 220 | }, 221 | None => return Ok(Async::Ready(())), 222 | } 223 | } 224 | } 225 | 226 | fn poll_close(&mut self) -> Poll<(), Self::Error> { 227 | if self.current.is_some() || !self.pending.is_empty() { 228 | return Ok(Async::NotReady); 229 | } 230 | 231 | Ok(Async::Ready(())) 232 | } 233 | 234 | fn call(&mut self, mut req: EnqueuedRequests) -> Self::Future { 235 | // This is weird, but imagine: our requests are actually (request, response channel). 236 | // 237 | // We send this around via `EnqueuedRequest` because we need to be able to send back a 238 | // future from the leaf service in this stack -- us, `BackendConnection` -- when we get 239 | // called. 240 | // 241 | // Since some requests don't actually have or require a responseo to the client, though, 242 | // the response channel isn't always there. Thus, we check each request and extract its 243 | // response channel if it has one. The requests are then queued up for processing, and the 244 | // response channels wrapped in a future that can be shipped back to the caller. 245 | let response = req 246 | .as_mut_slice() 247 | .iter_mut() 248 | .map(|x| x.get_response_rx()) 249 | .filter(|x| x.is_some()) 250 | .map(|x| x.unwrap()) 251 | .collect::>(); 252 | 253 | self.enqueue(req); 254 | ResponseFuture::new(response) 255 | } 256 | } 257 | 258 | /// Managed connections to a backend server. 259 | /// 260 | /// This backend is serviced by a Tokio task, which processes all work requests to backend servers, 261 | /// and the connections that constitute this backend server. 262 | /// 263 | /// Backends are, in essence, proxy objects to their respective Tokio task, which is doing the 264 | /// actual heavy lifting. They exist purely as a facade to the underlying channels which shuttle 265 | /// work back and forth between the backend connections and client connections. 266 | /// 267 | /// Backends maintain a given number of connections to their underlying service, and track error 268 | /// states, recycling connections and pausing work when required. 269 | pub struct Backend
<P>
270 | where 271 | P: Processor + Clone + Send + 'static, 272 | P::Message: Message + Clone + Send + 'static, 273 | { 274 | identifier: String, 275 | health: BackendHealth, 276 | conns: Vec>, 277 | conns_index: usize, 278 | sink: MetricSink, 279 | } 280 | 281 | impl
<P> Backend<P>
282 | where 283 | P: Processor + Clone + Send + 'static, 284 | P::Message: Message + Clone + Send + 'static, 285 | { 286 | pub fn new( 287 | address: SocketAddr, identifier: String, processor: P, mut options: HashMap, noreply: bool, 288 | sink: MetricSink, 289 | ) -> Result, CreationError> 290 | where 291 | P: Processor + Clone + Send + 'static, 292 | P::Message: Message + Send + 'static, 293 | { 294 | let sink = sink.scoped("backend"); 295 | 296 | let conn_limit_raw = options.entry("conns".to_owned()).or_insert_with(|| "1".to_owned()); 297 | let conn_limit = usize::from_str(conn_limit_raw.as_str()) 298 | .map_err(|_| CreationError::InvalidParameter("options.conns".to_string()))?; 299 | debug!("[listener] using connection limit of '{}'", conn_limit); 300 | 301 | let cooloff_enabled_raw = options 302 | .entry("cooloff_enabled".to_owned()) 303 | .or_insert_with(|| "true".to_owned()); 304 | let cooloff_enabled = bool::from_str(cooloff_enabled_raw.as_str()) 305 | .map_err(|_| CreationError::InvalidParameter("options.cooloff_enabled".to_string()))?; 306 | 307 | let cooloff_timeout_ms_raw = options 308 | .entry("cooloff_timeout_ms".to_owned()) 309 | .or_insert_with(|| "10000".to_owned()); 310 | let cooloff_timeout_ms = u64::from_str(cooloff_timeout_ms_raw.as_str()) 311 | .map_err(|_| CreationError::InvalidParameter("options.cooloff_timeout_ms".to_string()))?; 312 | 313 | let cooloff_error_limit_raw = options 314 | .entry("cooloff_error_limit".to_owned()) 315 | .or_insert_with(|| "5".to_owned()); 316 | let cooloff_error_limit = usize::from_str(cooloff_error_limit_raw.as_str()) 317 | .map_err(|_| CreationError::InvalidParameter("options.cooloff_error_limit".to_string()))?; 318 | 319 | let health = BackendHealth::new(cooloff_enabled, cooloff_timeout_ms, cooloff_error_limit); 320 | 321 | // TODO: where the hell did the actual backend timeout value go? can't hard-code this 322 | let conns = (0..conn_limit) 323 | .map(|_| BackendConnection::new(address, processor.clone(), 500, noreply, sink.clone())) 324 | .collect(); 325 | 326 | Ok(Backend { 327 | identifier, 328 | health, 329 | conns, 330 | conns_index: 0, 331 | sink, 332 | }) 333 | } 334 | 335 | pub fn health(&self) -> &BackendHealth { &self.health } 336 | 337 | pub fn get_descriptor(&mut self) -> BackendDescriptor { 338 | BackendDescriptor { 339 | idx: 0, 340 | identifier: self.identifier.clone(), 341 | healthy: self.health.is_healthy(), 342 | } 343 | } 344 | } 345 | 346 | impl
<P> DirectService<EnqueuedRequests<P::Message>> for Backend<P>
347 | where 348 | P: Processor + Clone + Send + 'static, 349 | P::Message: Message + Clone + Send + 'static, 350 | { 351 | type Error = BackendError; 352 | type Future = ResponseFuture; 353 | type Response = AssignedResponses; 354 | 355 | fn poll_ready(&mut self) -> Poll<(), Self::Error> { 356 | if self.health.is_healthy() { 357 | Ok(Async::Ready(())) 358 | } else { 359 | Ok(Async::NotReady) 360 | } 361 | } 362 | 363 | fn poll_service(&mut self) -> Poll<(), Self::Error> { 364 | for conn in &mut self.conns { 365 | if conn.poll_service().is_err() { 366 | self.health.increment_error(); 367 | } 368 | } 369 | 370 | Ok(Async::Ready(())) 371 | } 372 | 373 | fn poll_close(&mut self) -> Poll<(), Self::Error> { Ok(Async::Ready(())) } 374 | 375 | fn call(&mut self, req: EnqueuedRequests) -> Self::Future { 376 | let result = self.conns[self.conns_index].call(req); 377 | 378 | self.conns_index += 1; 379 | self.conns_index %= self.conns.len(); 380 | 381 | result 382 | } 383 | } 384 | 385 | pub struct ResponseFuture 386 | where 387 | P: Processor + Send + 'static, 388 | P::Message: Message + Send + 'static, 389 | E: From, 390 | { 391 | responses: JoinAll>, 392 | _processor: PhantomData
<P>
, 393 | _error: PhantomData, 394 | } 395 | 396 | impl ResponseFuture 397 | where 398 | P: Processor + Send + 'static, 399 | P::Message: Message + Send + 'static, 400 | E: From, 401 | { 402 | pub fn new(responses: PendingResponses) -> ResponseFuture { 403 | ResponseFuture { 404 | responses: join_all(responses), 405 | _processor: PhantomData, 406 | _error: PhantomData, 407 | } 408 | } 409 | } 410 | 411 | impl Future for ResponseFuture 412 | where 413 | P: Processor + Send + 'static, 414 | P::Message: Message + Send + 'static, 415 | E: From, 416 | { 417 | type Error = E; 418 | type Item = AssignedResponses; 419 | 420 | fn poll(&mut self) -> Poll { self.responses.poll().map_err(|e| e.into()) } 421 | } 422 | -------------------------------------------------------------------------------- /src/backend/redis.rs: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2018 Nuclear Furnace 2 | // 3 | // Permission is hereby granted, free of charge, to any person obtaining a copy 4 | // of this software and associated documentation files (the "Software"), to deal 5 | // in the Software without restriction, including without limitation the rights 6 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | // copies of the Software, and to permit persons to whom the Software is 8 | // furnished to do so, subject to the following conditions: 9 | // 10 | // The above copyright notice and this permission notice shall be included in all 11 | // copies or substantial portions of the Software. 12 | // 13 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | // SOFTWARE. 
20 | use crate::{ 21 | backend::{ 22 | message_queue::MessageState, 23 | processor::{Processor, ProcessorError, TcpStreamFuture}, 24 | }, 25 | common::{EnqueuedRequests, Message}, 26 | protocol::{ 27 | errors::ProtocolError, 28 | redis::{self, RedisMessage, RedisTransport}, 29 | }, 30 | util::ProcessFuture, 31 | }; 32 | use bytes::BytesMut; 33 | use futures::{ 34 | future::{ok, Either}, 35 | prelude::*, 36 | }; 37 | use itoa; 38 | use std::{borrow::Borrow, error::Error, net::SocketAddr}; 39 | use tokio::net::TcpStream; 40 | 41 | const REDIS_DEL: &[u8] = b"del"; 42 | const REDIS_SET: &[u8] = b"set"; 43 | 44 | #[derive(Clone)] 45 | pub struct RedisProcessor; 46 | 47 | impl RedisProcessor { 48 | pub fn new() -> RedisProcessor { RedisProcessor {} } 49 | } 50 | 51 | impl Processor for RedisProcessor { 52 | type Message = RedisMessage; 53 | type Transport = RedisTransport; 54 | 55 | fn fragment_messages( 56 | &self, msgs: Vec, 57 | ) -> Result, ProcessorError> { 58 | redis_fragment_messages(msgs) 59 | } 60 | 61 | fn defragment_messages(&self, msgs: Vec<(MessageState, Self::Message)>) -> Result { 62 | redis_defragment_messages(msgs) 63 | } 64 | 65 | fn get_error_message(&self, e: Box) -> Self::Message { RedisMessage::from_error(e) } 66 | 67 | fn get_error_message_str(&self, e: &str) -> Self::Message { RedisMessage::from_error_str(e) } 68 | 69 | fn get_transport(&self, client: TcpStream) -> Self::Transport { RedisTransport::new(client) } 70 | 71 | fn preconnect(&self, addr: &SocketAddr, noreply: bool) -> ProcessFuture { 72 | let inner = TcpStream::connect(addr) 73 | .map_err(ProtocolError::IoError) 74 | .and_then(move |conn| { 75 | if noreply { 76 | let noreply_req = RedisMessage::from_inline("CLIENT REPLY OFF"); 77 | Either::A(redis::write_raw_message(conn, noreply_req).map(|(server, _n)| server)) 78 | } else { 79 | Either::B(ok(conn)) 80 | } 81 | }); 82 | ProcessFuture::new(inner) 83 | } 84 | 85 | fn process(&self, req: EnqueuedRequests, stream: TcpStreamFuture) -> ProcessFuture { 86 | let inner = stream 87 | .and_then(move |server| redis::write_messages(server, req)) 88 | .and_then(move |(server, msgs, _n)| redis::read_messages(server, msgs)) 89 | .and_then(move |(server, _n)| ok(server)); 90 | ProcessFuture::new(inner) 91 | } 92 | } 93 | 94 | fn redis_fragment_messages(msgs: Vec) -> Result, ProcessorError> { 95 | let mut fragments = Vec::new(); 96 | 97 | for msg in msgs { 98 | if !redis_is_multi_message(&msg) { 99 | // This message isn't fragmentable, so it passes through untouched. 100 | let state = if msg.is_inline() { 101 | MessageState::Inline 102 | } else { 103 | MessageState::Standalone 104 | }; 105 | fragments.push((state, msg)); 106 | } else { 107 | match msg { 108 | RedisMessage::Bulk(_, mut args) => { 109 | // Split off the actual command string and figure out what the new command string 110 | // will be for our fragments. 111 | let cmd = args.remove(0); 112 | let cmd_buf = redis_get_data_buffer(&cmd); 113 | let new_cmd_buf = match cmd_buf { 114 | Some(buf) => { 115 | match buf { 116 | b"mget" => b"get", 117 | b"del" => b"del", 118 | b"mset" => b"set", 119 | x => { 120 | return Err(ProcessorError::FragmentError(format!( 121 | "tried to fragment command '{:?}' but command is not fragmentable!", 122 | x 123 | ))); 124 | }, 125 | } 126 | }, 127 | None => { 128 | return Err(ProcessorError::FragmentError( 129 | "tried to fragment bulk message with non-data argument in position 0!".to_owned(), 130 | )); 131 | }, 132 | }; 133 | 134 | // Now we'll do the actual splitting. 
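// As a hypothetical illustration: an incoming "MSET k1 v1 k2 v2" is split into the
// fragments "SET k1 v1" and "SET k2 v2" (two arguments taken per fragment), while
// "MGET k1 k2" and "DEL k1 k2" are split per key into "GET k1"/"GET k2" and
// "DEL k1"/"DEL k2".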
We take the new command string (get for 135 | // mget, set for mset, and del for del) and build a buffer for it. We extract 136 | // N arguments at a time from our original message, where N is either 1 or 2 137 | // depending on if this is a set operation. With each N arguments, we build a 138 | // new message using the new command string and the arguments we extract. 139 | let cmd_arg = redis_new_data_buffer(&new_cmd_buf[..]); 140 | let mut cmd_type = BytesMut::with_capacity(new_cmd_buf.len()); 141 | cmd_type.extend_from_slice(&new_cmd_buf[..]); 142 | 143 | let arg_take_cnt = if new_cmd_buf == b"set" { 2 } else { 1 }; 144 | let total_fragments = args.len(); 145 | 146 | // Make sure we won't be left with extra arguments. 147 | if total_fragments % arg_take_cnt != 0 { 148 | return Err(ProcessorError::FragmentError(format!( 149 | "incorrect multiple of argument count! (multiple: {}, arg count: {}, cmd type: {:?})", 150 | arg_take_cnt, 151 | args.len(), 152 | &cmd_type 153 | ))); 154 | } 155 | 156 | // For get requests, we can stream back the fragments so long as they're in 157 | // order. We also need to make sure we provide the proper header (aka the data 158 | // that tells the client the response is going to be multiple items) to the 159 | // first fragment so that we generate valid output. 160 | let is_streaming = new_cmd_buf == b"get"; 161 | let mut streaming_hdr = if is_streaming { 162 | Some(redis_new_bulk_buffer(total_fragments)) 163 | } else { 164 | None 165 | }; 166 | 167 | let mut fragment_count = 0; 168 | while !args.is_empty() { 169 | // This is contorted but we split off the first N arguments, which leaves `args` 170 | // with those N and `new_args` with the rest. We feed those to a function which 171 | // builds us our new message, and then finally we replace `args` with `new_args` 172 | // so that we can continue on. 173 | let new_args = args.split_off(arg_take_cnt); 174 | args.insert(0, cmd_arg.clone()); 175 | let new_bulk = redis_new_bulk_from_args(args); 176 | let is_last = new_args.is_empty(); 177 | 178 | let state = if is_streaming { 179 | MessageState::StreamingFragmented(streaming_hdr.take(), is_last) 180 | } else { 181 | // Normal fragments need to know the command they're being used for so 182 | // we can properly form a command-specific response when we ultimate 183 | // defragment these messages later on. 184 | MessageState::Fragmented(cmd_type.clone(), fragment_count, total_fragments) 185 | }; 186 | 187 | fragments.push((state, new_bulk)); 188 | fragment_count += 1; 189 | args = new_args; 190 | } 191 | }, 192 | _ => unreachable!(), 193 | } 194 | } 195 | } 196 | 197 | Ok(fragments) 198 | } 199 | 200 | fn redis_defragment_messages(fragments: Vec<(MessageState, RedisMessage)>) -> Result { 201 | // This shouldn't happen but it's a simple invariant that lets me write slightly cleaner code. 202 | if fragments.is_empty() { 203 | return Ok(RedisMessage::Null); 204 | } 205 | 206 | // Peek at the metadata buffer on the first message. If it's not a fragmented message, then 207 | // something isn't rightand we need to bomb out. 208 | let first = fragments.first().unwrap(); 209 | let cmd_type = match first { 210 | (MessageState::Fragmented(buf, _, _), _) => buf.clone(), 211 | _ => { 212 | return Err(ProcessorError::DefragmentError( 213 | "tried to defragment messages, but got non-fragmented message in list".to_owned(), 214 | )); 215 | }, 216 | }; 217 | 218 | // We have the command type, so let's actually defragment now. 
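// For illustration, with made-up replies: a DEL that was fragmented into three
// single-key deletes whose fragments came back as :1, :0, and :1 is collapsed into
// a single integer reply of 2, while a fragmented MSET yields the first error
// fragment seen, or a plain OK if every fragment succeeded.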
219 | match cmd_type.borrow() { 220 | // DEL returns the number of keys it deleted, so we have to tally up the integer responses. 221 | REDIS_DEL => { 222 | let mut keys_deleted = 0; 223 | for (_state, fragment) in fragments { 224 | match fragment { 225 | RedisMessage::Integer(_, value) => keys_deleted += value, 226 | RedisMessage::Error(_, _) => return Ok(fragment), 227 | _ => { 228 | return Err(ProcessorError::DefragmentError( 229 | "non-integer response for DEL!".to_owned(), 230 | )); 231 | }, 232 | } 233 | } 234 | 235 | Ok(RedisMessage::from_integer(keys_deleted)) 236 | }, 237 | REDIS_SET => { 238 | // MSET is funny because it says it can't fail, but really, the command has no failure 239 | // mode _except_ for, like, you know, the server running out of memory. However, MSET 240 | // also promises to be atomic. 241 | // 242 | // So, we have to lie a bit here. If we get back an error, other things could have 243 | // completed, but we'll send back the first error we iterate over so we can at least 244 | // inform the caller that _something_ bad happened. If we see no errors, we assume 245 | // everything went well, and send back the "normal" OK message. 246 | for (_state, fragment) in fragments { 247 | if let RedisMessage::Error(_, _) = fragment { 248 | return Ok(fragment); 249 | } 250 | } 251 | 252 | Ok(RedisMessage::OK) 253 | }, 254 | x => { 255 | Err(ProcessorError::DefragmentError(format!( 256 | "unknown command type '{:?}'", 257 | x 258 | ))) 259 | }, 260 | } 261 | } 262 | 263 | fn redis_get_data_buffer(msg: &RedisMessage) -> Option<&[u8]> { 264 | match msg { 265 | RedisMessage::Data(buf, offset) => Some(redis_clean_data(buf, *offset)), 266 | _ => None, 267 | } 268 | } 269 | 270 | fn redis_is_multi_message(msg: &RedisMessage) -> bool { 271 | match msg { 272 | RedisMessage::Bulk(_, args) => { 273 | match args.len() { 274 | 0 => false, 275 | _ => { 276 | let arg = &args[0]; 277 | match redis_get_data_buffer(arg) { 278 | Some(buf) => { 279 | match buf { 280 | b"mget" | b"mset" | b"del" => true, 281 | _ => false, 282 | } 283 | }, 284 | None => false, 285 | } 286 | }, 287 | } 288 | }, 289 | _ => false, 290 | } 291 | } 292 | 293 | fn redis_clean_data(buf: &BytesMut, offset: usize) -> &[u8] { 294 | assert!(buf.len() > 2); 295 | let val_len = buf.len() - 2; 296 | &buf[offset..val_len] 297 | } 298 | 299 | fn redis_new_data_buffer(buf: &[u8]) -> RedisMessage { 300 | let mut new_buf = BytesMut::new(); 301 | new_buf.extend_from_slice(b"$"); 302 | 303 | let mut cnt_buf = [b'\0'; 20]; 304 | let n = itoa::write(&mut cnt_buf[..], buf.len()).unwrap(); 305 | new_buf.extend_from_slice(&cnt_buf[..n]); 306 | new_buf.extend_from_slice(b"\r\n"); 307 | new_buf.extend_from_slice(buf); 308 | new_buf.extend_from_slice(b"\r\n"); 309 | 310 | RedisMessage::Data(new_buf, 1 + n + 2) 311 | } 312 | 313 | fn redis_new_bulk_buffer(arg_count: usize) -> BytesMut { 314 | let mut buf = BytesMut::new(); 315 | buf.extend_from_slice(b"*"); 316 | 317 | let mut cnt_buf = [b'\0'; 20]; 318 | let n = itoa::write(&mut cnt_buf[..], arg_count).unwrap(); 319 | buf.extend_from_slice(&cnt_buf[..n]); 320 | buf.extend_from_slice(b"\r\n"); 321 | buf 322 | } 323 | 324 | fn redis_new_bulk_from_args(args: Vec) -> RedisMessage { 325 | let mut buf = redis_new_bulk_buffer(args.len()); 326 | let mut new_args = Vec::new(); 327 | for arg in args { 328 | let arg_buf = arg.get_buf(); 329 | buf.unsplit(arg_buf); 330 | 331 | new_args.push(arg); 332 | } 333 | 334 | RedisMessage::Bulk(buf, new_args) 335 | } 336 | 337 | #[cfg(test)] 338 | mod tests { 
339 | use super::*; 340 | use std::io::{Error, ErrorKind}; 341 | 342 | const STATUS_BUF: &str = "StAtUs_BuF"; 343 | const DATA_BUF: &[u8; 8] = b"DaTa_BuF"; 344 | const DATA_BUF_2: &[u8; 10] = b"DaTa_BuF_2"; 345 | const DATA_BUF_3: &[u8; 4] = b"mget"; 346 | 347 | lazy_static! { 348 | static ref NULL_MSG: RedisMessage = RedisMessage::Null; 349 | static ref OK_MSG: RedisMessage = RedisMessage::OK; 350 | static ref STATUS_MSG: RedisMessage = RedisMessage::from_status(&STATUS_BUF[..]); 351 | static ref ERR_MSG: RedisMessage = 352 | RedisMessage::from_error(Box::new(Error::new(ErrorKind::Other, "fake error message"))); 353 | static ref INT_MSG: RedisMessage = RedisMessage::from_integer(-42); 354 | static ref DATA_MSG: RedisMessage = redis_new_data_buffer(&DATA_BUF[..]); 355 | static ref DATA_MSG_2: RedisMessage = redis_new_data_buffer(&DATA_BUF_2[..]); 356 | static ref DATA_MSG_3: RedisMessage = redis_new_data_buffer(&DATA_BUF_3[..]); 357 | static ref BULK_MSG: RedisMessage = 358 | redis_new_bulk_from_args(vec![DATA_MSG.clone(), DATA_MSG_2.clone(), DATA_MSG_3.clone()]); 359 | static ref BULK_MULTI_MSG: RedisMessage = 360 | redis_new_bulk_from_args(vec![DATA_MSG_3.clone(), DATA_MSG_2.clone(), DATA_MSG.clone()]); 361 | } 362 | 363 | #[test] 364 | fn test_is_multi_message() { 365 | assert!(!redis_is_multi_message(&NULL_MSG)); 366 | assert!(!redis_is_multi_message(&OK_MSG)); 367 | assert!(!redis_is_multi_message(&STATUS_MSG)); 368 | assert!(!redis_is_multi_message(&ERR_MSG)); 369 | assert!(!redis_is_multi_message(&INT_MSG)); 370 | assert!(!redis_is_multi_message(&DATA_MSG)); 371 | assert!(!redis_is_multi_message(&BULK_MSG)); 372 | assert!(redis_is_multi_message(&BULK_MULTI_MSG)); 373 | } 374 | 375 | #[test] 376 | fn test_get_data_buffer() { 377 | let nm_buf = redis_get_data_buffer(&NULL_MSG); 378 | let om_buf = redis_get_data_buffer(&OK_MSG); 379 | let sm_buf = redis_get_data_buffer(&STATUS_MSG); 380 | let em_buf = redis_get_data_buffer(&ERR_MSG); 381 | let im_buf = redis_get_data_buffer(&INT_MSG); 382 | let dm_buf = redis_get_data_buffer(&DATA_MSG); 383 | let bm_buf = redis_get_data_buffer(&BULK_MSG); 384 | 385 | assert!(nm_buf.is_none()); 386 | assert!(om_buf.is_none()); 387 | assert!(sm_buf.is_none()); 388 | assert!(em_buf.is_none()); 389 | assert!(im_buf.is_none()); 390 | assert!(dm_buf.is_some()); 391 | assert_eq!(dm_buf, Some(&DATA_BUF[..])); 392 | assert!(bm_buf.is_none()); 393 | } 394 | } 395 | -------------------------------------------------------------------------------- /synchrotron-test/Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | [[package]] 4 | name = "ascii" 5 | version = "0.7.1" 6 | source = "registry+https://github.com/rust-lang/crates.io-index" 7 | 8 | [[package]] 9 | name = "bitflags" 10 | version = "1.0.3" 11 | source = "registry+https://github.com/rust-lang/crates.io-index" 12 | 13 | [[package]] 14 | name = "byteorder" 15 | version = "1.2.4" 16 | source = "registry+https://github.com/rust-lang/crates.io-index" 17 | 18 | [[package]] 19 | name = "bytes" 20 | version = "0.4.9" 21 | source = "registry+https://github.com/rust-lang/crates.io-index" 22 | dependencies = [ 23 | "byteorder 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)", 24 | "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 25 | ] 26 | 27 | [[package]] 28 | name = "cfg-if" 29 | version = "0.1.4" 30 | source = "registry+https://github.com/rust-lang/crates.io-index" 31 | 32 | [[package]] 33 | name = "cloudabi" 34 | version = "0.0.3" 35 | source = "registry+https://github.com/rust-lang/crates.io-index" 36 | dependencies = [ 37 | "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", 38 | ] 39 | 40 | [[package]] 41 | name = "combine" 42 | version = "3.4.0" 43 | source = "registry+https://github.com/rust-lang/crates.io-index" 44 | dependencies = [ 45 | "ascii 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", 46 | "byteorder 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)", 47 | "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", 48 | "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", 49 | "unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", 50 | ] 51 | 52 | [[package]] 53 | name = "either" 54 | version = "1.5.0" 55 | source = "registry+https://github.com/rust-lang/crates.io-index" 56 | 57 | [[package]] 58 | name = "fuchsia-zircon" 59 | version = "0.3.3" 60 | source = "registry+https://github.com/rust-lang/crates.io-index" 61 | dependencies = [ 62 | "bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)", 63 | "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", 64 | ] 65 | 66 | [[package]] 67 | name = "fuchsia-zircon-sys" 68 | version = "0.3.3" 69 | source = "registry+https://github.com/rust-lang/crates.io-index" 70 | 71 | [[package]] 72 | name = "futures" 73 | version = "0.1.23" 74 | source = "registry+https://github.com/rust-lang/crates.io-index" 75 | 76 | [[package]] 77 | name = "idna" 78 | version = "0.1.5" 79 | source = "registry+https://github.com/rust-lang/crates.io-index" 80 | dependencies = [ 81 | "matches 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", 82 | "unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", 83 | "unicode-normalization 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", 84 | ] 85 | 86 | [[package]] 87 | name = "iovec" 88 | version = "0.1.2" 89 | source = "registry+https://github.com/rust-lang/crates.io-index" 90 | dependencies = [ 91 | "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", 92 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 93 | ] 94 | 95 | [[package]] 96 | name = "kernel32-sys" 97 | version = "0.2.2" 98 | source = "registry+https://github.com/rust-lang/crates.io-index" 99 | dependencies = [ 100 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 101 | "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 102 | ] 103 | 104 | [[package]] 105 | name = "lazycell" 106 | version = 
"0.6.0" 107 | source = "registry+https://github.com/rust-lang/crates.io-index" 108 | 109 | [[package]] 110 | name = "libc" 111 | version = "0.2.43" 112 | source = "registry+https://github.com/rust-lang/crates.io-index" 113 | 114 | [[package]] 115 | name = "log" 116 | version = "0.4.3" 117 | source = "registry+https://github.com/rust-lang/crates.io-index" 118 | dependencies = [ 119 | "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", 120 | ] 121 | 122 | [[package]] 123 | name = "matches" 124 | version = "0.1.7" 125 | source = "registry+https://github.com/rust-lang/crates.io-index" 126 | 127 | [[package]] 128 | name = "memchr" 129 | version = "2.0.1" 130 | source = "registry+https://github.com/rust-lang/crates.io-index" 131 | dependencies = [ 132 | "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", 133 | ] 134 | 135 | [[package]] 136 | name = "mio" 137 | version = "0.6.15" 138 | source = "registry+https://github.com/rust-lang/crates.io-index" 139 | dependencies = [ 140 | "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", 141 | "fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", 142 | "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 143 | "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 144 | "lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", 145 | "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", 146 | "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", 147 | "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 148 | "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", 149 | "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", 150 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 151 | ] 152 | 153 | [[package]] 154 | name = "miow" 155 | version = "0.2.1" 156 | source = "registry+https://github.com/rust-lang/crates.io-index" 157 | dependencies = [ 158 | "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 159 | "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", 160 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 161 | "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 162 | ] 163 | 164 | [[package]] 165 | name = "net2" 166 | version = "0.2.33" 167 | source = "registry+https://github.com/rust-lang/crates.io-index" 168 | dependencies = [ 169 | "cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", 170 | "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", 171 | "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", 172 | ] 173 | 174 | [[package]] 175 | name = "percent-encoding" 176 | version = "1.0.1" 177 | source = "registry+https://github.com/rust-lang/crates.io-index" 178 | 179 | [[package]] 180 | name = "rand" 181 | version = "0.5.5" 182 | source = "registry+https://github.com/rust-lang/crates.io-index" 183 | dependencies = [ 184 | "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", 185 | "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", 186 | "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", 187 | "rand_core 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", 188 | "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", 189 | ] 190 | 191 | [[package]] 192 | 
name = "rand_core" 193 | version = "0.2.1" 194 | source = "registry+https://github.com/rust-lang/crates.io-index" 195 | 196 | [[package]] 197 | name = "redis" 198 | version = "0.9.0" 199 | source = "registry+https://github.com/rust-lang/crates.io-index" 200 | dependencies = [ 201 | "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", 202 | "combine 3.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 203 | "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", 204 | "sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", 205 | "tokio-codec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", 206 | "tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", 207 | "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", 208 | "tokio-tcp 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 209 | "url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)", 210 | ] 211 | 212 | [[package]] 213 | name = "redox_syscall" 214 | version = "0.1.40" 215 | source = "registry+https://github.com/rust-lang/crates.io-index" 216 | 217 | [[package]] 218 | name = "remove_dir_all" 219 | version = "0.5.1" 220 | source = "registry+https://github.com/rust-lang/crates.io-index" 221 | dependencies = [ 222 | "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", 223 | ] 224 | 225 | [[package]] 226 | name = "sha1" 227 | version = "0.2.0" 228 | source = "registry+https://github.com/rust-lang/crates.io-index" 229 | 230 | [[package]] 231 | name = "slab" 232 | version = "0.4.1" 233 | source = "registry+https://github.com/rust-lang/crates.io-index" 234 | 235 | [[package]] 236 | name = "synchrotron-test" 237 | version = "0.1.0" 238 | dependencies = [ 239 | "redis 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", 240 | "tempfile 3.0.3 (registry+https://github.com/rust-lang/crates.io-index)", 241 | ] 242 | 243 | [[package]] 244 | name = "tempfile" 245 | version = "3.0.3" 246 | source = "registry+https://github.com/rust-lang/crates.io-index" 247 | dependencies = [ 248 | "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)", 249 | "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", 250 | "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", 251 | "remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", 252 | "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", 253 | ] 254 | 255 | [[package]] 256 | name = "tokio-codec" 257 | version = "0.1.0" 258 | source = "registry+https://github.com/rust-lang/crates.io-index" 259 | dependencies = [ 260 | "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", 261 | "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", 262 | "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", 263 | ] 264 | 265 | [[package]] 266 | name = "tokio-executor" 267 | version = "0.1.3" 268 | source = "registry+https://github.com/rust-lang/crates.io-index" 269 | dependencies = [ 270 | "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", 271 | ] 272 | 273 | [[package]] 274 | name = "tokio-io" 275 | version = "0.1.7" 276 | source = "registry+https://github.com/rust-lang/crates.io-index" 277 | dependencies = [ 278 | "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", 279 | "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", 280 | "log 0.4.3 
(registry+https://github.com/rust-lang/crates.io-index)", 281 | ] 282 | 283 | [[package]] 284 | name = "tokio-reactor" 285 | version = "0.1.3" 286 | source = "registry+https://github.com/rust-lang/crates.io-index" 287 | dependencies = [ 288 | "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", 289 | "log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", 290 | "mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", 291 | "slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", 292 | "tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", 293 | "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", 294 | ] 295 | 296 | [[package]] 297 | name = "tokio-tcp" 298 | version = "0.1.1" 299 | source = "registry+https://github.com/rust-lang/crates.io-index" 300 | dependencies = [ 301 | "bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)", 302 | "futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)", 303 | "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", 304 | "mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)", 305 | "tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", 306 | "tokio-reactor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", 307 | ] 308 | 309 | [[package]] 310 | name = "unicode-bidi" 311 | version = "0.3.4" 312 | source = "registry+https://github.com/rust-lang/crates.io-index" 313 | dependencies = [ 314 | "matches 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", 315 | ] 316 | 317 | [[package]] 318 | name = "unicode-normalization" 319 | version = "0.1.7" 320 | source = "registry+https://github.com/rust-lang/crates.io-index" 321 | 322 | [[package]] 323 | name = "unreachable" 324 | version = "1.0.0" 325 | source = "registry+https://github.com/rust-lang/crates.io-index" 326 | dependencies = [ 327 | "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", 328 | ] 329 | 330 | [[package]] 331 | name = "url" 332 | version = "1.7.1" 333 | source = "registry+https://github.com/rust-lang/crates.io-index" 334 | dependencies = [ 335 | "idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", 336 | "matches 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", 337 | "percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", 338 | ] 339 | 340 | [[package]] 341 | name = "void" 342 | version = "1.0.2" 343 | source = "registry+https://github.com/rust-lang/crates.io-index" 344 | 345 | [[package]] 346 | name = "winapi" 347 | version = "0.2.8" 348 | source = "registry+https://github.com/rust-lang/crates.io-index" 349 | 350 | [[package]] 351 | name = "winapi" 352 | version = "0.3.5" 353 | source = "registry+https://github.com/rust-lang/crates.io-index" 354 | dependencies = [ 355 | "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 356 | "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", 357 | ] 358 | 359 | [[package]] 360 | name = "winapi-build" 361 | version = "0.1.1" 362 | source = "registry+https://github.com/rust-lang/crates.io-index" 363 | 364 | [[package]] 365 | name = "winapi-i686-pc-windows-gnu" 366 | version = "0.4.0" 367 | source = "registry+https://github.com/rust-lang/crates.io-index" 368 | 369 | [[package]] 370 | name = "winapi-x86_64-pc-windows-gnu" 371 | version = "0.4.0" 372 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 373 | 374 | [[package]] 375 | name = "ws2_32-sys" 376 | version = "0.2.1" 377 | source = "registry+https://github.com/rust-lang/crates.io-index" 378 | dependencies = [ 379 | "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", 380 | "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", 381 | ] 382 | 383 | [metadata] 384 | "checksum ascii 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3ae7d751998c189c1d4468cf0a39bb2eae052a9c58d50ebb3b9591ee3813ad50" 385 | "checksum bitflags 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d0c54bb8f454c567f21197eefcdbf5679d0bd99f2ddbe52e84c77061952e6789" 386 | "checksum byteorder 1.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8389c509ec62b9fe8eca58c502a0acaf017737355615243496cde4994f8fa4f9" 387 | "checksum bytes 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e178b8e0e239e844b083d5a0d4a156b2654e67f9f80144d48398fcd736a24fb8" 388 | "checksum cfg-if 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "efe5c877e17a9c717a0bf3613b2709f723202c4e4675cc8f12926ded29bcb17e" 389 | "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" 390 | "checksum combine 3.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de4d970a671fc33c6ac45b4535c0723104c7cf9d7b09ede7c020c131f9b40f40" 391 | "checksum either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0" 392 | "checksum fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" 393 | "checksum fuchsia-zircon-sys 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" 394 | "checksum futures 0.1.23 (registry+https://github.com/rust-lang/crates.io-index)" = "884dbe32a6ae4cd7da5c6db9b78114449df9953b8d490c9d7e1b51720b922c62" 395 | "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" 396 | "checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08" 397 | "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" 398 | "checksum lazycell 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a6f08839bc70ef4a3fe1d566d5350f519c5912ea86be0df1740a7d247c7fc0ef" 399 | "checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d" 400 | "checksum log 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "61bd98ae7f7b754bc53dca7d44b604f733c6bba044ea6f41bc8d89272d8161d2" 401 | "checksum matches 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "835511bab37c34c47da5cb44844bea2cfde0236db0b506f90ea4224482c9774a" 402 | "checksum memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "796fba70e76612589ed2ce7f45282f5af869e0fdd7cc6199fa1aa1f1d591ba9d" 403 | "checksum mio 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)" = "4fcfcb32d63961fb6f367bfd5d21e4600b92cd310f71f9dca25acae196eb1560" 404 | 
"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" 405 | "checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" 406 | "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" 407 | "checksum rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e464cd887e869cddcae8792a4ee31d23c7edd516700695608f5b98c67ee0131c" 408 | "checksum rand_core 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "edecf0f94da5551fc9b492093e30b041a891657db7940ee221f9d2f66e82eef2" 409 | "checksum redis 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2dde3e551c6f072b3c1feab259e2d85e5795dfb4a4e0dd1510f3a1269f3befcf" 410 | "checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1" 411 | "checksum remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3488ba1b9a2084d38645c4c08276a1752dcbf2c7130d74f1569681ad5d2799c5" 412 | "checksum sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c" 413 | "checksum slab 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5f9776d6b986f77b35c6cf846c11ad986ff128fe0b2b63a3628e3755e8d3102d" 414 | "checksum tempfile 3.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c4b103c6d08d323b92ff42c8ce62abcd83ca8efa7fd5bf7927efefec75f58c76" 415 | "checksum tokio-codec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "881e9645b81c2ce95fcb799ded2c29ffb9f25ef5bef909089a420e5961dd8ccb" 416 | "checksum tokio-executor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "424f0c87ecd66b863045d84e384cb7ce0ae384d8b065b9f0363d29c0d1b30b2f" 417 | "checksum tokio-io 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a5c9635ee806f26d302b8baa1e145689a280d8f5aa8d0552e7344808da54cc21" 418 | "checksum tokio-reactor 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8703a5762ff6913510dc64272c714c4389ffd8c4b3cf602879b8bd14ff06b604" 419 | "checksum tokio-tcp 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4c329b47f071eb8a746040465fa751bd95e4716e98daef6a9b4e434c17d565" 420 | "checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" 421 | "checksum unicode-normalization 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6a0180bc61fc5a987082bfa111f4cc95c4caff7f9799f3e46df09163a937aa25" 422 | "checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" 423 | "checksum url 1.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2a321979c09843d272956e73700d12c4e7d3d92b2ee112b31548aef0d4efc5a6" 424 | "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" 425 | "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" 426 | "checksum winapi 0.3.5 
(registry+https://github.com/rust-lang/crates.io-index)" = "773ef9dcc5f24b7d850d0ff101e542ff24c3b090a9768e03ff889fdef41f00fd" 427 | "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" 428 | "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 429 | "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 430 | "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" 431 | --------------------------------------------------------------------------------