├── .gitignore
├── LICENSE
├── README.md
├── bin
│   └── vanguard_admin
├── config
│   ├── sys.config
│   ├── vars.config
│   ├── vars_node1.config
│   ├── vars_node2.config
│   ├── vars_node3.config
│   └── vm.args
├── rebar.config
├── rebar.lock
├── schema
│   ├── eleveldb.schema
│   ├── lager.schema
│   └── vanguard.schema
└── src
    ├── cadre.erl
    ├── cadre_storage.erl
    ├── vanguard.app.src
    ├── vanguard_app.erl
    ├── vanguard_cluster.erl
    ├── vanguard_console.erl
    ├── vanguard_delegate.erl
    ├── vanguard_ensemble_backend.erl
    ├── vanguard_http_callback.erl
    └── vanguard_sup.erl

/.gitignore:
--------------------------------------------------------------------------------
1 | .rebar3
2 | _*
3 | .eunit
4 | *.o
5 | *.beam
6 | *.plt
7 | *.swp
8 | *.swo
9 | .erlang.cookie
10 | ebin
11 | log
12 | erl_crash.dump
13 | .rebar
14 | logs
15 | _build
16 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016, Tristan Sloughter .
2 | All rights reserved.
3 | 
4 | Redistribution and use in source and binary forms, with or without
5 | modification, are permitted provided that the following conditions are
6 | met:
7 | 
8 | * Redistributions of source code must retain the above copyright
9 | notice, this list of conditions and the following disclaimer.
10 | 
11 | * Redistributions in binary form must reproduce the above copyright
12 | notice, this list of conditions and the following disclaimer in the
13 | documentation and/or other materials provided with the distribution.
14 | 
15 | * The names of its contributors may not be used to endorse or promote
16 | products derived from this software without specific prior written
17 | permission.
18 | 
19 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | vanguard
2 | =====
3 | 
4 | A Consul "clone". It is not, and will not be, a 1:1 feature match; the main purpose of this project is to have something to write advanced Erlang tutorials around.
5 | 
6 | ### Build and Run
7 | 
8 | ```
9 | $ rebar3 release
10 | $ _build/default/rel/vanguard/bin/vanguard console
11 | ```
12 | 
13 | ### Update and Query
14 | 
15 | ```
16 | $ curl -v -XPUT localhost:8080/node/register -d '{"node":"localhost","port":0,"service":{"id":"service1"}}'
17 | $ dig -p8053 @127.0.0.1 service1.service.c1.vanguard srv
18 | ;; ANSWER SECTION:
19 | service1.service.c1.vanguard. 3600 IN SRV 1 1 0 localhost.
20 | ``` 21 | -------------------------------------------------------------------------------- /bin/vanguard_admin: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | SCRIPT=$(readlink $0 || true) 6 | if [ -z $SCRIPT ]; then 7 | SCRIPT=$0 8 | fi; 9 | SCRIPT_DIR="$(cd `dirname "$SCRIPT"` && pwd -P)" 10 | RELEASE_ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd -P)" 11 | REL_NAME=vanguard 12 | REL_VSN="{{ rel_vsn }}" 13 | ERTS_VSN="{{ erts_vsn }}" 14 | CODE_LOADING_MODE="${CODE_LOADING_MODE:-embedded}" 15 | REL_DIR="$RELEASE_ROOT_DIR/releases/$REL_VSN" 16 | ERL_OPTS="{{ erl_opts }}" 17 | RUNNER_LOG_DIR="${RUNNER_LOG_DIR:-$RELEASE_ROOT_DIR/log}" 18 | 19 | find_erts_dir() { 20 | __erts_dir="$RELEASE_ROOT_DIR/erts-$ERTS_VSN" 21 | if [ -d "$__erts_dir" ]; then 22 | ERTS_DIR="$__erts_dir"; 23 | ROOTDIR="$RELEASE_ROOT_DIR" 24 | else 25 | __erl="$(which erl)" 26 | code="io:format(\"~s\", [code:root_dir()]), halt()." 27 | __erl_root="$("$__erl" -noshell -eval "$code")" 28 | ERTS_DIR="$__erl_root/erts-$ERTS_VSN" 29 | ROOTDIR="$__erl_root" 30 | fi 31 | } 32 | 33 | # Get node pid 34 | relx_get_pid() { 35 | if output="$(relx_nodetool rpcterms os getpid)" 36 | then 37 | echo "$output" | sed -e 's/"//g' 38 | return 0 39 | else 40 | echo "$output" 41 | return 1 42 | fi 43 | } 44 | 45 | relx_get_longname() { 46 | id="longname$(relx_gen_id)-${NAME}" 47 | "$BINDIR/erl" -boot start_clean -eval 'io:format("~s~n", [node()]), halt()' -noshell -name $id | sed -e 's/.*@//g' 48 | } 49 | 50 | # Connect to a remote node 51 | relx_rem_sh() { 52 | # Generate a unique id used to allow multiple remsh to the same node 53 | # transparently 54 | id="remsh$(relx_gen_id)-${NAME}" 55 | 56 | # Get the node's ticktime so that we use the same thing. 57 | TICKTIME="$(relx_nodetool rpcterms net_kernel get_net_ticktime)" 58 | 59 | # Setup remote shell command to control node 60 | exec "$BINDIR/erl" "$NAME_TYPE" "$id" -remsh "$NAME" -boot start_clean \ 61 | -boot_var ERTS_LIB_DIR "$ERTS_LIB_DIR" \ 62 | -setcookie "$COOKIE" -hidden -kernel net_ticktime $TICKTIME 63 | } 64 | 65 | # Generate a random id 66 | relx_gen_id() { 67 | od -X -N 4 /dev/urandom | head -n1 | awk '{print $2}' 68 | } 69 | 70 | # Control a node 71 | relx_nodetool() { 72 | command="$1"; shift 73 | 74 | "$ERTS_DIR/bin/escript" "$ROOTDIR/bin/nodetool" "$NAME_TYPE" "$NAME" \ 75 | -setcookie "$COOKIE" "$command" $@ 76 | } 77 | 78 | # Run an escript in the node's environment 79 | relx_escript() { 80 | shift; scriptpath="$1"; shift 81 | export RELEASE_ROOT_DIR 82 | 83 | "$ERTS_DIR/bin/escript" "$ROOTDIR/$scriptpath" $@ 84 | } 85 | 86 | # Output a start command for the last argument of run_erl 87 | relx_start_command() { 88 | printf "exec \"%s\" \"%s\"" "$RELEASE_ROOT_DIR/bin/$REL_NAME" \ 89 | "$START_OPTION" 90 | } 91 | 92 | # Use $CWD/vm.args if exists, otherwise releases/VSN/vm.args 93 | if [ -z "$VMARGS_PATH" ]; then 94 | if [ -f "$RELEASE_ROOT_DIR/vm.args" ]; then 95 | VMARGS_PATH="$RELEASE_ROOT_DIR/vm.args" 96 | else 97 | VMARGS_PATH="$REL_DIR/vm.args" 98 | fi 99 | fi 100 | 101 | orig_vmargs_path="$VMARGS_PATH.orig" 102 | if [ $RELX_REPLACE_OS_VARS ]; then 103 | #Make sure we don't break dev mode by keeping the symbolic link to 104 | #the user's vm.args 105 | if [ ! 
-L "$orig_vmargs_path" ]; then 106 | #we're in copy mode, rename the vm.args file to vm.args.orig 107 | mv "$VMARGS_PATH" "$orig_vmargs_path" 108 | fi 109 | 110 | awk '{while(match($0,"[$]{[^}]*}")) {var=substr($0,RSTART+2,RLENGTH -3);gsub("[$]{"var"}",ENVIRON[var])}}1' < "$orig_vmargs_path" > "$VMARGS_PATH" 111 | else 112 | #We don't need to replace env. vars, just rename the 113 | #symlink vm.args.orig to vm.args, and keep it as a 114 | #symlink. 115 | if [ -L "$orig_vmargs_path" ]; then 116 | mv "$orig_vmargs_path" "$VMARGS_PATH" 117 | fi 118 | fi 119 | 120 | # Make sure log directory exists 121 | mkdir -p "$RUNNER_LOG_DIR" 122 | 123 | # Use $CWD/sys.config if exists, otherwise releases/VSN/sys.config 124 | if [ -z "$RELX_CONFIG_PATH" ]; then 125 | if [ -f "$RELEASE_ROOT_DIR/sys.config" ]; then 126 | RELX_CONFIG_PATH="$RELEASE_ROOT_DIR/sys.config" 127 | else 128 | RELX_CONFIG_PATH="$REL_DIR/sys.config" 129 | fi 130 | fi 131 | 132 | orig_relx_config_path="$RELX_CONFIG_PATH.orig" 133 | if [ $RELX_REPLACE_OS_VARS ]; then 134 | #Make sure we don't break dev mode by keeping the symbolic link to 135 | #the user's sys.config 136 | if [ ! -L "$orig_relx_config_path" ]; then 137 | #We're in copy mode, rename sys.config to sys.config.orig 138 | mv "$RELX_CONFIG_PATH" "$orig_relx_config_path" 139 | fi 140 | 141 | awk '{while(match($0,"[$]{[^}]*}")) {var=substr($0,RSTART+2,RLENGTH -3);gsub("[$]{"var"}",ENVIRON[var])}}1' < "$orig_relx_config_path" > "$RELX_CONFIG_PATH" 142 | else 143 | #We don't need to replace env. vars, just rename the 144 | #symlink sys.config.orig to sys.config. Keep it as 145 | #a symlink. 146 | if [ -L "$orig_relx_config_path" ]; then 147 | mv "$orig_relx_config_path" "$RELX_CONFIG_PATH" 148 | fi 149 | fi 150 | 151 | # Extract the target node name from node.args 152 | NAME_ARG=$(egrep '^-s?name' "$VMARGS_PATH" || true) 153 | if [ -z "$NAME_ARG" ]; then 154 | echo "vm.args needs to have either -name or -sname parameter." 155 | exit 1 156 | fi 157 | 158 | # Extract the name type and name from the NAME_ARG for REMSH 159 | NAME_TYPE="$(echo "$NAME_ARG" | awk '{print $1}')" 160 | NAME="$(echo "$NAME_ARG" | awk '{print $2}')" 161 | 162 | PIPE_DIR="${PIPE_DIR:-/tmp/erl_pipes/$NAME/}" 163 | 164 | # Extract the target cookie 165 | COOKIE_ARG="$(grep '^-setcookie' "$VMARGS_PATH" || true)" 166 | if [ -z "$COOKIE_ARG" ]; then 167 | echo "vm.args needs to have a -setcookie parameter." 168 | exit 1 169 | fi 170 | 171 | # Extract cookie name from COOKIE_ARG 172 | COOKIE="$(echo "$COOKIE_ARG" | awk '{print $2}')" 173 | 174 | find_erts_dir 175 | export ROOTDIR="$RELEASE_ROOT_DIR" 176 | export BINDIR="$ERTS_DIR/bin" 177 | export EMU="beam" 178 | export PROGNAME="erl" 179 | export LD_LIBRARY_PATH="$ERTS_DIR/lib:$LD_LIBRARY_PATH" 180 | ERTS_LIB_DIR="$ERTS_DIR/../lib" 181 | 182 | cd "$ROOTDIR" 183 | 184 | # User can specify an sname without @hostname 185 | # This will fail when creating remote shell 186 | # So here we check for @ and add @hostname if missing 187 | case $NAME in 188 | *@*) 189 | # Nothing to do 190 | ;; 191 | *) 192 | # Add @hostname 193 | case $NAME_TYPE in 194 | -sname) 195 | NAME=$NAME@`hostname -s` 196 | ;; 197 | -name) 198 | NAME=$NAME@$(relx_get_longname) 199 | ;; 200 | esac 201 | ;; 202 | esac 203 | 204 | # Check the first argument for instructions 205 | case "$1" in 206 | create) 207 | # Make sure a node IS running 208 | if ! relx_nodetool "ping" > /dev/null; then 209 | echo "Node is not running!" 
210 |             exit 1
211 |         fi
212 | 
213 |         shift
214 | 
215 |         relx_nodetool rpc vanguard_console create ""
216 |         ;;
217 |     join)
218 |         if [ $# -ne 2 ]; then
219 |             echo "Usage: $SCRIPT join <node>"
220 |             exit 1
221 |         fi
222 | 
223 |         # Make sure a node IS running
224 |         if ! relx_nodetool "ping" > /dev/null; then
225 |             echo "Node is not running!"
226 |             exit 1
227 |         fi
228 | 
229 |         shift
230 | 
231 |         relx_nodetool rpc vanguard_console join "$1"
232 |         ;;
233 |     *)
234 |         echo "Usage: $REL_NAME {create|join}"
235 |         exit 1
236 |         ;;
237 | esac
238 | 
239 | exit 0
240 | 
--------------------------------------------------------------------------------
/config/sys.config:
--------------------------------------------------------------------------------
1 | [
2 |  {riak_ensemble, [{data_root, "./vanguard_data/"}]},
3 | 
4 |  {erldns, [
5 |    %% DB Config
6 |    {storage,
7 |     [{type, erldns_storage_cadre}, %% erldns_storage_json | erldns_storage_mnesia
8 |      {dir, "db"},
9 |      {dbname, undefined}, %% name of the db
10 |      {user, undefined},   %% for future db types
11 |      {pass, undefined},   %% for future db types
12 |      {host, undefined},   %% for future db types
13 |      {port, undefined}    %% for future db types
14 |     ]},
15 |    {servers, [
16 |      [{name, inet_localhost_1}, {address, "127.0.0.1"}, {port, 8053}, {family, inet}, {processes, 2}],
17 |      [{name, inet6_localhost_1}, {address, "::1"}, {port, 8053}, {family, inet6}]
18 |      % [{name, inet_localhost_2}, {address, "127.0.0.2"}, {port, 8053}, {family, inet}]
19 |    ]},
20 | 
21 |    {use_root_hints, false},
22 |    {catch_exceptions, false},
23 |    {zones, "/home/tristan/Devel/erl-dns/priv/example.zone.json"},
24 |    {metrics, [
25 |      {port, 8082}
26 |    ]},
27 |    {admin, [
28 |      {port, 8083},
29 |      {credentials, {"username", "password"}}
30 |    ]},
31 |    {pools, [
32 |      {tcp_worker_pool, erldns_worker, [
33 |        {size, 10},
34 |        {max_overflow, 20}
35 |      ]}
36 |    ]}
37 |  ]},
38 | 
39 |  {eleveldb, [{data_root, "./data"},
40 |    %% Memory usage per vnode
41 | 
42 |    %% Maximum number of files open at once per partition
43 |    %% Default. You must calculate to adjust (see below)
44 |    {max_open_files, 2},
45 |    %% Default. You must calculate to adjust (see below)
46 |    {cache_size, 4194304},
47 | 
48 |    %% Write performance, Write safety
49 | 
50 |    %% this is default, recommended
51 |    {sync, false},
52 |    %% this is default, recommended
53 |    {write_buffer_size_min, 31457280},
54 |    %% this is default, recommended
55 |    {write_buffer_size_max, 62914560},
56 | 
57 |    %% Read performance
58 | 
59 |    %% Required, strongly recommended to be true
60 |    {use_bloomfilter, true},
61 |    %% Default. Recommended to be 4k
62 |    {sst_block_size, 4096},
63 |    %% Default. Recommended to be 16
64 |    {block_restart_interval, 16},
65 | 
66 |    %% Database integrity
67 | 
68 |    %% Default. Strongly recommended to be true
69 |    {verify_checksums, true},
70 |    %% Default. Strongly recommended to be true
71 |    {verify_compactions, true}]},
72 | 
73 |  {lager, [
74 |    {handlers, [
75 |      {lager_file_backend, [{file, "log/debug.log"}, {level, debug}, {size, 104857600}, {count, 5}]}
76 |    ]}
77 |  ]}
78 | ].
79 | 
--------------------------------------------------------------------------------
/config/vars.config:
--------------------------------------------------------------------------------
1 | %% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*-
2 | %% ex: ft=erlang ts=4 sw=4 et
3 | 
4 | %% Erlang node longname
5 | {node, "vanguard@127.0.0.1"}.
6 | 
7 | {http_port, 8080}.
8 | {dns_port, 8053}.
9 | 
10 | %% Paths
11 | {platform_data_dir, "./data"}.
12 | {log_path, "./log"}.
13 | {crash_dump, "{{log_path}}/erl_crash.dump"}.
14 | 
--------------------------------------------------------------------------------
/config/vars_node1.config:
--------------------------------------------------------------------------------
1 | {node, "node1@127.0.0.1"}.
2 | {http_port, 8081}.
3 | {dns_port, 8054}.
4 | 
--------------------------------------------------------------------------------
/config/vars_node2.config:
--------------------------------------------------------------------------------
1 | {node, "node2@127.0.0.1"}.
2 | {http_port, 8082}.
3 | {dns_port, 8055}.
4 | 
--------------------------------------------------------------------------------
/config/vars_node3.config:
--------------------------------------------------------------------------------
1 | {node, "node3@127.0.0.1"}.
2 | {http_port, 8083}.
3 | {dns_port, 8056}.
4 | 
--------------------------------------------------------------------------------
/config/vm.args:
--------------------------------------------------------------------------------
1 | ## Name of the node
2 | -sname vanguard
3 | 
4 | ## Cookie for distributed erlang
5 | -setcookie vanguard
6 | 
7 | ## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive
8 | ## (Disabled by default..use with caution!)
9 | ##-heart
10 | 
11 | ## Enable kernel poll and a few async threads
12 | ##+K true
13 | ##+A 5
14 | 
15 | ## Increase number of concurrent ports/sockets
16 | ##-env ERL_MAX_PORTS 4096
17 | 
18 | ## Tweak GC to run more often
19 | ##-env ERL_FULLSWEEP_AFTER 10
20 | 
--------------------------------------------------------------------------------
/rebar.config:
--------------------------------------------------------------------------------
1 | {erl_opts, [debug_info, {parse_transform, lager_transform}]}.
2 | 
3 | {deps, [riak_ensemble,
4 |         lager,
5 |         elli,
6 |         {erldns, {git, "https://github.com/tsloughter/erl-dns.git", {branch, "master"}}}]}.
7 | 
8 | {relx, [{release, {vanguard, "0.1.0"},
9 |          [vanguard,
10 |           {riak_ensemble, load}]},
11 | 
12 |         {dev_mode, true},
13 |         {include_erts, false},
14 | 
15 |         {overlay_vars, "config/vars.config"},
16 |         {overlay, [{template, "bin/vanguard_admin", "bin/vanguard_admin"},
17 |                    {template, "schema/eleveldb.schema", "share/schema/00-eleveldb.schema"}]}
18 | ]}.
19 | 
20 | 
21 | {profiles, [{prod, [{relx, [{dev_mode, false},
22 |                             {include_erts, true}]}]},
23 |             {node1, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_node1.config"]}]}]},
24 |             {node2, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_node2.config"]}]}]},
25 |             {node3, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_node3.config"]}]}]}]}.
26 | 
27 | {project_plugins, [rebar3_cuttlefish]}.
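%% The node1..node3 profiles above exist so that three releases with
%% distinct node names and ports (see config/vars_node*.config) can be
%% built for local cluster experiments. A sketch of one way to use them
%% (untested; output paths follow rebar3's _build/<profile> convention):
%%
%%   $ rebar3 as node1 release
%%   $ rebar3 as node2 release
%%   $ _build/node1/rel/vanguard/bin/vanguard console
%%   $ _build/node2/rel/vanguard/bin/vanguard console
%%   # on the first node, enable riak_ensemble and create the ensembles:
%%   $ _build/node1/rel/vanguard/bin/vanguard_admin create
%%   # on each additional node, join the first node:
%%   $ _build/node2/rel/vanguard/bin/vanguard_admin join <node1's -name>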
28 | 
--------------------------------------------------------------------------------
/rebar.lock:
--------------------------------------------------------------------------------
1 | [{<<"base32">>,
2 |   {git,"https://github.com/aetrion/base32_erlang.git",
3 |    {ref,"40431a7480176b24863e18b92d9872e54c714df5"}},
4 |   2},
5 |  {<<"bear">>,{pkg,<<"bear">>,<<"0.8.3">>},2},
6 |  {<<"dns">>,
7 |   {git,"git://github.com/aetrion/dns_erlang.git",
8 |    {ref,"674603aec44a2f3fda1f42dd93d1c1368ac319ca"}},
9 |   1},
10 |  {<<"eleveldb">>,{pkg,<<"eleveldb">>,<<"2.1.3">>},1},
11 |  {<<"elli">>,{pkg,<<"elli">>,<<"1.0.5">>},0},
12 |  {<<"erldns">>,
13 |   {git,"https://github.com/tsloughter/erl-dns.git",
14 |    {ref,"8de33f58f3eee6ac1cda5a71ff3813145a644b1c"}},
15 |   0},
16 |  {<<"folsom">>,{pkg,<<"folsom">>,<<"0.8.3">>},1},
17 |  {<<"goldrush">>,{pkg,<<"goldrush">>,<<"0.1.7">>},1},
18 |  {<<"jsx">>,{pkg,<<"jsx">>,<<"2.8.0">>},1},
19 |  {<<"lager">>,{pkg,<<"lager">>,<<"3.0.2">>},0},
20 |  {<<"poolboy">>,{pkg,<<"poolboy">>,<<"1.5.1">>},1},
21 |  {<<"riak_ensemble">>,{pkg,<<"riak_ensemble">>,<<"2.1.3">>},0}].
22 | 
--------------------------------------------------------------------------------
/schema/eleveldb.schema:
--------------------------------------------------------------------------------
1 | %%-*- mode: erlang -*-
2 | 
3 | %% @doc Where LevelDB will store its data.
4 | {mapping, "leveldb.data_root", "eleveldb.data_root", [
5 |   {datatype, directory},
6 |   {default, "{{platform_data_dir}}/leveldb"},
7 |   hidden
8 | ]}.
9 | 
10 | %% @doc This parameter defines the percentage of total server memory
11 | %% to assign to LevelDB. LevelDB will dynamically adjust its internal
12 | %% cache sizes to stay within this size. The memory size can
13 | %% alternately be assigned as a byte count via leveldb.maximum_memory
14 | %% instead.
15 | %% @see leveldb.maximum_memory
16 | {mapping, "leveldb.maximum_memory.percent", "eleveldb.total_leveldb_mem_percent", [
17 |   {default, "70"},
18 |   {datatype, integer}
19 | ]}.
20 | 
21 | %% @doc This parameter defines the number of bytes of server memory to
22 | %% assign to LevelDB. LevelDB will dynamically adjust its internal
23 | %% cache sizes to stay within this size. The memory size can
24 | %% alternately be assigned as percentage of total server memory via
25 | %% leveldb.maximum_memory.percent.
26 | %% @see leveldb.maximum_memory.percent
27 | {mapping, "leveldb.maximum_memory", "eleveldb.total_leveldb_mem", [
28 |   {datatype, bytesize},
29 |   hidden
30 | ]}.
31 | 
32 | %% @doc Whether LevelDB will flush after every write. Note: If you are
33 | %% familiar with fsync, this is analogous to calling fsync after every
34 | %% write.
35 | {mapping, "leveldb.sync_on_write", "eleveldb.sync", [
36 |   {default, off},
37 |   {datatype, flag},
38 |   hidden
39 | ]}.
40 | 
41 | %% @doc limited_developer_mem is a Riak specific option that is used
42 | %% when a developer is testing a high number of vnodes and/or several
43 | %% VMs on a machine with limited physical memory. Do NOT use this
44 | %% option if making performance measurements. This option overwrites
45 | %% values given to write_buffer_size_min and write_buffer_size_max.
46 | {mapping, "leveldb.limited_developer_mem", "eleveldb.limited_developer_mem", [
47 |   {default, off},
48 |   {datatype, flag},
49 |   hidden
50 | ]}.
51 | 
52 | %% @doc Each vnode first stores new key/value data in a memory based
53 | %% write buffer. This write buffer is in parallel to the recovery log
54 | %% mentioned in the "sync" parameter.
Riak creates each vnode with a 55 | %% randomly sized write buffer for performance reasons. The random 56 | %% size is somewhere between write_buffer_size_min and 57 | %% write_buffer_size_max. 58 | %% @see leveldb.sync 59 | {mapping, "leveldb.write_buffer_size_min", "eleveldb.write_buffer_size_min", [ 60 | {default, "30MB"}, 61 | {datatype, bytesize}, 62 | hidden 63 | ]}. 64 | 65 | %% @see leveldb.write_buffer_size_min 66 | {mapping, "leveldb.write_buffer_size_max", "eleveldb.write_buffer_size_max", [ 67 | {default, "60MB"}, 68 | {datatype, bytesize}, 69 | hidden 70 | ]}. 71 | 72 | %% @doc Each database .sst table file can include an optional "bloom 73 | %% filter" that is highly effective in shortcutting data queries that 74 | %% are destined to not find the requested key. The Bloom filter 75 | %% typically increases the size of an .sst table file by about 76 | %% 2%. 77 | {mapping, "leveldb.bloomfilter", "eleveldb.use_bloomfilter", [ 78 | {default, on}, 79 | {datatype, flag}, 80 | hidden 81 | ]}. 82 | 83 | %% @doc Defines the limit where block cache memory can no longer be 84 | %% released in favor of the page cache. This has no impact with 85 | %% regard to release in favor of file cache. The value is per 86 | %% vnode. 87 | {mapping, "leveldb.block_cache_threshold", "eleveldb.block_cache_threshold", [ 88 | {default, "32MB"}, 89 | {datatype, bytesize}, 90 | hidden 91 | ]}. 92 | 93 | %% @doc Defines the size threshold for a block / chunk of data within 94 | %% one .sst table file. Each new block gets an index entry in the .sst 95 | %% table file's master index. 96 | {mapping, "leveldb.block.size", "eleveldb.sst_block_size", [ 97 | {default, "4KB"}, 98 | {datatype, bytesize}, 99 | hidden 100 | ]}. 101 | 102 | %% @doc Defines the key count threshold for a new key entry in the key 103 | %% index for a block. Most deployments should leave this parameter 104 | %% alone. 105 | {mapping, "leveldb.block.restart_interval", "eleveldb.block_restart_interval", [ 106 | {default, 16}, 107 | {datatype, integer}, 108 | hidden 109 | ]}. 110 | 111 | 112 | %% @doc Defines the number of incremental adjustments to attempt 113 | %% between the block.size value and the maximum block.size for an .sst 114 | %% table file. Value of zero disables the underlying dynamic 115 | %% block_size feature. 116 | %% @see leveldb.block.size 117 | {mapping, "leveldb.block.size_steps", "eleveldb.block_size_steps", [ 118 | {default, 16}, 119 | {datatype, integer}, 120 | hidden 121 | ]}. 122 | 123 | %% @doc Enables or disables the verification of the data fetched from 124 | %% LevelDB against internal checksums. 125 | {mapping, "leveldb.verify_checksums", "eleveldb.verify_checksums", [ 126 | {default, on}, 127 | {datatype, flag}, 128 | hidden 129 | ]}. 130 | 131 | %% @doc Enables or disables the verification of LevelDB data during 132 | %% compaction. 133 | {mapping, "leveldb.verify_compaction", "eleveldb.verify_compaction", [ 134 | {default, on}, 135 | {datatype, flag}, 136 | hidden 137 | ]}. 138 | 139 | %% @doc The number of worker threads performing LevelDB operations. 140 | {mapping, "leveldb.threads", "eleveldb.eleveldb_threads", [ 141 | {default, 71}, 142 | {datatype, integer}, 143 | hidden 144 | ]}. 145 | 146 | %% @doc Option to override LevelDB's use of fadvise(DONTNEED) with 147 | %% fadvise(WILLNEED) instead. WILLNEED can reduce disk activity on 148 | %% systems where physical memory exceeds the database size. 
149 | {mapping, "leveldb.fadvise_willneed", "eleveldb.fadvise_willneed", [ 150 | {default, false}, 151 | {datatype, {enum, [true, false]}}, 152 | hidden 153 | ]}. 154 | 155 | %% @doc Enables or disables the compression of data on disk. 156 | %% Enabling (default) saves disk space. Disabling may reduce read 157 | %% latency but increase overall disk activity. Option can be 158 | %% changed at any time, but will not impact data on disk until 159 | %% next time a file requires compaction. 160 | {mapping, "leveldb.compression", "eleveldb.compression", [ 161 | {default, on}, 162 | {datatype, flag}, 163 | hidden 164 | ]}. 165 | 166 | %% @doc Controls when a background compaction initiates solely 167 | %% due to the number of delete tombstones within an individual 168 | %% .sst table file. Value of 'off' disables the feature. 169 | {mapping, "leveldb.compaction.trigger.tombstone_count", "eleveldb.delete_threshold", [ 170 | {default, 1000}, 171 | {datatype, [integer, {atom, off}]}, 172 | hidden 173 | ]}. 174 | 175 | {translation, 176 | "eleveldb.delete_threshold", 177 | fun(Conf) -> 178 | case cuttlefish:conf_get("leveldb.compaction.trigger.tombstone_count", Conf) of 179 | off -> 0; 180 | Int -> Int 181 | end 182 | end}. 183 | 184 | %% @doc leveldb can be configured to use different mounts for 185 | %% different levels. This tiered option defaults to off, but you can 186 | %% configure it to trigger at levels 1-6. If you do this, anything 187 | %% stored at the chosen level or greater will be stored on 188 | %% leveldb.tiered.mounts.slow, while everything at the levels below will 189 | %% be stored on leveldb.tiered.mounts.fast 190 | %% Levels 3 or 4 are recommended settings. 191 | %% WARNING: There is no dynamic reallocation of leveldb 192 | %% data across mounts. If you change this setting without manually 193 | %% moving the level files to the correct mounts, leveldb will act in 194 | %% an unexpected state. 195 | %% @see leveldb.tiered.mounts.fast 196 | %% @see leveldb.tiered.mounts.slow 197 | {mapping, "leveldb.tiered", "eleveldb.tiered_slow_level", [ 198 | {default, off}, 199 | {datatype, [ 200 | {atom, off}, 201 | {integer, 1}, 202 | {integer, 2}, 203 | {integer, 3}, 204 | {integer, 4}, 205 | {integer, 5}, 206 | {integer, 6} 207 | ]}, 208 | hidden 209 | ]}. 210 | 211 | {translation, "eleveldb.tiered_slow_level", 212 | fun(Conf) -> 213 | case cuttlefish:conf_get("leveldb.tiered", Conf) of 214 | off -> 0; 215 | I -> I 216 | end 217 | end 218 | }. 219 | 220 | %% @see leveldb.tiered 221 | {mapping, "leveldb.tiered.path.fast", "eleveldb.tiered_fast_prefix", [ 222 | {datatype, directory}, 223 | hidden 224 | ]}. 225 | 226 | %% @see leveldb.tiered 227 | {mapping, "leveldb.tiered.path.slow", "eleveldb.tiered_slow_prefix", [ 228 | {datatype, directory}, 229 | hidden 230 | ]}. 231 | 232 | %% @doc This parameter enables/disables logic that saves a list 233 | %% of cached objects (currently only open .sst file names) at 234 | %% database close. The list is used on the next open to pre-populate 235 | %% the cache. This typically eliminates heavy latencies associated 236 | %% with .sst files opening for early user requests. 237 | {mapping, "leveldb.cache_object_warming", "eleveldb.cache_object_warming", [ 238 | {default, true}, 239 | {datatype, {enum, [true, false]}}, 240 | hidden 241 | ]}. 
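%% Example of how the mappings above surface to operators: cuttlefish turns
%% each "leveldb.*" key into a line of the generated .conf file. The values
%% shown are the defaults from the mappings (illustrative only):
%%
%%   leveldb.maximum_memory.percent = 70
%%   leveldb.sync_on_write = off
%%   leveldb.block.size = 4KB
%%   leveldb.compaction.trigger.tombstone_count = 1000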
242 | -------------------------------------------------------------------------------- /schema/lager.schema: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | %% complex lager example 3 | %% @doc where do you want the console.log output: 4 | %% off : nowhere 5 | %% file: the file specified by log.console.file 6 | %% console : standard out 7 | %% both : log.console.file and standard out. 8 | {mapping, "log.console", "lager.handlers", [ 9 | {default, file}, 10 | {datatype, {enum, [off, file, console, both]}} 11 | ]}. 12 | 13 | %% @doc the log level of the console log 14 | {mapping, "log.console.level", "lager.handlers", [ 15 | {default, info}, 16 | {datatype, {enum, [debug, info, warning, error]}} 17 | ]}. 18 | 19 | %% @doc location of the console log 20 | {mapping, "log.console.file", "lager.handlers", [ 21 | {default, "{{log_path}}/console.log"} 22 | ]}. 23 | 24 | %% *gasp* notice the same @mapping! 25 | %% @doc location of the error log 26 | {mapping, "log.error.file", "lager.handlers", [ 27 | {default, "{{log_path}}/error.log"} 28 | ]}. 29 | 30 | %% *gasp* notice the same @mapping! 31 | %% @doc location of the debug log 32 | {mapping, "log.debug.file", "lager.handlers", [ 33 | {default, "{{log_path}}/debug.log"} 34 | ]}. 35 | 36 | %% *gasp* notice the same @mapping! 37 | %% @doc turn on syslog 38 | {mapping, "log.syslog", "lager.handlers", [ 39 | {default, off}, 40 | {datatype, {enum, [on, off]}} 41 | ]}. 42 | 43 | { translation, 44 | "lager.handlers", 45 | fun(Conf) -> 46 | SyslogHandler = case cuttlefish:conf_get("log.syslog", Conf) of 47 | on -> [{lager_syslog_backend, ["riak", daemon, info]}]; 48 | _ -> [] 49 | end, 50 | ErrorHandler = case cuttlefish:conf_get("log.error.file", Conf) of 51 | undefined -> []; 52 | ErrorFilename -> [{lager_file_backend, [{file, ErrorFilename}, 53 | {level, error}, 54 | {size, 10485760}, 55 | {date, "$D0"}, 56 | {count, 5}]}] 57 | end, 58 | 59 | ConsoleLogLevel = cuttlefish:conf_get("log.console.level", Conf), 60 | ConsoleLogFile = cuttlefish:conf_get("log.console.file", Conf), 61 | 62 | ConsoleHandler = {lager_console_handler, ConsoleLogLevel}, 63 | ConsoleFileHandler = {lager_file_backend, [{file, ConsoleLogFile}, 64 | {level, ConsoleLogLevel}, 65 | {size, 10485760}, 66 | {date, "$D0"}, 67 | {count, 5}]}, 68 | 69 | ConsoleHandlers = case cuttlefish:conf_get("log.console", Conf) of 70 | off -> []; 71 | file -> [ConsoleFileHandler]; 72 | console -> [ConsoleHandler]; 73 | both -> [ConsoleHandler, ConsoleFileHandler]; 74 | _ -> [] 75 | end, 76 | DebugHandler = case cuttlefish:conf_get("log.debug.file", Conf) of 77 | undefined -> []; 78 | DebugFilename -> [{lager_file_backend, [{file, DebugFilename}, 79 | {level, debug}, 80 | {size, 10485760}, 81 | {date, "$D0"}, 82 | {count, 5}]}] 83 | end, 84 | 85 | SyslogHandler ++ ConsoleHandlers ++ ErrorHandler ++ DebugHandler 86 | end 87 | }. 88 | 89 | %% Lager Config 90 | 91 | %% @doc Whether to write a crash log, and where. 92 | %% Commented/omitted/undefined means no crash logger. 93 | {mapping, "log.crash.file", "lager.crash_log", [ 94 | {default, "{{log_path}}/crash.log"} 95 | ]}. 96 | 97 | %% @doc Maximum size in bytes of events in the crash log - defaults to 65536 98 | %% @datatype integer 99 | %% @mapping 100 | {mapping, "log.crash.msg_size", "lager.crash_log_msg_size", [ 101 | {default, "64KB"}, 102 | {datatype, bytesize} 103 | ]}. 
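%% Example: with the defaults above (log.console = file, log.console.level
%% = info, syslog off) and {{log_path}} rendering to "./log", the handler
%% translation earlier in this file yields roughly the following lager
%% config (an approximation, not verified):
%%
%%   {lager, [{handlers,
%%             [{lager_file_backend, [{file, "./log/console.log"}, {level, info}, ...]},
%%              {lager_file_backend, [{file, "./log/error.log"}, {level, error}, ...]},
%%              {lager_file_backend, [{file, "./log/debug.log"}, {level, debug}, ...]}]}]}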
104 | 
105 | %% @doc Maximum size of the crash log in bytes, before it is rotated; set
106 | %% to 0 to disable rotation - default is 0
107 | {mapping, "log.crash.size", "lager.crash_log_size", [
108 |   {default, "10MB"},
109 |   {datatype, bytesize}
110 | ]}.
111 | 
112 | %% @doc What time to rotate the crash log - default is no time
113 | %% rotation. See the lager README for a description of this format:
114 | %% https://github.com/basho/lager/blob/master/README.org
115 | {mapping, "log.crash.date", "lager.crash_log_date", [
116 |   {default, "$D0"}
117 | ]}.
118 | 
119 | %% @doc Number of rotated crash logs to keep, 0 means keep only the
120 | %% current one - default is 0
121 | {mapping, "log.crash.count", "lager.crash_log_count", [
122 |   {default, 5},
123 |   {datatype, integer}
124 | ]}.
125 | 
126 | %% @doc Whether to redirect error_logger messages into lager - defaults to true
127 | {mapping, "log.error.redirect", "lager.error_logger_redirect", [
128 |   {default, on},
129 |   {datatype, {enum, [on, off]}}
130 | ]}.
131 | 
132 | { translation,
133 |   "lager.error_logger_redirect", fun(Conf) ->
134 |     Setting = cuttlefish:conf_get("log.error.redirect", Conf),
135 |     case Setting of
136 |         on -> true;
137 |         off -> false;
138 |         _Default -> true
139 |     end
140 |   end}.
141 | 
142 | %% @doc maximum number of error_logger messages to handle in a second
143 | %% lager 2.0.0 shipped with a limit of 50, which is a little low for riak's startup
144 | {mapping, "log.error.messages_per_second", "lager.error_logger_hwm", [
145 |   {default, 100},
146 |   {datatype, integer}
147 | ]}.
148 | 
--------------------------------------------------------------------------------
/schema/vanguard.schema:
--------------------------------------------------------------------------------
1 | %% -*- erlang -*-
2 | 
3 | {mapping, "http_port", "vanguard.http_port", [
4 |   {default, {{http_port}}},
5 |   {datatype, integer}
6 | ]}.
7 | 
8 | {mapping, "domain", "vanguard.domain", [
9 |   {default, "vanguard"},
10 |   {datatype, string}
11 | ]}.
12 | 
13 | {mapping, "cluster", "vanguard.cluster", [
14 |   {default, "c1"},
15 |   {datatype, string}
16 | ]}.
17 | 
18 | {mapping, "zone_delegates", "erldns.zone_delegates", [
19 |   {default, vanguard_delegate},
20 |   {datatype, {enum, [vanguard_delegate]}}
21 | ]}.
22 | 
23 | {translation,
24 |  "erldns.zone_delegates",
25 |  fun(Conf) ->
26 |      Domain = list_to_binary(cuttlefish:conf_get("domain", Conf)),
27 |      Cluster = list_to_binary(cuttlefish:conf_get("cluster", Conf)),
28 |      Zone = <<"service.", Cluster/binary, ".", Domain/binary>>,
29 |      [{_, ZD}] = cuttlefish_variable:filter_by_prefix("zone_delegates", Conf),
30 |      [{Zone, ZD}]
31 |  end}.
32 | 
33 | {mapping, "storage_type", "erldns.storage.type", [
34 |   {default, erldns_storage_json},
35 |   {datatype, {enum, [erldns_storage_cadre, erldns_storage_json]}}
36 | ]}.
37 | 
38 | %% @doc Directory used by the erldns storage backend.
39 | {mapping, "storage_dir", "erldns.storage.dir", [
40 |   {default, "db"},
41 |   {datatype, string}
42 | ]}.
43 | 
44 | {mapping, "server.$name.address", "erldns.servers", [
45 |   {default, "127.0.0.1"},
46 |   {datatype, string},
47 |   {include_default, "inet_localhost_1"}
48 | ]}.
49 | 
50 | {mapping, "server.$name.port", "erldns.servers", [
51 |   {default, {{dns_port}}},
52 |   {datatype, integer},
53 |   {include_default, "inet_localhost_1"}
54 | ]}.
55 | 
56 | {mapping, "server.$name.family", "erldns.servers", [
57 |   {default, inet},
58 |   {datatype, {enum, [inet, inet6]}},
59 |   {include_default, "inet_localhost_1"}
60 | ]}.
61 | 
62 | {mapping, "server.$name.processes", "erldns.servers", [
63 |   {default, 2},
64 |   {datatype, integer},
65 |   {include_default, "inet_localhost_1"}
66 | ]}.
67 | 
68 | {translation,
69 |  "erldns.servers",
70 |  fun(Conf) ->
71 |      Servers = cuttlefish_variable:filter_by_prefix("server", Conf),
72 |      Dict = lists:foldl(fun({["server", Name, Key], Value}, DictAcc) ->
73 |                                 dict:append_list(list_to_atom(Name), [{list_to_atom(Key), Value}], DictAcc);
74 |                            (_, DictAcc) ->
75 |                                 DictAcc
76 |                         end, dict:new(), Servers),
77 |      dict:fold(fun(Name, Values, Acc) ->
78 |                        [[{name, Name} | Values] | Acc]
79 |                end, [], Dict)
80 |  end}.
81 | 
82 | {mapping, "dns_pool.$name.module", "erldns.pools", [
83 |   {default, erldns_worker},
84 |   {datatype, atom},
85 |   {include_default, "tcp_worker_pool"}
86 | ]}.
87 | 
88 | {mapping, "dns_pool.$name.size", "erldns.pools", [
89 |   {default, 10},
90 |   {datatype, integer},
91 |   {include_default, "tcp_worker_pool"}
92 | ]}.
93 | 
94 | {mapping, "dns_pool.$name.max_overflow", "erldns.pools", [
95 |   {default, 20},
96 |   {datatype, integer},
97 |   {include_default, "tcp_worker_pool"}
98 | ]}.
99 | 
100 | {translation,
101 |  "erldns.pools",
102 |  fun(Conf) ->
103 |      Servers = cuttlefish_variable:filter_by_prefix("dns_pool", Conf),
104 |      Dict = lists:foldl(fun({["dns_pool", Name, Key], Value}, DictAcc) ->
105 |                                 dict:append_list(list_to_atom(Name), [{list_to_atom(Key), Value}], DictAcc);
106 |                            (_, DictAcc) ->
107 |                                 DictAcc
108 |                         end, dict:new(), Servers),
109 |      dict:fold(fun(Name, Values, Acc) ->
110 |                        {value, {module, Module}, Values1} = lists:keytake(module, 1, Values),
111 |                        [{Name, Module, Values1} | Acc]
112 |                end, [], Dict)
113 |  end}.
114 | 
--------------------------------------------------------------------------------
/src/cadre.erl:
--------------------------------------------------------------------------------
1 | -module(cadre).
2 | 
3 | -export([register/3,
4 |          find/1]).
5 | 
6 | -include_lib("dns/include/dns_records.hrl").
7 | 
8 | %% service: [tag.]<service-id>.service.<cluster>.<domain>
9 | 
10 | register(ServiceId, Node, Port) ->
11 |     cadre_storage:insert(services, {ServiceId, Node, Port}).
12 | 
13 | find(ServiceId) ->
14 |     cadre_storage:select(services, ServiceId).
15 | 
--------------------------------------------------------------------------------
/src/cadre_storage.erl:
--------------------------------------------------------------------------------
1 | -module(cadre_storage).
2 | 
3 | %% API
4 | -export([insert/2,
5 |          delete/2,
6 |          select/2]).
7 | 
8 | -spec insert(atom(), tuple()) -> ok | {error, Reason :: term()}.
9 | insert(Table, Value) ->
10 |     case riak_ensemble_client:kover(node(), Table, element(1, Value), Value, 10000) of
11 |         {ok, _} ->
12 |             ok;
13 |         Err ->
14 |             lager:error("error=~p", [Err]),
15 |             {error, Err}
16 |     end.
17 | 
18 | -spec delete(atom(), term()) -> ok | {error, Reason :: term()}.
19 | delete(Table, Key) ->
20 |     case riak_ensemble_client:kover(node(), Table, Key, notfound, 10000) of
21 |         {ok, _} ->
22 |             ok;
23 |         Err ->
24 |             lager:error("error=~p", [Err]),
25 |             {error, Err}
26 |     end.
27 | 
28 | select(Table, Key) ->
29 |     case riak_ensemble_client:kget(node(), Table, Key, 10000) of
30 |         {ok, Obj} ->
31 |             vanguard_ensemble_backend:obj_value(Obj);
32 |         Err ->
33 |             lager:error("error=~p", [Err]),
34 |             notfound
35 |     end.
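%% Example usage from an attached shell, a sketch assuming the `services`
%% ensemble has already been created (via vanguard_admin create, which ends
%% up in vanguard_cluster:create/0):
%%
%%   1> cadre_storage:insert(services, {<<"service1">>, <<"localhost">>, 8080}).
%%   ok
%%   2> cadre_storage:select(services, <<"service1">>).
%%   {<<"service1">>,<<"localhost">>,8080}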
36 | -------------------------------------------------------------------------------- /src/vanguard.app.src: -------------------------------------------------------------------------------- 1 | {application, vanguard, 2 | [{description, ""}, 3 | {vsn, "0.1.0"}, 4 | {registered, []}, 5 | {mod, { vanguard_app, []}}, 6 | {applications, 7 | [kernel, 8 | stdlib, 9 | lager, 10 | sasl, 11 | dns, 12 | jsx, 13 | erldns, 14 | eleveldb, 15 | elli 16 | ]}, 17 | {env,[]}, 18 | {modules, []}, 19 | 20 | {maintainers, []}, 21 | {licenses, []}, 22 | {links, []} 23 | ]}. 24 | -------------------------------------------------------------------------------- /src/vanguard_app.erl: -------------------------------------------------------------------------------- 1 | %%%------------------------------------------------------------------- 2 | %% @doc vanguard public API 3 | %% @end 4 | %%%------------------------------------------------------------------- 5 | 6 | -module(vanguard_app). 7 | 8 | -behaviour(application). 9 | 10 | %% Application callbacks 11 | -export([start/2 12 | ,stop/1]). 13 | 14 | -include_lib("dns/include/dns_records.hrl"). 15 | 16 | %%==================================================================== 17 | %% API 18 | %%==================================================================== 19 | 20 | start(_StartType, _StartArgs) -> 21 | initialize_domain(), 22 | vanguard_sup:start_link(). 23 | 24 | %%-------------------------------------------------------------------- 25 | stop(_State) -> 26 | ok. 27 | 28 | %%==================================================================== 29 | %% Internal functions 30 | %%==================================================================== 31 | 32 | initialize_domain() -> 33 | {ok, Domain} = application:get_env(vanguard, domain), 34 | {ok, Cluster} = application:get_env(vanguard, cluster), 35 | Zone = <<"service.", (list_to_binary(Cluster))/binary, ".", (list_to_binary(Domain))/binary>>, 36 | SOA = #dns_rrdata_soa{mname = <<"ns1.", Zone/binary>>, 37 | rname = <<"admin.", Zone/binary>>, 38 | serial = 2013022001, 39 | refresh = 86400, 40 | retry = 7200, 41 | expire = 604800, 42 | minimum = 300}, 43 | erldns_zone_cache:put_zone({Zone, [], [#dns_rr{name = Zone, 44 | type = ?DNS_TYPE_SOA, 45 | ttl = 3600, 46 | data = SOA}]}). 47 | -------------------------------------------------------------------------------- /src/vanguard_cluster.erl: -------------------------------------------------------------------------------- 1 | -module(vanguard_cluster). 2 | 3 | -behaviour(gen_server). 4 | 5 | %% API 6 | -export([start_link/0, add_nodes/0, create/0, create_ensembles/1, update_ensembles/1]). 7 | 8 | %% gen_server callbacks 9 | -export([init/1, handle_call/3, handle_cast/2, handle_info/2, 10 | terminate/2, code_change/3]). 11 | 12 | -define(SERVER, ?MODULE). 13 | -define(ENSEMBLES, [services]). 14 | 15 | -record(state, {}). 16 | 17 | start_link() -> 18 | gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). 19 | 20 | add_nodes() -> 21 | gen_server:call(?SERVER, add_nodes, 20000). 22 | 23 | create() -> 24 | gen_server:call(?SERVER, create, 20000). 25 | 26 | init([]) -> 27 | {ok, #state{}}. 28 | 29 | handle_call(add_nodes, _From, State) -> 30 | join_cluster(nodes()), 31 | {reply, ok, State}; 32 | handle_call(create, _From, State) -> 33 | riak_ensemble_manager:enable(), 34 | wait_stable(), 35 | create_ensembles(?ENSEMBLES), 36 | {reply, ok, State}; 37 | handle_call(_Request, _From, State) -> 38 | Reply = ok, 39 | {reply, Reply, State}. 
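%% The add_nodes/0 and create/0 calls handled above back the
%% bin/vanguard_admin script: `vanguard_admin create` is run once, on the
%% first node, to enable riak_ensemble and create the `services` ensemble;
%% `vanguard_admin join <node>` is run on each additional node and reaches
%% add_nodes/0 via vanguard_console:join/1 after a net_adm:ping/1 of the
%% target node.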
40 | 
41 | handle_cast(_Msg, State) ->
42 |     {noreply, State}.
43 | 
44 | handle_info(timeout, State) ->
45 |     {noreply, State};
46 | handle_info(_Info, State) ->
47 |     {noreply, State}.
48 | 
49 | terminate(_Reason, _State) ->
50 |     ok.
51 | 
52 | code_change(_OldVsn, State, _Extra) ->
53 |     {ok, State}.
54 | 
55 | %% Internal functions
56 | 
57 | wait_stable() ->
58 |     case check_stable() of
59 |         true ->
60 |             ok;
61 |         false ->
62 |             wait_stable()
63 |     end.
64 | 
65 | check_stable() ->
66 |     case riak_ensemble_manager:check_quorum(root, 1000) of
67 |         true ->
68 |             case riak_ensemble_peer:stable_views(root, 1000) of
69 |                 {ok, true} ->
70 |                     true;
71 |                 _ ->
72 |                     false
73 |             end;
74 |         false ->
75 |             false
76 |     end.
77 | 
78 | join_cluster([]) ->
79 |     ok;
80 | join_cluster([H|T]) ->
81 |     case riak_ensemble_manager:join(H, node()) of
82 |         ok ->
83 |             wait_stable(),
84 |             update_ensembles(?ENSEMBLES),
85 |             ok;
86 |         already_enabled ->
87 |             wait_stable(),
88 |             update_ensembles(?ENSEMBLES), ok;
89 |         {error, same_node} ->
90 |             ok;
91 |         _ ->
92 |             if
93 |                 H =:= node() ->
94 |                     join_cluster(T); % skip ourselves, move on to the next node
95 |                 true ->
96 |                     join_cluster([H]) % transient failure, retry the same node
97 |             end
98 |     end.
99 | 
100 | update_ensembles(Ensembles) ->
101 |     [begin
102 |          {_, Node} = Peer = riak_ensemble_manager:get_leader(EnsembleId),
103 |          Pid = rpc:call(Node, riak_ensemble_manager, get_peer_pid, [EnsembleId, Peer]),
104 |          riak_ensemble_peer:update_members(Pid, [{add, {EnsembleId, node()}}], 5000)
105 |      end || EnsembleId <- Ensembles].
106 | 
107 | create_ensembles(Ensembles) ->
108 |     [riak_ensemble_manager:create_ensemble(EnsembleId,
109 |                                            {EnsembleId, node()},
110 |                                            vanguard_ensemble_backend,
111 |                                            []) || EnsembleId <- Ensembles].
112 | 
--------------------------------------------------------------------------------
/src/vanguard_console.erl:
--------------------------------------------------------------------------------
1 | -module(vanguard_console).
2 | 
3 | -export([join/1,
4 |          create/1,
5 |          leave/1,
6 |          status/1,
7 |          cluster_info/1,
8 |          cluster_status/0]).
9 | 
10 | -export([ensemble_status/1]).
11 | 
12 | join([NodeStr]) ->
13 |     case net_adm:ping(list_to_atom(NodeStr)) of
14 |         pang ->
15 |             {error, not_reachable};
16 |         pong ->
17 |             vanguard_cluster:add_nodes()
18 |     end.
19 | 
20 | create([]) ->
21 |     vanguard_cluster:create().
22 | 
23 | leave([]) ->
24 |     ok.
25 | 
26 | status([]) ->
27 |     ok.
28 | 
29 | cluster_info([]) ->
30 |     ok.
31 | 
32 | ensemble_status([]) ->
33 |     ok.
34 | 
35 | cluster_status() ->
36 |     case riak_ensemble_manager:enabled() of
37 |         false ->
38 |             {error, not_enabled};
39 |         true ->
40 |             Nodes = lists:sort(riak_ensemble_manager:cluster()),
41 |             io:format("Nodes in cluster: ~p~n", [Nodes]),
42 |             LeaderNode = node(riak_ensemble_manager:get_leader_pid(root)),
43 |             io:format("Leader: ~p~n", [LeaderNode])
44 |     end.
45 | 
--------------------------------------------------------------------------------
/src/vanguard_delegate.erl:
--------------------------------------------------------------------------------
1 | -module(vanguard_delegate).
2 | 
3 | -behaviour(erldns_resolver).
4 | 
5 | -export([get_records_by_name/1]).
6 | 
7 | -include_lib("dns/include/dns_records.hrl").
8 | 
9 | get_records_by_name(Qname) ->
10 |     ServiceId = get_service_id(Qname),
11 |     case cadre:find(ServiceId) of
12 |         {_, Target, Port} ->
13 |             [#dns_rr{name = Qname,
14 |                      type = ?DNS_TYPE_SRV,
15 |                      ttl = 0,
16 |                      data = #dns_rrdata_srv{priority = 1,
17 |                                             weight = 1,
18 |                                             port = Port,
19 |                                             target = Target}}];
20 |         notfound ->
21 |             []
22 |     end.
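%% Example: with the default cluster "c1" and domain "vanguard", a SRV query
%% for <<"service1.service.c1.vanguard">> extracts <<"service1">> below,
%% looks it up in the `services` ensemble via cadre:find/1 and, when the
%% service is registered, answers with a single SRV record (priority 1,
%% weight 1) carrying the registered node as target and its port.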
23 | 
24 | get_service_id(Qname) ->
25 |     {ok, Domain0} = application:get_env(vanguard, domain),
26 |     Domain = list_to_binary(Domain0),
27 |     {ok, Cluster0} = application:get_env(vanguard, cluster),
28 |     Cluster = list_to_binary(Cluster0),
29 |     [ServiceId, <<"service">>, Cluster, Domain] = binary:split(Qname, <<".">>, [global]),
30 |     ServiceId.
31 | 
--------------------------------------------------------------------------------
/src/vanguard_ensemble_backend.erl:
--------------------------------------------------------------------------------
1 | -module(vanguard_ensemble_backend).
2 | -behaviour(riak_ensemble_backend).
3 | 
4 | -export([init/3, new_obj/4]).
5 | -export([obj_epoch/1, obj_seq/1, obj_key/1, obj_value/1]).
6 | -export([set_obj_epoch/2, set_obj_seq/2, set_obj_value/2]).
7 | -export([get/3, put/4, tick/5, ping/2, ready_to_start/0]).
8 | -export([synctree_path/2]).
9 | -export([handle_down/4]).
10 | 
11 | -include_lib("riak_ensemble/include/riak_ensemble_types.hrl").
12 | 
13 | -record(obj, {epoch :: epoch(),
14 |               seq :: seq(),
15 |               key :: term(),
16 |               value :: term()}).
17 | 
18 | -record(state, {savefile :: file:filename(),
19 |                 id :: peer_id(),
20 |                 tid :: ets:tid()}).
21 | 
22 | -type obj() :: #obj{}.
23 | -type state() :: #state{}.
24 | -type key() :: any().
25 | -type value() :: any().
26 | 
27 | %%===================================================================
28 | 
29 | -spec init(ensemble_id(), peer_id(), []) -> state().
30 | init(Ensemble, Id, []) ->
31 |     %% TODO: Any concerns about using hash here?
32 |     %% TODO: For root ensemble, should we use different naming scheme?
33 |     <<Hash:160/integer>> = riak_ensemble_util:sha(term_to_binary({Ensemble, Id})),
34 |     Name = integer_to_list(Hash),
35 |     {ok, Root} = application:get_env(riak_ensemble, data_root),
36 |     File = filename:join([Root, "ensembles", Name ++ "_kv"]),
37 |     Tid = reload_data(File),
38 |     #state{savefile=File, tid=Tid, id=Id}.
39 | 
40 | %%===================================================================
41 | 
42 | -spec new_obj(epoch(), seq(), key(), value()) -> obj().
43 | new_obj(Epoch, Seq, Key, Value) ->
44 |     #obj{epoch=Epoch, seq=Seq, key=Key, value=Value}.
45 | 
46 | %%===================================================================
47 | 
48 | -spec obj_epoch(obj()) -> epoch().
49 | obj_epoch(Obj) ->
50 |     Obj#obj.epoch.
51 | 
52 | -spec obj_seq(obj()) -> seq().
53 | obj_seq(Obj) ->
54 |     Obj#obj.seq.
55 | 
56 | -spec obj_key(obj()) -> key().
57 | obj_key(Obj) ->
58 |     Obj#obj.key.
59 | 
60 | -spec obj_value(obj()) -> value().
61 | obj_value(Obj) ->
62 |     Obj#obj.value.
63 | 
64 | %%===================================================================
65 | 
66 | -spec set_obj_epoch(epoch(), obj()) -> obj().
67 | set_obj_epoch(Epoch, Obj) ->
68 |     Obj#obj{epoch=Epoch}.
69 | 
70 | -spec set_obj_seq(seq(), obj()) -> obj().
71 | set_obj_seq(Seq, Obj) ->
72 |     Obj#obj{seq=Seq}.
73 | 
74 | -spec set_obj_value(value(), obj()) -> obj().
75 | set_obj_value(Value, Obj) ->
76 |     Obj#obj{value=Value}.
77 | 
78 | %%===================================================================
79 | 
80 | -spec get(key(), riak_ensemble_backend:from(), state()) -> state().
81 | get(Key, From, State=#state{tid=Tid}) ->
82 |     Reply = case ets:lookup(Tid, Key) of
83 |                 [Value] ->
84 |                     element(2, Value);
85 |                 [] ->
86 |                     notfound
87 |             end,
88 |     riak_ensemble_backend:reply(From, Reply),
89 |     State.
90 | 
91 | -spec put(key(), obj(), riak_ensemble_backend:from(), state()) -> state().
92 | put(Key, Obj, From, State=#state{savefile=File, tid=Tid}) ->
93 |     ets:insert(Tid, {Key, Obj}),
94 |     save_data(File, Tid),
95 |     riak_ensemble_backend:reply(From, Obj),
96 |     State.
97 | 
98 | %%===================================================================
99 | 
100 | -spec tick(epoch(), seq(), peer_id(), views(), state()) -> state().
101 | tick(_Epoch, _Seq, _Leader, _Views, State) ->
102 |     State.
103 | 
104 | -spec ping(pid(), state()) -> {ok, state()}.
105 | ping(_From, State) ->
106 |     {ok, State}.
107 | 
108 | ready_to_start() ->
109 |     true.
110 | 
111 | synctree_path(_Ensemble, _Id) ->
112 |     default.
113 | 
114 | %%===================================================================
115 | 
116 | -spec handle_down(reference(), pid(), term(), state()) -> false.
117 | handle_down(_Ref, _Pid, _Reason, _State) ->
118 |     false.
119 | 
120 | %%===================================================================
121 | 
122 | -spec reload_data(file:filename()) -> ets:tid().
123 | reload_data(File) ->
124 |     case load_saved_data(File) of
125 |         {ok, Tid} ->
126 |             Tid;
127 |         not_found ->
128 |             ets:new(cadre, [ordered_set, private, {read_concurrency, true}])
129 |     end.
130 | 
131 | -spec load_saved_data(file:filename()) -> not_found | {ok, ets:tid()}.
132 | load_saved_data(File) ->
133 |     case filelib:is_regular(File) of
134 |         true ->
135 |             %% ets:file2tab/2 returns {error, Reason} for a corrupt or
136 |             %% truncated savefile; treat that as having no saved data
137 |             %% rather than crashing in reload_data/1.
138 |             case ets:file2tab(File, [{verify, true}]) of
139 |                 {ok, Tid} ->
140 |                     {ok, Tid};
141 |                 {error, _Reason} ->
142 |                     not_found
143 |             end;
144 |         _ ->
145 |             not_found
146 |     end.
147 | 
148 | -spec save_data(file:filename(), ets:tid()) -> ok | {error, term()}.
149 | save_data(File, Tid) ->
150 |     ok = filelib:ensure_dir(File),
151 |     ets:tab2file(Tid, File, [{extended_info, [md5sum, object_count]}, {sync, true}]).
152 | 
--------------------------------------------------------------------------------
/src/vanguard_http_callback.erl:
--------------------------------------------------------------------------------
1 | -module(vanguard_http_callback).
2 | 
3 | -export([handle/2,
4 |          handle_event/3]).
5 | 
6 | -include_lib("elli/include/elli.hrl").
7 | -behaviour(elli_handler).
8 | 
9 | handle(Req, _Args) ->
10 |     handle(Req#req.method, elli_request:path(Req), Req).
11 | 
12 | handle('GET', [<<"service">>, ServiceId], _Req) ->
13 |     %% unknown services are a 404 rather than a crash in the handler
14 |     case cadre:find(ServiceId) of
15 |         {_, Node, Port} ->
16 |             {ok, [], jsx:encode(#{<<"node">> => Node,
17 |                                   <<"port">> => Port})};
18 |         notfound ->
19 |             {404, [], <<"Not Found">>}
20 |     end;
21 | handle('PUT', [<<"node">>, <<"register">>], Req) ->
22 |     #{<<"node">> := Node,
23 |       <<"port">> := Port,
24 |       <<"service">> := #{<<"id">> := ServiceId}} = jsx:decode(elli_request:body(Req), [return_maps]),
25 |     ok = cadre:register(ServiceId, Node, Port),
26 |     {204, [], <<>>};
27 | handle(_, _, _Req) ->
28 |     {404, [], <<"Not Found">>}.
29 | 
30 | handle_event(_Event, _Data, _Args) ->
31 |     ok.
32 | 
--------------------------------------------------------------------------------
/src/vanguard_sup.erl:
--------------------------------------------------------------------------------
1 | %%%-------------------------------------------------------------------
2 | %% @doc vanguard top level supervisor.
3 | %% @end
4 | %%%-------------------------------------------------------------------
5 | 
6 | -module(vanguard_sup).
7 | 
8 | -behaviour(supervisor).
9 | 
10 | %% API
11 | -export([start_link/0]).
12 | 
13 | %% Supervisor callbacks
14 | -export([init/1]).
15 | 
16 | -define(SERVER, ?MODULE).
17 | 
18 | %%====================================================================
19 | %% API functions
20 | %%====================================================================
21 | 
22 | start_link() ->
23 |     supervisor:start_link({local, ?SERVER}, ?MODULE, []).
24 | 25 | %%==================================================================== 26 | %% Supervisor callbacks 27 | %%==================================================================== 28 | 29 | %% Child :: {Id,StartFunc,Restart,Shutdown,Type,Modules} 30 | init([]) -> 31 | DataRoot = application:get_env(riak_ensemble, data_root, "./data"), 32 | ListenPort = application:get_env(vanguard, http_port, 8080), 33 | 34 | HttpInterface = {http_interface, {elli, start_link, [[{callback, vanguard_http_callback}, {port, ListenPort}]]}, 35 | permanent, 20000, worker, [elli]}, 36 | Ensemble = {riak_ensemble_sup, {riak_ensemble_sup, start_link, [filename:join(DataRoot, atom_to_list(node()))]}, 37 | permanent, 20000, supervisor, [riak_ensemble_sup]}, 38 | Cluster = {vanguard_cluster, {vanguard_cluster, start_link, []}, 39 | permanent, 20000, worker, [vanguard_cluster]}, 40 | 41 | {ok, {{one_for_one, 0, 1}, [HttpInterface, Ensemble, Cluster]}}. 42 | 43 | %%==================================================================== 44 | %% Internal functions 45 | %%==================================================================== 46 | --------------------------------------------------------------------------------