├── .gitignore ├── Dockerfile ├── Makefile ├── README.md ├── apps └── udon │ └── src │ ├── udon.app.src │ ├── udon.erl │ ├── udon.hrl │ ├── udon_app.erl │ ├── udon_console.erl │ ├── udon_http_ping.erl │ ├── udon_op_fsm.erl │ ├── udon_op_fsm_sup.erl │ ├── udon_sup.erl │ └── udon_vnode.erl ├── config ├── admin_bin ├── advanced.config ├── sys.config ├── vars.config ├── vars_dev1.config ├── vars_dev2.config ├── vars_dev3.config └── vm.args ├── priv └── 01-udon.schema ├── rebar.config └── rebar.lock /.gitignore: -------------------------------------------------------------------------------- 1 | .rebar3 2 | rebar3 3 | _* 4 | .eunit 5 | *.o 6 | *.beam 7 | *.plt 8 | *.swp 9 | *.swo 10 | .erlang.cookie 11 | ebin 12 | log 13 | erl_crash.dump 14 | .rebar 15 | _rel 16 | _deps 17 | _plugins 18 | _tdeps 19 | logs 20 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM synlay/erlang:19.3 2 | RUN git clone https://github.com/mrallen1/udon_ng /root/udon 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BASEDIR = $(shell pwd) 2 | REBAR = rebar3 3 | RELPATH = _build/default/rel/udon 4 | PRODRELPATH = _build/prod/rel/udon 5 | APPNAME = udon 6 | SHELL = /bin/bash 7 | 8 | release: 9 | $(REBAR) release 10 | mkdir -p $(RELPATH)/../udon_config 11 | mkdir -p $(RELPATH)/../udon_data 12 | cp _build/default/rel/udon/etc/* _build/default/rel/udon_config/ 13 | 14 | console: 15 | cd $(RELPATH) && ./bin/udon console 16 | 17 | prod-release: 18 | $(REBAR) as prod release 19 | mkdir -p $(PRODRELPATH)/../udon_config 20 | [ -f $(PRODRELPATH)/../udon_config/udon.conf ] || cp $(PRODRELPATH)/etc/udon.conf $(PRODRELPATH)/../udon_config/udon.conf 21 | [ -f $(PRODRELPATH)/../udon_config/advanced.config ] || cp $(PRODRELPATH)/etc/advanced.config 
$(PRODRELPATH)/../udon_config/advanced.config 22 | 23 | prod-console: 24 | cd $(PRODRELPATH) && ./bin/udon console 25 | 26 | compile: 27 | $(REBAR) compile 28 | 29 | clean: 30 | $(REBAR) clean 31 | 32 | test: 33 | $(REBAR) ct 34 | 35 | devrel1: 36 | $(REBAR) as dev1 release 37 | 38 | devrel2: 39 | $(REBAR) as dev2 release 40 | 41 | devrel3: 42 | $(REBAR) as dev3 release 43 | 44 | devrel: devrel1 devrel2 devrel3 45 | 46 | dev1-console: 47 | $(BASEDIR)/_build/dev1/rel/udon/bin/$(APPNAME) console 48 | 49 | dev2-console: 50 | $(BASEDIR)/_build/dev2/rel/udon/bin/$(APPNAME) console 51 | 52 | dev3-console: 53 | $(BASEDIR)/_build/dev3/rel/udon/bin/$(APPNAME) console 54 | 55 | devrel-start: 56 | for d in $(BASEDIR)/_build/dev*; do $$d/rel/udon/bin/$(APPNAME) start; done 57 | 58 | devrel-join: 59 | for d in $(BASEDIR)/_build/dev{2,3}; do $$d/rel/udon/bin/$(APPNAME)-admin cluster join udon1@127.0.0.1; done 60 | 61 | devrel-cluster-plan: 62 | $(BASEDIR)/_build/dev1/rel/udon/bin/$(APPNAME)-admin cluster plan 63 | 64 | devrel-cluster-commit: 65 | $(BASEDIR)/_build/dev1/rel/udon/bin/$(APPNAME)-admin cluster commit 66 | 67 | devrel-status: 68 | $(BASEDIR)/_build/dev1/rel/udon/bin/$(APPNAME)-admin member-status 69 | 70 | devrel-ping: 71 | for d in $(BASEDIR)/_build/dev*; do $$d/rel/udon/bin/$(APPNAME) ping; done 72 | 73 | devrel-stop: 74 | for d in $(BASEDIR)/_build/dev*; do $$d/rel/udon/bin/$(APPNAME) stop; done 75 | 76 | start: 77 | $(BASEDIR)/$(RELPATH)/bin/$(APPNAME) start 78 | 79 | stop: 80 | $(BASEDIR)/$(RELPATH)/bin/$(APPNAME) stop 81 | 82 | attach: 83 | $(BASEDIR)/$(RELPATH)/bin/$(APPNAME) attach 84 | 85 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | udon 2 | ==== 3 | 4 | This is the "next generation" of *udon*, an example application to demonstrate 5 | and learn about riak_core. 6 | 7 | It should work on Erlang 19. 
It does not currently compile on OTP 20 without quite a bit of work on its dependencies.
49 | 50 | Play with Clustering 51 | -------------------- 52 | 53 | Build 3 releases that can run on the same machine: 54 | 55 | make devrel 56 | 57 | Start them in different consoles: 58 | 59 | make dev1-console 60 | make dev2-console 61 | make dev3-console 62 | 63 | join 2 nodes to the first one: 64 | 65 | make devrel-join 66 | 67 | check the status of the cluster: 68 | 69 | make devrel-status 70 | 71 | you should see something like this: 72 | 73 | ================================= Membership ================================== 74 | Status Ring Pending Node 75 | ------------------------------------------------------------------------------- 76 | joining 0.0% -- 'udon2@127.0.0.1' 77 | joining 0.0% -- 'udon3@127.0.0.1' 78 | valid 100.0% -- 'udon1@127.0.0.1' 79 | ------------------------------------------------------------------------------- 80 | Valid:1 / Leaving:0 / Exiting:0 / Joining:2 / Down:0 81 | 82 | it should say that 3 nodes are joining, now check the cluster plan: 83 | 84 | make devrel-cluster-plan 85 | 86 | it should display the cluster plan, now we can commit the plan: 87 | 88 | make devrel-cluster-commit 89 | 90 | check the status of the cluster again: 91 | 92 | make devrel-status 93 | 94 | you could see the vnodes transfering: 95 | 96 | ================================= Membership ================================== 97 | Status Ring Pending Node 98 | ------------------------------------------------------------------------------- 99 | valid 75.0% 25.0% 'udon1@127.0.0.1' 100 | valid 9.4% 25.0% 'udon2@127.0.0.1' 101 | valid 7.8% 25.0% 'udon3@127.0.0.1' 102 | ------------------------------------------------------------------------------- 103 | Valid:3 / Leaving:0 / Exiting:0 / Joining:0 / Down:0 104 | 105 | at some point you should see something like this: 106 | 107 | ================================= Membership ================================== 108 | Status Ring Pending Node 109 | 
------------------------------------------------------------------------------- 110 | valid 33.3% -- 'udon1@127.0.0.1' 111 | valid 33.3% -- 'udon2@127.0.0.1' 112 | valid 33.3% -- 'udon3@127.0.0.1' 113 | ------------------------------------------------------------------------------- 114 | Valid:3 / Leaving:0 / Exiting:0 / Joining:0 / Down:0 115 | 116 | when you are bored you can stop them: 117 | 118 | make devrel-stop 119 | 120 | [1]: http://www.erlang-factory.com/india2017/ 121 | -------------------------------------------------------------------------------- /apps/udon/src/udon.app.src: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | {application, udon, 3 | [ 4 | {description, "Distributed file cache"}, 5 | {vsn, "1"}, 6 | {registered, []}, 7 | {applications, [ 8 | kernel, 9 | stdlib, 10 | sasl, 11 | riak_core, 12 | setup 13 | ]}, 14 | {mod, { udon_app, []}}, 15 | {env, []} 16 | ]}. 17 | -------------------------------------------------------------------------------- /apps/udon/src/udon.erl: -------------------------------------------------------------------------------- 1 | -module(udon). 2 | -include("udon.hrl"). 3 | -include_lib("riak_core/include/riak_core_vnode.hrl"). 4 | 5 | -export([ 6 | ping/0, 7 | store/2, 8 | rename/2, 9 | fetch/1 10 | ]). 11 | 12 | -ignore_xref([ 13 | ping/0, 14 | store/2, 15 | rename/2, 16 | fetch/1 17 | ]). 18 | 19 | %% Public API 20 | 21 | %% @doc Stores a static file at the given path 22 | store(Path, Data) -> 23 | N = 3, 24 | W = 3, 25 | Timeout = 5000, % millisecs 26 | 27 | PHash = path_to_hash(Path), 28 | PRec = #file{ request_path = Path, path_md5 = PHash, csum = erlang:adler32(Data) }, 29 | 30 | {ok, ReqId} = udon_op_fsm:op(N, W, {store, PRec, Data}, ?KEY(PHash)), 31 | wait_for_reqid(ReqId, Timeout). 
%% @TODO Handle redirects
%% @doc Stores a redirect record so that future requests for Path are
%% pointed at NewPath. Writes N = 3 replicas and waits for W = 3 acks.
store(redirect, Path, NewPath) ->
    N = 3,
    W = 3,
    Timeout = 5000, % millisecs

    PHash = path_to_hash(Path),

    {ok, ReqId} = udon_op_fsm:op(N, W, {redirect, PHash, NewPath}, ?KEY(PHash)),
    wait_for_reqid(ReqId, Timeout).

%% @doc Retrieves a static file from the given path.
%% Returns the vnode reply `{Result, Location}' where Result is
%% `{ok, Data}', `not_found' or `{error, Reason}' (see udon_vnode).
fetch(Path) ->
    PHash = path_to_hash(Path),
    Idx = riak_core_util:chash_key(?KEY(PHash)),
    %% TODO: Get a preflist with more than one node
    [{Node, _Type}] = riak_core_apl:get_primary_apl(Idx, 1, udon),
    riak_core_vnode_master:sync_spawn_command(Node, {fetch, PHash}, udon_vnode_master).

%% @doc Stores the file found at Path under NewPath and leaves a
%% redirect behind at the old location.
%%
%% BUGFIX: fetch/1 returns a `{Result, Location}' tuple, not the raw
%% file data; the previous version stored that whole tuple as the new
%% file's contents. Unwrap the payload before re-storing it and
%% propagate fetch failures instead of writing garbage.
rename(Path, NewPath) ->
    case fetch(Path) of
        {{ok, Data}, _Location} ->
            store(NewPath, Data),
            store(redirect, Path, NewPath);
        {not_found, _Location} ->
            {error, not_found};
        {Error, _Location} ->
            Error
    end.

%% @doc Pings a random vnode to make sure communication is functional
ping() ->
    %% erlang:now/0 is deprecated since OTP 18; os:timestamp/0 yields
    %% the same {Mega, Sec, Micro} shape and is unique enough to pick a
    %% pseudo-random partition.
    DocIdx = riak_core_util:chash_key({<<"ping">>, term_to_binary(os:timestamp())}),
    PrefList = riak_core_apl:get_primary_apl(DocIdx, 1, udon),
    [{IndexNode, _Type}] = PrefList,
    riak_core_vnode_master:sync_spawn_command(IndexNode, ping, udon_vnode_master).

%% @doc Block until the op FSM identified by Id replies, or time out.
wait_for_reqid(Id, Timeout) ->
    receive {Id, Value} -> {ok, Value}
    after Timeout -> {error, timeout}
    end.

%% @doc Hash a request path (a string) down to a fixed 16-byte MD5 key.
path_to_hash(Path) when is_list(Path) ->
    crypto:hash(md5, Path).

--------------------------------------------------------------------------------
/apps/udon/src/udon.hrl:
--------------------------------------------------------------------------------

-define(PRINT(Var), io:format("DEBUG: ~p:~p - ~p~n~n ~p~n~n", [?MODULE, ?LINE, ??Var, Var])).
-define(KEY(X), {<<"udon">>, X}).

%% Per-file metadata stored alongside each file blob on disk.
-record(file, {
        request_path,       % original request path (string)
        path_md5,           % MD5 digest of request_path, used as the key
        version = 0,        % version number, incremented on each store
        csum,               % adler32 checksum of the file contents
        redirect = false    % true when this entry points at another path
        }).
11 | 12 | -------------------------------------------------------------------------------- /apps/udon/src/udon_app.erl: -------------------------------------------------------------------------------- 1 | -module(udon_app). 2 | 3 | -behaviour(application). 4 | 5 | %% Application callbacks 6 | -export([start/2, stop/1]). 7 | 8 | %% =================================================================== 9 | %% Application callbacks 10 | %% =================================================================== 11 | 12 | start(_StartType, _StartArgs) -> 13 | case udon_sup:start_link() of 14 | {ok, Pid} -> 15 | ok = riak_core:register([{vnode_module, udon_vnode}]), 16 | ok = riak_core_node_watcher:service_up(udon, self()), 17 | 18 | Dispatch = cowboy_router:compile([ 19 | {'_', [ 20 | {"/ping", udon_http_ping, []}%, 21 | %{'_', udon_http_api, []} 22 | ] 23 | } 24 | ]), 25 | 26 | %% TODO: make the port configurable (its already 27 | %% in the cuttlefish settings 28 | {ok, _} = cowboy:start_clear(http, [{port, 8080}], #{ 29 | env => #{dispatch => Dispatch} 30 | }), 31 | 32 | {ok, Pid}; 33 | {error, Reason} -> 34 | {error, Reason} 35 | end. 36 | 37 | stop(_State) -> 38 | ok. 39 | -------------------------------------------------------------------------------- /apps/udon/src/udon_console.erl: -------------------------------------------------------------------------------- 1 | %% @doc Interface for riak_searchng-admin commands. 2 | -module(udon_console). 3 | -export([staged_join/1, 4 | down/1, 5 | ringready/1]). 6 | -ignore_xref([join/1, 7 | leave/1, 8 | remove/1, 9 | ringready/1]). 10 | 11 | staged_join([NodeStr]) -> 12 | Node = list_to_atom(NodeStr), 13 | join(NodeStr, fun riak_core:staged_join/1, 14 | "Success: staged join request for ~p to ~p~n", [node(), Node]). 
%% @doc Shared driver for the console "join" commands: apply JoinFn
%% (e.g. riak_core:staged_join/1) to the target node and translate its
%% result into a console message plus an ok | error status.
join(NodeStr, JoinFn, SuccessFmt, SuccessArgs) ->
    try
        case JoinFn(NodeStr) of
            ok ->
                io:format(SuccessFmt, SuccessArgs),
                ok;
            {error, not_reachable} ->
                io:format("Node ~s is not reachable!~n", [NodeStr]),
                error;
            {error, different_ring_sizes} ->
                io:format("Failed: ~s has a different ring_creation_size~n",
                          [NodeStr]),
                error;
            {error, unable_to_get_join_ring} ->
                io:format("Failed: Unable to get ring from ~s~n", [NodeStr]),
                error;
            {error, not_single_node} ->
                io:format("Failed: This node is already a member of a "
                          "cluster~n"),
                error;
            {error, self_join} ->
                io:format("Failed: This node cannot join itself in a "
                          "cluster~n"),
                error;
            {error, _} ->
                io:format("Join failed. Try again in a few moments.~n", []),
                error
        end
    catch
        Exception:Reason ->
            lager:error("Join failed ~p:~p", [Exception, Reason]),
            io:format("Join failed, see log for details~n"),
            error
    end.


%% @doc Mark Node (given as a string) as down in the ring.
%% CONSISTENCY FIX: Node is a string here, so print it with ~s in every
%% message; the previous version mixed ~p (which wraps a string in
%% quotes) and ~s.
down([Node]) ->
    try
        case riak_core:down(list_to_atom(Node)) of
            ok ->
                io:format("Success: ~s marked as down~n", [Node]),
                ok;
            {error, legacy_mode} ->
                io:format("Cluster is currently in legacy mode~n"),
                ok;
            {error, is_up} ->
                io:format("Failed: ~s is up~n", [Node]),
                error;
            {error, not_member} ->
                io:format("Failed: ~s is not a member of the cluster.~n",
                          [Node]),
                error;
            {error, only_member} ->
                io:format("Failed: ~s is the only member.~n", [Node]),
                error
        end
    catch
        Exception:Reason ->
            lager:error("Down failed ~p:~p", [Exception, Reason]),
            io:format("Down failed, see log for details~n"),
            error
    end.
78 | 79 | ringready([]) -> 80 | try 81 | case riak_core_status:ringready() of 82 | {ok, Nodes} -> 83 | io:format("TRUE All nodes agree on the ring ~p\n", [Nodes]); 84 | {error, {different_owners, N1, N2}} -> 85 | io:format("FALSE Node ~p and ~p list different partition owners\n", [N1, N2]), 86 | error; 87 | {error, {nodes_down, Down}} -> 88 | io:format("FALSE ~p down. All nodes need to be up to check.\n", [Down]), 89 | error 90 | end 91 | catch 92 | Exception:Reason -> 93 | lager:error("Ringready failed ~p:~p", [Exception, 94 | Reason]), 95 | io:format("Ringready failed, see log for details~n"), 96 | error 97 | end. 98 | -------------------------------------------------------------------------------- /apps/udon/src/udon_http_ping.erl: -------------------------------------------------------------------------------- 1 | -module(udon_http_ping). 2 | 3 | %% Standard callbacks. 4 | -export([init/2]). 5 | -export([allowed_methods/2]). 6 | -export([content_types_provided/2]). 7 | -export([to_json/2]). 8 | 9 | init(Req, Opts) -> 10 | {cowboy_rest, Req, Opts}. 11 | 12 | allowed_methods(Req, State) -> 13 | {[<<"GET">>], Req, State}. 14 | 15 | content_types_provided(Req, State) -> 16 | {[ 17 | {{<<"application">>, <<"json">>, []}, to_json} 18 | ], Req, State}. 19 | 20 | to_json(Req, State) -> 21 | {pong, Partition} = udon:ping(), 22 | Response = jsone:encode(#{pong => integer_to_binary(Partition)}), 23 | {Response, Req, State}. 24 | -------------------------------------------------------------------------------- /apps/udon/src/udon_op_fsm.erl: -------------------------------------------------------------------------------- 1 | -module(udon_op_fsm). 2 | -behavior(gen_fsm). 3 | -include("udon.hrl"). 4 | 5 | %% API 6 | -export([start_link/6, op/3, op/4]). 7 | 8 | %% Callbacks 9 | -export([init/1, code_change/4, handle_event/3, handle_info/3, 10 | handle_sync_event/4, terminate/3]). 11 | 12 | %% States 13 | -export([prepare/2, execute/2, waiting/2]). 
14 | 15 | %% req_id: The request id so the caller can verify the response. 16 | %% 17 | %% sender: The pid of the sender so a reply can be made. 18 | %% 19 | %% prelist: The preflist for the data. 20 | %% 21 | %% num_w: The number of successful write replies. 22 | %% 23 | %% op: must be a two item tuple with the command and the params 24 | -record(state, {req_id :: pos_integer(), 25 | from :: pid(), 26 | n :: pos_integer(), 27 | w :: pos_integer(), 28 | op, 29 | % key used to calculate the hash 30 | key, 31 | accum, 32 | preflist :: riak_core_apl:preflist2(), 33 | num_w = 0 :: non_neg_integer()}). 34 | 35 | %%%=================================================================== 36 | %%% API 37 | %%%=================================================================== 38 | 39 | start_link(ReqID, From, Op, Key, N, W) -> 40 | gen_fsm:start_link(?MODULE, [ReqID, From, Op, Key, N, W], []). 41 | 42 | op(N, W, Op) -> 43 | op(N, W, Op, Op). 44 | 45 | op(N, W, Op, Key) -> 46 | ReqID = reqid(), 47 | udon_op_fsm_sup:start_write_fsm([ReqID, self(), Op, Key, N, W]), 48 | {ok, ReqID}. 49 | 50 | %%%=================================================================== 51 | %%% States 52 | %%%=================================================================== 53 | 54 | %% @doc Initialize the state data. 55 | init([ReqID, From, Op, Key, N, W]) -> 56 | SD = #state{req_id=ReqID, from=From, n=N, w=W, op=Op, key=Key, accum=[]}, 57 | {ok, prepare, SD, 0}. 58 | 59 | %% @doc Prepare the write by calculating the _preference list_. 60 | prepare(timeout, SD0=#state{n=N, key=Key}) -> 61 | DocIdx = riak_core_util:chash_key(Key), 62 | Preflist = riak_core_apl:get_apl(DocIdx, N, udon), 63 | SD = SD0#state{preflist=Preflist}, 64 | {next_state, execute, SD, 0}. 65 | 66 | %% @doc Execute the write request and then go into waiting state to 67 | %% verify it has meets consistency requirements. 
68 | execute(timeout, SD0=#state{req_id=ReqID, op=Op, preflist=Preflist}) -> 69 | Command = {ReqID, Op}, 70 | riak_core_vnode_master:command(Preflist, Command, {fsm, undefined, self()}, 71 | udon_vnode_master), 72 | {next_state, waiting, SD0}. 73 | 74 | %% @doc Wait for W write reqs to respond. 75 | waiting({ReqID, Resp}, SD0=#state{from=From, num_w=NumW0, w=W, accum=Accum}) -> 76 | NumW = NumW0 + 1, 77 | NewAccum = [Resp|Accum], 78 | SD = SD0#state{num_w=NumW, accum=NewAccum}, 79 | if 80 | NumW =:= W -> 81 | From ! {ReqID, NewAccum}, 82 | {stop, normal, SD}; 83 | true -> {next_state, waiting, SD} 84 | end. 85 | 86 | handle_info(Info, _StateName, StateData) -> 87 | lager:warning("got unexpected info ~p", [Info]), 88 | {stop,badmsg,StateData}. 89 | 90 | handle_event(Event, _StateName, StateData) -> 91 | lager:warning("got unexpected event ~p", [Event]), 92 | {stop,badmsg,StateData}. 93 | 94 | handle_sync_event(Event, _From, _StateName, StateData) -> 95 | lager:warning("got unexpected sync event ~p", [Event]), 96 | {stop,badmsg,StateData}. 97 | 98 | code_change(_OldVsn, StateName, State, _Extra) -> {ok, StateName, State}. 99 | 100 | terminate(_Reason, _SN, _SD) -> 101 | ok. 102 | 103 | %% Private API 104 | 105 | reqid() -> erlang:phash2(erlang:monotonic_time()). 106 | -------------------------------------------------------------------------------- /apps/udon/src/udon_op_fsm_sup.erl: -------------------------------------------------------------------------------- 1 | %% @doc Supervise the udon_op FSM. 2 | -module(udon_op_fsm_sup). 3 | -behavior(supervisor). 4 | 5 | -export([start_write_fsm/1, 6 | start_link/0]). 7 | -export([init/1]). 8 | 9 | start_write_fsm(Args) -> 10 | supervisor:start_child(?MODULE, Args). 11 | 12 | start_link() -> 13 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 
14 | 15 | init([]) -> 16 | WriteFsm = {undefined, 17 | {udon_op_fsm, start_link, []}, 18 | temporary, 5000, worker, [udon_op_fsm]}, 19 | {ok, {{simple_one_for_one, 10, 10}, [WriteFsm]}}. 20 | 21 | -------------------------------------------------------------------------------- /apps/udon/src/udon_sup.erl: -------------------------------------------------------------------------------- 1 | -module(udon_sup). 2 | 3 | -behaviour(supervisor). 4 | 5 | %% API 6 | -export([start_link/0]). 7 | 8 | %% Supervisor callbacks 9 | -export([init/1]). 10 | 11 | %% =================================================================== 12 | %% API functions 13 | %% =================================================================== 14 | 15 | start_link() -> 16 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 17 | 18 | %% =================================================================== 19 | %% Supervisor callbacks 20 | %% =================================================================== 21 | 22 | init(_Args) -> 23 | VMaster = { udon_vnode_master, 24 | {riak_core_vnode_master, start_link, [udon_vnode]}, 25 | permanent, 5000, worker, [riak_core_vnode_master]}, 26 | 27 | OpFSMs = {udon_op_fsm_sup, 28 | {udon_op_fsm_sup, start_link, []}, 29 | permanent, infinity, supervisor, [udon_op_fsm_sup]}, 30 | 31 | { ok, 32 | { {one_for_one, 5, 10}, 33 | [VMaster, OpFSMs]}}. 34 | -------------------------------------------------------------------------------- /apps/udon/src/udon_vnode.erl: -------------------------------------------------------------------------------- 1 | -module(udon_vnode). 2 | -behaviour(riak_core_vnode). 3 | -include("udon.hrl"). 4 | 5 | -include_lib("riak_core/include/riak_core_vnode.hrl"). 
6 | 7 | -export([start_vnode/1, 8 | init/1, 9 | terminate/2, 10 | handle_command/3, 11 | is_empty/1, 12 | delete/1, 13 | handle_handoff_command/3, 14 | handoff_starting/2, 15 | handoff_cancelled/1, 16 | handoff_finished/2, 17 | handle_handoff_data/2, 18 | encode_handoff_item/2, 19 | handle_coverage/4, 20 | handle_exit/3]). 21 | 22 | -ignore_xref([ 23 | start_vnode/1 24 | ]). 25 | 26 | -record(state, {partition, basedir="udon_data"}). 27 | 28 | %% API 29 | start_vnode(I) -> 30 | riak_core_vnode_master:get_vnode_pid(I, ?MODULE). 31 | 32 | init([Partition]) -> 33 | St = #state{ partition=Partition }, 34 | Base = make_base_path(St), 35 | %% filelib:ensure_dir/1 you make me so sad. You won't make the parents 36 | %% unless there's a last element which need not exist and will not be 37 | %% created. Crazytown, baby. 38 | case filelib:ensure_dir(Base ++ "/dummy") of 39 | ok -> {ok, St}; 40 | {error, Reason} -> {error, Reason} 41 | end. 42 | 43 | %% Sample command: respond to a ping 44 | handle_command(ping, _Sender, State) -> 45 | {reply, {pong, State#state.partition}, State}; 46 | 47 | handle_command({RequestId, {store, R, Data}}, _Sender, State) -> 48 | MetaPath = make_metadata_path(State, R), 49 | NewVersion = case filelib:is_regular(MetaPath) of 50 | true -> 51 | OldMD = get_metadata(State, R), 52 | OldMD#file.version + 1; 53 | false -> 54 | 1 55 | end, 56 | {MetaResult, DataResult, Loc} = store(State, R#file{version=NewVersion}, Data), 57 | {reply, {RequestId, {MetaResult, DataResult, Loc}}, State}; 58 | 59 | handle_command({fetch, PHash}, _Sender, State) -> 60 | MetaPath = make_metadata_path(State, PHash), 61 | Res = case filelib:is_regular(MetaPath) of 62 | true -> 63 | MD = get_metadata(State, PHash), 64 | get_data(State, MD); 65 | false -> 66 | not_found 67 | end, 68 | {reply, {Res, filename:join([make_base_path(State), make_filename(PHash)])}, State}; 69 | 70 | handle_command(Message, _Sender, State) -> 71 | ?PRINT({unhandled_command, Message}), 72 | {noreply, 
State}. 73 | 74 | %% The `VisitFun' is riak_core_handoff_sender:visit_item/3 75 | %% visit_item/3 is going to do all of the hard work of taking your serialized 76 | %% data and pushing it over the wire to the remote node. 77 | %% 78 | %% Acc0 here is the internal state of the handoff. visit_item/3 returns an 79 | %% updated handoff state, so you should use that in your own fold over 80 | %% vnode data elements. 81 | %% 82 | %% The goal here is simple: for each vnode, find all objects, then for 83 | %% each object in a vnode, grab its metadata and the file contents, serialize it 84 | %% using the `encode_handoff_item/2' callback and ship it over to the 85 | %% remote node. 86 | %% 87 | %% The remote node is going to receive the serialized data using 88 | %% the `handle_handoff_data/2' function below. 89 | handle_handoff_command(?FOLD_REQ{foldfun=VisitFun, acc0=Acc0}, _Sender, State) -> 90 | AllObjects = get_all_objects(State), 91 | Base = make_base_path(State), 92 | 93 | Do = fun(Object, AccIn) -> 94 | MPath = path_from_object(Base, Object, ".meta"), 95 | ?PRINT(MPath), 96 | Meta = get_metadata(MPath), 97 | ?PRINT(Meta), 98 | %% TODO: Get all file versions 99 | {ok, LatestFile} = get_data(State, Meta), 100 | ?PRINT(LatestFile), 101 | %% This VisitFun expects a {Bucket, Key} pair 102 | %% but we don't have "buckets" in our application 103 | %% So we will just use our KEY macro from udon.hrl 104 | %% and ignore it in the encoding. 105 | AccOut = VisitFun(?KEY(Meta#file.path_md5), {Meta, LatestFile}, AccIn), 106 | ?PRINT(AccOut), 107 | AccOut 108 | end, 109 | Final = lists:foldl(Do, Acc0, AllObjects), 110 | {reply, Final, State}; 111 | 112 | handle_handoff_command(Message, _Sender, State) -> 113 | ?PRINT({unhandled_handoff_command, Message}), 114 | {noreply, State}. 115 | 116 | handoff_starting(_TargetNode, State) -> 117 | {true, State}. 118 | 119 | handoff_cancelled(State) -> 120 | {ok, State}. 121 | 122 | handoff_finished(_TargetNode, State) -> 123 | {ok, State}. 
124 | 125 | handle_handoff_data(Data, State) -> 126 | {Meta, Blob} = binary_to_term(Data), 127 | R = case Meta#file.csum =:= erlang:adler32(Blob) of 128 | true -> 129 | Result = store(State, Meta, Blob), 130 | ?PRINT(Result), 131 | ok; 132 | false -> 133 | {error, file_checksum_differs} 134 | end, 135 | {reply, R, State}. 136 | 137 | encode_handoff_item(_Key, Data = {_Meta, _File}) -> 138 | term_to_binary(Data). 139 | 140 | is_empty(State) -> 141 | Result = case list_dir(State) of 142 | [] -> true; 143 | {error, _Reason} -> true; 144 | _ -> false 145 | end, 146 | {Result, State}. 147 | 148 | delete(State) -> 149 | {ok, State}. 150 | 151 | handle_coverage(_Req, _KeySpaces, _Sender, State) -> 152 | {stop, not_implemented, State}. 153 | 154 | handle_exit(_Pid, _Reason, State) -> 155 | {noreply, State}. 156 | 157 | terminate(_Reason, _State) -> 158 | ok. 159 | 160 | %% Private API 161 | 162 | get_all_objects(State) -> 163 | [ strip_meta(F) || F <- filelib:wildcard("*.meta", make_base_path(State)) ]. 164 | 165 | strip_meta(Filename) -> 166 | Index = string:chr(Filename, $.), 167 | string:substr(Filename, 1, Index - 1). 168 | 169 | list_dir(State) -> 170 | case file:list_dir(make_base_path(State)) of 171 | {ok, Files} -> Files; 172 | Other -> Other 173 | end. 174 | 175 | path_from_object(Base, Object, Suffix) -> 176 | File = Object ++ Suffix, 177 | filename:join([Base, File]). 178 | 179 | get_metadata(State = #state{}, #file{ path_md5 = Hash }) -> 180 | get_metadata(State, Hash); 181 | get_metadata(State = #state{}, Hash) when is_binary(Hash) -> 182 | MDPath = make_metadata_path(State, Hash), 183 | get_metadata(MDPath). 184 | 185 | get_metadata(MetaDataPath) -> 186 | {ok, Data} = file:read_file(MetaDataPath), 187 | binary_to_term(Data). 
%% @doc Read a stored blob for file record R and verify its adler32
%% checksum against the one recorded in the metadata.
get_data(State = #state{}, R = #file{ csum = Csum }) ->
    {ok, Data} = file:read_file(make_versioned_file_path(State, R)),
    case Csum =:= erlang:adler32(Data) of
        true -> {ok, Data};
        false -> {error, file_checksum_differs}
    end.

%% @doc Path of the ".meta" sidecar file for a file record or raw hash.
make_metadata_path(State = #state{}, #file{ path_md5 = Hash }) ->
    make_metadata_path(State, Hash);
make_metadata_path(State = #state{}, Hash) when is_binary(Hash) ->
    filename:join([make_base_path(State), make_metadata_filename(Hash)]).

%% @doc Path of a specific stored version of a file's data.
make_versioned_file_path(State = #state{}, #file{ path_md5 = Hash, version = V} ) ->
    filename:join([make_base_path(State) , make_versioned_filename(Hash, V)]).

%% @doc Per-partition data directory: <basedir>/<partition>.
make_base_path(#state{partition = P, basedir = Base}) ->
    filename:join([Base, integer_to_list(P)]).

%% @doc Persist a file: write the metadata record and the versioned
%% blob, and return both results plus the file's canonical location.
store(State = #state{}, R = #file{ path_md5 = PHash }, Blob) ->
    Base = make_base_path(State),
    Res0 = store_meta_file(make_metadata_path(State, R), R),
    Res1 = store_file(make_versioned_file_path(State, R), Blob),
    {Res0, Res1, filename:join([Base, make_filename(PHash)])}.

make_metadata_filename(Hash) when is_binary(Hash) ->
    make_filename(Hash) ++ ".meta".

make_filename(Hash) when is_binary(Hash) ->
    hexstring(Hash).

make_versioned_filename(Hash, Version) when is_integer(Version) andalso is_binary(Hash) ->
    make_filename(Hash) ++ "." ++ integer_to_list(Version).

%% @doc Serialize a metadata record and write it to Loc.
store_meta_file(Loc, Rec) ->
    Bin = term_to_binary(Rec),
    store_file(Loc, Bin).

store_file(Loc, Data) ->
    file:write_file(Loc, Data).

%% @doc Render a 16-byte (128-bit) MD5 digest as a 32-character,
%% zero-padded, lowercase hex string.
%% FIX: the binary pattern had been garbled to `<>'; restored the
%% 128-bit unsigned match that the ~32.16.0b format string implies.
hexstring(<<X:128/big-unsigned-integer>>) ->
    lists:flatten(io_lib:format("~32.16.0b", [X])).
231 | -------------------------------------------------------------------------------- /config/admin_bin: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # /bin/sh on Solaris is not a POSIX compatible shell, but /usr/bin/ksh is. 4 | if [ `uname -s` = 'SunOS' -a "${POSIX_SHELL}" != "true" ]; then 5 | POSIX_SHELL="true" 6 | export POSIX_SHELL 7 | # To support 'whoami' add /usr/ucb to path 8 | PATH=/usr/ucb:$PATH 9 | export PATH 10 | exec /usr/bin/ksh $0 "$@" 11 | fi 12 | unset POSIX_SHELL # clear it so if we invoke other scripts, they run as ksh as well 13 | 14 | 15 | SCRIPT=$(readlink $0 || true) 16 | if [ -z $SCRIPT ]; then 17 | SCRIPT=$0 18 | fi; 19 | 20 | 21 | SCRIPT_DIR="$(cd `dirname "$SCRIPT"` && pwd -P)" 22 | RELEASE_ROOT_DIR="$(cd "$SCRIPT_DIR/.." && pwd -P)" 23 | REL_NAME="{{ release_name }}" 24 | CUTTLEFISH_CONF="{{ release_name }}.conf" 25 | REL_VSN="{{ rel_vsn }}" 26 | ERTS_VSN="{{ erts_vsn }}" 27 | CODE_LOADING_MODE="${CODE_LOADING_MODE:-embedded}" 28 | REL_DIR="$RELEASE_ROOT_DIR/releases/$REL_VSN" 29 | ERL_OPTS="{{ erl_opts }}" 30 | RUNNER_LOG_DIR="${RUNNER_LOG_DIR:-$RELEASE_ROOT_DIR/log}" 31 | RUNNER_BASE_DIR=$RELEASE_ROOT_DIR 32 | RUNNER_ETC_DIR="${RUNNER_ETC_DIR:-$RELEASE_ROOT_DIR/etc}" 33 | 34 | find_erts_dir() { 35 | __erts_dir="$RELEASE_ROOT_DIR/erts-$ERTS_VSN" 36 | if [ -d "$__erts_dir" ]; then 37 | ERTS_DIR="$__erts_dir"; 38 | ROOTDIR="$RELEASE_ROOT_DIR" 39 | else 40 | __erl="$(which erl)" 41 | code="io:format(\"~s\", [code:root_dir()]), halt()." 
42 | __erl_root="$("$__erl" -noshell -eval "$code")" 43 | ERTS_DIR="$__erl_root/erts-$ERTS_VSN" 44 | ROOTDIR="$__erl_root" 45 | fi 46 | } 47 | 48 | # Get node pid 49 | relx_get_pid() { 50 | if output="$(relx_nodetool rpcterms os getpid)" 51 | then 52 | echo "$output" | sed -e 's/"//g' 53 | return 0 54 | else 55 | echo "$output" 56 | return 1 57 | fi 58 | } 59 | 60 | relx_get_longname() { 61 | id="longname$(relx_gen_id)-${NAME}" 62 | "$BINDIR/erl" -boot start_clean -eval 'io:format("~s~n", [node()]), halt()' -noshell -name $id | sed -e 's/.*@//g' 63 | } 64 | 65 | # Connect to a remote node 66 | relx_rem_sh() { 67 | # Generate a unique id used to allow multiple remsh to the same node 68 | # transparently 69 | id="remsh$(relx_gen_id)-${NAME}" 70 | 71 | # Get the node's ticktime so that we use the same thing. 72 | TICKTIME="$(relx_nodetool rpcterms net_kernel get_net_ticktime)" 73 | 74 | # Setup remote shell command to control node 75 | exec "$BINDIR/erl" "$NAME_TYPE" "$id" -remsh "$NAME" -boot start_clean \ 76 | -boot_var ERTS_LIB_DIR "$ERTS_LIB_DIR" \ 77 | -setcookie "$COOKIE" -hidden -kernel net_ticktime $TICKTIME 78 | } 79 | 80 | # Generate a random id 81 | relx_gen_id() { 82 | od -X -N 4 /dev/urandom | head -n1 | awk '{print $2}' 83 | } 84 | 85 | # Control a node 86 | relx_nodetool() { 87 | command="$1"; shift 88 | 89 | "$ERTS_DIR/bin/escript" "$ROOTDIR/bin/nodetool" "$NAME_TYPE" "$NAME" \ 90 | -setcookie "$COOKIE" "$command" $@ 91 | } 92 | 93 | # Run an escript in the node's environment 94 | relx_escript() { 95 | shift; scriptpath="$1"; shift 96 | export RELEASE_ROOT_DIR 97 | 98 | "$ERTS_DIR/bin/escript" "$ROOTDIR/$scriptpath" $@ 99 | } 100 | 101 | # Output a start command for the last argument of run_erl 102 | relx_start_command() { 103 | printf "exec \"%s\" \"%s\"" "$RELEASE_ROOT_DIR/bin/$REL_NAME" \ 104 | "$START_OPTION" 105 | } 106 | 107 | # Make sure log directory exists 108 | mkdir -p "$RUNNER_LOG_DIR" 109 | 110 | # Use $CWD/sys.config if exists, otherwise 
releases/VSN/sys.config 111 | if [ -z "$NAME_ARG" ]; then 112 | NODENAME=`egrep '^[ \t]*nodename[ \t]*=[ \t]*' $RUNNER_ETC_DIR/$CUTTLEFISH_CONF 2> /dev/null | tail -n 1 | cut -d = -f 2` 113 | if [ -z "$NODENAME" ]; then 114 | echo "vm.args needs to have a -name parameter." 115 | echo " -sname is not supported." 116 | exit 1 117 | else 118 | NAME_TYPE="-name" 119 | NAME="${NODENAME# *}" 120 | fi 121 | fi 122 | 123 | PIPE_DIR="${PIPE_DIR:-/tmp/erl_pipes/$NAME/}" 124 | 125 | # Extract the target cookie 126 | #COOKIE_ARG=`grep -e '-setcookie' $RUNNER_ETC_DIR/vm.args` 127 | if [ -z "$COOKIE_ARG" ]; then 128 | COOKIE=`egrep '^[ \t]*distributed_cookie[ \t]*=[ \t]*' $RUNNER_ETC_DIR/$CUTTLEFISH_CONF 2> /dev/null | cut -d = -f 2 | tr -d ' '` 129 | if [ -z "$COOKIE" ]; then 130 | echo "vm.args needs to have a -setcookie parameter." 131 | exit 1 132 | else 133 | COOKIE_ARG="-setcookie $COOKIE" 134 | fi 135 | fi 136 | 137 | find_erts_dir 138 | export ROOTDIR="$RELEASE_ROOT_DIR" 139 | export BINDIR="$ERTS_DIR/bin" 140 | export EMU="beam" 141 | export PROGNAME="erl" 142 | export LD_LIBRARY_PATH="$ERTS_DIR/lib:$LD_LIBRARY_PATH" 143 | ERTS_LIB_DIR="$ERTS_DIR/../lib" 144 | CUTTLEFISHCMD="$ERTS_DIR/bin/escript $RUNNER_BASE_DIR/bin/cuttlefish" 145 | 146 | cd "$ROOTDIR" 147 | 148 | if CUTTLEFISH_CONFIG=$($CUTTLEFISHCMD -e $RUNNER_ETC_DIR -d $RUNNER_BASE_DIR/generated.conf -s $RUNNER_BASE_DIR/share/schema/ -c $RUNNER_ETC_DIR/$CUTTLEFISH_CONF) 149 | then 150 | CONFIG_FILES="$CUTTLEFISH_CONFIG" 151 | else 152 | echo "Cuttlefish failed! Oh no!" 
153 | exit 1 154 | fi 155 | 156 | 157 | # Parse out release and erts info 158 | START_ERL=`cat $RUNNER_BASE_DIR/releases/start_erl.data` 159 | ERTS_VSN=${START_ERL% *} 160 | APP_VSN=${START_ERL#* } 161 | 162 | # TODO: look in the release otherwise use which 163 | ESCRIPT=escript 164 | NODETOOL_PATH=$RUNNER_BASE_DIR/bin 165 | NODETOOL=$NODETOOL_PATH/nodetool 166 | # Setup command to control the node 167 | NODETOOL="$ESCRIPT $NODETOOL $NAME_ARG $COOKIE_ARG" 168 | 169 | ensure_node_running() 170 | { 171 | # Make sure the local node IS running 172 | if ! relx_nodetool "ping"; then 173 | echo "Node is not running!" 174 | exit 1 175 | fi 176 | } 177 | 178 | cluster_admin() 179 | { 180 | case "$1" in 181 | join) 182 | if [ $# -ne 2 ]; then 183 | echo "Usage: $SCRIPT cluster join " 184 | exit 1 185 | fi 186 | ensure_node_running 187 | relx_nodetool rpc {{ release_name }}_console staged_join "$2" 188 | ;; 189 | leave) 190 | if [ $# -eq 1 ]; then 191 | ensure_node_running 192 | relx_nodetool rpc riak_core_console stage_leave 193 | elif [ $# -eq 2 ]; then 194 | ensure_node_running 195 | relx_nodetool rpc riak_core_console stage_leave "$2" 196 | else 197 | echo "Usage: $SCRIPT cluster leave []" 198 | exit 1 199 | fi 200 | ;; 201 | force-remove) 202 | if [ $# -ne 2 ]; then 203 | echo "Usage: $SCRIPT cluster force-remove " 204 | exit 1 205 | fi 206 | ensure_node_running 207 | relx_nodetool rpc riak_core_console stage_remove "$2" 208 | ;; 209 | replace) 210 | if [ $# -ne 3 ]; then 211 | echo "Usage: $SCRIPT cluster replace " 212 | exit 1 213 | fi 214 | ensure_node_running 215 | relx_nodetool rpc riak_core_console stage_replace "$2" "$3" 216 | ;; 217 | force-replace) 218 | if [ $# -ne 3 ]; then 219 | echo "Usage: $SCRIPT cluster force-replace " 220 | exit 1 221 | fi 222 | ensure_node_running 223 | relx_nodetool rpc riak_core_console stage_force_replace "$2" "$3" 224 | ;; 225 | plan) 226 | ensure_node_running 227 | relx_nodetool rpc riak_core_console print_staged 228 | ;; 229 | 
commit) 230 | ensure_node_running 231 | relx_nodetool rpc riak_core_console commit_staged 232 | ;; 233 | clear) 234 | ensure_node_running 235 | relx_nodetool rpc riak_core_console clear_staged 236 | ;; 237 | *) 238 | echo "\ 239 | Usage: $SCRIPT cluster 240 | 241 | The following commands stage changes to cluster membership. These commands 242 | do not take effect immediately. After staging a set of changes, the staged 243 | plan must be committed to take effect: 244 | 245 | join Join node to the cluster containing 246 | leave Have this node leave the cluster and shutdown 247 | leave Have leave the cluster and shutdown 248 | 249 | force-remove Remove from the cluster without 250 | first handing off data. Designed for 251 | crashed, unrecoverable nodes 252 | 253 | replace Have transfer all data to , 254 | and then leave the cluster and shutdown 255 | 256 | force-replace Reassign all partitions owned by to 257 | without first handing off data, and 258 | remove from the cluster. 259 | 260 | Staging commands: 261 | plan Display the staged changes to the cluster 262 | commit Commit the staged changes 263 | clear Clear the staged changes 264 | " 265 | esac 266 | } 267 | 268 | # Check the first argument for instructions 269 | case "$1" in 270 | down) 271 | if [ $# -ne 2 ]; then 272 | echo "Usage: $SCRIPT down " 273 | exit 1 274 | fi 275 | 276 | ensure_node_running 277 | relx_nodetool rpc {{ release_name }}_console down $@ 278 | ;; 279 | 280 | ringready) 281 | if [ $# -ne 1 ]; then 282 | echo "Usage: $SCRIPT ringready" 283 | exit 1 284 | fi 285 | 286 | ensure_node_running 287 | relx_nodetool rpc {{ release_name }}_console ringready '' 288 | ;; 289 | 290 | member[_-]status) 291 | if [ $# -ne 1 ]; then 292 | echo "Usage: $SCRIPT $1" 293 | exit 1 294 | fi 295 | 296 | ensure_node_running 297 | relx_nodetool rpc riak_core_console member_status '' 298 | ;; 299 | 300 | ring[_-]status) 301 | if [ $# -ne 1 ]; then 302 | echo "Usage: $SCRIPT $1" 303 | exit 1 304 | fi 305 | 306 | 
ensure_node_running 307 | relx_nodetool rpc riak_core_console ring_status '' 308 | ;; 309 | 310 | services) 311 | relx_nodetool rpcterms riak_core_node_watcher services '' 312 | ;; 313 | 314 | wait[_-]for[_-]service) 315 | SVC=$2 316 | TARGETNODE=$3 317 | if [ $# -lt 3 ]; then 318 | echo "Usage: $SCRIPT $1 " 319 | exit 1 320 | fi 321 | 322 | while (true); do 323 | # Make sure riak_core_node_watcher is up and running locally before trying to query it 324 | # to avoid ugly (but harmless) error messages 325 | NODEWATCHER=`$NODETOOL rpcterms erlang whereis "'riak_core_node_watcher'."` 326 | if [ "$NODEWATCHER" = "undefined" ]; then 327 | echo "$SVC is not up: node watcher is not running" 328 | continue 329 | fi 330 | 331 | # Get the list of services that are available on the requested node 332 | SERVICES=`$NODETOOL rpcterms riak_core_node_watcher services "'${TARGETNODE}'."` 333 | echo "$SERVICES" | grep "[[,]$SVC[],]" > /dev/null 2>&1 334 | if [ "X$?" = "X0" ]; then 335 | echo "$SVC is up" 336 | exit 0 337 | else 338 | echo "$SVC is not up: $SERVICES" 339 | fi 340 | sleep 3 341 | done 342 | ;; 343 | cluster) 344 | shift 345 | cluster_admin "$@" 346 | ;; 347 | *) 348 | echo "Usage: $SCRIPT { cluster | down | ringready | member-status | " 349 | echo " ring-status | services | wait-for-service " 350 | exit 1 351 | ;; 352 | esac 353 | -------------------------------------------------------------------------------- /config/advanced.config: -------------------------------------------------------------------------------- 1 | [ 2 | { udon, []}, 3 | {riak_core, [ 4 | {schema_dirs, ["share/schema"]} 5 | %% udon valid permissions to grant 6 | % {permissions, [{ udon, [put, get, list, grant, delete]}]} 7 | ]}, 8 | %% SASL config 9 | {sasl, [ 10 | {sasl_error_logger, {file, "log/sasl-error.log"}}, 11 | {errlog_type, error}, 12 | {error_logger_mf_dir, "log/sasl"}, % Log directory 13 | {error_logger_mf_maxbytes, 10485760}, % 10 MB max file size 14 | {error_logger_mf_maxfiles, 5} % 5 
files max 15 | ] 16 | }, 17 | 18 | %% Lager config 19 | %% see https://github.com/basho/lager#configuration 20 | %% see https://github.com/basho/lager/blob/master/src/lager.app.src 21 | {lager, [ 22 | {handlers, [ 23 | {lager_console_backend, info}, 24 | {lager_file_backend, [{file, "error.log"}, {level, error}, 25 | {size, 10485760}, {date, "$D0"}, {count, 5}]}, 26 | {lager_file_backend, [{file, "console.log"}, {level, info}, 27 | {size, 10485760}, {date, "$D0"}, {count, 5}]} 28 | ]} 29 | ]} 30 | ]. 31 | -------------------------------------------------------------------------------- /config/sys.config: -------------------------------------------------------------------------------- 1 | [ 2 | { udon, []} 3 | ]. 4 | -------------------------------------------------------------------------------- /config/vars.config: -------------------------------------------------------------------------------- 1 | {cuttlefish_conf, "udon.conf"}. 2 | {rel_name, "udon"}. 3 | {node, "udon@127.0.0.1"}. 4 | 5 | {web_ip, "127.0.0.1"}. 6 | {web_port, 8098}. 7 | {handoff_port, 8099}. 8 | {handoff_ip, "0.0.0.0"}. 9 | {sasl_error_log, "./log/sasl-error.log"}. 10 | {sasl_log_dir, "./log/sasl"}. 11 | 12 | {platform_bin_dir, "./bin"}. 13 | {platform_data_dir, "../udon_data"}. 14 | {platform_etc_dir, "../udon_config"}. 15 | {platform_lib_dir, "./lib"}. 16 | {platform_log_dir, "./log"}. 17 | 18 | {crash_dump, "erl_crash.dump"}. 19 | -------------------------------------------------------------------------------- /config/vars_dev1.config: -------------------------------------------------------------------------------- 1 | {node, "udon1@127.0.0.1"}. 2 | 3 | {web_port, 8198}. 4 | {handoff_port, 8199}. 5 | -------------------------------------------------------------------------------- /config/vars_dev2.config: -------------------------------------------------------------------------------- 1 | {node, "udon2@127.0.0.1"}. 2 | 3 | {web_port, 8298}. 4 | {handoff_port, 8299}. 
5 | -------------------------------------------------------------------------------- /config/vars_dev3.config: -------------------------------------------------------------------------------- 1 | {node, "udon3@127.0.0.1"}. 2 | 3 | {web_port, 8398}. 4 | {handoff_port, 8399}. 5 | -------------------------------------------------------------------------------- /config/vm.args: -------------------------------------------------------------------------------- 1 | -name udon@127.0.0.1 2 | 3 | -setcookie udon_cookie 4 | 5 | +K true 6 | +A30 7 | -------------------------------------------------------------------------------- /priv/01-udon.schema: -------------------------------------------------------------------------------- 1 | %%-*- mode: erlang -*- 2 | %% ex: ft=erlang ts=4 sw=4 et 3 | 4 | %% @doc Enable/Disable HTTP API 5 | {mapping, "http.enabled", "udon.http_enabled", [ 6 | {datatype, {flag, yes, no}}, 7 | {default, yes} 8 | ]}. 9 | 10 | %% @doc port to listen to for HTTP API 11 | {mapping, "http.port", "udon.http_port", [ 12 | {datatype, integer}, 13 | {default, 8080} 14 | ]}. 15 | 16 | %% @doc number of acceptors to user for HTTP API 17 | {mapping, "http.acceptors", "udon.http_acceptors", [ 18 | {datatype, integer}, 19 | {default, 100} 20 | ]}. 21 | 22 | %% @doc Enable/Disable HTTPS API 23 | {mapping, "https.enabled", "udon.https_enabled", [ 24 | {datatype, {flag, yes, no}}, 25 | {default, no} 26 | ]}. 27 | 28 | %% @doc port to listen to for HTTPS API 29 | {mapping, "https.port", "udon.https_port", [ 30 | {datatype, integer}, 31 | {default, 8443} 32 | ]}. 33 | 34 | %% @doc number of acceptors to use for HTTPS API 35 | {mapping, "https.acceptors", "udon.https_acceptors", [ 36 | {datatype, integer}, 37 | {default, 100} 38 | ]}. 39 | 40 | %% @doc Enable/Disable HTTP CORS API 41 | {mapping, "http.cors.enabled", "udon.cors_enabled", [ 42 | {datatype, {flag, yes, no}}, 43 | {default, no} 44 | ]}. 
%% @doc HTTP CORS API allowed origins, it can be a comma separated list of
%% origins to accept or * to accept all
{mapping, "http.cors.origins", "udon.cors_origins", [
  {default, "*"}
]}.

%% Turn the raw origins setting into the value the app reads:
%% the atom 'any' for the "*" wildcard, otherwise a list of
%% whitespace-trimmed binaries; empty entries (stray commas) are dropped.
{translation, "udon.cors_origins",
fun(Conf) ->
    Setting = cuttlefish:conf_get("http.cors.origins", Conf),
    case Setting of
        "*" -> any;
        CSVs ->
            Tokens = string:tokens(CSVs, ","),
            %% strip surrounding whitespace, then convert to binary
            Cleanup = fun (Token) ->
                CleanToken = string:strip(Token),
                list_to_binary(CleanToken)
            end,
            FilterEmptyStr = fun ("") -> false; (_) -> true end,
            lists:filter(FilterEmptyStr, lists:map(Cleanup, Tokens))
    end
end}.

%% @doc HTTP CORS API a comma separated list of allowed headers to accept
{mapping, "http.cors.headers", "udon.cors_headers", []}.

%% Same CSV-to-binary-list conversion as udon.cors_origins, minus the
%% wildcard case. NOTE(review): this mapping declares no default, so this
%% translation presumably only runs when http.cors.headers is actually
%% set in the conf file — confirm against cuttlefish's behavior for
%% unset mappings.
{translation, "udon.cors_headers",
fun(Conf) ->
    CSVs = cuttlefish:conf_get("http.cors.headers", Conf),
    Tokens = string:tokens(CSVs, ","),
    Cleanup = fun (Token) ->
        CleanToken = string:strip(Token),
        list_to_binary(CleanToken)
    end,
    FilterEmptyStr = fun ("") -> false; (_) -> true end,
    lists:filter(FilterEmptyStr, lists:map(Cleanup, Tokens))
end}.

%% @doc HTTP CORS API indicates how long the results of a preflight request can
%% be cached
{mapping, "http.cors.maxage", "udon.cors_max_age_secs", [
  {datatype, {duration, s}},
  {default, "60s"}
]}.

%% @doc secret used to encrypt the session token, IMPORTANT: change this
{mapping, "auth.secret", "udon.auth_secret", [
  {default, "changeme"}
]}.

%% Store the secret as a binary rather than a charlist.
{translation, "udon.auth_secret",
fun(Conf) ->
    Setting = cuttlefish:conf_get("auth.secret", Conf),
    list_to_binary(Setting)
end}.

%% @doc time a session is valid after login
{mapping, "auth.session.duration", "udon.session_duration_secs", [
  {datatype, {duration, s}},
  {default, "24h"}
]}.
106 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | {erl_opts, [debug_info, {parse_transform, lager_transform}]}. 2 | 3 | {deps, [ 4 | {riak_core, "3.0.9", {pkg, riak_core_ng}}, 5 | {cowboy, "2.1.0"}, 6 | jsone, 7 | pbkdf2, 8 | exometer_core 9 | ]}. 10 | 11 | {relx, [{release, {udon, "0.1.0"}, 12 | [udon, 13 | cowboy, 14 | jsone, 15 | cuttlefish, 16 | sasl]}, 17 | 18 | {dev_mode, true}, 19 | {include_erts, false}, 20 | 21 | {sys_config, "./config/sys.config"}, 22 | {vm_args, "./config/vm.args"}, 23 | 24 | {overlay_vars, "./config/vars.config"}, 25 | {overlay, [ 26 | {mkdir, "etc"}, 27 | {mkdir, "bin"}, 28 | {mkdir, "data/ring"}, 29 | {mkdir, "log/sasl"}, 30 | {template, "./config/admin_bin", "bin/udon-admin"}, 31 | {template, "./config/advanced.config", "etc/advanced.config"} 32 | ]} 33 | ]}. 34 | 35 | {plugins, [rebar3_run]}. 36 | {project_plugins, [rebar3_cuttlefish]}. 37 | 38 | {profiles, [ 39 | {prod, [{relx, [{dev_mode, false}, {include_erts, true}]}]}, 40 | {dev1, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_dev1.config"]}]}]}, 41 | {dev2, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_dev2.config"]}]}]}, 42 | {dev3, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_dev3.config"]}]}]} 43 | ]}. 

%% Per-dependency build overrides. The two NIF/driver-bearing deps
%% (eleveldb, riak_ensemble) are routed through the port compiler (pc)
%% plugin so their C/C++ artifacts get built; the remaining overrides
%% pin compiler options for older dependencies.
{overrides,
 [{override, eleveldb,
   [
    %% Compilation only counts as successful once the NIF exists.
    {artifacts, ["priv/eleveldb.so"]},
    %% Run the bundled build script (deps fetch, then build) before
    %% the Erlang compile step.
    {pre_hooks, [{compile, "c_src/build_deps.sh get-deps"},
                 {compile, "c_src/build_deps.sh"}]},

    {post_hooks, [{clean, "c_src/build_deps.sh clean"}]},

    {plugins, [pc]},

    {provider_hooks, [{post,
                       [{compile, {pc, compile}},
                        {clean, {pc, clean}}
                       ]
                      }]
    }
   ]
  },
  {override, riak_ensemble,
   [
    {artifacts, ["priv/riak_ensemble_drv.so"]},
    {plugins, [pc]},
    {provider_hooks, [{post,
                       [{compile, {pc, compile}},
                        {clean, {pc, clean}}
                       ]}]},
    {erl_opts, [debug_info,
                warn_untyped_record,
                {parse_transform, lager_transform}]}
   ]},
  {override, riak_core,
   [
    %% platform_define regexes match the OTP release string:
    %% "^[0-9]+" (OTP 17+ numeric scheme) enables namespaced_types,
    %% "^R15" enables the old hash implementation.
    {erl_opts, [debug_info,
                {parse_transform, lager_transform},
                {platform_define, "^[0-9]+", namespaced_types},
                {platform_define, "^R15", "old_hash"}]}
   ]},
  {override, poolboy,
   [{erl_opts, [debug_info, {platform_define, "^[0-9]+", namespaced_types}]}]},
  {override, cuttlefish,
   %% Emulator args for the cuttlefish escript: one scheduler, no
   %% async threads.
   [{escript_emu_args, "%%! -escript main cuttlefish_escript +S 1 +A 0\n"}]}
 ]}.
88 | -------------------------------------------------------------------------------- /rebar.lock: -------------------------------------------------------------------------------- 1 | {"1.1.0", 2 | [{<<"basho_stats">>,{pkg,<<"basho_stats">>,<<"1.0.3">>},1}, 3 | {<<"bear">>,{pkg,<<"bear">>,<<"0.8.5">>},2}, 4 | {<<"blume">>,{pkg,<<"blume">>,<<"0.1.1">>},1}, 5 | {<<"chash">>,{pkg,<<"chash">>,<<"0.1.2">>},1}, 6 | {<<"clique">>,{pkg,<<"clique">>,<<"0.3.10">>},1}, 7 | {<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.1.0">>},0}, 8 | {<<"cowlib">>,{pkg,<<"cowlib">>,<<"2.0.1">>},1}, 9 | {<<"cuttlefish">>,{pkg,<<"cuttlefish">>,<<"2.0.12">>},1}, 10 | {<<"eleveldb">>,{pkg,<<"eleveldb">>,<<"2.2.20">>},1}, 11 | {<<"exometer_core">>,{pkg,<<"exometer_core">>,<<"1.5.2">>},0}, 12 | {<<"folsom">>,{pkg,<<"folsom">>,<<"0.8.5">>},1}, 13 | {<<"getopt">>,{pkg,<<"getopt">>,<<"0.8.2">>},2}, 14 | {<<"goldrush">>,{pkg,<<"goldrush">>,<<"0.1.9">>},1}, 15 | {<<"hut">>,{pkg,<<"hut">>,<<"1.2.0">>},1}, 16 | {<<"jam">>,{pkg,<<"jam">>,<<"1.0.0">>},1}, 17 | {<<"jsone">>,{pkg,<<"jsone">>,<<"1.4.5">>},0}, 18 | {<<"lager">>,{pkg,<<"lager">>,<<"3.5.2">>},1}, 19 | {<<"parse_trans">>,{pkg,<<"parse_trans">>,<<"3.0.0">>},1}, 20 | {<<"pbkdf2">>,{pkg,<<"pbkdf2">>,<<"2.0.0">>},0}, 21 | {<<"poolboy">>,{pkg,<<"basho_poolboy">>,<<"0.8.2">>},1}, 22 | {<<"ranch">>,{pkg,<<"ranch">>,<<"1.4.0">>},1}, 23 | {<<"riak_core">>,{pkg,<<"riak_core_ng">>,<<"3.0.9">>},0}, 24 | {<<"riak_ensemble">>,{pkg,<<"riak_ensemble_ng">>,<<"2.4.0">>},1}, 25 | {<<"riak_sysmon">>,{pkg,<<"riak_sysmon">>,<<"2.1.5">>},1}, 26 | {<<"setup">>,{pkg,<<"setup">>,<<"1.8.4">>},1}]}. 
27 | [ 28 | {pkg_hash,[ 29 | {<<"basho_stats">>, <<"7E1174151509C64FCC1934120ED32295E14F84DAAE7F84926BA2C8D3700D146C">>}, 30 | {<<"bear">>, <<"E95FCA1627CD9E15BAF93CE0A52AFF16917BAF325F0EE65B88CD715376CD2344">>}, 31 | {<<"blume">>, <<"CFB4F43688690BA81C6A79F54E4678CFD5FDEDAB692F277AE740AE4A3897360D">>}, 32 | {<<"chash">>, <<"AF02484F2640C653C4B9A8557A14CA0704989DBEDB27E7CCBC442F1903A3BCA7">>}, 33 | {<<"clique">>, <<"29ECBA7EC61DED866197164D33F61D1CBF892ED85FC933673D7E50D0689D34FB">>}, 34 | {<<"cowboy">>, <<"69F9DB3B23C24AB6B3A169A6357130C16B39CDA1A1F8C582F818883EE552589D">>}, 35 | {<<"cowlib">>, <<"4DFFFB1DB296EAB9F2E8B95EE3017007F674BC920CE30AEB5A53BBDA82FC38C0">>}, 36 | {<<"cuttlefish">>, <<"1441A12BCE207F7FC796A4DA50D47080D21E83E15309AD6496DE27840A54D5FC">>}, 37 | {<<"eleveldb">>, <<"1FFF63A5055BBF4BF821F797EF76065882B193F5E8095F95FCD9287187773B58">>}, 38 | {<<"exometer_core">>, <<"62A99A361BA8A14D53857D4C716A191E810299D2F43C5C981EB7B086C0BFCCE1">>}, 39 | {<<"folsom">>, <<"94A027B56FE84FEED264F9B33CB4C6AC9A801FAD84B87DBDA0836CE83C3B8D69">>}, 40 | {<<"getopt">>, <<"B17556DB683000BA50370B16C0619DF1337E7AF7ECBF7D64FBF8D1D6BCE3109B">>}, 41 | {<<"goldrush">>, <<"F06E5D5F1277DA5C413E84D5A2924174182FB108DABB39D5EC548B27424CD106">>}, 42 | {<<"hut">>, <<"0089DF0FAA2827C605BBADA88153F24FFF5EA7A4BE32ECF0250A7FDC2719CAFB">>}, 43 | {<<"jam">>, <<"ED9B180F2F3A775E6A47AC490954976802F0638C19A393F3E86D4BA4CF890582">>}, 44 | {<<"jsone">>, <<"547AEFEC1C67FE3283F77BAC4DAF85F3F593002764F768FAAA423875A51A2320">>}, 45 | {<<"lager">>, <<"614A8C8F67BF99B69EB264EA22121AD25511C055AAEC09B086773D5108C6767F">>}, 46 | {<<"parse_trans">>, <<"9E96B1C9C3A0DF54E7B76F8F685D38BFA1EB21B31E042B1D1A5A70258E4DB1E3">>}, 47 | {<<"pbkdf2">>, <<"11C23279FDED5C0027AB3996CFAE77805521D7EF4BABDE2BD7EC04A9086CF499">>}, 48 | {<<"poolboy">>, <<"AD5524BF4B8D30EEE0E5386847B74DAD739BB6AE1893F3AF10F54D427ACC96E4">>}, 49 | {<<"ranch">>, <<"10272F95DA79340FA7E8774BA7930B901713D272905D0012B06CA6D994F8826B">>}, 50 | 
{<<"riak_core">>, <<"F6E27FB67C9CB9A3FB7AEF38EC1BB5633624D7B78E32E73FE7A0045F3514F032">>}, 51 | {<<"riak_ensemble">>, <<"6570463BF09F1200C003DE298C5D6615C418D3BF0A2DF5CAF469A9DF2558059A">>}, 52 | {<<"riak_sysmon">>, <<"2331BBCAEBE73EA7B2449F4C765827142476CC73D7D86999B4209EE005694D6B">>}, 53 | {<<"setup">>, <<"738DB0685DC1741F45C6A9BF78478E0D5877F3D0876C0B50FD02F0210EDB5AA4">>}]} 54 | ]. 55 | --------------------------------------------------------------------------------