├── LICENSE ├── README.rst ├── rebar3_riak_core.app.src.tpl ├── rebar3_riak_core.erl.tpl ├── rebar3_riak_core.erlang_vm.schema ├── rebar3_riak_core.lager.schema ├── rebar3_riak_core.riak_core.schema ├── rebar3_riak_core.template ├── rebar3_riak_core_Makefile.tpl ├── rebar3_riak_core_README.rst.tpl ├── rebar3_riak_core_admin_runner ├── rebar3_riak_core_advanced.config.tpl ├── rebar3_riak_core_app.erl.tpl ├── rebar3_riak_core_config.schema.tpl ├── rebar3_riak_core_console.erl.tpl ├── rebar3_riak_core_editorconfig.tpl ├── rebar3_riak_core_gitignore.tpl ├── rebar3_riak_core_rebar.config.tpl ├── rebar3_riak_core_sup.erl.tpl ├── rebar3_riak_core_vars.config.tpl ├── rebar3_riak_core_vars_dev1.config.tpl ├── rebar3_riak_core_vars_dev2.config.tpl ├── rebar3_riak_core_vars_dev3.config.tpl └── rebar3_riak_core_vnode.erl.tpl /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | rebar3 riak_core template 2 | ========================= 3 | 4 | A `rebar3 `_ template for riak_core applications. 5 | 6 | Setup 7 | ----- 8 | 9 | `Install rebar3 `_ if you haven't already. 
10 | 11 | Then install this template:: 12 | 13 | mkdir -p ~/.config/rebar3/templates 14 | git clone https://github.com/marianoguerra/rebar3_template_riak_core.git ~/.config/rebar3/templates/rebar3_template_riak_core 15 | 16 | Use 17 | --- 18 | 19 | :: 20 | 21 | mkdir ricor 22 | cd ricor 23 | rebar3 new rebar3_riak_core name=ricor 24 | rebar3 release 25 | rebar3 run 26 | 27 | (ricor@127.0.0.1)1> ricor:ping(). 28 | {pong,981946412581700398168100746981252653831329677312} 29 | 30 | Author 31 | ------ 32 | 33 | Mariano Guerra 34 | 35 | License 36 | ------- 37 | 38 | Apache 2.0 39 | -------------------------------------------------------------------------------- /rebar3_riak_core.app.src.tpl: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | {application, {{ name }}, 3 | [ 4 | {description, "A Riak Core Application"}, 5 | {vsn, "1"}, 6 | {registered, []}, 7 | {applications, [ 8 | kernel, 9 | stdlib, 10 | sasl, 11 | riak_core, 12 | setup 13 | ]}, 14 | {mod, { {{ name }}_app, []}}, 15 | {env, []} 16 | ]}. 17 | -------------------------------------------------------------------------------- /rebar3_riak_core.erl.tpl: -------------------------------------------------------------------------------- 1 | -module({{name}}). 2 | 3 | -export([ 4 | ping/0 5 | ]). 6 | 7 | -ignore_xref([ 8 | ping/0 9 | ]). 10 | 11 | %% Public API 12 | 13 | %% @doc Pings a random vnode to make sure communication is functional 14 | ping() -> 15 | % the argument to chash_key has to be a two-item tuple; since it comes from riak 16 | % and the full key has a bucket, we use a constant in the bucket position 17 | % and a timestamp as the key so we hit different vnodes on each call 18 | DocIdx = riak_core_util:chash_key({<<"ping">>, term_to_binary(os:timestamp())}), 19 | % ask for 1 vnode index to send this request to; change N to get more 20 | % vnodes, for example for replication 21 | N = 1, 22 | PrefList = riak_core_apl:get_primary_apl(DocIdx, N, {{name}}), 23 | [{IndexNode, _Type}] = PrefList, 24 | riak_core_vnode_master:sync_spawn_command(IndexNode, ping, {{name}}_vnode_master). 25 | -------------------------------------------------------------------------------- /rebar3_riak_core.erlang_vm.schema: -------------------------------------------------------------------------------- 1 | %%-*- mode: erlang -*- 2 | 3 | %% @doc Starts the Erlang runtime system with SMP support 4 | %% enabled. This may fail if no runtime system with SMP support is 5 | %% available. The 'auto' setting starts the Erlang runtime system with 6 | %% SMP support enabled if it is available and more than one logical 7 | %% processor is detected. -smp disable starts a runtime system 8 | %% without SMP support. 9 | %% 10 | %% NOTE: The runtime system with SMP support will not be available on 11 | %% all supported platforms. See also the erlang.schedulers settings. 12 | %% 13 | %% NOTE: Some native extensions (NIFs) require use of the SMP 14 | %% emulator. 15 | %% 16 | %% More information at: http://erlang.org/doc/man/erl.html 17 | {mapping, "erlang.smp", "vm_args.-smp", [ 18 | {default, enable}, 19 | {datatype, {enum, [enable, auto, disable]}}, 20 | hidden 21 | ]}. 22 | 23 | %% @doc Sets the mapping of warning messages for error_logger. 24 | %% Messages sent to the error logger using one of the warning 25 | %% routines can be mapped to errors (e), warnings (w, the default), 26 | %% or info reports (i). 27 | {mapping, "erlang.W", "vm_args.+W", [ 28 | {default, "w"}, 29 | hidden 30 | ]}.
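%% As an illustration of how these cuttlefish mappings end up in the release
%% (an assumed example, not part of the upstream schema): with the defaults
%% above, the generated vm.args would typically carry entries along the
%% lines of
%%
%%   -smp enable
%%   +W w
%%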
31 | 32 | %% @doc Sets the number of scheduler threads to create and scheduler 33 | %% threads to set online when erlang.smp support has been enabled. The 34 | %% maximum for both values is 1024. If the Erlang runtime system is 35 | %% able to determine the amount of logical processors configured and 36 | %% logical processors available, schedulers.total will default to 37 | %% logical processors configured, and schedulers.online will default 38 | %% to logical processors available; otherwise, the default values will 39 | %% be 1. Schedulers may be omitted if schedulers.online is not and 40 | %% vice versa. 41 | %% 42 | %% If schedulers.total or schedulers.online is specified as a negative 43 | %% number, the value is subtracted from the default number of logical 44 | %% processors configured or logical processors available, 45 | %% respectively. 46 | %% 47 | %% Specifying the value 0 for Schedulers or SchedulersOnline resets 48 | %% the number of scheduler threads or scheduler threads online 49 | %% respectively to its default value. 50 | %% 51 | %% This option is ignored if the emulator doesn't have SMP support 52 | %% enabled (see the erlang.smp flag). 53 | %% 54 | %% More information at: http://erlang.org/doc/man/erl.html 55 | %% +S Schedulers:SchedulerOnline 56 | {mapping, "erlang.schedulers.total", "vm_args.+S", [ 57 | {default, undefined}, 58 | {datatype, integer}, 59 | {validators, ["=<1024"]} 60 | ]}. 61 | 62 | %% @see erlang.schedulers.total 63 | {mapping, "erlang.schedulers.online", "vm_args.+S", [ 64 | {default, undefined}, 65 | {datatype, integer}, 66 | {validators, ["=<1024"]} 67 | ]}. 68 | 69 | {translation, "vm_args.+S", 70 | fun(Conf) -> 71 | Total = cuttlefish:conf_get("erlang.schedulers.total", Conf, undefined), 72 | Online = cuttlefish:conf_get("erlang.schedulers.online", Conf, undefined), 73 | case {Total, Online} of 74 | {undefined, undefined} -> cuttlefish:unset(); 75 | {undefined, O} -> ":" ++ integer_to_list(O); 76 | {T, undefined} -> integer_to_list(T); 77 | _ -> integer_to_list(Total) ++ ":" ++ integer_to_list(Online) 78 | end 79 | end 80 | }. 81 | 82 | {validator, "=<1024", "has a maximum value of 1024", 83 | fun(X) -> X =< 1024 end}. 84 | 85 | %% @doc Enables or disables the kernel poll functionality if the 86 | %% emulator supports it. If the emulator does not support kernel poll, 87 | %% and the K flag is passed to the emulator, a warning is issued at 88 | %% startup. 89 | %% 90 | %% Similar information at: http://erlang.org/doc/man/erl.html 91 | {mapping, "erlang.K", "vm_args.+K", [ 92 | {default, on}, 93 | {datatype, flag}, 94 | hidden 95 | ]}. 96 | 97 | %%%% Tunables 98 | %% @doc Name of the Erlang node 99 | {mapping, "nodename", "vm_args.-name", [ 100 | {default, "{{node}}"} 101 | ]}. 102 | 103 | %% @doc Cookie for distributed node communication. All nodes in the 104 | %% same cluster should use the same cookie or they will not be able to 105 | %% communicate. 106 | {mapping, "distributed_cookie", "vm_args.-setcookie", [ 107 | {default, "erlang"} 108 | ]}. 109 | 110 | %% @doc Sets the number of threads in async thread pool, valid range 111 | %% is 0-1024. If thread support is available, the default is 64. 112 | %% 113 | %% More information at: http://erlang.org/doc/man/erl.html 114 | {mapping, "erlang.async_threads", "vm_args.+A", [ 115 | {default, 64}, 116 | {datatype, integer}, 117 | {validators, ["range:0-1024"]} 118 | ]}. 119 | 120 | {validator, "range:0-1024", "must be 0 to 1024", 121 | fun(X) -> X >= 0 andalso X =< 1024 end}. 
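%% A rough illustration of the scheduler translation above (assumed example
%% values, not part of the upstream schema): setting
%%
%%   erlang.schedulers.total = 8
%%   erlang.schedulers.online = 4
%%
%% makes the translation return "8:4", so the generated vm.args would carry
%% '+S 8:4'; leaving only one of the two set yields '+S 8' or '+S :4'
%% respectively, and leaving both unset emits no +S entry at all.
%%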
122 | 123 | %% Note: OTP R15 and earlier uses -env ERL_MAX_PORTS, R16+ uses +Q 124 | %% @doc The number of concurrent ports/sockets 125 | %% Valid range is 1024-134217727 126 | {mapping, "erlang.max_ports", 127 | cuttlefish:otp("R16", "vm_args.+Q", "vm_args.-env ERL_MAX_PORTS"), [ 128 | {default, 65536}, 129 | {datatype, integer}, 130 | {validators, ["range4ports"]} 131 | ]}. 132 | 133 | {validator, "range4ports", "must be 1024 to 134217727", 134 | fun(X) -> X >= 1024 andalso X =< 134217727 end}. 135 | 136 | %% @doc A non-negative integer which indicates how many times 137 | %% generational garbage collections can be done without forcing a 138 | %% fullsweep collection. In low-memory systems (especially without 139 | %% virtual memory), setting the value to 0 can help to conserve 140 | %% memory. 141 | %% 142 | %% More information at: 143 | %% http://www.erlang.org/doc/man/erlang.html#system_flag-2 144 | {mapping, "erlang.fullsweep_after", "vm_args.-env ERL_FULLSWEEP_AFTER", [ 145 | {default, 0}, 146 | {datatype, integer}, 147 | hidden, 148 | {validators, ["positive_integer"]} 149 | ]}. 150 | 151 | {validator, "positive_integer", "must be a positive integer", 152 | fun(X) -> X >= 0 end}. 153 | 154 | %% @doc Set the location of crash dumps 155 | {mapping, "erlang.crash_dump", "vm_args.-env ERL_CRASH_DUMP", [ 156 | {default, "{{crash_dump}}"}, 157 | {datatype, file}, 158 | hidden 159 | ]}. 160 | 161 | %% Note: OTP R15 and earlier uses -env ERL_MAX_ETS_TABLES, 162 | %% R16+ uses +e 163 | %% @doc Raise the ETS table limit 164 | {mapping, "erlang.max_ets_tables", 165 | cuttlefish:otp("R16B01", "vm_args.+e", "vm_args.-env ERL_MAX_ETS_TABLES"), [ 166 | {default, 256000}, 167 | {datatype, integer}, 168 | hidden 169 | ]}. 170 | 171 | %% @doc Raise the default erlang process limit 172 | {mapping, "erlang.process_limit", "vm_args.+P", [ 173 | {datatype, integer}, 174 | {default, 256000}, 175 | hidden 176 | ]}. 177 | 178 | %% @doc For nodes with many busy_dist_port events, Basho recommends 179 | %% raising the sender-side network distribution buffer size. 180 | %% 32MB may not be sufficient for some workloads and is a suggested 181 | %% starting point. Erlangers may know this as +zdbbl. 182 | %% The Erlang/OTP default is 1024 (1 megabyte). 183 | %% See: http://www.erlang.org/doc/man/erl.html#%2bzdbbl 184 | {mapping, "erlang.distribution_buffer_size", "vm_args.+zdbbl", [ 185 | {datatype, bytesize}, 186 | {commented, "32MB"}, 187 | hidden, 188 | {validators, ["zdbbl_range"]} 189 | ]}. 190 | 191 | {translation, "vm_args.+zdbbl", 192 | fun(Conf) -> 193 | ZDBBL = cuttlefish:conf_get("erlang.distribution_buffer_size", Conf, undefined), 194 | case ZDBBL of 195 | undefined -> undefined; 196 | X when is_integer(X) -> cuttlefish_util:ceiling(X / 1024); %% Bytes to Kilobytes; 197 | _ -> undefined 198 | end 199 | end 200 | }. 201 | 202 | {validator, "zdbbl_range", "must be between 1KB and 2097151KB", 203 | fun(ZDBBL) -> 204 | %% 2097151KB = 2147482624 205 | ZDBBL >= 1024 andalso ZDBBL =< 2147482624 206 | end 207 | }. 208 | 209 | %% @doc Set scheduler forced wakeup interval. All run queues will be 210 | %% scanned each Interval milliseconds. While there are sleeping 211 | %% schedulers in the system, one scheduler will be woken for each 212 | %% non-empty run queue found. An Interval of zero disables this 213 | %% feature, which also is the default. 214 | %% 215 | %% This feature is a workaround for lengthy executing native code, and 216 | %% native code that do not bump reductions properly. 
217 | %% 218 | %% More information: http://www.erlang.org/doc/man/erl.html#+sfwi 219 | {mapping, "erlang.schedulers.force_wakeup_interval", "vm_args.+sfwi", [ 220 | {commented, 500}, 221 | {datatype, integer}, 222 | hidden 223 | ]}. 224 | 225 | %% @doc For ease of firewall configuration, the Erlang distribution 226 | %% can be bound to a limited range of TCP ports. If this is set, and 227 | %% erlang.distribution.port_range.maximum is *unset*, only this port 228 | %% will be used. If the minimum is *unset*, no restriction will be 229 | %% made on the port range; instead Erlang will listen on a random 230 | %% high-numbered port. 231 | %% 232 | %% More information: http://www.erlang.org/faq/how_do_i.html#id55090 233 | %% http://www.erlang.org/doc/man/kernel_app.html 234 | {mapping, "erlang.distribution.port_range.minimum", "kernel.inet_dist_listen_min", [ 235 | {commented, 6000}, 236 | {datatype, integer}, 237 | hidden 238 | ]}. 239 | 240 | %% @see erlang.distribution.port_range.minimum 241 | {mapping, "erlang.distribution.port_range.maximum", "kernel.inet_dist_listen_max", [ 242 | {commented, 7999}, 243 | {datatype, integer}, 244 | hidden 245 | ]}. 246 | 247 | %% @doc Set the net_kernel's net_ticktime. 248 | %% 249 | %% More information: http://www.erlang.org/doc/man/kernel_app.html#net_ticktime 250 | %% and http://www.erlang.org/doc/man/net_kernel.html#set_net_ticktime-1 251 | {mapping, "erlang.distribution.net_ticktime", "vm_args.-kernel net_ticktime", [ 252 | {commented, 60}, 253 | {datatype, integer}, 254 | hidden 255 | ]}. 256 | 257 | %% @doc Set the memory allocation strategy for binary multiblock carriers. 258 | %% DalmatinerDB has long-lived `metric-io' processes that may cause reference 259 | %% counted binaries to reside in memory for longer periods of time. 260 | %% Setting this value to something other than the default may be 261 | %% useful if the `recon_alloc' library indicates that there may be excessive 262 | %% memory fragmentation. 263 | %% 264 | %% More information: http://erlang.org/doc/man/erts_alloc.html#strategy 265 | %% and https://blog.heroku.com/archives/2013/11/7/logplex-down-the-rabbit-hole 266 | {mapping, "erlang.binary_alloc_strategy", "vm_args.+MBas", [ 267 | {commented, bf}, 268 | {datatype, {enum, [bf, aobf, aoff, aoffcbf, gf, af]}}, 269 | hidden 270 | ]}. 271 | 272 | %% @doc Set the maximum multiblock carrier size (Kb). This controls how many 273 | %% multiblock carriers are created. 274 | %% Smaller values may increase the chance that mbcs are free of all blocks, 275 | %% allowing them to be readily released to the operating system. However, 276 | %% smaller sizes may increase the frequency of slower memory allocation 277 | %% requests from the VM to the OS, which could negate the performance benefit 278 | %% of mbcs. 279 | %% 280 | %% More information: http://erlang.org/doc/man/erts_alloc.html#mseg_mbc_sizes 281 | {mapping, "erlang.binary_alloc_multicarrier_limit", "vm_args.+MBlmbcs", [ 282 | {commented, 512}, 283 | {datatype, integer}, 284 | hidden, 285 | {validators, ["positive_integer"]} 286 | ]}. 287 | -------------------------------------------------------------------------------- /rebar3_riak_core.lager.schema: -------------------------------------------------------------------------------- 1 | %% -*- erlang -*- 2 | %% complex lager example 3 | %% @doc where do you want the console.log output: 4 | %% off : nowhere 5 | %% file: the file specified by log.console.file 6 | %% console : standard out 7 | %% both : log.console.file and standard out. 
8 | {mapping, "log.service", "lager.service", 9 | [{default, "{{service}}"}, 10 | {datatype, string}]}. 11 | 12 | {mapping, "log.console", "lager.handlers", 13 | [{default, file}, 14 | {datatype, {enum, [off, file, console, both]}}]}. 15 | 16 | %% @doc the log level of the console log 17 | {mapping, "log.console.level", "lager.handlers", 18 | [{default, info}, 19 | {datatype, {enum, [debug, info, warning, error]}}]}. 20 | 21 | %% @doc location of the console log 22 | {mapping, "log.console.file", "lager.handlers", 23 | [{datatype, file}, 24 | {default, "{{log_path}}/console.log"}]}. 25 | 26 | %% @doc location of the error log 27 | {mapping, "log.error.file", "lager.handlers", 28 | [{datatype, file}, 29 | {default, "{{log_path}}/error.log"}]}. 30 | 31 | %% @doc location of the debug log 32 | {mapping, "log.debug.file", "lager.handlers", 33 | [{datatype, [{atom, off}, file]}, 34 | {default, off}, 35 | {commented, "{{log_path}}/debug.log"}]}. 36 | 37 | %% *gasp* notice the same @mapping! 38 | %% @doc turn on syslog 39 | {mapping, "log.syslog", "lager.handlers", 40 | [{default, off}, 41 | {datatype, {enum, [on, off]}}]}. 42 | 43 | {translation, 44 | "lager.handlers", 45 | fun(Conf) -> 46 | Service = cuttlefish:conf_get("log.service", Conf), 47 | SyslogHandler = 48 | case cuttlefish:conf_get("log.syslog", Conf) of 49 | on -> [{lager_syslog_backend, [Service, daemon, info]}]; 50 | _ -> [] 51 | end, 52 | ErrorHandler = 53 | case cuttlefish:conf_get("log.error.file", Conf) of 54 | undefined -> []; 55 | ErrorFilename -> [{lager_file_backend, [{file, ErrorFilename}, 56 | {level, error}, 57 | {size, 10485760}, 58 | {date, "$D0"}, 59 | {count, 5}]}] 60 | end, 61 | 62 | ConsoleLogLevel = cuttlefish:conf_get("log.console.level", Conf), 63 | ConsoleLogFile = cuttlefish:conf_get("log.console.file", Conf), 64 | 65 | ConsoleHandler = {lager_console_handler, ConsoleLogLevel}, 66 | ConsoleFileHandler = {lager_file_backend, [{file, ConsoleLogFile}, 67 | {level, ConsoleLogLevel}, 68 | {size, 10485760}, 69 | {date, "$D0"}, 70 | {count, 5}]}, 71 | 72 | ConsoleHandlers = case cuttlefish:conf_get("log.console", Conf) of 73 | off -> []; 74 | file -> [ConsoleFileHandler]; 75 | console -> [ConsoleHandler]; 76 | both -> [ConsoleHandler, ConsoleFileHandler]; 77 | _ -> [] 78 | end, 79 | DebugHandler = 80 | case cuttlefish:conf_get("log.debug.file", Conf) of 81 | undefined -> []; 82 | off -> []; 83 | "off" -> []; 84 | DebugFilename -> [{lager_file_backend, [{file, DebugFilename}, 85 | {level, debug}, 86 | {size, 10485760}, 87 | {date, "$D0"}, 88 | {count, 5}]}] 89 | end, 90 | 91 | SyslogHandler ++ ConsoleHandlers ++ ErrorHandler ++ DebugHandler 92 | end 93 | }. 94 | 95 | %% Lager Config 96 | 97 | %% @doc Whether to write a crash log, and where. 98 | %% Commented/omitted/undefined means no crash logger. 99 | {mapping, "log.crash.file", "lager.crash_log", 100 | [{default, "{{log_path}}/crash.log"}]}. 101 | 102 | %% @doc Maximum size in bytes of events in the crash log - defaults to 65536 103 | %% @datatype integer 104 | %% @mapping 105 | {mapping, "log.crash.msg_size", "lager.crash_log_msg_size", 106 | [{default, "64KB"}, 107 | {datatype, bytesize}]}. 108 | 109 | %% @doc Maximum size of the crash log in bytes, before its rotated, set 110 | %% to 0 to disable rotation - default is 0 111 | {mapping, "log.crash.size", "lager.crash_log_size", 112 | [{default, "10MB"}, 113 | {datatype, bytesize}]}. 114 | 115 | %% @doc What time to rotate the crash log - default is no time 116 | %% rotation. 
See the lager README for a description of this format: 117 | %% https://github.com/basho/lager/blob/master/README.org 118 | {mapping, "log.crash.date", "lager.crash_log_date", 119 | [{default, "$D0"}]}. 120 | 121 | %% @doc Number of rotated crash logs to keep, 0 means keep only the 122 | %% current one - default is 0 123 | {mapping, "log.crash.count", "lager.crash_log_count", 124 | [{default, 5}, 125 | {datatype, integer}]}. 126 | 127 | %% @doc Whether to redirect error_logger messages into lager - defaults to true 128 | {mapping, "log.error.redirect", "lager.error_logger_redirect", 129 | [{default, on}, 130 | {datatype, {enum, [on, off]}}]}. 131 | 132 | {translation, 133 | "lager.error_logger_redirect", 134 | fun(Conf) -> 135 | Setting = cuttlefish:conf_get("log.error.redirect", Conf), 136 | case Setting of 137 | on -> true; 138 | off -> false; 139 | _Default -> true 140 | end 141 | end}. 142 | 143 | %% @doc maximum number of error_logger messages to handle in a second 144 | %% lager 2.0.0 shipped with a limit of 50, which is a little low for riak's startup 145 | {mapping, "log.error.messages_per_second", "lager.error_logger_hwm", 146 | [{default, 100}, 147 | {datatype, integer}]}. 148 | 149 | %% SASL 150 | %% We should never care about this 151 | {mapping, "sasl", "sasl.sasl_error_logger", 152 | [{default, off}, 153 | {datatype, {enum, [on, off]}}, 154 | {level, advanced}]}. 155 | 156 | {translation, 157 | "sasl.sasl_error_logger", 158 | fun(Conf) -> 159 | case cuttlefish:conf_get("sasl", Conf) of %%how to pull default? 160 | on -> true; 161 | _ -> false 162 | end 163 | end 164 | }. 165 | -------------------------------------------------------------------------------- /rebar3_riak_core.riak_core.schema: -------------------------------------------------------------------------------- 1 | %%-*- mode: erlang -*- 2 | 3 | %% @doc enable active anti-entropy subsystem 4 | {mapping, "anti_entropy", "riak_core.anti_entropy", [ 5 | {datatype, {enum, [on, off, debug]}}, 6 | {default, on} 7 | ]}. 8 | 9 | { translation, 10 | "riak_core.anti_entropy", 11 | fun(Conf) -> 12 | Setting = cuttlefish:conf_get("anti_entropy", Conf), 13 | case Setting of 14 | on -> {on, []}; 15 | debug -> {on, [debug]}; 16 | off -> {off, []}; 17 | _Default -> {on, []} 18 | end 19 | end 20 | }. 21 | 22 | %% @doc Restrict how fast AAE can build hash trees. Building the tree 23 | %% for a given partition requires a full scan over that partition's 24 | %% data. Once built, trees stay built until they are expired. 25 | %% Config is of the form: 26 | %% {num-builds, per-timespan} 27 | %% Default is 1 build per hour. 28 | {mapping, "anti_entropy.build_limit.number", "riak_core.anti_entropy_build_limit", [ 29 | {default, 1}, 30 | {datatype, integer} 31 | ]}. 32 | 33 | {mapping, "anti_entropy.build_limit.per_timespan", "riak_core.anti_entropy_build_limit", [ 34 | {default, "1h"}, 35 | {datatype, {duration, ms}} 36 | ]}. 37 | 38 | {translation, 39 | "riak_core.anti_entropy_build_limit", 40 | fun(Conf) -> 41 | {cuttlefish:conf_get("anti_entropy.build_limit.number", Conf), 42 | cuttlefish:conf_get("anti_entropy.build_limit.per_timespan", Conf)} 43 | end}. 44 | 45 | %% @doc Determine how often hash trees are expired after being built. 46 | %% Periodically expiring a hash tree ensures the on-disk hash tree 47 | %% data stays consistent with the actual k/v backend data. It also 48 | %% helps Riak identify silent disk failures and bit rot. 
However, 49 | %% expiration is not needed for normal AAE operation and should be 50 | %% infrequent for performance reasons. The time is specified in 51 | %% milliseconds. The default is 1 week. 52 | {mapping, "anti_entropy.expire", "riak_core.anti_entropy_expire", [ 53 | {default, "1w"}, 54 | {datatype, {duration, ms}} 55 | ]}. 56 | 57 | %% @doc Limit how many AAE exchanges/builds can happen concurrently. 58 | {mapping, "anti_entropy.concurrency", "riak_core.anti_entropy_concurrency", [ 59 | {default, 2}, 60 | {datatype, integer} 61 | ]}. 62 | 63 | %% @doc The tick determines how often the AAE manager looks for work 64 | %% to do (building/expiring trees, triggering exchanges, etc). 65 | %% The default is every 15 seconds. Lowering this value will 66 | %% speed up the rate at which all replicas are synced across the cluster. 67 | %% Increasing the value is not recommended. 68 | {mapping, "anti_entropy.tick", "riak_core.anti_entropy_tick", [ 69 | {default, "15s"}, 70 | {datatype, {duration, ms}} 71 | ]}. 72 | 73 | %% @doc The directory where AAE hash trees are stored. 74 | {mapping, "anti_entropy.data_dir", "riak_core.anti_entropy_data_dir", [ 75 | {default, "{{platform_data_dir}}/anti_entropy"} 76 | ]}. 77 | 78 | 79 | %% @doc This parameter defines the percentage, 1 to 100, of total 80 | %% server memory to assign to leveldb. leveldb will dynamically 81 | %% adjust its internal cache sizes as Riak activates / inactivates 82 | %% vnodes on this server to stay within this size. The memory size 83 | %% can alternatively be assigned as a byte count via total_leveldb_mem instead. 84 | {mapping, "anti_entropy.total_leveldb_mem_percent", "riak_core.aae_total_leveldb_mem_percent", 85 | [{default, "80"}, 86 | {datatype, integer}]}. 87 | 88 | 89 | %% @doc This parameter defines the number of bytes of 90 | %% server memory to assign to leveldb. leveldb will dynamically 91 | %% adjust its internal cache sizes as Riak activates / inactivates 92 | %% vnodes on this server to stay within this size. The memory size 93 | %% can alternatively be assigned as a percentage of total server memory 94 | %% via total_leveldb_mem_percent instead. 95 | {mapping, "anti_entropy.total_leveldb_mem", "riak_core.aae_total_leveldb_mem", 96 | [{datatype, bytesize}, 97 | {level, advanced}]}. 98 | 99 | 100 | %% @doc The 'sync' parameter defines how new key/value data is placed in the 101 | %% recovery log. The recovery log is only used if the Riak program crashes or 102 | %% the server loses power unexpectedly. The parameter's original intent was 103 | %% to guarantee that each new key / value was written to the physical disk 104 | %% before leveldb responded with “write good”. The reality in modern servers 105 | %% is that many layers of data caching exist between the database program and 106 | %% the physical disks. This flag influences only one of the layers. 107 | {mapping, "anti_entropy.sync", "riak_core.aae_sync", 108 | [{default, false}, 109 | {datatype, {enum, [true, false]}}, 110 | {level, advanced}]}. 111 | 112 | %% @doc limited_developer_mem is a Riak-specific option that is used when 113 | %% a developer is testing a high number of vnodes and/or several VMs 114 | %% on a machine with limited physical memory. Do NOT use this option 115 | %% if making performance measurements. This option overwrites values 116 | %% given to write_buffer_size_min and write_buffer_size_max.
117 | {mapping, "anti_entropy.limited_developer_mem", "riak_core.aae_limited_developer_mem", 118 | [{default, false}, 119 | {datatype, {enum, [true, false]}}, 120 | {level, advanced}]}. 121 | 122 | 123 | %% @doc Each vnode first stores new key/value data in a memory based write 124 | %% buffer. This write buffer is in parallel to the recovery log mentioned 125 | %% in the “sync” parameter. Riak creates each vnode with a randomly sized 126 | %% write buffer for performance reasons. The random size is somewhere 127 | %% between write_buffer_size_min and write_buffer_size_max. 128 | {mapping, "anti_entropy.write_buffer_size_min", "riak_core.aae_write_buffer_size_min", 129 | [{default, "30MB"}, 130 | {datatype, bytesize}, 131 | {level, advanced}]}. 132 | 133 | {mapping, "anti_entropy.write_buffer_size_max", "riak_core.aae_write_buffer_size_max", 134 | [{default, "60MB"}, 135 | {datatype, bytesize}, 136 | {level, advanced}]}. 137 | 138 | %% @doc Whether the distributed throttle for active anti-entropy is 139 | %% enabled. 140 | {mapping, "anti_entropy.throttle", "riak_core.aae_throttle_kill_switch", [ 141 | {default, on}, 142 | {datatype, {flag, off, on}}, 143 | hidden 144 | ]}. 145 | 146 | %% @doc Sets the throttling tiers for active anti-entropy. Each tier 147 | %% is a minimum vnode mailbox size and a time-delay that the throttle 148 | %% should observe at that size and above. For example: 149 | %% 150 | %% anti_entropy.throttle.tier1.mailbox_size = 0 151 | %% anti_entropy.throttle.tier1.delay = 0ms 152 | %% anti_entropy.throttle.tier2.mailbox_size = 40 153 | %% anti_entropy.throttle.tier2.delay = 5ms 154 | %% 155 | %% If configured, there must be a tier which includes a mailbox size 156 | %% of 0. Both .mailbox_size and .delay must be set for each tier. 157 | %% @see anti_entropy.throttle 158 | {mapping, 159 | "anti_entropy.throttle.$tier.mailbox_size", 160 | "riak_core.aae_throttle_limits", [ 161 | {datatype, integer}, 162 | hidden, 163 | {validators, ["non_negative"]} 164 | ]}. 165 | 166 | %% @see anti_entropy.throttle.$tier.mailbox_size 167 | {mapping, 168 | "anti_entropy.throttle.$tier.delay", 169 | "riak_core.aae_throttle_limits", [ 170 | {datatype, {duration, ms}}, 171 | hidden 172 | ]}. 173 | 174 | {validator, 175 | "non_negative", 176 | "must be greater than or equal to 0", 177 | fun(Value) -> Value >= 0 end}. 178 | 179 | {translation, 180 | "riak_core.aae_throttle_limits", 181 | fun(Conf) -> 182 | %% Grab all of the possible names of tiers so we can ensure that 183 | %% both mailbox_size and delay are included for each tier. 184 | TierNamesM = cuttlefish_variable:fuzzy_matches(["anti_entropy", "throttle", "$tier", "mailbox_size"], Conf), 185 | TierNamesD = cuttlefish_variable:fuzzy_matches(["anti_entropy", "throttle", "$tier", "delay"], Conf), 186 | TierNames = lists:usort(TierNamesM ++ TierNamesD), 187 | Throttles = lists:sort(lists:foldl( 188 | fun({"$tier", Tier}, Settings) -> 189 | Mbox = cuttlefish:conf_get(["anti_entropy", "throttle", Tier, "mailbox_size"], Conf), 190 | Delay = cuttlefish:conf_get(["anti_entropy", "throttle", Tier, "delay"], Conf), 191 | [{Mbox - 1, Delay}|Settings] 192 | end, [], TierNames)), 193 | case Throttles of 194 | %% -1 is a magic "minimum" bound and must be included, so if it 195 | %% isn't present we call it invalid 196 | [{-1,_}|_] -> Throttles; 197 | _ -> cuttlefish:invalid("anti_entropy.throttle tiers must include a tier with mailbox_size 0") 198 | end 199 | end 200 | }. 
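%% A worked example of the throttle translation above (illustrative values
%% only, not part of the upstream schema): with the two tiers shown in the
%% documentation,
%%
%%   anti_entropy.throttle.tier1.mailbox_size = 0
%%   anti_entropy.throttle.tier1.delay = 0ms
%%   anti_entropy.throttle.tier2.mailbox_size = 40
%%   anti_entropy.throttle.tier2.delay = 5ms
%%
%% the fold subtracts 1 from each mailbox size, producing roughly
%% {aae_throttle_limits, [{-1, 0}, {39, 5}]} in the riak_core application
%% environment; a configuration that lacks a tier with mailbox_size 0 fails
%% the final check and is reported as invalid.
%%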
201 | 202 | 203 | %% @doc Each database .sst table file can include an optional "bloom filter" 204 | %% that is highly effective in shortcutting data queries that are destined 205 | %% to not find the requested key. The bloom_filter typically increases the 206 | %% size of an .sst table file by about 2%. This option must be set to true 207 | %% in the riak.conf to take effect. 208 | {mapping, "anti_entropy.bloomfilter", "riak_core.aae_use_bloomfilter", 209 | [{default, on}, 210 | {datatype, {enum, [on, off]}}]}. 211 | 212 | {translation, 213 | "riak_core.aae_use_bloomfilter", 214 | fun(Conf) -> 215 | case cuttlefish:conf_get("anti_entropy.bloomfilter", Conf) of 216 | on -> true; 217 | off -> false; 218 | _ -> true 219 | end 220 | end 221 | }. 222 | 223 | 224 | %% @doc sst_block_size defines the size threshold for a block / chunk of data 225 | %% within one .sst table file. Each new block gets an index entry in the .sst 226 | %% table file's master index. 227 | {mapping, "anti_entropy.block_size", "riak_core.aae_sst_block_size", 228 | [{default, "4KB"}, 229 | {datatype, bytesize}, 230 | {level, advanced}]}. 231 | 232 | 233 | %% @doc block_restart_interval defines the key count threshold for a new key 234 | %% entry in the key index for a block. 235 | %% Most clients should leave this parameter alone. 236 | {mapping, "anti_entropy.block_restart_interval", "riak_core.aae_block_restart_interval", 237 | [{default, 16}, 238 | {datatype, integer}, 239 | {level, advanced}]}. 240 | 241 | 242 | %% @doc verify_checksums controls whether or not validation occurs when Riak 243 | %% requests data from the leveldb database on behalf of the user. 244 | {mapping, "anti_entropy.verify_checksums", "riak_core.aae_verify_checksums", 245 | [{default, true}, 246 | {datatype, {enum, [true, false]}}, 247 | {level, advanced}]}. 248 | 249 | 250 | %% @doc verify_compaction controls whether or not validation occurs when 251 | %% leveldb reads data as part of its background compaction operations. 252 | {mapping, "anti_entropy.verify_compaction", "riak_core.aae_verify_compaction", 253 | [{default, true}, 254 | {datatype, {enum, [true, false]}}, 255 | {level, advanced}]}. 256 | 257 | %% @doc The number of worker threads performing LevelDB operations. 258 | {mapping, "anti_entropy.threads", "riak_core.aae_eleveldb_threads", 259 | [{default, 71}, 260 | {datatype, integer}, 261 | {level, advanced}]}. 262 | 263 | %% @doc Option to override LevelDB's use of fadvise(DONTNEED) with 264 | %% fadvise(WILLNEED) instead. WILLNEED can reduce disk activity on 265 | %% systems where physical memory exceeds the database size. 266 | {mapping, "anti_entropy.fadvise_willneed", "riak_core.aae_fadvise_willneed", 267 | [{default, false}, 268 | {datatype, {enum, [true, false]}}, 269 | {level, advanced}]}. 270 | 271 | %% Default Bucket Properties 272 | 273 | %% @doc The number of replicas stored. Note: See Replication 274 | %% Properties for further discussion. 275 | %% http://docs.basho.com/riak/latest/dev/advanced/cap-controls/ 276 | {mapping, "buckets.default.n_val", "riak_core.default_bucket_props.n_val", [ 277 | {datatype, integer}, 278 | {default, 3}, 279 | hidden 280 | ]}. 281 | 282 | %% @doc Number of partitions in the cluster (only valid when first 283 | %% creating the cluster). Must be a power of 2, minimum 8 and maximum 284 | %% 1024. 
285 | {mapping, "ring_size", "riak_core.ring_creation_size", [ 286 | {datatype, integer}, 287 | {default, 64}, 288 | {validators, ["ring_size^2", "ring_size_max", "ring_size_min"]}, 289 | {commented, 64} 290 | ]}. 291 | 292 | %% ring_size validators 293 | {validator, "ring_size_max", 294 | "2048 and larger are supported, but considered advanced config", 295 | fun(Size) -> 296 | Size =< 1024 297 | end}. 298 | 299 | {mapping, "buckets.default.pr", "riak_core.default_bucket_props.pr", [ 300 | {default, "0"}, 301 | {level, advanced} 302 | ]}. 303 | 304 | %% Cut and paste translation screams to be rewritten as a datatype, but that's a 305 | %% "nice to have" 306 | {translation, 307 | "riak_core.default_bucket_props.pr", 308 | fun(Conf) -> 309 | Setting = cuttlefish:conf_get("buckets.default.pr", Conf), 310 | case Setting of 311 | "quorum" -> quorum; 312 | "all" -> all; 313 | X -> 314 | try list_to_integer(Setting) of 315 | Int -> Int 316 | catch 317 | E:R -> error 318 | end 319 | end 320 | end 321 | }. 322 | 323 | {mapping, "buckets.default.r", "riak_core.default_bucket_props.r", [ 324 | {default, "quorum"}, 325 | {level, advanced} 326 | ]}. 327 | {translation, 328 | "riak_core.default_bucket_props.r", 329 | fun(Conf) -> 330 | Setting = cuttlefish:conf_get("buckets.default.r", Conf), 331 | case Setting of 332 | "quorum" -> quorum; 333 | "all" -> all; 334 | X -> 335 | try list_to_integer(Setting) of 336 | Int -> Int 337 | catch 338 | E:R -> error 339 | end 340 | end 341 | end 342 | }. 343 | 344 | {mapping, "buckets.default.w", "riak_core.default_bucket_props.w", [ 345 | {default, "quorum"}, 346 | {level, advanced} 347 | ]}. 348 | {translation, 349 | "riak_core.default_bucket_props.w", 350 | fun(Conf) -> 351 | Setting = cuttlefish:conf_get("buckets.default.w", Conf), 352 | case Setting of 353 | "quorum" -> quorum; 354 | "all" -> all; 355 | X -> 356 | try list_to_integer(Setting) of 357 | Int -> Int 358 | catch 359 | E:R -> error 360 | end 361 | end 362 | end 363 | }. 364 | 365 | {mapping, "buckets.default.pw", "riak_core.default_bucket_props.pw", [ 366 | {default, "0"}, 367 | {level, advanced} 368 | ]}. 369 | {translation, 370 | "riak_core.default_bucket_props.pw", 371 | fun(Conf) -> 372 | Setting = cuttlefish:conf_get("buckets.default.pw", Conf), 373 | case Setting of 374 | "quorum" -> quorum; 375 | "all" -> all; 376 | X -> 377 | try list_to_integer(Setting) of 378 | Int -> Int 379 | catch 380 | E:R -> error 381 | end 382 | end 383 | end 384 | }. 385 | 386 | {mapping, "buckets.default.dw", "riak_core.default_bucket_props.dw", [ 387 | {default, "quorum"}, 388 | {level, advanced} 389 | ]}. 390 | {translation, 391 | "riak_core.default_bucket_props.dw", 392 | fun(Conf) -> 393 | Setting = cuttlefish:conf_get("buckets.default.dw", Conf), 394 | case Setting of 395 | "quorum" -> quorum; 396 | "all" -> all; 397 | X -> 398 | try list_to_integer(Setting) of 399 | Int -> Int 400 | catch 401 | E:R -> error 402 | end 403 | end 404 | end 405 | }. 406 | 407 | {mapping, "buckets.default.rw", "riak_core.default_bucket_props.rw", [ 408 | {default, "quorum"}, 409 | {level, advanced} 410 | ]}. 411 | {translation, 412 | "riak_core.default_bucket_props.rw", 413 | fun(Conf) -> 414 | Setting = cuttlefish:conf_get("buckets.default.rw", Conf), 415 | case Setting of 416 | "quorum" -> quorum; 417 | "all" -> all; 418 | X -> 419 | try list_to_integer(Setting) of 420 | Int -> Int 421 | catch 422 | E:R -> error 423 | end 424 | end 425 | end 426 | }. 
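%% To illustrate the quorum/integer translations above (assumed example
%% values, not taken from the schema itself):
%%
%%   buckets.default.r = quorum   -> the atom quorum
%%   buckets.default.r = 3        -> the integer 3
%%   buckets.default.r = foo      -> the atom error (via the catch clause)
%%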
427 | 428 | %% {mapping, "buckets.default.basic_quorum", "riak_core.default_bucket_props.basic_quorum", false}, 429 | %% {mapping, "buckets.default.notfound_ok", "riak_core.default_bucket_props.notfound_ok", true} 430 | 431 | %% @doc whether or not siblings are allowed. 432 | %% Note: See Vector Clocks for a discussion of sibling resolution. 433 | {mapping, "buckets.default.siblings", "riak_core.default_bucket_props.allow_mult", [ 434 | {datatype, {enum, [on, off]}}, 435 | {default, on}, 436 | {level, advanced} 437 | ]}. 438 | 439 | {translation, 440 | "riak_core.default_bucket_props.allow_mult", 441 | fun(Conf) -> 442 | Setting = cuttlefish:conf_get("buckets.default.siblings", Conf), 443 | case Setting of 444 | on -> true; 445 | off -> false; 446 | _Default -> true 447 | end 448 | end}. 449 | 450 | {validator, "ring_size^2", "not a power of 2", 451 | fun(Size) -> 452 | (Size band (Size-1) =:= 0) 453 | end}. 454 | 455 | {validator, "ring_size_min", "must be at least 8", 456 | fun(Size) -> 457 | Size >= 8 458 | end}. 459 | 460 | %% @doc Number of concurrent node-to-node transfers allowed. 461 | {mapping, "transfer_limit", "riak_core.handoff_concurrency", [ 462 | {datatype, integer}, 463 | {default, 2}, 464 | {commented, 2} 465 | ]}. 466 | 467 | %% @doc Default location of ringstate 468 | {mapping, "ring.state_dir", "riak_core.ring_state_dir", [ 469 | {datatype, directory}, 470 | {default, "$(platform_data_dir)/ring"}, 471 | hidden 472 | ]}. 473 | 474 | %% @doc Default cert location for https can be overridden 475 | %% with the ssl config variable, for example: 476 | {mapping, "ssl.certfile", "riak_core.ssl.certfile", [ 477 | {datatype, file}, 478 | {commented, "$(platform_etc_dir)/cert.pem"} 479 | ]}. 480 | 481 | %% @doc Default key location for https can be overridden with the ssl 482 | %% config variable, for example: 483 | {mapping, "ssl.keyfile", "riak_core.ssl.keyfile", [ 484 | {datatype, file}, 485 | {commented, "$(platform_etc_dir)/key.pem"} 486 | ]}. 487 | 488 | %% @doc Default signing authority location for https can be overridden 489 | %% with the ssl config variable, for example: 490 | {mapping, "ssl.cacertfile", "riak_core.ssl.cacertfile", [ 491 | {datatype, file}, 492 | {commented, "$(platform_etc_dir)/cacertfile.pem"} 493 | ]}. 494 | 495 | %% @doc handoff.ip is the network address that Riak binds to for 496 | %% intra-cluster data handoff. 497 | {mapping, "handoff.ip", "riak_core.handoff_ip", [ 498 | {default, "{{handoff_ip}}" }, 499 | {datatype, string}, 500 | % XXX: Commented not_localhost to allow local development 501 | % uncomment for production 502 | % {validators, ["valid_ipaddr", "not_localhost"]}, 503 | {validators, ["valid_ipaddr"]}, 504 | hidden 505 | ]}. 506 | 507 | {validator, 508 | "valid_ipaddr", 509 | "must be a valid IP address", 510 | fun(AddrString) -> 511 | case inet_parse:address(AddrString) of 512 | {ok, _} -> true; 513 | {error, _} -> false 514 | end 515 | end}. 516 | 517 | {validator, 518 | "not_localhost", 519 | "can't be a local ip", 520 | fun(AddrString) -> 521 | case inet_parse:address(AddrString) of 522 | {ok, {127, 0, _, _}} -> false; 523 | {ok, _} -> true; 524 | {error, _} -> false 525 | end 526 | end}. 527 | 528 | %% @doc handoff.port is the TCP port that Riak uses for 529 | %% intra-cluster data handoff. 530 | {mapping, "handoff.port", "riak_core.handoff_port", [ 531 | {default, {{handoff_port}} }, 532 | {datatype, integer}, 533 | hidden 534 | ]}. 
535 | 536 | %% @doc To encrypt riak_core intra-cluster data handoff traffic, 537 | %% uncomment the following line and edit its path to an appropriate 538 | %% certfile and keyfile. (This example uses a single file with both 539 | %% items concatenated together.) 540 | {mapping, "handoff.ssl.certfile", "riak_core.handoff_ssl_options.certfile", [ 541 | %% {commented, "/tmp/erlserver.pem"}, 542 | {datatype, file}, 543 | hidden 544 | ]}. 545 | 546 | %% @doc If you need a separate keyfile for handoff 547 | {mapping, "handoff.ssl.keyfile", "riak_core.handoff_ssl_options.keyfile", [ 548 | {datatype, file}, 549 | hidden 550 | ]}. 551 | 552 | %% @doc Enables/disables outbound handoff transfers for this node. If you 553 | %% turn this setting off at runtime with riak-admin, it will kill any 554 | %% outbound handoffs currently running. 555 | {mapping, "handoff.outbound", "riak_core.disable_outbound_handoff", [ 556 | {default, on}, 557 | {datatype, {flag, off, on}}, 558 | hidden 559 | ]}. 560 | 561 | %% @doc Enables/disables inbound handoff transfers for this node. If you 562 | %% turn this setting off at runtime with riak-admin, it will kill any 563 | %% inbound handoffs currently running. 564 | {mapping, "handoff.inbound", "riak_core.disable_inbound_handoff", [ 565 | {default, on}, 566 | {datatype, {flag, off, on}}, 567 | hidden 568 | ]}. 569 | 570 | %% @doc The time a vnode has to be idle before a handoff can occur. 571 | {mapping, "handoff.inactivity_timeout", "riak_core.vnode_inactivity_timeout", [ 572 | {default, "1m"}, 573 | {datatype, {duration, ms}} 574 | ]}. 575 | 576 | %% @doc DTrace support. Do not enable 'dtrace' unless your Erlang/OTP 577 | %% runtime is compiled to support DTrace. DTrace is available in 578 | %% R15B01 (supported by the Erlang/OTP official source package) and in 579 | %% R14B04 via a custom source repository & branch. 580 | {mapping, "dtrace", "riak_core.dtrace_support", [ 581 | {default, off}, 582 | {datatype, flag} 583 | ]}. 584 | 585 | %% consistent on/off (in lieu of enabled/disabled, true/false) 586 | { translation, 587 | "riak_core.dtrace_support", 588 | fun(Conf) -> 589 | Setting = cuttlefish:conf_get("dtrace", Conf), 590 | case Setting of 591 | on -> true; 592 | off -> false; 593 | _Default -> false 594 | end 595 | end 596 | }. 597 | 598 | %% @doc Platform-specific installation paths (substituted by rebar) 599 | {mapping, "platform_bin_dir", "riak_core.platform_bin_dir", [ 600 | {datatype, directory}, 601 | {default, "{{platform_bin_dir}}"} 602 | ]}. 603 | 604 | %% @see platform_bin_dir 605 | {mapping, "platform_data_dir", "riak_core.platform_data_dir", [ 606 | {datatype, directory}, 607 | {default, "{{platform_data_dir}}"} 608 | ]}. 609 | 610 | %% @see platform_bin_dir 611 | {mapping, "platform_etc_dir", "riak_core.platform_etc_dir", [ 612 | {datatype, directory}, 613 | {default, "{{platform_etc_dir}}"} 614 | ]}. 615 | 616 | %% @see platform_bin_dir 617 | {mapping, "platform_lib_dir", "riak_core.platform_lib_dir", [ 618 | {datatype, directory}, 619 | {default, "{{platform_lib_dir}}"} 620 | ]}. 621 | 622 | %% @see platform_bin_dir 623 | {mapping, "platform_log_dir", "riak_core.platform_log_dir", [ 624 | {datatype, directory}, 625 | {default, "{{platform_log_dir}}"} 626 | ]}. 627 | 628 | %% @doc Enable consensus subsystem. Set to 'on' to enable the 629 | %% consensus subsystem used for strongly consistent Riak operations.
630 | {mapping, "strong_consistency", "riak_core.enable_consensus", [ 631 | {datatype, flag}, 632 | {default, off}, 633 | {commented, on} 634 | ]}. 635 | 636 | %% @doc Whether to enable the background manager globally. When 637 | %% enabled, participating Riak subsystems will coordinate access to 638 | %% shared resources. This will help to prevent system response 639 | %% degradation during times of heavy load from multiple background 640 | %% tasks. Specific subsystems may also have their own controls over 641 | %% use of the background manager. 642 | {mapping, "background_manager", "riak_core.use_background_manager", [ 643 | {datatype, flag}, 644 | {default, off}, 645 | hidden 646 | ]}. 647 | 648 | %% @doc Interval of time between vnode management 649 | %% activities. Modifying this will change the amount of time between 650 | %% attempts to trigger handoff between this node and any other member 651 | %% of the cluster. 652 | {mapping, "vnode_management_timer", "riak_core.vnode_management_timer", [ 653 | {default, "10s"}, 654 | {datatype, {duration, ms}}, 655 | hidden 656 | ]}. 657 | 658 | %% @doc Home directory for the run user 659 | {mapping, "run_user_home", "setup.home", 660 | [{default, "{{run_user_home}}"}, 661 | hidden, 662 | {datatype, string}]}. 663 | 664 | %% Async Job Management 665 | %% 666 | %% This is a translation for mappings that appear in other schema files. 667 | %% Mappings are from "cluster.job.$namespace.$operation"* to 668 | %% "riak_core.job_accept_class" with required attributes 669 | %% [merge, {datatype, {flag, enabled, disabled}}].** 670 | %% * Mappings are only performed on elements with exactly the number of 671 | %% segments shown - any other number of elements, even with a matching 672 | %% prefix, is ignored. 673 | %% ** The 'datatype' should be 'flag', and 'enabled'/'disabled' are our 674 | %% conventions, but any OnFlag/OffFlag pair can be used as long as they map 675 | %% to boolean values. 676 | %% Other attributes, such as 'hidden' or {default, X} are fine, since they 677 | %% don't make it down the stack to here. 678 | %% Job classes that should be enabled by default MUST have a {default, enabled} 679 | %% attribute, as the runtime filter only defaults to accept when no values have 680 | %% been set from ANY schema file. 681 | %% 682 | %% Example: 683 | %% {mapping, "cluster.job.harry.fold", "riak_core.job_accept_class", [ 684 | %% merge, 685 | %% {datatype, {flag, enabled, disabled}}, 686 | %% {default, enabled} 687 | %% ]}. 688 | %% {mapping, "cluster.job.alice.list", "riak_core.job_accept_class", [ 689 | %% merge, 690 | %% {datatype, {flag, enabled, disabled}}, 691 | %% {default, disabled} 692 | %% ]}. 693 | %% Results in: 694 | %% {riak_core, [ 695 | %% ... 696 | %% {job_accept_class, [{harry, fold}]} 697 | %% ... 698 | %% ]}. 699 | %% 700 | {translation, 701 | "riak_core.job_accept_class", 702 | fun(Conf) -> 703 | Fold = 704 | fun({[_, _, Mod, Op], true}, Result) -> 705 | [{erlang:list_to_atom(Mod), erlang:list_to_atom(Op)} | Result]; 706 | ({[_, _, _, _], false}, Result) -> 707 | Result; 708 | ({[_, _, _, _], _} = Setting, _) -> 709 | cuttlefish:invalid(io_lib:format("~p", [Setting])); 710 | (_, Result) -> 711 | Result 712 | end, 713 | lists:sort(lists:foldl(Fold, [], 714 | cuttlefish_variable:filter_by_prefix(["cluster", "job"], Conf))) 715 | end}. 716 | 717 | 718 | %% @doc Some requests to the vnodes are handled by an asynchronous worker pool.
719 | %% This parameter allows tuning of the pool's behaviour when it comes to dealing
720 | %% with requests that are queued.
721 | %% The default (fifo) will serve requests in the order they arrive at the worker
722 | %% pool. The alternative is to serve the requests in the reverse order, dealing
723 | %% with the most recent request first.
724 | %% There are pros and cons to both approaches; it is best to test which one
725 | %% works best for the desired characteristics.
726 | %%
727 | %% As a very rough rule of thumb:
728 | %% - fifo will lead to lower extremes
729 | %% - filo will lead to lower medians/means
730 | {mapping, "worker.queue_strategy", "riak_core.queue_worker_strategy",
731 | [{default, fifo},
732 | {datatype, {enum, [fifo, filo]}}]}.
733 | 
-------------------------------------------------------------------------------- /rebar3_riak_core.template: --------------------------------------------------------------------------------
1 | {description, "Rebar3 Riak Core Application"}.
2 | {variables, [
3 | {name, "myrcore", "Name of the application"},
4 | {desc, "A Riak Core Application", "Short description of the app"},
5 | {default_config_http_port, "{{web_port}}"}
6 | ]}.
7 | 
8 | {template, "rebar3_riak_core.app.src.tpl", "{{name}}/apps/{{name}}/src/{{name}}.app.src"}.
9 | {template, "rebar3_riak_core.erl.tpl", "{{name}}/apps/{{name}}/src/{{name}}.erl"}.
10 | {template, "rebar3_riak_core_app.erl.tpl", "{{name}}/apps/{{name}}/src/{{name}}_app.erl"}.
11 | {template, "rebar3_riak_core_sup.erl.tpl", "{{name}}/apps/{{name}}/src/{{name}}_sup.erl"}.
12 | {template, "rebar3_riak_core_console.erl.tpl", "{{name}}/apps/{{name}}/src/{{name}}_console.erl"}.
13 | {template, "rebar3_riak_core_vnode.erl.tpl", "{{name}}/apps/{{name}}/src/{{name}}_vnode.erl"}.
14 | {template, "rebar3_riak_core_rebar.config.tpl", "{{name}}/rebar.config"}.
15 | {template, "rebar3_riak_core_editorconfig.tpl", "{{name}}/.editorconfig"}.
16 | {template, "rebar3_riak_core_gitignore.tpl", "{{name}}/.gitignore"}.
17 | {template, "rebar3_riak_core_README.rst.tpl", "{{name}}/README.rst"}.
18 | {template, "rebar3_riak_core_Makefile.tpl", "{{name}}/Makefile"}.
19 | 
20 | {file, "rebar3_riak_core_admin_runner", "{{name}}/config/admin_bin"}.
21 | {template, "rebar3_riak_core_config.schema.tpl", "{{name}}/priv/01-{{name}}.schema"}.
22 | {template, "rebar3_riak_core_advanced.config.tpl", "{{name}}/config/advanced.config"}.
23 | {file, "rebar3_riak_core.lager.schema", "{{name}}/config/lager.schema"}.
24 | {file, "rebar3_riak_core.riak_core.schema", "{{name}}/config/riak_core.schema"}.
25 | {file, "rebar3_riak_core.erlang_vm.schema", "{{name}}/config/erlang_vm.schema"}.
26 | {template, "rebar3_riak_core_vars.config.tpl", "{{name}}/config/vars.config"}.
27 | {template, "rebar3_riak_core_vars_dev1.config.tpl", "{{name}}/config/vars_dev1.config"}.
28 | {template, "rebar3_riak_core_vars_dev2.config.tpl", "{{name}}/config/vars_dev2.config"}.
29 | {template, "rebar3_riak_core_vars_dev3.config.tpl", "{{name}}/config/vars_dev3.config"}.
30 | 
31 | {chmod, "{{name}}/config/admin_bin", 8#744}.
32 | -------------------------------------------------------------------------------- /rebar3_riak_core_Makefile.tpl: -------------------------------------------------------------------------------- 1 | BASEDIR = $(shell pwd) 2 | REBAR = rebar3 3 | RELPATH = _build/default/rel/{{ name }} 4 | PRODRELPATH = _build/prod/rel/{{ name }} 5 | DEV1RELPATH = _build/dev1/rel/{{ name }} 6 | DEV2RELPATH = _build/dev2/rel/{{ name }} 7 | DEV3RELPATH = _build/dev3/rel/{{ name }} 8 | APPNAME = {{ name }} 9 | SHELL = /bin/bash 10 | 11 | release: 12 | $(REBAR) release 13 | mkdir -p $(RELPATH)/../{{ name }}_config 14 | [ -f $(RELPATH)/../{{ name }}_config/{{ name }}.conf ] || cp $(RELPATH)/etc/{{ name }}.conf $(RELPATH)/../{{ name }}_config/{{ name }}.conf 15 | [ -f $(RELPATH)/../{{ name }}_config/advanced.config ] || cp $(RELPATH)/etc/advanced.config $(RELPATH)/../{{ name }}_config/advanced.config 16 | 17 | console: 18 | cd $(RELPATH) && ./bin/{{ name }} console 19 | 20 | prod-release: 21 | $(REBAR) as prod release 22 | mkdir -p $(PRODRELPATH)/../{{ name }}_config 23 | [ -f $(PRODRELPATH)/../{{ name }}_config/{{ name }}.conf ] || cp $(PRODRELPATH)/etc/{{ name }}.conf $(PRODRELPATH)/../{{ name }}_config/{{ name }}.conf 24 | [ -f $(PRODRELPATH)/../{{ name }}_config/advanced.config ] || cp $(PRODRELPATH)/etc/advanced.config $(PRODRELPATH)/../{{ name }}_config/advanced.config 25 | 26 | prod-console: 27 | cd $(PRODRELPATH) && ./bin/{{ name }} console 28 | 29 | compile: 30 | $(REBAR) compile 31 | 32 | clean: 33 | $(REBAR) clean 34 | 35 | test: 36 | $(REBAR) ct 37 | 38 | devrel1: 39 | $(REBAR) as dev1 release 40 | mkdir -p $(DEV1RELPATH)/../{{ name }}_config 41 | [ -f $(DEV1RELPATH)/../{{ name }}_config/{{ name }}.conf ] || cp $(DEV1RELPATH)/etc/{{ name }}.conf $(DEV1RELPATH)/../{{ name }}_config/{{ name }}.conf 42 | [ -f $(DEV1RELPATH)/../{{ name }}_config/advanced.config ] || cp $(DEV1RELPATH)/etc/advanced.config $(DEV1RELPATH)/../{{ name }}_config/advanced.config 43 | 44 | devrel2: 45 | $(REBAR) as dev2 release 46 | mkdir -p $(DEV2RELPATH)/../{{ name }}_config 47 | [ -f $(DEV2RELPATH)/../{{ name }}_config/{{ name }}.conf ] || cp $(DEV2RELPATH)/etc/{{ name }}.conf $(DEV2RELPATH)/../{{ name }}_config/{{ name }}.conf 48 | [ -f $(DEV2RELPATH)/../{{ name }}_config/advanced.config ] || cp $(DEV2RELPATH)/etc/advanced.config $(DEV2RELPATH)/../{{ name }}_config/advanced.config 49 | 50 | devrel3: 51 | $(REBAR) as dev3 release 52 | mkdir -p $(DEV3RELPATH)/../{{ name }}_config 53 | [ -f $(DEV3RELPATH)/../{{ name }}_config/{{ name }}.conf ] || cp $(DEV3RELPATH)/etc/{{ name }}.conf $(DEV3RELPATH)/../{{ name }}_config/{{ name }}.conf 54 | [ -f $(DEV3RELPATH)/../{{ name }}_config/advanced.config ] || cp $(DEV3RELPATH)/etc/advanced.config $(DEV3RELPATH)/../{{ name }}_config/advanced.config 55 | 56 | devrel: devrel1 devrel2 devrel3 57 | 58 | dev1-attach: 59 | $(BASEDIR)/_build/dev1/rel/{{ name }}/bin/$(APPNAME) attach 60 | 61 | dev2-attach: 62 | $(BASEDIR)/_build/dev2/rel/{{ name }}/bin/$(APPNAME) attach 63 | 64 | dev3-attach: 65 | $(BASEDIR)/_build/dev3/rel/{{ name }}/bin/$(APPNAME) attach 66 | 67 | dev1-console: 68 | $(BASEDIR)/_build/dev1/rel/{{ name }}/bin/$(APPNAME) console 69 | 70 | dev2-console: 71 | $(BASEDIR)/_build/dev2/rel/{{ name }}/bin/$(APPNAME) console 72 | 73 | dev3-console: 74 | $(BASEDIR)/_build/dev3/rel/{{ name }}/bin/$(APPNAME) console 75 | 76 | devrel-clean: 77 | rm -rf _build/dev*/rel 78 | 79 | devrel-start: 80 | for d in $(BASEDIR)/_build/dev*; do $$d/rel/{{ name }}/bin/$(APPNAME) start; done 81 | 82 | 
devrel-join:
83 | for d in $(BASEDIR)/_build/dev{2,3}; do $$d/rel/{{ name }}/bin/$(APPNAME)-admin cluster join {{ name }}1@127.0.0.1; done
84 | 
85 | devrel-cluster-plan:
86 | $(BASEDIR)/_build/dev1/rel/{{ name }}/bin/$(APPNAME)-admin cluster plan
87 | 
88 | devrel-cluster-commit:
89 | $(BASEDIR)/_build/dev1/rel/{{ name }}/bin/$(APPNAME)-admin cluster commit
90 | 
91 | devrel-status:
92 | $(BASEDIR)/_build/dev1/rel/{{ name }}/bin/$(APPNAME)-admin member-status
93 | 
94 | devrel-ping:
95 | for d in $(BASEDIR)/_build/dev*; do $$d/rel/{{ name }}/bin/$(APPNAME) ping; true; done
96 | 
97 | devrel-stop:
98 | for d in $(BASEDIR)/_build/dev*; do $$d/rel/{{ name }}/bin/$(APPNAME) stop; true; done
99 | 
100 | start:
101 | $(BASEDIR)/$(RELPATH)/bin/$(APPNAME) start
102 | 
103 | stop:
104 | $(BASEDIR)/$(RELPATH)/bin/$(APPNAME) stop
105 | 
106 | attach:
107 | $(BASEDIR)/$(RELPATH)/bin/$(APPNAME) attach
108 | 
109 | 
-------------------------------------------------------------------------------- /rebar3_riak_core_README.rst.tpl: --------------------------------------------------------------------------------
1 | {{ name }}
2 | ===========
3 | 
4 | A riak_core application
5 | 
6 | Build
7 | -----
8 | 
9 | ::
10 | 
11 | make release
12 | 
13 | Test
14 | ----
15 | 
16 | ::
17 | 
18 | rebar3 ct
19 | 
20 | Run
21 | ---
22 | 
23 | ::
24 | 
25 | make console
26 | 
27 | Try
28 | ---
29 | 
30 | ::
31 | 
32 | 1> {{ name }}:ping().
33 | {pong,753586781748746817198774991869333432010090217472}
34 | 
35 | Quit
36 | ----
37 | 
38 | ::
39 | 
40 | 2> q().
41 | 
42 | Play with Clustering
43 | --------------------
44 | 
45 | Build 3 releases that can run on the same machine::
46 | 
47 | make devrel
48 | 
49 | Start them in different consoles::
50 | 
51 | make dev1-console
52 | make dev2-console
53 | make dev3-console
54 | 
55 | join 2 nodes to the first one::
56 | 
57 | make devrel-join
58 | 
59 | check the status of the cluster::
60 | 
61 | make devrel-status
62 | 
63 | you should see something like this::
64 | 
65 | ================================= Membership ==================================
66 | Status Ring Pending Node
67 | -------------------------------------------------------------------------------
68 | joining 0.0% -- '{{ name }}2@127.0.0.1'
69 | joining 0.0% -- '{{ name }}3@127.0.0.1'
70 | valid 100.0% -- '{{ name }}1@127.0.0.1'
71 | -------------------------------------------------------------------------------
72 | Valid:1 / Leaving:0 / Exiting:0 / Joining:2 / Down:0
73 | 
74 | it should say that 2 nodes are joining; now check the cluster plan::
75 | 
76 | make devrel-cluster-plan
77 | 
78 | it should display the cluster plan; now we can commit the plan::
79 | 
80 | make devrel-cluster-commit
81 | 
82 | check the status of the cluster again::
83 | 
84 | make devrel-status
85 | 
86 | you may see the vnodes transferring::
87 | 
88 | ================================= Membership ==================================
89 | Status Ring Pending Node
90 | -------------------------------------------------------------------------------
91 | valid 75.0% 25.0% '{{ name }}1@127.0.0.1'
92 | valid 9.4% 25.0% '{{ name }}2@127.0.0.1'
93 | valid 7.8% 25.0% '{{ name }}3@127.0.0.1'
94 | -------------------------------------------------------------------------------
95 | Valid:3 / Leaving:0 / Exiting:0 / Joining:0 / Down:0
96 | 
97 | at some point you should see something like this::
98 | 
99 | ================================= Membership ==================================
100 | Status Ring Pending Node
101 | 
------------------------------------------------------------------------------- 102 | valid 33.3% -- '{{ name }}1@127.0.0.1' 103 | valid 33.3% -- '{{ name }}2@127.0.0.1' 104 | valid 33.3% -- '{{ name }}3@127.0.0.1' 105 | ------------------------------------------------------------------------------- 106 | Valid:3 / Leaving:0 / Exiting:0 / Joining:0 / Down:0 107 | 108 | when you are bored you can stop them:: 109 | 110 | make devrel-stop 111 | 112 | Riak Core Metadata 113 | ------------------ 114 | 115 | Create some variables in all nodes you are going to run the calls: 116 | 117 | .. code:: erlang 118 | 119 | FullPrefix = {<<"{{ name }}">>, <<"config">>}. 120 | Key1 = key_1. 121 | Val1 = <<"value 1">>. 122 | 123 | Run each line on any node: 124 | 125 | .. code:: erlang 126 | 127 | riak_core_metadata:get(FullPrefix, Key1). 128 | % undefined 129 | 130 | riak_core_metadata:get(FullPrefix, Key1, [{default, default_value_here}]). 131 | % default_value_here 132 | 133 | riak_core_metadata:put(FullPrefix, Key1, Val1). 134 | % ok 135 | 136 | riak_core_metadata:get(FullPrefix, Key1). 137 | % <<"value 1">> 138 | 139 | riak_core_metadata:to_list(FullPrefix). 140 | % [{key_1,[<<"value 1">>]}] 141 | 142 | riak_core_metadata:delete(FullPrefix, Key1). 143 | % ok 144 | 145 | riak_core_metadata:to_list(FullPrefix). 146 | % [{key_1,['$deleted']}] 147 | 148 | Trace Metadata Calls: 149 | 150 | .. code:: erlang 151 | 152 | ReturnTrace = fun(_) -> return_trace() end. 153 | % at most 1000 calls per second 154 | Rate = {1000, 1000}. 155 | recon_trace:calls([{riak_core_broadcast, '_', 156 | fun ([A, _]) when A /= lazy_tick -> return_trace() end}, 157 | {riak_core_metadata_hashtree, '_', ReturnTrace}, 158 | {riak_core_metadata_object, '_', ReturnTrace}, 159 | {riak_core_metadata_manager, '_', ReturnTrace}, 160 | {riak_core_metadata_exchange_fsm, '_', ReturnTrace}, 161 | {riak_core_metadata, '_', ReturnTrace}], Rate). 162 | 163 | Clear the trace: 164 | 165 | .. code:: erlang 166 | 167 | recon_trace:clear(). 168 | 169 | 170 | TODO 171 | ---- 172 | 173 | * define license and create LICENSE file 174 | 175 | License 176 | ------- 177 | 178 | TODO 179 | -------------------------------------------------------------------------------- /rebar3_riak_core_admin_runner: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # /bin/sh on Solaris is not a POSIX compatible shell, but /usr/bin/ksh is. 4 | if [ `uname -s` = 'SunOS' -a "${POSIX_SHELL}" != "true" ]; then 5 | POSIX_SHELL="true" 6 | export POSIX_SHELL 7 | # To support 'whoami' add /usr/ucb to path 8 | PATH=/usr/ucb:$PATH 9 | export PATH 10 | exec /usr/bin/ksh $0 "$@" 11 | fi 12 | unset POSIX_SHELL # clear it so if we invoke other scripts, they run as ksh as well 13 | 14 | 15 | SCRIPT=$(readlink $0 || true) 16 | if [ -z $SCRIPT ]; then 17 | SCRIPT=$0 18 | fi; 19 | 20 | 21 | SCRIPT_DIR="$(cd `dirname "$SCRIPT"` && pwd -P)" 22 | RELEASE_ROOT_DIR="$(cd "$SCRIPT_DIR/.." 
&& pwd -P)" 23 | REL_NAME="{{ release_name }}" 24 | CUTTLEFISH_CONF="{{ release_name }}.conf" 25 | REL_VSN="{{ rel_vsn }}" 26 | ERTS_VSN="{{ erts_vsn }}" 27 | CODE_LOADING_MODE="${CODE_LOADING_MODE:-embedded}" 28 | REL_DIR="$RELEASE_ROOT_DIR/releases/$REL_VSN" 29 | ERL_OPTS="{{ erl_opts }}" 30 | RUNNER_LOG_DIR="${RUNNER_LOG_DIR:-$RELEASE_ROOT_DIR/log}" 31 | RUNNER_BASE_DIR=$RELEASE_ROOT_DIR 32 | RUNNER_ETC_DIR="${RUNNER_ETC_DIR:-$RELEASE_ROOT_DIR/etc}" 33 | 34 | find_erts_dir() { 35 | __erts_dir="$RELEASE_ROOT_DIR/erts-$ERTS_VSN" 36 | if [ -d "$__erts_dir" ]; then 37 | ERTS_DIR="$__erts_dir"; 38 | ROOTDIR="$RELEASE_ROOT_DIR" 39 | else 40 | __erl="$(which erl)" 41 | code="io:format(\"~s\", [code:root_dir()]), halt()." 42 | __erl_root="$("$__erl" -noshell -eval "$code")" 43 | ERTS_DIR="$__erl_root/erts-$ERTS_VSN" 44 | ROOTDIR="$__erl_root" 45 | fi 46 | } 47 | 48 | # Get node pid 49 | relx_get_pid() { 50 | if output="$(relx_nodetool rpcterms os getpid)" 51 | then 52 | echo "$output" | sed -e 's/"//g' 53 | return 0 54 | else 55 | echo "$output" 56 | return 1 57 | fi 58 | } 59 | 60 | relx_get_longname() { 61 | id="longname$(relx_gen_id)-${NAME}" 62 | "$BINDIR/erl" -boot start_clean -eval 'io:format("~s~n", [node()]), halt()' -noshell -name $id | sed -e 's/.*@//g' 63 | } 64 | 65 | # Connect to a remote node 66 | relx_rem_sh() { 67 | # Generate a unique id used to allow multiple remsh to the same node 68 | # transparently 69 | id="remsh$(relx_gen_id)-${NAME}" 70 | 71 | # Get the node's ticktime so that we use the same thing. 72 | TICKTIME="$(relx_nodetool rpcterms net_kernel get_net_ticktime)" 73 | 74 | # Setup remote shell command to control node 75 | exec "$BINDIR/erl" "$NAME_TYPE" "$id" -remsh "$NAME" -boot start_clean \ 76 | -boot_var ERTS_LIB_DIR "$ERTS_LIB_DIR" \ 77 | -setcookie "$COOKIE" -hidden -kernel net_ticktime $TICKTIME 78 | } 79 | 80 | # Generate a random id 81 | relx_gen_id() { 82 | od -X -N 4 /dev/urandom | head -n1 | awk '{print $2}' 83 | } 84 | 85 | # Control a node 86 | relx_nodetool() { 87 | command="$1"; shift 88 | 89 | "$ERTS_DIR/bin/escript" "$ROOTDIR/bin/nodetool" "$NAME_TYPE" "$NAME" \ 90 | -setcookie "$COOKIE" "$command" $@ 91 | } 92 | 93 | # Run an escript in the node's environment 94 | relx_escript() { 95 | shift; scriptpath="$1"; shift 96 | export RELEASE_ROOT_DIR 97 | 98 | "$ERTS_DIR/bin/escript" "$ROOTDIR/$scriptpath" $@ 99 | } 100 | 101 | # Output a start command for the last argument of run_erl 102 | relx_start_command() { 103 | printf "exec \"%s\" \"%s\"" "$RELEASE_ROOT_DIR/bin/$REL_NAME" \ 104 | "$START_OPTION" 105 | } 106 | 107 | # Make sure log directory exists 108 | mkdir -p "$RUNNER_LOG_DIR" 109 | 110 | # Use $CWD/sys.config if exists, otherwise releases/VSN/sys.config 111 | if [ -z "$NAME_ARG" ]; then 112 | NODENAME=`egrep '^[ \t]*nodename[ \t]*=[ \t]*' $RUNNER_ETC_DIR/$CUTTLEFISH_CONF 2> /dev/null | tail -n 1 | cut -d = -f 2` 113 | if [ -z "$NODENAME" ]; then 114 | echo "vm.args needs to have a -name parameter." 115 | echo " -sname is not supported." 
116 | exit 1 117 | else 118 | NAME_TYPE="-name" 119 | NAME="${NODENAME# *}" 120 | fi 121 | fi 122 | 123 | PIPE_DIR="${PIPE_DIR:-/tmp/erl_pipes/$NAME/}" 124 | 125 | # Extract the target cookie 126 | #COOKIE_ARG=`grep -e '-setcookie' $RUNNER_ETC_DIR/vm.args` 127 | if [ -z "$COOKIE_ARG" ]; then 128 | COOKIE=`egrep '^[ \t]*distributed_cookie[ \t]*=[ \t]*' $RUNNER_ETC_DIR/$CUTTLEFISH_CONF 2> /dev/null | cut -d = -f 2 | tr -d ' '` 129 | if [ -z "$COOKIE" ]; then 130 | echo "vm.args needs to have a -setcookie parameter." 131 | exit 1 132 | else 133 | COOKIE_ARG="-setcookie $COOKIE" 134 | fi 135 | fi 136 | 137 | find_erts_dir 138 | export ROOTDIR="$RELEASE_ROOT_DIR" 139 | export BINDIR="$ERTS_DIR/bin" 140 | export EMU="beam" 141 | export PROGNAME="erl" 142 | export LD_LIBRARY_PATH="$ERTS_DIR/lib:$LD_LIBRARY_PATH" 143 | ERTS_LIB_DIR="$ERTS_DIR/../lib" 144 | CUTTLEFISHCMD="$ERTS_DIR/bin/escript $RUNNER_BASE_DIR/bin/cuttlefish" 145 | 146 | cd "$ROOTDIR" 147 | 148 | if CUTTLEFISH_CONFIG=$($CUTTLEFISHCMD -e $RUNNER_ETC_DIR -d $RUNNER_BASE_DIR/generated.conf -s $RUNNER_BASE_DIR/share/schema/ -c $RUNNER_ETC_DIR/$CUTTLEFISH_CONF) 149 | then 150 | CONFIG_FILES="$CUTTLEFISH_CONFIG" 151 | else 152 | echo "Cuttlefish failed! Oh no!" 153 | exit 1 154 | fi 155 | 156 | 157 | # Parse out release and erts info 158 | START_ERL=`cat $RUNNER_BASE_DIR/releases/start_erl.data` 159 | ERTS_VSN=${START_ERL% *} 160 | APP_VSN=${START_ERL#* } 161 | 162 | # TODO: look in the release otherwise use which 163 | ESCRIPT=escript 164 | NODETOOL_PATH=$RUNNER_BASE_DIR/bin 165 | NODETOOL=$NODETOOL_PATH/nodetool 166 | # Setup command to control the node 167 | NODETOOL="$ESCRIPT $NODETOOL $NAME_ARG $COOKIE_ARG" 168 | 169 | ensure_node_running() 170 | { 171 | # Make sure the local node IS running 172 | if ! relx_nodetool "ping"; then 173 | echo "Node is not running!" 174 | exit 1 175 | fi 176 | } 177 | 178 | cluster_admin() 179 | { 180 | case "$1" in 181 | join) 182 | if [ $# -ne 2 ]; then 183 | echo "Usage: $SCRIPT cluster join " 184 | exit 1 185 | fi 186 | ensure_node_running 187 | relx_nodetool rpc {{ release_name }}_console staged_join "$2" 188 | ;; 189 | leave) 190 | if [ $# -eq 1 ]; then 191 | ensure_node_running 192 | relx_nodetool rpc riak_core_console stage_leave 193 | elif [ $# -eq 2 ]; then 194 | ensure_node_running 195 | relx_nodetool rpc riak_core_console stage_leave "$2" 196 | else 197 | echo "Usage: $SCRIPT cluster leave []" 198 | exit 1 199 | fi 200 | ;; 201 | force-remove) 202 | if [ $# -ne 2 ]; then 203 | echo "Usage: $SCRIPT cluster force-remove " 204 | exit 1 205 | fi 206 | ensure_node_running 207 | relx_nodetool rpc riak_core_console stage_remove "$2" 208 | ;; 209 | replace) 210 | if [ $# -ne 3 ]; then 211 | echo "Usage: $SCRIPT cluster replace " 212 | exit 1 213 | fi 214 | ensure_node_running 215 | relx_nodetool rpc riak_core_console stage_replace "$2" "$3" 216 | ;; 217 | force-replace) 218 | if [ $# -ne 3 ]; then 219 | echo "Usage: $SCRIPT cluster force-replace " 220 | exit 1 221 | fi 222 | ensure_node_running 223 | relx_nodetool rpc riak_core_console stage_force_replace "$2" "$3" 224 | ;; 225 | plan) 226 | ensure_node_running 227 | relx_nodetool rpc riak_core_console print_staged 228 | ;; 229 | commit) 230 | ensure_node_running 231 | relx_nodetool rpc riak_core_console commit_staged 232 | ;; 233 | clear) 234 | ensure_node_running 235 | relx_nodetool rpc riak_core_console clear_staged 236 | ;; 237 | *) 238 | echo "\ 239 | Usage: $SCRIPT cluster 240 | 241 | The following commands stage changes to cluster membership. 
These commands 242 | do not take effect immediately. After staging a set of changes, the staged 243 | plan must be committed to take effect: 244 | 245 | join Join node to the cluster containing 246 | leave Have this node leave the cluster and shutdown 247 | leave Have leave the cluster and shutdown 248 | 249 | force-remove Remove from the cluster without 250 | first handing off data. Designed for 251 | crashed, unrecoverable nodes 252 | 253 | replace Have transfer all data to , 254 | and then leave the cluster and shutdown 255 | 256 | force-replace Reassign all partitions owned by to 257 | without first handing off data, and 258 | remove from the cluster. 259 | 260 | Staging commands: 261 | plan Display the staged changes to the cluster 262 | commit Commit the staged changes 263 | clear Clear the staged changes 264 | " 265 | esac 266 | } 267 | 268 | # Check the first argument for instructions 269 | case "$1" in 270 | down) 271 | if [ $# -ne 2 ]; then 272 | echo "Usage: $SCRIPT down " 273 | exit 1 274 | fi 275 | 276 | ensure_node_running 277 | relx_nodetool rpc {{ release_name }}_console down $@ 278 | ;; 279 | 280 | ringready) 281 | if [ $# -ne 1 ]; then 282 | echo "Usage: $SCRIPT ringready" 283 | exit 1 284 | fi 285 | 286 | ensure_node_running 287 | relx_nodetool rpc {{ release_name }}_console ringready '' 288 | ;; 289 | 290 | member[_-]status) 291 | if [ $# -ne 1 ]; then 292 | echo "Usage: $SCRIPT $1" 293 | exit 1 294 | fi 295 | 296 | ensure_node_running 297 | relx_nodetool rpc riak_core_console member_status '' 298 | ;; 299 | 300 | ring[_-]status) 301 | if [ $# -ne 1 ]; then 302 | echo "Usage: $SCRIPT $1" 303 | exit 1 304 | fi 305 | 306 | ensure_node_running 307 | relx_nodetool rpc riak_core_console ring_status '' 308 | ;; 309 | 310 | services) 311 | relx_nodetool rpcterms riak_core_node_watcher services '' 312 | ;; 313 | 314 | wait[_-]for[_-]service) 315 | SVC=$2 316 | TARGETNODE=$3 317 | if [ $# -lt 3 ]; then 318 | echo "Usage: $SCRIPT $1 " 319 | exit 1 320 | fi 321 | 322 | while (true); do 323 | # Make sure riak_core_node_watcher is up and running locally before trying to query it 324 | # to avoid ugly (but harmless) error messages 325 | NODEWATCHER=`$NODETOOL rpcterms erlang whereis "'riak_core_node_watcher'."` 326 | if [ "$NODEWATCHER" = "undefined" ]; then 327 | echo "$SVC is not up: node watcher is not running" 328 | continue 329 | fi 330 | 331 | # Get the list of services that are available on the requested node 332 | SERVICES=`$NODETOOL rpcterms riak_core_node_watcher services "'${TARGETNODE}'."` 333 | echo "$SERVICES" | grep "[[,]$SVC[],]" > /dev/null 2>&1 334 | if [ "X$?" 
= "X0" ]; then 335 | echo "$SVC is up" 336 | exit 0 337 | else 338 | echo "$SVC is not up: $SERVICES" 339 | fi 340 | sleep 3 341 | done 342 | ;; 343 | cluster) 344 | shift 345 | cluster_admin "$@" 346 | ;; 347 | *) 348 | echo "Usage: $SCRIPT { cluster | down | ringready | member-status | " 349 | echo " ring-status | services | wait-for-service " 350 | exit 1 351 | ;; 352 | esac 353 | -------------------------------------------------------------------------------- /rebar3_riak_core_advanced.config.tpl: -------------------------------------------------------------------------------- 1 | [ 2 | { {{ name }}, []}, 3 | {riak_core, [ 4 | {schema_dirs, ["share/schema"]} 5 | %% {{ name }} valid permissions to grant 6 | % {permissions, [{ {{ name }}, [put, get, list, grant, delete]}]} 7 | ]}, 8 | %% SASL config 9 | {sasl, [ 10 | {sasl_error_logger, {file, "log/sasl-error.log"}}, 11 | {errlog_type, error}, 12 | {error_logger_mf_dir, "log/sasl"}, % Log directory 13 | {error_logger_mf_maxbytes, 10485760}, % 10 MB max file size 14 | {error_logger_mf_maxfiles, 5} % 5 files max 15 | ] 16 | }, 17 | 18 | %% Lager config 19 | %% see https://github.com/basho/lager#configuration 20 | %% see https://github.com/basho/lager/blob/master/src/lager.app.src 21 | {lager, [ 22 | {handlers, [ 23 | {lager_console_backend, info}, 24 | {lager_file_backend, [{file, "error.log"}, {level, error}, 25 | {size, 10485760}, {date, "$D0"}, {count, 5}]}, 26 | {lager_file_backend, [{file, "console.log"}, {level, info}, 27 | {size, 10485760}, {date, "$D0"}, {count, 5}]} 28 | ]} 29 | ]} 30 | ]. 31 | -------------------------------------------------------------------------------- /rebar3_riak_core_app.erl.tpl: -------------------------------------------------------------------------------- 1 | -module({{name}}_app). 2 | 3 | -behaviour(application). 4 | 5 | %% Application callbacks 6 | -export([start/2, stop/1]). 7 | 8 | %% =================================================================== 9 | %% Application callbacks 10 | %% =================================================================== 11 | 12 | start(_StartType, _StartArgs) -> 13 | case {{name}}_sup:start_link() of 14 | {ok, Pid} -> 15 | ok = riak_core:register([{vnode_module, {{name}}_vnode}]), 16 | ok = riak_core_node_watcher:service_up({{name}}, self()), 17 | 18 | {ok, Pid}; 19 | {error, Reason} -> 20 | {error, Reason} 21 | end. 22 | 23 | stop(_State) -> 24 | ok. 25 | -------------------------------------------------------------------------------- /rebar3_riak_core_config.schema.tpl: -------------------------------------------------------------------------------- 1 | %%-*- mode: erlang -*- 2 | %% ex: ft=erlang ts=4 sw=4 et 3 | 4 | %% @doc Enable/Disable HTTP API 5 | {mapping, "http.enabled", "{{ name }}.http_enabled", [ 6 | {datatype, {flag, yes, no}}, 7 | {default, yes} 8 | ]}. 9 | 10 | %% @doc port to listen to for HTTP API 11 | {mapping, "http.port", "{{ name }}.http_port", [ 12 | {datatype, integer}, 13 | {default, {{default_config_http_port}} } 14 | ]}. 15 | 16 | %% @doc number of acceptors to user for HTTP API 17 | {mapping, "http.acceptors", "{{ name }}.http_acceptors", [ 18 | {datatype, integer}, 19 | {default, 100} 20 | ]}. 21 | 22 | %% @doc Enable/Disable HTTPS API 23 | {mapping, "https.enabled", "{{ name }}.https_enabled", [ 24 | {datatype, {flag, yes, no}}, 25 | {default, no} 26 | ]}. 27 | 28 | %% @doc port to listen to for HTTPS API 29 | {mapping, "https.port", "{{ name }}.https_port", [ 30 | {datatype, integer}, 31 | {default, 8443} 32 | ]}. 
33 | 34 | %% @doc number of acceptors to use for HTTPS API 35 | {mapping, "https.acceptors", "{{ name }}.https_acceptors", [ 36 | {datatype, integer}, 37 | {default, 100} 38 | ]}. 39 | 40 | %% @doc Enable/Disable HTTP CORS API 41 | {mapping, "http.cors.enabled", "{{ name }}.cors_enabled", [ 42 | {datatype, {flag, yes, no}}, 43 | {default, no} 44 | ]}. 45 | 46 | %% @doc HTTP CORS API allowed origins, it can be a comma separated list of 47 | %% origins to accept or * to accept all 48 | {mapping, "http.cors.origins", "{{ name }}.cors_origins", [ 49 | {default, "*"} 50 | ]}. 51 | 52 | {translation, "{{ name }}.cors_origins", 53 | fun(Conf) -> 54 | Setting = cuttlefish:conf_get("http.cors.origins", Conf), 55 | case Setting of 56 | "*" -> any; 57 | CSVs -> 58 | Tokens = string:tokens(CSVs, ","), 59 | Cleanup = fun (Token) -> 60 | CleanToken = string:strip(Token), 61 | list_to_binary(CleanToken) 62 | end, 63 | FilterEmptyStr = fun ("") -> false; (_) -> true end, 64 | lists:filter(FilterEmptyStr, lists:map(Cleanup, Tokens)) 65 | end 66 | end}. 67 | 68 | %% @doc HTTP CORS API a comma separated list of allowed headers to accept 69 | {mapping, "http.cors.headers", "{{ name }}.cors_headers", []}. 70 | 71 | {translation, "{{ name }}.cors_headers", 72 | fun(Conf) -> 73 | CSVs = cuttlefish:conf_get("http.cors.headers", Conf), 74 | Tokens = string:tokens(CSVs, ","), 75 | Cleanup = fun (Token) -> 76 | CleanToken = string:strip(Token), 77 | list_to_binary(CleanToken) 78 | end, 79 | FilterEmptyStr = fun ("") -> false; (_) -> true end, 80 | lists:filter(FilterEmptyStr, lists:map(Cleanup, Tokens)) 81 | end}. 82 | 83 | %% @doc HTTP CORS API indicates how long the results of a preflight request can 84 | %% be cached 85 | {mapping, "http.cors.maxage", "{{ name }}.cors_max_age_secs", [ 86 | {datatype, {duration, s}}, 87 | {default, "60s"} 88 | ]}. 89 | 90 | %% @doc secret used to encrypt the session token, IMPORTANT: change this 91 | {mapping, "auth.secret", "{{ name }}.auth_secret", [ 92 | {default, "changeme"} 93 | ]}. 94 | 95 | {translation, "{{ name }}.auth_secret", 96 | fun(Conf) -> 97 | Setting = cuttlefish:conf_get("auth.secret", Conf), 98 | list_to_binary(Setting) 99 | end}. 100 | 101 | %% @doc time a session is valid after login 102 | {mapping, "auth.session.duration", "{{ name }}.session_duration_secs", [ 103 | {datatype, {duration, s}}, 104 | {default, "24h"} 105 | ]}. 106 | -------------------------------------------------------------------------------- /rebar3_riak_core_console.erl.tpl: -------------------------------------------------------------------------------- 1 | %% @doc Interface for riak_searchng-admin commands. 2 | -module({{name}}_console). 3 | -export([staged_join/1, 4 | down/1, 5 | ringready/1]). 6 | -ignore_xref([join/1, 7 | leave/1, 8 | remove/1, 9 | ringready/1]). 10 | 11 | staged_join([NodeStr]) -> 12 | Node = list_to_atom(NodeStr), 13 | join(NodeStr, fun riak_core:staged_join/1, 14 | "Success: staged join request for ~p to ~p~n", [node(), Node]). 
15 | 16 | join(NodeStr, JoinFn, SuccessFmt, SuccessArgs) -> 17 | try 18 | case JoinFn(NodeStr) of 19 | ok -> 20 | io:format(SuccessFmt, SuccessArgs), 21 | ok; 22 | {error, not_reachable} -> 23 | io:format("Node ~s is not reachable!~n", [NodeStr]), 24 | error; 25 | {error, different_ring_sizes} -> 26 | io:format("Failed: ~s has a different ring_creation_size~n", 27 | [NodeStr]), 28 | error; 29 | {error, unable_to_get_join_ring} -> 30 | io:format("Failed: Unable to get ring from ~s~n", [NodeStr]), 31 | error; 32 | {error, not_single_node} -> 33 | io:format("Failed: This node is already a member of a " 34 | "cluster~n"), 35 | error; 36 | {error, self_join} -> 37 | io:format("Failed: This node cannot join itself in a " 38 | "cluster~n"), 39 | error; 40 | {error, _} -> 41 | io:format("Join failed. Try again in a few moments.~n", []), 42 | error 43 | end 44 | catch 45 | Exception:Reason -> 46 | lager:error("Join failed ~p:~p", [Exception, Reason]), 47 | io:format("Join failed, see log for details~n"), 48 | error 49 | end. 50 | 51 | 52 | down([Node]) -> 53 | try 54 | case riak_core:down(list_to_atom(Node)) of 55 | ok -> 56 | io:format("Success: ~p marked as down~n", [Node]), 57 | ok; 58 | {error, legacy_mode} -> 59 | io:format("Cluster is currently in legacy mode~n"), 60 | ok; 61 | {error, is_up} -> 62 | io:format("Failed: ~s is up~n", [Node]), 63 | error; 64 | {error, not_member} -> 65 | io:format("Failed: ~p is not a member of the cluster.~n", 66 | [Node]), 67 | error; 68 | {error, only_member} -> 69 | io:format("Failed: ~p is the only member.~n", [Node]), 70 | error 71 | end 72 | catch 73 | Exception:Reason -> 74 | lager:error("Down failed ~p:~p", [Exception, Reason]), 75 | io:format("Down failed, see log for details~n"), 76 | error 77 | end. 78 | 79 | ringready([]) -> 80 | try 81 | case riak_core_status:ringready() of 82 | {ok, Nodes} -> 83 | io:format("TRUE All nodes agree on the ring ~p\n", [Nodes]); 84 | {error, {different_owners, N1, N2}} -> 85 | io:format("FALSE Node ~p and ~p list different partition owners\n", [N1, N2]), 86 | error; 87 | {error, {nodes_down, Down}} -> 88 | io:format("FALSE ~p down. All nodes need to be up to check.\n", [Down]), 89 | error 90 | end 91 | catch 92 | Exception:Reason -> 93 | lager:error("Ringready failed ~p:~p", [Exception, 94 | Reason]), 95 | io:format("Ringready failed, see log for details~n"), 96 | error 97 | end. 98 | -------------------------------------------------------------------------------- /rebar3_riak_core_editorconfig.tpl: -------------------------------------------------------------------------------- 1 | [*.fn] 2 | end_of_line = lf 3 | insert_final_newline = true 4 | charset = utf-8 5 | indent_style = space 6 | indent_size = 2 7 | -------------------------------------------------------------------------------- /rebar3_riak_core_gitignore.tpl: -------------------------------------------------------------------------------- 1 | .rebar3 2 | rebar3 3 | _* 4 | .eunit 5 | *.o 6 | *.beam 7 | *.plt 8 | *.swp 9 | *.swo 10 | .erlang.cookie 11 | ebin 12 | log 13 | erl_crash.dump 14 | .rebar 15 | _rel 16 | _deps 17 | _plugins 18 | _tdeps 19 | logs 20 | -------------------------------------------------------------------------------- /rebar3_riak_core_rebar.config.tpl: -------------------------------------------------------------------------------- 1 | %% -*- mode: erlang; -*- 2 | {erl_opts, [debug_info, {parse_transform, lager_transform}]}. 3 | 4 | {deps, [lager, recon, {riak_core, {pkg, riak_core_ng}}]}. 
5 | 6 | {relx, [{release, { {{ name }} , "0.1.0"}, 7 | [{{ name }}, 8 | cuttlefish, 9 | sasl]}, 10 | 11 | {dev_mode, true}, 12 | {include_erts, false}, 13 | 14 | {overlay_vars, "config/vars.config"}, 15 | {overlay, [ 16 | {mkdir, "etc"}, 17 | {mkdir, "bin"}, 18 | {mkdir, "data/ring"}, 19 | {mkdir, "log/sasl"}, 20 | {template, "./config/admin_bin", "bin/{{ name }}-admin"}, 21 | {template, "./config/advanced.config", "etc/advanced.config"}, 22 | {template, "./priv/01-{{ name }}.schema", "share/schema/01-{{ name }}.schema"}, 23 | {template, "./config/erlang_vm.schema", "share/schema/03-vm.schema"}, 24 | {template, "./config/riak_core.schema", "share/schema/04-riak_core.schema"}, 25 | {template, "./config/lager.schema", "share/schema/05-lager.schema"} 26 | ]} 27 | ]}. 28 | 29 | {plugins, [rebar3_run]}. 30 | 31 | {project_plugins, [rebar3_cuttlefish]}. 32 | 33 | {cuttlefish, [{schema_discovery, false}]}. 34 | 35 | {profiles, [ 36 | {prod, [{relx, [{dev_mode, false}, {include_erts, true}]}]}, 37 | {dev1, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_dev1.config"]}]}]}, 38 | {dev2, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_dev2.config"]}]}]}, 39 | {dev3, [{relx, [{overlay_vars, ["config/vars.config", "config/vars_dev3.config"]}]}]} 40 | ]}. 41 | 42 | {overrides, 43 | [{override, eleveldb, 44 | [ 45 | {artifacts, ["priv/eleveldb.so"]}, 46 | {pre_hooks, [{compile, "c_src/build_deps.sh get-deps"}, 47 | {compile, "c_src/build_deps.sh"}]}, 48 | 49 | {post_hooks, [{clean, "c_src/build_deps.sh clean"}]}, 50 | 51 | {plugins, [pc]}, 52 | 53 | {provider_hooks, [{post, 54 | [{compile, {pc, compile}}, 55 | {clean, {pc, clean}} 56 | ] 57 | }] 58 | } 59 | ] 60 | }, 61 | {override, riak_ensemble, 62 | [ 63 | {artifacts, ["priv/riak_ensemble_drv.so"]}, 64 | {plugins, [pc]}, 65 | {provider_hooks, [{post, 66 | [{compile, {pc, compile}}, 67 | {clean, {pc, clean}} 68 | ]}]}, 69 | {erl_opts, [debug_info, 70 | warn_untyped_record, 71 | {parse_transform, lager_transform}]} 72 | ]}, 73 | {del, riak_core, [{erl_opts, [warnings_as_errors]}]}, 74 | {del, poolboy, [{erl_opts, [warnings_as_errors]}]}, 75 | {override, cuttlefish, 76 | [{escript_emu_args, "%%! -escript main cuttlefish_escript +S 1 +A 0\n"}]} 77 | ]}. 78 | -------------------------------------------------------------------------------- /rebar3_riak_core_sup.erl.tpl: -------------------------------------------------------------------------------- 1 | -module({{name}}_sup). 2 | 3 | -behaviour(supervisor). 4 | 5 | %% API 6 | -export([start_link/0]). 7 | 8 | %% Supervisor callbacks 9 | -export([init/1]). 10 | 11 | %% =================================================================== 12 | %% API functions 13 | %% =================================================================== 14 | 15 | start_link() -> 16 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 17 | 18 | %% =================================================================== 19 | %% Supervisor callbacks 20 | %% =================================================================== 21 | 22 | init(_Args) -> 23 | VMaster = { {{name}}_vnode_master, 24 | {riak_core_vnode_master, start_link, [{{name}}_vnode]}, 25 | permanent, 5000, worker, [riak_core_vnode_master]}, 26 | 27 | { ok, 28 | { {one_for_one, 5, 10}, 29 | [VMaster]}}. 30 | -------------------------------------------------------------------------------- /rebar3_riak_core_vars.config.tpl: -------------------------------------------------------------------------------- 1 | {cuttlefish_conf, "{{ name }}.conf"}. 
2 | {rel_name, "{{ name }}"}. 3 | {node, "{{ name }}@127.0.0.1"}. 4 | 5 | {web_ip, "127.0.0.1"}. 6 | {web_port, 8098}. 7 | {handoff_port, 8099}. 8 | {handoff_ip, "127.0.0.1"}. 9 | {sasl_error_log, "./log/sasl-error.log"}. 10 | {sasl_log_dir, "./log/sasl"}. 11 | 12 | {log_path, "./log"}. 13 | {service, "{{ name }}"}. 14 | 15 | {run_user_home, "$HOME"}. 16 | 17 | {platform_bin_dir, "./bin"}. 18 | {platform_data_dir, "../{{ name }}_data"}. 19 | {platform_etc_dir, "../{{ name }}_config"}. 20 | {platform_lib_dir, "./lib"}. 21 | {platform_log_dir, "./log"}. 22 | 23 | {crash_dump, "erl_crash.dump"}. 24 | -------------------------------------------------------------------------------- /rebar3_riak_core_vars_dev1.config.tpl: -------------------------------------------------------------------------------- 1 | {node, "{{ name }}1@127.0.0.1"}. 2 | 3 | {web_port, 8198}. 4 | {handoff_port, 8199}. 5 | -------------------------------------------------------------------------------- /rebar3_riak_core_vars_dev2.config.tpl: -------------------------------------------------------------------------------- 1 | {node, "{{ name }}2@127.0.0.1"}. 2 | 3 | {web_port, 8298}. 4 | {handoff_port, 8299}. 5 | -------------------------------------------------------------------------------- /rebar3_riak_core_vars_dev3.config.tpl: -------------------------------------------------------------------------------- 1 | {node, "{{ name }}3@127.0.0.1"}. 2 | 3 | {web_port, 8398}. 4 | {handoff_port, 8399}. 5 | -------------------------------------------------------------------------------- /rebar3_riak_core_vnode.erl.tpl: -------------------------------------------------------------------------------- 1 | -module({{name}}_vnode). 2 | -behaviour(riak_core_vnode). 3 | 4 | -export([start_vnode/1, 5 | init/1, 6 | terminate/2, 7 | handle_command/3, 8 | is_empty/1, 9 | delete/1, 10 | handle_handoff_command/3, 11 | handoff_starting/2, 12 | handoff_cancelled/1, 13 | handoff_finished/2, 14 | handle_handoff_data/2, 15 | encode_handoff_item/2, 16 | handle_overload_command/3, 17 | handle_overload_info/2, 18 | handle_coverage/4, 19 | handle_exit/3]). 20 | 21 | -ignore_xref([ 22 | start_vnode/1 23 | ]). 24 | 25 | -record(state, {partition}). 26 | 27 | %% API 28 | start_vnode(I) -> 29 | riak_core_vnode_master:get_vnode_pid(I, ?MODULE). 30 | 31 | init([Partition]) -> 32 | {ok, #state { partition=Partition }}. 33 | 34 | %% Sample command: respond to a ping 35 | handle_command(ping, _Sender, State) -> 36 | {reply, {pong, State#state.partition}, State}; 37 | handle_command(Message, _Sender, State) -> 38 | lager:warning("unhandled_command ~p", [Message]), 39 | {noreply, State}. 40 | 41 | handle_handoff_command(_Message, _Sender, State) -> 42 | {noreply, State}. 43 | 44 | handoff_starting(_TargetNode, State) -> 45 | {true, State}. 46 | 47 | handoff_cancelled(State) -> 48 | {ok, State}. 49 | 50 | handoff_finished(_TargetNode, State) -> 51 | {ok, State}. 52 | 53 | handle_handoff_data(_Data, State) -> 54 | {reply, ok, State}. 55 | 56 | encode_handoff_item(_ObjectName, _ObjectValue) -> 57 | <<>>. 58 | 59 | handle_overload_command(_, _, _) -> 60 | ok. 61 | 62 | handle_overload_info(_, _Idx) -> 63 | ok. 64 | 65 | is_empty(State) -> 66 | {true, State}. 67 | 68 | delete(State) -> 69 | {ok, State}. 70 | 71 | handle_coverage(_Req, _KeySpaces, _Sender, State) -> 72 | {stop, not_implemented, State}. 73 | 74 | handle_exit(_Pid, _Reason, State) -> 75 | {noreply, State}. 76 | 77 | terminate(_Reason, _State) -> 78 | ok. 
79 | --------------------------------------------------------------------------------
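Illustrative note: the {{name}}_vnode module above answers a ping command with {pong, Partition}, and the {{ name }}:ping() call shown in the README reaches it through riak_core's request routing. Below is a minimal sketch of the kind of client-side ping/0 a riak_core application typically exposes; the generated {{name}}.erl (produced from rebar3_riak_core.erl.tpl, not reproduced here) may differ in detail. The service name {{name}} and the registered process {{name}}_vnode_master are taken from the _app and _sup modules above.

.. code:: erlang

    %% Sketch only: route a ping request to a single vnode.
    ping() ->
        %% Hash an arbitrary, unique key onto the ring to pick a partition.
        DocIdx = riak_core_util:chash_key({<<"ping">>, term_to_binary(os:timestamp())}),
        %% Look up one primary vnode for the {{name}} service
        %% (registered via riak_core_node_watcher:service_up/2 in {{name}}_app).
        [{IndexNode, _Type}] = riak_core_apl:get_primary_apl(DocIdx, 1, {{name}}),
        %% Send the command; {{name}}_vnode:handle_command(ping, _Sender, State)
        %% replies {pong, Partition}, which matches the README output.
        riak_core_vnode_master:sync_spawn_command(IndexNode, ping, {{name}}_vnode_master).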