├── .gitignore ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── cover.spec ├── rebar ├── rebar.config ├── rebar.tests.config ├── src ├── lethink.app.src ├── lethink.erl ├── lethink_app.erl ├── lethink_ast.erl ├── lethink_server.erl ├── lethink_sup.erl ├── lethink_worker.erl ├── lethink_workers_sup.erl ├── ql2.proto └── ql2_util.erl └── test ├── database_SUITE.erl ├── functions_SUITE.erl ├── lethink_ast_tests.erl └── math_and_logic_SUITE.erl /.gitignore: -------------------------------------------------------------------------------- 1 | *.plt 2 | deps 3 | ebin 4 | doc 5 | .eunit 6 | include 7 | logs 8 | test/*.beam 9 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: erlang 2 | otp_release: 3 | - R16B 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2013 Taybin Rutkin 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PROJECT=lethink 2 | REBAR=`which rebar || printf ./rebar` 3 | 4 | # Main 5 | 6 | .PHONY: all 7 | all: get-deps compile 8 | 9 | .PHONY: clean 10 | clean: 11 | @$(REBAR) clean 12 | 13 | .PHONY: distclean 14 | distclean: clean-docs 15 | rm -fr deps/ 16 | 17 | .PHONY: get-deps 18 | get-deps: 19 | @$(REBAR) get-deps 20 | 21 | .PHONY: compile 22 | compile: 23 | @$(REBAR) compile 24 | 25 | # Docs 26 | 27 | docs: clean-docs 28 | @$(REBAR) doc skip_deps=true 29 | 30 | clean-docs: 31 | rm -f doc/*.css 32 | rm -f doc/*.html 33 | rm -f doc/*.png 34 | rm -f doc/edoc-info 35 | 36 | # Tests. 37 | 38 | deps/proper: 39 | @$(REBAR) -C rebar.tests.config get-deps 40 | cd deps/proper && $(REBAR) compile 41 | 42 | tests: clean deps/proper all eunit ct 43 | 44 | eunit: all 45 | @$(REBAR) eunit skip_deps=true 46 | 47 | ct: all 48 | @$(REBAR) ct skip_deps=true 49 | 50 | # Dialyzer 51 | 52 | APPS = kernel stdlib sasl erts ssl tools os_mon runtime_tools crypto inets \ 53 | xmerl webtool snmp public_key mnesia eunit syntax_tools compiler 54 | COMBO_PLT = .$(PROJECT).plt 55 | 56 | check_plt: compile 57 | dialyzer --check_plt --plt $(COMBO_PLT) --apps $(APPS) deps ebin 58 | 59 | build_plt: compile 60 | dialyzer --build_plt --output_plt $(COMBO_PLT) --apps $(APPS) deps ebin 61 | 62 | dialyzer: compile 63 | dialyzer -Wno_return --plt $(COMBO_PLT) ebin 64 | 65 | # xref 66 | 67 | xref: 68 | @$(REBAR) xref skip_deps=true 69 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Lethink 2 | ======= 3 | 4 | An erlang driver for [rethinkdb](http://rethinkdb.com). 
5 | 6 | Status 7 | ------ 8 | [![Build Status](https://travis-ci.org/taybin/lethink.png?branch=master)](https://travis-ci.org/taybin/lethink) 9 | 10 | Use 11 | --- 12 | 13 | ``` 14 | > lethink:start(). 15 | > application:set_env(lethink, timeout, 60000). 16 | > lethink:add_pool(my_db_pool, 5, [{address, "localhost"}, {port, 28015}]). 17 | > lethink:query(my_db_pool, [{db_create, <<"superheroes">>}]). 18 | > lethink:query(my_db_pool, [{db_list}]). 19 | > lethink:query(my_db_pool, [{db_drop, <<"superheroes">>}]). 20 | > lethink:query(my_db_pool, [{db, <<"superheroes">>}, {table_create, <<"marvel">>}]). 21 | > lethink:query(my_db_pool, [{db, <<"superheroes">>}, 22 | {table_create, <<"marvel">>, [{primary_key, <<"name">>}]}]). 23 | > lethink:query(my_db_pool, [{db, <<"superheroes">>}, {table_list}]). 24 | > lethink:query(my_db_pool, [{db, <<"superheroes">>}, {table_drop, <<"marvel">>}]). 25 | > lethink:use(my_db_pool, <<"superheroes">>). 26 | > JsonProplist = [{[{<<"id">>, 5}, {<<"name">>, <<"batman">>}, {<<"rich">>, true}, {<<"cars">>, [1,2,3]}]}, 27 | {[{<<"id">>, 6}, {<<"name">>, <<"robin">>}, {<<"rich">>, false}, {<<"cars">>, null}]}]. 28 | > lethink:query(my_db_pool, [{table, <<"marvel">>}, 29 | {insert, JsonProplist}]). 30 | > lethink:query(my_db_pool, [{table, <<"marvel">>}, {get, 5}]). 31 | ``` 32 | -------------------------------------------------------------------------------- /cover.spec: -------------------------------------------------------------------------------- 1 | %% Cover data file to export from this session. 2 | %% CoverDataFile = string() 3 | {export, "/tmp/lethink/coverage/ct_data.cover"}. 4 | 5 | %% Cover analysis level. 6 | %% Level = details | overview 7 | {level, details}. 8 | 9 | %% Specific modules to include in cover. 10 | %% Mods = [atom()] 11 | {incl_mods, [ 12 | lethink, 13 | lethink_app, 14 | lethink_ast, 15 | lethink_server, 16 | lethink_sup, 17 | lethink_worker, 18 | lethink_workers_sup 19 | ]}. 20 | 21 | %% Cross cover compilation 22 | %% Tag = atom(), an identifier for a test run 23 | %% Mod = [atom()], modules to compile for accumulated analysis 24 | %% {cross,[{Tag,Mods}]}. 25 | -------------------------------------------------------------------------------- /rebar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/taybin/lethink/f90986dde34f5910c82d5e6bb5e541d7a6fd0c03/rebar -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | %% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*- 2 | %% %% ex: ts=4 sw=4 ft=erlang et 3 | 4 | %% Erlang compiler options 5 | {erl_opts, [bin_opt_info, 6 | warn_missing_spec, 7 | warn_export_all]}. 8 | 9 | %% Eunit compiler options 10 | {eunit_compile_opts, [nowarn_export_all, 11 | nowarn_missing_spec]}. 12 | 13 | {deps, [ 14 | {protobuffs, ".*", {git, "https://github.com/basho/erlang_protobuffs", "master"}} 15 | ] 16 | }. 17 | 18 | %% Option to pass extra parameters when launching Common Test 19 | {ct_extra_params, "-cover cover.spec"}.
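%% The Makefile's `ct` target runs these settings via `rebar ct skip_deps=true`;
%% the "-cover cover.spec" extra parameter above is what points Common Test
%% at the cover.spec file in the project root.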
20 | -------------------------------------------------------------------------------- /rebar.tests.config: -------------------------------------------------------------------------------- 1 | %% -*- mode: erlang;erlang-indent-level: 4;indent-tabs-mode: nil -*- 2 | %% %% ex: ts=4 sw=4 ft=erlang et 3 | 4 | {deps, [ 5 | {proper, ".*", {git, "https://github.com/manopapad/proper", "master"}} 6 | ]}. 7 | -------------------------------------------------------------------------------- /src/lethink.app.src: -------------------------------------------------------------------------------- 1 | {application, lethink, 2 | [ 3 | {description, ""}, 4 | {vsn, "1"}, 5 | {registered, []}, 6 | {applications, [ 7 | kernel, 8 | stdlib 9 | ]}, 10 | {mod, { lethink_app, []}}, 11 | {env, [{timeout, 30000}]} 12 | ]}. 13 | -------------------------------------------------------------------------------- /src/lethink.erl: -------------------------------------------------------------------------------- 1 | -module(lethink). 2 | 3 | -export([ 4 | start/0, 5 | stop/0, 6 | add_pool/2, 7 | add_pool/3, 8 | remove_pool/1, 9 | use/2, 10 | query/2 11 | ]). 12 | 13 | -type response() :: success() | error(). 14 | -type success() :: {ok, any()}. 15 | -type error() :: {error, binary(), atom(), any()}. 16 | 17 | -type document() :: {[keyvalue()]}. 18 | -type keyvalue() :: {binary(), json_term()}. 19 | -type json_term() :: null | boolean() | number() | binary() | document() | [json_term()]. 20 | 21 | -type connect_options() :: {address, inet:ip_address() | inet:hostname()} | 22 | {port, inet:port_number()} | 23 | {auth_key, binary()} | 24 | {database, binary()}. 25 | 26 | -type table_options() :: {datacenter, binary()} | 27 | {primary_key, binary()} | 28 | {cache_size, pos_integer()}| 29 | {index, binary()}. 30 | 31 | -type insert_options() :: {upsert, binary()}. 32 | 33 | -export_type([response/0, 34 | success/0, 35 | error/0, 36 | connect_options/0, 37 | table_options/0, 38 | insert_options/0, 39 | document/0, 40 | keyvalue/0, 41 | json_term/0]). 42 | 43 | -spec start() -> ok. 44 | start() -> 45 | application:start(lethink), 46 | ok. 47 | 48 | -spec stop() -> ok. 49 | stop() -> 50 | application:stop(lethink), 51 | ok. 52 | 53 | %% @equiv add_pool(any(), pos_integer(), [{address, "localhost"}, {port, 28015}, {database, <<"test">>}]) 54 | -spec add_pool(any(), pos_integer()) -> ok. 55 | add_pool(Ref, NWorkers) when NWorkers > 0 -> 56 | add_pool(Ref, NWorkers, []). 57 | 58 | %% @doc Start a pool of connections to a database. 59 | -spec add_pool(any(), pos_integer(), [connect_options()]) -> ok. 60 | add_pool(Ref, NWorkers, Opts) when NWorkers > 0 -> 61 | ok = lethink_server:add_pool(Ref), 62 | {ok, SupPid} = supervisor:start_child(lethink_sup, 63 | {{lethink_workers_sup, Ref}, {lethink_workers_sup, start_link, []}, 64 | permanent, 5000, supervisor, [lethink_workers_sup]}), 65 | _ = [begin 66 | {ok, _} = supervisor:start_child( 67 | SupPid, [Ref, Opts]) 68 | end || _ <- lists:seq(1, NWorkers)], 69 | ok. 70 | 71 | %% @doc Stop a pool of connections. 72 | -spec remove_pool(any()) -> ok. 73 | remove_pool(Ref) -> 74 | case supervisor:terminate_child(lethink_sup, {lethink_workers_sup, Ref}) of 75 | ok -> 76 | supervisor:delete_child(lethink_sup, {lethink_workers_sup, Ref}); 77 | {error, Reason} -> 78 | {error, Reason} 79 | end. 80 | 81 | %% @doc Change all connections in pool to use database for queries. 82 | -spec use(any(), binary()) -> ok. 
83 | use(Ref, Db) when is_binary(Db) -> 84 | WorkerPids = lethink_server:get_all_workers(Ref), 85 | lists:foreach(fun(Pid) -> 86 | lethink_worker:use(Pid, Db) 87 | end, WorkerPids). 88 | 89 | -spec query(any(), list()) -> response(). 90 | query(Ref, OpList) -> 91 | Term = lethink_ast:build_query(OpList), 92 | WorkerPid = lethink_server:get_worker(Ref), 93 | lethink_worker:query(WorkerPid, Term). 94 | -------------------------------------------------------------------------------- /src/lethink_app.erl: -------------------------------------------------------------------------------- 1 | %% @private 2 | -module(lethink_app). 3 | 4 | -behaviour(application). 5 | 6 | %% Application callbacks 7 | -export([start/2, stop/1]). 8 | 9 | %% =================================================================== 10 | %% Application callbacks 11 | %% =================================================================== 12 | 13 | -spec start(atom(), [any()]) -> {error, any()} | {ok, pid()} | {ok, pid(), any()}. 14 | start(_StartType, _StartArgs) -> 15 | lethink_sup:start_link(). 16 | 17 | -spec stop(any()) -> ok. 18 | stop(_State) -> 19 | ok. 20 | -------------------------------------------------------------------------------- /src/lethink_ast.erl: -------------------------------------------------------------------------------- 1 | -module(lethink_ast). 2 | 3 | -export([build_query/1]). 4 | 5 | % @private 6 | -export([db_create/2, 7 | db_drop/2, 8 | db_list/1, 9 | table_create/2, table_create/3, 10 | index_create/2, index_create/3, 11 | table_drop/2, 12 | table_list/1, 13 | db/2, 14 | row/1, 15 | get_field/2, 16 | table/2, table/3, 17 | insert/2, insert/3, 18 | get/2, 19 | between/3, 20 | between/4, 21 | filter/2, 22 | update/2, 23 | expr/1, expr/2, 24 | func/1, 25 | var/2, 26 | add/2, 27 | sub/2, 28 | mul/2, 29 | div_/2, 30 | mod/2, 31 | and_/2, 32 | or_/2, 33 | eq/2, 34 | ne/2, 35 | gt/2, 36 | ge/2, 37 | lt/2, 38 | le/2, 39 | not_/1]). 40 | 41 | -ifdef(TEST). 42 | -compile(export_all). 43 | -endif. 44 | 45 | -include("ql2_pb.hrl"). 46 | 47 | -type build_result() :: #term{} | {error, binary()}. 48 | 49 | %% @doc Build rethinkdb query from operation list 50 | -spec build_query([tuple()]) -> build_result(). 51 | build_query(QueryList) -> 52 | apply_seq(QueryList, []). 53 | 54 | %% @private 55 | %% @doc foldl-inspired poor man's monad 56 | -spec apply_seq([tuple()], [] | #term{} | {error, any()}) -> build_result(). 57 | apply_seq(_, {error, Reason}) -> 58 | {error, Reason}; 59 | apply_seq([T | Ts], Term) -> 60 | [Fun | Args] = tuple_to_list(erlang:append_element(T, Term)), 61 | apply_seq(Ts, apply(?MODULE, Fun, Args)); 62 | apply_seq([], Result) -> Result. 63 | 64 | -spec db_create(binary(), []) -> build_result(). 65 | db_create(Name, []) when is_binary(Name) -> 66 | #term{ 67 | type = 'DB_CREATE', 68 | args = expr(Name) 69 | }; 70 | db_create(Name, _) when is_list(Name) -> 71 | {error, <<"db_create name must be binary">>}; 72 | db_create(_, _) -> 73 | {error, <<"db_create stands alone">>}. 74 | 75 | -spec db_drop(binary(), []) -> build_result(). 76 | db_drop(Name, []) when is_binary(Name) -> 77 | #term{ 78 | type = 'DB_DROP', 79 | args = expr(Name) 80 | }. 81 | 82 | -spec db_list([]) -> build_result(). 83 | db_list([]) -> 84 | #term{ 85 | type = 'DB_LIST' 86 | }. 87 | 88 | -spec table_create(binary(), #term{} | []) -> build_result(). 89 | table_create(Name, Term) -> 90 | table_create(Name, [], Term). 91 | 92 | -spec table_create(binary(), [lethink:table_options()], #term{} | []) -> build_result(). 
93 | table_create(Name, Options, []) when is_binary(Name) -> 94 | #term{ 95 | type = 'TABLE_CREATE', 96 | args = expr(Name), 97 | optargs = [ table_option_term(Opt) || Opt <- Options ] 98 | }; 99 | table_create(Name, Options, #term{ type = 'DB' } = Db) when is_binary(Name) -> 100 | #term{ 101 | type = 'TABLE_CREATE', 102 | args = [Db, expr(Name)], 103 | optargs = [ table_option_term(Opt) || Opt <- Options ] 104 | }. 105 | 106 | -spec index_create(binary(), #term{} | []) -> build_result(). 107 | index_create(Name, Term) -> 108 | index_create(Name, [], Term). 109 | 110 | -spec index_create(binary(), [lethink:table_options()], #term{} | []) -> build_result(). 111 | index_create(Name, Options, []) -> 112 | #term{ 113 | type = 'INDEX_CREATE', 114 | args = expr(Name), 115 | optargs = [ index_option_term(Opt) || Opt <- Options ] 116 | 117 | }; 118 | index_create(Name, Options, #term{ type = 'TABLE' } = Table) when is_binary(Name) -> 119 | #term{ 120 | type = 'INDEX_CREATE', 121 | args = [Table, expr(Name)], 122 | optargs = [ index_option_term(Opt) || Opt <- Options ] 123 | }. 124 | 125 | %% @private 126 | -spec table_option_term(lethink:table_options()) -> #term_assocpair{}. 127 | table_option_term({datacenter, Value}) when is_binary(Value) -> 128 | term_assocpair(atom_to_binary(datacenter, utf8), Value); 129 | table_option_term({primary_key, Value}) when is_binary(Value) -> 130 | term_assocpair(atom_to_binary(primary_key, utf8), Value); 131 | table_option_term({index, Value}) when is_binary(Value) -> 132 | term_assocpair(atom_to_binary(index, utf8), Value); 133 | table_option_term({cache_size, Value}) when is_integer(Value) -> 134 | term_assocpair(atom_to_binary(cache_size, utf8), Value). 135 | 136 | -spec index_option_term(lethink:index_options()) -> #term_assocpair{}. 137 | index_option_term(_Opt) -> 138 | %% TODO 139 | #term_assocpair{}. 140 | 141 | -spec table_drop(binary(), [] | #term{}) -> build_result(). 142 | table_drop(Name, []) when is_binary(Name) -> 143 | #term{ 144 | type = 'TABLE_DROP', 145 | args = expr(Name) 146 | }; 147 | table_drop(Name, #term{ type = 'DB' } = Db) when is_binary(Name) -> 148 | #term{ 149 | type = 'TABLE_DROP', 150 | args = [ Db, expr(Name) ] 151 | }. 152 | 153 | -spec table_list([] | #term{}) -> build_result(). 154 | table_list([]) -> 155 | #term{ 156 | type = 'TABLE_LIST' 157 | }; 158 | table_list(#term{ type = 'DB' } = Db) -> 159 | #term{ 160 | type = 'TABLE_LIST', 161 | args = Db 162 | }. 163 | 164 | %% @private 165 | %% @doc Specify a DB. Must be first operation in query list 166 | %% Optional if a default database has been specified via 167 | %% @see lethink:use/2 168 | -spec db(binary(), []) -> build_result(). 169 | db(Name, []) when is_binary(Name) -> 170 | #term { 171 | type = 'DB', 172 | args = expr(Name) 173 | }; 174 | db(Name, _) when is_list(Name) -> 175 | {error, <<"Db name must be binary">>}; 176 | db(_, _) -> 177 | {error, <<"Db must be first operation in query list">>}. 178 | 179 | -spec table(binary(), [] | #term{}) -> build_result(). 180 | table(Name, []) when is_binary(Name) -> 181 | table(Name, false, []); 182 | table(Name, #term{ type = 'DB' } = Db) when is_binary(Name) -> 183 | table(Name, false, Db); 184 | table(Name, _) when is_list(Name) -> 185 | {error, <<"Table name must be binary">>}; 186 | table(_, _) -> 187 | {error, <<"Table can either start or follow db operation">>}. 188 | 189 | -spec table(binary(), boolean(), [] | #term{}) -> build_result(). 
190 | table(Name, UseOutdated, []) when is_binary(Name) -> 191 | #term { 192 | type = 'TABLE', 193 | args = expr(Name), 194 | optargs = [term_assocpair(<<"use_outdated">>, UseOutdated)] 195 | }; 196 | table(Name, UseOutdated, #term{ type = 'DB' } = Db) when is_binary(Name) -> 197 | #term { 198 | type = 'TABLE', 199 | args = [Db, expr(Name)], 200 | optargs = [term_assocpair(<<"use_outdated">>, UseOutdated)] 201 | }; 202 | table(Name, _, _) when is_list(Name) -> 203 | {error, <<"Table name must be binary">>}; 204 | table(_, _, _) -> 205 | {error, <<"Table can either start or follow db operation">>}. 206 | 207 | -spec insert(lethink:document(), #term{}) -> build_result(). 208 | insert(Data, #term{ type = 'TABLE' } = Table) -> 209 | #term { 210 | type = 'INSERT', 211 | args = [Table, expr(Data)] 212 | }; 213 | insert(_, _) -> 214 | {error, <<"insert must follow table operator">>}. 215 | 216 | -spec insert(lethink:document(), [lethink:insert_options()], #term{}) -> build_result(). 217 | insert(Data, Options, #term{ type = 'TABLE' } = Table) -> 218 | #term { 219 | type = 'INSERT', 220 | args = [Table, expr(Data)], 221 | optargs = [ insert_option_term(Opt) || Opt <- Options ] 222 | }; 223 | insert(_, _, _) -> 224 | {error, <<"insert must follow table operator">>}. 225 | 226 | %% @private 227 | -spec insert_option_term(lethink:insert_options()) -> #term_assocpair{}. 228 | insert_option_term({upsert, Value}) when is_binary(Value) -> 229 | term_assocpair(atom_to_binary(upsert, utf8), Value). 230 | 231 | -spec get(binary() | number(), #term{}) -> build_result(). 232 | get(Key, #term{ type = 'TABLE' } = Table) when is_binary(Key); is_number(Key) -> 233 | #term { 234 | type = 'GET', 235 | args = [Table, expr(Key)] 236 | }; 237 | get(Key, _) when is_list(Key) -> 238 | {error, <<"get key must be binary or number">>}; 239 | get(_, _) -> 240 | {error, <<"get must follow table operator">>}. 241 | 242 | -spec between(binary() | number(), binary() | number(), #term{}) -> build_result(). 243 | between(Value1, Value2, Term) -> 244 | between(Value1, Value2, [], Term). 245 | 246 | -spec between(binary() | number(), binary() | number(), [lethink:table_options()], #term{}) -> build_result(). 247 | between(Value1, Value2, Options, #term{ type = 'TABLE' } = Table) -> 248 | #term { 249 | type = 'BETWEEN', 250 | args = [Table, expr(Value1), expr(Value2)], 251 | optargs = [table_option_term(Opt) || Opt <- Options] 252 | }; 253 | between(_, _, _, _) -> 254 | {error, "between must follow table operator"}. 255 | 256 | -spec filter(lethink:document(), #term{}) -> build_result(). 257 | filter(Value, #term{ type = Type } = Selection) when 258 | Type == 'TABLE'; Type == 'GET'; 259 | Type == 'BETWEEN' -> 260 | #term { 261 | type = 'FILTER', 262 | args = [Selection, expr(Value)] 263 | }; 264 | filter(_, _) -> 265 | {error, "filter must follow table, get or between operator"}. 266 | 267 | -spec update(lethink:document() | fun(), #term{}) -> build_result(). 268 | update(Data, #term{ type = Type } = Selection) when 269 | Type == 'TABLE'; Type == 'GET'; 270 | Type == 'BETWEEN'; Type == 'FILTER' -> 271 | #term { 272 | type = 'UPDATE', 273 | args = [Selection, func_wrap(Data)] 274 | }. 275 | 276 | -spec row([]) -> build_result(). 277 | row([]) -> 278 | #term { 279 | type = 'IMPLICIT_VAR' 280 | }. 281 | 282 | -spec get_field(binary(), #term{}) -> build_result(). 283 | get_field(Attr, Term) -> 284 | #term { 285 | type = 'GET_FIELD', 286 | args = [Term, expr(Attr)] 287 | }. 
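%% For illustration only (values taken from the README), an op list such as
%%   lethink_ast:build_query([{db, <<"superheroes">>}, {table, <<"marvel">>}, {get, 5}])
%% is threaded through apply_seq/2: each operation receives the #term{} built so
%% far as its last argument, so the result nests roughly as
%%   #term{type = 'GET',
%%         args = [#term{type = 'TABLE',
%%                       args = [#term{type = 'DB', args = #term{type = 'DATUM', ...}},
%%                               #term{type = 'DATUM', ...}],
%%                       optargs = [#term_assocpair{key = <<"use_outdated">>, ...}]},
%%                 #term{type = 'DATUM', datum = #datum{type = 'R_NUM', r_num = 5}}]}
%% which mirrors r.db('superheroes').table('marvel').get(5) in the official drivers.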
288 | 289 | %% Math and Logic Operations 290 | 291 | -spec add(number() | binary(), #term{}) -> build_result(). 292 | add(Value, Term) when is_number(Value); is_binary(Value) -> 293 | #term { 294 | type = 'ADD', 295 | args = [Term, expr(Value)] 296 | }. 297 | 298 | -spec sub(number(), #term{}) -> build_result(). 299 | sub(Value, Term) when is_number(Value) -> 300 | #term { 301 | type = 'SUB', 302 | args = [Term, expr(Value)] 303 | }. 304 | 305 | -spec mul(number(), #term{}) -> build_result(). 306 | mul(Value, Term) when is_number(Value) -> 307 | #term { 308 | type = 'MUL', 309 | args = [Term, expr(Value)] 310 | }. 311 | 312 | -spec div_(number(), #term{}) -> build_result(). 313 | div_(Value, Term) when is_number(Value) -> 314 | #term { 315 | type = 'DIV', 316 | args = [Term, expr(Value)] 317 | }. 318 | 319 | -spec mod(number(), #term{}) -> build_result(). 320 | mod(Value, Term) when is_number(Value) -> 321 | #term { 322 | type = 'MOD', 323 | args = [Term, expr(Value)] 324 | }. 325 | 326 | -spec and_(boolean(), #term{}) -> build_result(). 327 | and_(Value, Term) when is_boolean(Value) -> 328 | #term { 329 | type = 'AND', 330 | args = [Term, expr(Value)] 331 | }. 332 | 333 | -spec or_(boolean(), #term{}) -> build_result(). 334 | or_(Value, Term) when is_boolean(Value) -> 335 | #term { 336 | type = 'OR', 337 | args = [Term, expr(Value)] 338 | }. 339 | 340 | -spec eq(lethink:json_term(), #term{}) -> build_result(). 341 | eq(Value, Term) -> 342 | #term { 343 | type = 'EQ', 344 | args = [Term, expr(Value)] 345 | }. 346 | 347 | -spec ne(lethink:json_term(), #term{}) -> build_result(). 348 | ne(Value, Term) -> 349 | #term { 350 | type = 'NE', 351 | args = [Term, expr(Value)] 352 | }. 353 | 354 | -spec gt(lethink:json_term(), #term{}) -> build_result(). 355 | gt(Value, Term) -> 356 | #term { 357 | type = 'GT', 358 | args = [Term, expr(Value)] 359 | }. 360 | 361 | -spec ge(lethink:json_term(), #term{}) -> build_result(). 362 | ge(Value, Term) -> 363 | #term { 364 | type = 'GE', 365 | args = [Term, expr(Value)] 366 | }. 367 | 368 | -spec lt(lethink:json_term(), #term{}) -> build_result(). 369 | lt(Value, Term) -> 370 | #term { 371 | type = 'LT', 372 | args = [Term, expr(Value)] 373 | }. 374 | 375 | -spec le(lethink:json_term(), #term{}) -> build_result(). 376 | le(Value, Term) -> 377 | #term { 378 | type = 'LE', 379 | args = [Term, expr(Value)] 380 | }. 381 | 382 | -spec not_(#term{}) -> build_result(). 383 | not_(Term) -> 384 | #term { 385 | type = 'NOT', 386 | args = [Term] 387 | }. 388 | 389 | -spec expr(lethink:json_term(), []) -> build_result(). 390 | expr(Data, []) -> 391 | expr(Data). 392 | 393 | -spec expr(lethink:keyvalue() | lethink:json_term() | fun() | #term{} | #term_assocpair{}) -> #term{} | #term_assocpair{}. 394 | expr(Item = #term{}) -> 395 | Item; 396 | expr(Item = #term_assocpair{}) -> 397 | Item; 398 | expr({Items}) when is_list(Items) -> 399 | #term { 400 | type = 'MAKE_OBJ', 401 | optargs = [ expr({K, V}) || {K, V} <- Items ] 402 | }; 403 | expr({Key, Value}) -> 404 | term_assocpair(Key, Value); 405 | expr(Items) when is_list(Items) -> 406 | case lists:all(fun is_json/1, Items) of 407 | true -> make_array(Items); 408 | false -> build_query(Items) 409 | end; 410 | expr(Func) when is_function(Func) -> 411 | func(Func); 412 | expr(Value) -> 413 | #term { 414 | type = 'DATUM', 415 | datum = datum(Value) 416 | }. 417 | 418 | make_array(Items) when is_list(Items) -> 419 | #term { 420 | type = 'MAKE_ARRAY', 421 | args = [ expr(I) || I <- Items ] 422 | }. 
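%% A sketch of how expr/1 maps plain Erlang values onto the wire AST
%% (the example values are illustrative only):
%%   expr({[{<<"name">>, <<"batman">>}]}) -> a 'MAKE_OBJ' term whose optargs hold
%%       one #term_assocpair{} per key/value pair;
%%   expr([1, 2, 3])                      -> a 'MAKE_ARRAY' term of 'R_NUM' datums;
%%   expr([{row}, {add, 1}])              -> handed back to build_query/1 as a
%%       nested op list, because its elements are not JSON values;
%%   expr(fun(X) -> ... end)              -> wrapped into a 'FUNC' term via func/1.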
423 | 424 | % @private 425 | % @doc create Datums from the four basic types. Arrays and objects 426 | % are created via MAKE_ARRAY and MAKE_OBJ on the server since it's 427 | % cheaper that way. 428 | -spec datum(null | boolean() | number() | binary()) -> #datum{}. 429 | datum(null) -> 430 | #datum { 431 | type = 'R_NULL' 432 | }; 433 | 434 | datum(V) when is_boolean(V) -> 435 | #datum { 436 | type = 'R_BOOL', 437 | r_bool = V 438 | }; 439 | 440 | datum(V) when is_number(V) -> 441 | #datum { 442 | type = 'R_NUM', 443 | r_num = V 444 | }; 445 | 446 | datum(V) when is_binary(V) -> 447 | #datum { 448 | type = 'R_STR', 449 | r_str = V 450 | }. 451 | 452 | -spec var(integer(), []) -> #term{}. 453 | var(N, []) -> 454 | #term { 455 | type = 'VAR', 456 | args = expr(N) 457 | }. 458 | 459 | -spec func(fun()) -> #term{}. 460 | func(Func) -> 461 | {_, Arity} = erlang:fun_info(Func, arity), 462 | Args = [ {var, N} || N <- lists:seq(1, Arity)], 463 | #term { 464 | type = 'FUNC', 465 | args = [make_array(lists:seq(1, Arity)), expr(apply(Func, Args))] 466 | }. 467 | 468 | -spec term_assocpair(binary(), any()) -> #term_assocpair{}. 469 | term_assocpair(Key, Value) -> 470 | #term_assocpair { 471 | key = Key, 472 | val = expr(Value) 473 | }. 474 | 475 | func_wrap(Data) -> 476 | Value = expr(Data), 477 | case ivar_scan(Value) of 478 | true -> func(fun(_X) -> Value end); 479 | false -> Value 480 | end. 481 | 482 | % Scan for IMPLICIT_VAR or JS 483 | -spec ivar_scan(any()) -> boolean(). 484 | ivar_scan(#term{ type = 'IMPLICIT_VAR' }) -> 485 | true; 486 | ivar_scan(#term{ args = Args, optargs = OptArgs }) -> 487 | case is_list(Args) of 488 | true -> lists:any(fun ivar_scan/1, Args); 489 | false -> ivar_scan(Args) 490 | end 491 | orelse 492 | case is_list(OptArgs) of 493 | true -> lists:any(fun ivar_scan/1, OptArgs); 494 | false -> ivar_scan(Args) 495 | end; 496 | ivar_scan(#term_assocpair{ val = Val }) -> 497 | ivar_scan(Val); 498 | ivar_scan(_) -> 499 | false. 500 | 501 | -spec is_json(any()) -> boolean(). 502 | is_json(null) -> true; 503 | is_json(Item) when is_boolean(Item) -> true; 504 | is_json(Item) when is_number(Item) -> true; 505 | is_json(Item) when is_binary(Item) -> true; 506 | is_json({List}) when is_list(List) -> true; 507 | is_json({Key, _Value}) when is_binary(Key) -> true; 508 | is_json(_) -> false. 509 | 510 | %% lethink:query(test, [{table, <<"marvel">>}, {update, {[{<<"age">>, [{row}, {get_field, <<"age">>}, {add, 1}]}]}}]) 511 | %% r.table('marvel').update(lambda x: {'age': x['age'] + 1})) 512 | -------------------------------------------------------------------------------- /src/lethink_server.erl: -------------------------------------------------------------------------------- 1 | %% Based off of bank_server by: 2 | %% Copyright (c) 2012, Loïc Hoguin 3 | %% 4 | %% Permission to use, copy, modify, and/or distribute this software for any 5 | %% purpose with or without fee is hereby granted, provided that the above 6 | %% copyright notice and this permission notice appear in all copies. 7 | %% 8 | %% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 | %% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 | %% MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 | %% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 | %% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 | %% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 | %% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 | 16 | %% @private 17 | -module(lethink_server). 18 | -behavior(gen_server). 19 | 20 | %% API. 21 | -export([start_link/0]). 22 | -export([stop/0]). 23 | -export([add_pool/1]). 24 | -export([remove_pool/1]). 25 | -export([add_worker/2]). 26 | -export([get_worker/1]). 27 | -export([get_all_workers/1]). 28 | 29 | %% gen_server. 30 | -export([init/1]). 31 | -export([handle_call/3]). 32 | -export([handle_cast/2]). 33 | -export([handle_info/2]). 34 | -export([terminate/2]). 35 | -export([code_change/3]). 36 | 37 | -record(state, { 38 | pools = [] :: [any()] 39 | }). 40 | 41 | -define(TAB, ?MODULE). 42 | 43 | %% API. 44 | 45 | -spec start_link() -> {ok, pid()}. 46 | start_link() -> 47 | gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). 48 | 49 | -spec stop() -> stopped. 50 | stop() -> 51 | gen_server:call(?MODULE, stop). 52 | 53 | -spec add_pool(any()) -> ok. 54 | add_pool(Ref) -> 55 | gen_server:cast(?MODULE, {add_pool, Ref}). 56 | 57 | -spec remove_pool(any()) -> ok. 58 | remove_pool(Ref) -> 59 | gen_server:cast(?MODULE, {remove_pool, Ref}). 60 | 61 | -spec add_worker(any(), pid()) -> ok. 62 | add_worker(Ref, Pid) -> 63 | gen_server:cast(?MODULE, {add_worker, Ref, Pid}). 64 | 65 | -spec get_worker(any()) -> pid(). 66 | get_worker(Ref) -> 67 | Workers = get_all_workers(Ref), 68 | {_, _, Micro} = erlang:now(), 69 | Random = 1 + Micro rem length(Workers), 70 | lists:nth(Random, Workers). 71 | 72 | -spec get_all_workers(any()) -> [pid()]. 73 | get_all_workers(Ref) -> 74 | ets:lookup_element(?TAB, {pool, Ref}, 2). 75 | 76 | %% gen_server. 77 | 78 | -spec init([]) -> {ok, #state{}}. 79 | init([]) -> 80 | {ok, #state{}}. 81 | 82 | -spec handle_call(any(), {pid(), any()}, #state{}) -> {stop, normal, stopped, #state{}} | 83 | {reply, ignored, #state{}}. 84 | handle_call(stop, _From, State) -> 85 | {stop, normal, stopped, State}; 86 | handle_call(_Request, _From, State) -> 87 | {reply, ignored, State}. 88 | 89 | -spec handle_cast(any(), #state{}) -> {noreply, #state{}}. 90 | handle_cast({add_pool, Ref}, State=#state{pools=Pools}) -> 91 | true = ets:insert_new(?TAB, {{pool, Ref}, []}), 92 | {noreply, State#state{pools=[Ref|Pools]}}; 93 | handle_cast({remove_pool, Ref}, State=#state{pools=Pools}) -> 94 | true = ets:delete(?TAB, {pool, Ref}), 95 | {noreply, State#state{pools=lists:delete(Ref, Pools)}}; 96 | handle_cast({add_worker, Ref, Pid}, State) -> 97 | Workers = ets:lookup_element(?TAB, {pool, Ref}, 2), 98 | true = ets:insert(?TAB, {{pool, Ref}, [Pid|Workers]}), 99 | _ = erlang:monitor(process, Pid), 100 | {noreply, State}; 101 | handle_cast(_Request, State) -> 102 | {noreply, State}. 103 | 104 | -spec handle_info(any(), #state{}) -> {noreply, #state{}}. 105 | handle_info({'DOWN', _, process, Pid, _}, State=#state{pools=Pools}) -> 106 | _ = [begin 107 | Workers = ets:lookup_element(?TAB, {pool, Ref}, 2), 108 | case lists:member(Pid, Workers) of 109 | false -> 110 | false; 111 | true -> 112 | true = ets:insert(?TAB, {{pool, Ref}, 113 | lists:delete(Pid, Workers)}) 114 | end 115 | end || Ref <- Pools], 116 | {noreply, State}; 117 | handle_info(_Info, State) -> 118 | {noreply, State}. 119 | 120 | -spec terminate(any(), #state{}) -> ok. 
121 | terminate(_Reason, _State) -> 122 | ok. 123 | 124 | -spec code_change(any(), #state{}, any()) -> {ok, #state{}}. 125 | code_change(_OldVsn, State, _Extra) -> 126 | {ok, State}. 127 | -------------------------------------------------------------------------------- /src/lethink_sup.erl: -------------------------------------------------------------------------------- 1 | %% @private 2 | -module(lethink_sup). 3 | 4 | -behaviour(supervisor). 5 | 6 | %% API 7 | -export([start_link/0]). 8 | 9 | %% Supervisor callbacks 10 | -export([init/1]). 11 | 12 | %% Helper macro for declaring children of supervisor 13 | -define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}). 14 | 15 | %% =================================================================== 16 | %% API functions 17 | %% =================================================================== 18 | 19 | -spec start_link() -> any(). 20 | start_link() -> 21 | supervisor:start_link({local, ?MODULE}, ?MODULE, []). 22 | 23 | %% =================================================================== 24 | %% Supervisor callbacks 25 | %% =================================================================== 26 | 27 | -spec init([]) -> {ok,{{supervisor:strategy(),non_neg_integer(), non_neg_integer()},[supervisor:child_spec()]}}. 28 | init([]) -> 29 | lethink_server = ets:new(lethink_server, [ 30 | ordered_set, public, named_table, {read_concurrency, true}]), 31 | Procs = [ 32 | {lethink_server, {lethink_server, start_link, []}, 33 | permanent, 5000, worker, [lethink_server]} 34 | ], 35 | {ok, {{one_for_one, 10, 10}, Procs}}. 36 | -------------------------------------------------------------------------------- /src/lethink_worker.erl: -------------------------------------------------------------------------------- 1 | %% @private 2 | -module(lethink_worker). 3 | 4 | -export([start_link/2, 5 | use/2, 6 | query/2]). 7 | 8 | -export([init/1, 9 | handle_call/3, 10 | handle_cast/2, 11 | handle_info/2, 12 | terminate/2, 13 | code_change/3]). 14 | 15 | -include("ql2_pb.hrl"). 16 | 17 | -record(state, { 18 | socket :: port(), 19 | database :: binary(), 20 | token = 1 :: pos_integer() 21 | }). 22 | 23 | -define(RETHINKDB_VERSION, 16#723081e1). % magic number from ql2.proto 24 | 25 | -spec start_link(any(), [lethink:connect_options()]) -> any(). 26 | start_link(Ref, Opts) -> 27 | {ok, Pid} = gen_server:start_link(?MODULE, [Opts], []), 28 | lethink_server:add_worker(Ref, Pid), 29 | {ok, Pid}. 30 | 31 | -spec use(pid(), binary()) -> ok. 32 | use(Pid, Name) when is_binary(Name) -> 33 | gen_server:cast(Pid, {use, Name}). 34 | 35 | -spec query(pid(), #term{}) -> lethink:response(). 36 | query(Pid, Query) -> 37 | Timeout = application:get_env(lethink, timeout, 30000), 38 | gen_server:call(Pid, {query, Query}, Timeout). 39 | 40 | -spec init([[{atom, any()}]]) -> {ok, #state{}}. 41 | init([Opts]) -> 42 | Host = proplists:get_value(host, Opts, "127.0.0.1"), 43 | Port = proplists:get_value(port, Opts, 28015), 44 | Database = proplists:get_value(database, Opts, <<"test">>), 45 | AuthKey = proplists:get_value(auth_key, Opts, <<>>), 46 | {ok, Socket} = gen_tcp:connect(Host, Port, [binary, {packet, 0}, {active, false}]), 47 | ok = login(AuthKey, Socket), 48 | State = #state{ 49 | socket = Socket, 50 | database = unicode:characters_to_binary(Database) 51 | }, 52 | {ok, State}. 53 | 54 | -spec handle_call(tuple(), pid(), #state{}) -> {reply, ok | lethink:response(), #state{}}. 
55 | handle_call({query, Term}, _From, State) -> 56 | Query = #query { 57 | type = 'START', 58 | query = Term, 59 | token = State#state.token, 60 | global_optargs = [ql2_util:global_db(State#state.database)] 61 | }, 62 | Reply = send_and_recv(Query, State#state.socket), 63 | {reply, Reply, State#state{ token = State#state.token + 1 }}; 64 | 65 | handle_call(_Message, _From, State) -> 66 | {reply, ok, State}. 67 | 68 | -spec handle_cast(any(), #state{}) -> {noreply, #state{}}. 69 | handle_cast({use, Name}, State) -> 70 | {noreply, State#state{database = unicode:characters_to_binary(Name)}}; 71 | 72 | handle_cast(_Message, State) -> 73 | {noreply, State}. 74 | 75 | -spec handle_info(any(), #state{}) -> {noreply, #state{}}. 76 | handle_info(Info, State) -> 77 | io:fwrite("Info: ~p", [Info]), 78 | {noreply, State}. 79 | 80 | -spec terminate(any(), #state{}) -> ok. 81 | terminate(Reason, State) -> 82 | io:fwrite("terminating: ~p", [Reason]), 83 | gen_tcp:close(State#state.socket), 84 | ok. 85 | 86 | -spec code_change(any(), #state{}, any()) -> {ok, #state{}}. 87 | code_change(_OldVsn, State, _Extra) -> 88 | {ok, State}. 89 | 90 | -spec send_and_recv(#query{}, port()) -> lethink:response(). 91 | send_and_recv(Query, Socket) -> 92 | send(Query, Socket), 93 | Response = recv(Socket), 94 | handle_response(ql2_pb:decode_response(Response)). 95 | 96 | -spec send(#query{}, port()) -> any(). 97 | send(Query, Socket) -> 98 | Iolist = ql2_pb:encode_query(Query), 99 | Length = iolist_size(Iolist), 100 | gen_tcp:send(Socket, [<<Length:32/little-unsigned>>, Iolist]). 101 | 102 | -spec recv(port()) -> any(). 103 | recv(Socket) -> 104 | {ok, ResponseLength} = gen_tcp:recv(Socket, 4), 105 | {ok, Response} = gen_tcp:recv(Socket, binary:decode_unsigned(ResponseLength, little)), 106 | Response. 107 | 108 | -spec handle_response(#response{}) -> lethink:response(). 109 | handle_response(#response{ type = 'SUCCESS_ATOM', response = [Datum]}) -> 110 | {ok, ql2_util:datum_value(Datum)}; 111 | handle_response(#response{ type = 'SUCCESS_SEQUENCE', response = Data}) -> 112 | {ok, lists:map(fun ql2_util:datum_value/1, Data)}; 113 | handle_response(#response{ type = 'SUCCESS_PARTIAL', response = [Datum]}) -> 114 | {ok, ql2_util:datum_value(Datum)}; 115 | 116 | handle_response(#response{ type = 'CLIENT_ERROR', response = [Datum]} = Response) -> 117 | ErrorMsg = ql2_util:datum_value(Datum), 118 | {error, ErrorMsg, Response#response.type, Response#response.backtrace}; 119 | handle_response(#response{ type = 'COMPILE_ERROR', response = [Datum]} = Response) -> 120 | ErrorMsg = ql2_util:datum_value(Datum), 121 | {error, ErrorMsg, Response#response.type, Response#response.backtrace}; 122 | handle_response(#response{ type = 'RUNTIME_ERROR', response = [Datum]} = Response) -> 123 | ErrorMsg = ql2_util:datum_value(Datum), 124 | {error, ErrorMsg, Response#response.type, Response#response.backtrace}. 125 | 126 | -spec login(binary(), port()) -> ok | {error, binary()}. 127 | login(AuthKey, Socket) -> 128 | KeyLength = iolist_size(AuthKey), 129 | ok = gen_tcp:send(Socket, binary:encode_unsigned(?RETHINKDB_VERSION, little)), 130 | ok = gen_tcp:send(Socket, [<<KeyLength:32/little-unsigned>>, AuthKey]), 131 | {ok, Response} = read_until_null(Socket), 132 | case Response == <<"SUCCESS",0>> of 133 | true -> ok; 134 | false -> 135 | io:fwrite("Error: ~s~n", [Response]), 136 | {error, Response} 137 | end. 138 | 139 | -spec read_until_null(port()) -> {ok, binary()}. 140 | read_until_null(Socket) -> 141 | read_until_null(Socket, []). 142 | 143 | -spec read_until_null(port(), list()) -> {ok, binary()}.
144 | read_until_null(Socket, Acc) -> 145 | {ok, Response} = gen_tcp:recv(Socket, 0), 146 | Result = [Acc, Response], 147 | case is_null_terminated(Response) of 148 | true -> {ok, iolist_to_binary(Result)}; 149 | false -> read_until_null(Socket, Result) 150 | end. 151 | 152 | -spec is_null_terminated(binary()) -> boolean(). 153 | is_null_terminated(B) -> 154 | binary:at(B, iolist_size(B) - 1) == 0. 155 | -------------------------------------------------------------------------------- /src/lethink_workers_sup.erl: -------------------------------------------------------------------------------- 1 | %% Based off of bank_workers_sup by: 2 | %% Copyright (c) 2012, Loïc Hoguin 3 | %% 4 | %% Permission to use, copy, modify, and/or distribute this software for any 5 | %% purpose with or without fee is hereby granted, provided that the above 6 | %% copyright notice and this permission notice appear in all copies. 7 | %% 8 | %% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 | %% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 | %% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 | %% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 | %% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 | %% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 | %% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 | 16 | %% @private 17 | -module(lethink_workers_sup). 18 | -behaviour(supervisor). 19 | 20 | %% API. 21 | -export([start_link/0]). 22 | 23 | %% supervisor. 24 | -export([init/1]). 25 | 26 | %% API. 27 | 28 | -spec start_link() -> {ok, pid()}. 29 | start_link() -> 30 | supervisor:start_link(?MODULE, []). 31 | 32 | %% supervisor. 33 | 34 | -spec init([]) -> {ok,{{supervisor:strategy(),non_neg_integer(), non_neg_integer()},[supervisor:child_spec()]}}. 35 | init([]) -> 36 | Procs = [ 37 | {lethink_worker, {lethink_worker, start_link, []}, 38 | transient, 5000, worker, [lethink_worker]} 39 | ], 40 | {ok, {{simple_one_for_one, 1000, 10}, Procs}}. 41 | -------------------------------------------------------------------------------- /src/ql2.proto: -------------------------------------------------------------------------------- 1 | //////////////////////////////////////////////////////////////////////////////// 2 | // THE HIGH-LEVEL VIEW // 3 | //////////////////////////////////////////////////////////////////////////////// 4 | 5 | // Process: When you first open a connection, send the magic number 6 | // for the version of the protobuf you're targetting (in the [Version] 7 | // enum). This should **NOT** be sent as a protobuf; just send the 8 | // little-endian 32-bit integer over the wire raw. This number should 9 | // only be sent once per connection. 10 | 11 | // The magic number shall be followed by an authorization key. The 12 | // first 4 bytes are the length of the key to be sent as a little-endian 13 | // 32-bit integer, followed by the key string. Even if there is no key, 14 | // an empty string should be sent (length 0 and no data). The server will 15 | // then respond with a NULL-terminated string response. "SUCCESS" indicates 16 | // that the connection has been accepted. Any other response indicates an 17 | // error, and the response string should describe the error. 18 | 19 | // Next, for each query you want to send, construct a [Query] protobuf 20 | // and serialize it to a binary blob. 
Send the blob's size to the 21 | // server encoded as a little-endian 32-bit integer, followed by the 22 | // blob itself. You will recieve a [Response] protobuf back preceded 23 | // by its own size, once again encoded as a little-endian 32-bit 24 | // integer. You can see an example exchange below in **EXAMPLE**. 25 | 26 | // A query consists of a [Term] to evaluate and a unique-per-connection 27 | // [token]. 28 | 29 | // Tokens are used for two things: 30 | // * Keeping track of which responses correspond to which queries. 31 | // * Batched queries. Some queries return lots of results, so we send back 32 | // batches of <1000, and you need to send a [CONTINUE] query with the same 33 | // token to get more results from the original query. 34 | //////////////////////////////////////////////////////////////////////////////// 35 | 36 | // This enum contains the magic numbers for your version. See **THE HIGH-LEVEL 37 | // VIEW** for what to do with it. 38 | message VersionDummy { // We need to wrap it like this for some 39 | // non-conforming protobuf libraries 40 | enum Version { 41 | V0_1 = 0x3f61ba36; 42 | V0_2 = 0x723081e1; 43 | } 44 | } 45 | 46 | // You send one of: 47 | // * A [START] query with a [Term] to evaluate and a unique-per-connection token. 48 | // * A [CONTINUE] query with the same token as a [START] query that returned 49 | // [SUCCESS_PARTIAL] in its [Response]. 50 | // * A [STOP] query with the same token as a [START] query that you want to stop. 51 | // * A [NOREPLY_WAIT] query with a unique per-connection token. The server answers 52 | // with a [WAIT_COMPLETE] [Response]. 53 | message Query { 54 | enum QueryType { 55 | START = 1; // Start a new query. 56 | CONTINUE = 2; // Continue a query that returned [SUCCESS_PARTIAL] 57 | // (see [Response]). 58 | STOP = 3; // Stop a query partway through executing. 59 | NOREPLY_WAIT = 4; 60 | // Wait for noreply operations to finish. 61 | } 62 | optional QueryType type = 1; 63 | // A [Term] is how we represent the operations we want a query to perform. 64 | optional Term query = 2; // only present when [type] = [START] 65 | optional int64 token = 3; 66 | // This flag is ignored on the server. `noreply` should be added 67 | // to `global_optargs` instead (the key "noreply" should map to 68 | // either true or false). 69 | optional bool OBSOLETE_noreply = 4 [default = false]; 70 | 71 | // If this is set to [true], then [Datum] values will sometimes be 72 | // of [DatumType] [R_JSON] (see below). This can provide enormous 73 | // speedups in languages with poor protobuf libraries. 74 | optional bool accepts_r_json = 5 [default = false]; 75 | 76 | message AssocPair { 77 | optional string key = 1; 78 | optional Term val = 2; 79 | } 80 | repeated AssocPair global_optargs = 6; 81 | } 82 | 83 | // A backtrace frame (see `backtrace` in Response below) 84 | message Frame { 85 | enum FrameType { 86 | POS = 1; // Error occured in a positional argument. 87 | OPT = 2; // Error occured in an optional argument. 88 | } 89 | optional FrameType type = 1; 90 | optional int64 pos = 2; // The index of the positional argument. 91 | optional string opt = 3; // The name of the optional argument. 92 | } 93 | message Backtrace { 94 | repeated Frame frames = 1; 95 | } 96 | 97 | // You get back a response with the same [token] as your query. 98 | message Response { 99 | enum ResponseType { 100 | // These response types indicate success. 101 | SUCCESS_ATOM = 1; // Query returned a single RQL datatype. 
102 | SUCCESS_SEQUENCE = 2; // Query returned a sequence of RQL datatypes. 103 | SUCCESS_PARTIAL = 3; // Query returned a partial sequence of RQL 104 | // datatypes. If you send a [CONTINUE] query with 105 | // the same token as this response, you will get 106 | // more of the sequence. Keep sending [CONTINUE] 107 | // queries until you get back [SUCCESS_SEQUENCE]. 108 | WAIT_COMPLETE = 4; // A [NOREPLY_WAIT] query completed. 109 | 110 | // These response types indicate failure. 111 | CLIENT_ERROR = 16; // Means the client is buggy. An example is if the 112 | // client sends a malformed protobuf, or tries to 113 | // send [CONTINUE] for an unknown token. 114 | COMPILE_ERROR = 17; // Means the query failed during parsing or type 115 | // checking. For example, if you pass too many 116 | // arguments to a function. 117 | RUNTIME_ERROR = 18; // Means the query failed at runtime. An example is 118 | // if you add together two values from a table, but 119 | // they turn out at runtime to be booleans rather 120 | // than numbers. 121 | } 122 | optional ResponseType type = 1; 123 | optional int64 token = 2; // Indicates what [Query] this response corresponds to. 124 | 125 | // [response] contains 1 RQL datum if [type] is [SUCCESS_ATOM], or many RQL 126 | // data if [type] is [SUCCESS_SEQUENCE] or [SUCCESS_PARTIAL]. It contains 1 127 | // error message (of type [R_STR]) in all other cases. 128 | repeated Datum response = 3; 129 | 130 | // If [type] is [CLIENT_ERROR], [TYPE_ERROR], or [RUNTIME_ERROR], then a 131 | // backtrace will be provided. The backtrace says where in the query the 132 | // error occured. Ideally this information will be presented to the user as 133 | // a pretty-printed version of their query with the erroneous section 134 | // underlined. A backtrace is a series of 0 or more [Frame]s, each of which 135 | // specifies either the index of a positional argument or the name of an 136 | // optional argument. (Those words will make more sense if you look at the 137 | // [Term] message below.) 138 | optional Backtrace backtrace = 4; // Contains n [Frame]s when you get back an error. 139 | 140 | // If the [global_optargs] in the [Query] that this [Response] is a 141 | // response to contains a key "profile" which maps to a static value of 142 | // true then [profile] will contain a [Datum] which provides profiling 143 | // information about the execution of the query. This field should be 144 | // returned to the user along with the result that would normally be 145 | // returned (a datum or a cursor). In official drivers this is accomplished 146 | // by putting them inside of an object with "value" mapping to the return 147 | // value and "profile" mapping to the profile object. 148 | optional Datum profile = 5; 149 | } 150 | 151 | // A [Datum] is a chunk of data that can be serialized to disk or returned to 152 | // the user in a Response. Currently we only support JSON types, but we may 153 | // support other types in the future (e.g., a date type or an integer type). 154 | message Datum { 155 | enum DatumType { 156 | R_NULL = 1; 157 | R_BOOL = 2; 158 | R_NUM = 3; // a double 159 | R_STR = 4; 160 | R_ARRAY = 5; 161 | R_OBJECT = 6; 162 | // This [DatumType] will only be used if [accepts_r_json] is 163 | // set to [true] in [Query]. [r_str] will be filled with a 164 | // JSON encoding of the [Datum]. 
165 | R_JSON = 7; // uses r_str 166 | } 167 | optional DatumType type = 1; 168 | optional bool r_bool = 2; 169 | optional double r_num = 3; 170 | optional string r_str = 4; 171 | 172 | repeated Datum r_array = 5; 173 | message AssocPair { 174 | optional string key = 1; 175 | optional Datum val = 2; 176 | } 177 | repeated AssocPair r_object = 6; 178 | 179 | extensions 10000 to 20000; 180 | } 181 | 182 | // A [Term] is either a piece of data (see **Datum** above), or an operator and 183 | // its operands. If you have a [Datum], it's stored in the member [datum]. If 184 | // you have an operator, its positional arguments are stored in [args] and its 185 | // optional arguments are stored in [optargs]. 186 | // 187 | // A note about type signatures: 188 | // We use the following notation to denote types: 189 | // arg1_type, arg2_type, argrest_type... -> result_type 190 | // So, for example, if we have a function `avg` that takes any number of 191 | // arguments and averages them, we might write: 192 | // NUMBER... -> NUMBER 193 | // Or if we had a function that took one number modulo another: 194 | // NUMBER, NUMBER -> NUMBER 195 | // Or a function that takes a table and a primary key of any Datum type, then 196 | // retrieves the entry with that primary key: 197 | // Table, DATUM -> OBJECT 198 | // Some arguments must be provided as literal values (and not the results of sub 199 | // terms). These are marked with a `!`. 200 | // Optional arguments are specified within curly braces as argname `:` value 201 | // type (e.x `{use_outdated:BOOL}`) 202 | // Many RQL operations are polymorphic. For these, alterantive type signatures 203 | // are separated by `|`. 204 | // 205 | // The RQL type hierarchy is as follows: 206 | // Top 207 | // DATUM 208 | // NULL 209 | // BOOL 210 | // NUMBER 211 | // STRING 212 | // OBJECT 213 | // SingleSelection 214 | // ARRAY 215 | // Sequence 216 | // ARRAY 217 | // Stream 218 | // StreamSelection 219 | // Table 220 | // Database 221 | // Function 222 | // Ordering - used only by ORDER_BY 223 | // Pathspec -- an object, string, or array that specifies a path 224 | // Error 225 | message Term { 226 | enum TermType { 227 | // A RQL datum, stored in `datum` below. 228 | DATUM = 1; 229 | 230 | MAKE_ARRAY = 2; // DATUM... -> ARRAY 231 | // Evaluate the terms in [optargs] and make an object 232 | MAKE_OBJ = 3; // {...} -> OBJECT 233 | 234 | // * Compound types 235 | 236 | // Takes an integer representing a variable and returns the value stored 237 | // in that variable. It's the responsibility of the client to translate 238 | // from their local representation of a variable to a unique _non-negative_ 239 | // integer for that variable. (We do it this way instead of letting 240 | // clients provide variable names as strings to discourage 241 | // variable-capturing client libraries, and because it's more efficient 242 | // on the wire.) 243 | VAR = 10; // !NUMBER -> DATUM 244 | // Takes some javascript code and executes it. 245 | JAVASCRIPT = 11; // STRING {timeout: !NUMBER} -> DATUM | 246 | // STRING {timeout: !NUMBER} -> Function(*) 247 | 248 | // Takes a string and throws an error with that message. 249 | // Inside of a `default` block, you can omit the first 250 | // argument to rethrow whatever error you catch (this is most 251 | // useful as an argument to the `default` filter optarg). 252 | ERROR = 12; // STRING -> Error | -> Error 253 | // Takes nothing and returns a reference to the implicit variable. 
254 | IMPLICIT_VAR = 13; // -> DATUM 255 | 256 | // * Data Operators 257 | // Returns a reference to a database. 258 | DB = 14; // STRING -> Database 259 | // Returns a reference to a table. 260 | TABLE = 15; // Database, STRING, {use_outdated:BOOL} -> Table | STRING, {use_outdated:BOOL} -> Table 261 | // Gets a single element from a table by its primary or a secondary key. 262 | GET = 16; // Table, STRING -> SingleSelection | Table, NUMBER -> SingleSelection | 263 | // Table, STRING -> NULL | Table, NUMBER -> NULL | 264 | GET_ALL = 78; // Table, DATUM..., {index:!STRING} => ARRAY 265 | 266 | // Simple DATUM Ops 267 | EQ = 17; // DATUM... -> BOOL 268 | NE = 18; // DATUM... -> BOOL 269 | LT = 19; // DATUM... -> BOOL 270 | LE = 20; // DATUM... -> BOOL 271 | GT = 21; // DATUM... -> BOOL 272 | GE = 22; // DATUM... -> BOOL 273 | NOT = 23; // BOOL -> BOOL 274 | // ADD can either add two numbers or concatenate two arrays. 275 | ADD = 24; // NUMBER... -> NUMBER | STRING... -> STRING 276 | SUB = 25; // NUMBER... -> NUMBER 277 | MUL = 26; // NUMBER... -> NUMBER 278 | DIV = 27; // NUMBER... -> NUMBER 279 | MOD = 28; // NUMBER, NUMBER -> NUMBER 280 | 281 | // DATUM Array Ops 282 | // Append a single element to the end of an array (like `snoc`). 283 | APPEND = 29; // ARRAY, DATUM -> ARRAY 284 | // Prepend a single element to the end of an array (like `cons`). 285 | PREPEND = 80; // ARRAY, DATUM -> ARRAY 286 | //Remove the elements of one array from another array. 287 | DIFFERENCE = 95; // ARRAY, ARRAY -> ARRAY 288 | 289 | // DATUM Set Ops 290 | // Set ops work on arrays. They don't use actual sets and thus have 291 | // performance characteristics you would expect from arrays rather than 292 | // from sets. All set operations have the post condition that they 293 | // array they return contains no duplicate values. 294 | SET_INSERT = 88; // ARRAY, DATUM -> ARRAY 295 | SET_INTERSECTION = 89; // ARRAY, ARRAY -> ARRAY 296 | SET_UNION = 90; // ARRAY, ARRAY -> ARRAY 297 | SET_DIFFERENCE = 91; // ARRAY, ARRAY -> ARRAY 298 | 299 | SLICE = 30; // Sequence, NUMBER, NUMBER -> Sequence 300 | SKIP = 70; // Sequence, NUMBER -> Sequence 301 | LIMIT = 71; // Sequence, NUMBER -> Sequence 302 | INDEXES_OF = 87; // Sequence, DATUM -> Sequence | Sequence, Function(1) -> Sequence 303 | CONTAINS = 93; // Sequence, DATUM -> BOOL | Sequence, Function(1) -> BOOL 304 | 305 | // Stream/Object Ops 306 | // Get a particular field from an object, or map that over a 307 | // sequence. 308 | GET_FIELD = 31; // OBJECT, STRING -> DATUM 309 | // | Sequence, STRING -> Sequence 310 | // Return an array containing the keys of the object. 311 | KEYS = 94; // OBJECT -> ARRAY 312 | // Check whether an object contains all the specified fields, 313 | // or filters a sequence so that all objects inside of it 314 | // contain all the specified fields. 315 | HAS_FIELDS = 32; // OBJECT, Pathspec... -> BOOL 316 | // x.with_fields(...) <=> x.has_fields(...).pluck(...) 317 | WITH_FIELDS = 96; // Sequence, Pathspec... -> Sequence 318 | // Get a subset of an object by selecting some attributes to preserve, 319 | // or map that over a sequence. (Both pick and pluck, polymorphic.) 320 | PLUCK = 33; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... -> OBJECT 321 | // Get a subset of an object by selecting some attributes to discard, or 322 | // map that over a sequence. (Both unpick and without, polymorphic.) 323 | WITHOUT = 34; // Sequence, Pathspec... -> Sequence | OBJECT, Pathspec... 
-> OBJECT 324 | // Merge objects (right-preferential) 325 | MERGE = 35; // OBJECT... -> OBJECT | Sequence -> Sequence 326 | 327 | // Sequence Ops 328 | // Get all elements of a sequence between two values. 329 | // Half-open by default, but the openness of either side can be 330 | // changed by passing 'closed' or 'open for `right_bound` or 331 | // `left_bound`. 332 | BETWEEN = 36; // StreamSelection, DATUM, DATUM, {index:!STRING, right_bound:STRING, left_bound:STRING} -> StreamSelection 333 | REDUCE = 37; // Sequence, Function(2), {base:DATUM} -> DATUM 334 | MAP = 38; // Sequence, Function(1) -> Sequence 335 | 336 | // Filter a sequence with either a function or a shortcut 337 | // object (see API docs for details). The body of FILTER is 338 | // wrapped in an implicit `.default(false)`, and you can 339 | // change the default value by specifying the `default` 340 | // optarg. If you make the default `r.error`, all errors 341 | // caught by `default` will be rethrown as if the `default` 342 | // did not exist. 343 | FILTER = 39; // Sequence, Function(1), {default:DATUM} -> Sequence | 344 | // Sequence, OBJECT, {default:DATUM} -> Sequence 345 | // Map a function over a sequence and then concatenate the results together. 346 | CONCATMAP = 40; // Sequence, Function(1) -> Sequence 347 | // Order a sequence based on one or more attributes. 348 | ORDERBY = 41; // Sequence, (!STRING | Ordering)... -> Sequence 349 | // Get all distinct elements of a sequence (like `uniq`). 350 | DISTINCT = 42; // Sequence -> Sequence 351 | // Count the number of elements in a sequence, or only the elements that match 352 | // a given filter. 353 | COUNT = 43; // Sequence -> NUMBER | Sequence, DATUM -> NUMBER | Sequence, Function(1) -> NUMBER 354 | IS_EMPTY = 86; // Sequence -> BOOL 355 | // Take the union of multiple sequences (preserves duplicate elements! (use distinct)). 356 | UNION = 44; // Sequence... -> Sequence 357 | // Get the Nth element of a sequence. 358 | NTH = 45; // Sequence, NUMBER -> DATUM 359 | // Takes a sequence, and three functions: 360 | // - A function to group the sequence by. 361 | // - A function to map over the groups. 362 | // - A reduction to apply to each of the groups. 363 | GROUPED_MAP_REDUCE = 46; // Sequence, Function(1), Function(1), Function(2), {base:DATUM} -> ARRAY 364 | // Groups a sequence by one or more attributes, and then applies a reduction. 365 | // The third argument is a special object literal giving the kind of operation to be 366 | // performed and any necessary arguments. 367 | // At present, GROUPBY suports the following operations 368 | // * {'COUNT': } - count the size of the group 369 | // * {'SUM': attr} - sum the values of the given attribute across the group 370 | // * {'AVG': attr} - average the values of the given attribute across the group 371 | GROUPBY = 47; // Sequence, ARRAY, !GROUP_BY_OBJECT -> Sequence 372 | INNER_JOIN = 48; // Sequence, Sequence, Function(2) -> Sequence 373 | OUTER_JOIN = 49; // Sequence, Sequence, Function(2) -> Sequence 374 | // An inner-join that does an equality comparison on two attributes. 375 | EQ_JOIN = 50; // Sequence, !STRING, Sequence, {index:!STRING} -> Sequence 376 | ZIP = 72; // Sequence -> Sequence 377 | 378 | // Array Ops 379 | // Insert an element in to an array at a given index. 380 | INSERT_AT = 82; // ARRAY, NUMBER, DATUM -> ARRAY 381 | // Remove an element at a given index from an array. 
382 | DELETE_AT = 83; // ARRAY, NUMBER -> ARRAY | 383 | // ARRAY, NUMBER, NUMBER -> ARRAY 384 | // Change the element at a given index of an array. 385 | CHANGE_AT = 84; // ARRAY, NUMBER, DATUM -> ARRAY 386 | // Splice one array in to another array. 387 | SPLICE_AT = 85; // ARRAY, NUMBER, ARRAY -> ARRAY 388 | 389 | // * Type Ops 390 | // Coerces a datum to a named type (e.g. "bool"). 391 | // If you previously used `stream_to_array`, you should use this instead 392 | // with the type "array". 393 | COERCE_TO = 51; // Top, STRING -> Top 394 | // Returns the named type of a datum (e.g. TYPEOF(true) = "BOOL") 395 | TYPEOF = 52; // Top -> STRING 396 | 397 | // * Write Ops (the OBJECTs contain data about number of errors etc.) 398 | // Updates all the rows in a selection. Calls its Function with the row 399 | // to be updated, and then merges the result of that call. 400 | UPDATE = 53; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_vals:BOOL} -> OBJECT | 401 | // SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_vals:BOOL} -> OBJECT | 402 | // StreamSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_vals:BOOL} -> OBJECT | 403 | // SingleSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_vals:BOOL} -> OBJECT 404 | // Deletes all the rows in a selection. 405 | DELETE = 54; // StreamSelection, {durability:STRING, return_vals:BOOL} -> OBJECT | SingleSelection -> OBJECT 406 | // Replaces all the rows in a selection. Calls its Function with the row 407 | // to be replaced, and then discards it and stores the result of that 408 | // call. 409 | REPLACE = 55; // StreamSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_vals:BOOL} -> OBJECT | SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_vals:BOOL} -> OBJECT 410 | // Inserts into a table. If `upsert` is true, overwrites entries with 411 | // the same primary key (otherwise errors). 412 | INSERT = 56; // Table, OBJECT, {upsert:BOOL, durability:STRING, return_vals:BOOL} -> OBJECT | Table, Sequence, {upsert:BOOL, durability:STRING, return_vals:BOOL} -> OBJECT 413 | 414 | // * Administrative OPs 415 | // Creates a database with a particular name. 416 | DB_CREATE = 57; // STRING -> OBJECT 417 | // Drops a database with a particular name. 418 | DB_DROP = 58; // STRING -> OBJECT 419 | // Lists all the databases by name. (Takes no arguments) 420 | DB_LIST = 59; // -> ARRAY 421 | // Creates a table with a particular name in a particular 422 | // database. (You may omit the first argument to use the 423 | // default database.) 424 | TABLE_CREATE = 60; // Database, STRING, {datacenter:STRING, primary_key:STRING, cache_size:NUMBER, durability:STRING} -> OBJECT 425 | // STRING, {datacenter:STRING, primary_key:STRING, cache_size:NUMBER, durability:STRING} -> OBJECT 426 | // Drops a table with a particular name from a particular 427 | // database. (You may omit the first argument to use the 428 | // default database.) 429 | TABLE_DROP = 61; // Database, STRING -> OBJECT 430 | // STRING -> OBJECT 431 | // Lists all the tables in a particular database. (You may 432 | // omit the first argument to use the default database.) 433 | TABLE_LIST = 62; // Database -> ARRAY 434 | // -> ARRAY 435 | // Ensures that previously issued soft-durability writes are complete and 436 | // written to disk. 437 | SYNC = 138; // Table -> OBJECT 438 | 439 | // * Secondary indexes OPs 440 | // Creates a new secondary index with a particular name and definition. 
441 | INDEX_CREATE = 75; // Table, STRING, Function(1), {multi:BOOL} -> OBJECT 442 | // Drops a secondary index with a particular name from the specified table. 443 | INDEX_DROP = 76; // Table, STRING -> OBJECT 444 | // Lists all secondary indexes on a particular table. 445 | INDEX_LIST = 77; // Table -> ARRAY 446 | // Gets information about whether or not a set of indexes are ready to 447 | // be accessed. Returns a list of objects that look like this: 448 | // {index:STRING, ready:BOOL[, blocks_processed:NUMBER, blocks_total:NUMBER]} 449 | INDEX_STATUS = 139; // Table, STRING... -> ARRAY 450 | // Blocks until a set of indexes are ready to be accessed. Returns the 451 | // same values INDEX_STATUS. 452 | INDEX_WAIT = 140; // Table, STRING... -> ARRAY 453 | 454 | // * Control Operators 455 | // Calls a function on data 456 | FUNCALL = 64; // Function(*), DATUM... -> DATUM 457 | // Executes its first argument, and returns its second argument if it 458 | // got [true] or its third argument if it got [false] (like an `if` 459 | // statement). 460 | BRANCH = 65; // BOOL, Top, Top -> Top 461 | // Returns true if any of its arguments returns true (short-circuits). 462 | // (Like `or` in most languages.) 463 | ANY = 66; // BOOL... -> BOOL 464 | // Returns true if all of its arguments return true (short-circuits). 465 | // (Like `and` in most languages.) 466 | ALL = 67; // BOOL... -> BOOL 467 | // Calls its Function with each entry in the sequence 468 | // and executes the array of terms that Function returns. 469 | FOREACH = 68; // Sequence, Function(1) -> OBJECT 470 | 471 | //////////////////////////////////////////////////////////////////////////////// 472 | ////////// Special Terms 473 | //////////////////////////////////////////////////////////////////////////////// 474 | 475 | // An anonymous function. Takes an array of numbers representing 476 | // variables (see [VAR] above), and a [Term] to execute with those in 477 | // scope. Returns a function that may be passed an array of arguments, 478 | // then executes the Term with those bound to the variable names. The 479 | // user will never construct this directly. We use it internally for 480 | // things like `map` which take a function. The "arity" of a [Function] is 481 | // the number of arguments it takes. 482 | // For example, here's what `_X_.map{|x| x+2}` turns into: 483 | // Term { 484 | // type = MAP; 485 | // args = [_X_, 486 | // Term { 487 | // type = Function; 488 | // args = [Term { 489 | // type = DATUM; 490 | // datum = Datum { 491 | // type = R_ARRAY; 492 | // r_array = [Datum { type = R_NUM; r_num = 1; }]; 493 | // }; 494 | // }, 495 | // Term { 496 | // type = ADD; 497 | // args = [Term { 498 | // type = VAR; 499 | // args = [Term { 500 | // type = DATUM; 501 | // datum = Datum { type = R_NUM; 502 | // r_num = 1}; 503 | // }]; 504 | // }, 505 | // Term { 506 | // type = DATUM; 507 | // datum = Datum { type = R_NUM; r_num = 2; }; 508 | // }]; 509 | // }]; 510 | // }]; 511 | FUNC = 69; // ARRAY, Top -> ARRAY -> Top 512 | 513 | // Indicates to ORDER_BY that this attribute is to be sorted in ascending order. 514 | ASC = 73; // !STRING -> Ordering 515 | // Indicates to ORDER_BY that this attribute is to be sorted in descending order. 516 | DESC = 74; // !STRING -> Ordering 517 | 518 | // Gets info about anything. INFO is most commonly called on tables. 519 | INFO = 79; // Top -> OBJECT 520 | 521 | // `a.match(b)` returns a match object if the string `a` 522 | // matches the regular expression `b`. 
523 | MATCH = 97; // STRING, STRING -> DATUM 524 | 525 | // Change the case of a string. 526 | UPCASE = 141; // STRING -> STRING 527 | DOWNCASE = 142; // STRING -> STRING 528 | 529 | // Select a number of elements from sequence with uniform distribution. 530 | SAMPLE = 81; // Sequence, NUMBER -> Sequence 531 | 532 | // Evaluates its first argument. If that argument returns 533 | // NULL or throws an error related to the absence of an 534 | // expected value (for instance, accessing a non-existent 535 | // field or adding NULL to an integer), DEFAULT will either 536 | // return its second argument or execute it if it's a 537 | // function. If the second argument is a function, it will be 538 | // passed either the text of the error or NULL as its 539 | // argument. 540 | DEFAULT = 92; // Top, Top -> Top 541 | 542 | // Parses its first argument as a json string and returns it as a 543 | // datum. 544 | JSON = 98; // STRING -> DATUM 545 | 546 | // Parses its first arguments as an ISO 8601 time and returns it as a 547 | // datum. 548 | ISO8601 = 99; // STRING -> PSEUDOTYPE(TIME) 549 | // Prints a time as an ISO 8601 time. 550 | TO_ISO8601 = 100; // PSEUDOTYPE(TIME) -> STRING 551 | 552 | // Returns a time given seconds since epoch in UTC. 553 | EPOCH_TIME = 101; // NUMBER -> PSEUDOTYPE(TIME) 554 | // Returns seconds since epoch in UTC given a time. 555 | TO_EPOCH_TIME = 102; // PSEUDOTYPE(TIME) -> NUMBER 556 | 557 | // The time the query was received by the server. 558 | NOW = 103; // -> PSEUDOTYPE(TIME) 559 | // Puts a time into an ISO 8601 timezone. 560 | IN_TIMEZONE = 104; // PSEUDOTYPE(TIME), STRING -> PSEUDOTYPE(TIME) 561 | // a.during(b, c) returns whether a is in the range [b, c) 562 | DURING = 105; // PSEUDOTYPE(TIME), PSEUDOTYPE(TIME), PSEUDOTYPE(TIME) -> BOOL 563 | // Retrieves the date portion of a time. 564 | DATE = 106; // PSEUDOTYPE(TIME) -> PSEUDOTYPE(TIME) 565 | // x.time_of_day == x.date - x 566 | TIME_OF_DAY = 126; // PSEUDOTYPE(TIME) -> NUMBER 567 | // Returns the timezone of a time. 568 | TIMEZONE = 127; // PSEUDOTYPE(TIME) -> STRING 569 | 570 | // These access the various components of a time. 571 | YEAR = 128; // PSEUDOTYPE(TIME) -> NUMBER 572 | MONTH = 129; // PSEUDOTYPE(TIME) -> NUMBER 573 | DAY = 130; // PSEUDOTYPE(TIME) -> NUMBER 574 | DAY_OF_WEEK = 131; // PSEUDOTYPE(TIME) -> NUMBER 575 | DAY_OF_YEAR = 132; // PSEUDOTYPE(TIME) -> NUMBER 576 | HOURS = 133; // PSEUDOTYPE(TIME) -> NUMBER 577 | MINUTES = 134; // PSEUDOTYPE(TIME) -> NUMBER 578 | SECONDS = 135; // PSEUDOTYPE(TIME) -> NUMBER 579 | 580 | // Construct a time from a date and optional timezone or a 581 | // date+time and optional timezone. 582 | TIME = 136; // NUMBER, NUMBER, NUMBER -> PSEUDOTYPE(TIME) | 583 | // NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) | 584 | // NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, NUMBER -> PSEUDOTYPE(TIME) | 585 | // NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, NUMBER, STRING -> PSEUDOTYPE(TIME) | 586 | 587 | // Constants for ISO 8601 days of the week. 588 | MONDAY = 107; // -> 1 589 | TUESDAY = 108; // -> 2 590 | WEDNESDAY = 109; // -> 3 591 | THURSDAY = 110; // -> 4 592 | FRIDAY = 111; // -> 5 593 | SATURDAY = 112; // -> 6 594 | SUNDAY = 113; // -> 7 595 | 596 | // Constants for ISO 8601 months. 
597 | JANUARY = 114; // -> 1 598 | FEBRUARY = 115; // -> 2 599 | MARCH = 116; // -> 3 600 | APRIL = 117; // -> 4 601 | MAY = 118; // -> 5 602 | JUNE = 119; // -> 6 603 | JULY = 120; // -> 7 604 | AUGUST = 121; // -> 8 605 | SEPTEMBER = 122; // -> 9 606 | OCTOBER = 123; // -> 10 607 | NOVEMBER = 124; // -> 11 608 | DECEMBER = 125; // -> 12 609 | 610 | // Indicates to MERGE to replace the other object rather than merge it. 611 | LITERAL = 137; // JSON -> Merging 612 | } 613 | optional TermType type = 1; 614 | 615 | // This is only used when type is DATUM. 616 | optional Datum datum = 2; 617 | 618 | repeated Term args = 3; // Holds the positional arguments of the query. 619 | message AssocPair { 620 | optional string key = 1; 621 | optional Term val = 2; 622 | } 623 | repeated AssocPair optargs = 4; // Holds the optional arguments of the query. 624 | // (Note that the order of the optional arguments doesn't matter; think of a 625 | // Hash.) 626 | 627 | extensions 10000 to 20000; 628 | } 629 | 630 | //////////////////////////////////////////////////////////////////////////////// 631 | // EXAMPLE // 632 | //////////////////////////////////////////////////////////////////////////////// 633 | // ```ruby 634 | // r.table('tbl', {:use_outdated => true}).insert([{:id => 0}, {:id => 1}]) 635 | // ``` 636 | // Would turn into: 637 | // Term { 638 | // type = INSERT; 639 | // args = [Term { 640 | // type = TABLE; 641 | // args = [Term { 642 | // type = DATUM; 643 | // datum = Datum { type = R_STR; r_str = "tbl"; }; 644 | // }]; 645 | // optargs = [["use_outdated", 646 | // Term { 647 | // type = DATUM; 648 | // datum = Datum { type = R_BOOL; r_bool = true; }; 649 | // }]]; 650 | // }, 651 | // Term { 652 | // type = MAKE_ARRAY; 653 | // args = [Term { 654 | // type = DATUM; 655 | // datum = Datum { type = R_OBJECT; r_object = [["id", 0]]; }; 656 | // }, 657 | // Term { 658 | // type = DATUM; 659 | // datum = Datum { type = R_OBJECT; r_object = [["id", 1]]; }; 660 | // }]; 661 | // }] 662 | // } 663 | // And the server would reply: 664 | // Response { 665 | // type = SUCCESS_ATOM; 666 | // token = 1; 667 | // response = [Datum { type = R_OBJECT; r_object = [["inserted", 2]]; }]; 668 | // } 669 | // Or, if there were an error: 670 | // Response { 671 | // type = RUNTIME_ERROR; 672 | // token = 1; 673 | // response = [Datum { type = R_STR; r_str = "The table `tbl` doesn't exist!"; }]; 674 | // backtrace = [Frame { type = POS; pos = 0; }, Frame { type = POS; pos = 0; }]; 675 | // } 676 | -------------------------------------------------------------------------------- /src/ql2_util.erl: -------------------------------------------------------------------------------- 1 | %% @private 2 | -module(ql2_util). 3 | 4 | -export([datum_value/1, 5 | global_db/1]). 6 | 7 | -include("ql2_pb.hrl"). 8 | 9 | -spec datum_value(#datum{}) -> any(). 10 | datum_value(#datum{ type = 'R_NULL' }) -> 11 | null; 12 | datum_value(#datum{ type = 'R_BOOL', r_bool = Bool}) -> 13 | Bool; 14 | datum_value(#datum{ type = 'R_NUM', r_num = Num}) -> 15 | Num; 16 | datum_value(#datum{ type = 'R_STR', r_str = Str }) -> 17 | list_to_binary(Str); 18 | datum_value(#datum{ type = 'R_ARRAY', r_array = Array }) -> 19 | [ datum_value(D) || D <- Array ]; 20 | datum_value(#datum{ type = 'R_OBJECT', r_object = Objects }) -> 21 | {[ datum_assocpair_tuple(Obj) || Obj <- Objects ]}. 22 | 23 | -spec datum_assocpair_tuple(#datum_assocpair{}) -> {binary(), any()}. 
24 | datum_assocpair_tuple(Obj) -> 25 | {list_to_binary(Obj#datum_assocpair.key), datum_value(Obj#datum_assocpair.val)}. 26 | 27 | -spec global_db(binary()) -> #query_assocpair{}. 28 | global_db(Value) -> 29 | #query_assocpair { 30 | key = <<"db">>, 31 | val = #term { 32 | type = 'DB', 33 | args = lethink_ast:expr(Value) 34 | } 35 | }. 36 | -------------------------------------------------------------------------------- /test/database_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(database_SUITE). 2 | -include_lib("common_test/include/ct.hrl"). 3 | -export([suite/0, 4 | all/0, 5 | init_per_suite/1, 6 | end_per_suite/1]). 7 | 8 | -export([databases/1, 9 | tables/1, 10 | use/1]). 11 | 12 | %% Optional suite settings 13 | %%-------------------------------------------------------------------- 14 | %% Function: suite() -> Info 15 | %% Info = [tuple()] 16 | %%-------------------------------------------------------------------- 17 | 18 | suite() -> 19 | [{timetrap,{minutes,5}}]. 20 | 21 | all() -> 22 | [databases, 23 | tables, 24 | use]. 25 | 26 | %% Optional suite pre test initialization 27 | %%-------------------------------------------------------------------- 28 | %% Function: init_per_suite(Config0) -> 29 | %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} 30 | %% Config0 = Config1 = [tuple()] 31 | %% Reason = term() 32 | %%-------------------------------------------------------------------- 33 | 34 | init_per_suite(Config) -> 35 | lethink:start(), 36 | lethink:add_pool(pool, 1), 37 | lethink:query(pool, [{db_drop, <<"test_bin">>}]), 38 | lethink:query(pool, [{db_drop, <<"table_test">>}]), 39 | lethink:query(pool, [{db_drop, <<"use_test1">>}]), 40 | lethink:query(pool, [{db_drop, <<"use_test2">>}]), 41 | Config. 42 | 43 | %% Optional suite post test wind down 44 | %%-------------------------------------------------------------------- 45 | %% Function: end_per_suite(Config0) -> void() | {save_config,Config1} 46 | %% Config0 = Config1 = [tuple()] 47 | %%-------------------------------------------------------------------- 48 | 49 | end_per_suite(_Config) -> 50 | lethink:remove_pool(pool), 51 | lethink:stop(), 52 | ok. 53 | 54 | %% Tests 55 | databases(_Config) -> 56 | {ok, _} = lethink:query(pool, [{db_create, <<"test_bin">>}]), 57 | {ok, Dbs} = lethink:query(pool, [{db_list}]), 58 | true = lists:all(fun is_binary/1, Dbs), 59 | true = lists:member(<<"test_bin">>, Dbs), 60 | {error, _, _, _} = lethink:query(pool, [{db_create, <<"test_bin">>}]), 61 | {ok, _} = lethink:query(pool, [{db_drop, <<"test_bin">>}]), 62 | {ok, Dbs2} = lethink:query(pool, [{db_list}]), 63 | false = lists:member(<<"test_bin">>, Dbs2), 64 | {error, _, _, _} = lethink:query(pool, [{db_drop, <<"test_bin">>}]). 
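%% A minimal illustrative sketch (not part of the original suite): the
%% databases/1 case above shows that lethink:query/2 returns {ok, _} on
%% success and {error, _, _, _} on a runtime error such as creating a
%% database that already exists. `ensure_db/2` is a hypothetical helper
%% name used only for illustration; it relies solely on that result shape.
ensure_db(Pool, Name) ->
    case lethink:query(Pool, [{db_create, Name}]) of
        {ok, _} ->
            created;
        {error, _, _, _} ->
            %% assumed here to mean the database already exists;
            %% a stricter helper would inspect the error term
            already_exists
    end.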
65 | 66 | tables(_Config) -> 67 | {ok, _} = lethink:query(pool, [{db_create,<<"table_test">>}]), 68 | ok = lethink:use(pool, <<"table_test">>), 69 | {ok, []} = lethink:query(pool, [{db, <<"table_test">>}, {table_list}]), 70 | {ok, _} = lethink:query(pool, [{db, <<"table_test">>}, {table_create, <<"table_bin">>}]), 71 | {ok, Tables1} = lethink:query(pool, [{db, <<"table_test">>}, {table_list}]), 72 | true = lists:all(fun is_binary/1, Tables1), 73 | true = lists:member(<<"table_bin">>, Tables1), 74 | {error, _, _, _} = lethink:query(pool, [{db, <<"table_test">>}, {table_create, <<"table_bin">>}]), 75 | {ok, _} = lethink:query(pool, [{db, <<"table_test">>}, {table_drop, <<"table_bin">>}]), 76 | {ok, Tables2} = lethink:query(pool, [{db, <<"table_test">>}, {table_list}]), 77 | false = lists:member(<<"table_bin">>, Tables2), 78 | {error, _, _, _} = lethink:query(pool, [{db, <<"table_test">>}, {table_drop, <<"table_bin">>}]), 79 | {ok, _} = lethink:query(pool, [{db_drop, <<"table_test">>}]). 80 | 81 | use(_Config) -> 82 | {ok, _} = lethink:query(pool, [{db_create, <<"use_test1">>}]), 83 | ok = lethink:use(pool, <<"use_test1">>), 84 | {ok, _} = lethink:query(pool, [{db, <<"use_test1">>}, {table_create, <<"table1">>}]), 85 | {ok, Tables1} = lethink:query(pool, [{db, <<"use_test1">>}, {table_list}]), 86 | true = lists:member(<<"table1">>, Tables1), 87 | {ok, _} = lethink:query(pool, [{db_create, <<"use_test2">>}]), 88 | ok = lethink:use(pool, <<"use_test2">>), 89 | {ok, []} = lethink:query(pool, [{db, <<"use_test2">>}, {table_list}]), 90 | {ok, Tables1} = lethink:query(pool, [{db, <<"use_test1">>}, {table_list}]), 91 | {ok, _} = lethink:query(pool, [{db_drop, <<"use_test1">>}]), 92 | {ok, _} = lethink:query(pool, [{db_drop, <<"use_test2">>}]). 93 | -------------------------------------------------------------------------------- /test/functions_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(functions_SUITE). 2 | -include_lib("common_test/include/ct.hrl"). 3 | -export([suite/0, 4 | all/0, 5 | init_per_suite/1, 6 | end_per_suite/1]). 7 | 8 | -export([update/1]). 9 | 10 | %% Optional suite settings 11 | %%-------------------------------------------------------------------- 12 | %% Function: suite() -> Info 13 | %% Info = [tuple()] 14 | %%-------------------------------------------------------------------- 15 | 16 | suite() -> 17 | [{timetrap,{minutes,5}}]. 18 | 19 | all() -> 20 | [update]. 21 | 22 | %% Optional suite pre test initialization 23 | %%-------------------------------------------------------------------- 24 | %% Function: init_per_suite(Config0) -> 25 | %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} 26 | %% Config0 = Config1 = [tuple()] 27 | %% Reason = term() 28 | %%-------------------------------------------------------------------- 29 | 30 | init_per_suite(Config) -> 31 | lethink:start(), 32 | lethink:add_pool(pool, 1), 33 | lethink:query(pool, [{db_create, <<"function_db">>}]), 34 | lethink:query(pool, [{db, <<"function_db">>}, {table_create, <<"marvel">>}]), 35 | lethink:use(pool, <<"function_db">>), 36 | Config. 
37 | 38 | %% Optional suite post test wind down 39 | %%-------------------------------------------------------------------- 40 | %% Function: end_per_suite(Config0) -> void() | {save_config,Config1} 41 | %% Config0 = Config1 = [tuple()] 42 | %%-------------------------------------------------------------------- 43 | 44 | end_per_suite(_Config) -> 45 | lethink:query(pool, [{db_drop, <<"function_db">>}]), 46 | lethink:remove_pool(pool), 47 | lethink:stop(), 48 | ok. 49 | 50 | %% Tests 51 | update(_Config) -> 52 | TestData = [{[{<<"id">>, 1}, {<<"hero">>, <<"batman">>}, {<<"age">>, 30}]}, 53 | {[{<<"id">>, 2}, {<<"hero">>, <<"superman">>}, {<<"age">>, 50}]}], 54 | {ok, _} = lethink:query(pool, [{table, <<"marvel">>}, {insert, TestData}]), 55 | {ok, _} = lethink:query(pool, [{table, <<"marvel">>}, 56 | {update, {[{<<"age">>, [{row}, {get_field, <<"age">>}, {add, 1}]}]}}]), 57 | {ok, _} = lethink:query(pool, [{table, <<"marvel">>}, 58 | {update, fun(Row) -> [Row, {get_field, <<"age">>}, {add, 1}] end}]). 59 | -------------------------------------------------------------------------------- /test/lethink_ast_tests.erl: -------------------------------------------------------------------------------- 1 | -module(lethink_ast_tests). 2 | 3 | -include_lib("eunit/include/eunit.hrl"). 4 | 5 | -include("ql2_pb.hrl"). 6 | 7 | db_test() -> 8 | ?assertMatch(#term{}, lethink_ast:db(<<"test">>, [])), 9 | ?assertMatch({error, _}, lethink_ast:db("test", [])), 10 | ?assertMatch({error, _}, lethink_ast:db(<<"test">>, #term{})). 11 | 12 | table_test() -> 13 | ?assertMatch(#term{}, lethink_ast:table(<<"table">>, [])), 14 | ?assertMatch(#term{}, lethink_ast:table(<<"table">>, #term{ type = 'DB' })), 15 | ?assertMatch({error, _}, lethink_ast:table("table", [])), 16 | ?assertMatch({error, _}, lethink_ast:table(<<"table">>, #term{})). 17 | 18 | func_test() -> 19 | ?assertMatch(#term{}, lethink_ast:func(fun(_N) -> 1 end)). 20 | 21 | is_json_test() -> 22 | true = lethink_ast:is_json(null), 23 | true = lethink_ast:is_json(1), 24 | true = lethink_ast:is_json(1.1), 25 | true = lethink_ast:is_json(<<"test">>), 26 | true = lethink_ast:is_json({[]}), 27 | true = lethink_ast:is_json({<<"key">>, false}), 28 | false = lethink_ast:is_json({key, false}), 29 | false = lethink_ast:is_json({row}), 30 | false = lethink_ast:is_json({<<"test">>}). 31 | 32 | ivar_scan_test() -> 33 | ImpVarTerm = #term{ type = 'IMPLICIT_VAR' }, 34 | true = lethink_ast:ivar_scan(ImpVarTerm), 35 | true = lethink_ast:ivar_scan(#term{ args = ImpVarTerm}), 36 | true = lethink_ast:ivar_scan(#term{ args = [ImpVarTerm]}), 37 | false = lethink_ast:ivar_scan(true). 38 | 39 | expr_test() -> 40 | #term{ type = 'DATUM' } = lethink_ast:expr(5), 41 | #term{ type = 'FUNC' } = lethink_ast:expr(fun(X) -> [X] end). 42 | 43 | func_wrap_test() -> 44 | #term{ type = 'MAKE_ARRAY' } = lethink_ast:func_wrap([1,2,3]), 45 | #term{ type = 'DATUM' } = lethink_ast:func_wrap(<<"test">>), 46 | #term{ type = 'FUNC' } = lethink_ast:func_wrap([{row}]), 47 | #term{ type = 'FUNC' } = lethink_ast:func_wrap(fun(X) -> [X] end). 48 | -------------------------------------------------------------------------------- /test/math_and_logic_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(math_and_logic_SUITE). 2 | -include_lib("common_test/include/ct.hrl"). 3 | -export([suite/0, 4 | all/0, 5 | init_per_suite/1, 6 | end_per_suite/1]). 7 | 8 | -export([math/1, 9 | logic/1]). 
10 | 11 | 12 | 13 | %% Optional suite settings 14 | %%-------------------------------------------------------------------- 15 | %% Function: suite() -> Info 16 | %% Info = [tuple()] 17 | %%-------------------------------------------------------------------- 18 | 19 | suite() -> 20 | [{timetrap,{minutes,5}}]. 21 | 22 | all() -> 23 | [math, 24 | logic]. 25 | 26 | %% Optional suite pre test initialization 27 | %%-------------------------------------------------------------------- 28 | %% Function: init_per_suite(Config0) -> 29 | %% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1} 30 | %% Config0 = Config1 = [tuple()] 31 | %% Reason = term() 32 | %%-------------------------------------------------------------------- 33 | 34 | init_per_suite(Config) -> 35 | lethink:start(), 36 | lethink:add_pool(pool, 1), 37 | Config. 38 | 39 | %% Optional suite post test wind down 40 | %%-------------------------------------------------------------------- 41 | %% Function: end_per_suite(Config0) -> void() | {save_config,Config1} 42 | %% Config0 = Config1 = [tuple()] 43 | %%-------------------------------------------------------------------- 44 | 45 | end_per_suite(_Config) -> 46 | lethink:remove_pool(pool), 47 | lethink:stop(), 48 | ok. 49 | 50 | %% Tests 51 | math(_Config) -> 52 | {ok, 3.0} = lethink:query(pool, [{expr, 1}, {add, 2}]), 53 | {ok, <<"ab">>} = lethink:query(pool, [{expr, <<"a">>}, {add, <<"b">>}]), 54 | {ok, 2.0} = lethink:query(pool, [{expr, 5}, {sub, 3}]), 55 | {ok, 2.0} = lethink:query(pool, [{expr, 5.0}, {sub, 3}]), 56 | {ok, 2.0} = lethink:query(pool, [{expr, 5}, {sub, 3.0}]), 57 | {ok, 2.0} = lethink:query(pool, [{expr, 5.0}, {sub, 3.0}]), 58 | {ok, 15.0} = lethink:query(pool, [{expr, 3}, {mul, 5}]), 59 | {ok, 3.0} = lethink:query(pool, [{expr, 12}, {div_, 4}]), 60 | {ok, 1.0} = lethink:query(pool, [{expr, 8}, {mod, 7}]). 61 | 62 | logic(_Config) -> 63 | ok. 64 | --------------------------------------------------------------------------------
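The query lists exercised in math/1 above (for example `[{expr, 5}, {sub, 3}]`) are translated by lethink_ast into the Term/Datum tree defined in ql2.proto before being sent to the server. A minimal hand-built sketch of that wire structure, assuming only the #term{} and #datum{} records generated from ql2.proto into ql2_pb.hrl (the same records used by ql2_util.erl and lethink_ast_tests.erl); the exact construction inside lethink_ast may differ:

```
%% SUB = 25 takes NUMBER... -> NUMBER; each numeric literal becomes a DATUM term.
#term{ type = 'SUB',
       args = [#term{ type = 'DATUM',
                      datum = #datum{ type = 'R_NUM', r_num = 5 }},
               #term{ type = 'DATUM',
                      datum = #datum{ type = 'R_NUM', r_num = 3 }}]}.
```

This mirrors the worked TABLE/INSERT example at the bottom of ql2.proto.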