├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── src ├── dune ├── dune-project ├── fs.re ├── fs.rei ├── index.re ├── index.rei ├── main.re ├── membuf.re ├── membuf.rei ├── membufhash.re ├── membufhash.rei ├── membufq.re ├── membufq.rei ├── sample.json ├── shard.re ├── shard.rei ├── timeseries.re └── timeseries.rei └── test ├── butterflies.json ├── client.re ├── dune ├── dune-project └── honeybees.json /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | \.\#* 3 | \#*# 4 | 5 | *_build/ 6 | .DS_Store 7 | 8 | *.merlin 9 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ocaml/opam2:alpine 2 | 3 | RUN sudo apk add m4 4 | 5 | RUN opam update \ 6 | && opam install -y oml reason ezjsonm lwt_log \ 7 | && opam depext -i tls ssl irmin-unix 8 | 9 | ADD src src 10 | RUN sudo chown -R opam:nogroup src 11 | RUN cd src && opam config exec -- dune build --profile release ./main.exe 12 | 13 | FROM alpine 14 | 15 | RUN adduser nibble --disabled-password 16 | 17 | WORKDIR /home/nibble 18 | COPY --from=0 /home/opam/opam-repository/src/_build/default/main.exe ./nibbledb 19 | 20 | RUN apk update && apk add gmp libressl zlib openssl git 21 | 22 | USER nibble 23 | 24 | RUN openssl req -x509 -newkey rsa:4096 -keyout /tmp/server.key -out /tmp/server.crt -days 3650 -nodes -subj "/C=UK/ST=foo/L=bar/O=baz/OU=Department/CN=example.com" 25 | 26 | EXPOSE 8000 27 | 28 | ENTRYPOINT ["/home/nibble/nibbledb"] 29 | 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 John Moore 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # nibbledb 2 | 3 | [Documentation](https://jptmoore.gitbook.io/nibble/) -------------------------------------------------------------------------------- /src/dune: -------------------------------------------------------------------------------- 1 | 2 | (executable 3 | (name main) 4 | (libraries ptime.clock.os irmin-unix oml ezjsonm lwt_log)) 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /src/dune-project: -------------------------------------------------------------------------------- 1 | (lang dune 1.6) 2 | -------------------------------------------------------------------------------- /src/fs.re: -------------------------------------------------------------------------------- 1 | open Lwt.Infix; 2 | 3 | type t = { 4 | index_dir: string, 5 | cmd: Lwt_process.command 6 | }; 7 | 8 | let gitcmd = "git ls-tree --name-only master"; 9 | 10 | let create = (~index_dir) => { 11 | { 12 | index_dir: index_dir, 13 | cmd: Lwt_process.shell("cd " ++ index_dir ++ ";" ++ gitcmd) 14 | } 15 | }; 16 | 17 | let ts_names = (~ctx) => { 18 | Lwt_process.pread_lines(ctx.cmd) |> 19 | Lwt_stream.to_list 20 | } -------------------------------------------------------------------------------- /src/fs.rei: -------------------------------------------------------------------------------- 1 | type t; 2 | 3 | let create: (~index_dir: string) => t; 4 | 5 | let ts_names: (~ctx: t) => Lwt.t(list(string)); 6 | -------------------------------------------------------------------------------- /src/index.re: -------------------------------------------------------------------------------- 1 | open Lwt.Infix; 2 | 3 | type index = list((int64,int64)); 4 | 5 | let index_t = Irmin.Type.(list(pair(int64, int64))); 6 | 7 | let print = (m) => Fmt.pr("%a\n%!", Irmin.Type.pp_json(index_t), m); 8 | 9 | let merge = Irmin.Merge.(option(idempotent(index_t))); 10 | 11 | module Index: Irmin.Contents.S with type t = index = { 12 | type t = index; 13 | let t = index_t; 14 | let merge = merge; 15 | /* let pp = Irmin.Type.pp_json(index_t); */ 16 | /* let of_string = (s) => Irmin.Type.decode_json(index_t, Jsonm.decoder(`String(s))); */ 17 | }; 18 | 19 | module Store = Irmin_unix.Git.FS.KV(Index); 20 | 21 | type t = Store.t; 22 | 23 | let filter_list = (rem_lis, lis) => { 24 | open List; 25 | let rec loop = (acc, l) => 26 | switch l { 27 | | [] => acc 28 | | [x, ...xs] => mem(x, rem_lis) ? loop(acc, xs) : loop(cons(x, acc), xs) 29 | }; 30 | loop([], lis); 31 | }; 32 | 33 | let bounds = (lis) => { 34 | if (lis == []) { 35 | None; 36 | } else { 37 | Some(List.fold_left(((x,y), (x',y')) => 38 | (min(x,x'),max(y,y')), (Int64.max_int,Int64.min_int), lis)) 39 | } 40 | } 41 | 42 | let tup_sort = (lis) => { 43 | let cmp = ((_, y), (_, y')) => y < y' ? 
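/* descending sort on the upper bound of each (lb, ub) index entry */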
1 : (-1); 44 | List.sort(cmp, lis); 45 | }; 46 | 47 | let add_tuple = (tup, lis) => List.cons(tup, lis) |> tup_sort; 48 | 49 | 50 | let create = (~file, ~bare) => { 51 | let config = Irmin_git.config(file, ~bare); 52 | let repo = Store.Repo.v(config); 53 | repo >>= (repo => Store.master(repo)); 54 | }; 55 | 56 | let read = (branch, k) => { 57 | branch >>= (branch' => Store.find(branch', [k])); 58 | }; 59 | 60 | let write = (branch, info, k, v) => { 61 | branch >>= (branch' => Store.set_exn(branch', ~info, [k], v)); 62 | }; 63 | 64 | let update = (branch, info, k, tup, remove_list) => { 65 | read(branch, k) >>= 66 | (data) => 67 | switch data { 68 | | Some((curr_lis)) => { 69 | let filtered = filter_list(remove_list, curr_lis); 70 | let new_index = add_tuple(tup, filtered); 71 | write(branch, info, k, new_index) >|= 72 | () => bounds(new_index) 73 | }; 74 | | None => write(branch, info, k, [tup]) >|= 75 | () => bounds([tup]) 76 | }; 77 | }; 78 | 79 | let get = (branch, k) => { 80 | read(branch, k) 81 | }; 82 | 83 | let length = (branch, k) => { 84 | read(branch, k) >|= 85 | (data) => 86 | switch data { 87 | | None => 0; 88 | | Some(index) => List.length(index); 89 | } 90 | } 91 | 92 | let overlap_worker = (index, lis) => { 93 | let (x, y) = index; 94 | List.filter(((x', y')) => x <= y' && y >= x', lis); 95 | }; 96 | 97 | let overlap = (branch, k, index) => { 98 | read(branch, k) >|= 99 | (data) => { 100 | switch data { 101 | | Some((lis)) => overlap_worker(index, lis) 102 | | None => [] 103 | } 104 | }; 105 | }; 106 | 107 | let range = (branch, k) => { 108 | read(branch, k) >|= 109 | (data) => { 110 | switch data { 111 | | None => None 112 | | Some((lis)) => bounds(lis) 113 | } 114 | }; 115 | }; 116 | 117 | 118 | 119 | 120 | -------------------------------------------------------------------------------- /src/index.rei: -------------------------------------------------------------------------------- 1 | type t; 2 | 3 | let print: (list((int64, int64))) => unit; 4 | 5 | let create: (~file: string, ~bare: bool) => Lwt.t(t); 6 | 7 | let update: (Lwt.t(t), Irmin.Info.f, string, (int64,int64), list((int64, int64))) => Lwt.t(option((int64,int64))); 8 | 9 | let get: (Lwt.t(t), string) => Lwt.t(option(list((int64, int64)))); 10 | 11 | let overlap: (Lwt.t(t), string, (int64,int64)) => Lwt.t(list((int64, int64))); 12 | 13 | let range: (Lwt.t(t), string) => Lwt.t(option((int64,int64))); 14 | 15 | let length: (Lwt.t(t), string) => Lwt.t(int); -------------------------------------------------------------------------------- /src/main.re: -------------------------------------------------------------------------------- 1 | open Lwt.Infix; 2 | 3 | open Cohttp; 4 | 5 | open Cohttp_lwt_unix; 6 | 7 | let path_to_db = ref("/tmp/nibble/"); 8 | let http_port = ref(8000); 9 | let max_buffer_size = ref(100000); 10 | let shard_size = ref(20000); 11 | let show_files = ref(false); 12 | let log_mode = ref(false); 13 | let tls_mode = ref(false); 14 | let cert_file = ref("/tmp/server.crt"); 15 | let key_file = ref("/tmp/server.key"); 16 | 17 | let info = (fmt) => Irmin_unix.info(~author="nibbledb", fmt); 18 | 19 | type t = { 20 | db: Timeseries.t, 21 | m: Lwt_mutex.t 22 | }; 23 | 24 | exception Interrupt(string); 25 | 26 | 27 | module Http_response { 28 | let json_header = Header.of_list([("Content-Type", "application/json")]); 29 | let text_header = Header.of_list([("Content-Type", "text/plain")]); 30 | let ok = (~content="", ()) => { 31 | Server.respond_string(~status=`OK, ~body=content, ~headers=json_header, ()); 32 | 
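/* success responses carry JSON; the error helpers below fall back to text/plain */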
}; 33 | let bad_request = (~content="", ()) => { 34 | Server.respond_string(~status=`Bad_request, ~body=content, ~headers=text_header, ()); 35 | }; 36 | let not_found = (~content="", ()) => { 37 | Server.respond_string(~status=`Not_found, ~body=content, ~headers=text_header, ()); 38 | }; 39 | let internal_server_error = (~content="", ()) => { 40 | Server.respond_string(~status=`Internal_server_error, ~body=content, ~headers=text_header, ()); 41 | }; 42 | }; 43 | 44 | let post_worker = (ctx, id, json) => { 45 | open Timeseries; 46 | switch(validate_json(json)) { 47 | | Some((t,j)) => write(~ctx=ctx.db, ~info=info("write"), ~timestamp=t, ~id=id, ~json=j) 48 | | None => failwith("badly formatted json") 49 | }; 50 | } 51 | 52 | let handle_json(data) { 53 | try { 54 | Ezjsonm.from_string(data) 55 | } { 56 | | _ => failwith("badly formatted json"); 57 | }; 58 | } 59 | 60 | let post = (ctx, id, body) => { 61 | open Ezjsonm; 62 | body |> Cohttp_lwt.Body.to_string >|= 63 | handle_json >>= json => switch(json) { 64 | | `O(_) => post_worker(ctx, id, json) 65 | | `A(lis) => Lwt_list.iter_s(x => post_worker(ctx, id, `O(get_dict(x))), lis) 66 | } >>= fun () => Http_response.ok() 67 | }; 68 | 69 | let post_req = (ctx, path_list, body) => { 70 | switch (path_list) { 71 | | [_, _, _, "ts", id] => post(ctx, id, body) 72 | | _ => failwith("unknown path") 73 | } 74 | }; 75 | 76 | let read_last = (ctx, ids, n, xargs) => { 77 | open Timeseries; 78 | let id_list = String.split_on_char(',', ids); 79 | read_last(~ctx=ctx.db, ~info=info("read_last"), ~id_list, ~n=int_of_string(n), ~xargs) >|= 80 | Ezjsonm.to_string >>= s => Http_response.ok(~content=s, ()) 81 | }; 82 | 83 | let read_first = (ctx, ids, n, xargs) => { 84 | open Timeseries; 85 | let id_list = String.split_on_char(',', ids); 86 | read_first(~ctx=ctx.db, ~info=info("read_first"), ~id_list, ~n=int_of_string(n), ~xargs) >|= 87 | Ezjsonm.to_string >>= s => Http_response.ok(~content=s, ()) 88 | }; 89 | 90 | let read_since = (ctx, ids, from, xargs) => { 91 | open Timeseries; 92 | let id_list = String.split_on_char(',', ids); 93 | read_since(~ctx=ctx.db, ~id_list, ~from=Int64.of_string(from), ~xargs) >|= 94 | Ezjsonm.to_string >>= s => Http_response.ok(~content=s, ()) 95 | }; 96 | 97 | let delete_since = (ctx, ids, from, xargs) => { 98 | open Timeseries; 99 | let id_list = String.split_on_char(',', ids); 100 | read_since(~ctx=ctx.db, ~id_list, ~from=Int64.of_string(from), ~xargs) >>= 101 | json => delete(~ctx=ctx.db, ~info=info("delete_since"), ~id_list, ~json) >>= 102 | () => Http_response.ok() 103 | }; 104 | 105 | let read_range = (ctx, ids, from, to_, xargs) => { 106 | open Timeseries; 107 | let id_list = String.split_on_char(',', ids); 108 | read_range(~ctx=ctx.db, ~id_list, ~from=Int64.of_string(from), ~to_=Int64.of_string(to_), ~xargs) >|= 109 | Ezjsonm.to_string >>= s => Http_response.ok(~content=s, ()) 110 | }; 111 | 112 | let delete_range = (ctx, ids, from, to_, xargs) => { 113 | open Timeseries; 114 | let id_list = String.split_on_char(',', ids); 115 | read_range(~ctx=ctx.db, ~id_list, ~from=Int64.of_string(from), ~to_=Int64.of_string(to_), ~xargs) >>= 116 | json => delete(~ctx=ctx.db, ~info=info("delete_range"), ~id_list, ~json) >>= 117 | () => Http_response.ok() 118 | }; 119 | 120 | let length = (ctx, ids) => { 121 | open Timeseries; 122 | let id_list = String.split_on_char(',', ids); 123 | length(~ctx=ctx.db, ~id_list) >>= 124 | n => Http_response.ok(~content=Printf.sprintf("{\"length\":%d}", n), ()) 125 | } 126 | 127 | let length_in_memory = 
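/* counts only datapoints still buffered in memory, not yet flushed to the shard store */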
(ctx, ids) => { 128 | open Timeseries; 129 | let id_list = String.split_on_char(',', ids); 130 | length_in_memory(~ctx=ctx.db, ~id_list) >>= 131 | n => Http_response.ok(~content=Printf.sprintf("{\"length\":%d}", n), ()) 132 | } 133 | 134 | let length_on_disk = (ctx, ids) => { 135 | open Timeseries; 136 | let id_list = String.split_on_char(',', ids); 137 | length_on_disk(~ctx=ctx.db, ~id_list) >>= 138 | n => Http_response.ok(~content=Printf.sprintf("{\"length\":%d}", n), ()) 139 | } 140 | 141 | let length_of_index = (ctx, ids) => { 142 | open Timeseries; 143 | let id_list = String.split_on_char(',', ids); 144 | length_of_index(~ctx=ctx.db, ~id_list) >>= 145 | n => Http_response.ok(~content=Printf.sprintf("{\"length\":%d}", n), ()) 146 | } 147 | 148 | let get_index = (ctx, id) => { 149 | open Timeseries; 150 | get_index(~ctx=ctx.db, ~id) >|= 151 | Ezjsonm.to_string >>= s => Http_response.ok(~content=s, ()) 152 | } 153 | 154 | let timeseries_sync = (ctx) => { 155 | Timeseries.flush(~ctx=ctx.db, ~info=info("sync")) >>= 156 | () => Http_response.ok() 157 | } 158 | 159 | let timeseries_names = (ctx) => { 160 | Timeseries.names(~ctx=ctx.db) >|= 161 | Ezjsonm.to_string >>= s => Http_response.ok(~content=s, ()) 162 | } 163 | 164 | let timeseries_stats = (ctx) => { 165 | Timeseries.stats(~ctx=ctx.db) >|= 166 | Ezjsonm.to_string >>= s => Http_response.ok(~content=s, ()) 167 | } 168 | 169 | let health_check = (ctx) => { 170 | open Ezjsonm; 171 | let json = dict([("status", string("ok"))]); 172 | Http_response.ok(~content=to_string(json), ()) 173 | } 174 | 175 | let get_req = (ctx, path_list) => { 176 | switch (path_list) { 177 | | [_, _, _, "ts", ids, "last", n, ...xargs] => read_last(ctx, ids, n, xargs) 178 | | [_, _, _, "ts", ids, "latest", ...xargs] => read_last(ctx, ids, "1", xargs) 179 | | [_, _, _, "ts", ids, "first", n, ...xargs] => read_first(ctx, ids, n, xargs) 180 | | [_, _, _, "ts", ids, "earliest", ...xargs] => read_first(ctx, ids, "1", xargs) 181 | | [_, _, _, "ts", ids, "since", from, ...xargs] => read_since(ctx, ids, from, xargs) 182 | | [_, _, _, "ts", ids, "range", from, to_, ...xargs] => read_range(ctx, ids, from, to_, xargs) 183 | | [_, _, _, "ts", ids, "length"] => length(ctx, ids) 184 | | [_, _, _, "ts", ids, "memory", "length"] => length_in_memory(ctx, ids) 185 | | [_, _, _, "ts", ids, "disk", "length"] => length_on_disk(ctx, ids) 186 | | [_, _, _, "ts", ids, "index", "length"] => length_of_index(ctx, ids) 187 | | [_, _, _, "ts", id, "index"] => get_index(ctx, id) 188 | | [_, _, _, "info", "ts", "names"] => timeseries_names(ctx) 189 | | [_, _, _, "info", "ts", "stats"] => timeseries_stats(ctx) 190 | | [_, _, _, "info", "status"] => health_check(ctx) 191 | | [_, _, _, "ctl", "ts", "sync"] => timeseries_sync(ctx) 192 | | _ => Http_response.bad_request(~content="Error:unknown path\n", ()) 193 | } 194 | }; 195 | 196 | let delete_req = (ctx, path_list) => { 197 | switch (path_list) { 198 | | [_, _, _, "ts", ids, "since", from, ...xargs] => delete_since(ctx, ids, from, xargs) 199 | | [_, _, _, "ts", ids, "range", from, to_, ...xargs] => delete_range(ctx, ids, from, to_, xargs) 200 | | _ => Http_response.bad_request(~content="Error:unknown path\n", ()) 201 | } 202 | }; 203 | 204 | let handle_req_worker = (ctx, req, body) => { 205 | let meth = req |> Request.meth; 206 | let uri_path = req |> Request.uri |> Uri.to_string; 207 | let path_list = String.split_on_char('/', uri_path); 208 | switch (meth) { 209 | | `POST => post_req(ctx, path_list, body); 210 | | `GET => get_req(ctx, 
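/* path_list is the full URI split on slashes, so the three leading wildcards skip the scheme, the empty segment and the host before route matching starts */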
path_list); 211 | | `DELETE => delete_req(ctx, path_list); 212 | | _ => Http_response.bad_request(~content="Error:unknown method\n", ()) 213 | } 214 | }; 215 | 216 | let handle_req_safe = (ctx, req, body) => { 217 | () => Lwt.catch( 218 | () => handle_req_worker(ctx, req, body), 219 | fun 220 | | Failure(m) => Http_response.bad_request(~content=Printf.sprintf("Error:%s\n",m), ()) 221 | | e => Lwt.fail(e) 222 | ); 223 | }; 224 | 225 | let handle_req = (ctx, req, body) => { 226 | Lwt_mutex.with_lock(ctx.m, handle_req_safe(ctx, req, body)) 227 | }; 228 | 229 | 230 | let server (~ctx) = { 231 | let callback = (_conn, req, body) => handle_req(ctx, req, body); 232 | let http = `TCP(`Port(http_port^)); 233 | let https = `TLS((`Crt_file_path(cert_file^), `Key_file_path(key_file^), `No_password, `Port(http_port^))); 234 | Server.create(~mode=(tls_mode^ ? https : http), Server.make(~callback, ())); 235 | }; 236 | 237 | 238 | let register_signal_handlers = () => { 239 | Lwt_unix.(on_signal(Sys.sigterm, (_) => raise(Interrupt("Caught SIGTERM"))) |> 240 | _ => on_signal(Sys.sighup, (_) => raise(Interrupt("Caught SIGHUP"))) |> 241 | _ => on_signal(Sys.sigint, (_) => raise(Interrupt("Caught SIGINT")))); 242 | }; 243 | 244 | let parse_cmdline = () => { 245 | let usage = "usage: " ++ Sys.argv[0]; 246 | let speclist = [ 247 | ( 248 | "--db", 249 | Arg.Set_string(path_to_db), 250 | ": to set the location for the database files" 251 | ), 252 | ( 253 | "--cert-file", 254 | Arg.Set_string(cert_file), 255 | ": to provide the TLS certificate" 256 | ), 257 | ( 258 | "--key-file", 259 | Arg.Set_string(key_file), 260 | ": to provide the TLS key" 261 | ), 262 | ( 263 | "--http-port", 264 | Arg.Set_int(http_port), 265 | ": to set the http port" 266 | ), 267 | ( 268 | "--max-buffer-size", 269 | Arg.Set_int(max_buffer_size), 270 | ": to set the max buffer size" 271 | ), 272 | ( 273 | "--shard-size", 274 | Arg.Set_int(shard_size), 275 | ": to set the shard size" 276 | ), 277 | ( 278 | "--show-files", 279 | Arg.Set(show_files), 280 | ": to show files in git" 281 | ), 282 | ("--enable-debug", Arg.Set(log_mode), ": turn debug mode on"), 283 | ("--enable-tls", Arg.Set(tls_mode), ": use https") 284 | 285 | ]; 286 | Arg.parse(speclist, x => raise(Arg.Bad("Bad argument : " ++ x)), usage); 287 | }; 288 | 289 | let enable_debug = () => { 290 | Lwt_log_core.default := 291 | Lwt_log.channel( 292 | ~template="$(date).$(milliseconds) [$(level)] $(message)", 293 | ~close_mode=`Keep, 294 | ~channel=Lwt_io.stdout, 295 | () 296 | ); 297 | Lwt_log_core.add_rule("*", Lwt_log_core.Debug); 298 | }; 299 | 300 | let init = () => { 301 | let () = ignore(register_signal_handlers()); 302 | parse_cmdline(); 303 | log_mode^ ? 
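/* --enable-debug switches Lwt_log to verbose output on stdout */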
enable_debug() : (); 304 | { 305 | db: Timeseries.create(~path_to_db=path_to_db^, ~max_buffer_size=max_buffer_size^, ~shard_size=shard_size^, ~show_files=show_files^), 306 | m: Lwt_mutex.create() 307 | }; 308 | }; 309 | 310 | let flush_server = (ctx) => { 311 | Lwt_main.run { 312 | Lwt_io.printf("\nShutting down server...\n") >>= 313 | () => Timeseries.flush(~ctx=ctx.db, ~info=info("flush")) >>= 314 | () => Lwt_unix.sleep(1.0) >>= 315 | () => Lwt_io.printf("OK\n") 316 | }; 317 | }; 318 | 319 | let run_server = (~ctx) => { 320 | let () = { 321 | try (Lwt_main.run(server(~ctx))) { 322 | | Interrupt(_) => ignore(flush_server(ctx)); 323 | }; 324 | }; 325 | }; 326 | 327 | run_server(~ctx=init()); 328 | 329 | -------------------------------------------------------------------------------- /src/membuf.re: -------------------------------------------------------------------------------- 1 | open Lwt.Infix; 2 | 3 | type t = { 4 | ht: Membufhash.t, 5 | m: Lwt_mutex.t 6 | }; 7 | 8 | let create = () => {ht: Membufhash.create(), m: Lwt_mutex.create()}; 9 | 10 | let serialise_worker = (ctx, key) => { 11 | let q = Membufhash.get(ctx.ht, key); 12 | (key, Membufq.to_list(q)); 13 | }; 14 | 15 | let serialise = (ctx) => { 16 | let keys = Membufhash.get_keys(ctx.ht); 17 | List.map((k) => serialise_worker(ctx, k), keys) |> Lwt.return; 18 | }; 19 | 20 | let empty_series = (ctx, key) => { 21 | let q = Membufhash.get(ctx.ht, key); 22 | Membufq.clear(q); 23 | Lwt.return_unit; 24 | }; 25 | 26 | let empty = (ctx) => { 27 | let keys = Membufhash.get_keys(ctx.ht); 28 | Lwt_list.iter_s((k) => empty_series(ctx, k), keys); 29 | }; 30 | 31 | let get_keys = (ctx) => { 32 | Membufhash.get_keys(ctx.ht); 33 | } 34 | 35 | let handle_write = (ctx, id, elt) => 36 | if (Membufhash.exists(ctx.ht, id)) { 37 | let q = Membufhash.get(ctx.ht, id); 38 | Membufq.push(q, elt); 39 | Membufhash.replace(ctx.ht, id, q); 40 | } else { 41 | let q = Membufq.create(); 42 | Membufq.push(q, elt); 43 | Membufhash.add(ctx.ht, id, q); 44 | }; 45 | 46 | let write = (ctx, id, elt) => 47 | Lwt_mutex.lock(ctx.m) 48 | >>= (() => handle_write(ctx, id, elt) |> (() => Lwt_mutex.unlock(ctx.m) |> Lwt.return)); 49 | 50 | let read = (ctx, id) => { 51 | let q = Membufhash.get(ctx.ht, id); 52 | Membufq.pop(q) |> Lwt.return; 53 | }; 54 | 55 | let length = (ctx, id) => { 56 | let q = Membufhash.get(ctx.ht, id); 57 | Membufq.length(q) |> Lwt.return; 58 | }; 59 | 60 | let exists = (ctx, id) => Membufhash.exists(ctx.ht, id); 61 | 62 | let to_list = (ctx, id) => { 63 | let q = Membufhash.get(ctx.ht, id); 64 | Membufq.to_list(q) |> Lwt.return; 65 | }; 66 | 67 | let is_ascending = (ctx, id, ub) => { 68 | let q = Membufhash.get(ctx.ht, id); 69 | Membufq.is_ascending(q, ub); 70 | }; 71 | 72 | let is_descending = (ctx, id, lb) => { 73 | let q = Membufhash.get(ctx.ht, id); 74 | Membufq.is_descending(q, lb); 75 | }; 76 | 77 | let set_disk_range = (ctx, id, range) => { 78 | let q = Membufhash.get(ctx.ht, id); 79 | Membufq.set_disk_range(q, range); 80 | }; 81 | 82 | let get_disk_range = (ctx, id) => { 83 | let q = Membufhash.get(ctx.ht, id); 84 | Membufq.get_disk_range(q); 85 | }; -------------------------------------------------------------------------------- /src/membuf.rei: -------------------------------------------------------------------------------- 1 | type t; 2 | 3 | let create: unit => t; 4 | 5 | let write: (t, string, (int64, Ezjsonm.t)) => Lwt.t(unit); 6 | 7 | let read: (t, string) => Lwt.t((int64, Ezjsonm.t)); 8 | 9 | let length: (t, string) => Lwt.t(int); 10 | 11 | 
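/* Usage sketch (added for illustration, not in the original source): callers keep
   one buffer per series id and writes serialise on the internal mutex:
     let buf = Membuf.create();
     Membuf.write(buf, "sensor1", (1L, Ezjsonm.dict([("value", `Float(42.0))])))
     >>= (() => Membuf.length(buf, "sensor1"));  /* resolves to 1 */
*/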
let exists: (t, string) => bool; 12 | 13 | let to_list: (t, string) => Lwt.t(list((int64, Ezjsonm.t))); 14 | 15 | let is_ascending: (t, string, int64) => bool; 16 | 17 | let is_descending: (t, string, int64) => bool; 18 | 19 | let serialise: t => Lwt.t(list((string, list((int64, Ezjsonm.t))))); 20 | 21 | let empty: t => Lwt.t(unit); 22 | 23 | let empty_series: (t, string) => Lwt.t(unit); 24 | 25 | let set_disk_range: (t, string, option((int64, int64))) => unit; 26 | 27 | let get_disk_range: (t, string) => option((int64, int64)); 28 | 29 | let get_keys: (t) => list(string); -------------------------------------------------------------------------------- /src/membufhash.re: -------------------------------------------------------------------------------- 1 | 2 | type t = {ht: Hashtbl.t(string, Membufq.t)}; 3 | 4 | let create = () => {ht: Hashtbl.create(~random=false, 10)}; 5 | 6 | let get_keys = (ctx) => Hashtbl.fold((k, _, acc) => [k, ...acc], ctx.ht, []); 7 | 8 | let exists = (ctx, id) => Hashtbl.mem(ctx.ht, id); 9 | 10 | let add = (ctx, id, q) => Hashtbl.replace(ctx.ht, id, q); 11 | 12 | let replace = (ctx, id, q) => Hashtbl.replace(ctx.ht, id, q); 13 | 14 | let get = (ctx, id) => Hashtbl.find(ctx.ht, id); -------------------------------------------------------------------------------- /src/membufhash.rei: -------------------------------------------------------------------------------- 1 | type t; 2 | 3 | let create: unit => t; 4 | 5 | let add: (t, string, Membufq.t) => unit; 6 | 7 | let replace: (t, string, Membufq.t) => unit; 8 | 9 | let get: (t, string) => Membufq.t; 10 | 11 | let exists: (t, string) => bool; 12 | 13 | let get_keys: t => list(string); -------------------------------------------------------------------------------- /src/membufq.re: -------------------------------------------------------------------------------- 1 | 2 | type t = { 3 | q: Queue.t((int64, Ezjsonm.t)), 4 | mutable disk_range: option((int64, int64)) 5 | }; 6 | 7 | let create = () => {q: Queue.create(), disk_range: None}; 8 | 9 | let push = (ctx, n) => Queue.push(n, ctx.q); 10 | 11 | let pop = (ctx) => Queue.pop(ctx.q); 12 | 13 | let length = (ctx) => Queue.length(ctx.q); 14 | 15 | let to_list = (ctx) => Queue.fold((x, y) => List.cons(y, x), [], ctx.q); 16 | 17 | let is_ascending = (ctx, ub) => { 18 | let rec is_sorted = (lis) => 19 | switch lis { 20 | | [(t1,_), (t2,j2), ...l] => 21 | t1 >= t2 && is_sorted([(t2, j2), ...l]) 22 | | _ => true 23 | }; 24 | switch (to_list(ctx)) { 25 | | [] => true 26 | | [(t,j), ...l] => is_sorted([(t, j), ...l]) && t >= ub 27 | }; 28 | }; 29 | 30 | let is_descending = (ctx, lb) => { 31 | let rec is_sorted = (lis) => 32 | switch lis { 33 | | [(t1,_), (t2,j2), ...l] => 34 | t1 <= t2 && is_sorted([(t2, j2), ...l]) 35 | | _ => true 36 | }; 37 | switch (to_list(ctx)) { 38 | | [] => true 39 | | [(t,j), ...l] => is_sorted([(t, j), ...l]) && t <= lb 40 | }; 41 | }; 42 | 43 | let clear = (ctx) => Queue.clear(ctx.q); 44 | 45 | let set_disk_range = (ctx, range) => ctx.disk_range = range; 46 | 47 | let get_disk_range = (ctx) => ctx.disk_range; -------------------------------------------------------------------------------- /src/membufq.rei: -------------------------------------------------------------------------------- 1 | type t; 2 | 3 | let create: unit => t; 4 | 5 | let push: (t, (int64, Ezjsonm.t)) => unit; 6 | 7 | let pop: t => (int64, Ezjsonm.t); 8 | 9 | let length: t => int; 10 | 11 | let to_list: t => list((int64, Ezjsonm.t)); 12 | 13 | let is_ascending: (t, int64) => bool; 14 | 
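/* Ordering note (illustrative): to_list returns newest-first, so after
   push(q, (1L, j)) and push(q, (2L, j)) it yields [(2L, j), (1L, j)];
   is_ascending(q, ub) then holds when pushes arrived in non-decreasing
   time order and the newest timestamp is at least ub. */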
15 | let is_descending: (t, int64) => bool; 16 | 17 | let clear: t => unit; 18 | 19 | let set_disk_range: (t, option((int64, int64))) => unit; 20 | 21 | let get_disk_range: t => option((int64, int64)); -------------------------------------------------------------------------------- /src/sample.json: -------------------------------------------------------------------------------- 1 | [ 2 | {"value":1},{"value":2},{"value":3},{"value":4},{"value":5},{"value":6},{"value":7},{"value":8},{"value":9},{"value":10}, 3 | {"value":11},{"value":12},{"value":13},{"value":14},{"value":15},{"value":16},{"value":17},{"value":18},{"value":19},{"value":20}, 4 | {"value":21},{"value":22},{"value":23},{"value":24},{"value":25},{"value":26},{"value":27},{"value":28},{"value":29},{"value":30}, 5 | {"value":31},{"value":32},{"value":33},{"value":34},{"value":35},{"value":36},{"value":37},{"value":38},{"value":39},{"value":40}, 6 | {"value":41},{"value":42},{"value":43},{"value":44},{"value":45},{"value":46},{"value":47},{"value":48},{"value":49},{"value":50}, 7 | {"value":51},{"value":52},{"value":53},{"value":54},{"value":55},{"value":56},{"value":57},{"value":58},{"value":59},{"value":60}, 8 | {"value":61},{"value":62},{"value":63},{"value":64},{"value":65},{"value":66},{"value":67},{"value":68},{"value":69},{"value":70}, 9 | {"value":71},{"value":72},{"value":73},{"value":74},{"value":75},{"value":76},{"value":77},{"value":78},{"value":79},{"value":80}, 10 | {"value":81},{"value":82},{"value":83},{"value":84},{"value":85},{"value":86},{"value":87},{"value":88},{"value":89},{"value":90}, 11 | {"value":91},{"value":92},{"value":93},{"value":94},{"value":95},{"value":96},{"value":97},{"value":98},{"value":99},{"value":100} 12 | 13 | ] 14 | -------------------------------------------------------------------------------- /src/shard.re: -------------------------------------------------------------------------------- 1 | open Lwt.Infix; 2 | 3 | type datapoint = { 4 | tag: option(list((string,string))), 5 | value: float 6 | }; 7 | 8 | let datapoint_t = 9 | Irmin.Type.( 10 | record("datapoint", (tag, value) => {tag, value}) 11 | |+ field("tag", option(list(pair(string,string))), (t) => t.tag) 12 | |+ field("value", float, (t) => t.value) 13 | |> sealr 14 | ); 15 | 16 | type shard = list((int64,datapoint)); 17 | 18 | let shard_t = Irmin.Type.(list(pair(int64, datapoint_t))); 19 | 20 | 21 | let print = (m) => Fmt.pr("%a\n%!", Irmin.Type.pp_json(shard_t), m); 22 | 23 | let merge = Irmin.Merge.(option(idempotent(shard_t))); 24 | 25 | 26 | module Shard: Irmin.Contents.S with type t = shard = { 27 | type t = shard; 28 | let t = shard_t; 29 | let merge = merge; 30 | /* let pp = Irmin.Type.pp_json(shard_t); */ 31 | /* let of_string = (s) => Irmin.Type.decode_json(shard_t, Jsonm.decoder(`String(s))); */ 32 | }; 33 | 34 | module Store = Irmin_unix.Git.FS.KV(Shard); 35 | 36 | type t = Store.t; 37 | type err = Store.write_error; 38 | 39 | let make_native_tag = (data) => { 40 | open List; 41 | let rec loop = (acc, l) => { 42 | switch (l) { 43 | | `A([]) => rev(acc); 44 | | `A([`O([(s1, `String(s2))]), ...rest]) => loop(cons((s1,s2), acc), `A(rest)); 45 | | _ => failwith("badly formatted json"); 46 | } 47 | }; 48 | loop([], data); 49 | } 50 | 51 | let make_json_tag = (data) => { 52 | open List; 53 | let rec loop = (acc, l) => { 54 | switch (l) { 55 | | [] => `A(rev(acc)); 56 | | [(name,value), ...rest] => 57 | loop(cons(Ezjsonm.dict([(name, `String(value))]), acc), rest); 58 | } 59 | }; 60 | loop([], data); 61 | } 62 | 63 | let 
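/* pairs a timestamp with its datapoint record for storage in a shard */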
format_datapoint = (ts, tag, value) => { 64 | (ts,{tag: tag, value: value}); 65 | }; 66 | 67 | 68 | let convert_worker = (ts, datapoint) => { 69 | open Ezjsonm; 70 | switch(datapoint) { 71 | | [(_, n)] => 72 | format_datapoint(ts, None, get_float(n)); 73 | | [("tag", tag), (_, n)] => 74 | format_datapoint(ts, Some(make_native_tag(tag)), get_float(n)); 75 | | _ => failwith("badly formatted json"); 76 | } 77 | }; 78 | 79 | let convert = (data) => { 80 | open List; 81 | let rec loop = (acc, l) => { 82 | switch (l) { 83 | | [] => rev(acc); 84 | | [ (ts, json), ...rest] => { 85 | let dp = Ezjsonm.get_dict(Ezjsonm.value(json)); 86 | loop(cons(convert_worker(ts, dp), acc), rest); 87 | }; 88 | } 89 | }; 90 | loop([], data); 91 | }; 92 | 93 | 94 | let to_json_worker = (ts, datapoint) => { 95 | open Ezjsonm; 96 | switch(datapoint) { 97 | | {tag: t, value: v} => { 98 | switch(t) { 99 | | Some(t) => dict([("timestamp", int64(ts)), ("tag", make_json_tag(t)), ("value", float(v))]); 100 | | None => dict([("timestamp", int64(ts)), ("value", float(v))]); 101 | } 102 | } 103 | } 104 | }; 105 | 106 | let to_json = (data) => { 107 | Ezjsonm.list(((ts,dp)) => to_json_worker(ts,dp), data); 108 | }; 109 | 110 | let values = (data) => { 111 | List.rev_map(((_,dp)) => dp.value, data); 112 | }; 113 | 114 | 115 | let filter_worker_helper = (name, value, tagset, func) => { 116 | switch (List.assoc_opt(name, tagset)) { 117 | | None => false 118 | | Some(value') => func(value, value') 119 | } 120 | } 121 | 122 | let filter_worker = (data, func, name, value) => { 123 | List.filter(((_,dp)) => switch (dp.tag) { 124 | | None => false; 125 | | Some(tagset) => filter_worker_helper(name, value, tagset, func) 126 | }, data) 127 | }; 128 | 129 | let or_filter = (data, func, names, values) => { 130 | List.fold_left2((acc, name, value) => 131 | filter_worker(data, func, name, value) |> 132 | List.rev_append(acc), [], names, values); 133 | } 134 | 135 | let and_filter = (data, func, names, values) => { 136 | let rec loop = (res, names, values) => { 137 | switch (names, values) { 138 | | ([], []) => List.rev(res); 139 | | ([ name, ...rest_names], [ value, ...rest_values]) => 140 | loop(filter_worker(res, func, name, value), rest_names, rest_values); 141 | | _ => failwith("invalid filter format") 142 | } 143 | }; 144 | loop(data, names, values); 145 | }; 146 | 147 | let same_names = (name, names) => { 148 | let rec loop = (names) => { 149 | switch (names) { 150 | | [] => true; 151 | | [x, ...xs] => name == x ? loop(xs) : false; 152 | } 153 | } 154 | loop(names); 155 | } 156 | 157 | let gen_filter_lists_helper = (x, (n,v)) => { 158 | x == n; 159 | } 160 | 161 | let gen_filter_lists = (l1, l2) => { 162 | open List; 163 | let l3 = combine(l1, l2); 164 | let l4 = sort_uniq(compare, l1); 165 | map(x=> split(filter(y => gen_filter_lists_helper(x,y), l3)), l4); 166 | } 167 | 168 | let apply_filter = (data, func, names, values) => { 169 | if (same_names(List.hd(names), names)) { 170 | or_filter(data, func, names, values); 171 | } else { 172 | and_filter(data, func, names, values); 173 | } 174 | } 175 | 176 | let filter = (data, func, tag) => { 177 | let (name_set, value_set) = tag; 178 | let names = String.split_on_char(',', name_set); 179 | let values = String.split_on_char(',', value_set); 180 | List.length(names) != List.length(values) ? 
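/* name_set and value_set are parallel comma-separated lists and must have equal length */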
failwith("invalid filter format") : () 181 | let rec loop = (res, lis) => { 182 | switch lis { 183 | | [] => List.rev(res); 184 | | [(names, values), ...rest] => 185 | loop(apply_filter(res, func, names, values), rest); 186 | } 187 | }; 188 | loop(data, gen_filter_lists(names, values)); 189 | } 190 | 191 | let create = (~file, ~bare) => { 192 | let config = Irmin_git.config(file, ~bare); 193 | let repo = Store.Repo.v(config); 194 | repo >>= (repo => Store.master(repo)); 195 | }; 196 | 197 | let add = (branch, info, k, v) => { 198 | branch >>= (branch' => Store.set_exn(branch', ~info, k, v)); 199 | }; 200 | 201 | let sort_shard = (lis) => { 202 | let cmp = ((x, _), (x', _)) => x < x' ? 1 : (-1); 203 | List.sort(cmp, lis); 204 | }; 205 | 206 | let get = (branch, k) => { 207 | branch >>= (branch' => Store.get(branch', k)) >|= sort_shard; 208 | }; 209 | 210 | let remove = (branch, info, key_list) => { 211 | Lwt_list.iter_s(k => add(branch, info, k, []), key_list); 212 | }; 213 | 214 | 215 | 216 | -------------------------------------------------------------------------------- /src/shard.rei: -------------------------------------------------------------------------------- 1 | type t; 2 | type datapoint; 3 | type err; 4 | 5 | let convert: list((int64, Ezjsonm.t)) => list((int64,datapoint)); 6 | 7 | let to_json: list((int64,datapoint)) => Ezjsonm.t; 8 | 9 | let values: list((int64,datapoint)) => list(float); 10 | 11 | let filter: (list((int64, datapoint)), (string, string) => bool, (string, string)) => list((int64, datapoint)); 12 | 13 | let print: (list((int64,datapoint))) => unit; 14 | 15 | let create: (~file: string, ~bare: bool) => Lwt.t(t); 16 | 17 | let add: (Lwt.t(t), Irmin.Info.f, list(string), list((int64,datapoint))) => Lwt.t(unit); 18 | 19 | let get: (Lwt.t(t), list(string)) => Lwt.t(list((int64,datapoint))); 20 | 21 | let remove: (Lwt.t(t), Irmin.Info.f, list(list(string))) => Lwt.t(unit); 22 | 23 | 24 | -------------------------------------------------------------------------------- /src/timeseries.re: -------------------------------------------------------------------------------- 1 | open Lwt.Infix; 2 | 3 | type t = { 4 | membuf: Membuf.t, 5 | index: Lwt.t(Index.t), 6 | shard: Lwt.t(Shard.t), 7 | fs: Fs.t, 8 | max_buffer_size: int, 9 | shard_size: int 10 | }; 11 | 12 | let create = (~path_to_db, ~max_buffer_size, ~shard_size, ~show_files) => { 13 | membuf: Membuf.create(), 14 | index: Index.create(~file=path_to_db ++ "_index_store", ~bare=!show_files), 15 | shard: Shard.create(~file=path_to_db ++ "_shard_store", ~bare=!show_files), 16 | fs: Fs.create(~index_dir=path_to_db ++ "_index_store"), 17 | max_buffer_size: max_buffer_size, 18 | shard_size: shard_size 19 | }; 20 | 21 | 22 | /* let get_milliseconds = () => { 23 | let t = Unix.gettimeofday() *. 1000.0; 24 | Int64.of_float(t); 25 | }; 26 | 27 | let get_nanoseconds = () => { 28 | let t = Unix.gettimeofday() *. 1000.0 *. 1000.0 *. 1000.0; 29 | Int64.of_float(t); 30 | }; */ 31 | 32 | /* let get_microseconds = () => { 33 | let t = Unix.gettimeofday() *. 1000.0 *. 
1000.0; 34 | Int64.of_float(t); 35 | }; */ 36 | 37 | let get_microseconds = () => { 38 | open Int64; 39 | let (days, picoseconds) = Ptime_clock.now_d_ps(); 40 | let seconds_from_days = days * 86400; 41 | let microseconds_from_seconds = of_int(seconds_from_days * 1000 * 1000); 42 | let microseconds_from_picoseconds = div(picoseconds, of_int(1000 * 1000)); 43 | add(microseconds_from_seconds, microseconds_from_picoseconds); 44 | }; 45 | 46 | /* let get_milliseconds_ptime = () => { 47 | open Int64; 48 | let (days, picoseconds) = Ptime_clock.now_d_ps(); 49 | let seconds_from_days = days * 86400; 50 | let milliseconds_from_seconds = of_int(seconds_from_days * 1000); 51 | let milliseconds_from_picoseconds = div(picoseconds, of_int(1000 * 1000 * 1000)); 52 | add(milliseconds_from_seconds, milliseconds_from_picoseconds); 53 | }; 54 | 55 | let get_nanoseconds_ptime = () => { 56 | open Int64; 57 | let (days, picoseconds) = Ptime_clock.now_d_ps(); 58 | let seconds_from_days = days * 86400; 59 | let nanoseconds_from_seconds = of_int(seconds_from_days * 1000 * 1000 * 1000); 60 | let nanoseconds_from_picoseconds = div(picoseconds, of_int(1000)); 61 | add(nanoseconds_from_seconds, nanoseconds_from_picoseconds); 62 | }; */ 63 | 64 | let validate_json = (json) => { 65 | open Ezjsonm; 66 | open Int64; 67 | switch (get_dict(value(json))) { 68 | | [("value",`Float _)] => 69 | Some((get_microseconds(), json)); 70 | | [("tag", tag), ("value",`Float n)] => 71 | Some((get_microseconds(), json)); 72 | | [("timestamp",`Float ts), ("value",`Float n)] => 73 | Some((of_float(ts), dict([("value",`Float(n))]))); 74 | | [("timestamp",`Float ts), ("tag", tag), ("value",`Float n)] => 75 | Some((of_float(ts), dict([("tag", tag), ("value",`Float(n))]))); 76 | | _ => None; 77 | } 78 | }; 79 | 80 | let shard_range = lis => { 81 | open List; 82 | let cmp = (x, y) => x > y ? 
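/* ascending sort over timestamps, so hd is the lower bound and the last element the upper bound */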
1 : (-1); 83 | switch (lis) { 84 | | [] => None 85 | | _ => 86 | Some( 87 | map(((ts, _)) => ts, lis) 88 | |> sort(cmp) 89 | |> (lis' => (hd(lis'), hd(rev(lis')))), 90 | ) 91 | }; 92 | }; 93 | 94 | let make_key = (id, (t1, t2)) => [ 95 | id, 96 | Int64.to_string(t1), 97 | Int64.to_string(t2), 98 | ]; 99 | 100 | let shard_data = (ctx, id) => { 101 | let rec loop = (n, shard) => 102 | if (n > 0) { 103 | Membuf.read(ctx.membuf, id) 104 | >>= (elt => loop(n - 1, List.cons(elt, shard))); 105 | } else { 106 | Lwt.return(shard); 107 | }; 108 | loop(ctx.shard_size, []); 109 | }; 110 | 111 | 112 | 113 | let string_of_key = lis => List.fold_left((x, y) => x ++ ":" ++ y, "", lis); 114 | 115 | let log_index = (str, lis) => { 116 | Lwt_list.iter_s(((x, y)) => Lwt_log_core.debug_f("%s:(%Ld,%Ld)\n", str, x, y), lis); 117 | }; 118 | 119 | let remove_leftover_shards = (ctx, k, keep_index, remove_list, info) => { 120 | open List; 121 | let index_list = filter(i => i != keep_index, remove_list); 122 | let key_list = map(i => make_key(k, i), index_list); 123 | Shard.remove(ctx.shard, info, key_list); 124 | }; 125 | 126 | let handle_shard_overlap_worker = (ctx, k, shard, shard_lis, overlap_list, info) => { 127 | open List; 128 | let new_shard = flatten(cons(shard, shard_lis)); 129 | Lwt_log_core.debug_f("shard len:%d", List.length(new_shard)) >>= 130 | () => switch (shard_range(new_shard)) { 131 | | Some(new_range) => 132 | let key = make_key(k, new_range); 133 | Lwt_log_core.debug_f("Adding shard with key%s", string_of_key(key)) >>= 134 | () => Index.update(ctx.index, info, k, new_range, overlap_list) >>= 135 | bounds => Membuf.set_disk_range(ctx.membuf, k, bounds) |> 136 | () => Shard.add(ctx.shard, info, key, new_shard) >>= 137 | _ => remove_leftover_shards(ctx, k, new_range, overlap_list, info); 138 | | None => Lwt.return_unit; 139 | }; 140 | }; 141 | 142 | let handle_shard_overlap = (ctx, k, shard, range, info) => { 143 | Index.overlap(ctx.index, k, range) >>= 144 | overlap_list => log_index("overlapping shards", overlap_list) >>= 145 | () => Lwt_list.map_s(r => Shard.get(ctx.shard, make_key(k, r)), overlap_list) >>= 146 | shard_list => handle_shard_overlap_worker(ctx, k, shard, shard_list, overlap_list, info) 147 | }; 148 | 149 | 150 | 151 | let handle_shard = (ctx, k, shard, info) => { 152 | switch (shard_range(shard)) { 153 | | Some(range) => handle_shard_overlap(ctx, k, shard, range, info) 154 | | None => Lwt.return_unit 155 | }; 156 | }; 157 | 158 | let write = (~ctx, ~info, ~timestamp as t, ~id as k, ~json as j) => { 159 | Membuf.write(ctx.membuf, k, (t, j)) >>= 160 | () => Membuf.length(ctx.membuf, k) >>= 161 | current_buffer_size => 162 | if (current_buffer_size == ctx.max_buffer_size) { 163 | shard_data(ctx, k) >>= 164 | (data => handle_shard(ctx, k, Shard.convert(data), info)); 165 | } else { 166 | Lwt.return_unit; 167 | } 168 | }; 169 | 170 | let flush_series = (ctx, k, shard, info) => { 171 | handle_shard(ctx, k, Shard.convert(shard), info) >>= 172 | () => Membuf.empty_series(ctx.membuf, k); 173 | }; 174 | 175 | let flush = (~ctx, ~info) => { 176 | Membuf.serialise(ctx.membuf) >>= 177 | lis => Lwt_list.iter_s(((key, shard)) => 178 | flush_series(ctx, key, shard, info), lis); 179 | }; 180 | 181 | let number_of_records_on_disk = (ctx, k, lis) => { 182 | Lwt_list.fold_left_s( (acc, x) => 183 | Shard.get(ctx.shard, make_key(k, x)) >|= 184 | (x => List.length(x) + acc), 0, lis); 185 | }; 186 | 187 | let number_of_records_in_memory = (ctx, k) => { 188 | Membuf.(exists(ctx.membuf, k) ? 
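/* a series with no in-memory queue counts as zero */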
length(ctx.membuf, k) : Lwt.return(0)); 189 | }; 190 | 191 | let length_on_disk_worker = (~ctx, ~id as k) => { 192 | Index.get(ctx.index, k) >>= 193 | data => { 194 | switch (data) { 195 | | Some(lis) => number_of_records_on_disk(ctx, k, lis) 196 | | None => Lwt.return(0) 197 | } 198 | }; 199 | }; 200 | 201 | let length_in_memory_worker = (~ctx, ~id as k) => { 202 | number_of_records_in_memory(ctx, k); 203 | }; 204 | 205 | let length_on_disk = (~ctx, ~id_list) => { 206 | Lwt_list.fold_left_s((acc, id) => length_on_disk_worker(~ctx, ~id) >|= 207 | (x => x + acc), 0, id_list) 208 | }; 209 | 210 | let length_in_memory = (~ctx, ~id_list) => { 211 | Lwt_list.fold_left_s((acc, id) => length_in_memory_worker(~ctx, ~id) >|= 212 | (x => x + acc), 0, id_list) 213 | }; 214 | 215 | let length = (~ctx, ~id_list) => { 216 | length_in_memory(ctx, id_list) >>= n1 => 217 | length_on_disk(ctx, id_list) >|= n2 => 218 | n1 + n2; 219 | }; 220 | 221 | let length_of_index_worker = (~ctx, ~id as k) => { 222 | Index.length(ctx.index, k); 223 | }; 224 | 225 | let length_of_index = (~ctx, ~id_list) => { 226 | Lwt_list.fold_left_s((acc, id) => length_of_index_worker(~ctx, ~id) >|= 227 | (x => x + acc), 0, id_list) 228 | }; 229 | 230 | let get_index = (~ctx, ~id as k) => { 231 | open Ezjsonm; 232 | Index.get(ctx.index, k) >|= 233 | (data) => { 234 | switch data { 235 | | Some((lis)) => lis 236 | | None => [] 237 | } 238 | } |> list(item => pair(x=>int64(x), y=>int64(y), item)) 239 | |> arr => dict([(k, arr)]) 240 | }; 241 | 242 | 243 | let read_memory_all = (ctx, id) => { 244 | Membuf.exists(ctx.membuf, id) ? 245 | Membuf.to_list(ctx.membuf, id) : Lwt.return([]); 246 | }; 247 | 248 | let flush_memory = (ctx, k, info) => { 249 | read_memory_all(ctx, k) >>= 250 | (shard => flush_series(ctx, k, shard, info)); 251 | }; 252 | 253 | let flush_memory_worker = (ctx, id, info) => { 254 | Membuf.exists(ctx.membuf, id) ? 255 | flush_memory(ctx, id, info) : Lwt.return_unit; 256 | }; 257 | 258 | 259 | let get_timestamps = json => { 260 | open Ezjsonm; 261 | List.rev_map(x => get_int64(x), get_list(x => find(x, ["timestamp"]), json)); 262 | }; 263 | 264 | let filter_shard_worker = (ctx, key, timestamps, info) => { 265 | Shard.get(ctx.shard, key) >>= 266 | lis => List.filter(((t, _)) => ! 
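/* keep only the datapoints whose timestamps are not marked for deletion */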
List.mem(t, timestamps), lis) |> 267 | (lis' => Shard.add(ctx.shard, info, key, lis')) 268 | }; 269 | 270 | let delete_worker = (ctx, key_list, timestamps, info) => { 271 | Lwt_list.iter_s(k => Lwt.return(ignore(filter_shard_worker(ctx, k, timestamps, info))), key_list); 272 | }; 273 | 274 | let make_shard_keys_worker = (id, lb, lis) => { 275 | let rec loop = (acc, lis) => 276 | switch (lis) { 277 | | [] => acc 278 | | [(_, t2), ...rest] when lb > t2 => loop(acc, rest) 279 | | [(t1, t2), ...rest] => loop(List.cons(make_key(id, (t1, t2)), acc), rest) 280 | }; 281 | loop([], lis); 282 | }; 283 | 284 | let make_shard_keys = (ctx, id, lb) => { 285 | Index.get(ctx.index, id) >>= 286 | lis => { 287 | switch (lis) { 288 | | None => [] 289 | | Some(lis') => make_shard_keys_worker(id, lb, lis') 290 | } |> Lwt.return 291 | }; 292 | }; 293 | 294 | let delete = (~ctx, ~info, ~id_list, ~json) => { 295 | let timestamps = get_timestamps(Ezjsonm.value(json)); 296 | switch (timestamps) { 297 | | [] => Lwt.return_unit 298 | | [lb, ..._] => 299 | Lwt_list.iter_s(id => flush_memory_worker(ctx, id, info), id_list) >>= 300 | () => Lwt_list.map_s(k => make_shard_keys(ctx, k, lb), id_list) >>= 301 | keys' => Lwt_list.iter_s(k => delete_worker(ctx, k, timestamps, info), keys') 302 | }; 303 | }; 304 | 305 | let take = (n, lis) => { 306 | open List; 307 | let rec loop = (n, acc, l) => 308 | switch (l) { 309 | | [] => acc 310 | | [_, ..._] when n == 0 => acc 311 | | [xs, ...rest] => loop(n - 1, cons(xs, acc), rest) 312 | }; 313 | rev(loop(n, [], lis)); 314 | }; 315 | 316 | let sort_result = (mode, lis) => { 317 | open List; 318 | switch (mode) { 319 | | `Last => fast_sort(((x, _), (x', _)) => x < x' ? 1 : (-1), lis) 320 | | `First => fast_sort(((x, _), (x', _)) => x > x' ? 1 : (-1), lis) 321 | | `None => lis 322 | }; 323 | }; 324 | 325 | 326 | let return_data = (~sort as mode, lis) => { 327 | sort_result(mode, lis) |> Shard.to_json |> Lwt.return; 328 | }; 329 | 330 | 331 | let read_disk = (ctx, k, n, mode) => { 332 | open List; 333 | let rec loop = (n, acc, lis) => 334 | switch (lis) { 335 | | [] => acc |> Lwt.return 336 | | [tup, ...rest] => Shard.get(ctx.shard, make_key(k, tup)) >>= 337 | shard => { 338 | let leftover = n - length(shard); 339 | if (leftover > 0) { 340 | loop(leftover, rev_append(shard, acc), rest); 341 | } else { 342 | rev_append(take(n, sort_result(mode, shard)), acc) |> Lwt.return; 343 | }; 344 | } 345 | }; 346 | Lwt_log_core.debug_f("read_disk\n") >>= 347 | () => Index.get(ctx.index, k) >>= 348 | data => { 349 | switch (mode, data) { 350 | | (`Last, Some(lis)) => lis 351 | | (`First, Some(lis)) => rev(lis) 352 | | (_, None) => [] 353 | } |> loop(n, []) 354 | }; 355 | }; 356 | 357 | let flush_memory_read_from_disk = (ctx, k, n, mode, info) => { 358 | Lwt_log_core.debug_f("flush_memory_read_from_disk\n") >>= 359 | (() => flush_memory(ctx, k, info) >>= 360 | (() => read_disk(ctx, k, n, mode))); 361 | }; 362 | 363 | let is_ascending = (ctx, k) => { 364 | Membuf.get_disk_range(ctx.membuf, k) |> 365 | (range) => { 366 | switch range { 367 | | None => false 368 | | Some(((_, ub))) => Membuf.is_ascending(ctx.membuf, k, ub) 369 | } 370 | }; 371 | }; 372 | 373 | 374 | let is_descending = (ctx, k) => { 375 | Membuf.get_disk_range(ctx.membuf, k) |> 376 | (range) => { 377 | switch range { 378 | | None => false 379 | | Some(((lb, _))) => Membuf.is_descending(ctx.membuf, k, lb) 380 | } 381 | }; 382 | }; 383 | 384 | let take_from_memory = (n, lis, mode) => { 385 | open List; 386 | let count = min(n, 
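/* never take more than the buffer holds; any leftover count is satisfied from disk */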
length(lis)); 387 | let sorted = sort_result(mode, lis); 388 | (n - count, take(count, sorted)) |> Lwt.return; 389 | }; 390 | 391 | let read_memory = (ctx, id, n, mode) => { 392 | Lwt_log_core.debug_f("read_memory\n") >>= 393 | () => Membuf.to_list(ctx.membuf, id) >>= 394 | ((mem_shard) => take_from_memory(n, mem_shard, mode)) 395 | }; 396 | 397 | let read_disk = (ctx, k, n, mode) => { 398 | open List; 399 | let rec loop = (n, acc, lis) => { 400 | switch lis { 401 | | [] => acc |> Lwt.return 402 | | [tup, ...rest] => Shard.get(ctx.shard, make_key(k, tup)) >>= 403 | (shard) => { 404 | let leftover = n - length(shard); 405 | if (leftover > 0) { 406 | loop(leftover, rev_append(shard, acc), rest); 407 | } else { 408 | rev_append(take(n, sort_result(mode, shard)), acc) |> Lwt.return; 409 | }; 410 | } 411 | }; 412 | }; 413 | Lwt_log_core.debug_f("read_disk\n") >>= 414 | () => Index.get(ctx.index, k) >>= 415 | data => { 416 | switch (mode, data) { 417 | | (`Last, Some((lis))) => lis 418 | | (`First, Some((lis))) => rev(lis) 419 | | (_, None) => [] 420 | } |> loop(n, []) 421 | }; 422 | }; 423 | 424 | let read_memory_then_disk = (ctx, k, n, mode) => { 425 | Lwt_log_core.debug_f("read_memory_then_disk\n") >>= 426 | () => read_memory(ctx, k, n, mode) >>= 427 | ((leftover, mem)) => 428 | if (leftover > 0) { 429 | read_disk(ctx, k, leftover, mode) 430 | >|= (disk => List.rev_append(Shard.convert(mem), disk)); 431 | } else { 432 | Shard.convert(mem) |> Lwt.return; 433 | } 434 | }; 435 | 436 | let aggregate = (data, name, func) => { 437 | open Ezjsonm; 438 | if (List.length(data) == 0) { 439 | dict([]); 440 | } else { 441 | Shard.values(data) |> Array.of_list |> func |> 442 | result => dict([(name, `Float(result))]); 443 | } 444 | }; 445 | 446 | let sum = (data) => { 447 | let sum = List.fold_left((+.), 0., Shard.values(data)); 448 | Ezjsonm.dict([("sum", `Float(sum))]); 449 | }; 450 | 451 | let count = (data) => { 452 | let count = float_of_int(List.length(data)); 453 | Ezjsonm.dict([("count", `Float(count))]); 454 | }; 455 | 456 | let return_aggregate_data = (data, arg) => { 457 | open Oml.Util.Array; 458 | open Oml.Statistics.Descriptive; 459 | switch (arg) { 460 | | ["sum"] => sum(data); 461 | | ["max"] => aggregate(data, "max", max); 462 | | ["min"] => aggregate(data, "min", min); 463 | | ["mean"] => aggregate(data, "mean", mean); 464 | | ["sd"] => aggregate(data, "sd", sd); 465 | | ["median"] => aggregate(data, "median", median); 466 | | ["count"] => count(data) 467 | | _ => failwith("unknown path") 468 | } |> Lwt.return; 469 | }; 470 | 471 | let read_last_worker = (~ctx, ~id as k, ~n, ~info) => { 472 | if (Membuf.exists(ctx.membuf, k)) { 473 | is_ascending(ctx, k) ? 
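/* when the buffer cleanly extends the on-disk range upwards, the newest points can be served from memory first */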
474 | read_memory_then_disk(ctx, k, n, `Last) : flush_memory_read_from_disk(ctx, k, n, `Last, info); 475 | } else { 476 | read_disk(ctx, k, n, `Last); 477 | }; 478 | }; 479 | 480 | let return_filtered_data = (~sort, ~tag, data, func) => { 481 | Shard.filter(data, func, tag) |> 482 | data' => return_data(~sort, data') 483 | }; 484 | 485 | let return_filtered_aggregate_data = (~tag, data, func, agg_mode) => { 486 | Shard.filter(data, func, tag) |> 487 | data' => return_aggregate_data(data', [agg_mode]); 488 | }; 489 | 490 | module String_extra = { 491 | let contains = (s1, s2) => { 492 | let re = Str.regexp_string(s1); 493 | try { 494 | ignore(Str.search_forward(re, s2, 0)); 495 | true; 496 | } { 497 | | Not_found => false 498 | }; 499 | }; 500 | }; 501 | 502 | let process_data = (data, args, ~sort) => { 503 | switch (args) { 504 | | [] => return_data(~sort, data) 505 | | ["filter", name, "equals", value] => return_filtered_data(~sort, ~tag=(name,value), data, String.equal) 506 | | ["filter", name, "equals", value, agg_mode] => return_filtered_aggregate_data(~tag=(name,value), data, String_extra.contains, agg_mode) 507 | | ["filter", name, "contains", value] => return_filtered_data(~sort, ~tag=(name,value), data, String_extra.contains) 508 | | ["filter", name, "contains", value, agg_mode] => return_filtered_aggregate_data(~tag=(name,value), data, String_extra.contains, agg_mode) 509 | | _ => return_aggregate_data(data, args) 510 | } 511 | }; 512 | 513 | let read_last = (~ctx, ~info, ~id_list, ~n, ~xargs) => { 514 | Lwt_list.fold_left_s((acc, id) => 515 | read_last_worker(~ctx=ctx, ~id=id, ~n=n, ~info=info) >|= 516 | (x => List.rev_append(x, acc)), [], id_list) 517 | >>= data => process_data(data, xargs, ~sort=`Last) 518 | }; 519 | 520 | 521 | let read_latest = (~ctx, ~info, ~id_list, ~xargs) => { 522 | read_last(~ctx=ctx, ~info=info, ~id_list=id_list, ~n=1, ~xargs); 523 | }; 524 | 525 | let read_first_worker = (~ctx, ~id as k, ~n, ~info) => { 526 | if (Membuf.exists(ctx.membuf, k)) { 527 | is_descending(ctx, k) ? 
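/* symmetric to read_last_worker: a descending buffer means the oldest points are still in memory */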
528 | read_memory_then_disk(ctx, k, n, `First) : 529 | flush_memory_read_from_disk(ctx, k, n, `First, info); 530 | } else { 531 | read_disk(ctx, k, n, `First); 532 | }; 533 | }; 534 | 535 | let read_first = (~ctx, ~info, ~id_list, ~n, ~xargs) => { 536 | Lwt_list.fold_left_s((acc, id) => 537 | read_first_worker(~ctx=ctx, ~id=id, ~n=n, ~info=info) >|= 538 | (x => List.rev_append(x, acc)), [], id_list) >>= 539 | (data => process_data(data, xargs, ~sort=`First)); 540 | }; 541 | 542 | let read_earliest = (~ctx, ~info, ~id_list, ~xargs) => { 543 | read_first(~ctx=ctx, ~info=info, ~id_list=id_list, ~n=1, ~xargs); 544 | }; 545 | 546 | 547 | let make_filter_elt = (k, tup, mode) => { 548 | (mode, make_key(k, tup)); 549 | }; 550 | 551 | let filter_since = (ts, lis) => { 552 | List.filter(((t, _)) => t >= ts, lis); 553 | }; 554 | 555 | let read_since_disk_worker = (ctx, k, ts, status) => { 556 | switch status { 557 | | `Complete => Shard.get(ctx.shard, k) 558 | | `Partial => Shard.get(ctx.shard, k) >|= 559 | (shard => filter_since(ts, shard)) 560 | }; 561 | }; 562 | 563 | let handle_read_since_disk = (ctx, ts, lis) => { 564 | Lwt_list.fold_left_s((acc, (status, key)) => 565 | read_since_disk_worker(ctx, key, ts, status) >|= 566 | (x => List.rev_append(x, acc)), [], lis); 567 | }; 568 | 569 | let read_since_disk = (ctx, k, ts) => { 570 | open List; 571 | let rec loop = (acc, lis) => { 572 | switch lis { 573 | | [] => acc 574 | | [(lb,ub), ...rest] when lb >= ts && ub >= ts => 575 | loop(cons(make_filter_elt(k, (lb, ub), `Complete), acc), rest) 576 | | [(lb,ub), ..._] when ub >= ts => 577 | cons(make_filter_elt(k, (lb, ub), `Partial), acc) 578 | | [_, ...rest] => loop(acc, rest) 579 | }; 580 | }; 581 | Index.get(ctx.index, k) >>= 582 | (data) => { 583 | switch data { 584 | | Some((lis)) => lis 585 | | None => [] 586 | } 587 | } |> loop([]) |> handle_read_since_disk(ctx, ts) 588 | }; 589 | 590 | let read_since_memory = (ctx, k, ts) => { 591 | read_memory_all(ctx, k) >|= 592 | (data => filter_since(ts, data)); 593 | }; 594 | 595 | let read_since_worker = (~ctx, ~id as k, ~from as ts) => { 596 | read_since_memory(ctx, k, ts) >>= 597 | (mem) => read_since_disk(ctx, k, ts) >|= 598 | ((disk) => List.rev_append(Shard.convert(mem), disk)) 599 | }; 600 | 601 | let read_since = (~ctx, ~id_list, ~from as ts, ~xargs) => { 602 | Lwt_list.fold_left_s((acc, id) => 603 | read_since_worker(~ctx=ctx, ~id=id, ~from=ts) >|= 604 | (x => List.rev_append(x, acc)), [], id_list) >>= 605 | (data => process_data(data, xargs, ~sort=`Last)) 606 | }; 607 | 608 | let filter_until = (ts, lis) => { 609 | List.filter(((t, _)) => t <= ts, lis); 610 | }; 611 | 612 | let read_range_worker = (~ctx, ~id as k, ~from as t1, ~to_ as t2) => { 613 | read_since_memory(ctx, k, t1) >>= 614 | (mem) => read_since_disk(ctx, k, t1) >>= 615 | ((disk) => List.rev_append(Shard.convert(mem), disk) |> 616 | filter_until(t2) |> Lwt.return) 617 | }; 618 | 619 | let read_range = (~ctx, ~id_list, ~from as t1, ~to_ as t2, ~xargs) => { 620 | Lwt_list.fold_left_s((acc, id) => 621 | read_range_worker(~ctx=ctx, ~id=id, ~from=t1, ~to_=t2) >|= 622 | (x => List.rev_append(x, acc)), [], id_list) >>= 623 | (data => process_data(data, xargs, ~sort=`Last)) 624 | }; 625 | 626 | let names = (~ctx) => { 627 | open Ezjsonm; 628 | let mem_list = Membuf.get_keys(ctx.membuf); 629 | Fs.ts_names(ctx.fs) >|= List.rev_append(mem_list) >|= 630 | List.sort_uniq((x,y) => compare(x,y)) >|= 631 | strings >|= x => dict([("timeseries", value(x))]) 632 | } 633 | 634 | let ts_names_value = (x) 
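/* extracts the plain list of series names from the JSON produced by names above */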
=> { 635 | open Ezjsonm; 636 | get_strings(find(x, ["timeseries"])); 637 | } 638 | 639 | let lengths_worker = (ctx, id) => { 640 | open Ezjsonm; 641 | length(ctx, [id]) >|= 642 | x => dict([(id, int(x))]) 643 | } 644 | 645 | let lengths_in_memory_worker = (ctx, id) => { 646 | open Ezjsonm; 647 | length_in_memory(ctx, [id]) >|= 648 | x => dict([(id, int(x))]) 649 | } 650 | 651 | let lengths_on_disk_worker = (ctx, id) => { 652 | open Ezjsonm; 653 | length_on_disk(ctx, [id]) >|= 654 | x => dict([(id, int(x))]) 655 | } 656 | 657 | let lengths_of_index_worker = (ctx, id) => { 658 | open Ezjsonm; 659 | length_of_index(ctx, [id]) >|= 660 | x => dict([(id, int(x))]) 661 | } 662 | 663 | let lengths = (ctx, ts) => { 664 | open Ezjsonm; 665 | Lwt_list.map_s(x => lengths_worker(ctx, x), ts) >|= 666 | x => dict([("length", list(y=>y, x))]) 667 | } 668 | 669 | let lengths_in_memory = (ctx, ts) => { 670 | open Ezjsonm; 671 | Lwt_list.map_s(x => lengths_in_memory_worker(ctx, x), ts) >|= 672 | x => dict([("length_in_memory", list(y=>y, x))]) 673 | } 674 | 675 | let lengths_on_disk = (ctx, ts) => { 676 | open Ezjsonm; 677 | Lwt_list.map_s(x => lengths_on_disk_worker(ctx, x), ts) >|= 678 | x => dict([("length_on_disk", list(y=>y, x))]) 679 | } 680 | 681 | let lengths_of_index = (ctx, ts) => { 682 | open Ezjsonm; 683 | Lwt_list.map_s(x => lengths_of_index_worker(ctx, x), ts) >|= 684 | x => dict([("length_of_index", list(y=>y, x))]) 685 | } 686 | 687 | let stats = (~ctx) => { 688 | open Ezjsonm; 689 | names(ctx) >|= ts_names_value >>= 690 | ts => lengths(ctx, ts) >>= 691 | length => lengths_in_memory(ctx, ts) >>= 692 | length_in_memory => lengths_on_disk(ctx, ts) >>= 693 | length_on_disk => lengths_of_index(ctx, ts) >|= 694 | length_of_index => list(x=>x, [length, length_in_memory, length_on_disk, length_of_index]) 695 | } -------------------------------------------------------------------------------- /src/timeseries.rei: -------------------------------------------------------------------------------- 1 | type t; 2 | 3 | let create: (~path_to_db: string, ~max_buffer_size: int, ~shard_size: int, ~show_files: bool) => t; 4 | 5 | let validate_json: Ezjsonm.t => option((int64, Ezjsonm.t)); 6 | 7 | let write: (~ctx: t, ~info: Irmin.Info.f, ~timestamp: int64, ~id: string, ~json: Ezjsonm.t) => Lwt.t(unit); 8 | 9 | let flush: (~ctx: t, ~info: Irmin.Info.f) => Lwt.t(unit); 10 | 11 | let length: (~ctx: t, ~id_list: list(string)) => Lwt.t(int); 12 | 13 | let length_in_memory: (~ctx: t, ~id_list: list(string)) => Lwt.t(int); 14 | 15 | let length_on_disk: (~ctx: t, ~id_list: list(string)) => Lwt.t(int); 16 | 17 | let length_of_index: (~ctx: t, ~id_list: list(string)) => Lwt.t(int); 18 | 19 | let get_index: (~ctx: t, ~id: string) => Lwt.t(Ezjsonm.t); 20 | 21 | let delete: (~ctx: t, ~info: Irmin.Info.f, ~id_list: list(string), ~json: Ezjsonm.t) => Lwt.t(unit); 22 | 23 | let read_last: (~ctx: t, ~info: Irmin.Info.f, ~id_list: list(string), ~n: int, ~xargs: list(string)) => Lwt.t(Ezjsonm.t); 24 | 25 | let read_latest: (~ctx: t, ~info: Irmin.Info.f, ~id_list: list(string), ~xargs: list(string)) => Lwt.t(Ezjsonm.t); 26 | 27 | let read_first: (~ctx: t, ~info: Irmin.Info.f, ~id_list: list(string), ~n: int, ~xargs: list(string)) => Lwt.t(Ezjsonm.t); 28 | 29 | let read_earliest: (~ctx: t, ~info: Irmin.Info.f, ~id_list: list(string), ~xargs: list(string)) => Lwt.t(Ezjsonm.t); 30 | 31 | let read_since: (~ctx: t, ~id_list: list(string), ~from: int64, ~xargs: list(string)) => Lwt.t(Ezjsonm.t); 32 | 33 | let read_range: (~ctx: t, 
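/* from and to_ are inclusive microsecond timestamps */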
~id_list: list(string), ~from: int64, ~to_: int64, ~xargs: list(string)) => Lwt.t(Ezjsonm.t); 34 | 35 | let names: (~ctx: t) => Lwt.t(Ezjsonm.t); 36 | 37 | let stats: (~ctx: t) => Lwt.t(Ezjsonm.t); 38 | -------------------------------------------------------------------------------- /test/butterflies.json: -------------------------------------------------------------------------------- 1 | [ 2 | {"timestamp": 1439856000000000, "tag": [{"location":"1"},{"scientist":"langstroth"}], "value": 12}, 3 | {"timestamp": 1439856000000000, "tag": [{"location":"1"},{"scientist":"perpetua"}], "value": 1}, 4 | {"timestamp": 1439856360000000, "tag": [{"location":"1"},{"scientist":"langstroth"}], "value": 11}, 5 | {"timestamp": 1439856360000000, "tag": [{"location":"1"},{"scientist":"perpetua"}], "value": 3}, 6 | {"timestamp": 1439877240000000, "tag": [{"location":"2"},{"scientist":"langstroth"}], "value": 2}, 7 | {"timestamp": 1439877600000000, "tag": [{"location":"2"},{"scientist":"langstroth"}], "value": 1}, 8 | {"timestamp": 1439877960000000, "tag": [{"location":"2"},{"scientist":"perpetua"}], "value": 8}, 9 | {"timestamp": 1439878320000000, "tag": [{"location":"2"},{"scientist":"perpetua"}], "value": 7} 10 | ] -------------------------------------------------------------------------------- /test/client.re: -------------------------------------------------------------------------------- 1 | open Lwt; 2 | 3 | open Cohttp_lwt_unix; 4 | 5 | let uri = ref("http://127.0.0.1:8000/ts/foo"); 6 | 7 | let content_format = ref("application/json"); 8 | 9 | let payload = ref("{\"value\": 42}"); 10 | 11 | let loop_count = ref(0); 12 | 13 | let call_freq = ref(1.0); 14 | 15 | let file = ref(false); 16 | 17 | let send_request = (~uri, ~payload) => { 18 | let headers = Cohttp.Header.of_list([("Content-Type", content_format^), ("Connection", "keep-alive")]); 19 | let body = Cohttp_lwt.Body.of_string(payload); 20 | Client.post(~headers=headers, ~body=body, Uri.of_string(uri)) >>= 21 | (((_, body)) => body |> Cohttp_lwt.Body.to_string); 22 | }; 23 | 24 | let post_loop = (count) => { 25 | let rec loop = (n) => 26 | send_request(~uri=uri^, ~payload=payload^) >>= 27 | (_) => Lwt_io.printf("=> Created\n") >>= () => 28 | if (n > 1) { 29 | Lwt_unix.sleep(call_freq^) >>= () => loop(n - 1); 30 | } else { 31 | Lwt.return_unit; 32 | }; 33 | loop(count); 34 | }; 35 | 36 | let post_test = () => post_loop(loop_count^); 37 | 38 | let handle_format = (format) => { 39 | let format = 40 | switch format { 41 | | "text" => "text/plain" 42 | | "json" => "application/json" 43 | | "binary" => "application/octet-stream" 44 | | _ => raise(Arg.Bad("Unsupported format")) 45 | }; 46 | content_format := format; 47 | }; 48 | 49 | let parse_cmdline = () => { 50 | let usage = "usage: " ++ Sys.argv[0]; 51 | let speclist = [ 52 | ("--uri", Arg.Set_string(uri), ": to set the uri"), 53 | ("--payload", Arg.Set_string(payload), ": to set the message payload"), 54 | ( 55 | "--format", 56 | Arg.Symbol(["text", "json", "binary"], handle_format), 57 | ": to set the message content type" 58 | ), 59 | ( 60 | "--loop", 61 | Arg.Set_int(loop_count), 62 | ": to set the number of times to run the post test" 63 | ), 64 | ( 65 | "--freq", 66 | Arg.Set_float(call_freq), 67 | ": to set the number of seconds to wait between each post operation" 68 | ), 69 | ("--file", Arg.Set(file), ": read the message payload from the file named by --payload") 70 | ]; 71 | Arg.parse(speclist, (err) => raise(Arg.Bad("Bad argument : " ++ err)), usage); 72 | }; 73 | 74 | let set_payload_from = (file) => { 75 | let 
data = Fpath.v(file) |> Bos.OS.File.read |> Rresult.R.get_ok; 76 | payload := data; 77 | }; 78 | 79 | parse_cmdline(); 80 | 81 | file^ ? set_payload_from(payload^) : (); 82 | 83 | Lwt_main.run(post_test()); -------------------------------------------------------------------------------- /test/dune: -------------------------------------------------------------------------------- 1 | 2 | (executable 3 | (name client) 4 | (libraries cohttp cohttp-lwt cohttp-lwt-unix bos)) 5 | -------------------------------------------------------------------------------- /test/dune-project: -------------------------------------------------------------------------------- 1 | (lang dune 1.6) 2 | -------------------------------------------------------------------------------- /test/honeybees.json: -------------------------------------------------------------------------------- 1 | [ 2 | {"timestamp": 1439856000000000, "tag": [{"location":"1"},{"scientist":"langstroth"}], "value": 23}, 3 | {"timestamp": 1439856000000000, "tag": [{"location":"1"},{"scientist":"perpetua"}], "value": 30}, 4 | {"timestamp": 1439856360000000, "tag": [{"location":"1"},{"scientist":"langstroth"}], "value": 28}, 5 | {"timestamp": 1439856360000000, "tag": [{"location":"1"},{"scientist":"perpetua"}], "value": 28}, 6 | {"timestamp": 1439877240000000, "tag": [{"location":"2"},{"scientist":"langstroth"}], "value": 11}, 7 | {"timestamp": 1439877600000000, "tag": [{"location":"2"},{"scientist":"langstroth"}], "value": 10}, 8 | {"timestamp": 1439877960000000, "tag": [{"location":"2"},{"scientist":"perpetua"}], "value": 23}, 9 | {"timestamp": 1439878320000000, "tag": [{"location":"2"},{"scientist":"perpetua"}], "value": 22} 10 | ] --------------------------------------------------------------------------------
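
Usage note: the fixtures and client above are enough to smoke-test a running server. The sketch below reuses send_request from test/client.re; the series name "bees", the timestamp/value payload shape (mirroring the fixtures), and a nibbledb instance on 127.0.0.1:8000 (the port exposed in the Dockerfile) are assumptions for illustration, not a documented API contract.

/* Minimal sketch, not part of the repository: post one reading and print
   the server's response body. Assumes send_request from test/client.re is
   in scope, a server is listening on 127.0.0.1:8000, and "/ts/bees" names
   a time series; the payload shape mirrors the test fixtures above. */
let post_one = () =>
  send_request(
    ~uri="http://127.0.0.1:8000/ts/bees",
    ~payload="{\"timestamp\": 1439856000000000, \"value\": 42}"
  )
  >>= (body => Lwt_io.printl(body));

Lwt_main.run(post_one());

The same round trip works from the shell with the flags the client already defines, e.g. dune exec ./client.exe -- --uri http://127.0.0.1:8000/ts/bees --file --payload honeybees.json, assuming the server accepts the fixtures' array-of-readings form.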