├── .gitignore ├── README.md ├── c_src ├── NeuralTable.cpp ├── NeuralTable.h ├── neural.cpp ├── neural_utils.cpp └── neural_utils.h ├── rebar.config ├── src ├── neural.app.src ├── neural.erl ├── neural_app.erl └── neural_sup.erl └── test └── neural_concurrency.erl /.gitignore: -------------------------------------------------------------------------------- 1 | .eunit 2 | deps 3 | *.o 4 | *.beam 5 | *.plt -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | NEURAL: Erlang's Universal Record Adjustment Layer 2 | ------ 3 | 4 | NEURAL provides an ets-like interface to shared terms with additional support for in-place operations on stored data, such as incrementing a value, or adding/removing a value to a list. 5 | 6 | ### Usage ### 7 | 8 | The API is similar to that of ets, but at the time is more limited in its configurability. All tables are sets and are always identified by atoms. Key position is configurable (great for use with records). As with ets, a key can be any term. The value at the key position is hashed with erlang:phash2/1 before calling to the NIF. 9 | 10 | #### Create a table #### 11 | Use neural:new/2 12 | 13 | ```erlang 14 | neural:new(tuple_table, []). 15 | neural:new(record_table, [{key_pos, 2}]). 16 | ``` 17 | 18 | #### Insert a Tuple #### 19 | Use neural:insert/2 or neural:insert_new/2 20 | 21 | ```erlang 22 | neural:insert(table_name, {"an element", 1}). 23 | false = neural:insert_new(table_name, {"an element", []}). 24 | true = neural:insert_new(table_name, {"another element", []}). 25 | ``` 26 | 27 | #### Retrieve a Tuple #### 28 | Use neural:lookup/2 29 | 30 | ```erlang 31 | {"an_element", 1} = neural:lookup(table_name, "an element"). 32 | undefined = neural:lookup(table_name, "no such key"). 
33 | ``` 34 | 35 | #### Delete a Tuple #### 36 | Use neural:delete/2 37 | 38 | This function also returns the stored tuple if such a tuple is found, or undefined if it wasn't. 39 | 40 | ```erlang 41 | {ok, {"an_element", 1}} = neural:delete(table_name, "an element"). 42 | {ok, undefined} = neural:delete(table_name, "no such key"). 43 | ``` 44 | 45 | #### Update Counter #### 46 | Use neural:increment/3 47 | 48 | The third argument is the operation to perform. Like ets:update_counter/3, the update operation specifies a position and an increment as a tuple. The third argument can be a list of such operations, a single operation, or a integer (in which case it is treated as the value and the position is assumed to be the key position + 1. 49 | 50 | ```erlang 51 | 2 = neural:increment(table_name, "an element", 1). 52 | 3 = neural:increment(table_name, "an element", {2, 1}). 53 | [4, 5] = neural:increment(table_name, "an_element", [{2, 1}, {2, 1}]). 54 | ``` 55 | 56 | #### Update Element #### 57 | User neural:swap/3 58 | 59 | The third argument is the operation to perform. Like ets:update_element/3, the update operation specifies a position and a value to insert at the position as a tuple. The third operation must be either a single update operation tuple or a list of such operations. 60 | 61 | ```erlang 62 | [5, []] = neural:swap(table_name, "an element", [{2, []}, {2, 5}]). 63 | ``` 64 | 65 | #### Update List #### 66 | Use neural:shift/3 to remove elements from a list 67 | 68 | Use neural:unshift/3 to add elements to a list 69 | 70 | ##### neural:unshift/3 ###### 71 | The third argument is the operation to perform. Unlike neural:increment/3, the third argument must either be a single operation tuple or a list of operation tuples. This is because of the ambiguity of a list of values to unshift. Elements are copied from head to tail by adding to the head of the stored element. 72 | 73 | Returns the new length of the list. 
74 | 75 | ```erlang 76 | % Results in [d, c, b, a] 77 | 4 = neural:unshift(table_name, "another element", {2, [a, b, c, d]}). 78 | ``` 79 | 80 | ##### neural:shift/3 ##### 81 | The third argument is the operation to perform. An operation specifies a position to modify and a number of elements to remove. The third argument may be a list of such operations, a single operation tuple, or a single integer (in which case it assumes key position + 1). Elements are taken from the head of the stored list and appended to the head of the return list. 82 | 83 | If the number of elements to remove is less than 0, the entire list will be removed and returned. 84 | 85 | ```erlang 86 | [d] = neural:shift(table_name, "another element", 1). 87 | [a, b, c] = neural:shift(table_name, "another element", -1). 88 | ``` 89 | 90 | #### Batch Operations #### 91 | Use neural:dump/1 to read the entire contents of the table 92 | 93 | Use neural:drain/1 to read and remove the entire contents of the table 94 | 95 | Use neural:erase/1 to remove the entire contents of the table 96 | 97 | Each function takes only the table name as an argument. Batch operations are executed in a separate thread, and the results are sent via message passing to the calling process. This is because calling a long-running NIF call from an erlang process can cause problems with Erlang's schedulers. Other potentially long-running calls could eventually be moved into batch threads as well. 98 | 99 | ### Garbage Collection ### 100 | NEURAL stores terms by copying them to a process-independent environment. Most modifications to the data therein will therefore result in discarded terms. For this reason, NEURAL has to deliberately collect garbage (erlang terms are valid for the entire life of their environment). 101 | 102 | It does this by keeping track of the approximate word size of each term that is discarded, and, when the amount discarded surpasses a certain threshold, triggers the garbage collection condition. 
Each table has a dedicated garbage collection thread which triggers on this condition. The garbage collection thread walks through each bucket in the table, copies each bucket's terms to a new environment, and frees the old environment. 103 | -------------------------------------------------------------------------------- /c_src/NeuralTable.cpp: -------------------------------------------------------------------------------- 1 | #include "NeuralTable.h" 2 | /* !!!! A NOTE ON KEYS !!!! 3 | * Keys should be integer values passed from the erlang emulator, 4 | * and should be generated by a hashing function. There is no easy 5 | * way to hash an erlang term from a NIF, but ERTS is more than 6 | * capable of doing so. 7 | * 8 | * Additionally, this workaround means that traditional collision 9 | * handling mechanisms for hash tables will not work without 10 | * special consideration. For instance, to compare keys as you 11 | * would by storing linked lists, you must retrieve the stored 12 | * tuple and call enif_compare or enif_is_identical on the key 13 | * elements of each tuple. 14 | */ 15 | 16 | table_set NeuralTable::tables; 17 | atomic NeuralTable::running(true); 18 | ErlNifMutex *NeuralTable::table_mutex; 19 | 20 | NeuralTable::NeuralTable(unsigned int kp) { 21 | for (int i = 0; i < BUCKET_COUNT; ++i) { 22 | ErlNifEnv *env = enif_alloc_env(); 23 | env_buckets[i] = env; 24 | locks[i] = enif_rwlock_create("neural_table"); 25 | garbage_cans[i] = 0; 26 | reclaimable[i] = enif_make_list(env, 0); 27 | } 28 | 29 | start_gc(); 30 | start_batch(); 31 | 32 | key_pos = kp; 33 | } 34 | 35 | NeuralTable::~NeuralTable() { 36 | stop_batch(); 37 | stop_gc(); 38 | for (int i = 0; i < BUCKET_COUNT; ++i) { 39 | enif_rwlock_destroy(locks[i]); 40 | enif_free_env(env_buckets[i]); 41 | } 42 | } 43 | 44 | /* ================================================================ 45 | * MakeTable 46 | * Allocates a new table, assuming a unique atom identifier. 
This 47 | * table is stored in a static container. All interactions with 48 | * the table must be performed through the static class API. 49 | */ 50 | ERL_NIF_TERM NeuralTable::MakeTable(ErlNifEnv *env, ERL_NIF_TERM name, ERL_NIF_TERM key_pos) { 51 | char *atom; 52 | string key; 53 | unsigned int len = 0, 54 | pos = 0; 55 | ERL_NIF_TERM ret; 56 | 57 | // Allocate space for the name of the table 58 | enif_get_atom_length(env, name, &len, ERL_NIF_LATIN1); 59 | atom = (char*)enif_alloc(len + 1); 60 | 61 | // Fetch the value of the atom and store it in a string (because I can, that's why) 62 | enif_get_atom(env, name, atom, len + 1, ERL_NIF_LATIN1); 63 | key = atom; 64 | 65 | // Deallocate that space 66 | enif_free(atom); 67 | 68 | // Get the key position value 69 | enif_get_uint(env, key_pos, &pos); 70 | 71 | enif_mutex_lock(table_mutex); 72 | if (NeuralTable::tables.find(key) != NeuralTable::tables.end()) { 73 | // Table already exists? Bad monkey! 74 | ret = enif_make_badarg(env); 75 | } else { 76 | // All good. Make the table 77 | NeuralTable::tables[key] = new NeuralTable(pos); 78 | ret = enif_make_atom(env, "ok"); 79 | } 80 | enif_mutex_unlock(table_mutex); 81 | 82 | return ret; 83 | } 84 | 85 | /* ================================================================ 86 | * GetTable 87 | * Retrieves a handle to the table referenced by name, assuming 88 | * such a table exists. If not, throw badarg. 
89 | */ 90 | NeuralTable* NeuralTable::GetTable(ErlNifEnv *env, ERL_NIF_TERM name) { 91 | char *atom = NULL; 92 | string key; 93 | unsigned len = 0; 94 | NeuralTable *ret = NULL; 95 | table_set::const_iterator it; 96 | 97 | // Allocate space for the table name 98 | enif_get_atom_length(env, name, &len, ERL_NIF_LATIN1); 99 | atom = (char*)enif_alloc(len + 1); 100 | 101 | // Copy the table name into a string 102 | enif_get_atom(env, name, atom, len + 1, ERL_NIF_LATIN1); 103 | key = atom; 104 | 105 | // Deallocate that space 106 | enif_free(atom); 107 | 108 | // Look for the table and return its pointer if found 109 | it = NeuralTable::tables.find(key); 110 | if (it != NeuralTable::tables.end()) { 111 | ret = it->second; 112 | } 113 | 114 | return ret; 115 | } 116 | 117 | /* ================================================================ 118 | * Insert 119 | * Inserts a tuple into the table with key. 120 | */ 121 | ERL_NIF_TERM NeuralTable::Insert(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object) { 122 | NeuralTable *tb; 123 | ERL_NIF_TERM ret, old; 124 | unsigned long int entry_key = 0; 125 | 126 | // Grab table or bail. 127 | tb = GetTable(env, table); 128 | if (tb == NULL) { 129 | return enif_make_badarg(env); 130 | } 131 | 132 | // Get key value. 133 | enif_get_ulong(env, key, &entry_key); 134 | 135 | // Lock the key. 136 | tb->rwlock(entry_key); 137 | 138 | // Attempt to lookup the value. If nonempty, increment 139 | // discarded term counter and return a copy of the 140 | // old value 141 | if (tb->find(entry_key, old)) { 142 | tb->reclaim(entry_key, old); 143 | ret = enif_make_tuple2(env, enif_make_atom(env, "ok"), enif_make_copy(env, old)); 144 | } else { 145 | ret = enif_make_atom(env, "ok"); 146 | } 147 | 148 | // Write that shit out 149 | tb->put(entry_key, object); 150 | 151 | // Oh, and unlock the key if you would. 
152 | tb->rwunlock(entry_key); 153 | 154 | return ret; 155 | } 156 | 157 | /* ================================================================ 158 | * InsertNew 159 | * Inserts a tuple into the table with key, assuming there is not 160 | * a value with key already. Returns true if there was no value 161 | * for key, or false if there was. 162 | */ 163 | ERL_NIF_TERM NeuralTable::InsertNew(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object) { 164 | NeuralTable *tb; 165 | ERL_NIF_TERM ret, old; 166 | unsigned long int entry_key = 0; 167 | 168 | // Get the table or bail 169 | tb = GetTable(env, table); 170 | if (tb == NULL) { 171 | return enif_make_badarg(env); 172 | } 173 | 174 | // Get the key value 175 | enif_get_ulong(env, key, &entry_key); 176 | 177 | // Get write lock for the key 178 | tb->rwlock(entry_key); 179 | 180 | if (tb->find(entry_key, old)) { 181 | // Key was found. Return false and do not insert 182 | ret = enif_make_atom(env, "false"); 183 | } else { 184 | // Key was not found. Return true and insert 185 | tb->put(entry_key, object); 186 | ret = enif_make_atom(env, "true"); 187 | } 188 | 189 | // Release write lock for the key 190 | tb->rwunlock(entry_key); 191 | 192 | return ret; 193 | } 194 | 195 | /* ================================================================ 196 | * Increment 197 | * Processes a list of update operations. Each operation specifies 198 | * a position in the stored tuple to update and an integer to add 199 | * to it. 
200 | */ 201 | ERL_NIF_TERM NeuralTable::Increment(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) { 202 | NeuralTable *tb; 203 | ERL_NIF_TERM ret, old; 204 | ERL_NIF_TERM it; 205 | unsigned long int entry_key = 0; 206 | 207 | // Get table handle or bail 208 | tb = GetTable(env, table); 209 | if (tb == NULL) { 210 | return enif_make_badarg(env); 211 | } 212 | 213 | // Get key value 214 | enif_get_ulong(env, key, &entry_key); 215 | 216 | // Acquire read/write lock for key 217 | tb->rwlock(entry_key); 218 | 219 | // Try to read the value as it is 220 | if (tb->find(entry_key, old)) { 221 | // Value exists 222 | ERL_NIF_TERM op_cell; 223 | const ERL_NIF_TERM *tb_tpl; 224 | const ERL_NIF_TERM *op_tpl; 225 | ERL_NIF_TERM *new_tpl; 226 | ErlNifEnv *bucket_env = tb->get_env(entry_key); 227 | unsigned long int pos = 0; 228 | long int incr = 0; 229 | unsigned int ops_length = 0; 230 | int op_arity = 0, 231 | tb_arity = 0; 232 | 233 | // Expand tuple to work on elements 234 | enif_get_tuple(bucket_env, old, &tb_arity, &tb_tpl); 235 | 236 | // Allocate space for a copy the contents of the table 237 | // tuple and copy it in. All changes are to be made to 238 | // the copy of the tuple. 239 | new_tpl = (ERL_NIF_TERM*)enif_alloc(sizeof(ERL_NIF_TERM) * tb_arity); 240 | memcpy(new_tpl, tb_tpl, sizeof(ERL_NIF_TERM) * tb_arity); 241 | 242 | // Create empty list cell for return value. 
243 | ret = enif_make_list(env, 0); 244 | 245 | // Set iterator to first cell of ops 246 | it = ops; 247 | while(!enif_is_empty_list(env, it)) { 248 | long int value = 0; 249 | enif_get_list_cell(env, it, &op_cell, &it); // op_cell = hd(it), it = tl(it) 250 | enif_get_tuple(env, op_cell, &op_arity, &op_tpl); // op_arity = tuple_size(op_cell), op_tpl = [TplPos1, TplPos2] 251 | enif_get_ulong(env, op_tpl[0], &pos); // pos = (uint64)op_tpl[0] 252 | enif_get_long(env, op_tpl[1], &incr); // incr = (uint64)op_tpl[1] 253 | 254 | // Is the operation trying to modify a nonexistant 255 | // position? 256 | if (pos <= 0 || pos > tb_arity) { 257 | ret = enif_make_badarg(env); 258 | goto bailout; 259 | } 260 | 261 | // Is the operation trying to add to a value that's 262 | // not a number? 263 | if (!enif_is_number(bucket_env, new_tpl[pos - 1])) { 264 | ret = enif_make_badarg(env); 265 | goto bailout; 266 | } 267 | 268 | // Update the value stored in the tuple. 269 | enif_get_long(env, new_tpl[pos - 1], &value); 270 | tb->reclaim(entry_key, new_tpl[pos - 1]); 271 | new_tpl[pos - 1] = enif_make_long(bucket_env, value + incr); 272 | 273 | // Copy the new value to the head of the return list 274 | ret = enif_make_list_cell(env, enif_make_copy(env, new_tpl[pos - 1]), ret); 275 | } 276 | 277 | tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity)); 278 | 279 | // Bailout allows cancelling the update opertion 280 | // in case something goes wrong. It must always 281 | // come after tb->put and before enif_free and 282 | // rwunlock 283 | bailout: 284 | enif_free(new_tpl); 285 | } else { 286 | ret = enif_make_badarg(env); 287 | } 288 | // Release the rwlock for entry_key 289 | tb->rwunlock(entry_key); 290 | 291 | return ret; 292 | } 293 | 294 | /* ================================================================ 295 | * Unshift 296 | * Processes a list of update operations. 
Each update operation is 297 | * a tuple specifying the position of a list in the stored value to 298 | * update and a list of values to append. Elements are shifted from 299 | * the input list to the stored list, so: 300 | * 301 | * unshift([a,b,c,d]) results in [d,c,b,a] 302 | */ 303 | ERL_NIF_TERM NeuralTable::Unshift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) { 304 | NeuralTable *tb; 305 | ERL_NIF_TERM ret, old, it; 306 | unsigned long int entry_key; 307 | ErlNifEnv *bucket_env; 308 | 309 | tb = GetTable(env, table); 310 | if (tb == NULL) { 311 | return enif_make_badarg(env); 312 | } 313 | 314 | enif_get_ulong(env, key, &entry_key); 315 | 316 | tb->rwlock(entry_key); 317 | bucket_env = tb->get_env(entry_key); 318 | if (tb->find(entry_key, old)) { 319 | const ERL_NIF_TERM *old_tpl, 320 | *op_tpl; 321 | ERL_NIF_TERM *new_tpl; 322 | int tb_arity = 0, 323 | op_arity = 0; 324 | unsigned long pos = 0; 325 | unsigned int new_length = 0; 326 | ERL_NIF_TERM op, 327 | unshift, 328 | copy_it, 329 | copy_val; 330 | 331 | enif_get_tuple(bucket_env, old, &tb_arity, &old_tpl); 332 | new_tpl = (ERL_NIF_TERM*)enif_alloc(sizeof(ERL_NIF_TERM) * tb_arity); 333 | memcpy(new_tpl, old_tpl, sizeof(ERL_NIF_TERM) * tb_arity); 334 | 335 | it = ops; 336 | ret = enif_make_list(env, 0); 337 | 338 | while (!enif_is_empty_list(env, it)) { 339 | // Examine the operation. 340 | enif_get_list_cell(env, it, &op, &it); // op = hd(it), it = tl(it) 341 | enif_get_tuple(env, op, &op_arity, &op_tpl); // op_arity = tuple_size(op), op_tpl = [TplPos1, TplPos2] 342 | enif_get_ulong(env, op_tpl[0], &pos); // Tuple position to modify 343 | unshift = op_tpl[1]; // Values to unshfit 344 | 345 | // Argument 1 of the operation tuple is position; 346 | // make sure it's within the bounds of the tuple 347 | // in the table. 
348 | if (pos <= 0 || pos > tb_arity) { 349 | ret = enif_make_badarg(env); 350 | goto bailout; 351 | } 352 | 353 | // Make sure we were passed a list of things to push 354 | // onto the posth element of the entry 355 | if (!enif_is_list(env, unshift)) { 356 | ret = enif_make_badarg(env); 357 | } 358 | 359 | // Now iterate over unshift, moving its values to 360 | // the head of new_tpl[pos - 1] one by one 361 | copy_it = unshift; 362 | while (!enif_is_empty_list(env, copy_it)) { 363 | enif_get_list_cell(env, copy_it, ©_val, ©_it); 364 | new_tpl[pos - 1] = enif_make_list_cell(bucket_env, enif_make_copy(bucket_env, copy_val), new_tpl[pos - 1]); 365 | } 366 | enif_get_list_length(bucket_env, new_tpl[pos - 1], &new_length); 367 | ret = enif_make_list_cell(env, enif_make_uint(env, new_length), ret); 368 | } 369 | 370 | tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity)); 371 | 372 | bailout: 373 | enif_free(new_tpl); 374 | } else { 375 | ret = enif_make_badarg(env); 376 | } 377 | tb->rwunlock(entry_key); 378 | 379 | return ret; 380 | } 381 | 382 | ERL_NIF_TERM NeuralTable::Shift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) { 383 | NeuralTable *tb; 384 | ERL_NIF_TERM ret, old, it; 385 | unsigned long int entry_key; 386 | ErlNifEnv *bucket_env; 387 | 388 | tb = GetTable(env, table); 389 | if (tb == NULL) { 390 | return enif_make_badarg(env); 391 | } 392 | 393 | enif_get_ulong(env, key, &entry_key); 394 | 395 | tb->rwlock(entry_key); 396 | bucket_env = tb->get_env(entry_key); 397 | if (tb->find(entry_key, old)) { 398 | const ERL_NIF_TERM *old_tpl; 399 | const ERL_NIF_TERM *op_tpl; 400 | ERL_NIF_TERM *new_tpl; 401 | int tb_arity = 0, 402 | op_arity = 0; 403 | unsigned long pos = 0, 404 | count = 0; 405 | ERL_NIF_TERM op, list, shifted, reclaim; 406 | 407 | enif_get_tuple(bucket_env, old, &tb_arity, &old_tpl); 408 | new_tpl = (ERL_NIF_TERM*)enif_alloc(tb_arity * sizeof(ERL_NIF_TERM)); 409 | memcpy(new_tpl, old_tpl, 
sizeof(ERL_NIF_TERM) * tb_arity); 410 | 411 | it = ops; 412 | ret = enif_make_list(env, 0); 413 | reclaim = enif_make_list(bucket_env, 0); 414 | 415 | while(!enif_is_empty_list(env, it)) { 416 | enif_get_list_cell(env, it, &op, &it); 417 | enif_get_tuple(env, op, &op_arity, &op_tpl); 418 | enif_get_ulong(env, op_tpl[0], &pos); 419 | enif_get_ulong(env, op_tpl[1], &count); 420 | 421 | if (pos <= 0 || pos > tb_arity) { 422 | ret = enif_make_badarg(env); 423 | goto bailout; 424 | } 425 | 426 | if (!enif_is_list(env, new_tpl[pos -1])) { 427 | ret = enif_make_badarg(env); 428 | goto bailout; 429 | } 430 | 431 | shifted = enif_make_list(env, 0); 432 | if (count > 0) { 433 | ERL_NIF_TERM copy_it = new_tpl[pos - 1], 434 | val; 435 | int i = 0; 436 | while (i < count && !enif_is_empty_list(bucket_env, copy_it)) { 437 | enif_get_list_cell(bucket_env, copy_it, &val, ©_it); 438 | ++i; 439 | shifted = enif_make_list_cell(env, enif_make_copy(env, val), shifted); 440 | reclaim = enif_make_list_cell(env, val, reclaim); 441 | } 442 | new_tpl[pos - 1] = copy_it; 443 | } else if (count < 0) { 444 | ERL_NIF_TERM copy_it = new_tpl[pos - 1], 445 | val; 446 | while (!enif_is_empty_list(bucket_env, copy_it)) { 447 | enif_get_list_cell(bucket_env, copy_it, &val, ©_it); 448 | shifted = enif_make_list_cell(env, enif_make_copy(env, val), shifted); 449 | reclaim = enif_make_list_cell(env, val, reclaim); 450 | } 451 | new_tpl[pos - 1] = copy_it; 452 | } 453 | ret = enif_make_list_cell(env, shifted, ret); 454 | } 455 | 456 | tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity)); 457 | tb->reclaim(entry_key, reclaim); 458 | bailout: 459 | enif_free(new_tpl); 460 | } else { 461 | ret = enif_make_badarg(env); 462 | } 463 | tb->rwunlock(entry_key); 464 | 465 | return ret; 466 | } 467 | 468 | ERL_NIF_TERM NeuralTable::Swap(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops) { 469 | NeuralTable *tb; 470 | ERL_NIF_TERM ret, old, it; 471 | unsigned long int 
entry_key; 472 | ErlNifEnv *bucket_env; 473 | 474 | tb = GetTable(env, table); 475 | if (tb == NULL) { 476 | return enif_make_badarg(env); 477 | } 478 | 479 | enif_get_ulong(env, key, &entry_key); 480 | 481 | tb->rwlock(entry_key); 482 | bucket_env = tb->get_env(entry_key); 483 | if (tb->find(entry_key, old)) { 484 | const ERL_NIF_TERM *old_tpl; 485 | const ERL_NIF_TERM *op_tpl; 486 | ERL_NIF_TERM *new_tpl; 487 | int tb_arity = 0, 488 | op_arity = 0; 489 | unsigned long pos = 0; 490 | ERL_NIF_TERM op, list, shifted, reclaim; 491 | 492 | enif_get_tuple(bucket_env, old, &tb_arity, &old_tpl); 493 | new_tpl = (ERL_NIF_TERM*)enif_alloc(tb_arity * sizeof(ERL_NIF_TERM)); 494 | memcpy(new_tpl, old_tpl, sizeof(ERL_NIF_TERM) * tb_arity); 495 | 496 | it = ops; 497 | ret = enif_make_list(env, 0); 498 | reclaim = enif_make_list(bucket_env, 0); 499 | 500 | while (!enif_is_empty_list(env, it)) { 501 | enif_get_list_cell(env, it, &op, &it); 502 | enif_get_tuple(env, op, &op_arity, &op_tpl); 503 | enif_get_ulong(env, op_tpl[0], &pos); 504 | 505 | if (pos <= 0 || pos > tb_arity) { 506 | ret = enif_make_badarg(env); 507 | goto bailout; 508 | } 509 | 510 | reclaim = enif_make_list_cell(bucket_env, new_tpl[pos - 1], reclaim); 511 | ret = enif_make_list_cell(env, enif_make_copy(env, new_tpl[pos -1]), ret); 512 | new_tpl[pos - 1] = enif_make_copy(bucket_env, op_tpl[1]); 513 | } 514 | 515 | tb->put(entry_key, enif_make_tuple_from_array(bucket_env, new_tpl, tb_arity)); 516 | tb->reclaim(entry_key, reclaim); 517 | bailout: 518 | enif_free(new_tpl); 519 | } else { 520 | ret = enif_make_badarg(env); 521 | } 522 | tb->rwunlock(entry_key); 523 | 524 | return ret; 525 | } 526 | 527 | ERL_NIF_TERM NeuralTable::Delete(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key) { 528 | NeuralTable *tb; 529 | ERL_NIF_TERM val, ret; 530 | unsigned long int entry_key; 531 | 532 | tb = GetTable(env, table); 533 | if (tb == NULL) { return enif_make_badarg(env); } 534 | 535 | enif_get_ulong(env, key, 
&entry_key); 536 | 537 | tb->rwlock(entry_key); 538 | 539 | if (tb->erase(entry_key, val)) { 540 | tb->reclaim(entry_key, val); 541 | ret = enif_make_copy(env, val); 542 | } else { 543 | ret = enif_make_atom(env, "undefined"); 544 | } 545 | 546 | tb->rwunlock(entry_key); 547 | 548 | return ret; 549 | } 550 | 551 | ERL_NIF_TERM NeuralTable::Empty(ErlNifEnv *env, ERL_NIF_TERM table) { 552 | NeuralTable *tb; 553 | int n = 0; 554 | 555 | tb = GetTable(env, table); 556 | if (tb == NULL) { return enif_make_badarg(env); } 557 | 558 | // First, lock EVERY bucket. We want this to be an isolated operation. 559 | for (n = 0; n < BUCKET_COUNT; ++n) { 560 | enif_rwlock_rwlock(tb->locks[n]); 561 | } 562 | 563 | // Now clear the table 564 | for (n = 0; n < BUCKET_COUNT; ++n) { 565 | tb->hash_buckets[n].clear(); 566 | enif_clear_env(tb->env_buckets[n]); 567 | tb->garbage_cans[n] = 0; 568 | tb->reclaimable[n] = enif_make_list(tb->env_buckets[n], 0); 569 | } 570 | 571 | // Now unlock every bucket. 572 | for (n = 0; n < BUCKET_COUNT; ++n) { 573 | enif_rwlock_rwunlock(tb->locks[n]); 574 | } 575 | 576 | return enif_make_atom(env, "ok"); 577 | } 578 | 579 | ERL_NIF_TERM NeuralTable::Get(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key) { 580 | NeuralTable *tb; 581 | ERL_NIF_TERM ret, val; 582 | unsigned long int entry_key; 583 | 584 | // Acquire table handle, or quit if the table doesn't exist. 
585 | tb = GetTable(env, table); 586 | if (tb == NULL) { return enif_make_badarg(env); } 587 | 588 | // Get key value 589 | enif_get_ulong(env, key, &entry_key); 590 | 591 | // Lock the key 592 | tb->rlock(entry_key); 593 | 594 | // Read current value 595 | if (!tb->find(entry_key, val)) { 596 | ret = enif_make_atom(env, "undefined"); 597 | } else { 598 | ret = enif_make_copy(env, val); 599 | } 600 | 601 | tb->runlock(entry_key); 602 | 603 | return ret; 604 | } 605 | 606 | ERL_NIF_TERM NeuralTable::Dump(ErlNifEnv *env, ERL_NIF_TERM table) { 607 | NeuralTable *tb = GetTable(env, table); 608 | ErlNifPid self; 609 | ERL_NIF_TERM ret; 610 | 611 | if (tb == NULL) { return enif_make_badarg(env); } 612 | 613 | enif_self(env, &self); 614 | 615 | tb->add_batch_job(self, &NeuralTable::batch_dump); 616 | 617 | return enif_make_atom(env, "$neural_batch_wait"); 618 | } 619 | 620 | ERL_NIF_TERM NeuralTable::Drain(ErlNifEnv *env, ERL_NIF_TERM table) { 621 | NeuralTable *tb = GetTable(env, table); 622 | ErlNifPid self; 623 | int ret; 624 | 625 | if (tb == NULL) { return enif_make_badarg(env); } 626 | 627 | enif_self(env, &self); 628 | 629 | tb->add_batch_job(self, &NeuralTable::batch_drain); 630 | 631 | return enif_make_atom(env, "$neural_batch_wait"); 632 | } 633 | 634 | ERL_NIF_TERM NeuralTable::GetKeyPosition(ErlNifEnv *env, ERL_NIF_TERM table) { 635 | NeuralTable *tb = GetTable(env, table); 636 | 637 | if (tb == NULL) { return enif_make_badarg(env); } 638 | return enif_make_uint(env, tb->key_pos); 639 | } 640 | 641 | ERL_NIF_TERM NeuralTable::GarbageCollect(ErlNifEnv *env, ERL_NIF_TERM table) { 642 | NeuralTable *tb = GetTable(env, table); 643 | if (tb == NULL) { return enif_make_badarg(env); } 644 | 645 | enif_cond_signal(tb->gc_cond); 646 | 647 | return enif_make_atom(env, "ok"); 648 | } 649 | 650 | ERL_NIF_TERM NeuralTable::GarbageSize(ErlNifEnv *env, ERL_NIF_TERM table) { 651 | NeuralTable *tb = GetTable(env, table); 652 | unsigned long int size = 0; 653 | 654 | if (tb == 
NULL) { return enif_make_badarg(env); } 655 | 656 | size = tb->garbage_size(); 657 | 658 | return enif_make_ulong(env, size); 659 | } 660 | 661 | void* NeuralTable::DoGarbageCollection(void *table) { 662 | NeuralTable *tb = (NeuralTable*)table; 663 | 664 | enif_mutex_lock(tb->gc_mutex); 665 | 666 | while (running.load(memory_order_acquire)) { 667 | while (running.load(memory_order_acquire) && tb->garbage_size() < RECLAIM_THRESHOLD) { 668 | enif_cond_wait(tb->gc_cond, tb->gc_mutex); 669 | } 670 | tb->gc(); 671 | } 672 | 673 | enif_mutex_unlock(tb->gc_mutex); 674 | 675 | return NULL; 676 | } 677 | 678 | void* NeuralTable::DoReclamation(void *table) { 679 | const int max_eat = 5; 680 | NeuralTable *tb = (NeuralTable*)table; 681 | int i = 0, c = 0, t = 0;; 682 | ERL_NIF_TERM tl, hd; 683 | ErlNifEnv *env; 684 | 685 | while (running.load(memory_order_acquire)) { 686 | for (i = 0; i < BUCKET_COUNT; ++i) { 687 | c = 0; 688 | t = 0; 689 | tb->rwlock(i); 690 | env = tb->get_env(i); 691 | tl = tb->reclaimable[i]; 692 | while (c++ < max_eat && !enif_is_empty_list(env, tl)) { 693 | enif_get_list_cell(env, tl, &hd, &tl); 694 | tb->garbage_cans[i] += estimate_size(env, hd); 695 | t += tb->garbage_cans[i]; 696 | } 697 | tb->rwunlock(i); 698 | 699 | if (t >= RECLAIM_THRESHOLD) { 700 | enif_cond_signal(tb->gc_cond); 701 | } 702 | } 703 | usleep(50000); 704 | } 705 | 706 | return NULL; 707 | } 708 | 709 | void* NeuralTable::DoBatchOperations(void *table) { 710 | NeuralTable *tb = (NeuralTable*)table; 711 | 712 | enif_mutex_lock(tb->batch_mutex); 713 | 714 | while (running.load(memory_order_acquire)) { 715 | while (running.load(memory_order_acquire) && tb->batch_jobs.empty()) { 716 | enif_cond_wait(tb->batch_cond, tb->batch_mutex); 717 | } 718 | BatchJob job = tb->batch_jobs.front(); 719 | (tb->*job.fun)(job.pid); 720 | tb->batch_jobs.pop(); 721 | } 722 | 723 | enif_mutex_unlock(tb->batch_mutex); 724 | 725 | return NULL; 726 | } 727 | 728 | void NeuralTable::start_gc() { 729 | int 
ret; 730 | 731 | gc_mutex = enif_mutex_create("neural_table_gc"); 732 | gc_cond = enif_cond_create("neural_table_gc"); 733 | 734 | ret = enif_thread_create("neural_garbage_collector", &gc_tid, NeuralTable::DoGarbageCollection, (void*)this, NULL); 735 | if (ret != 0) { 736 | printf("[neural_gc] Can't create GC thread. Error Code: %d\r\n", ret); 737 | } 738 | 739 | // Start the reclaimer after the garbage collector. 740 | ret = enif_thread_create("neural_reclaimer", &rc_tid, NeuralTable::DoReclamation, (void*)this, NULL); 741 | if (ret != 0) { 742 | printf("[neural_gc] Can't create reclamation thread. Error Code: %d\r\n", ret); 743 | } 744 | } 745 | 746 | void NeuralTable::stop_gc() { 747 | enif_cond_signal(gc_cond); 748 | // Join the reclaimer before the garbage collector. 749 | enif_thread_join(rc_tid, NULL); 750 | enif_thread_join(gc_tid, NULL); 751 | } 752 | 753 | void NeuralTable::start_batch() { 754 | int ret; 755 | 756 | batch_mutex = enif_mutex_create("neural_table_batch"); 757 | batch_cond = enif_cond_create("neural_table_batch"); 758 | 759 | ret = enif_thread_create("neural_batcher", &batch_tid, NeuralTable::DoBatchOperations, (void*)this, NULL); 760 | if (ret != 0) { 761 | printf("[neural_batch] Can't create batch thread. 
Error Code: %d\r\n", ret); 762 | } 763 | } 764 | 765 | void NeuralTable::stop_batch() { 766 | enif_cond_signal(batch_cond); 767 | enif_thread_join(batch_tid, NULL); 768 | } 769 | 770 | void NeuralTable::put(unsigned long int key, ERL_NIF_TERM tuple) { 771 | ErlNifEnv *env = get_env(key); 772 | hash_buckets[GET_BUCKET(key)][key] = enif_make_copy(env, tuple); 773 | } 774 | 775 | ErlNifEnv* NeuralTable::get_env(unsigned long int key) { 776 | return env_buckets[GET_BUCKET(key)]; 777 | } 778 | 779 | bool NeuralTable::find(unsigned long int key, ERL_NIF_TERM &ret) { 780 | hash_table *bucket = &hash_buckets[GET_BUCKET(key)]; 781 | hash_table::iterator it = bucket->find(key); 782 | if (bucket->end() == it) { 783 | return false; 784 | } else { 785 | ret = it->second; 786 | return true; 787 | } 788 | } 789 | 790 | bool NeuralTable::erase(unsigned long int key, ERL_NIF_TERM &val) { 791 | hash_table *bucket = &hash_buckets[GET_BUCKET(key)]; 792 | hash_table::iterator it = bucket->find(key); 793 | bool ret = false; 794 | if (it != bucket->end()) { 795 | ret = true; 796 | val = it->second; 797 | bucket->erase(it); 798 | } 799 | return ret; 800 | } 801 | 802 | void NeuralTable::add_batch_job(ErlNifPid pid, BatchFunction fun) { 803 | BatchJob job; 804 | job.pid = pid; 805 | job.fun = fun; 806 | 807 | enif_mutex_lock(batch_mutex); 808 | batch_jobs.push(job); 809 | enif_mutex_unlock(batch_mutex); 810 | 811 | enif_cond_signal(batch_cond); 812 | } 813 | 814 | void NeuralTable::batch_drain(ErlNifPid pid) { 815 | ErlNifEnv *env = enif_alloc_env(); 816 | ERL_NIF_TERM msg, value; 817 | 818 | value = enif_make_list(env, 0); 819 | for (int i = 0; i < BUCKET_COUNT; ++i) { 820 | enif_rwlock_rwlock(locks[i]); 821 | 822 | for (hash_table::iterator it = hash_buckets[i].begin(); it != hash_buckets[i].end(); ++it) { 823 | value = enif_make_list_cell(env, enif_make_copy(env, it->second), value); 824 | } 825 | enif_clear_env(env_buckets[i]); 826 | hash_buckets[i].clear(); 827 | garbage_cans[i] = 0; 
828 | reclaimable[i] = enif_make_list(env_buckets[i], 0); 829 | 830 | enif_rwlock_rwunlock(locks[i]); 831 | } 832 | 833 | msg = enif_make_tuple2(env, enif_make_atom(env, "$neural_batch_response"), value); 834 | 835 | enif_send(NULL, &pid, env, msg); 836 | 837 | enif_free_env(env); 838 | } 839 | 840 | void NeuralTable::batch_dump(ErlNifPid pid) { 841 | ErlNifEnv *env = enif_alloc_env(); 842 | ERL_NIF_TERM msg, value; 843 | 844 | value = enif_make_list(env, 0); 845 | for (int i = 0; i < BUCKET_COUNT; ++i) { 846 | enif_rwlock_rlock(locks[i]); 847 | for (hash_table::iterator it = hash_buckets[i].begin(); it != hash_buckets[i].end(); ++it) { 848 | value = enif_make_list_cell(env, enif_make_copy(env, it->second), value); 849 | } 850 | enif_rwlock_runlock(locks[i]); 851 | } 852 | 853 | msg = enif_make_tuple2(env, enif_make_atom(env, "$neural_batch_response"), value); 854 | 855 | enif_send(NULL, &pid, env, msg); 856 | 857 | enif_free_env(env); 858 | } 859 | 860 | void NeuralTable::reclaim(unsigned long int key, ERL_NIF_TERM term) { 861 | int bucket = GET_BUCKET(key); 862 | ErlNifEnv *env = get_env(key); 863 | reclaimable[bucket] = enif_make_list_cell(env, term, reclaimable[bucket]); 864 | } 865 | 866 | void NeuralTable::gc() { 867 | ErlNifEnv *fresh = NULL, 868 | *old = NULL; 869 | hash_table *bucket = NULL; 870 | hash_table::iterator it; 871 | unsigned int gc_curr = 0; 872 | 873 | for (; gc_curr < BUCKET_COUNT; ++gc_curr) { 874 | bucket = &hash_buckets[gc_curr]; 875 | old = env_buckets[gc_curr]; 876 | fresh = enif_alloc_env(); 877 | 878 | enif_rwlock_rwlock(locks[gc_curr]); 879 | for (it = bucket->begin(); it != bucket->end(); ++it) { 880 | it->second = enif_make_copy(fresh, it->second); 881 | } 882 | 883 | garbage_cans[gc_curr] = 0; 884 | env_buckets[gc_curr] = fresh; 885 | reclaimable[gc_curr] = enif_make_list(fresh, 0); 886 | enif_free_env(old); 887 | enif_rwlock_rwunlock(locks[gc_curr]); 888 | } 889 | } 890 | 891 | unsigned long int NeuralTable::garbage_size() { 892 | 
unsigned long int size = 0; 893 | for (int i = 0; i < BUCKET_COUNT; ++i) { 894 | enif_rwlock_rlock(locks[i]); 895 | size += garbage_cans[i]; 896 | enif_rwlock_runlock(locks[i]); 897 | } 898 | return size; 899 | } 900 | -------------------------------------------------------------------------------- /c_src/NeuralTable.h: -------------------------------------------------------------------------------- 1 | #ifndef NEURALTABLE_H 2 | #define NEURALTABLE_H 3 | 4 | #include "erl_nif.h" 5 | #include "neural_utils.h" 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | #define BUCKET_COUNT 64 15 | #define BUCKET_MASK (BUCKET_COUNT - 1) 16 | #define GET_BUCKET(key) key & BUCKET_MASK 17 | #define GET_LOCK(key) key & BUCKET_MASK 18 | #define RECLAIM_THRESHOLD 1048576 19 | 20 | using namespace std; 21 | 22 | class NeuralTable; 23 | 24 | typedef unordered_map table_set; 25 | typedef unordered_map hash_table; 26 | typedef void (NeuralTable::*BatchFunction)(ErlNifPid pid); 27 | 28 | class NeuralTable { 29 | public: 30 | static ERL_NIF_TERM MakeTable(ErlNifEnv *env, ERL_NIF_TERM name, ERL_NIF_TERM keypos); 31 | static ERL_NIF_TERM Insert(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object); 32 | static ERL_NIF_TERM InsertNew(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM object); 33 | static ERL_NIF_TERM Delete(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key); 34 | static ERL_NIF_TERM Empty(ErlNifEnv *env, ERL_NIF_TERM table); 35 | static ERL_NIF_TERM Get(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key); 36 | static ERL_NIF_TERM Increment(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops); 37 | static ERL_NIF_TERM Shift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops); 38 | static ERL_NIF_TERM Unshift(ErlNifEnv *env, ERL_NIF_TERM table, ERL_NIF_TERM key, ERL_NIF_TERM ops); 39 | static ERL_NIF_TERM Swap(ErlNifEnv *env, ERL_NIF_TERM table, 
ERL_NIF_TERM key, ERL_NIF_TERM ops); 40 | static ERL_NIF_TERM Dump(ErlNifEnv *env, ERL_NIF_TERM table); 41 | static ERL_NIF_TERM Drain(ErlNifEnv *env, ERL_NIF_TERM table); 42 | static ERL_NIF_TERM GetKeyPosition(ErlNifEnv *env, ERL_NIF_TERM table); 43 | static ERL_NIF_TERM GarbageCollect(ErlNifEnv *env, ERL_NIF_TERM table); 44 | static ERL_NIF_TERM GarbageSize(ErlNifEnv *env, ERL_NIF_TERM table); 45 | static NeuralTable* GetTable(ErlNifEnv *env, ERL_NIF_TERM name); 46 | static void* DoGarbageCollection(void *table); 47 | static void* DoBatchOperations(void *table); 48 | static void* DoReclamation(void *table); 49 | static void Initialize() { 50 | table_mutex = enif_mutex_create("neural_table_maker"); 51 | } 52 | static void Shutdown() { 53 | running = false; 54 | table_set::iterator it(tables.begin()); 55 | 56 | while (it != tables.end()) { 57 | delete it->second; 58 | tables.erase(it); 59 | it = tables.begin(); 60 | } 61 | 62 | enif_mutex_destroy(table_mutex); 63 | } 64 | 65 | void rlock(unsigned long int key) { enif_rwlock_rlock(locks[GET_LOCK(key)]); } 66 | void runlock(unsigned long int key) { enif_rwlock_runlock(locks[GET_LOCK(key)]); } 67 | void rwlock(unsigned long int key) { enif_rwlock_rwlock(locks[GET_LOCK(key)]); } 68 | void rwunlock(unsigned long int key) { enif_rwlock_rwunlock(locks[GET_LOCK(key)]); } 69 | 70 | ErlNifEnv *get_env(unsigned long int key); 71 | bool erase(unsigned long int key, ERL_NIF_TERM &ret); 72 | bool find(unsigned long int key, ERL_NIF_TERM &ret); 73 | void put(unsigned long int key, ERL_NIF_TERM tuple); 74 | void batch_dump(ErlNifPid pid); 75 | void batch_drain(ErlNifPid pid); 76 | void start_gc(); 77 | void stop_gc(); 78 | void start_batch(); 79 | void stop_batch(); 80 | void gc(); 81 | void reclaim(unsigned long int key, ERL_NIF_TERM reclaim); 82 | unsigned long int garbage_size(); 83 | void add_batch_job(ErlNifPid pid, BatchFunction fun); 84 | 85 | protected: 86 | static table_set tables; 87 | static atomic running; 88 | static 
ErlNifMutex *table_mutex; 89 | 90 | struct BatchJob { 91 | ErlNifPid pid; 92 | BatchFunction fun; 93 | }; 94 | 95 | NeuralTable(unsigned int kp); 96 | ~NeuralTable(); 97 | 98 | unsigned int garbage_cans[BUCKET_COUNT]; 99 | hash_table hash_buckets[BUCKET_COUNT]; 100 | ErlNifEnv *env_buckets[BUCKET_COUNT]; 101 | ERL_NIF_TERM reclaimable[BUCKET_COUNT]; 102 | ErlNifRWLock *locks[BUCKET_COUNT]; 103 | ErlNifCond *gc_cond; 104 | ErlNifMutex *gc_mutex; 105 | ErlNifTid gc_tid; 106 | ErlNifTid rc_tid; 107 | ErlNifCond *batch_cond; 108 | ErlNifMutex *batch_mutex; 109 | queue batch_jobs; 110 | ErlNifTid batch_tid; 111 | 112 | unsigned int key_pos; 113 | }; 114 | 115 | #endif 116 | -------------------------------------------------------------------------------- /c_src/neural.cpp: -------------------------------------------------------------------------------- 1 | #include "erl_nif.h" 2 | #include "NeuralTable.h" 3 | #include 4 | 5 | // Prototypes 6 | static ERL_NIF_TERM neural_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 7 | static ERL_NIF_TERM neural_put(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 8 | static ERL_NIF_TERM neural_put_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 9 | static ERL_NIF_TERM neural_increment(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 10 | static ERL_NIF_TERM neural_unshift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 11 | static ERL_NIF_TERM neural_shift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 12 | static ERL_NIF_TERM neural_swap(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 13 | static ERL_NIF_TERM neural_get(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 14 | static ERL_NIF_TERM neural_delete(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 15 | static ERL_NIF_TERM neural_garbage(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 16 | static ERL_NIF_TERM neural_garbage_size(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 17 | static ERL_NIF_TERM 
neural_empty(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 18 | static ERL_NIF_TERM neural_drain(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 19 | static ERL_NIF_TERM neural_dump(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 20 | static ERL_NIF_TERM neural_key_pos(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]); 21 | 22 | static ErlNifFunc nif_funcs[] = 23 | { 24 | {"make_table", 2, neural_new}, 25 | {"do_fetch", 2, neural_get}, 26 | {"do_delete", 2, neural_delete}, 27 | {"do_dump", 1, neural_dump}, 28 | {"do_drain", 1, neural_drain}, 29 | {"empty", 1, neural_empty}, 30 | {"insert", 3, neural_put}, 31 | {"insert_new", 3, neural_put_new}, 32 | {"do_increment", 3, neural_increment}, 33 | {"do_unshift", 3, neural_unshift}, 34 | {"do_shift", 3, neural_shift}, 35 | {"do_swap", 3, neural_swap}, 36 | {"garbage", 1, neural_garbage}, 37 | {"garbage_size", 1, neural_garbage_size}, 38 | {"key_pos", 1, neural_key_pos} 39 | }; 40 | 41 | static ERL_NIF_TERM neural_key_pos(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 42 | // This function is directly exposed, so no strict guards or patterns protecting us. 
43 | if (argc != 1 || !enif_is_atom(env, argv[0])) { return enif_make_badarg(env); } 44 | 45 | return NeuralTable::GetKeyPosition(env, argv[0]); 46 | } 47 | 48 | static ERL_NIF_TERM neural_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 49 | return NeuralTable::MakeTable(env, argv[0], argv[1]); 50 | } 51 | 52 | static ERL_NIF_TERM neural_put(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 53 | return NeuralTable::Insert(env, argv[0], argv[1], argv[2]); 54 | } 55 | 56 | static ERL_NIF_TERM neural_put_new(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 57 | return NeuralTable::InsertNew(env, argv[0], argv[1], argv[2]); 58 | } 59 | 60 | static ERL_NIF_TERM neural_increment(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 61 | if (!enif_is_atom(env, argv[0]) || !enif_is_number(env, argv[1]) || !enif_is_list(env, argv[2])) { 62 | return enif_make_badarg(env); 63 | } 64 | 65 | return NeuralTable::Increment(env, argv[0], argv[1], argv[2]); 66 | } 67 | 68 | static ERL_NIF_TERM neural_shift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 69 | return NeuralTable::Shift(env, argv[0], argv[1], argv[2]); 70 | } 71 | 72 | static ERL_NIF_TERM neural_unshift(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 73 | return NeuralTable::Unshift(env, argv[0], argv[1], argv[2]); 74 | } 75 | 76 | static ERL_NIF_TERM neural_swap(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]){ 77 | return NeuralTable::Swap(env, argv[0], argv[1], argv[2]); 78 | } 79 | 80 | static ERL_NIF_TERM neural_get(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 81 | return NeuralTable::Get(env, argv[0], argv[1]); 82 | } 83 | 84 | static ERL_NIF_TERM neural_delete(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 85 | return NeuralTable::Delete(env, argv[0], argv[1]); 86 | } 87 | 88 | static ERL_NIF_TERM neural_empty(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 89 | if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); } 90 | 91 | return 
NeuralTable::Empty(env, argv[0]); 92 | } 93 | 94 | static ERL_NIF_TERM neural_dump(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 95 | if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); } 96 | 97 | return NeuralTable::Dump(env, argv[0]); 98 | } 99 | 100 | static ERL_NIF_TERM neural_drain(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 101 | if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); } 102 | 103 | return NeuralTable::Drain(env, argv[0]); 104 | } 105 | 106 | static ERL_NIF_TERM neural_garbage(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 107 | if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); } 108 | 109 | return NeuralTable::GarbageCollect(env, argv[0]); 110 | } 111 | 112 | static ERL_NIF_TERM neural_garbage_size(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) { 113 | if (!enif_is_atom(env, argv[0])) { return enif_make_badarg(env); } 114 | 115 | return NeuralTable::GarbageSize(env, argv[0]); 116 | } 117 | 118 | static void neural_resource_cleanup(ErlNifEnv* env, void* arg) 119 | { 120 | /* Delete any dynamically allocated memory stored in neural_handle */ 121 | /* neural_handle* handle = (neural_handle*)arg; */ 122 | } 123 | 124 | static int on_load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) 125 | { 126 | NeuralTable::Initialize(); 127 | return 0; 128 | } 129 | 130 | static void on_unload(ErlNifEnv *env, void *priv_data) { 131 | NeuralTable::Shutdown(); 132 | } 133 | 134 | ERL_NIF_INIT(neural, nif_funcs, &on_load, NULL, NULL, &on_unload); 135 | -------------------------------------------------------------------------------- /c_src/neural_utils.cpp: -------------------------------------------------------------------------------- 1 | #include "neural_utils.h" 2 | 3 | unsigned long int estimate_size(ErlNifEnv *env, ERL_NIF_TERM term) { 4 | if (enif_is_atom(env, term)) { 5 | return WORD_SIZE; 6 | } 7 | 8 | // Treating all numbers like longs. 
9 | if (enif_is_number(env, term)) { 10 | return 2 * WORD_SIZE; 11 | } 12 | 13 | if (enif_is_binary(env, term)) { 14 | ErlNifBinary bin; 15 | enif_inspect_binary(env, term, &bin); 16 | return bin.size + (6 * WORD_SIZE); 17 | } 18 | 19 | if (enif_is_list(env, term)) { 20 | unsigned long int size = 0; 21 | ERL_NIF_TERM it, curr; 22 | it = term; 23 | size += WORD_SIZE; 24 | while (!enif_is_empty_list(env, it)) { 25 | enif_get_list_cell(env, it, &curr, &it); 26 | size += estimate_size(env, curr) + WORD_SIZE; 27 | } 28 | return size; 29 | } 30 | 31 | if (enif_is_tuple(env, term)) { 32 | unsigned long int size = 0; 33 | const ERL_NIF_TERM *tpl; 34 | int arity; 35 | enif_get_tuple(env, term, &arity, &tpl); 36 | for (int i = 0; i < arity; ++i) { 37 | size += estimate_size(env, tpl[i]); 38 | } 39 | return size; 40 | } 41 | 42 | // Return 1 word by default 43 | return WORD_SIZE; 44 | } 45 | 46 | 47 | -------------------------------------------------------------------------------- /c_src/neural_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef NEURAL_UTILS_H 2 | #define NEURAL_UTILS_H 3 | 4 | #include "erl_nif.h" 5 | #define WORD_SIZE sizeof(int) 6 | 7 | unsigned long int estimate_size(ErlNifEnv *env, ERL_NIF_TERM term); 8 | 9 | #endif 10 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | {port_specs, [ 2 | {"priv/neural.so", ["c_src/*.cpp"]} 3 | ]}. 4 | {port_env, [ 5 | {".*", "CXXFLAGS", "$CXXFLAGS -Ic_src/ -Wno-write-strings -std=c++11 -O3"}, 6 | {".*", "LDFLAGS", "$LDFLAGS -lstdc++ -shared"} 7 | ]}. 8 | {erl_opts, [ 9 | {src_dirs, ["src", "test"]} 10 | ]}. 11 | {deps, [ 12 | {parse_trans, ".*", {git, "https://github.com/esl/parse_trans.git", {tag, "2.5.2"}}} 13 | ]}. 
-------------------------------------------------------------------------------- /src/neural.app.src: --------------------------------------------------------------------------------
{application, neural,
 [
  {description, ""},
  {vsn, "0.3.2"},
  {registered, []},
  {applications, [
                  kernel,
                  stdlib
                 ]},
  {mod, { neural_app, []}},
  {env, []}
 ]}.
-------------------------------------------------------------------------------- /src/neural.erl: --------------------------------------------------------------------------------
-module(neural).

-export([new/2, empty/1, drain/1, dump/1,           % Table operations
         garbage/1, garbage_size/1,
         key_pos/1]).
-export([lookup/2]).                                % Getters
-export([insert/2, insert_new/2, delete/2]).        % Setters
-export([increment/3, unshift/3, shift/3, swap/3]). % Delta operations
-on_load(init/0).

%% keypos: 1-based element index used as the key (as with ets' keypos).
-record(table_opts, {
          keypos = 1 :: integer()
         }).

-define(nif_stub, nif_stub_error(?LINE)).
nif_stub_error(Line) ->
    erlang:nif_error({nif_not_loaded,module,?MODULE,line,Line}).

-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-endif.

%% Loads the NIF shared object from priv/. When code:priv_dir/1 cannot
%% resolve the application, fall back to ../priv relative to the ebin dir.
init() ->
    PrivDir = case code:priv_dir(?MODULE) of
                  {error, bad_name} ->
                      EbinDir = filename:dirname(code:which(?MODULE)),
                      AppPath = filename:dirname(EbinDir),
                      filename:join(AppPath, "priv");
                  Path ->
                      Path
              end,
    erlang:load_nif(filename:join(PrivDir, ?MODULE), 0).

%% Creates a table. Opts is a proplist; only {key_pos, N} is recognized.
%% Any unknown option crashes with function_clause (same as before).
new(Table, Opts) ->
    new(Table, Opts, #table_opts{}).

new(Table, [{key_pos, KeyPos} | Rest], TableOpts) ->
    new(Table, Rest, TableOpts#table_opts{keypos = KeyPos});
new(Table, [], #table_opts{keypos = KeyPos}) when is_integer(KeyPos) ->
    make_table(Table, KeyPos).

make_table(_Table, _KeyPos) ->
    ?nif_stub.

%% Inserts Object (a tuple) into Table, keyed on its key_pos element.
%% The key is hashed with erlang:phash2/1 before crossing into the NIF.
insert(Table, Object) when is_atom(Table), is_tuple(Object) ->
    Key = element(key_pos(Table), Object),
    insert(Table, erlang:phash2(Key), Object).

insert(_Table, _Key, _Object) ->
    ?nif_stub.

%% As insert/2, but does not overwrite: returns false when the key exists.
insert_new(Table, Object) when is_atom(Table), is_tuple(Object) ->
    Key = element(key_pos(Table), Object),
    insert_new(Table, erlang:phash2(Key), Object).

insert_new(_Table, _Key, _Object) ->
    ?nif_stub.

%% Adds to stored integers in place. The third argument may be a bare
%% integer (applied at key_pos + 1), a single {Position, Increment} tuple,
%% or a list of such tuples. Returns the new value(s).
increment(Table, Key, Value) when is_integer(Value) ->
    [N] = increment(Table, Key, [{key_pos(Table) + 1, Value}]),
    N;
increment(Table, Key, Op = {Position, Value}) when is_integer(Position), is_integer(Value) ->
    [N] = increment(Table, Key, [Op]),
    N;
increment(Table, Key, Ops = [_|_]) when is_atom(Table) ->
    case lists:all(fun is_incr_op/1, Ops) of
        true ->
            lists:reverse(do_increment(Table, erlang:phash2(Key), Ops));
        false ->
            error(badarg)
    end.

%% Removes (up to) N elements from the head of stored lists. Accepts the
%% same operation shapes as increment/3. Returns the removed element(s).
shift(Table, Key, Value) when is_integer(Value) ->
    [R] = shift(Table, Key, [{key_pos(Table) + 1, Value}]),
    R;
shift(Table, Key, Op = {Position, Value}) when is_integer(Position), is_integer(Value) ->
    [R] = shift(Table, Key, [Op]),
    R;
shift(Table, Key, Ops = [_|_]) when is_atom(Table) ->
    case lists:all(fun is_shift_op/1, Ops) of
        true ->
            lists:reverse(do_shift(Table, erlang:phash2(Key), Ops));
        false ->
            error(badarg)
    end.

%% Prepends values to stored lists. The operation must be a single
%% {Position, List} tuple or a list of such tuples (a bare list would be
%% ambiguous). Returns the new length(s).
unshift(Table, Key, Op = {Position, Value}) when is_integer(Position), is_list(Value) ->
    [R] = unshift(Table, Key, [Op]),
    R;
unshift(Table, Key, Ops = [_|_]) when is_atom(Table) ->
    case lists:all(fun is_unshift_op/1, Ops) of
        true ->
            %% Consistency fix: was lists:reverse(Result, []), which is
            %% equivalent to lists:reverse/1; normalized to match
            %% increment/3, shift/3 and swap/3.
            lists:reverse(do_unshift(Table, erlang:phash2(Key), Ops));
        false ->
            error(badarg)
    end.

%% Replaces elements of a stored tuple. The operation is a single
%% {Position, NewValue} tuple or a list of such tuples; returns the
%% replaced (old) value(s).
swap(Table, Key, Op = {Position, _Value}) when is_integer(Position) ->
    [R] = swap(Table, Key, [Op]),
    R;
swap(Table, Key, Ops = [_|_]) when is_atom(Table) ->
    case lists:all(fun is_swap_op/1, Ops) of
        true ->
            lists:reverse(do_swap(Table, erlang:phash2(Key), Ops));
        false ->
            error(badarg)
    end.

%% Operation validators: one per delta operation family.
is_incr_op({Pos, Incr}) when is_integer(Pos), is_integer(Incr) -> true;
is_incr_op(_Other) -> false.

is_shift_op({Pos, Count}) when is_integer(Pos), is_integer(Count) -> true;
is_shift_op(_Other) -> false.

is_unshift_op({Pos, Values}) when is_integer(Pos), is_list(Values) -> true;
is_unshift_op(_Other) -> false.

is_swap_op({Pos, _Value}) when is_integer(Pos) -> true;
is_swap_op(_Other) -> false.

%% NIF stubs for the delta operations; replaced on load by neural.so.
do_increment(_Table, _Key, _Op) ->
    ?nif_stub.

do_shift(_Table, _Key, _Op) ->
    ?nif_stub.

do_unshift(_Table, _Key, _Op) ->
    ?nif_stub.

do_swap(_Table, _Key, _Op) ->
    ?nif_stub.

%% Returns the stored tuple for Key, or undefined.
lookup(Table, Key) when is_atom(Table) ->
    do_fetch(Table, erlang:phash2(Key)).

do_fetch(_Table, _Key) ->
    ?nif_stub.

%% Removes Key's tuple; the NIF returns the removed tuple or undefined.
delete(Table, Key) when is_atom(Table) ->
    do_delete(Table, erlang:phash2(Key)).

do_delete(_Table, _key) ->
    ?nif_stub.

garbage(_Table) ->
    ?nif_stub.

garbage_size(_Table) ->
    ?nif_stub.

empty(_Table) ->
    ?nif_stub.

%% Empties the table, returning everything it contained. The NIF queues a
%% batch job and replies asynchronously to this process.
drain(Table) ->
    '$neural_batch_wait' = do_drain(Table),
    wait_batch_response().

do_drain(_Table) ->
    ?nif_stub.

%% Returns everything in the table without removing it; same batch
%% mechanism as drain/1.
dump(Table) ->
    '$neural_batch_wait' = do_dump(Table),
    wait_batch_response().

do_dump(_Table) ->
    ?nif_stub.

key_pos(_Table) ->
    ?nif_stub.

%% Blocks until the NIF's batch thread sends the response for this caller.
wait_batch_response() ->
    receive
        {'$neural_batch_response', Response} -> Response
    end.

%% ===================================================================
%% EUnit tests
%% ===================================================================
-ifdef(TEST).

%% Fixed: the previous test called new/0 and myfunction/1, neither of which
%% exists in this module, so compilation failed with TEST defined. Exercise
%% the real API instead: create a table, insert, and read back.
basic_test() ->
    neural:new(neural_basic_test, []),
    true = neural:insert_new(neural_basic_test, {basic_key, 1}),
    ?assertEqual({basic_key, 1}, neural:lookup(neural_basic_test, basic_key)).

-endif.
-------------------------------------------------------------------------------- /src/neural_app.erl: --------------------------------------------------------------------------------
-module(neural_app).

-behaviour(application).

%% Application callbacks
-export([start/2, stop/1]).

%% ===================================================================
%% Application callbacks
%% ===================================================================

start(_StartType, _StartArgs) ->
    neural_sup:start_link().

stop(_State) ->
    ok.
-------------------------------------------------------------------------------- /src/neural_sup.erl: --------------------------------------------------------------------------------

-module(neural_sup).

-behaviour(supervisor).

%% API
-export([start_link/0]).

%% Supervisor callbacks
-export([init/1]).

%% Helper macro for declaring children of supervisor
-define(CHILD(I, Type), {I, {I, start_link, []}, permanent, 5000, Type, [I]}).

%% ===================================================================
%% API functions
%% ===================================================================

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

%% ===================================================================
%% Supervisor callbacks
%% ===================================================================

%% No static children: tables live in the NIF and are created on demand via
%% neural:new/2.
init([]) ->
    {ok, {
        {one_for_one, 5, 10},
        []
    }}.
-------------------------------------------------------------------------------- /test/neural_concurrency.erl: --------------------------------------------------------------------------------
-module(neural_concurrency).
-compile([{parse_transform, ct_expand}]).
-export([test/0]).

%% 1000 distinct keys, expanded at compile time via ct_expand.
-define(KEYS, ct_expand:term([ {"test_key", N} || N <- lists:seq(1, 1000) ])).
-define(NUM_KEYS, 1000).

%% Hammers one table: 64 workers each run 1000 increment jobs on random keys,
%% then latency statistics and the final table contents are printed.
test() ->
    neural:new(test, []),
    io:format("Insert time: ~p~n",
              [begin
                   {Dur, _} = timer:tc(fun() -> [ neural:insert(test, {Key, 0, 0, 0}) || Key <- ?KEYS ] end),
                   Dur
               end]),
    Pids = [ spawn(fun neural_worker/0) || _ <- lists:seq(1, 64) ],
    Refs = lists:flatten([ [ begin Ref = make_ref(), Pid ! {go, self(), Ref}, Ref end || Pid <- Pids ]
                           || _ <- lists:seq(1, 1000) ]),
    io:format("Generated ~p requests.~n", [length(Refs)]),
    wait(Pids, Refs).

wait(Pids, Refs) ->
    wait(Pids, Refs, []).

wait(Pids, [], Durations) ->
    %% Fixed: exit(Pid, normal) is ignored by processes that do not trap
    %% exits, so the workers were never actually terminated and leaked.
    %% 'kill' is untrappable and always terminates the worker.
    [ exit(Pid, kill) || Pid <- Pids ],
    Sorted = lists:sort(Durations),
    Min = lists:min(Durations),
    Max = lists:max(Durations),
    Cnt = length(Durations),
    Nth = trunc(Cnt * 0.9),
    %% U90: the fastest 90% of samples; Avg is their mean, Percentile is the
    %% 90th-percentile latency (last element of the truncated sorted list).
    U90 = lists:sublist(Sorted, Nth),
    Avg = lists:sum(U90) / Nth,
    Percentile = lists:nth(Nth, U90),
    Res = neural:dump(test),
    io:format("Min: ~p; Max: ~p: Avg: ~p; U90: ~p; Res: ~n", [Min, Max, Avg, Percentile]),
    [ io:format("~p~n", [Val]) || Val <- Res ],
    ok;
wait(Pids, Refs, Durations) ->
    receive
        {done, Ref, Dur} ->
            %% Progress log for roughly 1 in 1000 completions.
            case crypto:rand_uniform(1, 1000) of
                1 -> io:format("Refs remaining: ~p~n", [length(Refs)]);
                _ -> ok
            end,
            wait(Pids, lists:delete(Ref, Refs), [Dur | Durations])
    end.

%% Worker loop: on {go, From, Ref}, increments three counters on a random key
%% and reports the call's duration back to From.
%% NOTE(review): crypto:rand_uniform/2 was removed in OTP 20; migrate to
%% rand:uniform/1 when the supported OTP floor allows.
neural_worker() ->
    receive
        {go, Pid, Ref} ->
            Key = lists:nth(crypto:rand_uniform(1, ?NUM_KEYS + 1), ?KEYS),
            {Dur, _} = timer:tc(fun() -> neural:increment(test, Key, [{2, 1}, {3, 10}, {4, 100}]) end),
            Pid ! {done, Ref, Dur},
            neural_worker()
    end.