├── LICENSE ├── atomic_hashtable.h ├── atomic_hashtable_n.h ├── README.md ├── test_n_main.c ├── test_main.c ├── atomic_hashtable_n.c └── atomic_hashtable.c /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Taymindis Woon 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /atomic_hashtable.h: -------------------------------------------------------------------------------- 1 | #ifndef HEADER_ATOMICHASH 2 | #define HEADER_ATOMICHASH 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | #include 9 | #include 10 | #include 11 | 12 | typedef void* (*atomic_hash_malloc_fn)(size_t); 13 | typedef void (*atomic_hash_free_fn)(void*); 14 | 15 | typedef void *(*atomic_hash_read_node_fn)(void*); 16 | typedef void (*atomic_hash_free_node_fn)(void*); 17 | 18 | void init_hash_malloc_free_hooker(atomic_hash_malloc_fn malloc_fun, atomic_hash_free_fn free_fun); 19 | 20 | typedef char* HashKey; 21 | 22 | struct __atomic_node_s { 23 | void *val; 24 | HashKey key; 25 | // size_t key_len; 26 | /*atomic__*/ int used; 27 | struct __atomic_node_s *next; 28 | 29 | /*atomic__ */int reading_counter; 30 | }; 31 | 32 | typedef struct { 33 | struct __atomic_node_s *node_buf; 34 | size_t total_size; 35 | 36 | /*atomic__ */ size_t size; 37 | 38 | // Handling realloc and destroy counter 39 | /*atomic__*/ int accessing_counter; 40 | 41 | atomic_hash_read_node_fn read_node_fn; 42 | atomic_hash_free_node_fn free_node_fn; 43 | 44 | } __atomic_hash; 45 | 46 | 47 | __atomic_hash* __atomic_hash_init(size_t , atomic_hash_read_node_fn , atomic_hash_free_node_fn ); 48 | int __atomic_hash_put(__atomic_hash *, HashKey , void *); 49 | void* __atomic_hash_replace(__atomic_hash *, HashKey , void *); 50 | void* __atomic_hash_pop(__atomic_hash *, HashKey ); 51 | void* __atomic_hash_get(__atomic_hash *, HashKey ); 52 | void* __atomic_hash_read(__atomic_hash *, HashKey ); 53 | void __atomic_hash_destroy(__atomic_hash *); 54 | 55 | #ifdef __cplusplus 56 | } 57 | #endif 58 | 59 | #endif 60 | -------------------------------------------------------------------------------- /atomic_hashtable_n.h: -------------------------------------------------------------------------------- 1 | #ifndef HEADER_ATOMICHASH_N 2 | #define HEADER_ATOMICHASH_N 3 | 4 | #ifdef __cplusplus 5 | extern "C" { 6 | #endif 7 | 8 | #include 9 | #include 10 | #include 11 
| 12 | typedef void* (*atomic_hash_n_malloc_fn)(size_t);
13 | typedef void (*atomic_hash_n_free_fn)(void*);
14 |
15 | typedef void *(*atomic_hash_n_read_node_fn)(void*);
16 | typedef void (*atomic_hash_n_free_node_fn)(void*);
17 |
18 | void init_hash_n_malloc_free_hooker(atomic_hash_n_malloc_fn malloc_fun, atomic_hash_n_free_fn free_fun);
19 |
20 | typedef unsigned long atom_NumKey;
21 |
22 | struct __atomic_node_n {
23 |     void *val;
24 |     atom_NumKey key;
25 |     /*atomic__*/ int used;
26 |     struct __atomic_node_n *next;
27 |
28 |     /*atomic__ */int reading_counter;
29 | };
30 |
31 | typedef struct {
32 |     struct __atomic_node_n *node_buf;
33 |     size_t total_size;
34 |
35 |     /*atomic__ */ size_t size;
36 |
37 |     // Handling realloc and destroy counter
38 |     /*atomic__*/ int accessing_counter;
39 |
40 |     atomic_hash_n_read_node_fn read_node_fn;
41 |     atomic_hash_n_free_node_fn free_node_fn;
42 |
43 | } __atomic_hash_n;
44 |
45 |
46 | __atomic_hash_n* __atomic_hash_n_init(size_t , atomic_hash_n_read_node_fn , atomic_hash_n_free_node_fn );
47 | int __atomic_hash_n_put(__atomic_hash_n *, atom_NumKey , void *);
48 | void* __atomic_hash_n_replace(__atomic_hash_n *, atom_NumKey , void *);
49 | void* __atomic_hash_n_pop(__atomic_hash_n *, atom_NumKey );
50 | void* __atomic_hash_n_get(__atomic_hash_n *, atom_NumKey);
51 | void* __atomic_hash_n_read(__atomic_hash_n *, atom_NumKey);
52 | void __atomic_hash_n_destroy(__atomic_hash_n *);
53 |
54 | #ifdef __cplusplus
55 | }
56 | #endif
57 |
58 | #endif
59 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # atomic_hashtable
2 | Fast lock-free writes, frees, traversals and reads for multithreaded use
3 |
4 | ## atomic_hashtable for string keys
5 | ## atomic_hashtable_n for integer keys
6 |
7 | ### How does atomic_hashtable work?
8 | - Operations travel around a pre-allocated ring of node buffers. Each node carries its own metadata: atomic counters that grant read and write permission without affecting any other node's transaction. In addition, all nodes are governed by the parent table, which owns an accessing counter used solely to guard memory relocation (resizing) and destruction. A minimal sketch of this counter protocol is shown after the Target Project note below.
9 |
10 | ### Target Project
11 | - This library mainly targets workloads that read and write large data sets concurrently, committing many transactions at the same time without locking.
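The following is a self-contained sketch (not the library's own code) of the even/odd counter idea used above: accessors add 2 to the shared counter (keeping it even) while they work, and a maintainer compare-and-swaps it from 0 to -1 (odd) so that new accessors back off until relocation is finished. The `accessor`/`maintainer` names and the iteration count are illustrative only. Build with `gcc -std=c11 counter_sketch.c -pthread -o counter_sketch`.

```c
#include <pthread.h>
#include <stdio.h>

static int accessing_counter = 0;  /* even = normal access allowed, odd = maintenance in progress */
static long slots_touched = 0;     /* stand-in for real hashtable work */

static void *accessor(void *arg) {
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        /* enter: add 2; if the previous value was odd, maintenance holds the table, so back off and retry */
        while (__atomic_fetch_add(&accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0)
            __atomic_fetch_sub(&accessing_counter, 2, __ATOMIC_RELEASE);

        __atomic_fetch_add(&slots_touched, 1, __ATOMIC_RELAXED);      /* pretend to use a slot */

        __atomic_fetch_sub(&accessing_counter, 2, __ATOMIC_RELEASE);  /* leave */
    }
    return NULL;
}

static void *maintainer(void *arg) {
    (void)arg;
    /* wait until nobody is inside (counter == 0), then flip the counter odd to exclude everyone */
    while (!__sync_bool_compare_and_swap(&accessing_counter, 0, -1))
        ;
    /* ...this is where the table would relocate its node buffer exclusively... */
    __atomic_fetch_add(&accessing_counter, 1, __ATOMIC_RELEASE);      /* -1 -> 0: reopen access */
    return NULL;
}

int main(void) {
    pthread_t a, b, m;
    pthread_create(&a, NULL, accessor, NULL);
    pthread_create(&b, NULL, accessor, NULL);
    pthread_create(&m, NULL, maintainer, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    pthread_join(m, NULL);
    printf("slots touched: %ld\n", __atomic_load_n(&slots_touched, __ATOMIC_RELAXED));
    return 0;
}
```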
12 |
13 | ### Requirements
14 | - gcc/clang/llvm-gcc (with atomic builtins)
15 |
16 |
17 | ### Sample concurrent test
18 | - go to the root directory
19 | - gcc -std=c11 test_main.c atomic_hashtable.c -pthread -o test
20 | - ./test
21 |
22 | ### API document (a minimal usage sketch appears at the end of this README)
23 | ###### __atomic_hash* __atomic_hash_init(size_t , atomic_hash_read_node_fn , atomic_hash_free_node_fn )
24 | * arg0 = the size of the table
25 | * arg1 = the read-node function (used only by __atomic_hash_read); it must not be NULL unless you never call __atomic_hash_read
26 | * arg2 = the free-node function (if NULL, values are not freed when the table is destroyed)
27 | * return = the new atomic hashtable, or NULL on failure
28 |
29 | ###### int __atomic_hash_put(__atomic_hash *, HashKey , void *);
30 | * arg0 = the hashtable
31 | * arg1 = the key as a char string
32 | * arg2 = the pre-allocated value
33 | * return = 1 on success, 0 on failure
34 |
35 | ###### void* __atomic_hash_replace(__atomic_hash *, HashKey , void *);
36 | * arg0 = the hashtable
37 | * arg1 = the key as a string
38 | * arg2 = the pre-allocated value that replaces the old one
39 | * return = the previously held pre-allocated value, or NULL if the key is not found
40 |
41 | ###### void* __atomic_hash_pop(__atomic_hash *, HashKey );
42 | * arg0 = the hashtable
43 | * arg1 = the key as a string
44 | * return = the popped pre-allocated value if found, else NULL
45 |
46 | ###### void* __atomic_hash_get(__atomic_hash *, HashKey );
47 | * arg0 = the hashtable
48 | * arg1 = the key as a string
49 | * return = the referenced value if found, else NULL
50 | * this is a reference pointer into the hashtable; it is not recommended when another thread may concurrently free the value, use __atomic_hash_read instead
51 |
52 | ###### void* __atomic_hash_read(__atomic_hash *, HashKey );
53 | * arg0 = the hashtable
54 | * arg1 = the key as a string
55 | * return = a duplicated value if found, else NULL
56 | * requires a read function to be supplied when initializing the atomic_hashtable
57 |
58 | ###### void __atomic_hash_destroy(__atomic_hash *);
59 | * arg0 = the hashtable
60 |
61 |
62 |
63 | ### What is a pre-allocated value in this readme?
64 | * it means the value must be heap-allocated before it is put into the hashtable. The buffer keeps that same pointer until you pop it out and free it.
65 |
66 |
67 | ### What is atomic_hash_read_node_fn in this readme?
68 | * it is mainly for the __atomic_hash_read function, which triggers atomic_hash_read_node_fn to hand back a duplicated value while the current one remains in the buffer.
69 | * see test_main.c for an example.
70 |
71 | ### What is atomic_hash_free_node_fn in this readme?
72 | * it is mainly for the __atomic_hash_destroy function, which iterates the buffer and triggers atomic_hash_free_node_fn on every node that still holds memory.
73 | * see test_main.c for an example.
74 |
75 | ### Simple, right?! Enjoy your lock-free travelling!!
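As promised above, here is a minimal single-threaded usage sketch of the string-key API. It is modelled on test_main.c; the `Object` value type and the `copy_str`/`free_object`/`read_object` helpers are illustrative names, not part of the library. Build it together with the library source, e.g. `gcc -std=c11 usage_sketch.c atomic_hashtable.c -pthread -o usage_sketch`.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "atomic_hashtable.h"

typedef struct { char *name; } Object;       /* illustrative value type */

static char *copy_str(const char *s) {       /* small strdup-style helper */
    size_t sz = strlen(s) + 1;
    char *p = malloc(sz);
    memcpy(p, s, sz);
    return p;
}

static void free_object(void *v) {           /* free_node_fn: called by destroy */
    free(((Object *)v)->name);
    free(v);
}

static void *read_object(void *origin) {     /* read_node_fn: duplicates for __atomic_hash_read */
    Object *copy = malloc(sizeof(Object));
    copy->name = copy_str(((Object *)origin)->name);
    return copy;
}

int main(void) {
    __atomic_hash *table = __atomic_hash_init(64, read_object, free_object);

    Object *o = malloc(sizeof(Object));      /* the "pre-allocated value" */
    o->name = copy_str("hello");
    __atomic_hash_put(table, "key-1", o);    /* the table holds o until it is popped */

    Object *dup = __atomic_hash_read(table, "key-1");   /* private copy, caller frees */
    if (dup) {
        printf("read: %s\n", dup->name);
        free_object(dup);
    }

    Object *popped = __atomic_hash_pop(table, "key-1"); /* removed from the table, caller frees */
    if (popped)
        free_object(popped);

    __atomic_hash_destroy(table);            /* frees any values still inside via free_node_fn */
    return 0;
}
```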
76 | 77 | -------------------------------------------------------------------------------- /test_n_main.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "atomic_hashtable_n.h" 7 | 8 | /** Test SCOPE **/ 9 | typedef struct { 10 | unsigned long name; 11 | } Object; 12 | 13 | 14 | static void freeObject(void *node) { 15 | // printf("%s\n", "Free from POP"); 16 | free(node); 17 | } 18 | 19 | static void* readObject(void *origin) { 20 | Object *a = malloc(sizeof(Object)); 21 | 22 | memcpy(a, origin, sizeof(Object)); 23 | return a; 24 | } 25 | 26 | 27 | #define PRINT_INT(x) printf("DEBUG:: %d\n", x) 28 | #define PRINT_SIZE(x) printf("DEBUG:: %zu\n", x) 29 | #define PRINT_STR(x) printf("DEBUG:: %s\n", x) 30 | 31 | unsigned int 32 | randr(unsigned int min, unsigned int max) 33 | { 34 | double scaled = (double)rand() / RAND_MAX; 35 | 36 | return (max - min + 1) * scaled + min; 37 | } 38 | 39 | 40 | __atomic_hash_n *my_hashtable; 41 | int atomic_key_name; 42 | int i ; 43 | 44 | void *myThreadFun(void *vargp) 45 | { 46 | 47 | atomic_key_name = randr(0, 2000000); 48 | 49 | 50 | atom_NumKey num_key = (atom_NumKey)atomic_key_name; 51 | 52 | Object *o = (Object*)malloc(sizeof(Object)); 53 | o->name = num_key; 54 | // printf("hash is %lu, and resize is %zu \n", hash(num_key), my_hashtable->resize ); 55 | __atomic_hash_n_put(my_hashtable, num_key, o); 56 | 57 | 58 | o = (Object*) __atomic_hash_n_read(my_hashtable, num_key); 59 | if (o) { 60 | printf("%lu\n", o->name); 61 | free(o); 62 | } 63 | 64 | 65 | 66 | o = (Object*) __atomic_hash_n_get(my_hashtable, num_key); 67 | if(!o) { 68 | printf("%s\n", "not found"); 69 | } 70 | 71 | Object *s_st, *old_v ; 72 | s_st = (Object*)malloc(sizeof(Object)); 73 | s_st->name = num_key; 74 | old_v = __atomic_hash_n_replace(my_hashtable, num_key, s_st); 75 | if (old_v) { 76 | free(old_v); 77 | } else { 78 | free(s_st); 79 | } 80 | 81 | /* Uncomment if you want to pop function */ 82 | // s_st = (Object*) __atomic_hash_n_pop(my_hashtable, num_key); 83 | // if (s_st) { 84 | 85 | // // printf("%s\n", s_st->name); 86 | // // printf("%.*s\n", 11, s_st->name); 87 | 88 | // free(s_st); 89 | // } else { 90 | // printf("%s\n", "nothing"); 91 | // } 92 | 93 | 94 | return NULL; 95 | } 96 | 97 | 98 | #define MAX_THREAD 5000 99 | 100 | int main(void) { 101 | my_hashtable = __atomic_hash_n_init(4000, readObject, freeObject); 102 | // atomic_key_name = ATOMIC_VAR_INIT(0); 103 | 104 | PRINT_SIZE(my_hashtable->total_size); 105 | 106 | struct timeval t0, t1; 107 | unsigned int i; 108 | 109 | pthread_t tid[MAX_THREAD]; 110 | 111 | gettimeofday(&t0, NULL); 112 | for (i = 0; i < MAX_THREAD; i++) { 113 | pthread_create(&tid[i], NULL, myThreadFun, NULL); 114 | // pthread_join(tid[i], NULL); 115 | } 116 | 117 | 118 | for (i = 0; i < MAX_THREAD; i++) 119 | pthread_join(tid[i], NULL); 120 | 121 | gettimeofday(&t1, NULL); 122 | printf("Did %u calls in %.2g seconds\n", i, t1.tv_sec - t0.tv_sec + 1E-6 * (t1.tv_usec - t0.tv_usec)); 123 | 124 | PRINT_INT(i); 125 | PRINT_STR("total size"); 126 | PRINT_SIZE(my_hashtable->total_size); 127 | 128 | __atomic_hash_n_destroy(my_hashtable); 129 | 130 | return 0; 131 | } -------------------------------------------------------------------------------- /test_main.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include "atomic_hashtable.h" 7 | 8 | /** 
Test SCOPE **/ 9 | typedef struct { 10 | char* name; 11 | } Object; 12 | 13 | 14 | static void freeObject(void *node) { 15 | // printf("%s\n", "Free from POP"); 16 | free(((Object*)node)->name); 17 | free(node); 18 | } 19 | 20 | static void* readObject(void *origin) { 21 | Object *a = malloc(sizeof(Object)); 22 | 23 | memcpy(a, origin, sizeof(Object)); 24 | 25 | size_t sz = (strlen(((Object*)origin)->name) + 1) * sizeof(char); 26 | 27 | a->name = malloc(sz); 28 | memcpy(a->name, ((Object*)origin)->name, sz); 29 | 30 | 31 | return a; 32 | } 33 | 34 | 35 | #define PRINT_INT(x) printf("DEBUG:: %d\n", x) 36 | #define PRINT_SIZE(x) printf("DEBUG:: %zu\n", x) 37 | #define PRINT_STR(x) printf("DEBUG:: %s\n", x) 38 | 39 | unsigned int 40 | randr(unsigned int min, unsigned int max) 41 | { 42 | double scaled = (double)rand() / RAND_MAX; 43 | 44 | return (max - min + 1) * scaled + min; 45 | } 46 | 47 | 48 | __atomic_hash *my_hashtable; 49 | int atomic_key_name; 50 | int i ; 51 | 52 | void *myThreadFun(void *vargp) 53 | { 54 | // static int loop_index = 0; 55 | 56 | // if(loop_index < 100) { 57 | // usleep(1000*200); 58 | // } 59 | // atomic_fetch_add(&atomic_key_name, 1); 60 | 61 | // if(atomic_load(&atomic_key_name) >= 500) { 62 | // atomic_fetch_sub_explicit(&atomic_key_name, 500, __ATOMIC_RELEASE); 63 | // } 64 | atomic_key_name = randr(0, 2000000); 65 | 66 | 67 | char str[10]; 68 | sprintf(str, "%d", atomic_key_name); 69 | 70 | Object *o = (Object*)malloc(sizeof(Object)); 71 | o->name = malloc(10*sizeof(char)); 72 | memcpy(o->name, str, 10 * sizeof(char)); 73 | // printf("hash is %lu, and resize is %zu \n", hash(str), my_hashtable->resize ); 74 | __atomic_hash_put(my_hashtable, str, o); 75 | 76 | o = (Object*) __atomic_hash_read(my_hashtable, str); 77 | if (o) { 78 | printf("%s\n", o->name); 79 | free(o->name); 80 | free(o); 81 | } 82 | 83 | o = (Object*) __atomic_hash_get(my_hashtable, str); 84 | if (!o) { 85 | // printf("%s\n", o->name); 86 | printf("%s\n", "not found"); 87 | } else { 88 | // printf("%s\n", o->name); 89 | } 90 | 91 | Object *s_st, *old_v ; 92 | s_st = (Object*)malloc(sizeof(Object)); 93 | s_st->name = malloc(10*sizeof(char)); 94 | memcpy(s_st->name, str, 10 * sizeof(char)); 95 | old_v = __atomic_hash_replace(my_hashtable, str, s_st); 96 | if (old_v) { 97 | free(old_v->name); 98 | free(old_v); 99 | } else { 100 | free(s_st->name); 101 | free(s_st); 102 | } 103 | 104 | // s_st = (Object*) __atomic_hash_pop(my_hashtable, str); 105 | // if (s_st) { 106 | 107 | // // printf("%s\n", s_st->name); 108 | // // printf("%.*s\n", 11, s_st->name); 109 | 110 | // free(s_st->name); 111 | // free(s_st); 112 | // } else { 113 | // printf("%s\n", "nothing"); 114 | // } 115 | 116 | 117 | return NULL; 118 | } 119 | 120 | 121 | #define MAX_THREAD 5000 122 | 123 | int main(void) { 124 | my_hashtable = __atomic_hash_init(4000, readObject, freeObject); 125 | // atomic_key_name = ATOMIC_VAR_INIT(0); 126 | 127 | PRINT_SIZE(my_hashtable->total_size); 128 | 129 | struct timeval t0, t1; 130 | unsigned int i; 131 | 132 | pthread_t tid[MAX_THREAD]; 133 | 134 | gettimeofday(&t0, NULL); 135 | for (i = 0; i < MAX_THREAD; i++) { 136 | pthread_create(&tid[i], NULL, myThreadFun, NULL); 137 | // pthread_join(tid[i], NULL); 138 | } 139 | 140 | 141 | for (i = 0; i < MAX_THREAD; i++) 142 | pthread_join(tid[i], NULL); 143 | 144 | gettimeofday(&t1, NULL); 145 | printf("Did %u calls in %.2g seconds\n", i, t1.tv_sec - t0.tv_sec + 1E-6 * (t1.tv_usec - t0.tv_usec)); 146 | 147 | PRINT_INT(i); 148 | PRINT_STR("total size"); 
149 | PRINT_SIZE(my_hashtable->total_size); 150 | 151 | __atomic_hash_destroy(my_hashtable); 152 | 153 | return 0; 154 | } -------------------------------------------------------------------------------- /atomic_hashtable_n.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "atomic_hashtable_n.h" 5 | 6 | /*** 7 | * Author: taymindis@gmail.com - taymindis 8 | * Atomic Hashtable for Integer, The faster free and writeable, travese and read without locking 9 | * priority for multithreading purposes. 10 | * Atomic Hashtable care your modify share memory when other people using. 11 | * Version 0.0.1, 22-AUG-2017 12 | ****/ 13 | void* __atomic_hash_n_alloc_slot_(__atomic_hash_n *atom_hash, unsigned long hash_index); 14 | int __atomic_hash_n_release_slot_(__atomic_hash_n *atom_hash, struct __atomic_node_n *n); 15 | 16 | int __atomic_hash_n_realloc_buffer_(__atomic_hash_n *atom_hash); 17 | 18 | 19 | #define get_hash_index(s) s%atom_hash->total_size 20 | 21 | 22 | static atomic_hash_n_malloc_fn __atomic_hash_n_malloc_fn = malloc; 23 | static atomic_hash_n_free_fn __atomic_hash_n_free_fn = free; 24 | 25 | void __atomic_hash_n_check_add_slot_(__atomic_hash_n *atom_hash); 26 | 27 | void 28 | init_hash_n_malloc_free_hooker(atomic_hash_n_malloc_fn malloc_fun, atomic_hash_n_free_fn free_fun) { 29 | __atomic_hash_n_malloc_fn = malloc_fun; 30 | __atomic_hash_n_free_fn = free_fun; 31 | } 32 | 33 | void* 34 | __atomic_hash_n_alloc_slot_(__atomic_hash_n *atom_hash, unsigned long hash_index) { 35 | // printf("hash index is %lu\n", hash_index); 36 | struct __atomic_node_n * return_ = atom_hash->node_buf + hash_index; 37 | size_t total_size = atom_hash->total_size, i; 38 | RETRY: 39 | for (i = 0; i < total_size && __sync_lock_test_and_set(&return_->used, 1) ; i++) { 40 | // return_++; 41 | return_ = return_->next; 42 | } 43 | 44 | // To identify if i less than total size means slot found 45 | if (i < total_size) { 46 | return return_; 47 | } 48 | // if i over finding, but size still less than total size , retry to find a slot, it might happen when reach the last slot 49 | else if (__atomic_load_n(&atom_hash->size, __ATOMIC_SEQ_CST) < atom_hash->total_size) { 50 | return_ = atom_hash->node_buf + hash_index; // reset to starting position and retry 51 | goto RETRY; 52 | } 53 | // No more slot, it might not happen 54 | else { 55 | return NULL; 56 | } 57 | 58 | } 59 | 60 | int 61 | __atomic_hash_n_release_slot_(__atomic_hash_n *atom_hash, struct __atomic_node_n *n) { 62 | 63 | n->key = 0; 64 | n->val = NULL; 65 | 66 | // To make it unused slot 67 | __sync_lock_release(&n->used); 68 | 69 | return 1; 70 | } 71 | 72 | /*API Endpoint*/ 73 | __atomic_hash_n* 74 | __atomic_hash_n_init(size_t hash_size, atomic_hash_n_read_node_fn read_node_fn_, atomic_hash_n_free_node_fn free_node_fn_) { 75 | if (hash_size <= 1) { 76 | perror("ERROR:: Hash size must at least more than 1" ); 77 | return NULL; 78 | } 79 | 80 | if (read_node_fn_ == NULL) 81 | printf("%s\n", "WARNING:: Must have read function provided to read the value, else do not use read function"); 82 | 83 | if (free_node_fn_ == NULL) 84 | printf("%s\n", "WARNING:: Must have free node function provided to free the node, else it won't free the value when destroying the hashtable"); 85 | 86 | 87 | __atomic_hash_n *atom_hash = __atomic_hash_n_malloc_fn(sizeof(__atomic_hash_n)); 88 | 89 | if (atom_hash == NULL) return 0; 90 | 91 | atom_hash->total_size = hash_size; 92 | 93 | 94 | /** 
Pre-allocate all nodes **/ 95 | atom_hash->node_buf = __atomic_hash_n_malloc_fn(hash_size * sizeof(struct __atomic_node_n)); 96 | 97 | 98 | if (atom_hash->node_buf == NULL) { 99 | __atomic_hash_n_free_fn(atom_hash); 100 | return 0; 101 | } 102 | 103 | size_t i; 104 | for (i = 0; i < hash_size; i++) { 105 | atom_hash->node_buf[i].key = 0; 106 | // atom_hash->node_buf[i].key_len = 0; 107 | atom_hash->node_buf[i].val = NULL; 108 | atom_hash->node_buf[i].next = atom_hash->node_buf + i + 1; 109 | atom_hash->node_buf[i].used = 0; 110 | // Atomic Init 111 | atom_hash->node_buf[i].reading_counter = 0; 112 | // atomic_flag_clear(&atom_hash->node_buf[i].used); 113 | 114 | } 115 | //For Last Node refer to first node 116 | atom_hash->node_buf[hash_size - 1].next = atom_hash->node_buf; 117 | 118 | // Atomic Init 119 | atom_hash->size = 0; 120 | 121 | // Atomic Init 122 | atom_hash->accessing_counter = 0; 123 | 124 | // atom_hash->is_maintaining = (atomic_flag)ATOMIC_FLAG_INIT; 125 | // atom_hash->resize = 1; // starting is 1 126 | 127 | // read function when get the obejct for secure copy 128 | atom_hash->read_node_fn = read_node_fn_; 129 | atom_hash->free_node_fn = free_node_fn_; 130 | 131 | return atom_hash; 132 | } 133 | 134 | int 135 | __atomic_hash_n_realloc_buffer_(__atomic_hash_n *atom_hash) { 136 | 137 | struct __atomic_node_n *old_node_buf = atom_hash->node_buf; 138 | size_t total_size = atom_hash->total_size; 139 | size_t new_total_size = atom_hash->total_size * 2, i, old_hash_index; 140 | 141 | /** Pre-allocate all nodes **/ 142 | struct __atomic_node_n *new_node_buf = __atomic_hash_n_malloc_fn(new_total_size * sizeof(struct __atomic_node_n)); 143 | // printf("Realloc Starting\n\n"); 144 | 145 | if (new_node_buf == NULL) { 146 | return 0; // no more memory 147 | } 148 | 149 | for (i = 0; i < new_total_size; i++) { 150 | new_node_buf[i].used = 0; 151 | } 152 | 153 | // Reform the hash code algorithm by starting to set total size at first 154 | atom_hash->total_size = new_total_size; 155 | 156 | 157 | for (old_hash_index = 0; old_hash_index < total_size; old_hash_index++) { 158 | if (atom_hash->node_buf[old_hash_index].used == 1) { 159 | size_t new_hash_index = get_hash_index(atom_hash->node_buf[old_hash_index].key); 160 | // Linear Probing Logic 161 | while (new_node_buf[new_hash_index].used != 0) { 162 | new_hash_index++; 163 | // wrap around 164 | new_hash_index %= new_total_size; 165 | } 166 | memcpy(new_node_buf + new_hash_index, atom_hash->node_buf + old_hash_index, sizeof(struct __atomic_node_n)); 167 | } 168 | 169 | } 170 | 171 | for (i = 0; i < new_total_size; i++) { 172 | if (new_node_buf[i].used == 0) { 173 | new_node_buf[i].key = 0; 174 | // new_node_buf[i].key_len = 0; 175 | new_node_buf[i].val = NULL; 176 | new_node_buf[i].reading_counter = 0; 177 | } 178 | 179 | new_node_buf[i].next = new_node_buf + i + 1; 180 | 181 | } 182 | 183 | atom_hash->node_buf = new_node_buf; 184 | // atom_hash->resize = new_resize; 185 | 186 | //For Last Node refer to the first node 187 | atom_hash->node_buf[new_total_size - 1].next = atom_hash->node_buf; 188 | 189 | 190 | __atomic_hash_n_free_fn(old_node_buf); 191 | 192 | return 1; 193 | } 194 | 195 | void 196 | __atomic_hash_n_check_add_slot_(__atomic_hash_n *atom_hash) { 197 | if (__atomic_fetch_add(&atom_hash->size, 1, __ATOMIC_ACQUIRE) >= atom_hash->total_size) { 198 | int replace_val = -1, 199 | expected_val = 0; 200 | 201 | 202 | // means spinning lock for reading counter 203 | while (!__sync_bool_compare_and_swap( 204 | 
&atom_hash->accessing_counter 205 | , expected_val 206 | , replace_val)); 207 | 208 | if (__atomic_load_n(&atom_hash->size, __ATOMIC_SEQ_CST) > atom_hash->total_size) { 209 | __atomic_hash_n_realloc_buffer_(atom_hash); 210 | } 211 | 212 | // reset back the accessing counter, the reason accessing counter is global share, you have to use atomic exchange to change value 213 | // eventually reset accessing counter to 0 to let other thread accessing back 214 | if (atom_hash->accessing_counter % 2 != 0) 215 | __atomic_fetch_add(&atom_hash->accessing_counter, 1, __ATOMIC_RELAXED); 216 | 217 | } 218 | } 219 | 220 | int 221 | __atomic_hash_n_put(__atomic_hash_n *atom_hash, atom_NumKey key_, void *value) { 222 | int success = 0; 223 | 224 | __atomic_hash_n_check_add_slot_(atom_hash); 225 | 226 | while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0 ) { 227 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 228 | } 229 | 230 | // it is spin lock allocation 231 | struct __atomic_node_n *node = __atomic_hash_n_alloc_slot_(atom_hash, get_hash_index(key_)); 232 | 233 | if (node) { 234 | node->val = value; 235 | node->key = key_; 236 | success = 1; 237 | } else { 238 | __atomic_fetch_sub(&atom_hash->size, 1, __ATOMIC_RELAXED); //ROLLBACK If not allocable 239 | } 240 | 241 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 242 | return success; 243 | } 244 | 245 | void* 246 | __atomic_hash_n_replace(__atomic_hash_n *atom_hash, atom_NumKey key_, void *value) { 247 | size_t total_size = atom_hash->total_size, i; 248 | void *found = NULL; 249 | 250 | __atomic_hash_n_check_add_slot_(atom_hash); 251 | 252 | while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0 ) { 253 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 254 | } 255 | 256 | struct __atomic_node_n *buffer = atom_hash->node_buf + (get_hash_index(key_)); 257 | 258 | for (i = 0; i < total_size ; i++) { 259 | // Use even number to prevent conflict issue with pop, even number means popable 260 | if (__atomic_fetch_add(&buffer->reading_counter, 2, __ATOMIC_ACQUIRE) % 2 == 0 && 261 | buffer->used == 1 && key_ == buffer->key) 262 | goto SUCCESS; 263 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 264 | buffer = buffer->next; 265 | } 266 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 267 | return found; 268 | SUCCESS: 269 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // still need to release read back 270 | 271 | // it is spin lock allocation 272 | struct __atomic_node_n *node = __atomic_hash_n_alloc_slot_(atom_hash, get_hash_index(key_)); 273 | 274 | if (node) { 275 | 276 | int replace_val = -1; 277 | int expected_val = 0; 278 | 279 | while (!__sync_bool_compare_and_swap( 280 | &buffer->reading_counter 281 | , expected_val 282 | , replace_val)); 283 | 284 | found = buffer->val; 285 | 286 | if (buffer->used == 1 && key_ == buffer->key) { 287 | node->val = value; 288 | node->key = key_; 289 | // node->key_len = buffer->key_len; 290 | __atomic_hash_n_release_slot_(atom_hash, buffer); // release the old slot 291 | } else { 292 | // if someone has replace this slot, release back the 293 | __atomic_hash_n_release_slot_(atom_hash, node); 294 | found = NULL; 295 | } 296 | __atomic_add_fetch(&buffer->reading_counter, 1, __ATOMIC_RELEASE ); 297 | } 298 | 299 | 
__atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 300 | return found; 301 | } 302 | 303 | void* 304 | __atomic_hash_n_pop(__atomic_hash_n *atom_hash, atom_NumKey key_) { 305 | void * return_ = NULL; 306 | size_t total_size = atom_hash->total_size, i; 307 | 308 | while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0 ) { 309 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 310 | } 311 | 312 | struct __atomic_node_n *buffer = atom_hash->node_buf + (get_hash_index(key_)); 313 | 314 | for (i = 0; i < total_size ; i++) { 315 | if (__atomic_fetch_add(&buffer->reading_counter, 2, __ATOMIC_ACQUIRE) % 2 == 0 && 316 | buffer->used == 1 && key_ == buffer->key) 317 | goto SUCCESS; 318 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 319 | buffer = buffer->next; 320 | } 321 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 322 | return return_; 323 | SUCCESS: 324 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 325 | 326 | int replace_val = -1; 327 | int expected_val = 0; 328 | 329 | // means spinning lock for reading counter 330 | while (!__sync_bool_compare_and_swap(&buffer->reading_counter 331 | , expected_val 332 | , replace_val)); 333 | 334 | return_ = buffer->val; 335 | 336 | if (buffer->used == 1 && key_ == buffer->key && 337 | __atomic_hash_n_release_slot_(atom_hash, buffer)) { 338 | __atomic_fetch_sub(&atom_hash->size, 1, __ATOMIC_RELAXED); 339 | } else { 340 | return_ = NULL; 341 | } 342 | __atomic_add_fetch(&buffer->reading_counter, 1, __ATOMIC_RELEASE ); 343 | 344 | // release access back 345 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 346 | 347 | return return_; 348 | } 349 | 350 | /** get is getting the reference pointer from the hashtable, it is not good for multi concurrent write 351 | while other thread is free the field value, should use read **/ 352 | void* 353 | __atomic_hash_n_get(__atomic_hash_n *atom_hash, atom_NumKey key_) { 354 | void * return_ = NULL; 355 | size_t total_size = atom_hash->total_size, i; 356 | 357 | while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0 ) { 358 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 359 | } 360 | 361 | struct __atomic_node_n *buffer = atom_hash->node_buf + (get_hash_index(key_)); 362 | 363 | for (i = 0; i < total_size ; i++) { 364 | // Use even number to prevent conflict issue with pop, even number means popable 365 | if (__atomic_fetch_add(&buffer->reading_counter, 2, __ATOMIC_ACQUIRE) % 2 == 0 && 366 | buffer->used == 1 && key_ == buffer->key) { 367 | // if (i > 2)printf("loop index %zu\n", i ); 368 | goto SUCCESS; 369 | } 370 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 371 | buffer = buffer->next; 372 | } 373 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 374 | return return_; 375 | SUCCESS: 376 | return_ = buffer->val; 377 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 378 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 379 | 380 | return return_; 381 | } 382 | 383 | void* 384 | __atomic_hash_n_read(__atomic_hash_n *atom_hash, atom_NumKey key_) { 385 | void * return_ = NULL; 386 | size_t total_size = atom_hash->total_size, i; 387 | 388 
| while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0 ) { 389 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 390 | } 391 | 392 | struct __atomic_node_n *buffer = atom_hash->node_buf + (get_hash_index(key_)); 393 | 394 | for (i = 0; i < total_size ; i++) { 395 | // Use even number to prevent conflict issue with pop, even number means popable 396 | if (__atomic_fetch_add(&buffer->reading_counter, 2, __ATOMIC_ACQUIRE) % 2 == 0 && 397 | buffer->used == 1 && key_ == buffer->key) { 398 | // if (i > 2)printf("loop index %zu\n", i ); 399 | goto SUCCESS; 400 | } 401 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 402 | buffer = buffer->next; 403 | } 404 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 405 | return return_; 406 | SUCCESS: 407 | return_ = atom_hash->read_node_fn(buffer->val); 408 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 409 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 410 | 411 | return return_; 412 | } 413 | 414 | void 415 | __atomic_hash_n_destroy(__atomic_hash_n *atom_hash) { 416 | int replace_val = -1; 417 | int expected_val = 0; 418 | size_t total_size = atom_hash->total_size, i; 419 | 420 | 421 | // means spinning lock for reading counter 422 | while (!__sync_bool_compare_and_swap(&atom_hash->accessing_counter 423 | , expected_val 424 | , replace_val)); 425 | 426 | // this is loop from start 427 | struct __atomic_node_n *buffer = atom_hash->node_buf; 428 | 429 | 430 | for (i = 0; i < total_size ; i++) { 431 | if (buffer->used) { 432 | buffer->key = 0; 433 | if (atom_hash->free_node_fn) 434 | atom_hash->free_node_fn(buffer->val); 435 | // else __atomic_hash_n_free_fn(buffer->val); 436 | } 437 | buffer = buffer->next; 438 | } 439 | 440 | __atomic_hash_n_free_fn(atom_hash->node_buf); 441 | 442 | __atomic_hash_n_free_fn(atom_hash); 443 | 444 | atom_hash = 0; 445 | } 446 | -------------------------------------------------------------------------------- /atomic_hashtable.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include "atomic_hashtable.h" 5 | 6 | /*** 7 | * Author: taymindis@gmail.com - taymindis 8 | * Atomic Hashtable, The faster free and writeable, travese and read without locking 9 | * priority for multithreading purposes. 10 | * Atomic Hashtable care your modify share memory when other people using. 11 | * Version 0.0.1, 22-AUG-2017 12 | ****/ 13 | void* __atomic_hash_alloc_slot_(__atomic_hash *atom_hash, unsigned long hash_index); 14 | int __atomic_hash_release_slot_(__atomic_hash *atom_hash, struct __atomic_node_s *n); 15 | 16 | int __atomic_hash_realloc_buffer_(__atomic_hash *atom_hash); 17 | 18 | // Logic Derived from djb2 by Dan Bernstein. 
19 | static unsigned long hash(unsigned char *str); 20 | 21 | 22 | #define get_hash_index(s) hash((unsigned char*)s)%atom_hash->total_size 23 | 24 | void __atomic_hash_check_add_slot_(__atomic_hash *atom_hash); 25 | 26 | static atomic_hash_malloc_fn __atomic_hash_malloc_fn = malloc; 27 | static atomic_hash_free_fn __atomic_hash_free_fn = free; 28 | 29 | void 30 | init_hash_malloc_free_hooker(atomic_hash_malloc_fn malloc_fun, atomic_hash_free_fn free_fun) { 31 | __atomic_hash_malloc_fn = malloc_fun; 32 | __atomic_hash_free_fn = free_fun; 33 | } 34 | 35 | void* 36 | __atomic_hash_alloc_slot_(__atomic_hash *atom_hash, unsigned long hash_index) { 37 | // printf("hash index is %lu\n", hash_index); 38 | struct __atomic_node_s * return_ = atom_hash->node_buf + hash_index; 39 | size_t total_size = atom_hash->total_size, i; 40 | RETRY: 41 | for (i = 0; i < total_size && __sync_lock_test_and_set(&return_->used, 1) ; i++) { 42 | // return_++; 43 | return_ = return_->next; 44 | } 45 | 46 | // To identify if i less than total size means slot found 47 | if (i < total_size) { 48 | return return_; 49 | } 50 | // if i over finding, but size still less than total size , retry to find a slot, it might happen when reach the last slot 51 | else if (__atomic_load_n(&atom_hash->size, __ATOMIC_SEQ_CST) < atom_hash->total_size) { 52 | return_ = atom_hash->node_buf + hash_index; // reset to starting position and retry 53 | goto RETRY; 54 | } 55 | // No more slot, it might not happen 56 | else { 57 | return NULL; 58 | } 59 | 60 | } 61 | 62 | int 63 | __atomic_hash_release_slot_(__atomic_hash *atom_hash, struct __atomic_node_s *n) { 64 | // HashKey temp_key = n->key; 65 | if (n->key) { 66 | __atomic_hash_free_fn(n->key); 67 | n->key = NULL; 68 | } 69 | 70 | // n->key_len = 0; 71 | n->val = NULL; 72 | 73 | // To make it unused slot 74 | __sync_lock_release(&n->used); 75 | 76 | return 1; 77 | } 78 | 79 | /*API Endpoint*/ 80 | __atomic_hash* 81 | __atomic_hash_init(size_t hash_size, atomic_hash_read_node_fn read_node_fn_, atomic_hash_free_node_fn free_node_fn_) { 82 | if (hash_size <= 1) { 83 | perror("ERROR:: Hash size must at least more than 1" ); 84 | return NULL; 85 | } 86 | 87 | if (read_node_fn_ == NULL) 88 | printf("%s\n", "WARNING:: Must have read node function provided to read the node, else do not use read function"); 89 | 90 | if (free_node_fn_ == NULL) 91 | printf("%s\n", "WARNING:: Must have free node function provided to free the node, else it won't free the value when destroying the hashtable"); 92 | 93 | __atomic_hash *atom_hash = __atomic_hash_malloc_fn(sizeof(__atomic_hash)); 94 | 95 | if (atom_hash == NULL) return 0; 96 | 97 | atom_hash->total_size = hash_size; 98 | 99 | // for assigning const val, this is one time intial only 100 | // *(size_t *)&atom_hash->modulus_size = hash_size; 101 | 102 | /** Pre-allocate all nodes **/ 103 | atom_hash->node_buf = __atomic_hash_malloc_fn(hash_size * sizeof(struct __atomic_node_s)); 104 | 105 | 106 | if (atom_hash->node_buf == NULL) { 107 | __atomic_hash_free_fn(atom_hash); 108 | return 0; 109 | } 110 | 111 | size_t i; 112 | for (i = 0; i < hash_size; i++) { 113 | atom_hash->node_buf[i].key = 0; 114 | // atom_hash->node_buf[i].key_len = 0; 115 | atom_hash->node_buf[i].val = NULL; 116 | atom_hash->node_buf[i].next = atom_hash->node_buf + i + 1; 117 | atom_hash->node_buf[i].used = 0; 118 | // Atomic Init 119 | atom_hash->node_buf[i].reading_counter = 0; 120 | // atomic_flag_clear(&atom_hash->node_buf[i].used); 121 | 122 | } 123 | //For Last Node refer to first node 
124 | atom_hash->node_buf[hash_size - 1].next = atom_hash->node_buf; 125 | 126 | // Atomic Init 127 | atom_hash->size = 0; 128 | 129 | // Atomic Init 130 | atom_hash->accessing_counter = 0; 131 | 132 | // read function when get the obejct for secure copy 133 | atom_hash->read_node_fn = read_node_fn_; 134 | atom_hash->free_node_fn = free_node_fn_; 135 | 136 | return atom_hash; 137 | } 138 | 139 | int 140 | __atomic_hash_realloc_buffer_(__atomic_hash *atom_hash) { 141 | 142 | struct __atomic_node_s *old_node_buf = atom_hash->node_buf; 143 | size_t total_size = atom_hash->total_size; 144 | size_t new_total_size = atom_hash->total_size * 2, i, old_hash_index; 145 | 146 | /** Pre-allocate all nodes **/ 147 | struct __atomic_node_s *new_node_buf = __atomic_hash_malloc_fn(new_total_size * sizeof(struct __atomic_node_s)); 148 | 149 | 150 | if (new_node_buf == NULL) { 151 | return 0; // no more memory 152 | } 153 | 154 | for (i = 0; i < new_total_size; i++) 155 | new_node_buf[i].key = NULL; 156 | 157 | atom_hash->total_size = new_total_size; 158 | 159 | 160 | 161 | 162 | // Relocation the hash code index, to prevent this trigger often, Suggestion to init with big size of hashtable 163 | for (old_hash_index = 0; old_hash_index < total_size; old_hash_index++) { 164 | if (atom_hash->node_buf[old_hash_index].used == 1) { 165 | size_t new_hash_index = get_hash_index(atom_hash->node_buf[old_hash_index].key); 166 | // Linear Probing Logic 167 | while (new_node_buf[new_hash_index].key != NULL) { 168 | new_hash_index++; 169 | // wrap around 170 | new_hash_index %= new_total_size; 171 | } 172 | memcpy(new_node_buf + new_hash_index, atom_hash->node_buf + old_hash_index, sizeof(struct __atomic_node_s)); 173 | } 174 | 175 | } 176 | 177 | 178 | 179 | for (i = 0; i < new_total_size; i++) { 180 | if (new_node_buf[i].key == NULL) { 181 | // new_node_buf[i].key = 0; // it is already set at above 182 | // new_node_buf[i].key_len = 0; 183 | new_node_buf[i].val = NULL; 184 | new_node_buf[i].used = 0; 185 | new_node_buf[i].reading_counter = 0; 186 | } 187 | 188 | new_node_buf[i].next = new_node_buf + i + 1; 189 | 190 | } 191 | 192 | atom_hash->node_buf = new_node_buf; 193 | 194 | //For Last Node refer to the first node 195 | atom_hash->node_buf[new_total_size - 1].next = atom_hash->node_buf; 196 | 197 | __atomic_hash_free_fn(old_node_buf); 198 | 199 | return 1; 200 | } 201 | 202 | void 203 | __atomic_hash_check_add_slot_(__atomic_hash *atom_hash) { 204 | if (__atomic_fetch_add(&atom_hash->size, 1, __ATOMIC_ACQUIRE) >= atom_hash->total_size) { 205 | int replace_val = -1, 206 | expected_val = 0; 207 | 208 | 209 | // means spinning lock for reading counter 210 | while (!__sync_bool_compare_and_swap( 211 | &atom_hash->accessing_counter 212 | , expected_val 213 | , replace_val)); 214 | 215 | if (__atomic_load_n(&atom_hash->size, __ATOMIC_SEQ_CST) > atom_hash->total_size) { 216 | __atomic_hash_realloc_buffer_(atom_hash); 217 | } 218 | 219 | // reset back the accessing counter, the reason accessing counter is global share, you have to use atomic exchange to change value 220 | // eventually reset accessing counter to 0 to let other thread accessing back 221 | if (atom_hash->accessing_counter % 2 != 0) 222 | __atomic_fetch_add(&atom_hash->accessing_counter, 1, __ATOMIC_RELAXED); 223 | 224 | } 225 | } 226 | 227 | int 228 | __atomic_hash_put(__atomic_hash *atom_hash, HashKey key_, void *value) { 229 | int success = 0; 230 | size_t keyLen = strlen(key_); 231 | size_t size_of_key_with_padding = (keyLen + 1) * sizeof(char); 232 | 233 
| __atomic_hash_check_add_slot_(atom_hash); 234 | 235 | while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0 ) { 236 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 237 | } 238 | 239 | // it is spin lock allocation 240 | struct __atomic_node_s *node = __atomic_hash_alloc_slot_(atom_hash, get_hash_index(key_)); 241 | 242 | if (node) { 243 | node->val = value; 244 | node->key = __atomic_hash_malloc_fn(size_of_key_with_padding); 245 | memcpy(node->key, key_, size_of_key_with_padding); 246 | node->key[keyLen] = 0; 247 | success = 1; 248 | } else { 249 | __atomic_fetch_sub(&atom_hash->size, 1, __ATOMIC_RELAXED); //ROLLBACK If not allocable 250 | } 251 | 252 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 253 | return success; 254 | } 255 | 256 | void* 257 | __atomic_hash_replace(__atomic_hash *atom_hash, HashKey key_, void *value) { 258 | size_t keyLen = strlen(key_); 259 | size_t size_of_key_with_padding = (keyLen + 1) * sizeof(char); 260 | size_t total_size = atom_hash->total_size, i; 261 | void *found = NULL; 262 | 263 | __atomic_hash_check_add_slot_(atom_hash); 264 | 265 | while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0 ) { 266 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 267 | } 268 | 269 | struct __atomic_node_s *buffer = atom_hash->node_buf + (get_hash_index(key_)); 270 | 271 | for (i = 0; i < total_size ; i++) { 272 | // Use even number to prevent conflict issue with pop, even number means popable 273 | if (__atomic_fetch_add(&buffer->reading_counter, 2, __ATOMIC_ACQUIRE) % 2 == 0 && 274 | buffer->key && strncmp(key_, buffer->key, keyLen) == 0) 275 | goto SUCCESS; 276 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 277 | buffer = buffer->next; 278 | } 279 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 280 | return found; 281 | SUCCESS: 282 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // still need to release read back 283 | 284 | // it is spin lock allocation 285 | struct __atomic_node_s *node = __atomic_hash_alloc_slot_(atom_hash, get_hash_index(key_)); 286 | 287 | if (node) { 288 | 289 | int replace_val = -1; 290 | int expected_val = 0; 291 | 292 | while (!__sync_bool_compare_and_swap( 293 | &buffer->reading_counter 294 | , expected_val 295 | , replace_val)); 296 | 297 | found = buffer->val; 298 | 299 | if (buffer->key && strncmp(key_, buffer->key, keyLen) == 0) { 300 | node->val = value; 301 | node->key = __atomic_hash_malloc_fn(size_of_key_with_padding); 302 | memcpy(node->key, key_, size_of_key_with_padding); 303 | node->key[keyLen] = 0; 304 | // node->key_len = buffer->key_len; 305 | __atomic_hash_release_slot_(atom_hash, buffer); // release the old slot 306 | } else { 307 | // if someone has replace this slot, release back the 308 | __atomic_hash_release_slot_(atom_hash, node); 309 | found = NULL; 310 | } 311 | __atomic_add_fetch(&buffer->reading_counter, 1, __ATOMIC_RELEASE ); 312 | } 313 | 314 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 315 | return found; 316 | } 317 | 318 | void* 319 | __atomic_hash_pop(__atomic_hash *atom_hash, HashKey key_) { 320 | void * return_ = NULL; 321 | size_t keyLen = strlen(key_); 322 | size_t total_size = atom_hash->total_size, i; 323 | 324 | while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, 
__ATOMIC_ACQUIRE) % 2 != 0 ) { 325 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 326 | } 327 | 328 | struct __atomic_node_s *buffer = atom_hash->node_buf + (get_hash_index(key_)); 329 | 330 | for (i = 0; i < total_size ; i++) { 331 | if (__atomic_fetch_add(&buffer->reading_counter, 2, __ATOMIC_ACQUIRE) % 2 == 0 && 332 | buffer->key && strncmp(key_, buffer->key, keyLen) == 0) 333 | goto SUCCESS; 334 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 335 | buffer = buffer->next; 336 | } 337 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 338 | return return_; 339 | SUCCESS: 340 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 341 | 342 | int replace_val = -1; 343 | int expected_val = 0; 344 | 345 | // means spinning lock for reading counter 346 | while (!__sync_bool_compare_and_swap(&buffer->reading_counter 347 | , expected_val 348 | , replace_val)); 349 | 350 | return_ = buffer->val; 351 | 352 | if (buffer->key && 353 | strncmp(key_, buffer->key, keyLen) == 0 && 354 | __atomic_hash_release_slot_(atom_hash, buffer)) { 355 | __atomic_fetch_sub(&atom_hash->size, 1, __ATOMIC_RELAXED); 356 | } else { 357 | return_ = NULL; 358 | } 359 | __atomic_add_fetch(&buffer->reading_counter, 1, __ATOMIC_RELEASE ); 360 | 361 | // release access back 362 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 363 | 364 | return return_; 365 | } 366 | 367 | /** get is getting the reference pointer from the hashtable, it is not good for multi concurrent write 368 | while other thread is free the field value, should use read **/ 369 | void* 370 | __atomic_hash_get(__atomic_hash *atom_hash, HashKey key_) { 371 | void * return_ = NULL; 372 | size_t keyLen = strlen(key_); 373 | size_t total_size = atom_hash->total_size, i; 374 | 375 | while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0 ) { 376 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 377 | } 378 | 379 | struct __atomic_node_s *buffer = atom_hash->node_buf + (get_hash_index(key_)); 380 | 381 | for (i = 0; i < total_size ; i++) { 382 | // Use even number to prevent conflict issue with pop, even number means popable 383 | if (__atomic_fetch_add(&buffer->reading_counter, 2, __ATOMIC_ACQUIRE) % 2 == 0 && 384 | buffer->key && strncmp(key_, buffer->key, keyLen) == 0) { 385 | // printf("loop index %zu\n", i ); 386 | goto SUCCESS; 387 | } 388 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 389 | buffer = buffer->next; 390 | } 391 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 392 | return return_; 393 | SUCCESS: 394 | return_ = buffer->val; 395 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 396 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 397 | 398 | return return_; 399 | } 400 | 401 | void* 402 | __atomic_hash_read(__atomic_hash *atom_hash, HashKey key_) { 403 | void * return_ = NULL; 404 | size_t keyLen = strlen(key_); 405 | size_t total_size = atom_hash->total_size, i; 406 | 407 | while (__atomic_fetch_add(&atom_hash->accessing_counter, 2, __ATOMIC_ACQUIRE) % 2 != 0 ) { 408 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); 409 | } 410 | 411 | struct __atomic_node_s *buffer = atom_hash->node_buf + (get_hash_index(key_)); 412 | 
413 | for (i = 0; i < total_size ; i++) { 414 | // Use even number to prevent conflict issue with pop, even number means popable 415 | if (__atomic_fetch_add(&buffer->reading_counter, 2, __ATOMIC_ACQUIRE) % 2 == 0 && 416 | buffer->key && strncmp(key_, buffer->key, keyLen) == 0) { 417 | // printf("loop index %zu\n", i ); 418 | goto SUCCESS; 419 | } 420 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 421 | buffer = buffer->next; 422 | } 423 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 424 | return return_; 425 | SUCCESS: 426 | return_ = atom_hash->read_node_fn(buffer->val); 427 | __atomic_fetch_sub(&buffer->reading_counter, 2, __ATOMIC_RELEASE); // release read back 428 | __atomic_fetch_sub(&atom_hash->accessing_counter, 2, __ATOMIC_RELEASE); // release access back 429 | 430 | return return_; 431 | } 432 | 433 | void 434 | __atomic_hash_destroy(__atomic_hash *atom_hash) { 435 | int replace_val = -1; 436 | int expected_val = 0; 437 | size_t total_size = atom_hash->total_size, i; 438 | 439 | 440 | // means spinning lock for reading counter 441 | while (!__sync_bool_compare_and_swap(&atom_hash->accessing_counter 442 | , expected_val 443 | , replace_val)); 444 | 445 | // this is loop from start 446 | struct __atomic_node_s *buffer = atom_hash->node_buf; 447 | 448 | 449 | for (i = 0; i < total_size ; i++) { 450 | if (buffer->key) { 451 | __atomic_hash_free_fn(buffer->key); 452 | buffer->key = 0; 453 | 454 | if (atom_hash->free_node_fn) 455 | atom_hash->free_node_fn(buffer->val); 456 | // else __atomic_hash_free_fn(buffer->val); 457 | } 458 | buffer = buffer->next; 459 | } 460 | 461 | __atomic_hash_free_fn(atom_hash->node_buf); 462 | 463 | __atomic_hash_free_fn(atom_hash); 464 | 465 | atom_hash = 0; 466 | } 467 | 468 | // Logic Derived from djb2 by Dan Bernstein. 469 | static unsigned long 470 | hash(unsigned char *str) 471 | { 472 | unsigned long hash = 5381; 473 | int c; 474 | 475 | while ((c = *str++)) 476 | hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ 477 | 478 | return hash; 479 | } 480 | --------------------------------------------------------------------------------
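Both headers also expose init_hash_malloc_free_hooker for plugging a custom allocator into the table's internal allocations (the table struct, the node buffer, and key copies). Below is a minimal sketch of using it with the string-key table; the counting wrappers and their names are hypothetical, not part of the library. Build with `gcc -std=c11 hook_sketch.c atomic_hashtable.c -o hook_sketch`.

```c
#include <stdio.h>
#include <stdlib.h>
#include "atomic_hashtable.h"

/* Hypothetical counting wrappers, just to show the hook signatures. */
static size_t live_allocations = 0;

static void *counting_malloc(size_t sz) {
    __atomic_fetch_add(&live_allocations, 1, __ATOMIC_RELAXED);
    return malloc(sz);
}

static void counting_free(void *p) {
    __atomic_fetch_sub(&live_allocations, 1, __ATOMIC_RELAXED);
    free(p);
}

int main(void) {
    /* Install the hooks before creating the table so every internal
       allocation and free goes through the wrappers. */
    init_hash_malloc_free_hooker(counting_malloc, counting_free);

    __atomic_hash *table = __atomic_hash_init(16, NULL, NULL);  /* warnings about NULL fns are expected */
    __atomic_hash_destroy(table);

    printf("library allocations still live: %zu\n", live_allocations);
    return 0;
}
```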