├── .travis.yml
├── README.md
├── c_src
│   ├── nif.c
│   ├── queue.h
│   ├── tree.h
│   └── uthash.h
├── rebar.config
└── src
    ├── e2qc.app.src
    ├── e2qc.erl
    └── e2qc_nif.erl

/.travis.yml:
--------------------------------------------------------------------------------
1 | language: erlang
2 | otp_release:
3 |   - R16B03-1
4 |   - R16B02
5 |   - R16B01
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Erlang 2Q NIF cache.
2 | 
3 | This is an implementation of the 2Q Cache Management algorithm (http://www.inf.fu-berlin.de/lehre/WS10/DBS-Tech/Reader/2QBufferManagement.pdf) as an Erlang NIF.
4 | 
5 | 2Q is a refinement of the classic LRU (Least-Recently-Used) cache management algorithm that achieves better hit rates with many common workloads -- especially those that benefit from rejection of sequential scans. In the worst case it performs no worse than plain LRU, and it retains much of LRU's simplicity.
6 | 
7 | This implementation's primary goals are:
8 |  * a *very* simple-to-use API that is easy to integrate into your project
9 |  * high performance at high hit rates (i.e., hits being fast is preferred over misses being fast)
10 | 
11 | Cache hits can be zero-copy (using resource binaries), and updates to the cache structure are deferred to a background thread to avoid blocking the Erlang VM. Benchmarks welcome!
12 | 
13 | ## How to add e2qc to your project
14 | 
15 | 1. Add e2qc as a `rebar.config` dependency in your project:
16 | 
17 | ```
18 | {deps, [
19 |     {e2qc, ".*", {git, "git://github.com/arekinath/e2qc.git", "HEAD"}}
20 | ]}
21 | ```
22 | 
23 | 2. Use it! Wrap the slow processing that you want to cache in a call to `e2qc:cache`:
24 | 
25 | ```
26 | some_function(Input) ->
27 |     do_slow_thing(Input).
28 | 
29 | becomes
30 | 
31 | some_function(Input) ->
32 |     e2qc:cache(slow_thing, Input, fun() ->
33 |         do_slow_thing(Input)
34 |     end).
35 | ```
36 | 
37 | It's really that simple. Each "cache" is named by a unique atom (in this case we've used a cache called `slow_thing`). You don't need to explicitly create or configure the cache before using it -- it will be created on first use. The default configuration will cache up to 4MB of data.
38 | 
39 | You can use the same cache from any number of Erlang processes at once on the same node (and it will be just one shared cache). No passing around handles or pids for the cache, and no extra setup required.
40 | 
41 | ## Changing settings
42 | 
43 | If you want to adjust the `size` of the cache, or set a different Q1 `ratio` (see the paper on the 2Q algorithm for details; the default is 0.3, or 30%), use the `e2qc:setup` function:
44 | 
45 |     ok = e2qc:setup(slow_thing, [{size, 16*1024*1024}, {ratio, 0.4}]).
46 | 
47 | Put this in your startup procedure somewhere and it will configure the `slow_thing` cache with a 16MB size instead of the default 4MB, and a Q1 ratio of 0.4 (so the target size of Q1 during eviction will be 6.4MB).
48 | 
49 | Currently, if you make a call to `e2qc:setup/2` with a size that is smaller than the default, the call has to happen before the cache is used for the first time (otherwise it will throw an error). Fixing this is an open TODO.
50 | 
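To show how these pieces fit together, here is a minimal sketch of a module that configures a cache at startup and then uses it. The module name, the `users` cache name and the `db:fetch_user/1` call are hypothetical examples, not part of e2qc:

```
-module(user_lookup).
-export([init/0, get_user/1]).

%% call once from your application's startup code:
%% a 16MB cache with a Q1 target ratio of 0.4
init() ->
    ok = e2qc:setup(users, [{size, 16*1024*1024}, {ratio, 0.4}]).

%% cache the result of the (hypothetical) slow database fetch, keyed by Id
get_user(Id) ->
    e2qc:cache(users, Id, fun() ->
        db:fetch_user(Id)
    end).
```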
51 | ## Using timed eviction (expiry)
52 | 
53 | There is also support for timed eviction or expiry of entries. This is just as easy to use -- call `e2qc:cache/4` instead of `e2qc:cache/3`:
54 | 
55 |     some_function(Input) ->
56 |         e2qc:cache(slow_thing, Input, 30, fun() ->
57 |             do_slow_thing(Input)
58 |         end).
59 | 
60 | This will keep cache entries for a maximum of 30 seconds and then automatically evict them. The argument is in seconds, not milliseconds (as most other Erlang timers use), because e2qc sacrifices precision on this parameter to keep the overhead of expiry to a minimum. If you need high-precision expiry times, you should use a normal timer and a call to `e2qc:evict/2`.
61 | 
62 | ## Statistics
63 | 
64 | The `e2qc:stats/1` function is useful if you want to know how your cache is doing:
65 | 
66 |     => e2qc:stats(slow_thing).
67 |     [{hits,5674},{misses,11},{q1size,280},{q2size,735}]
68 | 
69 | ## Deleting or deliberately expiring entries
70 | 
71 | If you know that an entry in the cache is stale or needs to be evicted, you can use the `e2qc:evict/2` function to clear it out:
72 | 
73 |     e2qc:evict(slow_thing, OldInput)
74 | 
75 | Now the next attempt to look up `OldInput` will miss, and the value will be re-calculated.
76 | 
77 | You can also destroy an entire cache if you wish, using `e2qc:teardown/1`. This removes the cache and all of its entries (but note that if another call attempts to use it afterwards, it will be re-created implicitly with default settings).
78 | 
79 | ## TODO
80 | 
81 |  * Shrinking with `e2qc:setup/2` after the cache has already started
82 | 
--------------------------------------------------------------------------------
/c_src/nif.c:
--------------------------------------------------------------------------------
1 | /*
2 | %%
3 | %% e2qc erlang cache
4 | %%
5 | %% Copyright 2014 Alex Wilson , The University of Queensland
6 | %% All rights reserved.
7 | %%
8 | %% Redistribution and use in source and binary forms, with or without
9 | %% modification, are permitted provided that the following conditions
10 | %% are met:
11 | %% 1. Redistributions of source code must retain the above copyright
12 | %% notice, this list of conditions and the following disclaimer.
13 | %% 2. Redistributions in binary form must reproduce the above copyright
14 | %% notice, this list of conditions and the following disclaimer in the
15 | %% documentation and/or other materials provided with the distribution.
16 | %%
17 | %% THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 | %% IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 | %% OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 | %% IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 | %% INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 | %% NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 | %% DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 | %% THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 | %% (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 | %% THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 | */ 28 | 29 | #include 30 | #include 31 | #include 32 | #include 33 | #include 34 | #include 35 | #include 36 | #include 37 | 38 | #include 39 | 40 | #include "queue.h" 41 | #include "tree.h" 42 | 43 | /* use paul hsieh's hash function */ 44 | #define HASH_FUNCTION HASH_SFH 45 | #include "uthash.h" 46 | 47 | #include "erl_nif.h" 48 | 49 | /* 50 | * 2q cache made using TAILQs 51 | * uthash table for lookup of key -> queue node 52 | * keys and values are binaries 53 | * values are in TAILQ node (they are resource binaries) 54 | uthash nodes only have pointer to TAILQ node 55 | 56 | * global RB tree to find a cache (2q+hash) by atom name 57 | * background thread per cache to handle promotion and eviction 58 | * per-cache "promotion increment queue" that gets handled by bg thread 59 | * always do promotion before eviction 60 | * waitcond to wake up the bg thread upon insertion to queue or cache 61 | 62 | * can either explicitly create a cache with its atom name and settings 63 | or implicitly create on first use (gets settings from application:get_env 64 | or falls back to hard-coded defaults) 65 | * config: max total size (excl. overheads) default 8M 66 | 2q fill ratio default 1:1 67 | 68 | * rwlock around the global RB tree 69 | * rwlock on each cache covering hash + queues 70 | * mutex on each cache's promotion increment queue 71 | */ 72 | 73 | struct cache_node { 74 | TAILQ_ENTRY(cache_node) entry; 75 | RB_ENTRY(cache_node) expiry_entry; 76 | UT_hash_handle hh; 77 | char *key; /* key buffer, from enif_alloc */ 78 | char *val; /* value buffer, from enif_alloc_resource */ 79 | int size; /* total size (bytes) = vsize + ksize */ 80 | int vsize; /* size of value in bytes */ 81 | int ksize; /* size of key in bytes */ 82 | struct timespec expiry; /* expiry time */ 83 | struct cache *c; /* the cache we belong to */ 84 | struct cache_queue *q; /* the cache_queue we are currently on */ 85 | }; 86 | 87 | /* deferred promotion operations are queued up on the "incr_queue" of the cache 88 | this is a node on that queue */ 89 | struct cache_incr_node { 90 | TAILQ_ENTRY(cache_incr_node) entry; 91 | struct cache_node *node; 92 | }; 93 | 94 | struct cache_queue { 95 | TAILQ_HEAD(cache_q, cache_node) head; 96 | ErlNifUInt64 size; /* sum of node->size for all nodes in the queue */ 97 | }; 98 | 99 | #define FL_DYING 1 100 | 101 | struct atom_node; 102 | 103 | #define N_INCR_BKT 8 104 | 105 | /* lock ordering: cache_lock then lookup_lock then ctrl_lock */ 106 | struct cache { 107 | ErlNifUInt64 max_size; /* these are only set at construction */ 108 | ErlNifUInt64 min_q1_size; 109 | struct atom_node *atom_node; 110 | 111 | ErlNifUInt64 hit; /* protected by ctrl_lock */ 112 | ErlNifUInt64 miss; 113 | ErlNifUInt64 wakeups, dud_wakeups; 114 | int flags; 115 | 116 | TAILQ_HEAD(cache_incr_q, cache_incr_node) incr_head[N_INCR_BKT]; 117 | ErlNifMutex *incr_lock[N_INCR_BKT]; 118 | 119 | int incr_count; 120 | ErlNifMutex *ctrl_lock; 121 | ErlNifCond *check_cond; 122 | ErlNifTid bg_thread; 123 | 124 | struct cache_queue q1; /* protected by cache_lock */ 125 | struct cache_queue q2; 126 | RB_HEAD(expiry_tree, cache_node) expiry_head; 127 | ErlNifRWLock *cache_lock; 128 | 129 | struct cache_node *lookup; /* a uthash, protected by lookup_lock */ 130 | ErlNifRWLock *lookup_lock; 131 | }; 132 | 133 | /* a node in the RB tree of atom -> struct cache */ 134 | struct atom_node { 135 | RB_ENTRY(atom_node) entry; 136 | ERL_NIF_TERM atom; /* inside atom_env */ 137 | struct cache *cache; 138 | }; 139 | 140 | struct nif_globals { 
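	/* global NIF state: an RB tree of every cache keyed by its atom name
	   (atom_head, guarded by atom_lock), plus a private env (atom_env)
	   that holds copies of those atoms so the tree keys stay valid */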
141 | RB_HEAD(atom_tree, atom_node) atom_head; 142 | int atom_count; 143 | ErlNifRWLock *atom_lock; 144 | ErlNifEnv *atom_env; 145 | }; 146 | 147 | /* the resource type used for struct cache_node -> val */ 148 | static ErlNifResourceType *value_type; 149 | 150 | static struct nif_globals *gbl; 151 | 152 | /* comparison operator for the atom -> cache RB tree */ 153 | static int 154 | atom_tree_cmp(struct atom_node *a1, struct atom_node *a2) 155 | { 156 | return enif_compare(a1->atom, a2->atom); 157 | } 158 | 159 | RB_GENERATE(atom_tree, atom_node, entry, atom_tree_cmp); 160 | 161 | static int 162 | expiry_tree_cmp(struct cache_node *n1, struct cache_node *n2) 163 | { 164 | if (n1->expiry.tv_sec < n2->expiry.tv_sec) 165 | return -1; 166 | if (n1->expiry.tv_sec > n2->expiry.tv_sec) 167 | return 1; 168 | if (n1->expiry.tv_nsec < n2->expiry.tv_nsec) 169 | return -1; 170 | if (n1->expiry.tv_nsec > n2->expiry.tv_nsec) 171 | return 1; 172 | return 0; 173 | } 174 | 175 | RB_GENERATE(expiry_tree, cache_node, expiry_entry, expiry_tree_cmp); 176 | 177 | /* platform wrapper around clock_gettime (even though it's POSIX, some 178 | people, cough OSX cough, don't implement it) */ 179 | #if defined(__MACH__) 180 | # include 181 | # include 182 | #endif 183 | 184 | void 185 | clock_now(struct timespec *ts) 186 | { 187 | #if defined(__MACH__) 188 | /* this is not quite monotonic time, but hopefully it's good enough */ 189 | clock_serv_t cclock; 190 | mach_timespec_t mts; 191 | host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); 192 | clock_get_time(cclock, &mts); 193 | mach_port_deallocate(mach_task_self(), cclock); 194 | ts->tv_sec = mts.tv_sec; 195 | ts->tv_nsec = mts.tv_nsec; 196 | #else 197 | clock_gettime(CLOCK_MONOTONIC, ts); 198 | #endif 199 | } 200 | 201 | /* to call this you must have all of the caches locks held 202 | (cache_lock, lookup_lock and ctrl_lock)! 
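   It unlinks the node from its queue and from the lookup hash, removes it
   from the expiry tree if it has an expiry time set, clears any entries for
   it still sitting on the promotion (incr) queues, and then frees the key,
   releases the value resource and frees the node itself.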
*/ 203 | static void 204 | destroy_cache_node(struct cache_node *n) 205 | { 206 | struct cache_incr_node *in, *nextin; 207 | int i; 208 | 209 | TAILQ_REMOVE(&(n->q->head), n, entry); 210 | n->q->size -= n->size; 211 | n->q = NULL; 212 | HASH_DEL(n->c->lookup, n); 213 | if (n->expiry.tv_sec != 0) 214 | RB_REMOVE(expiry_tree, &(n->c->expiry_head), n); 215 | 216 | for (i = 0; i < N_INCR_BKT; ++i) { 217 | enif_mutex_lock(n->c->incr_lock[i]); 218 | nextin = TAILQ_FIRST(&(n->c->incr_head[i])); 219 | while ((in = nextin)) { 220 | nextin = TAILQ_NEXT(in, entry); 221 | if (in->node == n) { 222 | TAILQ_REMOVE(&(n->c->incr_head[i]), in, entry); 223 | __sync_sub_and_fetch(&(n->c->incr_count), 1); 224 | in->node = 0; 225 | enif_free(in); 226 | } 227 | } 228 | enif_mutex_unlock(n->c->incr_lock[i]); 229 | } 230 | 231 | n->c = NULL; 232 | enif_free(n->key); 233 | n->key = NULL; 234 | enif_release_resource(n->val); 235 | n->val = NULL; 236 | enif_free(n); 237 | } 238 | 239 | static void * 240 | cache_bg_thread(void *arg) 241 | { 242 | struct cache *c = (struct cache *)arg; 243 | int i, dud; 244 | 245 | while (1) { 246 | enif_mutex_lock(c->ctrl_lock); 247 | 248 | /* if we've been told to die, quit this loop and start cleaning up */ 249 | if (c->flags & FL_DYING) { 250 | enif_mutex_unlock(c->ctrl_lock); 251 | break; 252 | } 253 | 254 | /* sleep until there is work to do */ 255 | enif_cond_wait(c->check_cond, c->ctrl_lock); 256 | 257 | __sync_add_and_fetch(&(c->wakeups), 1); 258 | dud = 1; 259 | 260 | /* we have to let go of ctrl_lock so we can take cache_lock then 261 | ctrl_lock again to get them back in the right order */ 262 | enif_mutex_unlock(c->ctrl_lock); 263 | enif_rwlock_rwlock(c->cache_lock); 264 | enif_mutex_lock(c->ctrl_lock); 265 | 266 | /* first process the promotion queue before we do any evicting */ 267 | for (i = 0; i < N_INCR_BKT; ++i) { 268 | enif_mutex_lock(c->incr_lock[i]); 269 | while (!TAILQ_EMPTY(&(c->incr_head[i]))) { 270 | struct cache_incr_node *n; 271 | n = TAILQ_FIRST(&(c->incr_head[i])); 272 | TAILQ_REMOVE(&(c->incr_head[i]), n, entry); 273 | __sync_sub_and_fetch(&(c->incr_count), 1); 274 | 275 | dud = 0; 276 | 277 | /* let go of the ctrl_lock here, we don't need it when we aren't looking 278 | at the incr_queue, and this way other threads can use it while we shuffle 279 | queue nodes around */ 280 | enif_mutex_unlock(c->incr_lock[i]); 281 | enif_mutex_unlock(c->ctrl_lock); 282 | 283 | if (n->node->q == &(c->q1)) { 284 | TAILQ_REMOVE(&(c->q1.head), n->node, entry); 285 | c->q1.size -= n->node->size; 286 | TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry); 287 | n->node->q = &(c->q2); 288 | c->q2.size += n->node->size; 289 | 290 | } else if (n->node->q == &(c->q2)) { 291 | TAILQ_REMOVE(&(c->q2.head), n->node, entry); 292 | TAILQ_INSERT_HEAD(&(c->q2.head), n->node, entry); 293 | } 294 | 295 | enif_free(n); 296 | 297 | /* take the ctrl_lock back again for the next loop around */ 298 | enif_mutex_lock(c->ctrl_lock); 299 | enif_mutex_lock(c->incr_lock[i]); 300 | } 301 | enif_mutex_unlock(c->incr_lock[i]); 302 | } 303 | 304 | /* let go of the ctrl_lock here for two reasons: 305 | 1. avoid lock inversion, because if we have evictions to do we 306 | will need to take lookup_lock, and we must take lookup_lock 307 | before taking ctrl_lock 308 | 2. 
if we don't need to do evictions, we're done with the structures 309 | that are behind ctrl_lock so we should give it up for others */ 310 | enif_mutex_unlock(c->ctrl_lock); 311 | 312 | /* do timed evictions -- if anything has expired, nuke it */ 313 | { 314 | struct cache_node *n; 315 | if ((n = RB_MIN(expiry_tree, &(c->expiry_head)))) { 316 | struct timespec now; 317 | clock_now(&now); 318 | while (n && n->expiry.tv_sec < now.tv_sec) { 319 | enif_mutex_lock(c->ctrl_lock); 320 | dud = 0; 321 | destroy_cache_node(n); 322 | enif_mutex_unlock(c->ctrl_lock); 323 | n = RB_MIN(expiry_tree, &(c->expiry_head)); 324 | } 325 | } 326 | } 327 | 328 | /* now check if we need to do ordinary size limit evictions */ 329 | if (c->q1.size + c->q2.size > c->max_size) { 330 | enif_rwlock_rwlock(c->lookup_lock); 331 | enif_mutex_lock(c->ctrl_lock); 332 | 333 | while ((c->q1.size + c->q2.size > c->max_size) && 334 | (c->q1.size > c->min_q1_size)) { 335 | struct cache_node *n; 336 | n = TAILQ_LAST(&(c->q1.head), cache_q); 337 | destroy_cache_node(n); 338 | } 339 | 340 | while (c->q1.size + c->q2.size > c->max_size) { 341 | struct cache_node *n; 342 | n = TAILQ_LAST(&(c->q2.head), cache_q); 343 | destroy_cache_node(n); 344 | } 345 | 346 | dud = 0; 347 | 348 | enif_mutex_unlock(c->ctrl_lock); 349 | enif_rwlock_rwunlock(c->lookup_lock); 350 | } 351 | 352 | if (dud) 353 | __sync_add_and_fetch(&(c->dud_wakeups), 1); 354 | /* now let go of the cache_lock that we took right back at the start of 355 | this iteration */ 356 | enif_rwlock_rwunlock(c->cache_lock); 357 | } 358 | 359 | /* first remove us from the atom_tree, so we get no new operations coming in */ 360 | enif_rwlock_rwlock(gbl->atom_lock); 361 | RB_REMOVE(atom_tree, &(gbl->atom_head), c->atom_node); 362 | enif_rwlock_rwunlock(gbl->atom_lock); 363 | enif_free(c->atom_node); 364 | 365 | /* now take all of our locks, to make sure any pending operations are done */ 366 | enif_rwlock_rwlock(c->cache_lock); 367 | enif_rwlock_rwlock(c->lookup_lock); 368 | enif_mutex_lock(c->ctrl_lock); 369 | 370 | c->atom_node = NULL; 371 | 372 | /* free the actual cache queues */ 373 | { 374 | struct cache_node *n, *nextn; 375 | nextn = TAILQ_FIRST(&(c->q1.head)); 376 | while ((n = nextn)) { 377 | nextn = TAILQ_NEXT(n, entry); 378 | destroy_cache_node(n); 379 | } 380 | nextn = TAILQ_FIRST(&(c->q2.head)); 381 | while ((n = nextn)) { 382 | nextn = TAILQ_NEXT(n, entry); 383 | destroy_cache_node(n); 384 | } 385 | } 386 | 387 | for (i = 0; i < N_INCR_BKT; ++i) 388 | enif_mutex_lock(c->incr_lock[i]); 389 | 390 | /* free the incr_queue */ 391 | for (i = 0; i < N_INCR_BKT; ++i) { 392 | struct cache_incr_node *in, *nextin; 393 | nextin = TAILQ_FIRST(&(c->incr_head[i])); 394 | while ((in = nextin)) { 395 | nextin = TAILQ_NEXT(in, entry); 396 | TAILQ_REMOVE(&(c->incr_head[i]), in, entry); 397 | in->node = 0; 398 | enif_free(in); 399 | } 400 | enif_mutex_unlock(c->incr_lock[i]); 401 | enif_mutex_destroy(c->incr_lock[i]); 402 | } 403 | 404 | /* unlock and destroy! 
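   (the locks are released and destroyed in the reverse of the order in which
   they were taken above: ctrl_lock, then lookup_lock, then cache_lock)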
*/ 405 | enif_cond_destroy(c->check_cond); 406 | 407 | enif_mutex_unlock(c->ctrl_lock); 408 | enif_mutex_destroy(c->ctrl_lock); 409 | 410 | enif_rwlock_rwunlock(c->lookup_lock); 411 | enif_rwlock_destroy(c->lookup_lock); 412 | 413 | enif_rwlock_rwunlock(c->cache_lock); 414 | enif_rwlock_destroy(c->cache_lock); 415 | 416 | enif_free(c); 417 | 418 | return 0; 419 | } 420 | 421 | static struct cache * 422 | get_cache(ERL_NIF_TERM atom) 423 | { 424 | struct atom_node n, *res; 425 | struct cache *ret = NULL; 426 | 427 | memset(&n, 0, sizeof(n)); 428 | n.atom = atom; 429 | 430 | enif_rwlock_rlock(gbl->atom_lock); 431 | res = RB_FIND(atom_tree, &(gbl->atom_head), &n); 432 | if (res) 433 | ret = res->cache; 434 | enif_rwlock_runlock(gbl->atom_lock); 435 | 436 | return ret; 437 | } 438 | 439 | static struct cache * 440 | new_cache(ERL_NIF_TERM atom, int max_size, int min_q1_size) 441 | { 442 | struct cache *c; 443 | struct atom_node *an; 444 | int i; 445 | 446 | c = enif_alloc(sizeof(*c)); 447 | memset(c, 0, sizeof(*c)); 448 | c->max_size = max_size; 449 | c->min_q1_size = min_q1_size; 450 | c->lookup_lock = enif_rwlock_create("cache->lookup_lock"); 451 | c->cache_lock = enif_rwlock_create("cache->cache_lock"); 452 | c->ctrl_lock = enif_mutex_create("cache->ctrl_lock"); 453 | c->check_cond = enif_cond_create("cache->check_cond"); 454 | TAILQ_INIT(&(c->q1.head)); 455 | TAILQ_INIT(&(c->q2.head)); 456 | for (i = 0; i < N_INCR_BKT; ++i) { 457 | TAILQ_INIT(&(c->incr_head[i])); 458 | c->incr_lock[i] = enif_mutex_create("cache->incr_lock"); 459 | } 460 | RB_INIT(&(c->expiry_head)); 461 | 462 | an = enif_alloc(sizeof(*an)); 463 | memset(an, 0, sizeof(*an)); 464 | an->atom = enif_make_copy(gbl->atom_env, atom); 465 | an->cache = c; 466 | 467 | c->atom_node = an; 468 | 469 | enif_rwlock_rwlock(gbl->atom_lock); 470 | RB_INSERT(atom_tree, &(gbl->atom_head), an); 471 | /* start the background thread for the cache. 
after this, the bg thread now 472 | owns the cache and all its data and will free it at exit */ 473 | enif_thread_create("cachethread", &(c->bg_thread), cache_bg_thread, c, NULL); 474 | enif_rwlock_rwunlock(gbl->atom_lock); 475 | 476 | return c; 477 | } 478 | 479 | /* destroy(Cache :: atom()) -- destroys and entire cache 480 | destroy(Cache :: atom(), Key :: binary()) -- removes an entry 481 | from a cache */ 482 | static ERL_NIF_TERM 483 | destroy(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) 484 | { 485 | ERL_NIF_TERM atom; 486 | struct cache *c; 487 | ErlNifBinary kbin; 488 | struct cache_node *n; 489 | 490 | if (!enif_is_atom(env, argv[0])) 491 | return enif_make_badarg(env); 492 | atom = argv[0]; 493 | 494 | if ((c = get_cache(atom))) { 495 | if (argc == 2) { 496 | if (!enif_inspect_binary(env, argv[1], &kbin)) 497 | return enif_make_badarg(env); 498 | 499 | enif_rwlock_rwlock(c->cache_lock); 500 | enif_rwlock_rwlock(c->lookup_lock); 501 | 502 | HASH_FIND(hh, c->lookup, kbin.data, kbin.size, n); 503 | if (!n) { 504 | enif_rwlock_rwunlock(c->lookup_lock); 505 | enif_rwlock_rwunlock(c->cache_lock); 506 | return enif_make_atom(env, "notfound"); 507 | } 508 | 509 | enif_mutex_lock(c->ctrl_lock); 510 | 511 | destroy_cache_node(n); 512 | 513 | enif_mutex_unlock(c->ctrl_lock); 514 | enif_rwlock_rwunlock(c->lookup_lock); 515 | enif_rwlock_rwunlock(c->cache_lock); 516 | 517 | enif_consume_timeslice(env, 50); 518 | 519 | return enif_make_atom(env, "ok"); 520 | 521 | } else { 522 | enif_mutex_lock(c->ctrl_lock); 523 | c->flags |= FL_DYING; 524 | enif_mutex_unlock(c->ctrl_lock); 525 | enif_cond_broadcast(c->check_cond); 526 | 527 | enif_thread_join(c->bg_thread, NULL); 528 | 529 | enif_consume_timeslice(env, 100); 530 | 531 | return enif_make_atom(env, "ok"); 532 | } 533 | 534 | return enif_make_atom(env, "ok"); 535 | } 536 | 537 | return enif_make_atom(env, "notfound"); 538 | } 539 | 540 | /* create(Cache :: atom(), MaxSize :: integer(), MinQ1Size :: integer()) */ 541 | static ERL_NIF_TERM 542 | create(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) 543 | { 544 | ERL_NIF_TERM atom; 545 | ErlNifUInt64 max_size, min_q1_size; 546 | struct cache *c; 547 | 548 | if (!enif_is_atom(env, argv[0])) 549 | return enif_make_badarg(env); 550 | atom = argv[0]; 551 | 552 | if (!enif_get_uint64(env, argv[1], &max_size)) 553 | return enif_make_badarg(env); 554 | if (!enif_get_uint64(env, argv[2], &min_q1_size)) 555 | return enif_make_badarg(env); 556 | 557 | if ((c = get_cache(atom))) { 558 | ERL_NIF_TERM ret = enif_make_atom(env, "already_exists"); 559 | enif_consume_timeslice(env, 5); 560 | 561 | enif_rwlock_rwlock(c->cache_lock); 562 | /* expansion is safe because we don't have to engage the background 563 | thread and won't cause sudden eviction pressure 564 | TODO: a nice way to shrink the cache without seizing it up */ 565 | if (c->max_size < max_size && c->min_q1_size < min_q1_size) { 566 | c->max_size = max_size; 567 | c->min_q1_size = min_q1_size; 568 | enif_rwlock_rwunlock(c->cache_lock); 569 | 570 | ret = enif_make_atom(env, "ok"); 571 | enif_consume_timeslice(env, 10); 572 | } else { 573 | enif_rwlock_rwunlock(c->cache_lock); 574 | } 575 | 576 | return ret; 577 | } else { 578 | c = new_cache(atom, max_size, min_q1_size); 579 | enif_consume_timeslice(env, 20); 580 | return enif_make_atom(env, "ok"); 581 | } 582 | } 583 | 584 | static ERL_NIF_TERM 585 | stats(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) 586 | { 587 | ERL_NIF_TERM atom; 588 | ERL_NIF_TERM ret, q1s, q2s, incrs, 
wakeups, duds; 589 | struct cache *c; 590 | 591 | if (!enif_is_atom(env, argv[0])) 592 | return enif_make_badarg(env); 593 | atom = argv[0]; 594 | 595 | if ((c = get_cache(atom))) { 596 | enif_rwlock_rlock(c->cache_lock); 597 | q1s = enif_make_uint64(env, c->q1.size); 598 | q2s = enif_make_uint64(env, c->q2.size); 599 | incrs = enif_make_uint64(env, __sync_fetch_and_add(&(c->incr_count), 0)); 600 | wakeups = enif_make_uint64(env, __sync_fetch_and_add(&(c->wakeups), 0)); 601 | duds = enif_make_uint64(env, __sync_fetch_and_add(&(c->dud_wakeups), 0)); 602 | enif_rwlock_runlock(c->cache_lock); 603 | ret = enif_make_tuple7(env, 604 | enif_make_uint64(env, c->hit), 605 | enif_make_uint64(env, c->miss), 606 | q1s, q2s, incrs, wakeups, duds); 607 | enif_consume_timeslice(env, 10); 608 | return ret; 609 | } else { 610 | return enif_make_atom(env, "notfound"); 611 | } 612 | } 613 | 614 | static ERL_NIF_TERM 615 | put(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) 616 | { 617 | ERL_NIF_TERM atom; 618 | ErlNifBinary kbin, vbin; 619 | struct cache *c; 620 | struct cache_node *n, *ng; 621 | ErlNifUInt64 lifetime = 0; 622 | 623 | if (!enif_is_atom(env, argv[0])) 624 | return enif_make_badarg(env); 625 | atom = argv[0]; 626 | 627 | if (!enif_inspect_binary(env, argv[1], &kbin)) 628 | return enif_make_badarg(env); 629 | if (!enif_inspect_binary(env, argv[2], &vbin)) 630 | return enif_make_badarg(env); 631 | 632 | if ((c = get_cache(atom))) { 633 | enif_consume_timeslice(env, 1); 634 | 635 | } else { 636 | /* if we've been asked to put() in to a cache that doesn't exist yet 637 | then we should create it! */ 638 | ErlNifUInt64 max_size, min_q1_size; 639 | if (!enif_get_uint64(env, argv[3], &max_size)) 640 | return enif_make_badarg(env); 641 | if (!enif_get_uint64(env, argv[4], &min_q1_size)) 642 | return enif_make_badarg(env); 643 | c = new_cache(atom, max_size, min_q1_size); 644 | enif_consume_timeslice(env, 20); 645 | } 646 | 647 | if (argc > 5) 648 | if (!enif_get_uint64(env, argv[5], &lifetime)) 649 | return enif_make_badarg(env); 650 | 651 | n = enif_alloc(sizeof(*n)); 652 | memset(n, 0, sizeof(*n)); 653 | n->c = c; 654 | n->vsize = vbin.size; 655 | n->ksize = kbin.size; 656 | n->size = vbin.size + kbin.size; 657 | n->key = enif_alloc(kbin.size); 658 | memcpy(n->key, kbin.data, kbin.size); 659 | n->val = enif_alloc_resource(value_type, vbin.size); 660 | memcpy(n->val, vbin.data, vbin.size); 661 | n->q = &(c->q1); 662 | if (lifetime) { 663 | clock_now(&(n->expiry)); 664 | n->expiry.tv_sec += lifetime; 665 | } 666 | 667 | enif_rwlock_rwlock(c->cache_lock); 668 | enif_rwlock_rwlock(c->lookup_lock); 669 | HASH_FIND(hh, c->lookup, kbin.data, kbin.size, ng); 670 | if (ng) { 671 | enif_mutex_lock(c->ctrl_lock); 672 | destroy_cache_node(ng); 673 | enif_mutex_unlock(c->ctrl_lock); 674 | } 675 | TAILQ_INSERT_HEAD(&(c->q1.head), n, entry); 676 | c->q1.size += n->size; 677 | HASH_ADD_KEYPTR(hh, c->lookup, n->key, n->ksize, n); 678 | if (lifetime) { 679 | struct cache_node *rn; 680 | rn = RB_INSERT(expiry_tree, &(c->expiry_head), n); 681 | /* it's possible to get two timestamps that are the same, if this happens 682 | just bump us forwards by 1 usec until we're unique */ 683 | while (rn != NULL) { 684 | ++(n->expiry.tv_nsec); 685 | rn = RB_INSERT(expiry_tree, &(c->expiry_head), n); 686 | } 687 | } 688 | enif_rwlock_rwunlock(c->lookup_lock); 689 | enif_rwlock_rwunlock(c->cache_lock); 690 | 691 | enif_cond_broadcast(c->check_cond); 692 | enif_consume_timeslice(env, 50); 693 | 694 | return enif_make_atom(env, 
"ok"); 695 | } 696 | 697 | static ERL_NIF_TERM 698 | get(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[]) 699 | { 700 | ERL_NIF_TERM atom; 701 | ErlNifBinary kbin; 702 | struct cache *c; 703 | struct cache_node *n; 704 | struct cache_incr_node *in; 705 | struct timespec now; 706 | int incrqs, hashv, bkt; 707 | ERL_NIF_TERM ret; 708 | ErlNifTid tid; 709 | 710 | if (!enif_is_atom(env, argv[0])) 711 | return enif_make_badarg(env); 712 | atom = argv[0]; 713 | 714 | if (!enif_inspect_binary(env, argv[1], &kbin)) 715 | return enif_make_badarg(env); 716 | 717 | if ((c = get_cache(atom))) { 718 | enif_rwlock_rlock(c->lookup_lock); 719 | HASH_FIND(hh, c->lookup, kbin.data, kbin.size, n); 720 | if (!n) { 721 | enif_rwlock_runlock(c->lookup_lock); 722 | __sync_add_and_fetch(&c->miss, 1); 723 | enif_consume_timeslice(env, 10); 724 | return enif_make_atom(env, "notfound"); 725 | } 726 | 727 | if (n->expiry.tv_sec != 0) { 728 | clock_now(&now); 729 | if (n->expiry.tv_sec < now.tv_sec) { 730 | enif_rwlock_runlock(c->lookup_lock); 731 | __sync_add_and_fetch(&c->miss, 1); 732 | enif_consume_timeslice(env, 10); 733 | return enif_make_atom(env, "notfound"); 734 | } 735 | } 736 | 737 | in = enif_alloc(sizeof(*in)); 738 | memset(in, 0, sizeof(*in)); 739 | in->node = n; 740 | __sync_add_and_fetch(&c->hit, 1); 741 | 742 | tid = enif_thread_self(); 743 | HASH_SFH(&tid, sizeof(ErlNifTid), N_INCR_BKT, hashv, bkt); 744 | enif_mutex_lock(c->incr_lock[bkt]); 745 | TAILQ_INSERT_TAIL(&(c->incr_head[bkt]), in, entry); 746 | enif_mutex_unlock(c->incr_lock[bkt]); 747 | incrqs = __sync_add_and_fetch(&(c->incr_count), 1); 748 | 749 | ret = enif_make_resource_binary(env, n->val, n->val, n->vsize); 750 | enif_rwlock_runlock(c->lookup_lock); 751 | 752 | if (incrqs > 1024) 753 | enif_cond_broadcast(c->check_cond); 754 | 755 | enif_consume_timeslice(env, 20); 756 | 757 | return ret; 758 | 759 | } 760 | 761 | return enif_make_atom(env, "notfound"); 762 | } 763 | 764 | static int 765 | load_cb(ErlNifEnv *env, void **priv_data, ERL_NIF_TERM load_info) 766 | { 767 | ErlNifResourceFlags tried; 768 | 769 | gbl = enif_alloc(sizeof(*gbl)); 770 | memset(gbl, 0, sizeof(*gbl)); 771 | RB_INIT(&(gbl->atom_head)); 772 | gbl->atom_lock = enif_rwlock_create("gbl->atom_lock"); 773 | gbl->atom_env = enif_alloc_env(); 774 | 775 | value_type = enif_open_resource_type(env, NULL, "value", NULL, 776 | ERL_NIF_RT_CREATE | ERL_NIF_RT_TAKEOVER, &tried); 777 | 778 | return 0; 779 | } 780 | 781 | static void 782 | unload_cb(ErlNifEnv *env, void *priv_data) 783 | { 784 | struct atom_node *an; 785 | 786 | enif_rwlock_rwlock(gbl->atom_lock); 787 | 788 | /* when we unload, we want to tell all of the active caches to die, 789 | then join() their bg_threads to wait until they're completely gone */ 790 | while ((an = RB_MIN(atom_tree, &(gbl->atom_head)))) { 791 | struct cache *c = an->cache; 792 | enif_rwlock_rwunlock(gbl->atom_lock); 793 | 794 | enif_mutex_lock(c->ctrl_lock); 795 | c->flags |= FL_DYING; 796 | enif_mutex_unlock(c->ctrl_lock); 797 | enif_cond_broadcast(c->check_cond); 798 | 799 | enif_thread_join(c->bg_thread, NULL); 800 | 801 | enif_rwlock_rwlock(gbl->atom_lock); 802 | } 803 | 804 | enif_rwlock_rwunlock(gbl->atom_lock); 805 | enif_rwlock_destroy(gbl->atom_lock); 806 | enif_clear_env(gbl->atom_env); 807 | enif_free(gbl); 808 | 809 | gbl = NULL; 810 | } 811 | 812 | static ErlNifFunc nif_funcs[] = 813 | { 814 | {"get", 2, get}, 815 | {"put", 5, put}, 816 | {"put", 6, put}, 817 | {"create", 3, create}, 818 | {"destroy", 1, destroy}, 819 | 
{"destroy", 2, destroy}, 820 | {"stats", 1, stats} 821 | }; 822 | 823 | ERL_NIF_INIT(e2qc_nif, nif_funcs, load_cb, NULL, NULL, unload_cb) 824 | -------------------------------------------------------------------------------- /c_src/queue.h: -------------------------------------------------------------------------------- 1 | /* $OpenBSD: queue.h,v 1.36 2012/04/11 13:29:14 naddy Exp $ */ 2 | /* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */ 3 | 4 | /* 5 | * Copyright (c) 1991, 1993 6 | * The Regents of the University of California. All rights reserved. 7 | * 8 | * Redistribution and use in source and binary forms, with or without 9 | * modification, are permitted provided that the following conditions 10 | * are met: 11 | * 1. Redistributions of source code must retain the above copyright 12 | * notice, this list of conditions and the following disclaimer. 13 | * 2. Redistributions in binary form must reproduce the above copyright 14 | * notice, this list of conditions and the following disclaimer in the 15 | * documentation and/or other materials provided with the distribution. 16 | * 3. Neither the name of the University nor the names of its contributors 17 | * may be used to endorse or promote products derived from this software 18 | * without specific prior written permission. 19 | * 20 | * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 | * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 | * SUCH DAMAGE. 31 | * 32 | * @(#)queue.h 8.5 (Berkeley) 8/20/94 33 | */ 34 | 35 | #ifndef _SYS_QUEUE_H_ 36 | #define _SYS_QUEUE_H_ 37 | 38 | /* 39 | * This file defines five types of data structures: singly-linked lists, 40 | * lists, simple queues, tail queues, and circular queues. 41 | * 42 | * 43 | * A singly-linked list is headed by a single forward pointer. The elements 44 | * are singly linked for minimum space and pointer manipulation overhead at 45 | * the expense of O(n) removal for arbitrary elements. New elements can be 46 | * added to the list after an existing element or at the head of the list. 47 | * Elements being removed from the head of the list should use the explicit 48 | * macro for this purpose for optimum efficiency. A singly-linked list may 49 | * only be traversed in the forward direction. Singly-linked lists are ideal 50 | * for applications with large datasets and few or no removals or for 51 | * implementing a LIFO queue. 52 | * 53 | * A list is headed by a single forward pointer (or an array of forward 54 | * pointers for a hash table header). The elements are doubly linked 55 | * so that an arbitrary element can be removed without a need to 56 | * traverse the list. New elements can be added to the list before 57 | * or after an existing element or at the head of the list. A list 58 | * may only be traversed in the forward direction. 
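 *
 * As a brief usage sketch (not part of the original header; "foo" is an
 * assumed element type), a list is declared and used roughly like this:
 *
 *	struct foo { int value; LIST_ENTRY(foo) link; };
 *	LIST_HEAD(foo_head, foo) head = LIST_HEAD_INITIALIZER(head);
 *	struct foo *f;
 *	...
 *	LIST_INSERT_HEAD(&head, f, link);
 *	LIST_FOREACH(f, &head, link)
 *		do_something(f->value);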
59 | * 60 | * A simple queue is headed by a pair of pointers, one the head of the 61 | * list and the other to the tail of the list. The elements are singly 62 | * linked to save space, so elements can only be removed from the 63 | * head of the list. New elements can be added to the list before or after 64 | * an existing element, at the head of the list, or at the end of the 65 | * list. A simple queue may only be traversed in the forward direction. 66 | * 67 | * A tail queue is headed by a pair of pointers, one to the head of the 68 | * list and the other to the tail of the list. The elements are doubly 69 | * linked so that an arbitrary element can be removed without a need to 70 | * traverse the list. New elements can be added to the list before or 71 | * after an existing element, at the head of the list, or at the end of 72 | * the list. A tail queue may be traversed in either direction. 73 | * 74 | * A circle queue is headed by a pair of pointers, one to the head of the 75 | * list and the other to the tail of the list. The elements are doubly 76 | * linked so that an arbitrary element can be removed without a need to 77 | * traverse the list. New elements can be added to the list before or after 78 | * an existing element, at the head of the list, or at the end of the list. 79 | * A circle queue may be traversed in either direction, but has a more 80 | * complex end of list detection. 81 | * 82 | * For details on the use of these macros, see the queue(3) manual page. 83 | */ 84 | 85 | #if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC)) 86 | #define _Q_INVALIDATE(a) (a) = ((void *)-1) 87 | #else 88 | #define _Q_INVALIDATE(a) 89 | #endif 90 | 91 | /* 92 | * Singly-linked List definitions. 93 | */ 94 | #define SLIST_HEAD(name, type) \ 95 | struct name { \ 96 | struct type *slh_first; /* first element */ \ 97 | } 98 | 99 | #define SLIST_HEAD_INITIALIZER(head) \ 100 | { NULL } 101 | 102 | #define SLIST_ENTRY(type) \ 103 | struct { \ 104 | struct type *sle_next; /* next element */ \ 105 | } 106 | 107 | /* 108 | * Singly-linked List access methods. 109 | */ 110 | #define SLIST_FIRST(head) ((head)->slh_first) 111 | #define SLIST_END(head) NULL 112 | #define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head)) 113 | #define SLIST_NEXT(elm, field) ((elm)->field.sle_next) 114 | 115 | #define SLIST_FOREACH(var, head, field) \ 116 | for((var) = SLIST_FIRST(head); \ 117 | (var) != SLIST_END(head); \ 118 | (var) = SLIST_NEXT(var, field)) 119 | 120 | #define SLIST_FOREACH_SAFE(var, head, field, tvar) \ 121 | for ((var) = SLIST_FIRST(head); \ 122 | (var) && ((tvar) = SLIST_NEXT(var, field), 1); \ 123 | (var) = (tvar)) 124 | 125 | /* 126 | * Singly-linked List functions. 
127 | */ 128 | #define SLIST_INIT(head) { \ 129 | SLIST_FIRST(head) = SLIST_END(head); \ 130 | } 131 | 132 | #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \ 133 | (elm)->field.sle_next = (slistelm)->field.sle_next; \ 134 | (slistelm)->field.sle_next = (elm); \ 135 | } while (0) 136 | 137 | #define SLIST_INSERT_HEAD(head, elm, field) do { \ 138 | (elm)->field.sle_next = (head)->slh_first; \ 139 | (head)->slh_first = (elm); \ 140 | } while (0) 141 | 142 | #define SLIST_REMOVE_AFTER(elm, field) do { \ 143 | (elm)->field.sle_next = (elm)->field.sle_next->field.sle_next; \ 144 | } while (0) 145 | 146 | #define SLIST_REMOVE_HEAD(head, field) do { \ 147 | (head)->slh_first = (head)->slh_first->field.sle_next; \ 148 | } while (0) 149 | 150 | #define SLIST_REMOVE(head, elm, type, field) do { \ 151 | if ((head)->slh_first == (elm)) { \ 152 | SLIST_REMOVE_HEAD((head), field); \ 153 | } else { \ 154 | struct type *curelm = (head)->slh_first; \ 155 | \ 156 | while (curelm->field.sle_next != (elm)) \ 157 | curelm = curelm->field.sle_next; \ 158 | curelm->field.sle_next = \ 159 | curelm->field.sle_next->field.sle_next; \ 160 | _Q_INVALIDATE((elm)->field.sle_next); \ 161 | } \ 162 | } while (0) 163 | 164 | /* 165 | * List definitions. 166 | */ 167 | #define LIST_HEAD(name, type) \ 168 | struct name { \ 169 | struct type *lh_first; /* first element */ \ 170 | } 171 | 172 | #define LIST_HEAD_INITIALIZER(head) \ 173 | { NULL } 174 | 175 | #define LIST_ENTRY(type) \ 176 | struct { \ 177 | struct type *le_next; /* next element */ \ 178 | struct type **le_prev; /* address of previous next element */ \ 179 | } 180 | 181 | /* 182 | * List access methods 183 | */ 184 | #define LIST_FIRST(head) ((head)->lh_first) 185 | #define LIST_END(head) NULL 186 | #define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head)) 187 | #define LIST_NEXT(elm, field) ((elm)->field.le_next) 188 | 189 | #define LIST_FOREACH(var, head, field) \ 190 | for((var) = LIST_FIRST(head); \ 191 | (var)!= LIST_END(head); \ 192 | (var) = LIST_NEXT(var, field)) 193 | 194 | #define LIST_FOREACH_SAFE(var, head, field, tvar) \ 195 | for ((var) = LIST_FIRST(head); \ 196 | (var) && ((tvar) = LIST_NEXT(var, field), 1); \ 197 | (var) = (tvar)) 198 | 199 | /* 200 | * List functions. 
201 | */ 202 | #define LIST_INIT(head) do { \ 203 | LIST_FIRST(head) = LIST_END(head); \ 204 | } while (0) 205 | 206 | #define LIST_INSERT_AFTER(listelm, elm, field) do { \ 207 | if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ 208 | (listelm)->field.le_next->field.le_prev = \ 209 | &(elm)->field.le_next; \ 210 | (listelm)->field.le_next = (elm); \ 211 | (elm)->field.le_prev = &(listelm)->field.le_next; \ 212 | } while (0) 213 | 214 | #define LIST_INSERT_BEFORE(listelm, elm, field) do { \ 215 | (elm)->field.le_prev = (listelm)->field.le_prev; \ 216 | (elm)->field.le_next = (listelm); \ 217 | *(listelm)->field.le_prev = (elm); \ 218 | (listelm)->field.le_prev = &(elm)->field.le_next; \ 219 | } while (0) 220 | 221 | #define LIST_INSERT_HEAD(head, elm, field) do { \ 222 | if (((elm)->field.le_next = (head)->lh_first) != NULL) \ 223 | (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ 224 | (head)->lh_first = (elm); \ 225 | (elm)->field.le_prev = &(head)->lh_first; \ 226 | } while (0) 227 | 228 | #define LIST_REMOVE(elm, field) do { \ 229 | if ((elm)->field.le_next != NULL) \ 230 | (elm)->field.le_next->field.le_prev = \ 231 | (elm)->field.le_prev; \ 232 | *(elm)->field.le_prev = (elm)->field.le_next; \ 233 | _Q_INVALIDATE((elm)->field.le_prev); \ 234 | _Q_INVALIDATE((elm)->field.le_next); \ 235 | } while (0) 236 | 237 | #define LIST_REPLACE(elm, elm2, field) do { \ 238 | if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \ 239 | (elm2)->field.le_next->field.le_prev = \ 240 | &(elm2)->field.le_next; \ 241 | (elm2)->field.le_prev = (elm)->field.le_prev; \ 242 | *(elm2)->field.le_prev = (elm2); \ 243 | _Q_INVALIDATE((elm)->field.le_prev); \ 244 | _Q_INVALIDATE((elm)->field.le_next); \ 245 | } while (0) 246 | 247 | /* 248 | * Simple queue definitions. 249 | */ 250 | #define SIMPLEQ_HEAD(name, type) \ 251 | struct name { \ 252 | struct type *sqh_first; /* first element */ \ 253 | struct type **sqh_last; /* addr of last next element */ \ 254 | } 255 | 256 | #define SIMPLEQ_HEAD_INITIALIZER(head) \ 257 | { NULL, &(head).sqh_first } 258 | 259 | #define SIMPLEQ_ENTRY(type) \ 260 | struct { \ 261 | struct type *sqe_next; /* next element */ \ 262 | } 263 | 264 | /* 265 | * Simple queue access methods. 266 | */ 267 | #define SIMPLEQ_FIRST(head) ((head)->sqh_first) 268 | #define SIMPLEQ_END(head) NULL 269 | #define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head)) 270 | #define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) 271 | 272 | #define SIMPLEQ_FOREACH(var, head, field) \ 273 | for((var) = SIMPLEQ_FIRST(head); \ 274 | (var) != SIMPLEQ_END(head); \ 275 | (var) = SIMPLEQ_NEXT(var, field)) 276 | 277 | #define SIMPLEQ_FOREACH_SAFE(var, head, field, tvar) \ 278 | for ((var) = SIMPLEQ_FIRST(head); \ 279 | (var) && ((tvar) = SIMPLEQ_NEXT(var, field), 1); \ 280 | (var) = (tvar)) 281 | 282 | /* 283 | * Simple queue functions. 
284 | */ 285 | #define SIMPLEQ_INIT(head) do { \ 286 | (head)->sqh_first = NULL; \ 287 | (head)->sqh_last = &(head)->sqh_first; \ 288 | } while (0) 289 | 290 | #define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ 291 | if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ 292 | (head)->sqh_last = &(elm)->field.sqe_next; \ 293 | (head)->sqh_first = (elm); \ 294 | } while (0) 295 | 296 | #define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ 297 | (elm)->field.sqe_next = NULL; \ 298 | *(head)->sqh_last = (elm); \ 299 | (head)->sqh_last = &(elm)->field.sqe_next; \ 300 | } while (0) 301 | 302 | #define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ 303 | if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ 304 | (head)->sqh_last = &(elm)->field.sqe_next; \ 305 | (listelm)->field.sqe_next = (elm); \ 306 | } while (0) 307 | 308 | #define SIMPLEQ_REMOVE_HEAD(head, field) do { \ 309 | if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ 310 | (head)->sqh_last = &(head)->sqh_first; \ 311 | } while (0) 312 | 313 | #define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \ 314 | if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \ 315 | == NULL) \ 316 | (head)->sqh_last = &(elm)->field.sqe_next; \ 317 | } while (0) 318 | 319 | /* 320 | * Tail queue definitions. 321 | */ 322 | #define TAILQ_HEAD(name, type) \ 323 | struct name { \ 324 | struct type *tqh_first; /* first element */ \ 325 | struct type **tqh_last; /* addr of last next element */ \ 326 | } 327 | 328 | #define TAILQ_HEAD_INITIALIZER(head) \ 329 | { NULL, &(head).tqh_first } 330 | 331 | #define TAILQ_ENTRY(type) \ 332 | struct { \ 333 | struct type *tqe_next; /* next element */ \ 334 | struct type **tqe_prev; /* address of previous next element */ \ 335 | } 336 | 337 | /* 338 | * tail queue access methods 339 | */ 340 | #define TAILQ_FIRST(head) ((head)->tqh_first) 341 | #define TAILQ_END(head) NULL 342 | #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) 343 | #define TAILQ_LAST(head, headname) \ 344 | (*(((struct headname *)((head)->tqh_last))->tqh_last)) 345 | /* XXX */ 346 | #define TAILQ_PREV(elm, headname, field) \ 347 | (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) 348 | #define TAILQ_EMPTY(head) \ 349 | (TAILQ_FIRST(head) == TAILQ_END(head)) 350 | 351 | #define TAILQ_FOREACH(var, head, field) \ 352 | for((var) = TAILQ_FIRST(head); \ 353 | (var) != TAILQ_END(head); \ 354 | (var) = TAILQ_NEXT(var, field)) 355 | 356 | #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \ 357 | for ((var) = TAILQ_FIRST(head); \ 358 | (var) != TAILQ_END(head) && \ 359 | ((tvar) = TAILQ_NEXT(var, field), 1); \ 360 | (var) = (tvar)) 361 | 362 | 363 | #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \ 364 | for((var) = TAILQ_LAST(head, headname); \ 365 | (var) != TAILQ_END(head); \ 366 | (var) = TAILQ_PREV(var, headname, field)) 367 | 368 | #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ 369 | for ((var) = TAILQ_LAST(head, headname); \ 370 | (var) != TAILQ_END(head) && \ 371 | ((tvar) = TAILQ_PREV(var, headname, field), 1); \ 372 | (var) = (tvar)) 373 | 374 | /* 375 | * Tail queue functions. 
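 *
 * A brief usage sketch (not part of the original header; "node" is an
 * assumed element type, similar to how nif.c links its cache_node structs):
 *
 *	struct node { int size; TAILQ_ENTRY(node) entry; };
 *	TAILQ_HEAD(node_q, node) q = TAILQ_HEAD_INITIALIZER(q);
 *	struct node *n;
 *	...
 *	TAILQ_INSERT_HEAD(&q, n, entry);
 *	TAILQ_FOREACH(n, &q, entry)
 *		total += n->size;
 *	TAILQ_REMOVE(&q, n, entry);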
376 | */ 377 | #define TAILQ_INIT(head) do { \ 378 | (head)->tqh_first = NULL; \ 379 | (head)->tqh_last = &(head)->tqh_first; \ 380 | } while (0) 381 | 382 | #define TAILQ_INSERT_HEAD(head, elm, field) do { \ 383 | if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ 384 | (head)->tqh_first->field.tqe_prev = \ 385 | &(elm)->field.tqe_next; \ 386 | else \ 387 | (head)->tqh_last = &(elm)->field.tqe_next; \ 388 | (head)->tqh_first = (elm); \ 389 | (elm)->field.tqe_prev = &(head)->tqh_first; \ 390 | } while (0) 391 | 392 | #define TAILQ_INSERT_TAIL(head, elm, field) do { \ 393 | (elm)->field.tqe_next = NULL; \ 394 | (elm)->field.tqe_prev = (head)->tqh_last; \ 395 | *(head)->tqh_last = (elm); \ 396 | (head)->tqh_last = &(elm)->field.tqe_next; \ 397 | } while (0) 398 | 399 | #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ 400 | if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ 401 | (elm)->field.tqe_next->field.tqe_prev = \ 402 | &(elm)->field.tqe_next; \ 403 | else \ 404 | (head)->tqh_last = &(elm)->field.tqe_next; \ 405 | (listelm)->field.tqe_next = (elm); \ 406 | (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ 407 | } while (0) 408 | 409 | #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ 410 | (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ 411 | (elm)->field.tqe_next = (listelm); \ 412 | *(listelm)->field.tqe_prev = (elm); \ 413 | (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ 414 | } while (0) 415 | 416 | #define TAILQ_REMOVE(head, elm, field) do { \ 417 | if (((elm)->field.tqe_next) != NULL) \ 418 | (elm)->field.tqe_next->field.tqe_prev = \ 419 | (elm)->field.tqe_prev; \ 420 | else \ 421 | (head)->tqh_last = (elm)->field.tqe_prev; \ 422 | *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ 423 | _Q_INVALIDATE((elm)->field.tqe_prev); \ 424 | _Q_INVALIDATE((elm)->field.tqe_next); \ 425 | } while (0) 426 | 427 | #define TAILQ_REPLACE(head, elm, elm2, field) do { \ 428 | if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \ 429 | (elm2)->field.tqe_next->field.tqe_prev = \ 430 | &(elm2)->field.tqe_next; \ 431 | else \ 432 | (head)->tqh_last = &(elm2)->field.tqe_next; \ 433 | (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \ 434 | *(elm2)->field.tqe_prev = (elm2); \ 435 | _Q_INVALIDATE((elm)->field.tqe_prev); \ 436 | _Q_INVALIDATE((elm)->field.tqe_next); \ 437 | } while (0) 438 | 439 | /* 440 | * Circular queue definitions. 
441 | */ 442 | #define CIRCLEQ_HEAD(name, type) \ 443 | struct name { \ 444 | struct type *cqh_first; /* first element */ \ 445 | struct type *cqh_last; /* last element */ \ 446 | } 447 | 448 | #define CIRCLEQ_HEAD_INITIALIZER(head) \ 449 | { CIRCLEQ_END(&head), CIRCLEQ_END(&head) } 450 | 451 | #define CIRCLEQ_ENTRY(type) \ 452 | struct { \ 453 | struct type *cqe_next; /* next element */ \ 454 | struct type *cqe_prev; /* previous element */ \ 455 | } 456 | 457 | /* 458 | * Circular queue access methods 459 | */ 460 | #define CIRCLEQ_FIRST(head) ((head)->cqh_first) 461 | #define CIRCLEQ_LAST(head) ((head)->cqh_last) 462 | #define CIRCLEQ_END(head) ((void *)(head)) 463 | #define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) 464 | #define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) 465 | #define CIRCLEQ_EMPTY(head) \ 466 | (CIRCLEQ_FIRST(head) == CIRCLEQ_END(head)) 467 | 468 | #define CIRCLEQ_FOREACH(var, head, field) \ 469 | for((var) = CIRCLEQ_FIRST(head); \ 470 | (var) != CIRCLEQ_END(head); \ 471 | (var) = CIRCLEQ_NEXT(var, field)) 472 | 473 | #define CIRCLEQ_FOREACH_SAFE(var, head, field, tvar) \ 474 | for ((var) = CIRCLEQ_FIRST(head); \ 475 | (var) != CIRCLEQ_END(head) && \ 476 | ((tvar) = CIRCLEQ_NEXT(var, field), 1); \ 477 | (var) = (tvar)) 478 | 479 | #define CIRCLEQ_FOREACH_REVERSE(var, head, field) \ 480 | for((var) = CIRCLEQ_LAST(head); \ 481 | (var) != CIRCLEQ_END(head); \ 482 | (var) = CIRCLEQ_PREV(var, field)) 483 | 484 | #define CIRCLEQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \ 485 | for ((var) = CIRCLEQ_LAST(head, headname); \ 486 | (var) != CIRCLEQ_END(head) && \ 487 | ((tvar) = CIRCLEQ_PREV(var, headname, field), 1); \ 488 | (var) = (tvar)) 489 | 490 | /* 491 | * Circular queue functions. 492 | */ 493 | #define CIRCLEQ_INIT(head) do { \ 494 | (head)->cqh_first = CIRCLEQ_END(head); \ 495 | (head)->cqh_last = CIRCLEQ_END(head); \ 496 | } while (0) 497 | 498 | #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ 499 | (elm)->field.cqe_next = (listelm)->field.cqe_next; \ 500 | (elm)->field.cqe_prev = (listelm); \ 501 | if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \ 502 | (head)->cqh_last = (elm); \ 503 | else \ 504 | (listelm)->field.cqe_next->field.cqe_prev = (elm); \ 505 | (listelm)->field.cqe_next = (elm); \ 506 | } while (0) 507 | 508 | #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ 509 | (elm)->field.cqe_next = (listelm); \ 510 | (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ 511 | if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \ 512 | (head)->cqh_first = (elm); \ 513 | else \ 514 | (listelm)->field.cqe_prev->field.cqe_next = (elm); \ 515 | (listelm)->field.cqe_prev = (elm); \ 516 | } while (0) 517 | 518 | #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ 519 | (elm)->field.cqe_next = (head)->cqh_first; \ 520 | (elm)->field.cqe_prev = CIRCLEQ_END(head); \ 521 | if ((head)->cqh_last == CIRCLEQ_END(head)) \ 522 | (head)->cqh_last = (elm); \ 523 | else \ 524 | (head)->cqh_first->field.cqe_prev = (elm); \ 525 | (head)->cqh_first = (elm); \ 526 | } while (0) 527 | 528 | #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ 529 | (elm)->field.cqe_next = CIRCLEQ_END(head); \ 530 | (elm)->field.cqe_prev = (head)->cqh_last; \ 531 | if ((head)->cqh_first == CIRCLEQ_END(head)) \ 532 | (head)->cqh_first = (elm); \ 533 | else \ 534 | (head)->cqh_last->field.cqe_next = (elm); \ 535 | (head)->cqh_last = (elm); \ 536 | } while (0) 537 | 538 | #define CIRCLEQ_REMOVE(head, elm, field) do { \ 539 | if ((elm)->field.cqe_next 
== CIRCLEQ_END(head)) \ 540 | (head)->cqh_last = (elm)->field.cqe_prev; \ 541 | else \ 542 | (elm)->field.cqe_next->field.cqe_prev = \ 543 | (elm)->field.cqe_prev; \ 544 | if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \ 545 | (head)->cqh_first = (elm)->field.cqe_next; \ 546 | else \ 547 | (elm)->field.cqe_prev->field.cqe_next = \ 548 | (elm)->field.cqe_next; \ 549 | _Q_INVALIDATE((elm)->field.cqe_prev); \ 550 | _Q_INVALIDATE((elm)->field.cqe_next); \ 551 | } while (0) 552 | 553 | #define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \ 554 | if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \ 555 | CIRCLEQ_END(head)) \ 556 | (head).cqh_last = (elm2); \ 557 | else \ 558 | (elm2)->field.cqe_next->field.cqe_prev = (elm2); \ 559 | if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \ 560 | CIRCLEQ_END(head)) \ 561 | (head).cqh_first = (elm2); \ 562 | else \ 563 | (elm2)->field.cqe_prev->field.cqe_next = (elm2); \ 564 | _Q_INVALIDATE((elm)->field.cqe_prev); \ 565 | _Q_INVALIDATE((elm)->field.cqe_next); \ 566 | } while (0) 567 | 568 | #endif /* !_SYS_QUEUE_H_ */ 569 | -------------------------------------------------------------------------------- /c_src/tree.h: -------------------------------------------------------------------------------- 1 | /* $OpenBSD: tree.h,v 1.13 2011/07/09 00:19:45 pirofti Exp $ */ 2 | /* 3 | * Copyright 2002 Niels Provos 4 | * All rights reserved. 5 | * 6 | * Redistribution and use in source and binary forms, with or without 7 | * modification, are permitted provided that the following conditions 8 | * are met: 9 | * 1. Redistributions of source code must retain the above copyright 10 | * notice, this list of conditions and the following disclaimer. 11 | * 2. Redistributions in binary form must reproduce the above copyright 12 | * notice, this list of conditions and the following disclaimer in the 13 | * documentation and/or other materials provided with the distribution. 14 | * 15 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 | */ 26 | 27 | #ifndef _SYS_TREE_H_ 28 | #define _SYS_TREE_H_ 29 | 30 | /* 31 | * This file defines data structures for different types of trees: 32 | * splay trees and red-black trees. 33 | * 34 | * A splay tree is a self-organizing data structure. Every operation 35 | * on the tree causes a splay to happen. The splay moves the requested 36 | * node to the root of the tree and partly rebalances it. 37 | * 38 | * This has the benefit that request locality causes faster lookups as 39 | * the requested nodes move to the top of the tree. On the other hand, 40 | * every lookup causes memory writes. 41 | * 42 | * The Balance Theorem bounds the total access time for m operations 43 | * and n inserts on an initially empty tree as O((m + n)lg n). 
The 44 | * amortized cost for a sequence of m accesses to a splay tree is O(lg n); 45 | * 46 | * A red-black tree is a binary search tree with the node color as an 47 | * extra attribute. It fulfills a set of conditions: 48 | * - every search path from the root to a leaf consists of the 49 | * same number of black nodes, 50 | * - each red node (except for the root) has a black parent, 51 | * - each leaf node is black. 52 | * 53 | * Every operation on a red-black tree is bounded as O(lg n). 54 | * The maximum height of a red-black tree is 2lg (n+1). 55 | */ 56 | 57 | #define SPLAY_HEAD(name, type) \ 58 | struct name { \ 59 | struct type *sph_root; /* root of the tree */ \ 60 | } 61 | 62 | #define SPLAY_INITIALIZER(root) \ 63 | { NULL } 64 | 65 | #define SPLAY_INIT(root) do { \ 66 | (root)->sph_root = NULL; \ 67 | } while (0) 68 | 69 | #define SPLAY_ENTRY(type) \ 70 | struct { \ 71 | struct type *spe_left; /* left element */ \ 72 | struct type *spe_right; /* right element */ \ 73 | } 74 | 75 | #define SPLAY_LEFT(elm, field) (elm)->field.spe_left 76 | #define SPLAY_RIGHT(elm, field) (elm)->field.spe_right 77 | #define SPLAY_ROOT(head) (head)->sph_root 78 | #define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL) 79 | 80 | /* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */ 81 | #define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \ 82 | SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \ 83 | SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ 84 | (head)->sph_root = tmp; \ 85 | } while (0) 86 | 87 | #define SPLAY_ROTATE_LEFT(head, tmp, field) do { \ 88 | SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \ 89 | SPLAY_LEFT(tmp, field) = (head)->sph_root; \ 90 | (head)->sph_root = tmp; \ 91 | } while (0) 92 | 93 | #define SPLAY_LINKLEFT(head, tmp, field) do { \ 94 | SPLAY_LEFT(tmp, field) = (head)->sph_root; \ 95 | tmp = (head)->sph_root; \ 96 | (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \ 97 | } while (0) 98 | 99 | #define SPLAY_LINKRIGHT(head, tmp, field) do { \ 100 | SPLAY_RIGHT(tmp, field) = (head)->sph_root; \ 101 | tmp = (head)->sph_root; \ 102 | (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \ 103 | } while (0) 104 | 105 | #define SPLAY_ASSEMBLE(head, node, left, right, field) do { \ 106 | SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \ 107 | SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\ 108 | SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \ 109 | SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \ 110 | } while (0) 111 | 112 | /* Generates prototypes and inline functions */ 113 | 114 | #define SPLAY_PROTOTYPE(name, type, field, cmp) \ 115 | void name##_SPLAY(struct name *, struct type *); \ 116 | void name##_SPLAY_MINMAX(struct name *, int); \ 117 | struct type *name##_SPLAY_INSERT(struct name *, struct type *); \ 118 | struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \ 119 | \ 120 | /* Finds the node with the same key as elm */ \ 121 | static __inline struct type * \ 122 | name##_SPLAY_FIND(struct name *head, struct type *elm) \ 123 | { \ 124 | if (SPLAY_EMPTY(head)) \ 125 | return(NULL); \ 126 | name##_SPLAY(head, elm); \ 127 | if ((cmp)(elm, (head)->sph_root) == 0) \ 128 | return (head->sph_root); \ 129 | return (NULL); \ 130 | } \ 131 | \ 132 | static __inline struct type * \ 133 | name##_SPLAY_NEXT(struct name *head, struct type *elm) \ 134 | { \ 135 | name##_SPLAY(head, elm); \ 136 | if (SPLAY_RIGHT(elm, field) != NULL) { \ 137 | elm = 
SPLAY_RIGHT(elm, field); \ 138 | while (SPLAY_LEFT(elm, field) != NULL) { \ 139 | elm = SPLAY_LEFT(elm, field); \ 140 | } \ 141 | } else \ 142 | elm = NULL; \ 143 | return (elm); \ 144 | } \ 145 | \ 146 | static __inline struct type * \ 147 | name##_SPLAY_MIN_MAX(struct name *head, int val) \ 148 | { \ 149 | name##_SPLAY_MINMAX(head, val); \ 150 | return (SPLAY_ROOT(head)); \ 151 | } 152 | 153 | /* Main splay operation. 154 | * Moves node close to the key of elm to top 155 | */ 156 | #define SPLAY_GENERATE(name, type, field, cmp) \ 157 | struct type * \ 158 | name##_SPLAY_INSERT(struct name *head, struct type *elm) \ 159 | { \ 160 | if (SPLAY_EMPTY(head)) { \ 161 | SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \ 162 | } else { \ 163 | int __comp; \ 164 | name##_SPLAY(head, elm); \ 165 | __comp = (cmp)(elm, (head)->sph_root); \ 166 | if(__comp < 0) { \ 167 | SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\ 168 | SPLAY_RIGHT(elm, field) = (head)->sph_root; \ 169 | SPLAY_LEFT((head)->sph_root, field) = NULL; \ 170 | } else if (__comp > 0) { \ 171 | SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\ 172 | SPLAY_LEFT(elm, field) = (head)->sph_root; \ 173 | SPLAY_RIGHT((head)->sph_root, field) = NULL; \ 174 | } else \ 175 | return ((head)->sph_root); \ 176 | } \ 177 | (head)->sph_root = (elm); \ 178 | return (NULL); \ 179 | } \ 180 | \ 181 | struct type * \ 182 | name##_SPLAY_REMOVE(struct name *head, struct type *elm) \ 183 | { \ 184 | struct type *__tmp; \ 185 | if (SPLAY_EMPTY(head)) \ 186 | return (NULL); \ 187 | name##_SPLAY(head, elm); \ 188 | if ((cmp)(elm, (head)->sph_root) == 0) { \ 189 | if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \ 190 | (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\ 191 | } else { \ 192 | __tmp = SPLAY_RIGHT((head)->sph_root, field); \ 193 | (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\ 194 | name##_SPLAY(head, elm); \ 195 | SPLAY_RIGHT((head)->sph_root, field) = __tmp; \ 196 | } \ 197 | return (elm); \ 198 | } \ 199 | return (NULL); \ 200 | } \ 201 | \ 202 | void \ 203 | name##_SPLAY(struct name *head, struct type *elm) \ 204 | { \ 205 | struct type __node, *__left, *__right, *__tmp; \ 206 | int __comp; \ 207 | \ 208 | SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ 209 | __left = __right = &__node; \ 210 | \ 211 | while ((__comp = (cmp)(elm, (head)->sph_root))) { \ 212 | if (__comp < 0) { \ 213 | __tmp = SPLAY_LEFT((head)->sph_root, field); \ 214 | if (__tmp == NULL) \ 215 | break; \ 216 | if ((cmp)(elm, __tmp) < 0){ \ 217 | SPLAY_ROTATE_RIGHT(head, __tmp, field); \ 218 | if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ 219 | break; \ 220 | } \ 221 | SPLAY_LINKLEFT(head, __right, field); \ 222 | } else if (__comp > 0) { \ 223 | __tmp = SPLAY_RIGHT((head)->sph_root, field); \ 224 | if (__tmp == NULL) \ 225 | break; \ 226 | if ((cmp)(elm, __tmp) > 0){ \ 227 | SPLAY_ROTATE_LEFT(head, __tmp, field); \ 228 | if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ 229 | break; \ 230 | } \ 231 | SPLAY_LINKRIGHT(head, __left, field); \ 232 | } \ 233 | } \ 234 | SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ 235 | } \ 236 | \ 237 | /* Splay with either the minimum or the maximum element \ 238 | * Used to find minimum or maximum element in tree. 
\ 239 | */ \ 240 | void name##_SPLAY_MINMAX(struct name *head, int __comp) \ 241 | { \ 242 | struct type __node, *__left, *__right, *__tmp; \ 243 | \ 244 | SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\ 245 | __left = __right = &__node; \ 246 | \ 247 | while (1) { \ 248 | if (__comp < 0) { \ 249 | __tmp = SPLAY_LEFT((head)->sph_root, field); \ 250 | if (__tmp == NULL) \ 251 | break; \ 252 | if (__comp < 0){ \ 253 | SPLAY_ROTATE_RIGHT(head, __tmp, field); \ 254 | if (SPLAY_LEFT((head)->sph_root, field) == NULL)\ 255 | break; \ 256 | } \ 257 | SPLAY_LINKLEFT(head, __right, field); \ 258 | } else if (__comp > 0) { \ 259 | __tmp = SPLAY_RIGHT((head)->sph_root, field); \ 260 | if (__tmp == NULL) \ 261 | break; \ 262 | if (__comp > 0) { \ 263 | SPLAY_ROTATE_LEFT(head, __tmp, field); \ 264 | if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\ 265 | break; \ 266 | } \ 267 | SPLAY_LINKRIGHT(head, __left, field); \ 268 | } \ 269 | } \ 270 | SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \ 271 | } 272 | 273 | #define SPLAY_NEGINF -1 274 | #define SPLAY_INF 1 275 | 276 | #define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y) 277 | #define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y) 278 | #define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y) 279 | #define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y) 280 | #define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \ 281 | : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF)) 282 | #define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \ 283 | : name##_SPLAY_MIN_MAX(x, SPLAY_INF)) 284 | 285 | #define SPLAY_FOREACH(x, name, head) \ 286 | for ((x) = SPLAY_MIN(name, head); \ 287 | (x) != NULL; \ 288 | (x) = SPLAY_NEXT(name, head, x)) 289 | 290 | /* Macros that define a red-black tree */ 291 | #define RB_HEAD(name, type) \ 292 | struct name { \ 293 | struct type *rbh_root; /* root of the tree */ \ 294 | } 295 | 296 | #define RB_INITIALIZER(root) \ 297 | { NULL } 298 | 299 | #define RB_INIT(root) do { \ 300 | (root)->rbh_root = NULL; \ 301 | } while (0) 302 | 303 | #define RB_BLACK 0 304 | #define RB_RED 1 305 | #define RB_ENTRY(type) \ 306 | struct { \ 307 | struct type *rbe_left; /* left element */ \ 308 | struct type *rbe_right; /* right element */ \ 309 | struct type *rbe_parent; /* parent element */ \ 310 | int rbe_color; /* node color */ \ 311 | } 312 | 313 | #define RB_LEFT(elm, field) (elm)->field.rbe_left 314 | #define RB_RIGHT(elm, field) (elm)->field.rbe_right 315 | #define RB_PARENT(elm, field) (elm)->field.rbe_parent 316 | #define RB_COLOR(elm, field) (elm)->field.rbe_color 317 | #define RB_ROOT(head) (head)->rbh_root 318 | #define RB_EMPTY(head) (RB_ROOT(head) == NULL) 319 | 320 | #define RB_SET(elm, parent, field) do { \ 321 | RB_PARENT(elm, field) = parent; \ 322 | RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \ 323 | RB_COLOR(elm, field) = RB_RED; \ 324 | } while (0) 325 | 326 | #define RB_SET_BLACKRED(black, red, field) do { \ 327 | RB_COLOR(black, field) = RB_BLACK; \ 328 | RB_COLOR(red, field) = RB_RED; \ 329 | } while (0) 330 | 331 | #ifndef RB_AUGMENT 332 | #define RB_AUGMENT(x) do {} while (0) 333 | #endif 334 | 335 | #define RB_ROTATE_LEFT(head, elm, tmp, field) do { \ 336 | (tmp) = RB_RIGHT(elm, field); \ 337 | if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field))) { \ 338 | RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \ 339 | } \ 340 | RB_AUGMENT(elm); \ 341 | if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \ 342 | if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ 343 | RB_LEFT(RB_PARENT(elm, field), field) 
= (tmp); \ 344 | else \ 345 | RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ 346 | } else \ 347 | (head)->rbh_root = (tmp); \ 348 | RB_LEFT(tmp, field) = (elm); \ 349 | RB_PARENT(elm, field) = (tmp); \ 350 | RB_AUGMENT(tmp); \ 351 | if ((RB_PARENT(tmp, field))) \ 352 | RB_AUGMENT(RB_PARENT(tmp, field)); \ 353 | } while (0) 354 | 355 | #define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \ 356 | (tmp) = RB_LEFT(elm, field); \ 357 | if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field))) { \ 358 | RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \ 359 | } \ 360 | RB_AUGMENT(elm); \ 361 | if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \ 362 | if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \ 363 | RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \ 364 | else \ 365 | RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \ 366 | } else \ 367 | (head)->rbh_root = (tmp); \ 368 | RB_RIGHT(tmp, field) = (elm); \ 369 | RB_PARENT(elm, field) = (tmp); \ 370 | RB_AUGMENT(tmp); \ 371 | if ((RB_PARENT(tmp, field))) \ 372 | RB_AUGMENT(RB_PARENT(tmp, field)); \ 373 | } while (0) 374 | 375 | /* Generates prototypes and inline functions */ 376 | #define RB_PROTOTYPE(name, type, field, cmp) \ 377 | RB_PROTOTYPE_INTERNAL(name, type, field, cmp,) 378 | #define RB_PROTOTYPE_STATIC(name, type, field, cmp) \ 379 | RB_PROTOTYPE_INTERNAL(name, type, field, cmp, __attribute__((__unused__)) static) 380 | #define RB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \ 381 | attr void name##_RB_INSERT_COLOR(struct name *, struct type *); \ 382 | attr void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\ 383 | attr struct type *name##_RB_REMOVE(struct name *, struct type *); \ 384 | attr struct type *name##_RB_INSERT(struct name *, struct type *); \ 385 | attr struct type *name##_RB_FIND(struct name *, struct type *); \ 386 | attr struct type *name##_RB_NFIND(struct name *, struct type *); \ 387 | attr struct type *name##_RB_NEXT(struct type *); \ 388 | attr struct type *name##_RB_PREV(struct type *); \ 389 | attr struct type *name##_RB_MINMAX(struct name *, int); \ 390 | \ 391 | 392 | /* Main rb operation. 
393 | * Moves node close to the key of elm to top 394 | */ 395 | #define RB_GENERATE(name, type, field, cmp) \ 396 | RB_GENERATE_INTERNAL(name, type, field, cmp,) 397 | #define RB_GENERATE_STATIC(name, type, field, cmp) \ 398 | RB_GENERATE_INTERNAL(name, type, field, cmp, __attribute__((__unused__)) static) 399 | #define RB_GENERATE_INTERNAL(name, type, field, cmp, attr) \ 400 | attr void \ 401 | name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \ 402 | { \ 403 | struct type *parent, *gparent, *tmp; \ 404 | while ((parent = RB_PARENT(elm, field)) && \ 405 | RB_COLOR(parent, field) == RB_RED) { \ 406 | gparent = RB_PARENT(parent, field); \ 407 | if (parent == RB_LEFT(gparent, field)) { \ 408 | tmp = RB_RIGHT(gparent, field); \ 409 | if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ 410 | RB_COLOR(tmp, field) = RB_BLACK; \ 411 | RB_SET_BLACKRED(parent, gparent, field);\ 412 | elm = gparent; \ 413 | continue; \ 414 | } \ 415 | if (RB_RIGHT(parent, field) == elm) { \ 416 | RB_ROTATE_LEFT(head, parent, tmp, field);\ 417 | tmp = parent; \ 418 | parent = elm; \ 419 | elm = tmp; \ 420 | } \ 421 | RB_SET_BLACKRED(parent, gparent, field); \ 422 | RB_ROTATE_RIGHT(head, gparent, tmp, field); \ 423 | } else { \ 424 | tmp = RB_LEFT(gparent, field); \ 425 | if (tmp && RB_COLOR(tmp, field) == RB_RED) { \ 426 | RB_COLOR(tmp, field) = RB_BLACK; \ 427 | RB_SET_BLACKRED(parent, gparent, field);\ 428 | elm = gparent; \ 429 | continue; \ 430 | } \ 431 | if (RB_LEFT(parent, field) == elm) { \ 432 | RB_ROTATE_RIGHT(head, parent, tmp, field);\ 433 | tmp = parent; \ 434 | parent = elm; \ 435 | elm = tmp; \ 436 | } \ 437 | RB_SET_BLACKRED(parent, gparent, field); \ 438 | RB_ROTATE_LEFT(head, gparent, tmp, field); \ 439 | } \ 440 | } \ 441 | RB_COLOR(head->rbh_root, field) = RB_BLACK; \ 442 | } \ 443 | \ 444 | attr void \ 445 | name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \ 446 | { \ 447 | struct type *tmp; \ 448 | while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \ 449 | elm != RB_ROOT(head)) { \ 450 | if (RB_LEFT(parent, field) == elm) { \ 451 | tmp = RB_RIGHT(parent, field); \ 452 | if (RB_COLOR(tmp, field) == RB_RED) { \ 453 | RB_SET_BLACKRED(tmp, parent, field); \ 454 | RB_ROTATE_LEFT(head, parent, tmp, field);\ 455 | tmp = RB_RIGHT(parent, field); \ 456 | } \ 457 | if ((RB_LEFT(tmp, field) == NULL || \ 458 | RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ 459 | (RB_RIGHT(tmp, field) == NULL || \ 460 | RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ 461 | RB_COLOR(tmp, field) = RB_RED; \ 462 | elm = parent; \ 463 | parent = RB_PARENT(elm, field); \ 464 | } else { \ 465 | if (RB_RIGHT(tmp, field) == NULL || \ 466 | RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\ 467 | struct type *oleft; \ 468 | if ((oleft = RB_LEFT(tmp, field)))\ 469 | RB_COLOR(oleft, field) = RB_BLACK;\ 470 | RB_COLOR(tmp, field) = RB_RED; \ 471 | RB_ROTATE_RIGHT(head, tmp, oleft, field);\ 472 | tmp = RB_RIGHT(parent, field); \ 473 | } \ 474 | RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ 475 | RB_COLOR(parent, field) = RB_BLACK; \ 476 | if (RB_RIGHT(tmp, field)) \ 477 | RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\ 478 | RB_ROTATE_LEFT(head, parent, tmp, field);\ 479 | elm = RB_ROOT(head); \ 480 | break; \ 481 | } \ 482 | } else { \ 483 | tmp = RB_LEFT(parent, field); \ 484 | if (RB_COLOR(tmp, field) == RB_RED) { \ 485 | RB_SET_BLACKRED(tmp, parent, field); \ 486 | RB_ROTATE_RIGHT(head, parent, tmp, field);\ 487 | tmp = RB_LEFT(parent, field); \ 488 | } \ 489 | if 
((RB_LEFT(tmp, field) == NULL || \ 490 | RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\ 491 | (RB_RIGHT(tmp, field) == NULL || \ 492 | RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\ 493 | RB_COLOR(tmp, field) = RB_RED; \ 494 | elm = parent; \ 495 | parent = RB_PARENT(elm, field); \ 496 | } else { \ 497 | if (RB_LEFT(tmp, field) == NULL || \ 498 | RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\ 499 | struct type *oright; \ 500 | if ((oright = RB_RIGHT(tmp, field)))\ 501 | RB_COLOR(oright, field) = RB_BLACK;\ 502 | RB_COLOR(tmp, field) = RB_RED; \ 503 | RB_ROTATE_LEFT(head, tmp, oright, field);\ 504 | tmp = RB_LEFT(parent, field); \ 505 | } \ 506 | RB_COLOR(tmp, field) = RB_COLOR(parent, field);\ 507 | RB_COLOR(parent, field) = RB_BLACK; \ 508 | if (RB_LEFT(tmp, field)) \ 509 | RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\ 510 | RB_ROTATE_RIGHT(head, parent, tmp, field);\ 511 | elm = RB_ROOT(head); \ 512 | break; \ 513 | } \ 514 | } \ 515 | } \ 516 | if (elm) \ 517 | RB_COLOR(elm, field) = RB_BLACK; \ 518 | } \ 519 | \ 520 | attr struct type * \ 521 | name##_RB_REMOVE(struct name *head, struct type *elm) \ 522 | { \ 523 | struct type *child, *parent, *old = elm; \ 524 | int color; \ 525 | if (RB_LEFT(elm, field) == NULL) \ 526 | child = RB_RIGHT(elm, field); \ 527 | else if (RB_RIGHT(elm, field) == NULL) \ 528 | child = RB_LEFT(elm, field); \ 529 | else { \ 530 | struct type *left; \ 531 | elm = RB_RIGHT(elm, field); \ 532 | while ((left = RB_LEFT(elm, field))) \ 533 | elm = left; \ 534 | child = RB_RIGHT(elm, field); \ 535 | parent = RB_PARENT(elm, field); \ 536 | color = RB_COLOR(elm, field); \ 537 | if (child) \ 538 | RB_PARENT(child, field) = parent; \ 539 | if (parent) { \ 540 | if (RB_LEFT(parent, field) == elm) \ 541 | RB_LEFT(parent, field) = child; \ 542 | else \ 543 | RB_RIGHT(parent, field) = child; \ 544 | RB_AUGMENT(parent); \ 545 | } else \ 546 | RB_ROOT(head) = child; \ 547 | if (RB_PARENT(elm, field) == old) \ 548 | parent = elm; \ 549 | (elm)->field = (old)->field; \ 550 | if (RB_PARENT(old, field)) { \ 551 | if (RB_LEFT(RB_PARENT(old, field), field) == old)\ 552 | RB_LEFT(RB_PARENT(old, field), field) = elm;\ 553 | else \ 554 | RB_RIGHT(RB_PARENT(old, field), field) = elm;\ 555 | RB_AUGMENT(RB_PARENT(old, field)); \ 556 | } else \ 557 | RB_ROOT(head) = elm; \ 558 | RB_PARENT(RB_LEFT(old, field), field) = elm; \ 559 | if (RB_RIGHT(old, field)) \ 560 | RB_PARENT(RB_RIGHT(old, field), field) = elm; \ 561 | if (parent) { \ 562 | left = parent; \ 563 | do { \ 564 | RB_AUGMENT(left); \ 565 | } while ((left = RB_PARENT(left, field))); \ 566 | } \ 567 | goto color; \ 568 | } \ 569 | parent = RB_PARENT(elm, field); \ 570 | color = RB_COLOR(elm, field); \ 571 | if (child) \ 572 | RB_PARENT(child, field) = parent; \ 573 | if (parent) { \ 574 | if (RB_LEFT(parent, field) == elm) \ 575 | RB_LEFT(parent, field) = child; \ 576 | else \ 577 | RB_RIGHT(parent, field) = child; \ 578 | RB_AUGMENT(parent); \ 579 | } else \ 580 | RB_ROOT(head) = child; \ 581 | color: \ 582 | if (color == RB_BLACK) \ 583 | name##_RB_REMOVE_COLOR(head, parent, child); \ 584 | return (old); \ 585 | } \ 586 | \ 587 | /* Inserts a node into the RB tree */ \ 588 | attr struct type * \ 589 | name##_RB_INSERT(struct name *head, struct type *elm) \ 590 | { \ 591 | struct type *tmp; \ 592 | struct type *parent = NULL; \ 593 | int comp = 0; \ 594 | tmp = RB_ROOT(head); \ 595 | while (tmp) { \ 596 | parent = tmp; \ 597 | comp = (cmp)(elm, parent); \ 598 | if (comp < 0) \ 599 | tmp = RB_LEFT(tmp, 
field); \ 600 | else if (comp > 0) \ 601 | tmp = RB_RIGHT(tmp, field); \ 602 | else \ 603 | return (tmp); \ 604 | } \ 605 | RB_SET(elm, parent, field); \ 606 | if (parent != NULL) { \ 607 | if (comp < 0) \ 608 | RB_LEFT(parent, field) = elm; \ 609 | else \ 610 | RB_RIGHT(parent, field) = elm; \ 611 | RB_AUGMENT(parent); \ 612 | } else \ 613 | RB_ROOT(head) = elm; \ 614 | name##_RB_INSERT_COLOR(head, elm); \ 615 | return (NULL); \ 616 | } \ 617 | \ 618 | /* Finds the node with the same key as elm */ \ 619 | attr struct type * \ 620 | name##_RB_FIND(struct name *head, struct type *elm) \ 621 | { \ 622 | struct type *tmp = RB_ROOT(head); \ 623 | int comp; \ 624 | while (tmp) { \ 625 | comp = cmp(elm, tmp); \ 626 | if (comp < 0) \ 627 | tmp = RB_LEFT(tmp, field); \ 628 | else if (comp > 0) \ 629 | tmp = RB_RIGHT(tmp, field); \ 630 | else \ 631 | return (tmp); \ 632 | } \ 633 | return (NULL); \ 634 | } \ 635 | \ 636 | /* Finds the first node greater than or equal to the search key */ \ 637 | attr struct type * \ 638 | name##_RB_NFIND(struct name *head, struct type *elm) \ 639 | { \ 640 | struct type *tmp = RB_ROOT(head); \ 641 | struct type *res = NULL; \ 642 | int comp; \ 643 | while (tmp) { \ 644 | comp = cmp(elm, tmp); \ 645 | if (comp < 0) { \ 646 | res = tmp; \ 647 | tmp = RB_LEFT(tmp, field); \ 648 | } \ 649 | else if (comp > 0) \ 650 | tmp = RB_RIGHT(tmp, field); \ 651 | else \ 652 | return (tmp); \ 653 | } \ 654 | return (res); \ 655 | } \ 656 | \ 657 | /* ARGSUSED */ \ 658 | attr struct type * \ 659 | name##_RB_NEXT(struct type *elm) \ 660 | { \ 661 | if (RB_RIGHT(elm, field)) { \ 662 | elm = RB_RIGHT(elm, field); \ 663 | while (RB_LEFT(elm, field)) \ 664 | elm = RB_LEFT(elm, field); \ 665 | } else { \ 666 | if (RB_PARENT(elm, field) && \ 667 | (elm == RB_LEFT(RB_PARENT(elm, field), field))) \ 668 | elm = RB_PARENT(elm, field); \ 669 | else { \ 670 | while (RB_PARENT(elm, field) && \ 671 | (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\ 672 | elm = RB_PARENT(elm, field); \ 673 | elm = RB_PARENT(elm, field); \ 674 | } \ 675 | } \ 676 | return (elm); \ 677 | } \ 678 | \ 679 | /* ARGSUSED */ \ 680 | attr struct type * \ 681 | name##_RB_PREV(struct type *elm) \ 682 | { \ 683 | if (RB_LEFT(elm, field)) { \ 684 | elm = RB_LEFT(elm, field); \ 685 | while (RB_RIGHT(elm, field)) \ 686 | elm = RB_RIGHT(elm, field); \ 687 | } else { \ 688 | if (RB_PARENT(elm, field) && \ 689 | (elm == RB_RIGHT(RB_PARENT(elm, field), field))) \ 690 | elm = RB_PARENT(elm, field); \ 691 | else { \ 692 | while (RB_PARENT(elm, field) && \ 693 | (elm == RB_LEFT(RB_PARENT(elm, field), field)))\ 694 | elm = RB_PARENT(elm, field); \ 695 | elm = RB_PARENT(elm, field); \ 696 | } \ 697 | } \ 698 | return (elm); \ 699 | } \ 700 | \ 701 | attr struct type * \ 702 | name##_RB_MINMAX(struct name *head, int val) \ 703 | { \ 704 | struct type *tmp = RB_ROOT(head); \ 705 | struct type *parent = NULL; \ 706 | while (tmp) { \ 707 | parent = tmp; \ 708 | if (val < 0) \ 709 | tmp = RB_LEFT(tmp, field); \ 710 | else \ 711 | tmp = RB_RIGHT(tmp, field); \ 712 | } \ 713 | return (parent); \ 714 | } 715 | 716 | #define RB_NEGINF -1 717 | #define RB_INF 1 718 | 719 | #define RB_INSERT(name, x, y) name##_RB_INSERT(x, y) 720 | #define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y) 721 | #define RB_FIND(name, x, y) name##_RB_FIND(x, y) 722 | #define RB_NFIND(name, x, y) name##_RB_NFIND(x, y) 723 | #define RB_NEXT(name, x, y) name##_RB_NEXT(y) 724 | #define RB_PREV(name, x, y) name##_RB_PREV(y) 725 | #define RB_MIN(name, x) name##_RB_MINMAX(x, 
RB_NEGINF) 726 | #define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF) 727 | 728 | #define RB_FOREACH(x, name, head) \ 729 | for ((x) = RB_MIN(name, head); \ 730 | (x) != NULL; \ 731 | (x) = name##_RB_NEXT(x)) 732 | 733 | #define RB_FOREACH_SAFE(x, name, head, y) \ 734 | for ((x) = RB_MIN(name, head); \ 735 | ((x) != NULL) && ((y) = name##_RB_NEXT(x), 1); \ 736 | (x) = (y)) 737 | 738 | #define RB_FOREACH_REVERSE(x, name, head) \ 739 | for ((x) = RB_MAX(name, head); \ 740 | (x) != NULL; \ 741 | (x) = name##_RB_PREV(x)) 742 | 743 | #define RB_FOREACH_REVERSE_SAFE(x, name, head, y) \ 744 | for ((x) = RB_MAX(name, head); \ 745 | ((x) != NULL) && ((y) = name##_RB_PREV(x), 1); \ 746 | (x) = (y)) 747 | 748 | #endif /* _SYS_TREE_H_ */ 749 | -------------------------------------------------------------------------------- /c_src/uthash.h: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2003-2014, Troy D. Hanson http://troydhanson.github.com/uthash/ 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | 11 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 12 | IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 13 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A 14 | PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER 15 | OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 16 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 17 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 18 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 19 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 20 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 21 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 22 | */ 23 | 24 | #ifndef UTHASH_H 25 | #define UTHASH_H 26 | 27 | #include <string.h> /* memcmp,strlen */ 28 | #include <stddef.h> /* ptrdiff_t */ 29 | #include <stdlib.h> /* exit() */ 30 | 31 | /* These macros use decltype or the earlier __typeof GNU extension. 32 | As decltype is only available in newer compilers (VS2010 or gcc 4.3+ 33 | when compiling c++ source) this code uses whatever method is needed 34 | or, for VS2008 where neither is available, uses casting workarounds.
*/ 35 | #ifdef _MSC_VER /* MS compiler */ 36 | #if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ 37 | #define DECLTYPE(x) (decltype(x)) 38 | #else /* VS2008 or older (or VS2010 in C mode) */ 39 | #define NO_DECLTYPE 40 | #define DECLTYPE(x) 41 | #endif 42 | #else /* GNU, Sun and other compilers */ 43 | #define DECLTYPE(x) (__typeof(x)) 44 | #endif 45 | 46 | #ifdef NO_DECLTYPE 47 | #define DECLTYPE_ASSIGN(dst,src) \ 48 | do { \ 49 | char **_da_dst = (char**)(&(dst)); \ 50 | *_da_dst = (char*)(src); \ 51 | } while(0) 52 | #else 53 | #define DECLTYPE_ASSIGN(dst,src) \ 54 | do { \ 55 | (dst) = DECLTYPE(dst)(src); \ 56 | } while(0) 57 | #endif 58 | 59 | /* a number of the hash function use uint32_t which isn't defined on win32 */ 60 | #ifdef _MSC_VER 61 | typedef unsigned int uint32_t; 62 | typedef unsigned char uint8_t; 63 | #else 64 | #include /* uint32_t */ 65 | #endif 66 | 67 | #define UTHASH_VERSION 1.9.9 68 | 69 | #ifndef uthash_fatal 70 | #define uthash_fatal(msg) exit(-1) /* fatal error (out of memory,etc) */ 71 | #endif 72 | #ifndef uthash_malloc 73 | #define uthash_malloc(sz) malloc(sz) /* malloc fcn */ 74 | #endif 75 | #ifndef uthash_free 76 | #define uthash_free(ptr,sz) free(ptr) /* free fcn */ 77 | #endif 78 | 79 | #ifndef uthash_noexpand_fyi 80 | #define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ 81 | #endif 82 | #ifndef uthash_expand_fyi 83 | #define uthash_expand_fyi(tbl) /* can be defined to log expands */ 84 | #endif 85 | 86 | /* initial number of buckets */ 87 | #define HASH_INITIAL_NUM_BUCKETS 32 /* initial number of buckets */ 88 | #define HASH_INITIAL_NUM_BUCKETS_LOG2 5 /* lg2 of initial number of buckets */ 89 | #define HASH_BKT_CAPACITY_THRESH 10 /* expand when bucket count reaches */ 90 | 91 | /* calculate the element whose hash handle address is hhe */ 92 | #define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) 93 | 94 | #define HASH_FIND(hh,head,keyptr,keylen,out) \ 95 | do { \ 96 | unsigned _hf_bkt,_hf_hashv; \ 97 | out=NULL; \ 98 | if (head) { \ 99 | HASH_FCN(keyptr,keylen, (head)->hh.tbl->num_buckets, _hf_hashv, _hf_bkt); \ 100 | if (HASH_BLOOM_TEST((head)->hh.tbl, _hf_hashv)) { \ 101 | HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], \ 102 | keyptr,keylen,out); \ 103 | } \ 104 | } \ 105 | } while (0) 106 | 107 | #ifdef HASH_BLOOM 108 | #define HASH_BLOOM_BITLEN (1ULL << HASH_BLOOM) 109 | #define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8) + ((HASH_BLOOM_BITLEN%8) ? 
1:0) 110 | #define HASH_BLOOM_MAKE(tbl) \ 111 | do { \ 112 | (tbl)->bloom_nbits = HASH_BLOOM; \ 113 | (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ 114 | if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \ 115 | memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \ 116 | (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ 117 | } while (0) 118 | 119 | #define HASH_BLOOM_FREE(tbl) \ 120 | do { \ 121 | uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ 122 | } while (0) 123 | 124 | #define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8] |= (1U << ((idx)%8))) 125 | #define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8] & (1U << ((idx)%8))) 126 | 127 | #define HASH_BLOOM_ADD(tbl,hashv) \ 128 | HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1))) 129 | 130 | #define HASH_BLOOM_TEST(tbl,hashv) \ 131 | HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1))) 132 | 133 | #else 134 | #define HASH_BLOOM_MAKE(tbl) 135 | #define HASH_BLOOM_FREE(tbl) 136 | #define HASH_BLOOM_ADD(tbl,hashv) 137 | #define HASH_BLOOM_TEST(tbl,hashv) (1) 138 | #define HASH_BLOOM_BYTELEN 0 139 | #endif 140 | 141 | #define HASH_MAKE_TABLE(hh,head) \ 142 | do { \ 143 | (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \ 144 | sizeof(UT_hash_table)); \ 145 | if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \ 146 | memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \ 147 | (head)->hh.tbl->tail = &((head)->hh); \ 148 | (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ 149 | (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \ 150 | (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \ 151 | (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ 152 | HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ 153 | if (! 
(head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \ 154 | memset((head)->hh.tbl->buckets, 0, \ 155 | HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ 156 | HASH_BLOOM_MAKE((head)->hh.tbl); \ 157 | (head)->hh.tbl->signature = HASH_SIGNATURE; \ 158 | } while(0) 159 | 160 | #define HASH_ADD(hh,head,fieldname,keylen_in,add) \ 161 | HASH_ADD_KEYPTR(hh,head,&((add)->fieldname),keylen_in,add) 162 | 163 | #define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \ 164 | do { \ 165 | replaced=NULL; \ 166 | HASH_FIND(hh,head,&((add)->fieldname),keylen_in,replaced); \ 167 | if (replaced!=NULL) { \ 168 | HASH_DELETE(hh,head,replaced); \ 169 | }; \ 170 | HASH_ADD(hh,head,fieldname,keylen_in,add); \ 171 | } while(0) 172 | 173 | #define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ 174 | do { \ 175 | unsigned _ha_bkt; \ 176 | (add)->hh.next = NULL; \ 177 | (add)->hh.key = (char*)(keyptr); \ 178 | (add)->hh.keylen = (unsigned)(keylen_in); \ 179 | if (!(head)) { \ 180 | head = (add); \ 181 | (head)->hh.prev = NULL; \ 182 | HASH_MAKE_TABLE(hh,head); \ 183 | } else { \ 184 | (head)->hh.tbl->tail->next = (add); \ 185 | (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \ 186 | (head)->hh.tbl->tail = &((add)->hh); \ 187 | } \ 188 | (head)->hh.tbl->num_items++; \ 189 | (add)->hh.tbl = (head)->hh.tbl; \ 190 | HASH_FCN(keyptr,keylen_in, (head)->hh.tbl->num_buckets, \ 191 | (add)->hh.hashv, _ha_bkt); \ 192 | HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt],&(add)->hh); \ 193 | HASH_BLOOM_ADD((head)->hh.tbl,(add)->hh.hashv); \ 194 | HASH_EMIT_KEY(hh,head,keyptr,keylen_in); \ 195 | HASH_FSCK(hh,head); \ 196 | } while(0) 197 | 198 | #define HASH_TO_BKT( hashv, num_bkts, bkt ) \ 199 | do { \ 200 | bkt = ((hashv) & ((num_bkts) - 1)); \ 201 | } while(0) 202 | 203 | /* delete "delptr" from the hash table. 204 | * "the usual" patch-up process for the app-order doubly-linked-list. 205 | * The use of _hd_hh_del below deserves special explanation. 206 | * These used to be expressed using (delptr) but that led to a bug 207 | * if someone used the same symbol for the head and deletee, like 208 | * HASH_DELETE(hh,users,users); 209 | * We want that to work, but by changing the head (users) below 210 | * we were forfeiting our ability to further refer to the deletee (users) 211 | * in the patch-up process. Solution: use scratch space to 212 | * copy the deletee pointer, then the latter references are via that 213 | * scratch pointer rather than through the repointed (users) symbol. 
214 | */ 215 | #define HASH_DELETE(hh,head,delptr) \ 216 | do { \ 217 | unsigned _hd_bkt; \ 218 | struct UT_hash_handle *_hd_hh_del; \ 219 | if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \ 220 | uthash_free((head)->hh.tbl->buckets, \ 221 | (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \ 222 | HASH_BLOOM_FREE((head)->hh.tbl); \ 223 | uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ 224 | head = NULL; \ 225 | } else { \ 226 | _hd_hh_del = &((delptr)->hh); \ 227 | if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \ 228 | (head)->hh.tbl->tail = \ 229 | (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \ 230 | (head)->hh.tbl->hho); \ 231 | } \ 232 | if ((delptr)->hh.prev) { \ 233 | ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \ 234 | (head)->hh.tbl->hho))->next = (delptr)->hh.next; \ 235 | } else { \ 236 | DECLTYPE_ASSIGN(head,(delptr)->hh.next); \ 237 | } \ 238 | if (_hd_hh_del->next) { \ 239 | ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next + \ 240 | (head)->hh.tbl->hho))->prev = \ 241 | _hd_hh_del->prev; \ 242 | } \ 243 | HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ 244 | HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \ 245 | (head)->hh.tbl->num_items--; \ 246 | } \ 247 | HASH_FSCK(hh,head); \ 248 | } while (0) 249 | 250 | 251 | /* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ 252 | #define HASH_FIND_STR(head,findstr,out) \ 253 | HASH_FIND(hh,head,findstr,strlen(findstr),out) 254 | #define HASH_ADD_STR(head,strfield,add) \ 255 | HASH_ADD(hh,head,strfield[0],strlen(add->strfield),add) 256 | #define HASH_REPLACE_STR(head,strfield,add,replaced) \ 257 | HASH_REPLACE(hh,head,strfield,strlen(add->strfield),add,replaced) 258 | #define HASH_FIND_INT(head,findint,out) \ 259 | HASH_FIND(hh,head,findint,sizeof(int),out) 260 | #define HASH_ADD_INT(head,intfield,add) \ 261 | HASH_ADD(hh,head,intfield,sizeof(int),add) 262 | #define HASH_REPLACE_INT(head,intfield,add,replaced) \ 263 | HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced) 264 | #define HASH_FIND_PTR(head,findptr,out) \ 265 | HASH_FIND(hh,head,findptr,sizeof(void *),out) 266 | #define HASH_ADD_PTR(head,ptrfield,add) \ 267 | HASH_ADD(hh,head,ptrfield,sizeof(void *),add) 268 | #define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \ 269 | HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced) 270 | #define HASH_DEL(head,delptr) \ 271 | HASH_DELETE(hh,head,delptr) 272 | 273 | /* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. 274 | * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. 275 | */ 276 | #ifdef HASH_DEBUG 277 | #define HASH_OOPS(...) 
do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0) 278 | #define HASH_FSCK(hh,head) \ 279 | do { \ 280 | unsigned _bkt_i; \ 281 | unsigned _count, _bkt_count; \ 282 | char *_prev; \ 283 | struct UT_hash_handle *_thh; \ 284 | if (head) { \ 285 | _count = 0; \ 286 | for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \ 287 | _bkt_count = 0; \ 288 | _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \ 289 | _prev = NULL; \ 290 | while (_thh) { \ 291 | if (_prev != (char*)(_thh->hh_prev)) { \ 292 | HASH_OOPS("invalid hh_prev %p, actual %p\n", \ 293 | _thh->hh_prev, _prev ); \ 294 | } \ 295 | _bkt_count++; \ 296 | _prev = (char*)(_thh); \ 297 | _thh = _thh->hh_next; \ 298 | } \ 299 | _count += _bkt_count; \ 300 | if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \ 301 | HASH_OOPS("invalid bucket count %d, actual %d\n", \ 302 | (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \ 303 | } \ 304 | } \ 305 | if (_count != (head)->hh.tbl->num_items) { \ 306 | HASH_OOPS("invalid hh item count %d, actual %d\n", \ 307 | (head)->hh.tbl->num_items, _count ); \ 308 | } \ 309 | /* traverse hh in app order; check next/prev integrity, count */ \ 310 | _count = 0; \ 311 | _prev = NULL; \ 312 | _thh = &(head)->hh; \ 313 | while (_thh) { \ 314 | _count++; \ 315 | if (_prev !=(char*)(_thh->prev)) { \ 316 | HASH_OOPS("invalid prev %p, actual %p\n", \ 317 | _thh->prev, _prev ); \ 318 | } \ 319 | _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \ 320 | _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \ 321 | (head)->hh.tbl->hho) : NULL ); \ 322 | } \ 323 | if (_count != (head)->hh.tbl->num_items) { \ 324 | HASH_OOPS("invalid app item count %d, actual %d\n", \ 325 | (head)->hh.tbl->num_items, _count ); \ 326 | } \ 327 | } \ 328 | } while (0) 329 | #else 330 | #define HASH_FSCK(hh,head) 331 | #endif 332 | 333 | /* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to 334 | * the descriptor to which this macro is defined for tuning the hash function. 335 | * The app can #include to get the prototype for write(2). */ 336 | #ifdef HASH_EMIT_KEYS 337 | #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ 338 | do { \ 339 | unsigned _klen = fieldlen; \ 340 | write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ 341 | write(HASH_EMIT_KEYS, keyptr, fieldlen); \ 342 | } while (0) 343 | #else 344 | #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) 345 | #endif 346 | 347 | /* default to Jenkin's hash unless overridden e.g. 
DHASH_FUNCTION=HASH_SAX */ 348 | #ifdef HASH_FUNCTION 349 | #define HASH_FCN HASH_FUNCTION 350 | #else 351 | #define HASH_FCN HASH_JEN 352 | #endif 353 | 354 | /* The Bernstein hash function, used in Perl prior to v5.6 */ 355 | #define HASH_BER(key,keylen,num_bkts,hashv,bkt) \ 356 | do { \ 357 | unsigned _hb_keylen=keylen; \ 358 | char *_hb_key=(char*)(key); \ 359 | (hashv) = 0; \ 360 | while (_hb_keylen--) { (hashv) = ((hashv) * 33) + *_hb_key++; } \ 361 | bkt = (hashv) & (num_bkts-1); \ 362 | } while (0) 363 | 364 | 365 | /* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at 366 | * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */ 367 | #define HASH_SAX(key,keylen,num_bkts,hashv,bkt) \ 368 | do { \ 369 | unsigned _sx_i; \ 370 | char *_hs_key=(char*)(key); \ 371 | hashv = 0; \ 372 | for(_sx_i=0; _sx_i < keylen; _sx_i++) \ 373 | hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ 374 | bkt = hashv & (num_bkts-1); \ 375 | } while (0) 376 | 377 | #define HASH_FNV(key,keylen,num_bkts,hashv,bkt) \ 378 | do { \ 379 | unsigned _fn_i; \ 380 | char *_hf_key=(char*)(key); \ 381 | hashv = 2166136261UL; \ 382 | for(_fn_i=0; _fn_i < keylen; _fn_i++) \ 383 | hashv = (hashv * 16777619) ^ _hf_key[_fn_i]; \ 384 | bkt = hashv & (num_bkts-1); \ 385 | } while(0) 386 | 387 | #define HASH_OAT(key,keylen,num_bkts,hashv,bkt) \ 388 | do { \ 389 | unsigned _ho_i; \ 390 | char *_ho_key=(char*)(key); \ 391 | hashv = 0; \ 392 | for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ 393 | hashv += _ho_key[_ho_i]; \ 394 | hashv += (hashv << 10); \ 395 | hashv ^= (hashv >> 6); \ 396 | } \ 397 | hashv += (hashv << 3); \ 398 | hashv ^= (hashv >> 11); \ 399 | hashv += (hashv << 15); \ 400 | bkt = hashv & (num_bkts-1); \ 401 | } while(0) 402 | 403 | #define HASH_JEN_MIX(a,b,c) \ 404 | do { \ 405 | a -= b; a -= c; a ^= ( c >> 13 ); \ 406 | b -= c; b -= a; b ^= ( a << 8 ); \ 407 | c -= a; c -= b; c ^= ( b >> 13 ); \ 408 | a -= b; a -= c; a ^= ( c >> 12 ); \ 409 | b -= c; b -= a; b ^= ( a << 16 ); \ 410 | c -= a; c -= b; c ^= ( b >> 5 ); \ 411 | a -= b; a -= c; a ^= ( c >> 3 ); \ 412 | b -= c; b -= a; b ^= ( a << 10 ); \ 413 | c -= a; c -= b; c ^= ( b >> 15 ); \ 414 | } while (0) 415 | 416 | #define HASH_JEN(key,keylen,num_bkts,hashv,bkt) \ 417 | do { \ 418 | unsigned _hj_i,_hj_j,_hj_k; \ 419 | unsigned char *_hj_key=(unsigned char*)(key); \ 420 | hashv = 0xfeedbeef; \ 421 | _hj_i = _hj_j = 0x9e3779b9; \ 422 | _hj_k = (unsigned)(keylen); \ 423 | while (_hj_k >= 12) { \ 424 | _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ 425 | + ( (unsigned)_hj_key[2] << 16 ) \ 426 | + ( (unsigned)_hj_key[3] << 24 ) ); \ 427 | _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ 428 | + ( (unsigned)_hj_key[6] << 16 ) \ 429 | + ( (unsigned)_hj_key[7] << 24 ) ); \ 430 | hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ 431 | + ( (unsigned)_hj_key[10] << 16 ) \ 432 | + ( (unsigned)_hj_key[11] << 24 ) ); \ 433 | \ 434 | HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ 435 | \ 436 | _hj_key += 12; \ 437 | _hj_k -= 12; \ 438 | } \ 439 | hashv += keylen; \ 440 | switch ( _hj_k ) { \ 441 | case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); \ 442 | case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); \ 443 | case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); \ 444 | case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); \ 445 | case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); \ 446 | case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); \ 447 | case 5: _hj_j += _hj_key[4]; \ 448 | case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); \ 449 
| case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); \ 450 | case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); \ 451 | case 1: _hj_i += _hj_key[0]; \ 452 | } \ 453 | HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ 454 | bkt = hashv & (num_bkts-1); \ 455 | } while(0) 456 | 457 | /* The Paul Hsieh hash function */ 458 | #undef get16bits 459 | #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ 460 | || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) 461 | #define get16bits(d) (*((const uint16_t *) (d))) 462 | #endif 463 | 464 | #if !defined (get16bits) 465 | #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ 466 | +(uint32_t)(((const uint8_t *)(d))[0]) ) 467 | #endif 468 | #define HASH_SFH(key,keylen,num_bkts,hashv,bkt) \ 469 | do { \ 470 | unsigned char *_sfh_key=(unsigned char*)(key); \ 471 | uint32_t _sfh_tmp, _sfh_len = keylen; \ 472 | \ 473 | int _sfh_rem = _sfh_len & 3; \ 474 | _sfh_len >>= 2; \ 475 | hashv = 0xcafebabe; \ 476 | \ 477 | /* Main loop */ \ 478 | for (;_sfh_len > 0; _sfh_len--) { \ 479 | hashv += get16bits (_sfh_key); \ 480 | _sfh_tmp = (uint32_t)(get16bits (_sfh_key+2)) << 11 ^ hashv; \ 481 | hashv = (hashv << 16) ^ _sfh_tmp; \ 482 | _sfh_key += 2*sizeof (uint16_t); \ 483 | hashv += hashv >> 11; \ 484 | } \ 485 | \ 486 | /* Handle end cases */ \ 487 | switch (_sfh_rem) { \ 488 | case 3: hashv += get16bits (_sfh_key); \ 489 | hashv ^= hashv << 16; \ 490 | hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)] << 18); \ 491 | hashv += hashv >> 11; \ 492 | break; \ 493 | case 2: hashv += get16bits (_sfh_key); \ 494 | hashv ^= hashv << 11; \ 495 | hashv += hashv >> 17; \ 496 | break; \ 497 | case 1: hashv += *_sfh_key; \ 498 | hashv ^= hashv << 10; \ 499 | hashv += hashv >> 1; \ 500 | } \ 501 | \ 502 | /* Force "avalanching" of final 127 bits */ \ 503 | hashv ^= hashv << 3; \ 504 | hashv += hashv >> 5; \ 505 | hashv ^= hashv << 4; \ 506 | hashv += hashv >> 17; \ 507 | hashv ^= hashv << 25; \ 508 | hashv += hashv >> 6; \ 509 | bkt = hashv & (num_bkts-1); \ 510 | } while(0) 511 | 512 | #ifdef HASH_USING_NO_STRICT_ALIASING 513 | /* The MurmurHash exploits some CPU's (x86,x86_64) tolerance for unaligned reads. 514 | * For other types of CPU's (e.g. Sparc) an unaligned read causes a bus error. 515 | * MurmurHash uses the faster approach only on CPU's where we know it's safe. 
516 | * 517 | * Note the preprocessor built-in defines can be emitted using: 518 | * 519 | * gcc -m64 -dM -E - < /dev/null (on gcc) 520 | * cc -## a.c (where a.c is a simple test file) (Sun Studio) 521 | */ 522 | #if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86)) 523 | #define MUR_GETBLOCK(p,i) p[i] 524 | #else /* non intel */ 525 | #define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 0x3) == 0) 526 | #define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 0x3) == 1) 527 | #define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 0x3) == 2) 528 | #define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 0x3) == 3) 529 | #define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL)) 530 | #if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__)) 531 | #define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24)) 532 | #define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16)) 533 | #define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8)) 534 | #else /* assume little endian non-intel */ 535 | #define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24)) 536 | #define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16)) 537 | #define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8)) 538 | #endif 539 | #define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \ 540 | (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \ 541 | (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) : \ 542 | MUR_ONE_THREE(p)))) 543 | #endif 544 | #define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) 545 | #define MUR_FMIX(_h) \ 546 | do { \ 547 | _h ^= _h >> 16; \ 548 | _h *= 0x85ebca6b; \ 549 | _h ^= _h >> 13; \ 550 | _h *= 0xc2b2ae35l; \ 551 | _h ^= _h >> 16; \ 552 | } while(0) 553 | 554 | #define HASH_MUR(key,keylen,num_bkts,hashv,bkt) \ 555 | do { \ 556 | const uint8_t *_mur_data = (const uint8_t*)(key); \ 557 | const int _mur_nblocks = (keylen) / 4; \ 558 | uint32_t _mur_h1 = 0xf88D5353; \ 559 | uint32_t _mur_c1 = 0xcc9e2d51; \ 560 | uint32_t _mur_c2 = 0x1b873593; \ 561 | uint32_t _mur_k1 = 0; \ 562 | const uint8_t *_mur_tail; \ 563 | const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+_mur_nblocks*4); \ 564 | int _mur_i; \ 565 | for(_mur_i = -_mur_nblocks; _mur_i; _mur_i++) { \ 566 | _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \ 567 | _mur_k1 *= _mur_c1; \ 568 | _mur_k1 = MUR_ROTL32(_mur_k1,15); \ 569 | _mur_k1 *= _mur_c2; \ 570 | \ 571 | _mur_h1 ^= _mur_k1; \ 572 | _mur_h1 = MUR_ROTL32(_mur_h1,13); \ 573 | _mur_h1 = _mur_h1*5+0xe6546b64; \ 574 | } \ 575 | _mur_tail = (const uint8_t*)(_mur_data + _mur_nblocks*4); \ 576 | _mur_k1=0; \ 577 | switch((keylen) & 3) { \ 578 | case 3: _mur_k1 ^= _mur_tail[2] << 16; \ 579 | case 2: _mur_k1 ^= _mur_tail[1] << 8; \ 580 | case 1: _mur_k1 ^= _mur_tail[0]; \ 581 | _mur_k1 *= _mur_c1; \ 582 | _mur_k1 = MUR_ROTL32(_mur_k1,15); \ 583 | _mur_k1 *= _mur_c2; \ 584 | _mur_h1 ^= _mur_k1; \ 585 | } \ 586 | _mur_h1 ^= (keylen); \ 587 | MUR_FMIX(_mur_h1); \ 588 | hashv = _mur_h1; \ 589 | bkt = hashv & (num_bkts-1); \ 590 | } while(0) 591 | #endif /* HASH_USING_NO_STRICT_ALIASING */ 592 | 593 | /* key comparison function; return 0 if keys equal */ 594 | #define HASH_KEYCMP(a,b,len) memcmp(a,b,len) 595 | 596 | /* iterate over items in a known bucket to find desired item */ 597 | #define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,out) \ 598 | do { \ 599 | if (head.hh_head) 
DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,head.hh_head)); \ 600 | else out=NULL; \ 601 | while (out) { \ 602 | if ((out)->hh.keylen == keylen_in) { \ 603 | if ((HASH_KEYCMP((out)->hh.key,keyptr,keylen_in)) == 0) break; \ 604 | } \ 605 | if ((out)->hh.hh_next) DECLTYPE_ASSIGN(out,ELMT_FROM_HH(tbl,(out)->hh.hh_next)); \ 606 | else out = NULL; \ 607 | } \ 608 | } while(0) 609 | 610 | /* add an item to a bucket */ 611 | #define HASH_ADD_TO_BKT(head,addhh) \ 612 | do { \ 613 | head.count++; \ 614 | (addhh)->hh_next = head.hh_head; \ 615 | (addhh)->hh_prev = NULL; \ 616 | if (head.hh_head) { (head).hh_head->hh_prev = (addhh); } \ 617 | (head).hh_head=addhh; \ 618 | if (head.count >= ((head.expand_mult+1) * HASH_BKT_CAPACITY_THRESH) \ 619 | && (addhh)->tbl->noexpand != 1) { \ 620 | HASH_EXPAND_BUCKETS((addhh)->tbl); \ 621 | } \ 622 | } while(0) 623 | 624 | /* remove an item from a given bucket */ 625 | #define HASH_DEL_IN_BKT(hh,head,hh_del) \ 626 | (head).count--; \ 627 | if ((head).hh_head == hh_del) { \ 628 | (head).hh_head = hh_del->hh_next; \ 629 | } \ 630 | if (hh_del->hh_prev) { \ 631 | hh_del->hh_prev->hh_next = hh_del->hh_next; \ 632 | } \ 633 | if (hh_del->hh_next) { \ 634 | hh_del->hh_next->hh_prev = hh_del->hh_prev; \ 635 | } 636 | 637 | /* Bucket expansion has the effect of doubling the number of buckets 638 | * and redistributing the items into the new buckets. Ideally the 639 | * items will distribute more or less evenly into the new buckets 640 | * (the extent to which this is true is a measure of the quality of 641 | * the hash function as it applies to the key domain). 642 | * 643 | * With the items distributed into more buckets, the chain length 644 | * (item count) in each bucket is reduced. Thus by expanding buckets 645 | * the hash keeps a bound on the chain length. This bounded chain 646 | * length is the essence of how a hash provides constant time lookup. 647 | * 648 | * The calculation of tbl->ideal_chain_maxlen below deserves some 649 | * explanation. First, keep in mind that we're calculating the ideal 650 | * maximum chain length based on the *new* (doubled) bucket count. 651 | * In fractions this is just n/b (n=number of items,b=new num buckets). 652 | * Since the ideal chain length is an integer, we want to calculate 653 | * ceil(n/b). We don't depend on floating point arithmetic in this 654 | * hash, so to calculate ceil(n/b) with integers we could write 655 | * 656 | * ceil(n/b) = (n/b) + ((n%b)?1:0) 657 | * 658 | * and in fact a previous version of this hash did just that. 659 | * But now we have improved things a bit by recognizing that b is 660 | * always a power of two. We keep its base 2 log handy (call it lb), 661 | * so now we can write this with a bit shift and logical AND: 662 | * 663 | * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) 664 | * 665 | */ 666 | #define HASH_EXPAND_BUCKETS(tbl) \ 667 | do { \ 668 | unsigned _he_bkt; \ 669 | unsigned _he_bkt_i; \ 670 | struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ 671 | UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ 672 | _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ 673 | 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ 674 | if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \ 675 | memset(_he_new_buckets, 0, \ 676 | 2 * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ 677 | tbl->ideal_chain_maxlen = \ 678 | (tbl->num_items >> (tbl->log2_num_buckets+1)) + \ 679 | ((tbl->num_items & ((tbl->num_buckets*2)-1)) ? 
1 : 0); \ 680 | tbl->nonideal_items = 0; \ 681 | for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \ 682 | { \ 683 | _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \ 684 | while (_he_thh) { \ 685 | _he_hh_nxt = _he_thh->hh_next; \ 686 | HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2, _he_bkt); \ 687 | _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \ 688 | if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \ 689 | tbl->nonideal_items++; \ 690 | _he_newbkt->expand_mult = _he_newbkt->count / \ 691 | tbl->ideal_chain_maxlen; \ 692 | } \ 693 | _he_thh->hh_prev = NULL; \ 694 | _he_thh->hh_next = _he_newbkt->hh_head; \ 695 | if (_he_newbkt->hh_head) _he_newbkt->hh_head->hh_prev = \ 696 | _he_thh; \ 697 | _he_newbkt->hh_head = _he_thh; \ 698 | _he_thh = _he_hh_nxt; \ 699 | } \ 700 | } \ 701 | uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \ 702 | tbl->num_buckets *= 2; \ 703 | tbl->log2_num_buckets++; \ 704 | tbl->buckets = _he_new_buckets; \ 705 | tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \ 706 | (tbl->ineff_expands+1) : 0; \ 707 | if (tbl->ineff_expands > 1) { \ 708 | tbl->noexpand=1; \ 709 | uthash_noexpand_fyi(tbl); \ 710 | } \ 711 | uthash_expand_fyi(tbl); \ 712 | } while(0) 713 | 714 | 715 | /* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ 716 | /* Note that HASH_SORT assumes the hash handle name to be hh. 717 | * HASH_SRT was added to allow the hash handle name to be passed in. */ 718 | #define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) 719 | #define HASH_SRT(hh,head,cmpfcn) \ 720 | do { \ 721 | unsigned _hs_i; \ 722 | unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \ 723 | struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \ 724 | if (head) { \ 725 | _hs_insize = 1; \ 726 | _hs_looping = 1; \ 727 | _hs_list = &((head)->hh); \ 728 | while (_hs_looping) { \ 729 | _hs_p = _hs_list; \ 730 | _hs_list = NULL; \ 731 | _hs_tail = NULL; \ 732 | _hs_nmerges = 0; \ 733 | while (_hs_p) { \ 734 | _hs_nmerges++; \ 735 | _hs_q = _hs_p; \ 736 | _hs_psize = 0; \ 737 | for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \ 738 | _hs_psize++; \ 739 | _hs_q = (UT_hash_handle*)((_hs_q->next) ? \ 740 | ((void*)((char*)(_hs_q->next) + \ 741 | (head)->hh.tbl->hho)) : NULL); \ 742 | if (! (_hs_q) ) break; \ 743 | } \ 744 | _hs_qsize = _hs_insize; \ 745 | while ((_hs_psize > 0) || ((_hs_qsize > 0) && _hs_q )) { \ 746 | if (_hs_psize == 0) { \ 747 | _hs_e = _hs_q; \ 748 | _hs_q = (UT_hash_handle*)((_hs_q->next) ? \ 749 | ((void*)((char*)(_hs_q->next) + \ 750 | (head)->hh.tbl->hho)) : NULL); \ 751 | _hs_qsize--; \ 752 | } else if ( (_hs_qsize == 0) || !(_hs_q) ) { \ 753 | _hs_e = _hs_p; \ 754 | if (_hs_p){ \ 755 | _hs_p = (UT_hash_handle*)((_hs_p->next) ? \ 756 | ((void*)((char*)(_hs_p->next) + \ 757 | (head)->hh.tbl->hho)) : NULL); \ 758 | } \ 759 | _hs_psize--; \ 760 | } else if (( \ 761 | cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \ 762 | DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \ 763 | ) <= 0) { \ 764 | _hs_e = _hs_p; \ 765 | if (_hs_p){ \ 766 | _hs_p = (UT_hash_handle*)((_hs_p->next) ? \ 767 | ((void*)((char*)(_hs_p->next) + \ 768 | (head)->hh.tbl->hho)) : NULL); \ 769 | } \ 770 | _hs_psize--; \ 771 | } else { \ 772 | _hs_e = _hs_q; \ 773 | _hs_q = (UT_hash_handle*)((_hs_q->next) ? \ 774 | ((void*)((char*)(_hs_q->next) + \ 775 | (head)->hh.tbl->hho)) : NULL); \ 776 | _hs_qsize--; \ 777 | } \ 778 | if ( _hs_tail ) { \ 779 | _hs_tail->next = ((_hs_e) ? 
\ 780 | ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \ 781 | } else { \ 782 | _hs_list = _hs_e; \ 783 | } \ 784 | if (_hs_e) { \ 785 | _hs_e->prev = ((_hs_tail) ? \ 786 | ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \ 787 | } \ 788 | _hs_tail = _hs_e; \ 789 | } \ 790 | _hs_p = _hs_q; \ 791 | } \ 792 | if (_hs_tail){ \ 793 | _hs_tail->next = NULL; \ 794 | } \ 795 | if ( _hs_nmerges <= 1 ) { \ 796 | _hs_looping=0; \ 797 | (head)->hh.tbl->tail = _hs_tail; \ 798 | DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \ 799 | } \ 800 | _hs_insize *= 2; \ 801 | } \ 802 | HASH_FSCK(hh,head); \ 803 | } \ 804 | } while (0) 805 | 806 | /* This function selects items from one hash into another hash. 807 | * The end result is that the selected items have dual presence 808 | * in both hashes. There is no copy of the items made; rather 809 | * they are added into the new hash through a secondary hash 810 | * hash handle that must be present in the structure. */ 811 | #define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ 812 | do { \ 813 | unsigned _src_bkt, _dst_bkt; \ 814 | void *_last_elt=NULL, *_elt; \ 815 | UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ 816 | ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \ 817 | if (src) { \ 818 | for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ 819 | for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ 820 | _src_hh; \ 821 | _src_hh = _src_hh->hh_next) { \ 822 | _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ 823 | if (cond(_elt)) { \ 824 | _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \ 825 | _dst_hh->key = _src_hh->key; \ 826 | _dst_hh->keylen = _src_hh->keylen; \ 827 | _dst_hh->hashv = _src_hh->hashv; \ 828 | _dst_hh->prev = _last_elt; \ 829 | _dst_hh->next = NULL; \ 830 | if (_last_elt_hh) { _last_elt_hh->next = _elt; } \ 831 | if (!dst) { \ 832 | DECLTYPE_ASSIGN(dst,_elt); \ 833 | HASH_MAKE_TABLE(hh_dst,dst); \ 834 | } else { \ 835 | _dst_hh->tbl = (dst)->hh_dst.tbl; \ 836 | } \ 837 | HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ 838 | HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \ 839 | (dst)->hh_dst.tbl->num_items++; \ 840 | _last_elt = _elt; \ 841 | _last_elt_hh = _dst_hh; \ 842 | } \ 843 | } \ 844 | } \ 845 | } \ 846 | HASH_FSCK(hh_dst,dst); \ 847 | } while (0) 848 | 849 | #define HASH_CLEAR(hh,head) \ 850 | do { \ 851 | if (head) { \ 852 | uthash_free((head)->hh.tbl->buckets, \ 853 | (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \ 854 | HASH_BLOOM_FREE((head)->hh.tbl); \ 855 | uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ 856 | (head)=NULL; \ 857 | } \ 858 | } while(0) 859 | 860 | #define HASH_OVERHEAD(hh,head) \ 861 | (size_t)((((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \ 862 | ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \ 863 | (sizeof(UT_hash_table)) + \ 864 | (HASH_BLOOM_BYTELEN))) 865 | 866 | #ifdef NO_DECLTYPE 867 | #define HASH_ITER(hh,head,el,tmp) \ 868 | for((el)=(head), (*(char**)(&(tmp)))=(char*)((head)?(head)->hh.next:NULL); \ 869 | el; (el)=(tmp),(*(char**)(&(tmp)))=(char*)((tmp)?(tmp)->hh.next:NULL)) 870 | #else 871 | #define HASH_ITER(hh,head,el,tmp) \ 872 | for((el)=(head),(tmp)=DECLTYPE(el)((head)?(head)->hh.next:NULL); \ 873 | el; (el)=(tmp),(tmp)=DECLTYPE(el)((tmp)?(tmp)->hh.next:NULL)) 874 | #endif 875 | 876 | /* obtain a count of items in the hash */ 877 | #define HASH_COUNT(head) HASH_CNT(hh,head) 878 | #define HASH_CNT(hh,head) ((head)?((head)->hh.tbl->num_items):0) 879 
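The macros above (HASH_ADD_INT, HASH_FIND_INT, HASH_ITER, HASH_DEL and friends) are easiest to follow with a caller in view. Below is a minimal sketch of typical uthash usage; the struct, field and function names (cache_entry, id, value, demo) are invented for illustration and are not part of uthash.h or of e2qc itself.

```
#include <stdio.h>
#include <stdlib.h>
#include "uthash.h"

struct cache_entry {
    int id;                /* key field */
    const char *value;     /* payload */
    UT_hash_handle hh;     /* embedded handle: makes this struct hashable */
};

static struct cache_entry *entries = NULL;   /* head pointer must start out NULL */

static void demo(void)
{
    struct cache_entry *e = malloc(sizeof(*e));
    struct cache_entry *found = NULL, *tmp = NULL;
    int wanted = 42;

    if (e == NULL)
        return;                              /* error handling elided */
    e->id = 42;
    e->value = "hello";
    HASH_ADD_INT(entries, id, e);            /* add, keyed on the int field 'id' */

    HASH_FIND_INT(entries, &wanted, found);  /* found == e after this lookup */
    if (found != NULL)
        printf("%d -> %s (%u item(s))\n", found->id, found->value,
            HASH_COUNT(entries));

    HASH_ITER(hh, entries, found, tmp) {     /* deletion-safe iteration */
        HASH_DEL(entries, found);
        free(found);
    }
}
```

HASH_ADD_INT and HASH_FIND_INT are the integer-keyed shorthands for HASH_ADD/HASH_FIND defined earlier in this header; the only structural requirement on the caller is the embedded UT_hash_handle field (conventionally named hh) and a head pointer initialised to NULL.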
| 880 | typedef struct UT_hash_bucket { 881 | struct UT_hash_handle *hh_head; 882 | unsigned count; 883 | 884 | /* expand_mult is normally set to 0. In this situation, the max chain length 885 | * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If 886 | * the bucket's chain exceeds this length, bucket expansion is triggered). 887 | * However, setting expand_mult to a non-zero value delays bucket expansion 888 | * (that would be triggered by additions to this particular bucket) 889 | * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. 890 | * (The multiplier is simply expand_mult+1). The whole idea of this 891 | * multiplier is to reduce bucket expansions, since they are expensive, in 892 | * situations where we know that a particular bucket tends to be overused. 893 | * It is better to let its chain length grow to a longer yet-still-bounded 894 | * value, than to do an O(n) bucket expansion too often. 895 | */ 896 | unsigned expand_mult; 897 | 898 | } UT_hash_bucket; 899 | 900 | /* random signature used only to find hash tables in external analysis */ 901 | #define HASH_SIGNATURE 0xa0111fe1 902 | #define HASH_BLOOM_SIGNATURE 0xb12220f2 903 | 904 | typedef struct UT_hash_table { 905 | UT_hash_bucket *buckets; 906 | unsigned num_buckets, log2_num_buckets; 907 | unsigned num_items; 908 | struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ 909 | ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ 910 | 911 | /* in an ideal situation (all buckets used equally), no bucket would have 912 | * more than ceil(#items/#buckets) items. that's the ideal chain length. */ 913 | unsigned ideal_chain_maxlen; 914 | 915 | /* nonideal_items is the number of items in the hash whose chain position 916 | * exceeds the ideal chain maxlen. these items pay the penalty for an uneven 917 | * hash distribution; reaching them in a chain traversal takes >ideal steps */ 918 | unsigned nonideal_items; 919 | 920 | /* ineffective expands occur when a bucket doubling was performed, but 921 | * afterward, more than half the items in the hash had nonideal chain 922 | * positions. If this happens on two consecutive expansions we inhibit any 923 | * further expansion, as it's not helping; this happens when the hash 924 | * function isn't a good fit for the key domain. When expansion is inhibited 925 | * the hash will still work, albeit no longer in constant time. 
*/ 926 | unsigned ineff_expands, noexpand; 927 | 928 | uint32_t signature; /* used only to find hash tables in external analysis */ 929 | #ifdef HASH_BLOOM 930 | uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ 931 | uint8_t *bloom_bv; 932 | char bloom_nbits; 933 | #endif 934 | 935 | } UT_hash_table; 936 | 937 | typedef struct UT_hash_handle { 938 | struct UT_hash_table *tbl; 939 | void *prev; /* prev element in app order */ 940 | void *next; /* next element in app order */ 941 | struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ 942 | struct UT_hash_handle *hh_next; /* next hh in bucket order */ 943 | void *key; /* ptr to enclosing struct's key */ 944 | unsigned keylen; /* enclosing struct's key len */ 945 | unsigned hashv; /* result of hash-fcn(key) */ 946 | } UT_hash_handle; 947 | 948 | #endif /* UTHASH_H */ 949 | -------------------------------------------------------------------------------- /rebar.config: -------------------------------------------------------------------------------- 1 | {port_env, [ 2 | {"CFLAGS", "$CFLAGS -O2"} 3 | ]}. 4 | {port_specs, [ 5 | {"priv/e2qc_nif.so", ["c_src/*.c"]} 6 | ]}. 7 | -------------------------------------------------------------------------------- /src/e2qc.app.src: -------------------------------------------------------------------------------- 1 | {application, e2qc, 2 | [ 3 | {description, "2q cache"}, 4 | {vsn, "1.0"}, 5 | {registered, []}, 6 | {included_applications, [ 7 | ]}, 8 | {applications, [ 9 | kernel, 10 | stdlib 11 | ]}, 12 | {start_phases, []}, 13 | {env, []} 14 | ]}. 15 | -------------------------------------------------------------------------------- /src/e2qc.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% e2qc erlang cache 3 | %% 4 | %% Copyright 2014 Alex Wilson , The University of Queensland 5 | %% All rights reserved. 6 | %% 7 | %% Redistribution and use in source and binary forms, with or without 8 | %% modification, are permitted provided that the following conditions 9 | %% are met: 10 | %% 1. Redistributions of source code must retain the above copyright 11 | %% notice, this list of conditions and the following disclaimer. 12 | %% 2. Redistributions in binary form must reproduce the above copyright 13 | %% notice, this list of conditions and the following disclaimer in the 14 | %% documentation and/or other materials provided with the distribution. 15 | %% 16 | %% THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 | %% IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | %% OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 | %% IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | %% INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 | %% NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 | %% DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 | %% THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | %% (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 | %% THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | %% 27 | 28 | %% @author Alex Wilson 29 | %% @doc e2qc public API 30 | -module(e2qc). 31 | 32 | -ifdef(TEST). 33 | -include_lib("eunit/include/eunit.hrl"). 34 | -endif. 35 | 36 | -export([cache/3, cache/4, setup/2, stats/1, evict/2, teardown/1]). 37 | 38 | -define(DEFAULT_MAX_SIZE, 4*1024*1024). 
39 | -define(DEFAULT_Q1_MIN_SIZE, round(0.3 * ?DEFAULT_MAX_SIZE)). 40 | 41 | %% @doc Cache an operation using the given key. 42 | %% 43 | %% ValFun is a zero-argument fun that computes some expensive operation 44 | %% and returns any term. The call to cache/3 will return that term, either 45 | %% by running the fun, or consulting the named Cache for Key. 46 | -spec cache(Cache :: atom(), Key :: term(), ValFun :: function()) -> term(). 47 | cache(Cache, Key, ValFun) -> 48 | KeyBin = key_to_bin(Key), 49 | case e2qc_nif:get(Cache, KeyBin) of 50 | B when is_binary(B) -> bin_to_val(B); 51 | notfound -> 52 | Val = ValFun(), 53 | ValBin = val_to_bin(Val), 54 | ok = e2qc_nif:put(Cache, KeyBin, ValBin, 55 | ?DEFAULT_MAX_SIZE, ?DEFAULT_Q1_MIN_SIZE), 56 | Val 57 | end. 58 | 59 | %% @doc Cache an operation using the given key with a timeout. 60 | %% 61 | %% As for e2qc:cache/3, but the Lifetime argument contains a number of seconds for 62 | %% which this cache entry should remain valid. After Lifetime seconds have elapsed, 63 | %% the entry is automatically evicted and will be recalculated if a miss occurs. 64 | -spec cache(Cache :: atom(), Key :: term(), Lifetime :: integer(), ValFun :: function()) -> term(). 65 | cache(Cache, Key, Lifetime, ValFun) -> 66 | KeyBin = key_to_bin(Key), 67 | case e2qc_nif:get(Cache, KeyBin) of 68 | B when is_binary(B) -> bin_to_val(B); 69 | notfound -> 70 | Val = ValFun(), 71 | ValBin = val_to_bin(Val), 72 | ok = e2qc_nif:put(Cache, KeyBin, ValBin, 73 | ?DEFAULT_MAX_SIZE, ?DEFAULT_Q1_MIN_SIZE, Lifetime), 74 | Val 75 | end. 76 | 77 | %% @doc Remove an entry from a cache. 78 | -spec evict(Cache :: atom(), Key :: term()) -> ok | notfound. 79 | evict(Cache, Key) -> 80 | KeyBin = key_to_bin(Key), 81 | e2qc_nif:destroy(Cache, KeyBin). 82 | 83 | %% @doc Tear-down a cache, destroying all entries and settings. 84 | -spec teardown(Cache :: atom()) -> ok | notfound. 85 | teardown(Cache) -> 86 | e2qc_nif:destroy(Cache). 87 | 88 | -type max_size_setting() :: {size | max_size, Bytes :: integer()}. 89 | -type q1_size_setting() :: {ratio, Ratio :: float()} | {min_q1_size, Bytes :: integer()}. 90 | -type setting() :: max_size_setting() | q1_size_setting(). 91 | 92 | %% @doc Configure a cache with given settings. 93 | -spec setup(Cache :: atom(), Config :: [setting()]) -> ok. 94 | setup(Cache, Config) -> 95 | {MaxSize, MinQ1Size} = process_settings(Config), 96 | case e2qc_nif:create(Cache, MaxSize, MinQ1Size) of 97 | already_exists -> error(already_exists); 98 | ok -> ok 99 | end. 100 | 101 | -type cache_stat() :: {hits | misses | q1size | q2size | incrq_size | wakeups | dud_wakeups, Value :: integer()}. 102 | 103 | %% @doc Gather some basic statistics about a cache. 104 | -spec stats(Cache :: atom()) -> [cache_stat()]. 105 | stats(Cache) -> 106 | case e2qc_nif:stats(Cache) of 107 | notfound -> [{hits, 0}, {misses, 0}, {q1size, 0}, {q2size, 0}, {incrq_size, 0}, {wakeups, 0}, {dud_wakeups, 0}]; 108 | {Hits, Misses, Q1Size, Q2Size, IncrQSize, Wakeups, DudWakeups} -> 109 | [{hits, Hits}, {misses, Misses}, {q1size, Q1Size}, {q2size, Q2Size}, {incrq_size, IncrQSize}, {wakeups, Wakeups}, {dud_wakeups, DudWakeups}] 110 | end. 111 | 112 | %% @private 113 | -spec process_settings([setting()]) -> {MaxSize :: integer(), MinQ1Size :: integer()}. 
114 | process_settings(Config) -> 115 | MaxSize = proplists:get_value(max_size, Config, 116 | proplists:get_value(size, Config, ?DEFAULT_MAX_SIZE)), 117 | MinQ1Size = case proplists:get_value(min_q1_size, Config) of 118 | undefined -> 119 | R = proplists:get_value(ratio, Config, 0.3), 120 | round(R * MaxSize); 121 | V when is_integer(V) -> V; 122 | V when is_float(V) -> round(V) 123 | end, 124 | {MaxSize, MinQ1Size}. 125 | 126 | %% @private 127 | -spec key_to_bin(term()) -> binary(). 128 | key_to_bin(Key) when is_binary(Key) -> 129 | Key; 130 | key_to_bin(Key) when is_integer(Key) and (Key >= 0) -> 131 | binary:encode_unsigned(Key); 132 | key_to_bin(Key) -> 133 | term_to_binary(Key). 134 | 135 | %% @private 136 | -spec val_to_bin(term()) -> binary(). 137 | val_to_bin(V) when is_binary(V) -> 138 | <<1, V/binary>>; 139 | val_to_bin(V) -> 140 | VBin = term_to_binary(V), 141 | <<2, VBin/binary>>. 142 | 143 | %% @private 144 | -spec bin_to_val(binary()) -> term(). 145 | bin_to_val(<<1, V/binary>>) -> 146 | V; 147 | bin_to_val(<<2, V/binary>>) -> 148 | binary_to_term(V). 149 | 150 | -ifdef(TEST). 151 | 152 | settings_test() -> 153 | ?assertMatch({100, A} when is_integer(A), process_settings([{size, 100}])), 154 | ?assertMatch({100, A} when is_integer(A), process_settings([{max_size, 100}])), 155 | ?assertMatch({100, 30}, process_settings([{size, 100}, {ratio, 0.3}])), 156 | ?assertMatch({100, 41}, process_settings([{size, 100}, {min_q1_size, 41}])). 157 | 158 | cache_miss_test() -> 159 | ?assertMatch(notfound, e2qc_nif:get(cache_miss, key_to_bin(1500))), 160 | ?assertMatch({foo, bar}, cache(cache_miss, 1500, fun() -> {foo, bar} end)), 161 | ?assertMatch(B when is_binary(B), e2qc_nif:get(cache_miss, key_to_bin(1500))). 162 | 163 | cache_hit_test() -> 164 | ?assertMatch({foo, bar}, cache(cache_hit, 1500, fun() -> {foo, bar} end)), 165 | ?assertMatch({foo, bar}, cache(cache_hit, 1500, fun() -> {notfoo, notbar} end)). 166 | 167 | factorial(1) -> 1; 168 | factorial(N) when is_integer(N) and (N > 1) -> N * factorial(N-1). 169 | 170 | slow_func(K) -> 171 | [math:sqrt(factorial(K)) || _ <- lists:seq(1,200)]. 172 | cache_slow_func(K) -> 173 | e2qc:cache(slow_func, K, fun() -> 174 | slow_func(K) 175 | end). 176 | mean(List) -> lists:sum(List) / length(List). 177 | dev(List) -> U = mean(List), mean([(N-U)*(N-U) || N <- List]). 178 | bench(Nums) -> 179 | e2qc_nif:destroy(slow_func), 180 | T1 = os:timestamp(), 181 | [slow_func(N) || N <- Nums], 182 | T2 = os:timestamp(), 183 | [cache_slow_func(N) || N <- Nums], 184 | T3 = os:timestamp(), 185 | {timer:now_diff(T2, T1) / length(Nums), 186 | timer:now_diff(T3, T2) / length(Nums)}. 
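%% A note on the statistics used below: dev/1 above returns the population
%% variance (mean squared deviation), not a standard deviation. With U1/U2
%% the mean per-call times with and without the cache and S1/S2 their
%% variances over the N benchmark iterations, bench_t_tester/0 computes
%% Welch's two-sample t-statistic
%%     T = (U1 - U2) / sqrt(S1/N + S2/N)
%% and the Welch-Satterthwaite degrees of freedom
%%     DF = (S1/N + S2/N)^2 / ((S1/N)^2/(N-1) + (S2/N)^2/(N-1)),
%% then asserts that the cached runs are significantly faster: T < 0, with
%% |T| above the tabulated ~99.9% confidence threshold for DF >= 40.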
187 | bench_t_tester() -> 188 | % generate 70 +ve ints to be keys that are vaguely normally distributed 189 | % (we just add some uniform random numbers together, it will have enough 190 | % of a hump for our purposes, see central limit theorem) 191 | Nums = [100 + round(4*lists:sum( 192 | [crypto:rand_uniform(1,1000) / 1000 || _ <- lists:seq(1,15)])) 193 | || _ <- lists:seq(1, 70)], 194 | TimesZip = [bench(Nums) || _ <- lists:seq(1,50)], 195 | {NoCacheTimes, CacheTimes} = lists:unzip(TimesZip), 196 | 197 | N = length(CacheTimes), 198 | S1 = dev(CacheTimes), 199 | S2 = dev(NoCacheTimes), 200 | U1 = mean(CacheTimes), 201 | U2 = mean(NoCacheTimes), 202 | 203 | % compute t-value 204 | T = (U1 - U2) / math:sqrt(S1/N + S2/N), 205 | DF = math:pow(S1/N + S2/N, 2) / ((S1/N)*(S1/N) / (N-1) + (S2/N)*(S2/N) / (N-1)), 206 | io:format("N = ~p, S1 = ~p, S2 = ~p, U1 = ~p, U2 = ~p, T = ~p, DF = ~p", 207 | [N, S1, S2, U1, U2, T, DF]), 208 | 209 | ?assertMatch(Df when (Df >= 40), DF), 210 | ?assertMatch(Tt when (Tt > 3.307), abs(T)), % t-value threshold for 99.9% confidence 211 | ?assert(T < 0). 212 | 213 | is_fast_test_() -> 214 | {timeout, 60, 215 | fun() -> bench_t_tester() end}. 216 | 217 | -endif. 218 | -------------------------------------------------------------------------------- /src/e2qc_nif.erl: -------------------------------------------------------------------------------- 1 | %% 2 | %% e2qc erlang cache 3 | %% 4 | %% Copyright 2014 Alex Wilson , The University of Queensland 5 | %% All rights reserved. 6 | %% 7 | %% Redistribution and use in source and binary forms, with or without 8 | %% modification, are permitted provided that the following conditions 9 | %% are met: 10 | %% 1. Redistributions of source code must retain the above copyright 11 | %% notice, this list of conditions and the following disclaimer. 12 | %% 2. Redistributions in binary form must reproduce the above copyright 13 | %% notice, this list of conditions and the following disclaimer in the 14 | %% documentation and/or other materials provided with the distribution. 15 | %% 16 | %% THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 | %% IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 | %% OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 | %% IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 | %% INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 | %% NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 | %% DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 | %% THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | %% (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 | %% THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | %% 27 | 28 | -module(e2qc_nif). 29 | 30 | -ifdef(TEST). 31 | -include_lib("eunit/include/eunit.hrl"). 32 | -endif. 33 | 34 | -export([get/2, put/3, put/5, put/6, create/3, destroy/1, destroy/2, stats/1]). 35 | 36 | -on_load(init/0). 37 | 38 | %% @private 39 | init() -> 40 | SoName = case code:priv_dir(e2qc) of 41 | {error, bad_name} -> 42 | case filelib:is_dir(filename:join(["..", priv])) of 43 | true -> 44 | filename:join(["..", priv, ?MODULE]); 45 | false -> 46 | filename:join([priv, ?MODULE]) 47 | end; 48 | Dir -> 49 | filename:join(Dir, ?MODULE) 50 | end, 51 | ok = erlang:load_nif(SoName, 0).
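%% Illustrative sketch of how the higher-level e2qc:cache/3 drives the NIFs
%% below (names here are placeholders; compute_value/0 stands in for the
%% caller's expensive fun, and the key/value are already encoded to binaries
%% by e2qc before they reach this module):
%%
%%   case e2qc_nif:get(my_cache, KeyBin) of
%%       Bin when is_binary(Bin) ->
%%           Bin;                                  % hit: cached value
%%       notfound ->
%%           ValBin = compute_value(),             % hypothetical helper
%%           ok = e2qc_nif:put(my_cache, KeyBin, ValBin, MaxSize, MinQ1Size),
%%           ValBin
%%   end.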
52 | 53 | %% @private 54 | -spec get(Cache :: atom(), Key :: binary()) -> notfound | binary(). 55 | get(_Cache, _Key) -> 56 | erlang:nif_error(badnif). 57 | 58 | %% @private 59 | %% for tests only 60 | put(Cache, Key, Val) -> 61 | put(Cache, Key, Val, 0, 0). 62 | 63 | %% @private 64 | -spec put(Cache :: atom(), Key :: binary(), Val :: binary(), MaxSize :: integer(), MinQ1Size :: integer()) -> ok. 65 | put(_Cache, _Key, _Val, _MaxSize, _MinQ1Size) -> 66 | erlang:nif_error(badnif). 67 | 68 | %% @private 69 | -spec put(Cache :: atom(), Key :: binary(), Val :: binary(), MaxSize :: integer(), MinQ1Size :: integer(), Lifetime :: integer()) -> ok. 70 | put(_Cache, _Key, _Val, _MaxSize, _MinQ1Size, _Lifetime) -> 71 | erlang:nif_error(badnif). 72 | 73 | %% @private 74 | -spec create(Cache :: atom(), MaxSize :: integer(), MinQ1Size :: integer()) -> already_exists | ok. 75 | create(_Cache, _MaxSize, _MinQ1Size) -> 76 | erlang:nif_error(badnif). 77 | 78 | %% @private 79 | -spec destroy(Cache :: atom()) -> notfound | ok. 80 | destroy(_Cache) -> 81 | erlang:nif_error(badnif). 82 | 83 | %% @private 84 | -spec destroy(Cache :: atom(), Key :: binary()) -> notfound | ok. 85 | destroy(_Cache, _Key) -> 86 | erlang:nif_error(badnif). 87 | 88 | %% @private 89 | -spec stats(Cache :: atom()) -> notfound | {Hits :: integer(), Misses :: integer(), Q1Size :: integer(), Q2Size :: integer(), IncrQSize :: integer(), Wakeups :: integer(), DudWakeups :: integer()}. 90 | stats(_Cache) -> 91 | erlang:nif_error(badnif). 92 | 93 | -ifdef(TEST). 94 | 95 | get_cache_notfound_test() -> 96 | ?assertMatch(notfound, get(invalid_cache, <<"foo">>)). 97 | get_key_notfound_test() -> 98 | ok = create(get_key_test, 1024, 512), 99 | ?assertMatch(notfound, get(get_key_test, <<"foo">>)). 100 | put_implicit_create_test() -> 101 | ?assertMatch(notfound, stats(put_implicit_create)), 102 | ?assertMatch(ok, put(put_implicit_create, <<"foo">>, <<"bar">>)), 103 | ?assertMatch(T when is_tuple(T), stats(put_implicit_create)). 104 | put_then_get_test() -> 105 | ?assertMatch(ok, put(put_then_get, <<"foo">>, <<"bar">>, 1024, 512)), 106 | ?assertMatch(<<"bar">>, get(put_then_get, <<"foo">>)). 107 | 108 | put_evict_q1_test() -> 109 | ok = create(put_evict_q1, 20, 10), 110 | [ok = put(put_evict_q1, <<N>>, <<N>>) || N <- lists:seq(1,10)], 111 | % these gets will promote 1 and 10 to q2, so <<2>> will be first 112 | % to be evicted 113 | ?assertMatch(<<1>>, get(put_evict_q1, <<1>>)), 114 | ?assertMatch(<<10>>, get(put_evict_q1, <<10>>)), 115 | ok = put(put_evict_q1, <<11>>, <<11>>), 116 | % 1s should always be enough for the bg_thread to wake up 117 | % (usually happens within 1ms or so) 118 | timer:sleep(2000), 119 | ok = put(put_evict_q1, <<12>>, <<12>>), 120 | timer:sleep(2000), 121 | ?assertMatch(notfound, get(put_evict_q1, <<2>>)), 122 | ?assertMatch(<<1>>, get(put_evict_q1, <<1>>)), 123 | ?assertMatch(<<10>>, get(put_evict_q1, <<10>>)), 124 | ?assertMatch(<<11>>, get(put_evict_q1, <<11>>)).
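%% The eviction tests above and below rely on the 2Q behaviour the NIF
%% implements, roughly: a put places the entry on Q1 in insertion order, a
%% later get promotes it to Q2, and once the cache exceeds MaxSize the
%% background thread evicts from the front of Q1 while Q1 is still larger
%% than MinQ1Size, and otherwise from the least-recently-used end of Q2.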
125 | put_evict_q2_test() -> 126 | ok = create(put_evict_q2, 20, 10), 127 | % fill q1 with entries 128 | [ok = put(put_evict_q2, <<N>>, <<N>>) || N <- lists:seq(1,10)], 129 | % promote them all to q2 130 | [<<N>> = get(put_evict_q2, <<N>>) || N <- lists:seq(1,10)], 131 | % now add an extra to q1 (q1 will be < min_q1_size) 132 | ok = put(put_evict_q2, <<11>>, <<11>>), 133 | timer:sleep(2000), 134 | ok = put(put_evict_q2, <<12>>, <<12>>), 135 | timer:sleep(2000), 136 | % we should have evicted the least recently used thing on q2, 137 | % which will be <<1>> 138 | ?assertMatch(notfound, get(put_evict_q2, <<1>>)), 139 | ?assertMatch(<<3>>, get(put_evict_q2, <<3>>)), 140 | ?assertMatch(<<11>>, get(put_evict_q2, <<11>>)). 141 | 142 | expand_test() -> 143 | ok = create(expand, 20, 10), 144 | [ok = put(expand, <<N>>, <<N>>) || N <- lists:seq(1,10)], 145 | % these gets will promote 1 and 10 to q2, so <<2>> will be first 146 | % to be evicted 147 | ?assertMatch(<<1>>, get(expand, <<1>>)), 148 | ?assertMatch(<<10>>, get(expand, <<10>>)), 149 | ok = create(expand, 50, 20), 150 | ok = put(expand, <<11>>, <<11>>), 151 | % 1s should always be enough for the bg_thread to wake up 152 | % (usually happens within 1ms or so) 153 | timer:sleep(2000), 154 | ok = put(expand, <<12>>, <<12>>), 155 | timer:sleep(2000), 156 | ?assertMatch(<<2>>, get(expand, <<2>>)), 157 | ?assertMatch(<<1>>, get(expand, <<1>>)), 158 | ?assertMatch(<<10>>, get(expand, <<10>>)), 159 | ?assertMatch(<<11>>, get(expand, <<11>>)). 160 | 161 | destroy_key_test() -> 162 | ok = create(destroy_key, 20, 10), 163 | ?assertMatch(notfound, destroy(destroy_key, <<"foo">>)), 164 | ok = put(destroy_key, <<"foo">>, <<"bar">>), 165 | ?assertMatch(<<"bar">>, get(destroy_key, <<"foo">>)), 166 | ?assertMatch(ok, destroy(destroy_key, <<"foo">>)), 167 | ?assertMatch(notfound, get(destroy_key, <<"foo">>)). 168 | 169 | put_overwrite_test() -> 170 | ok = create(put_overwrite, 20, 10), 171 | ok = put(put_overwrite, <<"foo">>, <<"bar">>), 172 | ?assertMatch(<<"bar">>, get(put_overwrite, <<"foo">>)), 173 | ok = put(put_overwrite, <<"foo">>, <<"foobar">>), 174 | ?assertMatch(<<"foobar">>, get(put_overwrite, <<"foo">>)). 175 | 176 | destroy_cache_test() -> 177 | ok = create(destroy_cache, 20, 10), 178 | ok = put(destroy_cache, <<"foo">>, <<"bar">>), 179 | ?assertMatch(ok, destroy(destroy_cache)), 180 | ?assertMatch(notfound, get(destroy_cache, <<"foo">>)), 181 | ?assertMatch(ok, create(destroy_cache, 20, 10)). 182 | 183 | timed_expiry_test() -> 184 | ok = create(timed_expiry, 20, 10), 185 | ok = put(timed_expiry, <<"foo">>, <<"bar">>, 20, 10, 1), 186 | ?assertMatch(<<"bar">>, get(timed_expiry, <<"foo">>)), 187 | timer:sleep(2000), 188 | ?assertMatch(notfound, get(timed_expiry, <<"foo">>)), 189 | ok = put(timed_expiry, <<1>>, <<1>>), 190 | timer:sleep(2000), 191 | ?assertMatch({_, _, Q1, Q2, _, _, _} when (Q1 >= 2) and (Q2 < 1), stats(timed_expiry)). 192 | 193 | -endif. 194 | --------------------------------------------------------------------------------