├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── slab.c
├── slab.h
└── test.c

/.gitignore:
--------------------------------------------------------------------------------
*.o
*.out
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2014 Lucas Sa

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
CC=gcc
CFLAGS=-c -O3

all: tests

debug: CC += -Wall -DDEBUG -g
debug: tests

tests: slab.o test.o
	$(CC) $^ -o tests

slab.o: slab.c slab.h
	$(CC) $(CFLAGS) $< -o $@

test.o: test.c slab.h
	$(CC) $(CFLAGS) $< -o $@

clean:
	rm -f *.o tests
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Slab Allocator
=====

This is an implementation of the Slab Allocator as described in ["The Slab Allocator: An Object-Caching Kernel Memory Allocator"](https://www.usenix.org/legacy/publications/library/proceedings/bos94/full_papers/bonwick.a) by Jeff Bonwick.
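
Building
-----

`make` builds the test binary; run it with `./tests`.

Usage
-----

A minimal sketch of the public interface (the `point` struct and the cache name below are illustrative; see `slab.h` for the full declarations):

```c
#include "slab.h"

struct point { int x, y, z; };

int main(void) {
    /* 0 selects SLAB_DEFAULT_ALIGN; constructor/destructor may be NULL */
    kmem_cache_t cp = kmem_cache_create("point", sizeof(struct point), 0, NULL, NULL);

    struct point *p = (struct point *)kmem_cache_alloc(cp, KM_NOSLEEP);
    p->x = 1;
    p->y = 2;
    p->z = 3;

    kmem_cache_free(cp, p);   /* return the object to its cache */
    kmem_cache_destroy(cp);   /* release all slabs and the cache itself */
    return 0;
}
```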
--------------------------------------------------------------------------------
/slab.c:
--------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "slab.h"

/* Creates a cache of objects.

   @name        used for reference
   @size        size of the objects
   @align       alignment boundary
   @constructor object constructor
   @destructor  object destructor

   Returns a cache pointer or NULL if no memory is available.
*/
kmem_cache_t
kmem_cache_create(char *name, size_t size, int align,
                  void (*constructor)(void *, size_t),
                  void (*destructor)(void *, size_t)) {

    kmem_cache_t cp = malloc(sizeof(struct kmem_cache));

    if (cp != NULL) {
        if (align == 0) align = SLAB_DEFAULT_ALIGN;

        cp->name = name;
        cp->size = size;
        // effective size: object size rounded up to the alignment boundary
        cp->effsize = align * ((size-1)/align + 1);
        cp->constructor = constructor;
        cp->destructor = destructor;
        cp->slabs = NULL;
        cp->slabs_back = NULL;

        // if this is for small objects
        if (cp->size <= SLAB_SMALL_OBJ_SZ) {
            cp->slab_maxbuf = (PAGE_SZ - sizeof(struct kmem_slab)) / cp->effsize;
        }
        else {
            // TODO: compute the number of objects programmatically
            cp->slab_maxbuf = 8;

            // create hash table...
            // hcreate(cp->slab_maxbuf * 100);
        }
    }

    return cp;
}

/* Grows a specified cache by adding one slab to it.

   @cp cache pointer
*/
void
kmem_cache_grow(kmem_cache_t cp) {
    void *mem;
    kmem_slab_t slab;
    void *p, *lastbuf;
    int i;
    kmem_bufctl_t bufctl;

    // if this is a small object
    if (cp->size <= SLAB_SMALL_OBJ_SZ) {
        // allocating one page
        if (0 != posix_memalign(&mem, PAGE_SZ, PAGE_SZ))
            return;

        // positioning the slab header at the end of the page
        slab = mem + PAGE_SZ - sizeof(struct kmem_slab);

        slab->next = slab->prev = slab;
        slab->bufcount = 0;
        slab->free_list = mem;

        // threading the free list through the free buffers themselves
        lastbuf = mem + (cp->effsize * (cp->slab_maxbuf-1));
        for (p=mem; p < lastbuf; p+=cp->effsize)
            *((void **)p) = p + cp->effsize;

        // completely free slab goes to the front...
        __slab_move_to_front(cp, slab);
        assert(cp->slabs == slab);

        // printf("\n%p\n%p\n%#x\n%#x\n", mem, slab, sizeof(struct kmem_slab), sizeof(struct kmem_cache));
    }
    // if this is a large object
    else {
        // allocating enough whole pages to hold slab_maxbuf buffers
        // (plus the per-page padding applied below)
        if (0 != posix_memalign(&mem, PAGE_SZ,
                                PAGE_SZ * ((cp->slab_maxbuf * cp->effsize) / PAGE_SZ + 1)))
            return;

        // allocating the slab header separately
        slab = (kmem_slab_t)malloc(sizeof(struct kmem_slab));

        // initializing the slab
        slab->next = slab->prev = slab;
        slab->bufcount = 0;

        // one bufctl per buffer, chained into the slab free list
        bufctl = (kmem_bufctl_t)malloc(sizeof(struct kmem_bufctl) * cp->slab_maxbuf);
        bufctl[0].next = NULL;
        bufctl[0].buf = mem;
        bufctl[0].slab = slab;
        slab->start = &bufctl[0];
        slab->free_list = &bufctl[0];
        // creating additional bufctls
        for (i=1; i < cp->slab_maxbuf; i++) {
            bufctl[i].next = slab->free_list;
            bufctl[i].buf = mem + (i*cp->effsize + (PAGE_SZ%cp->effsize * (((i+1)*cp->effsize)/PAGE_SZ)));
            bufctl[i].slab = slab;
            slab->free_list = &bufctl[i];
        }

        // completely free slab goes to the front...
        __slab_move_to_front(cp, slab);

        // printf("\n%p\n%p\n%#x\n%#x\n", mem, slab, sizeof(struct kmem_slab), sizeof(struct kmem_cache));
    }
}
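
/* Example (illustrative figures, not used by the code): layout of a
   small-object slab as built by kmem_cache_grow() above. One page holds
   slab_maxbuf buffers of effsize bytes, the free list is threaded through
   the free buffers themselves, and the struct kmem_slab header sits at the
   end of the page:

       mem                                                 mem + PAGE_SZ
       | buf 0 | buf 1 | ... | buf N-1 |  (slack)  | struct kmem_slab |

   With a 4096-byte page, a 12-byte object and the default 8-byte alignment:

       effsize     = 8 * ((12-1)/8 + 1) = 16
       slab_maxbuf = (4096 - sizeof(struct kmem_slab)) / 16
                     (about 253 on a typical 64-bit build)
*/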

/* Requests an allocated object from the cache.

   @cp    cache pointer
   @flags KM_SLEEP or KM_NOSLEEP

   Returns an object pointer, or NULL if the cache could not be grown.
*/
void *
kmem_cache_alloc(kmem_cache_t cp, int flags) {
    void *buf;

    // grow the cache if necessary: no slab yet, or the front slab has no free buffers
    if (cp->slabs == NULL || cp->slabs->bufcount == cp->slab_maxbuf)
        kmem_cache_grow(cp);

    // growing can fail when no memory is available
    if (cp->slabs == NULL || cp->slabs->bufcount == cp->slab_maxbuf)
        return NULL;

    // if this is a small object, the free list points directly at the buffers
    if (cp->size <= SLAB_SMALL_OBJ_SZ) {
        buf = cp->slabs->free_list;
        cp->slabs->free_list = *((void**)buf);
        cp->slabs->bufcount++;
    }
    // for large objects the free list chains bufctls
    else {
        kmem_bufctl_t bufctl = cp->slabs->free_list;
        cp->slabs->free_list = bufctl->next;
        buf = bufctl->buf;
        cp->slabs->bufcount++;
    }

    // if the slab has no free buffers left, move it to the back
    if (cp->slabs->bufcount == cp->slab_maxbuf)
        __slab_move_to_back(cp, cp->slabs);

    return buf;
}

/* Frees an allocated object back to its cache.

   @cp  cache pointer
   @buf object pointer
*/
void
kmem_cache_free(kmem_cache_t cp, void *buf) {
    void * mem;
    kmem_slab_t slab;
    // kmem_bufctl_t bufctl;

    // if this is a small object
    if (cp->size <= SLAB_SMALL_OBJ_SZ) {
        // compute the slab position from the buffer address
        // TODO: make this generic (currently assumes PAGE_SZ == 0x1000)
        mem = (void*)((long)buf >> 12 << 12);
        slab = mem + PAGE_SZ - sizeof(struct kmem_slab);

        // put the buffer back on the slab free list
        *((void **)buf) = slab->free_list;
        slab->free_list = buf;

        slab->bufcount--;

        // if the slab had no free buffers, it sits at the back of the list;
        // move it back to the front so it can serve allocations again
        if (slab->bufcount == cp->slab_maxbuf-1)
            __slab_move_to_front(cp, slab);

        // if the slab is now completely free, discard the whole page
        if (slab->bufcount == 0) {
            __slab_remove(cp, slab);
            free(mem);
        }
    }
    // if this is a large object
    else {
        // use the hash table to get to the bufctl

        // ...
        // bufctl = (kmem_bufctl_t)0x4000;
        // slab = bufctl->slab;
        // put the bufctl back on the slab free list
        // bufctl->next = slab->free_list;
        // slab->free_list = bufctl;

        // if the slab is now completely free, discard it
        // if (slab->bufcount == 0) {
        //     __slab_remove(cp, slab);
        //     free(slab->start->buf);  // free objects
        //     free(slab->start);       // free bufctls
        //     free(slab);              // free slab
        // }
    }
}
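
/* Example (addresses are made up; assumes PAGE_SZ == 0x1000, which is what the
   shift by 12 in kmem_cache_free() hardcodes): how a small-object free finds
   its slab from nothing but the buffer address:

       buf  = 0x55e3a1b42a10                      object returned by kmem_cache_alloc()
       mem  = (buf >> 12) << 12 = 0x55e3a1b42000  base of the enclosing page
       slab = mem + PAGE_SZ - sizeof(struct kmem_slab)
                                                  header that kmem_cache_grow()
                                                  placed at the end of that page
*/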

/* Destroys a specified cache, releasing all of its slabs.

   @cp cache pointer
*/
void
kmem_cache_destroy(kmem_cache_t cp) {
    kmem_slab_t slab;
    void * mem;

    if (cp->size <= SLAB_SMALL_OBJ_SZ) {
        // freeing all allocated pages
        while (cp->slabs) {
            slab = cp->slabs;
            __slab_remove(cp, slab);
            // recover the page base from the slab header placed at its end
            mem = (void*)slab - PAGE_SZ + sizeof(struct kmem_slab);
            free(mem);
        }
    }
    else {
        while (cp->slabs) {
            slab = cp->slabs;
            __slab_remove(cp, slab);
            free(slab->start->buf);   // free objects
            free(slab->start);        // free bufctls
            free(slab);               // free slab
        }
    }

    free(cp);
}


/* Internal auxiliary to remove a slab from the cache's slab list.

   @cp   cache pointer
   @slab slab pointer
*/
inline void
__slab_remove(kmem_cache_t cp, kmem_slab_t slab) {
    slab->next->prev = slab->prev;
    slab->prev->next = slab->next;

    // if front slab...
    if (cp->slabs == slab) {
        // if last slab
        if (slab->prev == slab)
            cp->slabs = NULL;
        else
            cp->slabs = slab->prev;
    }

    // if back slab
    if (cp->slabs_back == slab) {
        // if last slab
        if (slab->next == slab)
            cp->slabs_back = NULL;
        else
            cp->slabs_back = slab->next;
    }
}

/* Internal auxiliary to move a slab to the front of the cache's slab list.

   @cp   cache pointer
   @slab slab pointer
*/
inline void
__slab_move_to_front(kmem_cache_t cp, kmem_slab_t slab) {
    if (cp->slabs == slab) return;

    __slab_remove(cp, slab);

    // check if there is any slab in the cache
    if (cp->slabs == NULL) {
        slab->prev = slab;
        slab->next = slab;

        cp->slabs_back = slab;
    }
    else {
        // the list is circular: `prev` steps from the front toward the back,
        // `next` from the back toward the front
        slab->prev = cp->slabs;
        cp->slabs->next = slab;

        slab->next = cp->slabs_back;
        cp->slabs_back->prev = slab;
    }
    cp->slabs = slab;
}

/* Internal auxiliary to move a slab to the back of the cache's slab list.

   @cp   cache pointer
   @slab slab pointer
*/
inline void
__slab_move_to_back(kmem_cache_t cp, kmem_slab_t slab) {
    if (cp->slabs_back == slab) return;

    __slab_remove(cp, slab);

    // check if there is any slab in the cache
    if (cp->slabs == NULL) {
        slab->prev = slab;
        slab->next = slab;

        cp->slabs = slab;
    }
    else {
        slab->prev = cp->slabs;
        cp->slabs->next = slab;

        slab->next = cp->slabs_back;
        cp->slabs_back->prev = slab;
    }
    cp->slabs_back = slab;
}
--------------------------------------------------------------------------------
/slab.h:
--------------------------------------------------------------------------------
#ifndef SLAB_H
#define SLAB_H 1

#include <stdlib.h>
#include <unistd.h>

#define PAGE_SZ (size_t)sysconf(_SC_PAGESIZE)
#define SLAB_SMALL_OBJ_SZ (PAGE_SZ/8)
#define SLAB_DEFAULT_ALIGN 8
#define CACHE_LINE_SZ 0x40

#define KM_SLEEP 0x00
#define KM_NOSLEEP 0x01

struct kmem_bufctl;
struct kmem_slab;
struct kmem_cache;

typedef struct kmem_cache * kmem_cache_t;
typedef struct kmem_bufctl * kmem_bufctl_t;
typedef struct kmem_slab * kmem_slab_t;

struct kmem_bufctl {
    void * buf;
    kmem_bufctl_t next;
    kmem_slab_t slab;
};

struct kmem_slab {
    kmem_slab_t next;
    kmem_slab_t prev;
    kmem_bufctl_t start;
    void* free_list; /* may point to a bufctl or directly to a buffer */
    int bufcount;
};

struct kmem_cache {
    char * name;
    size_t size;
    size_t effsize;
    int slab_maxbuf;
    void (*constructor)(void *, size_t);
    void (*destructor)(void *, size_t);
    kmem_slab_t slabs;
    kmem_slab_t slabs_back;
};
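
/* How these structures fit together: a cache keeps its slabs on a circular
   doubly linked list, with `slabs` pointing at the front (slabs that still
   have free buffers) and `slabs_back` at the back (slabs whose buffers are
   all allocated). For small objects the slab header lives at the end of the
   slab's page and free_list points directly at the next free buffer; for
   large objects the slab header and an array of kmem_bufctl records are
   allocated separately, and free_list chains bufctls, each pointing back to
   its buffer and its slab. */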

kmem_cache_t
kmem_cache_create(char *name, size_t size, int align,
                  void (*constructor)(void *, size_t),
                  void (*destructor)(void *, size_t));

void *
kmem_cache_alloc(kmem_cache_t cp, int flags);

void
kmem_cache_free(kmem_cache_t cp, void *buf);

void
kmem_cache_destroy(kmem_cache_t cp);

// TODO
void
kmem_cache_grow(kmem_cache_t cp);

void
kmem_cache_reap(void);

void
__slab_remove(kmem_cache_t cp, kmem_slab_t slab);

void
__slab_move_to_front(kmem_cache_t cp, kmem_slab_t slab);

void
__slab_move_to_back(kmem_cache_t cp, kmem_slab_t slab);

#endif
--------------------------------------------------------------------------------
/test.c:
--------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include "slab.h"
#define ITERATIONS 30000000

// note: the "=A" constraint pairs edx:eax only on 32-bit x86; on x86-64 it
// does not capture the full 64-bit timestamp
#define rdtscll(val) __asm__ __volatile__("rdtsc" : "=A" (val))

/* test approach inspired by MinUnit
   http://www.jera.com/techinfo/jtns/jtn002.html */
#define assertt(message, test) do { if (!(test)) return message; } while (0)
#define run_test(test) do { char *message = test(); tests_run++; \
                            if (message) return (char*)message; } while (0)
int tests_run = 0;

static char *
test_cache_create() {
    kmem_cache_t cp = kmem_cache_create("test", 12, 0, NULL, NULL);
    assertt("cache creation returned null?", cp);

    assertt("effective size miscalculated", cp->effsize == 16);

    kmem_cache_destroy(cp);

    return 0;
}

static char *
test_cache_grow() {
    kmem_cache_t cp = kmem_cache_create("test", 12, 0, NULL, NULL);

    kmem_cache_grow(cp);

    kmem_cache_destroy(cp);

    return 0;
}

static char *
test_cache_alloc() {
    // 12-byte struct
    struct test {
        int a, b, c;
    };
    struct test * obj;

    kmem_cache_t cp = kmem_cache_create("test", sizeof(struct test), 0, NULL, NULL);

    obj = (struct test *)kmem_cache_alloc(cp, KM_NOSLEEP);

    obj->a=1;
    obj->b=1;
    obj->c=1;

    obj = (struct test *)kmem_cache_alloc(cp, KM_NOSLEEP);

    obj->a=1;
    obj->b=1;
    obj->c=1;

    kmem_cache_destroy(cp);

    return 0;
}

static char *
test_perf_cache_alloc() {
    unsigned long long start, end;
    int i;
    // 12-byte struct
    struct test {
        int a, b, c;
    };
    struct test * obj;

    kmem_cache_t cp = kmem_cache_create("test", sizeof(struct test), 0, NULL, NULL);

    rdtscll(start);
    for (i=0; i < ITERATIONS; i++)
        obj = (struct test *)kmem_cache_alloc(cp, KM_NOSLEEP);
    rdtscll(end);
    printf("kmem_cache_alloc:\t%llu cycles\n", end - start);

    obj->a = 1;

    rdtscll(start);
    for (i=0; i < ITERATIONS; i++)
        obj = (struct test *)malloc(sizeof(struct test));
    rdtscll(end);
    printf("malloc:\t\t\t%llu cycles\n", end - start);

    obj->a = 1;

    kmem_cache_destroy(cp);

    return 0;
}

static char *
test_cache_free() {
    // 12-byte struct
    struct test {
        int a, b, c;
    };
    struct test * obj;

    kmem_cache_t cp = kmem_cache_create("test", sizeof(struct test), 0, NULL, NULL);

    obj = (struct test *)kmem_cache_alloc(cp, KM_NOSLEEP);

    obj->a=1;
    obj->b=1;
    obj->c=1;

    kmem_cache_free(cp, obj);

    kmem_cache_destroy(cp);

    return 0;
}

static char *
test_big_object() {
    int i;
    void * pos;
    kmem_cache_t cp = kmem_cache_create("test", 1000, 0, NULL, NULL);

    // allocating enough for two slabs (auto-growing)
    for (i = 0; i < 9; i++) {
        pos = kmem_cache_alloc(cp, KM_NOSLEEP);
        assertt("big object allocation returned null?", pos);
    }

    kmem_cache_destroy(cp);

    return 0;
}

static char *
test_all () {
    run_test(test_cache_create);
    run_test(test_cache_grow);
    run_test(test_cache_alloc);
    // run_test(test_perf_cache_alloc);
    run_test(test_cache_free);
    run_test(test_big_object);
    return 0;
}

int
main(void) {
    char * result = test_all();
    if (result)
        printf("Test failed: %s\n", result);
    else
        printf("ALL TESTS PASSED!\n");

    printf("=====================\nTOTAL TESTS:\t%04d\n", tests_run);

    return (result != 0);
}
--------------------------------------------------------------------------------