├── Makefile
├── README
├── include-ia64
│   ├── atomic.h
│   └── queue.h
├── include-ppc64
│   ├── atomic.h
│   └── queue.h
├── include-x86
│   ├── atomic.h
│   └── queue.h
├── malloc_new.cpp
├── michael.c
└── michael.h

/Makefile:
--------------------------------------------------------------------------------
1 | # What architecture are we on?
2 | # Options:
3 | # - ppc64 (IBM PowerPC, 64-bit)
4 | # - x86 (Intel x86, 32-bit)
5 | # - ia64 (Intel Itanium, 64-bit)
6 | ASM = x86
7 | 
8 | ifeq ($(ASM), ppc64)
9 | BITS = -m64
10 | FPIC = -fPIC
11 | endif
12 | ifeq ($(ASM), x86)
13 | BITS = -m32
14 | endif
15 | ifeq ($(ASM), ia64)
16 | FPIC = -fPIC
17 | endif
18 | 
19 | CC = gcc
20 | CXX = g++
21 | 
22 | CLFLAGS = -lpthread -lm -lstdc++
23 | CFLAGS = -D$(ASM) -D_GNU_SOURCE -D_REENTRANT #-DDEBUG
24 | 
25 | GCC_CFLAGS = -Wall $(BITS) -fno-strict-aliasing $(FPIC)
26 | GCC_OPT = -O3 -ggdb #-DDEBUG
27 | 
28 | ICC_CFLAGS = -Wall -wd279 -wd981 -wd1418 -wd1469 -wd383 -wd869 -wd522 -wd810 -wd1684
29 | ICC_OPT = -O3 -pipe -finline-functions -fomit-frame-pointer #-march=pentium4 -mcpu=pentium4
30 | 
31 | ifeq ($(CC), gcc)
32 | OPT = $(GCC_OPT)
33 | CFLAGS += $(GCC_CFLAGS)
34 | endif
35 | ifeq ($(CC), icc)
36 | OPT = $(ICC_OPT)
37 | CFLAGS += $(ICC_CFLAGS)
38 | endif
39 | 
40 | # Rules
41 | 
42 | all: libmichael.so
43 | 
44 | clean:
45 | 	rm -f *.o *.so
46 | 
47 | michael.o: michael.h michael.c include-$(ASM)/atomic.h include-$(ASM)/queue.h
48 | 	$(CC) $(CFLAGS) $(OPT) -Iinclude-$(ASM) -c michael.c
49 | 
50 | malloc_new.o: malloc_new.cpp michael.h
51 | 	$(CXX) $(CFLAGS) $(OPT) -Iinclude-$(ASM) -c malloc_new.cpp
52 | 
53 | libmichael.so: michael.o malloc_new.o
54 | 	$(CXX) $(CFLAGS) $(OPT) michael.o malloc_new.o -o libmichael.so $(CLFLAGS) -shared
55 | 
56 | 
--------------------------------------------------------------------------------
/README:
--------------------------------------------------------------------------------
1 | This library is an implementation of Maged Michael's algorithms as presented in
2 | his 2004 PLDI paper, "Scalable Lock-Free Dynamic Memory Allocation".
3 | 
4 | ---
5 | Implementation contributors
6 | ---
7 | Scott Schneider, scschnei@cs.vt.edu
8 | Christos Antonopoulos, cda@cs.wm.edu
9 | Dimitrios Nikolopoulos, dsn@cs.vt.edu
10 | 
11 | ---
12 | Copyright & License
13 | ---
14 | Copyright (C) 2007 Scott Schneider, Christos Antonopoulos
15 | 
16 | This library is free software; you can redistribute it and/or
17 | modify it under the terms of the GNU Lesser General Public
18 | License as published by the Free Software Foundation; either
19 | version 2.1 of the License, or (at your option) any later version.
20 | 
21 | This library is distributed in the hope that it will be useful,
22 | but WITHOUT ANY WARRANTY; without even the implied warranty of
23 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 | Lesser General Public License for more details.
25 | 
26 | You should have received a copy of the GNU Lesser General Public
27 | License along with this library; if not, write to the Free Software
28 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
29 | 
30 | The full license can be found at:
31 | http://www.gnu.org/copyleft/lesser.html
32 | 
33 | ---
34 | Notes
35 | ---
36 | - We have tried to faithfully implement Michael's allocator as he
37 | described it in his PLDI 2004 paper. However, our implementation has
38 | no page manager and performs no superblock caching. Our experiments
39 | show that the lack of these two features can significantly hurt
40 | performance for some applications.
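- To try the allocator on an existing program, build the shared library and
  interpose it at load time; for example, on a Linux/glibc system:

      make ASM=x86
      LD_PRELOAD=$PWD/libmichael.so ./your_program

  (./your_program is a placeholder; pass ASM=ppc64 or ASM=ia64 to make, or
  edit the Makefile, to match your target.) Because the library defines
  malloc, free, calloc, and realloc, as well as the C++ operators new and
  delete, the dynamic loader resolves the program's allocation calls to
  libmichael.so instead of libc.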
41 | 
42 | 
--------------------------------------------------------------------------------
/include-ia64/atomic.h:
--------------------------------------------------------------------------------
1 | #ifndef __SYNCHRO_ATOMIC_H__
2 | #define __SYNCHRO_ATOMIC_H__
3 | 
4 | inline unsigned long atmc_fetch_and_add(volatile unsigned long *address, unsigned int inc);
5 | 
6 | static inline unsigned long fetch_and_store(volatile unsigned int *address, unsigned int value)
7 | {
8 | 	unsigned long long ia64_intri_res;
9 | 	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)
10 | 		: "r" (address), "r" (value) : "memory");
11 | 	return ia64_intri_res;
12 | }
13 | 
14 | #define atmc_fetch_and_add(address, inc) \
15 | ({ \
16 | 	unsigned long long res; \
17 | 	asm volatile ("fetchadd4.acq %0=[%1],%2" \
18 | 		: "=r"(res) : "r"(address), "i" (inc) \
19 | 		: "memory"); \
20 | 	res; \
21 | })
22 | 
23 | static inline unsigned int compare_and_swap32(volatile unsigned int *address, unsigned int old_value, unsigned int new_value)
24 | {
25 | 	unsigned long long res;
26 | 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old_value));
27 | 	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":
28 | 		"=r"(res) : "r"(address), "r"(new_value) : "memory");
29 | 	return res == old_value;
30 | }
31 | 
32 | static inline unsigned int compare_and_swap64(volatile unsigned long long *address,
33 | 	unsigned long long old_value, unsigned long long new_value)
34 | {
35 | 	unsigned long long res;
36 | 	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old_value));
37 | 	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":
38 | 		"=r"(res) : "r"(address), "r"(new_value) : "memory");
39 | 	return res == old_value;
40 | }
41 | 
42 | static inline void atmc_add32(volatile unsigned int* address, int inc)
43 | {
44 | 	int old_value;
45 | 	int new_value;
46 | 
47 | 	do {
48 | 		old_value = *address;
49 | 		new_value = old_value + inc;
50 | 	} while (!compare_and_swap32(address, old_value, new_value));
51 | }
52 | 
53 | static inline void atmc_add64(volatile unsigned long long* address, unsigned long long inc)
54 | {
55 | 	long long old_value;
56 | 	long long new_value;
57 | 
58 | 	do {
59 | 		old_value = *address;
60 | 		new_value = old_value + inc;
61 | 	} while (!compare_and_swap64(address, old_value, new_value));
62 | }
63 | 
64 | static inline unsigned long compare_and_swap_ptr(volatile void *address, void* old_ptr, void* new_ptr)
65 | {
66 | 	return compare_and_swap64((volatile unsigned long long *)address, (unsigned long long)old_ptr, (unsigned long long)new_ptr);
67 | }
68 | 
69 | #endif
70 | 
71 | 
--------------------------------------------------------------------------------
/include-ia64/queue.h:
--------------------------------------------------------------------------------
1 | #ifndef __QUEUE_H_
2 | #define __QUEUE_H_
3 | 
4 | #include "atomic.h"
5 | 
6 | typedef struct {
7 | 	volatile void* top;
8 | } top_aba_t;
9 | 
10 | // Pseudostructure for lock-free list elements.
11 | // The only requirement is that the second pointer-sized
12 | // word of each element (bytes 9-16 on 64-bit ia64) is
13 | // available to be used as the link pointer for the
14 | // implementation of a singly-linked list.
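// Note that, unlike the ppc64 and x86 variants of this header, the
// ia64 top_aba_t carries no ocount tag: the dequeue and enqueue below
// apply compare_and_swap64() to the bare 64-bit top pointer, so this
// port relies on the pointer value alone to detect concurrent
// modification of the list.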
15 | struct queue_elem_t { 16 | char *_dummy; 17 | volatile struct queue_elem_t *next; 18 | }; 19 | 20 | typedef struct { 21 | unsigned long long _pad0[8]; 22 | top_aba_t both; 23 | unsigned long long _pad1[8]; 24 | } lf_fifo_queue_t; 25 | 26 | #define LF_FIFO_QUEUE_STATIC_INIT {{0, 0, 0, 0, 0, 0, 0, 0}, {0}, {0, 0, 0, 0, 0, 0, 0, 0}} 27 | 28 | /******************************************************************************/ 29 | 30 | static __inline__ void lf_fifo_queue_init(lf_fifo_queue_t *queue); 31 | static __inline__ int lf_fifo_enqueue(lf_fifo_queue_t *queue, void *element); 32 | static __inline__ void *lf_fifo_dequeue(lf_fifo_queue_t *queue); 33 | 34 | /******************************************************************************/ 35 | 36 | static __inline__ void lf_fifo_queue_init(lf_fifo_queue_t *queue) 37 | { 38 | queue->both.top = 0; 39 | } 40 | 41 | /******************************************************************************/ 42 | 43 | static __inline__ void *lf_fifo_dequeue(lf_fifo_queue_t *queue) 44 | { 45 | top_aba_t head; 46 | top_aba_t next; 47 | 48 | while(1) { 49 | head.top = queue->both.top; 50 | if (head.top == 0) { 51 | return NULL; 52 | } 53 | next.top = ((struct queue_elem_t *)head.top)->next; 54 | if (compare_and_swap64((volatile unsigned long long *)&(queue->both), *((unsigned long long*)&head), *((unsigned long long*)&next))) { 55 | return ((void *)head.top); 56 | } 57 | } 58 | } 59 | 60 | /******************************************************************************/ 61 | 62 | static __inline__ int lf_fifo_enqueue(lf_fifo_queue_t *queue, void *element) 63 | { 64 | top_aba_t old_top; 65 | top_aba_t new_top; 66 | 67 | while(1) { 68 | old_top.top = queue->both.top; 69 | 70 | ((struct queue_elem_t *)element)->next = (struct queue_elem_t *)old_top.top; 71 | new_top.top = element; 72 | if (compare_and_swap64((volatile unsigned long long*)&(queue->both), *((unsigned long long*)&old_top), *((unsigned long long*)&new_top))) { 73 | return 0; 74 | } 75 | } 76 | } 77 | 78 | #endif 79 | 80 | -------------------------------------------------------------------------------- /include-ppc64/atomic.h: -------------------------------------------------------------------------------- 1 | #ifndef __SYNCHRO_ATOMIC_H__ 2 | #define __SYNCHRO_ATOMIC_H__ 3 | 4 | #define mb() __asm__ __volatile__ ("sync" : : : "memory") 5 | #define EIEIO_ON_SMP "eieio\n" 6 | #define ISYNC_ON_SMP "\n\tisync" 7 | 8 | static __inline__ unsigned long fetch_and_store(volatile unsigned int *address, unsigned int value) 9 | { 10 | unsigned long dummy; 11 | 12 | __asm__ __volatile__( 13 | EIEIO_ON_SMP 14 | "1: lwarx %0,0,%3 # __xchg_u32\n\ 15 | stwcx. %2,0,%3\n\ 16 | 2: bne- 1b" 17 | ISYNC_ON_SMP 18 | : "=&r" (dummy), "=m" (*address) 19 | : "r" (value), "r" (address) 20 | : "cc", "memory"); 21 | 22 | return (dummy); 23 | } 24 | 25 | static __inline__ void atmc_add(volatile unsigned int* address, int value) 26 | { 27 | int t; 28 | 29 | __asm__ __volatile__( 30 | "1: lwarx %0,0,%3 # atomic_add\n\ 31 | add %0,%2,%0\n\ 32 | stwcx. %0,0,%3\n\ 33 | bne- 1b" 34 | : "=&r" (t), "=m" (*address) 35 | : "r" (value), "r" (address), "m" (*address) 36 | : "cc"); 37 | } 38 | 39 | static __inline__ unsigned int compare_and_swap32(volatile unsigned int *address, unsigned int old_value, unsigned int new_value) 40 | { 41 | unsigned int prev; 42 | 43 | __asm__ __volatile__ ( 44 | EIEIO_ON_SMP 45 | "1: lwarx %0,0,%2 # __cmpxchg_u32\n\ 46 | cmpw 0,%0,%3\n\ 47 | bne- 2f\n\ 48 | stwcx. 
%4,0,%2\n\
49 | 	bne-	1b"
50 | 	ISYNC_ON_SMP
51 | 	"\n\
52 | 2:"
53 | 	: "=&r" (prev), "=m" (*address)
54 | 	: "r" (address), "r" (old_value), "r" (new_value), "m" (*address)
55 | 	: "cc", "memory");
56 | 
57 | 	return prev == old_value;
58 | }
59 | 
60 | static __inline__ unsigned int compare_and_swap64(volatile unsigned long *address,
61 | 	unsigned long old_value, unsigned long new_value)
62 | {
63 | 	unsigned long prev = 0;
64 | 
65 | 	__asm__ __volatile__ (
66 | 	EIEIO_ON_SMP
67 | "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
68 | 	cmpd	0,%0,%3\n\
69 | 	bne-	2f\n\
70 | 	stdcx.	%4,0,%2\n\
71 | 	bne-	1b"
72 | 	ISYNC_ON_SMP
73 | 	"\n\
74 | 2:"
75 | 	: "=&r" (prev), "=m" (*address)
76 | 	: "r" (address), "r" (old_value), "r" (new_value), "m" (*address)
77 | 	: "cc", "memory");
78 | 
79 | 	return prev == old_value;
80 | }
81 | 
82 | static inline unsigned long compare_and_swap_ptr(volatile void *address, void* old_ptr, void* new_ptr)
83 | {
84 | 	return compare_and_swap64((volatile unsigned long *)address, (unsigned long)old_ptr, (unsigned long)new_ptr);
85 | }
86 | 
87 | #endif
88 | 
89 | 
--------------------------------------------------------------------------------
/include-ppc64/queue.h:
--------------------------------------------------------------------------------
1 | #ifndef __QUEUE_H_
2 | #define __QUEUE_H_
3 | 
4 | #include "atomic.h"
5 | 
6 | typedef struct {
7 | 	volatile unsigned long long top:46, ocount:18;
8 | } top_aba_t;
9 | 
10 | // Pseudostructure for lock-free list elements.
11 | // The only requirement is that the second pointer-sized
12 | // word of each element (bytes 9-16 on 64-bit PowerPC) is
13 | // available to be used as the link pointer for the
14 | // implementation of a singly-linked list.
15 | struct queue_elem_t {
16 | 	char *_dummy;
17 | 	volatile struct queue_elem_t *next;
18 | };
19 | 
20 | typedef struct {
21 | 	unsigned long long _pad0[8];
22 | 	top_aba_t both;
23 | 	unsigned long long _pad1[8];
24 | } lf_fifo_queue_t;
25 | 
26 | #define LF_FIFO_QUEUE_STATIC_INIT {{0, 0, 0, 0, 0, 0, 0, 0}, {0, 0}, {0, 0, 0, 0, 0, 0, 0, 0}}
27 | 
28 | /******************************************************************************/
29 | 
30 | static __inline__ void lf_fifo_queue_init(lf_fifo_queue_t *queue);
31 | static __inline__ int lf_fifo_enqueue(lf_fifo_queue_t *queue, void *element);
32 | static __inline__ void *lf_fifo_dequeue(lf_fifo_queue_t *queue);
33 | 
34 | /******************************************************************************/
35 | 
36 | static __inline__ void lf_fifo_queue_init(lf_fifo_queue_t *queue)
37 | {
38 | 	queue->both.top = 0;
39 | 	queue->both.ocount = 0;
40 | }
41 | 
42 | /******************************************************************************/
43 | 
44 | static __inline__ void *lf_fifo_dequeue(lf_fifo_queue_t *queue)
45 | {
46 | 	top_aba_t head;
47 | 	top_aba_t next;
48 | 
49 | 	while(1) {
50 | 		head.top = queue->both.top;
51 | 		head.ocount = queue->both.ocount;
52 | 		if (head.top == 0) {
53 | 			return NULL;
54 | 		}
55 | 		next.top = (unsigned long)(((struct queue_elem_t *)head.top)->next);
56 | 		next.ocount = head.ocount + 1;
57 | 		if (compare_and_swap64((volatile unsigned long *)&(queue->both), *((unsigned long*)&head), *((unsigned long*)&next))) {
58 | 			return ((void *)head.top);
59 | 		}
60 | 	}
61 | }
62 | 
63 | /******************************************************************************/
64 | 
65 | static __inline__ int lf_fifo_enqueue(lf_fifo_queue_t *queue, void *element)
66 | {
67 | 	top_aba_t old_top;
68 | 	top_aba_t new_top;
69 | 
70 | 	while(1) {
71 | 		old_top.ocount = queue->both.ocount;
72 | 		old_top.top = queue->both.top;
73 | 
74 | 
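		// The ocount field is a version tag for ABA avoidance: both
		// enqueue and dequeue bump it on every successful CAS, so a CAS
		// that observes a recycled top pointer still fails, because the
		// tag has moved on in the meantime.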
((struct queue_elem_t *)element)->next = (struct queue_elem_t *)old_top.top; 75 | new_top.top = (unsigned long)element; 76 | new_top.ocount = old_top.ocount + 1; 77 | if (compare_and_swap64((volatile unsigned long *)&(queue->both), *((unsigned long*)&old_top), *((unsigned long*)&new_top))) { 78 | return 0; 79 | } 80 | } 81 | } 82 | 83 | #endif 84 | 85 | -------------------------------------------------------------------------------- /include-x86/atomic.h: -------------------------------------------------------------------------------- 1 | #ifndef __SYNCHRO_ATOMIC_H__ 2 | #define __SYNCHRO_ATOMIC_H__ 3 | 4 | #define __fool_gcc(x) (*(struct {unsigned int a[100];} *)x) 5 | 6 | inline unsigned long fetch_and_store(volatile unsigned long *address, 7 | unsigned long value); 8 | inline void atmc_add(volatile long *address, long value); 9 | inline unsigned long compare_and_swap32(volatile unsigned long *address, 10 | unsigned long old_value, 11 | unsigned long new_value); 12 | 13 | inline unsigned long compare_and_swap64(volatile unsigned long long *address, 14 | unsigned long long old_value, 15 | unsigned long long new_value); 16 | 17 | inline unsigned long compare_and_swap_ptr(volatile unsigned long *address, 18 | unsigned long old_value, 19 | unsigned long new_value); 20 | /***************************************************************************/ 21 | 22 | #define fetch_and_store(address, value) \ 23 | ({ \ 24 | volatile unsigned long ret_val; \ 25 | \ 26 | __asm__ __volatile__ ("lock; xchgl %0, %1" \ 27 | :"=r" ((ret_val)) \ 28 | :"m" (__fool_gcc((address))), "0" ((value)) \ 29 | :"memory"); \ 30 | ret_val; \ 31 | }) 32 | 33 | #define atmc_add(address, value) \ 34 | ({ \ 35 | register volatile long val = (value); \ 36 | \ 37 | __asm__ __volatile__ ("lock; addl %1, %0" \ 38 | : "=m" (*(address)) \ 39 | : "r" (val)); \ 40 | }) 41 | 42 | #define compare_and_swap32(address, old_value, new_value) \ 43 | ({ \ 44 | unsigned long ret_val = 0; \ 45 | __asm__ __volatile__ ("lock\n\t" \ 46 | "cmpxchgl %2, (%1)\n\t" \ 47 | "sete (%3)\n\t" \ 48 | : \ 49 | : "a" (old_value), "r" (address), "r" (new_value), \ 50 | "r" (&ret_val) \ 51 | : "memory"); \ 52 | ret_val; \ 53 | }) 54 | 55 | #define compare_and_swap64(address, old_value, new_value) \ 56 | ({ \ 57 | unsigned long ret_val = 0; \ 58 | __asm__ __volatile__ ("lock\n\t" \ 59 | "cmpxchg8b (%0)\n\t" \ 60 | "sete (%1)\n\t" \ 61 | : \ 62 | : "r" (address), "r" (&ret_val), \ 63 | "d" (*(((unsigned int*)&(old_value))+1)), "a" (*(((unsigned int*)&(old_value))+0)), \ 64 | "c" (*(((unsigned int*)&(new_value))+1)), "b" (*(((unsigned int*)&(new_value))+0)) \ 65 | : "memory"); \ 66 | ret_val; \ 67 | }) 68 | 69 | #define compare_and_swap_ptr(address, old_value, new_value) compare_and_swap32(address, old_value, new_value) 70 | 71 | #endif 72 | 73 | -------------------------------------------------------------------------------- /include-x86/queue.h: -------------------------------------------------------------------------------- 1 | #ifndef __QUEUE_H_ 2 | #define __QUEUE_H_ 3 | 4 | #include "atomic.h" 5 | 6 | typedef struct { 7 | volatile unsigned long long top:46, ocount:18; 8 | } top_aba_t; 9 | 10 | // Pseudostructure for lock-free list elements. 11 | // The only requirement is that the 5th-8th byte of 12 | // each element should be available to be used as 13 | // the pointer for the implementation of a singly-linked 14 | // list. 
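// A minimal usage sketch (illustrative only; "block" stands for any
// chunk whose first two words are free to reuse):
//
//     lf_fifo_queue_t q = LF_FIFO_QUEUE_STATIC_INIT;
//     lf_fifo_enqueue(&q, block);        /* push block on top */
//     void *b = lf_fifo_dequeue(&q);     /* pop top, NULL if empty */
//
// Despite the "fifo" in the names, both operations work on the same
// end of the list, so this is a LIFO free list (a tagged Treiber
// stack), which is all the allocator requires.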
15 | struct queue_elem_t { 16 | char *_dummy; 17 | volatile struct queue_elem_t *next; 18 | }; 19 | 20 | typedef struct { 21 | unsigned long long _pad0[8]; 22 | top_aba_t both; 23 | unsigned long long _pad1[8]; 24 | } lf_fifo_queue_t; 25 | 26 | #define LF_FIFO_QUEUE_STATIC_INIT {{0, 0, 0, 0, 0, 0, 0, 0}, {0, 0}, {0, 0, 0, 0, 0, 0, 0, 0}} 27 | 28 | /******************************************************************************/ 29 | 30 | static __inline__ void lf_fifo_queue_init(lf_fifo_queue_t *queue); 31 | static __inline__ int lf_fifo_enqueue(lf_fifo_queue_t *queue, void *element); 32 | static __inline__ void *lf_fifo_dequeue(lf_fifo_queue_t *queue); 33 | 34 | /******************************************************************************/ 35 | 36 | static __inline__ void lf_fifo_queue_init(lf_fifo_queue_t *queue) 37 | { 38 | queue->both.top = 0; 39 | queue->both.ocount = 0; 40 | } 41 | 42 | /******************************************************************************/ 43 | 44 | static __inline__ void *lf_fifo_dequeue(lf_fifo_queue_t *queue) 45 | { 46 | top_aba_t head; 47 | top_aba_t next; 48 | 49 | while(1) { 50 | head.top = queue->both.top; 51 | head.ocount = queue->both.ocount; 52 | if (head.top == 0) { 53 | return NULL; 54 | } 55 | next.top = (unsigned long)(((struct queue_elem_t *)head.top)->next); 56 | next.ocount = head.ocount + 1; 57 | if (compare_and_swap64((volatile unsigned long *)&(queue->both), *((unsigned long*)&head), *((unsigned long*)&next))) { 58 | return ((void *)head.top); 59 | } 60 | } 61 | } 62 | 63 | /******************************************************************************/ 64 | 65 | static __inline__ int lf_fifo_enqueue(lf_fifo_queue_t *queue, void *element) 66 | { 67 | top_aba_t old_top; 68 | top_aba_t new_top; 69 | 70 | while(1) { 71 | old_top.ocount = queue->both.ocount; 72 | old_top.top = queue->both.top; 73 | 74 | ((struct queue_elem_t *)element)->next = (struct queue_elem_t *)old_top.top; 75 | new_top.top = (unsigned long)element; 76 | new_top.ocount = old_top.ocount + 1; 77 | if (compare_and_swap64((volatile unsigned long *)&(queue->both), *((unsigned long*)&old_top), *((unsigned long*)&new_top))) { 78 | return 0; 79 | } 80 | } 81 | } 82 | 83 | #endif 84 | 85 | -------------------------------------------------------------------------------- /malloc_new.cpp: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2007 Scott Schneider, Christos Antonopoulos 3 | * 4 | * This library is free software; you can redistribute it and/or 5 | * modify it under the terms of the GNU Lesser General Public 6 | * License as published by the Free Software Foundation; either 7 | * version 2.1 of the License, or (at your option) any later version. 8 | * 9 | * This library is distributed in the hope that it will be useful, 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 | * Lesser General Public License for more details. 
13 |  *
14 |  * You should have received a copy of the GNU Lesser General Public
15 |  * License along with this library; if not, write to the Free Software
16 |  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 |  */
18 | 
19 | extern "C" {
20 | #include "michael.h"
21 | }
22 | 
23 | #include <new>
24 | 
25 | void* operator new(std::size_t sz) throw (std::bad_alloc)
26 | {
27 | 	return malloc(sz);
28 | }
29 | 
30 | void* operator new (size_t sz, const std::nothrow_t&) throw()
31 | {
32 | 	return malloc(sz);
33 | }
34 | 
35 | void operator delete (void * ptr) throw()
36 | {
37 | 	free(ptr);
38 | }
39 | 
40 | void* operator new[](std::size_t sz) throw (std::bad_alloc)
41 | {
42 | 	return malloc(sz);
43 | }
44 | 
45 | void* operator new[] (size_t sz, const std::nothrow_t&) throw()
46 | {
47 | 	return malloc(sz);
48 | }
49 | 
50 | void operator delete[] (void * ptr) throw()
51 | {
52 | 	free(ptr);
53 | }
54 | 
55 | 
--------------------------------------------------------------------------------
/michael.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2007 Scott Schneider, Christos Antonopoulos
3 |  *
4 |  * This library is free software; you can redistribute it and/or
5 |  * modify it under the terms of the GNU Lesser General Public
6 |  * License as published by the Free Software Foundation; either
7 |  * version 2.1 of the License, or (at your option) any later version.
8 |  *
9 |  * This library is distributed in the hope that it will be useful,
10 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 |  * Lesser General Public License for more details.
13 |  *
14 |  * You should have received a copy of the GNU Lesser General Public
15 |  * License along with this library; if not, write to the Free Software
16 |  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 |  */
18 | 
19 | #include "michael.h"
20 | 
21 | /* This is large and annoying, but it saves us from needing an
22 |  * initialization routine.
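 *
 * Each entry i of this table is the size class for blocks of
 * sz == (i + 1) * GRANULARITY bytes. find_heap() adds HEADER_SIZE to
 * the request and indexes with (sz / GRANULARITY), so every request
 * maps to a class whose block size is at least as large as it needs.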
*/ 23 | sizeclass sizeclasses[2048 / GRANULARITY] = 24 | { 25 | {LF_FIFO_QUEUE_STATIC_INIT, 8, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 16, SBSIZE}, 26 | {LF_FIFO_QUEUE_STATIC_INIT, 24, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 32, SBSIZE}, 27 | {LF_FIFO_QUEUE_STATIC_INIT, 40, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 48, SBSIZE}, 28 | {LF_FIFO_QUEUE_STATIC_INIT, 56, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 64, SBSIZE}, 29 | {LF_FIFO_QUEUE_STATIC_INIT, 72, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 80, SBSIZE}, 30 | {LF_FIFO_QUEUE_STATIC_INIT, 88, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 96, SBSIZE}, 31 | {LF_FIFO_QUEUE_STATIC_INIT, 104, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 112, SBSIZE}, 32 | {LF_FIFO_QUEUE_STATIC_INIT, 120, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 128, SBSIZE}, 33 | {LF_FIFO_QUEUE_STATIC_INIT, 136, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 144, SBSIZE}, 34 | {LF_FIFO_QUEUE_STATIC_INIT, 152, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 160, SBSIZE}, 35 | {LF_FIFO_QUEUE_STATIC_INIT, 168, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 176, SBSIZE}, 36 | {LF_FIFO_QUEUE_STATIC_INIT, 184, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 192, SBSIZE}, 37 | {LF_FIFO_QUEUE_STATIC_INIT, 200, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 208, SBSIZE}, 38 | {LF_FIFO_QUEUE_STATIC_INIT, 216, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 224, SBSIZE}, 39 | {LF_FIFO_QUEUE_STATIC_INIT, 232, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 240, SBSIZE}, 40 | {LF_FIFO_QUEUE_STATIC_INIT, 248, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 256, SBSIZE}, 41 | {LF_FIFO_QUEUE_STATIC_INIT, 264, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 272, SBSIZE}, 42 | {LF_FIFO_QUEUE_STATIC_INIT, 280, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 288, SBSIZE}, 43 | {LF_FIFO_QUEUE_STATIC_INIT, 296, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 304, SBSIZE}, 44 | {LF_FIFO_QUEUE_STATIC_INIT, 312, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 320, SBSIZE}, 45 | {LF_FIFO_QUEUE_STATIC_INIT, 328, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 336, SBSIZE}, 46 | {LF_FIFO_QUEUE_STATIC_INIT, 344, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 352, SBSIZE}, 47 | {LF_FIFO_QUEUE_STATIC_INIT, 360, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 368, SBSIZE}, 48 | {LF_FIFO_QUEUE_STATIC_INIT, 376, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 384, SBSIZE}, 49 | {LF_FIFO_QUEUE_STATIC_INIT, 392, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 400, SBSIZE}, 50 | {LF_FIFO_QUEUE_STATIC_INIT, 408, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 416, SBSIZE}, 51 | {LF_FIFO_QUEUE_STATIC_INIT, 424, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 432, SBSIZE}, 52 | {LF_FIFO_QUEUE_STATIC_INIT, 440, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 448, SBSIZE}, 53 | {LF_FIFO_QUEUE_STATIC_INIT, 456, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 464, SBSIZE}, 54 | {LF_FIFO_QUEUE_STATIC_INIT, 472, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 480, SBSIZE}, 55 | {LF_FIFO_QUEUE_STATIC_INIT, 488, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 496, SBSIZE}, 56 | {LF_FIFO_QUEUE_STATIC_INIT, 504, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 512, SBSIZE}, 57 | {LF_FIFO_QUEUE_STATIC_INIT, 520, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 528, SBSIZE}, 58 | {LF_FIFO_QUEUE_STATIC_INIT, 536, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 544, SBSIZE}, 59 | {LF_FIFO_QUEUE_STATIC_INIT, 552, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 560, SBSIZE}, 60 | {LF_FIFO_QUEUE_STATIC_INIT, 568, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 576, SBSIZE}, 61 | {LF_FIFO_QUEUE_STATIC_INIT, 584, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 592, SBSIZE}, 62 | {LF_FIFO_QUEUE_STATIC_INIT, 600, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 608, SBSIZE}, 63 | {LF_FIFO_QUEUE_STATIC_INIT, 616, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 624, SBSIZE}, 64 | {LF_FIFO_QUEUE_STATIC_INIT, 632, 
SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 640, SBSIZE}, 65 | {LF_FIFO_QUEUE_STATIC_INIT, 648, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 656, SBSIZE}, 66 | {LF_FIFO_QUEUE_STATIC_INIT, 664, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 672, SBSIZE}, 67 | {LF_FIFO_QUEUE_STATIC_INIT, 680, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 688, SBSIZE}, 68 | {LF_FIFO_QUEUE_STATIC_INIT, 696, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 704, SBSIZE}, 69 | {LF_FIFO_QUEUE_STATIC_INIT, 712, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 720, SBSIZE}, 70 | {LF_FIFO_QUEUE_STATIC_INIT, 728, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 736, SBSIZE}, 71 | {LF_FIFO_QUEUE_STATIC_INIT, 744, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 752, SBSIZE}, 72 | {LF_FIFO_QUEUE_STATIC_INIT, 760, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 768, SBSIZE}, 73 | {LF_FIFO_QUEUE_STATIC_INIT, 776, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 784, SBSIZE}, 74 | {LF_FIFO_QUEUE_STATIC_INIT, 792, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 800, SBSIZE}, 75 | {LF_FIFO_QUEUE_STATIC_INIT, 808, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 816, SBSIZE}, 76 | {LF_FIFO_QUEUE_STATIC_INIT, 824, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 832, SBSIZE}, 77 | {LF_FIFO_QUEUE_STATIC_INIT, 840, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 848, SBSIZE}, 78 | {LF_FIFO_QUEUE_STATIC_INIT, 856, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 864, SBSIZE}, 79 | {LF_FIFO_QUEUE_STATIC_INIT, 872, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 880, SBSIZE}, 80 | {LF_FIFO_QUEUE_STATIC_INIT, 888, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 896, SBSIZE}, 81 | {LF_FIFO_QUEUE_STATIC_INIT, 904, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 912, SBSIZE}, 82 | {LF_FIFO_QUEUE_STATIC_INIT, 920, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 928, SBSIZE}, 83 | {LF_FIFO_QUEUE_STATIC_INIT, 936, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 944, SBSIZE}, 84 | {LF_FIFO_QUEUE_STATIC_INIT, 952, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 960, SBSIZE}, 85 | {LF_FIFO_QUEUE_STATIC_INIT, 968, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 976, SBSIZE}, 86 | {LF_FIFO_QUEUE_STATIC_INIT, 984, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 992, SBSIZE}, 87 | {LF_FIFO_QUEUE_STATIC_INIT, 1000, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1008, SBSIZE}, 88 | {LF_FIFO_QUEUE_STATIC_INIT, 1016, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1024, SBSIZE}, 89 | {LF_FIFO_QUEUE_STATIC_INIT, 1032, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1040, SBSIZE}, 90 | {LF_FIFO_QUEUE_STATIC_INIT, 1048, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1056, SBSIZE}, 91 | {LF_FIFO_QUEUE_STATIC_INIT, 1064, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1072, SBSIZE}, 92 | {LF_FIFO_QUEUE_STATIC_INIT, 1080, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1088, SBSIZE}, 93 | {LF_FIFO_QUEUE_STATIC_INIT, 1096, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1104, SBSIZE}, 94 | {LF_FIFO_QUEUE_STATIC_INIT, 1112, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1120, SBSIZE}, 95 | {LF_FIFO_QUEUE_STATIC_INIT, 1128, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1136, SBSIZE}, 96 | {LF_FIFO_QUEUE_STATIC_INIT, 1144, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1152, SBSIZE}, 97 | {LF_FIFO_QUEUE_STATIC_INIT, 1160, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1168, SBSIZE}, 98 | {LF_FIFO_QUEUE_STATIC_INIT, 1176, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1184, SBSIZE}, 99 | {LF_FIFO_QUEUE_STATIC_INIT, 1192, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1200, SBSIZE}, 100 | {LF_FIFO_QUEUE_STATIC_INIT, 1208, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1216, SBSIZE}, 101 | {LF_FIFO_QUEUE_STATIC_INIT, 1224, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1232, SBSIZE}, 102 | {LF_FIFO_QUEUE_STATIC_INIT, 1240, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1248, SBSIZE}, 103 | {LF_FIFO_QUEUE_STATIC_INIT, 1256, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1264, 
SBSIZE}, 104 | {LF_FIFO_QUEUE_STATIC_INIT, 1272, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1280, SBSIZE}, 105 | {LF_FIFO_QUEUE_STATIC_INIT, 1288, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1296, SBSIZE}, 106 | {LF_FIFO_QUEUE_STATIC_INIT, 1304, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1312, SBSIZE}, 107 | {LF_FIFO_QUEUE_STATIC_INIT, 1320, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1328, SBSIZE}, 108 | {LF_FIFO_QUEUE_STATIC_INIT, 1336, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1344, SBSIZE}, 109 | {LF_FIFO_QUEUE_STATIC_INIT, 1352, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1360, SBSIZE}, 110 | {LF_FIFO_QUEUE_STATIC_INIT, 1368, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1376, SBSIZE}, 111 | {LF_FIFO_QUEUE_STATIC_INIT, 1384, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1392, SBSIZE}, 112 | {LF_FIFO_QUEUE_STATIC_INIT, 1400, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1408, SBSIZE}, 113 | {LF_FIFO_QUEUE_STATIC_INIT, 1416, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1424, SBSIZE}, 114 | {LF_FIFO_QUEUE_STATIC_INIT, 1432, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1440, SBSIZE}, 115 | {LF_FIFO_QUEUE_STATIC_INIT, 1448, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1456, SBSIZE}, 116 | {LF_FIFO_QUEUE_STATIC_INIT, 1464, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1472, SBSIZE}, 117 | {LF_FIFO_QUEUE_STATIC_INIT, 1480, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1488, SBSIZE}, 118 | {LF_FIFO_QUEUE_STATIC_INIT, 1496, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1504, SBSIZE}, 119 | {LF_FIFO_QUEUE_STATIC_INIT, 1512, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1520, SBSIZE}, 120 | {LF_FIFO_QUEUE_STATIC_INIT, 1528, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1536, SBSIZE}, 121 | {LF_FIFO_QUEUE_STATIC_INIT, 1544, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1552, SBSIZE}, 122 | {LF_FIFO_QUEUE_STATIC_INIT, 1560, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1568, SBSIZE}, 123 | {LF_FIFO_QUEUE_STATIC_INIT, 1576, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1584, SBSIZE}, 124 | {LF_FIFO_QUEUE_STATIC_INIT, 1592, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1600, SBSIZE}, 125 | {LF_FIFO_QUEUE_STATIC_INIT, 1608, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1616, SBSIZE}, 126 | {LF_FIFO_QUEUE_STATIC_INIT, 1624, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1632, SBSIZE}, 127 | {LF_FIFO_QUEUE_STATIC_INIT, 1640, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1648, SBSIZE}, 128 | {LF_FIFO_QUEUE_STATIC_INIT, 1656, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1664, SBSIZE}, 129 | {LF_FIFO_QUEUE_STATIC_INIT, 1672, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1680, SBSIZE}, 130 | {LF_FIFO_QUEUE_STATIC_INIT, 1688, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1696, SBSIZE}, 131 | {LF_FIFO_QUEUE_STATIC_INIT, 1704, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1712, SBSIZE}, 132 | {LF_FIFO_QUEUE_STATIC_INIT, 1720, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1728, SBSIZE}, 133 | {LF_FIFO_QUEUE_STATIC_INIT, 1736, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1744, SBSIZE}, 134 | {LF_FIFO_QUEUE_STATIC_INIT, 1752, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1760, SBSIZE}, 135 | {LF_FIFO_QUEUE_STATIC_INIT, 1768, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1776, SBSIZE}, 136 | {LF_FIFO_QUEUE_STATIC_INIT, 1784, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1792, SBSIZE}, 137 | {LF_FIFO_QUEUE_STATIC_INIT, 1800, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1808, SBSIZE}, 138 | {LF_FIFO_QUEUE_STATIC_INIT, 1816, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1824, SBSIZE}, 139 | {LF_FIFO_QUEUE_STATIC_INIT, 1832, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1840, SBSIZE}, 140 | {LF_FIFO_QUEUE_STATIC_INIT, 1848, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1856, SBSIZE}, 141 | {LF_FIFO_QUEUE_STATIC_INIT, 1864, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1872, SBSIZE}, 142 | {LF_FIFO_QUEUE_STATIC_INIT, 1880, SBSIZE}, 
{LF_FIFO_QUEUE_STATIC_INIT, 1888, SBSIZE},
143 | 	{LF_FIFO_QUEUE_STATIC_INIT, 1896, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1904, SBSIZE},
144 | 	{LF_FIFO_QUEUE_STATIC_INIT, 1912, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1920, SBSIZE},
145 | 	{LF_FIFO_QUEUE_STATIC_INIT, 1928, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1936, SBSIZE},
146 | 	{LF_FIFO_QUEUE_STATIC_INIT, 1944, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1952, SBSIZE},
147 | 	{LF_FIFO_QUEUE_STATIC_INIT, 1960, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1968, SBSIZE},
148 | 	{LF_FIFO_QUEUE_STATIC_INIT, 1976, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 1984, SBSIZE},
149 | 	{LF_FIFO_QUEUE_STATIC_INIT, 1992, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 2000, SBSIZE},
150 | 	{LF_FIFO_QUEUE_STATIC_INIT, 2008, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 2016, SBSIZE},
151 | 	{LF_FIFO_QUEUE_STATIC_INIT, 2024, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 2032, SBSIZE},
152 | 	{LF_FIFO_QUEUE_STATIC_INIT, 2040, SBSIZE}, {LF_FIFO_QUEUE_STATIC_INIT, 2048, SBSIZE},
153 | };
154 | 
155 | __thread procheap* heaps[2048 / GRANULARITY] = { };
156 | 
157 | static volatile descriptor_queue queue_head;
158 | 
159 | static inline long min(long a, long b)
160 | {
161 | 	return a < b ? a : b;
162 | }
163 | 
164 | static inline long max(long a, long b)
165 | {
166 | 	return a > b ? a : b;
167 | }
168 | 
169 | static void* AllocNewSB(size_t size, unsigned long alignment) // note: alignment is accepted but not enforced; mmap only guarantees page alignment, and blocks are located through descriptors rather than address masks
170 | {
171 | 	void* addr;
172 | 
173 | 	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
174 | 	if (addr == MAP_FAILED) {
175 | 		fprintf(stderr, "AllocNewSB() mmap failed, %lu, tag %d: ", size, queue_head.tag);
176 | 		switch (errno) {
177 | 			case EBADF:	fprintf(stderr, "EBADF"); break;
178 | 			case EACCES:	fprintf(stderr, "EACCES"); break;
179 | 			case EINVAL:	fprintf(stderr, "EINVAL"); break;
180 | 			case ETXTBSY:	fprintf(stderr, "ETXTBSY"); break;
181 | 			case EAGAIN:	fprintf(stderr, "EAGAIN"); break;
182 | 			case ENOMEM:	fprintf(stderr, "ENOMEM"); break;
183 | 			case ENODEV:	fprintf(stderr, "ENODEV"); break;
184 | 		}
185 | 		fprintf(stderr, "\n");
186 | 		fflush(stderr);
187 | 		exit(1);
188 | 	}
189 | 	else if (addr == NULL) {
190 | 		fprintf(stderr, "AllocNewSB() mmap of size %lu returned NULL, tag %d\n", size, queue_head.tag);
191 | 		fflush(stderr);
192 | 		exit(1);
193 | 	}
194 | 
195 | 	return addr;
196 | }
197 | 
198 | static void organize_desc_list(descriptor* start, unsigned long count, unsigned long stride)
199 | {
200 | 	unsigned long ptr;
201 | 	unsigned int i;
202 | 
203 | 	start->Next = (descriptor*)((unsigned long)start + stride); // byte arithmetic; (start + stride) would advance by stride whole descriptors and orphan the ones in between
204 | 	ptr = (unsigned long)start;
205 | 	for (i = 1; i < count - 1; i++) {
206 | 		ptr += stride;
207 | 		((descriptor*)ptr)->Next = (descriptor*)(ptr + stride);
208 | 	}
209 | 	ptr += stride;
210 | 	((descriptor*)ptr)->Next = NULL;
211 | }
212 | 
213 | static void organize_list(void* start, unsigned long count, unsigned long stride)
214 | {
215 | 	unsigned long ptr;
216 | 	unsigned long i;
217 | 
218 | 	ptr = (unsigned long)start;
219 | 	for (i = 1; i < count - 1; i++) {
220 | 		ptr += stride;
221 | 		*((unsigned long*)ptr) = i + 1; // links are block indices (cf. Anchor.avail), not pointers; block 0 is handed out immediately by MallocFromNewSB()
222 | 	}
223 | }
224 | 
225 | static descriptor* DescAlloc() {
226 | 
227 | 	descriptor_queue old_queue, new_queue;
228 | 	descriptor* desc;
229 | 
230 | #ifdef DEBUG
231 | 	fprintf(stderr, "In DescAlloc\n");
232 | 	fflush(stderr);
233 | #endif
234 | 
235 | 	while(1) {
236 | 		old_queue = queue_head;
237 | 		if (old_queue.DescAvail) {
238 | 			new_queue.DescAvail = (unsigned long)((descriptor*)old_queue.DescAvail)->Next;
239 | 			new_queue.tag = old_queue.tag + 1;
240 | 			if (compare_and_swap64((volatile unsigned long*)&queue_head, *((unsigned 
long*)&old_queue), *((unsigned long*)&new_queue))) { 241 | desc = (descriptor*)old_queue.DescAvail; 242 | #ifdef DEBUG 243 | fprintf(stderr, "Returning recycled descriptor %p (tag %hu)\n", desc, queue_head.tag); 244 | fflush(stderr); 245 | #endif 246 | break; 247 | } 248 | } 249 | else { 250 | desc = AllocNewSB(DESCSBSIZE, sizeof(descriptor)); 251 | organize_desc_list((void *)desc, DESCSBSIZE / sizeof(descriptor), sizeof(descriptor)); 252 | 253 | new_queue.DescAvail = (unsigned long)desc->Next; 254 | new_queue.tag = old_queue.tag + 1; 255 | if (compare_and_swap64((volatile unsigned long*)&queue_head, *((unsigned long*)&old_queue), *((unsigned long*)&new_queue))) { 256 | #ifdef DEBUG 257 | fprintf(stderr, "Returning descriptor %p from new descriptor block\n", desc); 258 | fflush(stderr); 259 | #endif 260 | break; 261 | } 262 | munmap((void*)desc, DESCSBSIZE); 263 | } 264 | } 265 | 266 | return desc; 267 | } 268 | 269 | void DescRetire(descriptor* desc) 270 | { 271 | descriptor_queue old_queue, new_queue; 272 | 273 | #ifdef DEBUG 274 | fprintf(stderr, "Recycling descriptor %p (sb %p, tag %hu)\n", desc, desc->sb, queue_head.tag); 275 | fflush(stderr); 276 | #endif 277 | do { 278 | old_queue = queue_head; 279 | desc->Next = (descriptor*)old_queue.DescAvail; 280 | new_queue.DescAvail = (unsigned long)desc; 281 | new_queue.tag = old_queue.tag + 1; 282 | } while (!compare_and_swap64((volatile unsigned long*)&queue_head, *((unsigned long*)&old_queue), *((unsigned long*)&new_queue))); 283 | } 284 | 285 | static void ListRemoveEmptyDesc(sizeclass* sc) 286 | { 287 | /* 288 | descriptor *desc; 289 | lf_fifo_queue_t temp = LF_FIFO_QUEUE_STATIC_INIT; 290 | 291 | while (desc = (descriptor *)lf_fifo_dequeue(&sc->Partial)) { 292 | lf_fifo_enqueue(&temp, (void *)desc); 293 | if (desc->sb == NULL) { 294 | DescRetire(desc); 295 | } 296 | else { 297 | break; 298 | } 299 | } 300 | 301 | while (desc = (descriptor *)lf_fifo_dequeue(&temp)) { 302 | lf_fifo_enqueue(&sc->Partial, (void *)desc); 303 | } 304 | */ 305 | } 306 | 307 | static descriptor* ListGetPartial(sizeclass* sc) 308 | { 309 | return (descriptor*)lf_fifo_dequeue(&sc->Partial); 310 | } 311 | 312 | static void ListPutPartial(descriptor* desc) 313 | { 314 | lf_fifo_enqueue(&desc->heap->sc->Partial, (void*)desc); 315 | } 316 | 317 | static void RemoveEmptyDesc(procheap* heap, descriptor* desc) 318 | { 319 | if (compare_and_swap_ptr(&heap->Partial, desc, NULL)) { 320 | DescRetire(desc); 321 | } 322 | else { 323 | ListRemoveEmptyDesc(heap->sc); 324 | } 325 | } 326 | 327 | static descriptor* HeapGetPartial(procheap* heap) 328 | { 329 | descriptor* desc; 330 | 331 | do { 332 | desc = *((descriptor**)&heap->Partial); // casts away the volatile 333 | if (desc == NULL) { 334 | return ListGetPartial(heap->sc); 335 | } 336 | } while (!compare_and_swap_ptr(&heap->Partial, desc, NULL)); 337 | 338 | return desc; 339 | } 340 | 341 | static void HeapPutPartial(descriptor* desc) 342 | { 343 | descriptor* prev; 344 | 345 | do { 346 | prev = (descriptor*)desc->heap->Partial; // casts away volatile 347 | } while (!compare_and_swap_ptr(&desc->heap->Partial, prev, desc)); 348 | 349 | if (prev) { 350 | ListPutPartial(prev); 351 | } 352 | } 353 | 354 | static void UpdateActive(procheap* heap, descriptor* desc, unsigned long morecredits) 355 | { 356 | active oldactive, newactive; 357 | anchor oldanchor, newanchor; 358 | 359 | #ifdef DEBUG 360 | fprintf(stderr, "UpdateActive() heap->Active %p, credits %lu\n", *((void**)&heap->Active), morecredits); 361 | fflush(stderr); 362 | 
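	// (Credit accounting: the free blocks of an Active superblock are
	// handed out as "credits". A thread that successfully decrements
	// Active.credits has reserved one block in advance. UpdateActive()
	// tries to reinstall this superblock as Active with the morecredits
	// blocks just reserved; if another superblock was installed first,
	// the credits are returned to Anchor.count and the superblock is
	// listed as PARTIAL instead.)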
#endif 363 | 364 | *((unsigned long long*)&oldactive) = 0; 365 | newactive.ptr = (unsigned long)desc; 366 | newactive.credits = morecredits - 1; 367 | if (compare_and_swap64((volatile unsigned long *)&heap->Active, *((unsigned long*)&oldactive), *((unsigned long*)&newactive))) { 368 | return; 369 | } 370 | 371 | // Someone installed another active sb 372 | // Return credits to sb and make it partial 373 | do { 374 | newanchor = oldanchor = desc->Anchor; 375 | newanchor.count += morecredits; 376 | newanchor.state = PARTIAL; 377 | } while (!compare_and_swap64((volatile unsigned long *)&desc->Anchor, *((unsigned long*)&oldanchor), *((unsigned long*)&newanchor))); 378 | 379 | HeapPutPartial(desc); 380 | } 381 | 382 | static descriptor* mask_credits(active oldactive) 383 | { 384 | return (descriptor*)oldactive.ptr; 385 | } 386 | 387 | static void* MallocFromActive(procheap *heap) 388 | { 389 | active newactive, oldactive; 390 | descriptor* desc; 391 | anchor oldanchor, newanchor; 392 | void* addr; 393 | unsigned long morecredits = 0; 394 | unsigned int next = 0; 395 | 396 | // First step: reserve block 397 | do { 398 | newactive = oldactive = heap->Active; 399 | if (!(*((unsigned long long*)(&oldactive)))) { 400 | return NULL; 401 | } 402 | if (oldactive.credits == 0) { 403 | *((unsigned long long*)(&newactive)) = 0; 404 | #ifdef DEBUG 405 | fprintf(stderr, "MallocFromActive() setting active to NULL, %lu, %d\n", newactive.ptr, newactive.credits); 406 | fflush(stderr); 407 | #endif 408 | } 409 | else { 410 | --newactive.credits; 411 | } 412 | } while (!compare_and_swap64((volatile unsigned long*)&heap->Active, *((unsigned long*)&oldactive), *((unsigned long*)&newactive))); 413 | 414 | #ifdef DEBUG 415 | fprintf(stderr, "MallocFromActive() heap->Active %p, credits %hu\n", *((void**)&heap->Active), oldactive.credits); 416 | fflush(stderr); 417 | #endif 418 | 419 | // Second step: pop block 420 | desc = mask_credits(oldactive); 421 | do { 422 | // state may be ACTIVE, PARTIAL or FULL 423 | newanchor = oldanchor = desc->Anchor; 424 | addr = (void *)((unsigned long)desc->sb + oldanchor.avail * desc->sz); 425 | next = *(unsigned long *)addr; 426 | newanchor.avail = next; 427 | ++newanchor.tag; 428 | 429 | if (oldactive.credits == 0) { 430 | 431 | // state must be ACTIVE 432 | if (oldanchor.count == 0) { 433 | #ifdef DEBUG 434 | fprintf(stderr, "MallocFromActive() setting superblock %p to FULL\n", desc->sb); 435 | fflush(stderr); 436 | #endif 437 | newanchor.state = FULL; 438 | } 439 | else { 440 | morecredits = min(oldanchor.count, MAXCREDITS); 441 | newanchor.count -= morecredits; 442 | } 443 | } 444 | } while (!compare_and_swap64((volatile unsigned long*)&desc->Anchor, *((unsigned long*)&oldanchor), *((unsigned long*)&newanchor))); 445 | 446 | #ifdef DEBUG 447 | fprintf(stderr, "MallocFromActive() sb %p, Active %p, avail %d, oldanchor.count %hu, newanchor.count %hu, morecredits %lu, MAX %d\n", 448 | desc->sb, *((void**)&heap->Active), desc->Anchor.avail, oldanchor.count, newanchor.count, morecredits, MAXCREDITS); 449 | fflush(stderr); 450 | #endif 451 | 452 | if (oldactive.credits == 0 && oldanchor.count > 0) { 453 | UpdateActive(heap, desc, morecredits); 454 | } 455 | 456 | *((char*)addr) = (char)SMALL; 457 | addr += TYPE_SIZE; 458 | *((descriptor**)addr) = desc; 459 | return ((void*)((unsigned long)addr + PTR_SIZE)); 460 | } 461 | 462 | static void* MallocFromPartial(procheap* heap) 463 | { 464 | descriptor* desc; 465 | anchor oldanchor, newanchor; 466 | unsigned long morecredits; 467 | void* 
addr; 468 | 469 | retry: 470 | desc = HeapGetPartial(heap); 471 | if (!desc) { 472 | return NULL; 473 | } 474 | 475 | desc->heap = heap; 476 | do { 477 | // reserve blocks 478 | newanchor = oldanchor = desc->Anchor; 479 | if (oldanchor.state == EMPTY) { 480 | DescRetire(desc); 481 | goto retry; 482 | } 483 | 484 | // oldanchor state must be PARTIAL 485 | // oldanchor count must be > 0 486 | morecredits = min(oldanchor.count - 1, MAXCREDITS); 487 | newanchor.count -= morecredits + 1; 488 | newanchor.state = (morecredits > 0) ? ACTIVE : FULL; 489 | } while (!compare_and_swap64((volatile unsigned long*)&desc->Anchor, *((unsigned long*)&oldanchor), *((unsigned long*)&newanchor))); 490 | 491 | do { 492 | // pop reserved block 493 | newanchor = oldanchor = desc->Anchor; 494 | addr = (void*)((unsigned long)desc->sb + oldanchor.avail * desc->sz); 495 | 496 | newanchor.avail = *(unsigned long*)addr; 497 | ++newanchor.tag; 498 | } while (!compare_and_swap64((volatile unsigned long*)&desc->Anchor, *((unsigned long*)&oldanchor), *((unsigned long*)&newanchor))); 499 | 500 | if (morecredits > 0) { 501 | UpdateActive(heap, desc, morecredits); 502 | } 503 | 504 | *((char*)addr) = (char)SMALL; 505 | addr += TYPE_SIZE; 506 | *((descriptor**)addr) = desc; 507 | return ((void *)((unsigned long)addr + PTR_SIZE)); 508 | } 509 | 510 | static void* MallocFromNewSB(procheap* heap) 511 | { 512 | descriptor* desc; 513 | void* addr; 514 | active newactive, oldactive; 515 | 516 | *((unsigned long long*)&oldactive) = 0; 517 | desc = DescAlloc(); 518 | desc->sb = AllocNewSB(heap->sc->sbsize, SBSIZE); 519 | 520 | desc->heap = heap; 521 | desc->Anchor.avail = 1; 522 | desc->sz = heap->sc->sz; 523 | desc->maxcount = heap->sc->sbsize / desc->sz; 524 | 525 | // Organize blocks in a linked list starting with index 0. 526 | organize_list(desc->sb, desc->maxcount, desc->sz); 527 | 528 | #ifdef DEBUG 529 | fprintf(stderr, "New SB %p associated with desc %p (sz %u, sbsize %d, heap %p, Anchor.avail %hu, Anchor.count %hu)\n", 530 | desc->sb, desc, desc->sz, heap->sc->sbsize, heap, desc->Anchor.avail, desc->Anchor.count); 531 | fflush(stderr); 532 | #endif 533 | 534 | *((unsigned long long*)&newactive) = 0; 535 | newactive.ptr = (unsigned long)desc; 536 | newactive.credits = min(desc->maxcount - 1, MAXCREDITS) - 1; 537 | 538 | desc->Anchor.count = max(((signed long)desc->maxcount - 1 ) - ((signed long)newactive.credits + 1), 0); // max added by Scott 539 | desc->Anchor.state = ACTIVE; 540 | 541 | #ifdef DEBUG 542 | fprintf(stderr, "MallocFromNewSB() sz %u, maxcount %u, Anchor.count %hu, newactive.credits %hu, max %ld\n", 543 | desc->sz, desc->maxcount, desc->Anchor.count, newactive.credits, 544 | ((signed long)desc->maxcount - 1 ) - ((signed long)newactive.credits + 1)); 545 | fflush(stderr); 546 | #endif 547 | 548 | // memory fence. 549 | if (compare_and_swap64((volatile unsigned long*)&heap->Active, *((unsigned long*)&oldactive), *((unsigned long*)&newactive))) { 550 | addr = desc->sb; 551 | *((char*)addr) = (char)SMALL; 552 | addr += TYPE_SIZE; 553 | *((descriptor **)addr) = desc; 554 | return (void *)((unsigned long)addr + PTR_SIZE); 555 | } 556 | else { 557 | //Free the superblock desc->sb. 
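		// (Another thread won the race and installed its own Active
		// superblock first; undo our work and return NULL, which sends
		// malloc() around its retry loop.)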
558 | 		munmap(desc->sb, desc->heap->sc->sbsize);
559 | 		DescRetire(desc);
560 | 		return NULL;
561 | 	}
562 | }
563 | 
564 | static procheap* find_heap(size_t sz)
565 | {
566 | 	procheap* heap;
567 | 
568 | 	// We need to fit both the object and its header (type byte plus descriptor pointer) in a single block
569 | 	sz += HEADER_SIZE;
570 | 	if (sz > 2048) {
571 | 		return NULL;
572 | 	}
573 | 
574 | 	heap = heaps[sz / GRANULARITY];
575 | 	if (heap == NULL) {
576 | 		heap = mmap(NULL, sizeof(procheap), PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
577 | 		*((unsigned long long*)&(heap->Active)) = 0;
578 | 		heap->Partial = NULL;
579 | 		heap->sc = &sizeclasses[sz / GRANULARITY];
580 | 		heaps[sz / GRANULARITY] = heap;
581 | 	}
582 | 
583 | 	return heap;
584 | }
585 | 
586 | static void* alloc_large_block(size_t sz)
587 | {
588 | 	void* addr;
589 | 	addr = mmap(NULL, sz + HEADER_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
590 | 
591 | 	// The type byte of the header marks the object as large (allocated / freed directly from / to the OS); the pointer slot then holds the mapping size rather than a descriptor
592 | 	*((char*)addr) = (char)LARGE;
593 | 	addr += TYPE_SIZE;
594 | 	*((unsigned long *)addr) = sz + HEADER_SIZE;
595 | 	return (void*)(addr + PTR_SIZE);
596 | }
597 | 
598 | void* malloc(size_t sz)
599 | {
600 | 	procheap *heap;
601 | 	void* addr;
602 | 
603 | #ifdef DEBUG
604 | 	fprintf(stderr, "malloc() sz %lu\n", sz);
605 | 	fflush(stderr);
606 | #endif
607 | 	// Use sz and thread id to find heap.
608 | 	heap = find_heap(sz);
609 | 
610 | 	if (!heap) {
611 | 		// Large block
612 | 		addr = alloc_large_block(sz);
613 | #ifdef DEBUG
614 | 		fprintf(stderr, "Large block allocation: %p\n", addr);
615 | 		fflush(stderr);
616 | #endif
617 | 		return addr;
618 | 	}
619 | 
620 | 	while(1) {
621 | 		addr = MallocFromActive(heap);
622 | 		if (addr) {
623 | #ifdef DEBUG
624 | 			fprintf(stderr, "malloc() return MallocFromActive %p\n", addr);
625 | 			fflush(stderr);
626 | #endif
627 | 			return addr;
628 | 		}
629 | 		addr = MallocFromPartial(heap);
630 | 		if (addr) {
631 | #ifdef DEBUG
632 | 			fprintf(stderr, "malloc() return MallocFromPartial %p\n", addr);
633 | 			fflush(stderr);
634 | #endif
635 | 			return addr;
636 | 		}
637 | 		addr = MallocFromNewSB(heap);
638 | 		if (addr) {
639 | #ifdef DEBUG
640 | 			fprintf(stderr, "malloc() return MallocFromNewSB %p\n", addr);
641 | 			fflush(stderr);
642 | #endif
643 | 			return addr;
644 | 		}
645 | 	}
646 | }
647 | 
648 | void free(void* ptr)
649 | {
650 | 	descriptor* desc;
651 | 	void* sb;
652 | 	anchor oldanchor, newanchor;
653 | 	procheap* heap = NULL;
654 | 
655 | #ifdef DEBUG
656 | 	fprintf(stderr, "Calling my free %p\n", ptr);
657 | 	fflush(stderr);
658 | #endif
659 | 
660 | 	if (!ptr) {
661 | 		return;
662 | 	}
663 | 
664 | 	// get prefix
665 | 	ptr = (void*)((unsigned long)ptr - HEADER_SIZE);
666 | 	if (*((char*)ptr) == (char)LARGE) {
667 | #ifdef DEBUG
668 | 		fprintf(stderr, "Freeing large block\n");
669 | 		fflush(stderr);
670 | #endif
671 | 		munmap(ptr, *((unsigned long *)(ptr + TYPE_SIZE)));
672 | 		return;
673 | 	}
674 | 	desc = *((descriptor**)((unsigned long)ptr + TYPE_SIZE));
675 | 
676 | 	sb = desc->sb;
677 | 	do {
678 | 		newanchor = oldanchor = desc->Anchor;
679 | 
680 | 		*((unsigned long*)ptr) = oldanchor.avail;
681 | 		newanchor.avail = ((unsigned long)ptr - (unsigned long)sb) / desc->sz;
682 | 
683 | 		if (oldanchor.state == FULL) {
684 | #ifdef DEBUG
685 | 			fprintf(stderr, "Marking superblock %p as PARTIAL\n", sb);
686 | 			fflush(stderr);
687 | #endif
688 | 			newanchor.state = PARTIAL;
689 | 		}
690 | 
691 | 		if (oldanchor.count == desc->maxcount - 1) {
692 | 			heap = desc->heap;
693 | 			// instruction fence.
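			// (This was the last outstanding block: the superblock
			// becomes EMPTY, and after the CAS below it is unmapped and
			// its descriptor recycled via RemoveEmptyDesc().)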
#ifdef DEBUG
694 | 			fprintf(stderr, "Marking superblock %p as EMPTY; count %d\n", sb, oldanchor.count);
695 | 			fflush(stderr);
696 | #endif
697 | 
698 | 			newanchor.state = EMPTY;
699 | 		}
700 | 		else {
701 | 			++newanchor.count;
702 | 		}
703 | 		// memory fence.
704 | 	} while (!compare_and_swap64((volatile unsigned long*)&desc->Anchor, *((unsigned long*)&oldanchor), *((unsigned long*)&newanchor)));
705 | 
706 | 	if (newanchor.state == EMPTY) {
707 | #ifdef DEBUG
708 | 		fprintf(stderr, "Freeing superblock %p with desc %p (count %hu)\n", sb, desc, desc->Anchor.count);
709 | 		fflush(stderr);
710 | #endif
711 | 
712 | 		munmap(sb, heap->sc->sbsize);
713 | 		RemoveEmptyDesc(heap, desc);
714 | 	}
715 | 	else if (oldanchor.state == FULL) {
716 | #ifdef DEBUG
717 | 		fprintf(stderr, "Putting superblock %p to PARTIAL heap\n", sb);
718 | 		fflush(stderr);
719 | #endif
720 | 		HeapPutPartial(desc);
721 | 	}
722 | }
723 | 
724 | void *calloc(size_t nmemb, size_t size)
725 | {
726 | 	void *ptr;
727 | 
728 | 	ptr = malloc(nmemb*size);
729 | 	if (!ptr) {
730 | 		return NULL;
731 | 	}
732 | 
733 | 	return memset(ptr, 0, nmemb*size);
734 | }
735 | 
736 | void *valloc(size_t size)
737 | {
738 | 	fprintf(stderr, "valloc() called in libmichael. Not implemented. Exiting.\n");
739 | 	fflush(stderr);
740 | 	exit(1);
741 | }
742 | 
743 | void *memalign(size_t boundary, size_t size)
744 | {
745 | 	void *p;
746 | 
747 | 	p = malloc((size + boundary - 1) & ~(boundary - 1)); // XXX: rounding size up does not reserve room to shift p forward, and the shifted pointer below no longer sits behind an allocator header, so free() on it is unreliable
748 | 	if (!p) {
749 | 		return NULL;
750 | 	}
751 | 
752 | 	return (void*)(((unsigned long)p + boundary - 1) & ~(boundary - 1));
753 | }
754 | 
755 | int posix_memalign(void **memptr, size_t alignment, size_t size)
756 | {
757 | 	*memptr = memalign(alignment, size);
758 | 	if (*memptr) {
759 | 		return 0;
760 | 	}
761 | 	else {
762 | 		/* A conforming implementation would return EINVAL or ENOMEM here, depending on the error */
763 | 		return -1;
764 | 	}
765 | }
766 | 
767 | void *realloc(void *object, size_t size)
768 | {
769 | 	descriptor* desc;
770 | 	void* header;
771 | 	void* ret;
772 | 
773 | 	if (object == NULL) {
774 | 		return malloc(size);
775 | 	}
776 | 	else if (size == 0) {
777 | 		free(object);
778 | 		return NULL;
779 | 	}
780 | 
781 | 	header = (void*)((unsigned long)object - HEADER_SIZE);
782 | 
783 | 	if (*((char*)header) == (char)LARGE) {
784 | 		ret = malloc(size);
785 | 		memcpy(ret, object, min(size, *((unsigned long *)(header + TYPE_SIZE)) - HEADER_SIZE)); // copy only the old payload (the stored size includes the header), and never more than the new block holds
786 | 		munmap(header, *((unsigned long *)(header + TYPE_SIZE))); // the mapping starts at the header, not at object
787 | 	}
788 | 	else {
789 | 		desc = *((descriptor**)((unsigned long)header + TYPE_SIZE));
790 | 		if (size <= desc->sz - HEADER_SIZE) {
791 | 			ret = object;
792 | 		}
793 | 		else {
794 | 			ret = malloc(size);
795 | 			memcpy(ret, object, desc->sz - HEADER_SIZE);
796 | 			free(object);
797 | 		}
798 | 	}
799 | 
800 | 	return ret;
801 | }
802 | 
803 | 
--------------------------------------------------------------------------------
/michael.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (C) 2007 Scott Schneider, Christos Antonopoulos
3 |  *
4 |  * This library is free software; you can redistribute it and/or
5 |  * modify it under the terms of the GNU Lesser General Public
6 |  * License as published by the Free Software Foundation; either
7 |  * version 2.1 of the License, or (at your option) any later version.
8 |  *
9 |  * This library is distributed in the hope that it will be useful,
10 |  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 |  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 |  * Lesser General Public License for more details.
13 |  *
14 |  * You should have received a copy of the GNU Lesser General Public
15 |  * License along with this library; if not, write to the Free Software
16 |  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 |  */
18 | 
19 | #ifndef __MAGED_H__
20 | #define __MAGED_H__
21 | 
22 | #include <stdio.h>
23 | #include <stdlib.h>
24 | #include <string.h>
25 | #include <unistd.h>
26 | #include <errno.h>
27 | #include <sys/mman.h>
28 | #include <pthread.h>
29 | 
30 | #include "atomic.h"
31 | #include "queue.h"
32 | 
33 | struct Descriptor;
34 | typedef struct Descriptor descriptor;
35 | struct Procheap;
36 | typedef struct Procheap procheap;
37 | 
38 | #define TYPE_SIZE	4
39 | #define PTR_SIZE	sizeof(void*)
40 | #define HEADER_SIZE	(TYPE_SIZE + PTR_SIZE)
41 | 
42 | #define LARGE		0
43 | #define SMALL		1
44 | 
45 | #define PAGESIZE	4096
46 | #define SBSIZE		(16 * PAGESIZE)
47 | #define DESCSBSIZE	(1024 * sizeof(descriptor))
48 | 
49 | #define ACTIVE		0
50 | #define FULL		1
51 | #define PARTIAL		2
52 | #define EMPTY		3
53 | 
54 | #define MAXCREDITS	64	// 2^(bits for credits in active)
55 | #define GRANULARITY	8
56 | 
57 | /* We need to squeeze this in 64-bits, but conceptually
58 |  * this is the case:
59 |  *	descriptor* DescAvail;
60 |  */
61 | typedef struct {
62 | 	unsigned long long DescAvail:46, tag:18;
63 | } descriptor_queue;
64 | 
65 | /* Superblock anchor word. We bumped avail and count
66 |  * to 24 bits to support larger superblock sizes. */
67 | typedef struct {
68 | 	unsigned long long avail:24, count:24, state:2, tag:14;
69 | } anchor;
70 | 
71 | struct Descriptor {
72 | 	struct queue_elem_t	lf_fifo_queue_padding;
73 | 	volatile anchor		Anchor;
74 | 	descriptor*		Next;
75 | 	void*			sb;		// pointer to superblock
76 | 	procheap*		heap;		// pointer to owner procheap
77 | 	unsigned int		sz;		// block size
78 | 	unsigned int		maxcount;	// superblock size / sz
79 | };
80 | 
81 | typedef struct {
82 | 	lf_fifo_queue_t		Partial;	// initially empty
83 | 	unsigned int		sz;		// block size
84 | 	unsigned int		sbsize;		// superblock size
85 | } sizeclass;
86 | 
87 | typedef struct {
88 | 	unsigned long long	ptr:58, credits:6;
89 | } active;
90 | 
91 | struct Procheap {
92 | 	volatile active		Active;		// initially NULL
93 | 	volatile descriptor*	Partial;	// initially NULL
94 | 	sizeclass*		sc;		// pointer to parent sizeclass
95 | };
96 | 
97 | extern void* malloc(size_t sz);
98 | extern void free(void* ptr);
99 | 
100 | #endif	/* __MAGED_H__ */
101 | 
102 | 
--------------------------------------------------------------------------------