├── .clang-format ├── .gitignore ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── assets └── data-structure.png ├── bench.c ├── test.c ├── tlsf.c └── tlsf.h /.clang-format: -------------------------------------------------------------------------------- 1 | BasedOnStyle: Chromium 2 | Language: Cpp 3 | MaxEmptyLinesToKeep: 3 4 | IndentCaseLabels: false 5 | AllowShortIfStatementsOnASingleLine: false 6 | AllowShortCaseLabelsOnASingleLine: false 7 | AllowShortLoopsOnASingleLine: false 8 | DerivePointerAlignment: false 9 | PointerAlignment: Right 10 | SpaceAfterCStyleCast: true 11 | TabWidth: 4 12 | UseTab: Never 13 | IndentWidth: 4 14 | BreakBeforeBraces: Linux 15 | AccessModifierOffset: -4 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.o 3 | *.i 4 | *.s 5 | *.P 6 | *.elf 7 | *.bin 8 | *.hex 9 | *.map 10 | *.swp 11 | *.fuse* 12 | *.pyc 13 | *.swo 14 | *.out 15 | build/ 16 | tags 17 | core 18 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: c 2 | 3 | dist: bionic 4 | 5 | script: make test 6 | 7 | matrix: 8 | include: 9 | - os: linux 10 | addons: 11 | apt: 12 | sources: 13 | - llvm-toolchain-trusty-3.9 14 | packages: 15 | - clang-3.9 16 | env: 17 | - MATRIX_EVAL="CC=clang-3.9" 18 | 19 | - os: linux 20 | addons: 21 | apt: 22 | sources: 23 | - llvm-toolchain-trusty-4.0 24 | packages: 25 | - clang-4.0 26 | env: 27 | - MATRIX_EVAL="CC=clang-4.0; export CFLAGS=-fsanitize=undefined" 28 | 29 | - os: linux 30 | addons: 31 | apt: 32 | sources: 33 | - ubuntu-toolchain-r-test 34 | packages: 35 | - gcc-5 36 | env: 37 | - MATRIX_EVAL="CC=gcc-5" 38 | 39 | - os: linux 40 | addons: 41 | apt: 42 | sources: 43 | - ubuntu-toolchain-r-test 44 | packages: 45 | - gcc-6 46 | env: 47 | - MATRIX_EVAL="CC=gcc-6" 48 | 49 | - os: linux 50 | addons: 51 | apt: 52 | sources: 53 | - ubuntu-toolchain-r-test 54 | packages: 55 | - gcc-7 56 | env: 57 | - MATRIX_EVAL="CC=gcc-7; export CFLAGS=-fsanitize=undefined" 58 | 59 | - os: linux 60 | addons: 61 | apt: 62 | sources: 63 | - ubuntu-toolchain-r-test 64 | packages: 65 | - gcc-7-i686-linux-gnu 66 | - gcc-7-multilib-i686-linux-gnu 67 | env: 68 | - MATRIX_EVAL="CC=i686-linux-gnu-gcc-7; export LDFLAGS=-static" 69 | 70 | before_install: 71 | - eval "${MATRIX_EVAL}" 72 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2006-2016, Matthew Conte 4 | Copyright (c) 2017-2020, Daniel Mendler 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are met: 9 | 10 | 1. Redistributions of source code must retain the above copyright notice, this 11 | list of conditions and the following disclaimer. 12 | 13 | 2. Redistributions in binary form must reproduce the above copyright notice, 14 | this list of conditions and the following disclaimer in the documentation 15 | and/or other materials provided with the distribution. 16 | 17 | 3. 
Neither the name of the copyright holder nor the names of its 18 | contributors may be used to endorse or promote products derived from 19 | this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | OUT = build 2 | 3 | TARGETS = \ 4 | test \ 5 | bench 6 | TARGETS := $(addprefix $(OUT)/,$(TARGETS)) 7 | 8 | all: $(TARGETS) 9 | 10 | test: all 11 | ./build/bench 12 | ./build/bench -s 32 13 | ./build/bench -s 10:12345 14 | ./build/test 15 | 16 | CFLAGS += \ 17 | -std=gnu11 -g -O2 \ 18 | -Wall -Wextra -Wshadow -Wpointer-arith -Wcast-qual -Wconversion -Wc++-compat \ 19 | -DTLSF_ENABLE_ASSERT -DTLSF_ENABLE_CHECK 20 | 21 | OBJS = tlsf.o 22 | OBJS := $(addprefix $(OUT)/,$(OBJS)) 23 | deps := $(OBJS:%.o=%.o.d) 24 | 25 | $(OUT)/test: $(OBJS) test.c 26 | $(CC) $(CFLAGS) -o $@ $^ $(LDFLAGS) 27 | 28 | $(OUT)/bench: $(OBJS) bench.c 29 | $(CC) $(CFLAGS) -o $@ -MMD -MF $@.d $^ $(LDFLAGS) 30 | 31 | $(OUT)/%.o: %.c 32 | @mkdir -p $(OUT) 33 | $(CC) $(CFLAGS) -c -o $@ -MMD -MF $@.d $< 34 | 35 | CMDSEP = ; echo "Please wait..." ; 36 | check: $(TARGETS) 37 | MALLOC_CHECK_=3 $(foreach prog,$(TARGETS),./$(prog) $(CMDSEP)) 38 | 39 | clean: 40 | $(RM) $(TARGETS) $(OBJS) $(deps) 41 | 42 | .PHONY: all check clean test 43 | 44 | -include $(deps) 45 | -------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------

# tlsf-bsd: Two-Level Segregated Fit Memory Allocator

Two-Level Segregated Fit (TLSF) memory allocator implementation derived from the BSD-licensed implementation by [Matthew Conte](https://github.com/mattconte/tlsf).
This code was based on the [TLSF documentation](http://www.gii.upv.es/tlsf/main/docs.html).

TLSF is a dynamic memory allocation technique that performs both allocation and deallocation at a constant, O(1) temporal cost.
This matters especially for long-running applications, where fragmentation has a growing influence on system performance; TLSF also keeps fragmentation small and bounded.

This implementation was written to the specification of the document; therefore, no GPL restrictions apply.
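
## Usage

The snippet below is a minimal usage sketch distilled from how `bench.c` and `test.c` in this repository drive the allocator; the static arena, its 1 MiB size, and the `main()` body are illustrative only. The allocator manages a single linear memory area and calls the user-provided `tlsf_resize()` whenever that area has to grow or shrink; the function must return the base address of an area of at least `req_size` bytes, or `NULL` on failure.

```c
#include <stdio.h>
#include <string.h>

#include "tlsf.h"

/* Backing storage for the allocator (size and alignment are illustrative).
 * The area must stay at a fixed base address and be aligned for TLSF.
 */
static _Alignas(8) char arena[1 << 20];

void *tlsf_resize(tlsf_t *t, size_t req_size)
{
    (void) t;
    /* Grant any request that fits into the static arena. */
    return req_size <= sizeof(arena) ? arena : NULL;
}

int main(void)
{
    tlsf_t t = TLSF_INIT;

    char *p = tlsf_malloc(&t, 64);
    if (p) {
        strcpy(p, "hello, tlsf");
        puts(p);
        tlsf_free(&t, p);
    }
    return 0;
}
```

In a multi-threaded program, every call into the allocator must be serialized externally (for example with a mutex), as noted under the features below.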

## Features

* O(1) cost for `malloc`, `free`, `realloc`, `aligned_alloc`
* Low overhead per allocation (one word)
* Low overhead for the TLSF metadata (~4kB)
* Low fragmentation
* Very small - only ~500 lines of code
* Compiles to only a few kB of code and data
* Uses a linear memory area, which is resized on demand
* Not thread safe. API calls must be protected by a mutex in a multi-threaded environment.
* Works in environments with only a minimal libc; uses only `stddef.h`, `stdbool.h`, `stdint.h` and `string.h`.

## Design principles

1. Immediate coalescing: As soon as a block is freed, it is merged with adjacent free blocks, if any, to build a larger free block.
2. Splitting threshold: The smallest allocatable block is 16 bytes. This limit guarantees that every block can store the information needed to manage it, including the pointers of the free-block lists.
3. Good-fit strategy: TLSF uses a large set of free lists, where each list holds (unordered) free blocks whose sizes fall between its size class and the next size class. Each segregated list therefore contains blocks of the same class.
4. Same strategy for all block sizes: Some dynamic storage allocation (DSA) algorithms use different allocation strategies for different requested sizes. TLSF uses the same strategy for all sizes, which provides uniform behavior and thus a predictable worst-case execution time (WCET).
5. Memory is not cleaned up: In multi-user environments, DSA algorithms usually have to clean returned memory (typically by filling it with zeros) to avoid security problems. TLSF assumes it runs in a trusted environment and does not clean memory on allocation, which would add considerable overhead.

## How it works

This package offers constant-time, O(1) memory block allocation and deallocation by means of a segregated-fit mechanism.

A two-level structure speeds up access and reduces fragmentation:
* First level: divides free blocks into power-of-two size classes.
* Second level: subdivides each first-level range linearly, e.g. by 4. For instance, the first-level class 2^6 covers free blocks in the range [2^6, 2^7).
  - This range is then divided into 4 equidistant subranges.

![TLSF Data Structure for Free Blocks](assets/data-structure.png)

The structure consists of an array indexed by `log(2, requested_size)`.
In other words, requests are divided up according to the requested size's most significant bit (MSB).
Each item of the array contains a pointer to the second level of the structure.
At this level, the free blocks of each first-level size class are divided into x additional groups, where x is a configurable number.
An array of size x that implements this partitioning is indexed by taking the value of the `log(2, x)` bits that follow the MSB.
Each entry denotes the start of a linked list of free blocks (or is `NULL`).

Bitmaps record which first- and second-level lists are non-empty, so a free block of the requested size class (or, if none is available, of a larger size class) can be found in constant time.

When `tlsf_free()` is called, the block is checked for coalescing with neighbouring free blocks before it is returned to the free list.
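To make the index computation concrete, the sketch below shows how a request size maps to the two levels. It is illustrative only: the function name is made up, it assumes a 64-bit `size_t` and `size >= 2^SL_SHIFT`, and the real `mapping()` in `tlsf.c` additionally special-cases small blocks and rebases the first-level index (this implementation uses `SL_SHIFT = 4`, i.e. 16 second-level lists).

```c
#include <stddef.h>

#define SL_SHIFT 4 /* log2 of the number of second-level lists (16 here) */

/* Illustrative sketch, not part of the library: the first-level index is the
 * position of the size's most significant bit; the second-level index is the
 * value of the SL_SHIFT bits that follow it.
 * Example: size = 460 -> fl = 8 (class [256, 512)), sl = 12 (subrange [448, 464)).
 */
static void mapping_sketch(size_t size, unsigned *fl, unsigned *sl)
{
    unsigned msb = 63u - (unsigned) __builtin_clzll((unsigned long long) size);
    *fl = msb;
    *sl = (unsigned) (size >> (msb - SL_SHIFT)) & ((1u << SL_SHIFT) - 1u);
}
```

With these two indices in hand, the bitmaps described above are all that is needed to locate a suitable non-empty free list in constant time.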
### Finding a free block in TLSF `malloc()`

TLSF searches for a free block in the following order:
1. The first-level and second-level indices corresponding to the requested size are calculated. If a free block is available in the list at those indices, it is returned.
2. If no free block is available there, the remaining second-level indices of the same first-level class are searched. If a free block is found, it is returned.
3. If still nothing is found, the search moves on to the next first-level index whose bit is set in the bitmap; any list found this way is guaranteed to contain a block large enough for the request.

#### Worst cases
1. The bit for the first-level index calculated from the requested size is set, but examining its second-level indices fails to find a free block equal to or larger than the requested size, so the search has to continue at the next first level.
2. The next available free block sits on the right-most free-block list of the second level of the left-most first-level index. When a small block of size x is requested, x bytes are split off this huge block and returned; the remaining huge block then has to be moved to a lower first-level index, which causes the most overhead for a single allocation.

### Freeing a block in TLSF `free()`
1. When a block is freed, the first step is to check whether its physical neighbour blocks are free.
2. If either neighbour is free, it is merged with the newly freed block. The resulting larger block is then inserted into the appropriate segregated list (the mapping function is used to find its first- and second-level indices).
3. If neither neighbour is free, only the freed block itself is placed at the appropriate position in the segregated list.

## Reference

M. Masmano, I. Ripoll, A. Crespo, and J. Real.
TLSF: a new dynamic memory allocator for real-time systems.
In Proc. ECRTS (2004), IEEE Computer Society, pp. 79-86.

## Related Projects

* [tlsf-pmr](https://github.com/LiemDQ/tlsf-pmr): a memory resource for use with `polymorphic_allocator` that uses the Two-Level Segregated Fit algorithm as an allocation scheme.

## Licensing

TLSF-BSD is freely redistributable under the 3-clause BSD License.
Use of this source code is governed by a BSD-style license that can be found in the `LICENSE` file.
-------------------------------------------------------------------------------- /assets/data-structure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jserv/tlsf-bsd/564ee494860ee5dc0b5f3579fd008da1035c04c9/assets/data-structure.png -------------------------------------------------------------------------------- /bench.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2016 National Cheng Kung University, Taiwan. 3 | * All rights reserved. 4 | * Use of this source code is governed by a BSD-style license. 5 | */ 6 | 7 | #include <assert.h> 8 | #include <errno.h> 9 | #include <stdbool.h> 10 | #include <stdio.h> 11 | #include <stdlib.h> 12 | #include <string.h> 13 | #include <sys/resource.h> 14 | #include <sys/time.h> 15 | #include <time.h> 16 | #include <unistd.h> 17 | 18 | #include "tlsf.h" 19 | 20 | static tlsf_t t = TLSF_INIT; 21 | 22 | static void usage(const char *name) 23 | { 24 | printf( 25 | "run a malloc benchmark.\n" 26 | "usage: %s [-s blk-size|blk-min:blk-max] [-l loop-count] " 27 | "[-n num-blocks] [-c]\n", 28 | name); 29 | exit(-1); 30 | } 31 | 32 | /* Parse an integer argument. 
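 * Accepts decimal, hexadecimal (0x...) or octal notation via strtol() with
 * base 0; if strtol() flags an error via errno, the usage message is printed
 * and the program exits.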
*/ 33 | static size_t parse_int_arg(const char *arg, const char *exe_name) 34 | { 35 | long ret = strtol(arg, NULL, 0); 36 | if (errno) 37 | usage(exe_name); 38 | 39 | return (size_t) ret; 40 | } 41 | 42 | /* Parse a size argument, which is either an integer or two integers separated 43 | * by a colon, denoting a range. 44 | */ 45 | static void parse_size_arg(const char *arg, 46 | const char *exe_name, 47 | size_t *blk_min, 48 | size_t *blk_max) 49 | { 50 | char *endptr; 51 | *blk_min = (size_t) strtol(arg, &endptr, 0); 52 | 53 | if (errno) 54 | usage(exe_name); 55 | 56 | if (endptr && *endptr == ':') { 57 | *blk_max = (size_t) strtol(endptr + 1, NULL, 0); 58 | if (errno) 59 | usage(exe_name); 60 | } 61 | 62 | if (*blk_min > *blk_max) 63 | usage(exe_name); 64 | } 65 | 66 | /* Get a random block size between blk_min and blk_max. */ 67 | static size_t get_random_block_size(size_t blk_min, size_t blk_max) 68 | { 69 | if (blk_max > blk_min) 70 | return blk_min + ((size_t) rand() % (blk_max - blk_min)); 71 | return blk_min; 72 | } 73 | 74 | static void run_alloc_benchmark(size_t loops, 75 | size_t blk_min, 76 | size_t blk_max, 77 | void **blk_array, 78 | size_t num_blks, 79 | bool clear) 80 | { 81 | while (loops--) { 82 | size_t next_idx = (size_t) rand() % num_blks; 83 | size_t blk_size = get_random_block_size(blk_min, blk_max); 84 | 85 | if (blk_array[next_idx]) { 86 | if (rand() % 10 == 0) { 87 | /* Insert the newly alloced block into the array at a random 88 | * point. 89 | */ 90 | blk_array[next_idx] = 91 | tlsf_realloc(&t, blk_array[next_idx], blk_size); 92 | } else { 93 | tlsf_free(&t, blk_array[next_idx]); 94 | /* Insert the newly alloced block into the array at a random 95 | * point. 96 | */ 97 | blk_array[next_idx] = tlsf_malloc(&t, blk_size); 98 | } 99 | } else { 100 | /* Insert the newly alloced block into the array at a random point. 101 | */ 102 | blk_array[next_idx] = tlsf_malloc(&t, blk_size); 103 | } 104 | if (clear) 105 | memset(blk_array[next_idx], 0, blk_size); 106 | } 107 | 108 | /* Free up all allocated blocks. */ 109 | for (size_t i = 0; i < num_blks; i++) { 110 | if (blk_array[i]) 111 | tlsf_free(&t, blk_array[i]); 112 | } 113 | } 114 | 115 | static size_t max_size; 116 | static void *mem = 0; 117 | 118 | void *tlsf_resize(tlsf_t *_t, size_t req_size) 119 | { 120 | (void) _t; 121 | return req_size <= max_size ? 
mem : 0; 122 | } 123 | 124 | int main(int argc, char **argv) 125 | { 126 | size_t blk_min = 512, blk_max = 512, num_blks = 10000; 127 | size_t loops = 10000000; 128 | bool clear = false; 129 | int opt; 130 | 131 | while ((opt = getopt(argc, argv, "s:l:r:t:n:b:ch")) > 0) { 132 | switch (opt) { 133 | case 's': 134 | parse_size_arg(optarg, argv[0], &blk_min, &blk_max); 135 | break; 136 | case 'l': 137 | loops = parse_int_arg(optarg, argv[0]); 138 | break; 139 | case 'n': 140 | num_blks = parse_int_arg(optarg, argv[0]); 141 | break; 142 | case 'c': 143 | clear = true; 144 | break; 145 | case 'h': 146 | usage(argv[0]); 147 | break; 148 | default: 149 | usage(argv[0]); 150 | break; 151 | } 152 | } 153 | 154 | max_size = blk_max * num_blks; 155 | mem = malloc(max_size); 156 | 157 | void **blk_array = (void **) calloc(num_blks, sizeof(void *)); 158 | assert(blk_array); 159 | 160 | struct timespec start, end; 161 | 162 | int err = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start); 163 | assert(err == 0); 164 | 165 | printf("blk_min=%zu to blk_max=%zu\n", blk_min, blk_max); 166 | 167 | run_alloc_benchmark(loops, blk_min, blk_max, blk_array, num_blks, clear); 168 | 169 | err = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &end); 170 | assert(err == 0); 171 | free(blk_array); 172 | 173 | double elapsed = (double) (end.tv_sec - start.tv_sec) + 174 | (double) (end.tv_nsec - start.tv_nsec) * 1e-9; 175 | 176 | struct rusage usage; 177 | err = getrusage(RUSAGE_SELF, &usage); 178 | assert(err == 0); 179 | 180 | /* Dump both machine and human readable versions */ 181 | printf( 182 | "%zu:%zu:%zu:%u:%lu:%.6f: took %.6f s for %zu malloc/free\nbenchmark " 183 | "loops of %zu-%zu " 184 | "bytes. ~%.3f us per loop\n", 185 | blk_min, blk_max, loops, clear, usage.ru_maxrss, elapsed, elapsed, 186 | loops, blk_min, blk_max, elapsed / (double) loops * 1e6); 187 | 188 | return 0; 189 | } 190 | -------------------------------------------------------------------------------- /test.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2016 National Cheng Kung University, Taiwan. 3 | * All rights reserved. 4 | * Use of this source code is governed by a BSD-style license. 5 | */ 6 | 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | 17 | #include "tlsf.h" 18 | 19 | static size_t PAGE; 20 | static size_t MAX_PAGES; 21 | static size_t curr_pages = 0; 22 | static void *start_addr = 0; 23 | 24 | void *tlsf_resize(tlsf_t *t, size_t req_size) 25 | { 26 | (void) t; 27 | 28 | if (!start_addr) 29 | start_addr = mmap(0, MAX_PAGES * PAGE, PROT_READ | PROT_WRITE, 30 | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); 31 | 32 | size_t req_pages = (req_size + PAGE - 1) / PAGE; 33 | if (req_pages > MAX_PAGES) 34 | return 0; 35 | 36 | if (req_pages != curr_pages) { 37 | if (req_pages < curr_pages) 38 | madvise((char *) start_addr + PAGE * req_pages, 39 | (size_t) (curr_pages - req_pages) * PAGE, MADV_DONTNEED); 40 | curr_pages = req_pages; 41 | } 42 | 43 | return start_addr; 44 | } 45 | 46 | static void random_test(tlsf_t *t, size_t spacelen, const size_t cap) 47 | { 48 | const size_t maxitems = 2 * spacelen; 49 | 50 | void **p = (void **) malloc(maxitems * sizeof(void *)); 51 | assert(p); 52 | 53 | /* Allocate random sizes up to the cap threshold. 54 | * Track them in an array. 
55 | */ 56 | int64_t rest = (int64_t) spacelen * (rand() % 6 + 1); 57 | unsigned i = 0; 58 | while (rest > 0) { 59 | size_t len = ((size_t) rand() % cap) + 1; 60 | if (rand() % 2 == 0) { 61 | p[i] = tlsf_malloc(t, len); 62 | } else { 63 | size_t align = 1U << (rand() % 20); 64 | if (cap < align) 65 | align = 0; 66 | else 67 | len = align * (((size_t) rand() % (cap / align)) + 1); 68 | p[i] = !align || !len ? tlsf_malloc(t, len) 69 | : tlsf_aalloc(t, align, len); 70 | if (align) 71 | assert(!((size_t) p[i] % align)); 72 | } 73 | assert(p[i]); 74 | rest -= (int64_t) len; 75 | 76 | if (rand() % 10 == 0) { 77 | len = ((size_t) rand() % cap) + 1; 78 | p[i] = tlsf_realloc(t, p[i], len); 79 | assert(p[i]); 80 | } 81 | 82 | tlsf_check(t); 83 | 84 | /* Fill with magic (only when testing up to 1MB). */ 85 | uint8_t *data = (uint8_t *) p[i]; 86 | if (spacelen <= 1024 * 1024) 87 | memset(data, 0, len); 88 | data[0] = 0xa5; 89 | 90 | if (i++ == maxitems) 91 | break; 92 | } 93 | 94 | /* Randomly deallocate the memory blocks until all of them are freed. 95 | * The free space should match the free space after initialisation. 96 | */ 97 | for (unsigned n = i; n;) { 98 | size_t target = (size_t) rand() % i; 99 | if (p[target] == NULL) 100 | continue; 101 | uint8_t *data = (uint8_t *) p[target]; 102 | assert(data[0] == 0xa5); 103 | tlsf_free(t, p[target]); 104 | p[target] = NULL; 105 | n--; 106 | 107 | tlsf_check(t); 108 | } 109 | 110 | free(p); 111 | } 112 | 113 | #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) 114 | 115 | static void random_sizes_test(tlsf_t *t) 116 | { 117 | const size_t sizes[] = {16, 32, 64, 128, 256, 512, 1024, 1024 * 1024}; 118 | 119 | for (unsigned i = 0; i < ARRAY_SIZE(sizes); i++) { 120 | unsigned n = 1024; 121 | 122 | while (n--) { 123 | size_t cap = (size_t) rand() % sizes[i] + 1; 124 | printf("sizes = %zu, cap = %zu\n", sizes[i], cap); 125 | random_test(t, sizes[i], cap); 126 | } 127 | } 128 | } 129 | 130 | static void large_alloc(tlsf_t *t, size_t s) 131 | { 132 | printf("large alloc %zu\n", s); 133 | for (size_t d = 0; d < 100 && d < s; ++d) { 134 | void *p = tlsf_malloc(t, s - d); 135 | assert(p); 136 | 137 | void *q = tlsf_malloc(t, s - d); 138 | assert(q); 139 | tlsf_free(t, q); 140 | 141 | q = tlsf_malloc(t, s - d); 142 | assert(q); 143 | tlsf_free(t, q); 144 | 145 | tlsf_free(t, p); 146 | tlsf_check(t); 147 | } 148 | } 149 | 150 | static void large_size_test(tlsf_t *t) 151 | { 152 | size_t s = 1; 153 | while (s <= TLSF_MAX_SIZE) { 154 | large_alloc(t, s); 155 | s *= 2; 156 | } 157 | 158 | s = TLSF_MAX_SIZE; 159 | while (s > 0) { 160 | large_alloc(t, s); 161 | s /= 2; 162 | } 163 | } 164 | 165 | int main(void) 166 | { 167 | PAGE = (size_t) sysconf(_SC_PAGESIZE); 168 | MAX_PAGES = 20 * TLSF_MAX_SIZE / PAGE; 169 | tlsf_t t = TLSF_INIT; 170 | srand((unsigned int) time(0)); 171 | large_size_test(&t); 172 | random_sizes_test(&t); 173 | puts("OK!"); 174 | return 0; 175 | } 176 | -------------------------------------------------------------------------------- /tlsf.c: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2006-2016, Matthew Conte 3 | * Copyright (c) 2017-2020, Daniel Mendler 4 | * All rights reserved. 5 | * Use of this source code is governed by a BSD-style license. 6 | */ 7 | 8 | #include 9 | #include 10 | 11 | #include "tlsf.h" 12 | 13 | #ifndef UNLIKELY 14 | #define UNLIKELY(x) __builtin_expect(!!(x), false) 15 | #endif 16 | 17 | /* All allocation sizes and addresses are aligned. 
*/ 18 | #define ALIGN_SIZE ((size_t) 1 << ALIGN_SHIFT) 19 | #if __SIZE_WIDTH__ == 64 20 | #define ALIGN_SHIFT 3 21 | #else 22 | #define ALIGN_SHIFT 2 23 | #endif 24 | 25 | /* First level (FL) and second level (SL) counts */ 26 | #define SL_SHIFT 4 27 | #define SL_COUNT (1U << SL_SHIFT) 28 | #define FL_MAX _TLSF_FL_MAX 29 | #define FL_SHIFT (SL_SHIFT + ALIGN_SHIFT) 30 | #define FL_COUNT (FL_MAX - FL_SHIFT + 1) 31 | 32 | /* Block status bits are stored in the least significant bits (LSB) of the 33 | * size field. 34 | */ 35 | #define BLOCK_BIT_FREE ((size_t) 1) 36 | #define BLOCK_BIT_PREV_FREE ((size_t) 2) 37 | #define BLOCK_BITS (BLOCK_BIT_FREE | BLOCK_BIT_PREV_FREE) 38 | 39 | /* A free block must be large enough to store its header minus the size of the 40 | * prev field. 41 | */ 42 | #define BLOCK_OVERHEAD (sizeof(size_t)) 43 | #define BLOCK_SIZE_MIN (sizeof(tlsf_block_t) - sizeof(tlsf_block_t *)) 44 | #define BLOCK_SIZE_MAX ((size_t) 1 << (FL_MAX - 1)) 45 | #define BLOCK_SIZE_SMALL ((size_t) 1 << FL_SHIFT) 46 | 47 | #ifndef ASSERT 48 | #ifdef TLSF_ENABLE_ASSERT 49 | #include 50 | #define ASSERT(cond, msg) assert((cond) && msg) 51 | #else 52 | #define ASSERT(cond, msg) 53 | #endif 54 | #endif 55 | 56 | #ifndef INLINE 57 | #define INLINE static inline __attribute__((always_inline)) 58 | #endif 59 | 60 | typedef struct tlsf_block { 61 | /* Points to the previous block. 62 | * This field is only valid if the previous block is free and is actually 63 | * stored at the end of the previous block. 64 | */ 65 | struct tlsf_block *prev; 66 | 67 | /* Size and block bits */ 68 | size_t header; 69 | 70 | /* Next and previous free blocks. 71 | * These fields are only valid if the corresponding block is free. 72 | */ 73 | struct tlsf_block *next_free, *prev_free; 74 | } tlsf_block_t; 75 | 76 | _Static_assert(sizeof(size_t) == 4 || sizeof(size_t) == 8, 77 | "size_t must be 32 or 64 bit"); 78 | _Static_assert(sizeof(size_t) == sizeof(void *), 79 | "size_t must equal pointer size"); 80 | _Static_assert(ALIGN_SIZE == BLOCK_SIZE_SMALL / SL_COUNT, 81 | "sizes are not properly set"); 82 | _Static_assert(BLOCK_SIZE_MIN < BLOCK_SIZE_SMALL, 83 | "min allocation size is wrong"); 84 | _Static_assert(BLOCK_SIZE_MAX == TLSF_MAX_SIZE + BLOCK_OVERHEAD, 85 | "max allocation size is wrong"); 86 | _Static_assert(FL_COUNT <= 32, "index too large"); 87 | _Static_assert(SL_COUNT <= 32, "index too large"); 88 | _Static_assert(FL_COUNT == _TLSF_FL_COUNT, "invalid level configuration"); 89 | _Static_assert(SL_COUNT == _TLSF_SL_COUNT, "invalid level configuration"); 90 | 91 | INLINE uint32_t bitmap_ffs(uint32_t x) 92 | { 93 | uint32_t i = (uint32_t) __builtin_ffs((int32_t) x); 94 | ASSERT(i, "no set bit found"); 95 | return i - 1U; 96 | } 97 | 98 | INLINE uint32_t log2floor(size_t x) 99 | { 100 | ASSERT(x > 0, "log2 of zero"); 101 | #if __SIZE_WIDTH__ == 64 102 | return (uint32_t) (63 - (uint32_t) __builtin_clzll((unsigned long long) x)); 103 | #else 104 | return (uint32_t) (31 - (uint32_t) __builtin_clzl((unsigned long) x)); 105 | #endif 106 | } 107 | 108 | INLINE size_t block_size(const tlsf_block_t *block) 109 | { 110 | return block->header & ~BLOCK_BITS; 111 | } 112 | 113 | INLINE void block_set_size(tlsf_block_t *block, size_t size) 114 | { 115 | ASSERT(!(size % ALIGN_SIZE), "invalid size"); 116 | block->header = size | (block->header & BLOCK_BITS); 117 | } 118 | 119 | INLINE bool block_is_free(const tlsf_block_t *block) 120 | { 121 | return !!(block->header & BLOCK_BIT_FREE); 122 | } 123 | 124 | INLINE bool 
block_is_prev_free(const tlsf_block_t *block) 125 | { 126 | return !!(block->header & BLOCK_BIT_PREV_FREE); 127 | } 128 | 129 | INLINE void block_set_prev_free(tlsf_block_t *block, bool free) 130 | { 131 | block->header = free ? block->header | BLOCK_BIT_PREV_FREE 132 | : block->header & ~BLOCK_BIT_PREV_FREE; 133 | } 134 | 135 | INLINE size_t align_up(size_t x, size_t align) 136 | { 137 | ASSERT(!(align & (align - 1)), "must align to a power of two"); 138 | return (((x - 1) | (align - 1)) + 1); 139 | } 140 | 141 | INLINE char *align_ptr(char *p, size_t align) 142 | { 143 | return (char *) align_up((size_t) p, align); 144 | } 145 | 146 | INLINE char *block_payload(tlsf_block_t *block) 147 | { 148 | return (char *) block + offsetof(tlsf_block_t, header) + BLOCK_OVERHEAD; 149 | } 150 | 151 | INLINE tlsf_block_t *to_block(void *ptr) 152 | { 153 | tlsf_block_t *block = (tlsf_block_t *) ptr; 154 | ASSERT(block_payload(block) == align_ptr(block_payload(block), ALIGN_SIZE), 155 | "block not aligned properly"); 156 | return block; 157 | } 158 | 159 | INLINE tlsf_block_t *block_from_payload(void *ptr) 160 | { 161 | return to_block((char *) ptr - offsetof(tlsf_block_t, header) - 162 | BLOCK_OVERHEAD); 163 | } 164 | 165 | /* Return location of previous block. */ 166 | INLINE tlsf_block_t *block_prev(const tlsf_block_t *block) 167 | { 168 | ASSERT(block_is_prev_free(block), "previous block must be free"); 169 | return block->prev; 170 | } 171 | 172 | /* Return location of next existing block. */ 173 | INLINE tlsf_block_t *block_next(tlsf_block_t *block) 174 | { 175 | tlsf_block_t *next = 176 | to_block(block_payload(block) + block_size(block) - BLOCK_OVERHEAD); 177 | ASSERT(block_size(block), "block is last"); 178 | return next; 179 | } 180 | 181 | /* Link a new block with its neighbor, return the neighbor. */ 182 | INLINE tlsf_block_t *block_link_next(tlsf_block_t *block) 183 | { 184 | tlsf_block_t *next = block_next(block); 185 | next->prev = block; 186 | return next; 187 | } 188 | 189 | INLINE bool block_can_split(tlsf_block_t *block, size_t size) 190 | { 191 | return block_size(block) >= sizeof(tlsf_block_t) + size; 192 | } 193 | 194 | INLINE void block_set_free(tlsf_block_t *block, bool free) 195 | { 196 | ASSERT(block_is_free(block) != free, "block free bit unchanged"); 197 | block->header = 198 | free ? block->header | BLOCK_BIT_FREE : block->header & ~BLOCK_BIT_FREE; 199 | block_set_prev_free(block_link_next(block), free); 200 | } 201 | 202 | /* Adjust allocation size to be aligned, and no smaller than internal minimum. 203 | */ 204 | INLINE size_t adjust_size(size_t size, size_t align) 205 | { 206 | size = align_up(size, align); 207 | return size < BLOCK_SIZE_MIN ? BLOCK_SIZE_MIN : size; 208 | } 209 | 210 | /* Round up to the next block size */ 211 | INLINE size_t round_block_size(size_t size) 212 | { 213 | size_t t = ((size_t) 1 << (log2floor(size) - SL_SHIFT)) - 1; 214 | return size >= BLOCK_SIZE_SMALL ? (size + t) & ~t : size; 215 | } 216 | 217 | INLINE void mapping(size_t size, uint32_t *fl, uint32_t *sl) 218 | { 219 | if (size < BLOCK_SIZE_SMALL) { 220 | /* Store small blocks in first list. 
*/ 221 | *fl = 0; 222 | *sl = (uint32_t) size / (BLOCK_SIZE_SMALL / SL_COUNT); 223 | } else { 224 | uint32_t t = log2floor(size); 225 | *sl = (uint32_t) (size >> (t - SL_SHIFT)) ^ SL_COUNT; 226 | *fl = t - FL_SHIFT + 1; 227 | } 228 | ASSERT(*fl < FL_COUNT, "wrong first level"); 229 | ASSERT(*sl < SL_COUNT, "wrong second level"); 230 | } 231 | 232 | INLINE tlsf_block_t *block_find_suitable(tlsf_t *t, uint32_t *fl, uint32_t *sl) 233 | { 234 | ASSERT(*fl < FL_COUNT, "wrong first level"); 235 | ASSERT(*sl < SL_COUNT, "wrong second level"); 236 | 237 | /* Search for a block in the list associated with the given fl/sl index. */ 238 | uint32_t sl_map = t->sl[*fl] & (~0U << *sl); 239 | if (!sl_map) { 240 | /* No block exists. Search in the next largest first-level list. */ 241 | uint32_t fl_map = t->fl & (uint32_t) (~(uint64_t) 0 << (*fl + 1)); 242 | 243 | /* No free blocks available, memory has been exhausted. */ 244 | if (UNLIKELY(!fl_map)) 245 | return NULL; 246 | 247 | *fl = bitmap_ffs(fl_map); 248 | ASSERT(*fl < FL_COUNT, "wrong first level"); 249 | 250 | sl_map = t->sl[*fl]; 251 | ASSERT(sl_map, "second level bitmap is null"); 252 | } 253 | 254 | *sl = bitmap_ffs(sl_map); 255 | ASSERT(*sl < SL_COUNT, "wrong second level"); 256 | 257 | return t->block[*fl][*sl]; 258 | } 259 | 260 | /* Remove a free block from the free list. */ 261 | INLINE void remove_free_block(tlsf_t *t, 262 | tlsf_block_t *block, 263 | uint32_t fl, 264 | uint32_t sl) 265 | { 266 | ASSERT(fl < FL_COUNT, "wrong first level"); 267 | ASSERT(sl < SL_COUNT, "wrong second level"); 268 | 269 | tlsf_block_t *prev = block->prev_free; 270 | tlsf_block_t *next = block->next_free; 271 | if (next) 272 | next->prev_free = prev; 273 | if (prev) 274 | prev->next_free = next; 275 | 276 | /* If this block is the head of the free list, set new head. */ 277 | if (t->block[fl][sl] == block) { 278 | t->block[fl][sl] = next; 279 | 280 | /* If the new head is null, clear the bitmap. */ 281 | if (!next) { 282 | t->sl[fl] &= ~(1U << sl); 283 | 284 | /* If the second bitmap is now empty, clear the fl bitmap. */ 285 | if (!t->sl[fl]) 286 | t->fl &= ~(1U << fl); 287 | } 288 | } 289 | } 290 | 291 | /* Insert a free block into the free block list and mark the bitmaps. */ 292 | INLINE void insert_free_block(tlsf_t *t, 293 | tlsf_block_t *block, 294 | uint32_t fl, 295 | uint32_t sl) 296 | { 297 | tlsf_block_t *current = t->block[fl][sl]; 298 | ASSERT(block, "cannot insert a null entry into the free list"); 299 | block->next_free = current; 300 | block->prev_free = 0; 301 | if (current) 302 | current->prev_free = block; 303 | t->block[fl][sl] = block; 304 | t->fl |= 1U << fl; 305 | t->sl[fl] |= 1U << sl; 306 | } 307 | 308 | /* Remove a given block from the free list. */ 309 | INLINE void block_remove(tlsf_t *t, tlsf_block_t *block) 310 | { 311 | uint32_t fl, sl; 312 | mapping(block_size(block), &fl, &sl); 313 | remove_free_block(t, block, fl, sl); 314 | } 315 | 316 | /* Insert a given block into the free list. */ 317 | INLINE void block_insert(tlsf_t *t, tlsf_block_t *block) 318 | { 319 | uint32_t fl, sl; 320 | mapping(block_size(block), &fl, &sl); 321 | insert_free_block(t, block, fl, sl); 322 | } 323 | 324 | /* Split a block into two, the second of which is free. 
*/ 325 | INLINE tlsf_block_t *block_split(tlsf_block_t *block, size_t size) 326 | { 327 | tlsf_block_t *rest = to_block(block_payload(block) + size - BLOCK_OVERHEAD); 328 | size_t rest_size = block_size(block) - (size + BLOCK_OVERHEAD); 329 | ASSERT(block_size(block) == rest_size + size + BLOCK_OVERHEAD, 330 | "rest block size is wrong"); 331 | ASSERT(rest_size >= BLOCK_SIZE_MIN, "block split with invalid size"); 332 | rest->header = rest_size; 333 | ASSERT(!(rest_size % ALIGN_SIZE), "invalid block size"); 334 | block_set_free(rest, true); 335 | block_set_size(block, size); 336 | return rest; 337 | } 338 | 339 | /* Absorb a free block's storage into an adjacent previous free block. */ 340 | INLINE tlsf_block_t *block_absorb(tlsf_block_t *prev, tlsf_block_t *block) 341 | { 342 | ASSERT(block_size(prev), "previous block can't be last"); 343 | /* Note: Leaves flags untouched. */ 344 | prev->header += block_size(block) + BLOCK_OVERHEAD; 345 | block_link_next(prev); 346 | return prev; 347 | } 348 | 349 | /* Merge a just-freed block with an adjacent previous free block. */ 350 | INLINE tlsf_block_t *block_merge_prev(tlsf_t *t, tlsf_block_t *block) 351 | { 352 | if (block_is_prev_free(block)) { 353 | tlsf_block_t *prev = block_prev(block); 354 | ASSERT(prev, "prev block can't be null"); 355 | ASSERT(block_is_free(prev), 356 | "prev block is not free though marked as such"); 357 | block_remove(t, prev); 358 | block = block_absorb(prev, block); 359 | } 360 | return block; 361 | } 362 | 363 | /* Merge a just-freed block with an adjacent free block. */ 364 | INLINE tlsf_block_t *block_merge_next(tlsf_t *t, tlsf_block_t *block) 365 | { 366 | tlsf_block_t *next = block_next(block); 367 | ASSERT(next, "next block can't be null"); 368 | if (block_is_free(next)) { 369 | ASSERT(block_size(block), "previous block can't be last"); 370 | block_remove(t, next); 371 | block = block_absorb(block, next); 372 | } 373 | return block; 374 | } 375 | 376 | /* Trim any trailing block space off the end of a block, return to pool. */ 377 | INLINE void block_rtrim_free(tlsf_t *t, tlsf_block_t *block, size_t size) 378 | { 379 | ASSERT(block_is_free(block), "block must be free"); 380 | if (!block_can_split(block, size)) 381 | return; 382 | tlsf_block_t *rest = block_split(block, size); 383 | block_link_next(block); 384 | block_set_prev_free(rest, true); 385 | block_insert(t, rest); 386 | } 387 | 388 | /* Trim any trailing block space off the end of a used block, return to pool. 
*/ 389 | INLINE void block_rtrim_used(tlsf_t *t, tlsf_block_t *block, size_t size) 390 | { 391 | ASSERT(!block_is_free(block), "block must be used"); 392 | if (!block_can_split(block, size)) 393 | return; 394 | tlsf_block_t *rest = block_split(block, size); 395 | block_set_prev_free(rest, false); 396 | rest = block_merge_next(t, rest); 397 | block_insert(t, rest); 398 | } 399 | 400 | INLINE tlsf_block_t *block_ltrim_free(tlsf_t *t, 401 | tlsf_block_t *block, 402 | size_t size) 403 | { 404 | ASSERT(block_is_free(block), "block must be free"); 405 | ASSERT(block_can_split(block, size), "block is too small"); 406 | tlsf_block_t *rest = block_split(block, size - BLOCK_OVERHEAD); 407 | block_set_prev_free(rest, true); 408 | block_link_next(block); 409 | block_insert(t, block); 410 | return rest; 411 | } 412 | 413 | INLINE void *block_use(tlsf_t *t, tlsf_block_t *block, size_t size) 414 | { 415 | block_rtrim_free(t, block, size); 416 | block_set_free(block, false); 417 | return block_payload(block); 418 | } 419 | 420 | INLINE void check_sentinel(tlsf_block_t *block) 421 | { 422 | (void) block; 423 | ASSERT(!block_size(block), "sentinel should be last"); 424 | ASSERT(!block_is_free(block), "sentinel block should not be free"); 425 | } 426 | 427 | static bool arena_grow(tlsf_t *t, size_t size) 428 | { 429 | size_t req_size = 430 | (t->size ? t->size + BLOCK_OVERHEAD : 2 * BLOCK_OVERHEAD) + size; 431 | void *addr = tlsf_resize(t, req_size); 432 | if (!addr) 433 | return false; 434 | ASSERT((size_t) addr % ALIGN_SIZE == 0, "wrong heap alignment address"); 435 | tlsf_block_t *block = 436 | to_block(t->size ? (char *) addr + t->size - 2 * BLOCK_OVERHEAD 437 | : (char *) addr - BLOCK_OVERHEAD); 438 | if (!t->size) 439 | block->header = 0; 440 | check_sentinel(block); 441 | block->header |= size | BLOCK_BIT_FREE; 442 | block = block_merge_prev(t, block); 443 | block_insert(t, block); 444 | tlsf_block_t *sentinel = block_link_next(block); 445 | sentinel->header = BLOCK_BIT_PREV_FREE; 446 | t->size = req_size; 447 | check_sentinel(sentinel); 448 | return true; 449 | } 450 | 451 | static void arena_shrink(tlsf_t *t, tlsf_block_t *block) 452 | { 453 | check_sentinel(block_next(block)); 454 | size_t size = block_size(block); 455 | ASSERT(t->size + BLOCK_OVERHEAD >= size, "invalid heap size before shrink"); 456 | t->size = t->size - size - BLOCK_OVERHEAD; 457 | if (t->size == BLOCK_OVERHEAD) 458 | t->size = 0; 459 | tlsf_resize(t, t->size); 460 | if (t->size) { 461 | block->header = 0; 462 | check_sentinel(block); 463 | } 464 | } 465 | 466 | INLINE tlsf_block_t *block_find_free(tlsf_t *t, size_t size) 467 | { 468 | size_t rounded = round_block_size(size); 469 | uint32_t fl, sl; 470 | mapping(rounded, &fl, &sl); 471 | tlsf_block_t *block = block_find_suitable(t, &fl, &sl); 472 | if (UNLIKELY(!block)) { 473 | if (!arena_grow(t, rounded)) 474 | return NULL; 475 | block = block_find_suitable(t, &fl, &sl); 476 | ASSERT(block, "no block found"); 477 | } 478 | ASSERT(block_size(block) >= size, "insufficient block size"); 479 | remove_free_block(t, block, fl, sl); 480 | return block; 481 | } 482 | 483 | void *tlsf_malloc(tlsf_t *t, size_t size) 484 | { 485 | size = adjust_size(size, ALIGN_SIZE); 486 | if (UNLIKELY(size > TLSF_MAX_SIZE)) 487 | return NULL; 488 | tlsf_block_t *block = block_find_free(t, size); 489 | if (UNLIKELY(!block)) 490 | return NULL; 491 | return block_use(t, block, size); 492 | } 493 | 494 | void *tlsf_aalloc(tlsf_t *t, size_t align, size_t size) 495 | { 496 | size_t adjust = adjust_size(size, 
ALIGN_SIZE); 497 | 498 | if (UNLIKELY( 499 | !size || 500 | ((align | size) & (align - 1)) /* align!=2**x, size!=n*align */ || 501 | adjust > TLSF_MAX_SIZE - align - 502 | sizeof(tlsf_block_t) /* size is too large */)) 503 | return NULL; 504 | 505 | if (align <= ALIGN_SIZE) 506 | return tlsf_malloc(t, size); 507 | 508 | size_t asize = 509 | adjust_size(adjust + align - 1 + sizeof(tlsf_block_t), align); 510 | tlsf_block_t *block = block_find_free(t, asize); 511 | if (UNLIKELY(!block)) 512 | return NULL; 513 | 514 | char *mem = align_ptr(block_payload(block) + sizeof(tlsf_block_t), align); 515 | block = block_ltrim_free(t, block, (size_t) (mem - block_payload(block))); 516 | return block_use(t, block, adjust); 517 | } 518 | 519 | void tlsf_free(tlsf_t *t, void *mem) 520 | { 521 | if (UNLIKELY(!mem)) 522 | return; 523 | 524 | tlsf_block_t *block = block_from_payload(mem); 525 | ASSERT(!block_is_free(block), "block already marked as free"); 526 | 527 | block_set_free(block, true); 528 | block = block_merge_prev(t, block); 529 | block = block_merge_next(t, block); 530 | 531 | if (UNLIKELY(!block_size(block_next(block)))) 532 | arena_shrink(t, block); 533 | else 534 | block_insert(t, block); 535 | } 536 | 537 | void *tlsf_realloc(tlsf_t *t, void *mem, size_t size) 538 | { 539 | /* Zero-size requests are treated as free. */ 540 | if (UNLIKELY(mem && !size)) { 541 | tlsf_free(t, mem); 542 | return NULL; 543 | } 544 | 545 | /* Null-pointer requests are treated as malloc. */ 546 | if (UNLIKELY(!mem)) 547 | return tlsf_malloc(t, size); 548 | 549 | tlsf_block_t *block = block_from_payload(mem); 550 | size_t avail = block_size(block); 551 | size = adjust_size(size, ALIGN_SIZE); 552 | if (UNLIKELY(size > TLSF_MAX_SIZE)) 553 | return NULL; 554 | 555 | ASSERT(!block_is_free(block), "block already marked as free"); 556 | 557 | /* Do we need to expand to the next block? */ 558 | if (size > avail) { 559 | /* If the next block is used or too small, we must relocate and copy. */ 560 | tlsf_block_t *next = block_next(block); 561 | if (!block_is_free(next) || 562 | size > avail + block_size(next) + BLOCK_OVERHEAD) { 563 | void *dst = tlsf_malloc(t, size); 564 | if (dst) { 565 | memcpy(dst, mem, avail); 566 | tlsf_free(t, mem); 567 | } 568 | return dst; 569 | } 570 | 571 | block_merge_next(t, block); 572 | block_set_prev_free(block_next(block), false); 573 | } 574 | 575 | /* Trim the resulting block and return the original pointer. */ 576 | block_rtrim_used(t, block, size); 577 | return mem; 578 | } 579 | 580 | #ifdef TLSF_ENABLE_CHECK 581 | #include 582 | #include 583 | #define CHECK(cond, msg) \ 584 | ({ \ 585 | if (!(cond)) { \ 586 | fprintf(stderr, "TLSF CHECK: %s - %s\n", msg, #cond); \ 587 | abort(); \ 588 | } \ 589 | }) 590 | void tlsf_check(tlsf_t *t) 591 | { 592 | for (uint32_t i = 0; i < FL_COUNT; ++i) { 593 | for (uint32_t j = 0; j < SL_COUNT; ++j) { 594 | size_t fl_map = t->fl & (1U << i), sl_list = t->sl[i], 595 | sl_map = sl_list & (1U << j); 596 | tlsf_block_t *block = t->block[i][j]; 597 | 598 | /* Check that first- and second-level lists agree. */ 599 | if (!fl_map) 600 | CHECK(!sl_map, "second-level map must be null"); 601 | 602 | if (!sl_map) { 603 | CHECK(!block, "block list must be null"); 604 | continue; 605 | } 606 | 607 | /* Check that there is at least one free block. 
*/ 608 | CHECK(sl_list, "no free blocks in second-level map"); 609 | 610 | while (block) { 611 | uint32_t fl, sl; 612 | CHECK(block_is_free(block), "block should be free"); 613 | CHECK(!block_is_prev_free(block), 614 | "blocks should have coalesced"); 615 | CHECK(!block_is_free(block_next(block)), 616 | "blocks should have coalesced"); 617 | CHECK(block_is_prev_free(block_next(block)), 618 | "block should be free"); 619 | CHECK(block_size(block) >= BLOCK_SIZE_MIN, 620 | "block not minimum size"); 621 | 622 | mapping(block_size(block), &fl, &sl); 623 | CHECK(fl == i && sl == j, "block size indexed in wrong list"); 624 | block = block->next_free; 625 | } 626 | } 627 | } 628 | } 629 | #endif 630 | -------------------------------------------------------------------------------- /tlsf.h: -------------------------------------------------------------------------------- 1 | /* 2 | * SPDX-License-Identifier: BSD-3-Clause 3 | */ 4 | 5 | #pragma once 6 | 7 | /* Inhibit C++ name-mangling for tlsf functions */ 8 | #ifdef __cplusplus 9 | extern "C" { 10 | #endif /* __cplusplus */ 11 | 12 | #include 13 | #include 14 | 15 | #define _TLSF_SL_COUNT 16 16 | #if __SIZE_WIDTH__ == 64 17 | #define _TLSF_FL_COUNT 32 18 | #define _TLSF_FL_MAX 38 19 | #else 20 | #define _TLSF_FL_COUNT 25 21 | #define _TLSF_FL_MAX 30 22 | #endif 23 | #define TLSF_MAX_SIZE (((size_t) 1 << (_TLSF_FL_MAX - 1)) - sizeof(size_t)) 24 | #define TLSF_INIT ((tlsf_t){.size = 0}) 25 | 26 | typedef struct { 27 | uint32_t fl, sl[_TLSF_FL_COUNT]; 28 | struct tlsf_block *block[_TLSF_FL_COUNT][_TLSF_SL_COUNT]; 29 | size_t size; 30 | } tlsf_t; 31 | 32 | void *tlsf_resize(tlsf_t *, size_t); 33 | void *tlsf_aalloc(tlsf_t *, size_t, size_t); 34 | 35 | /** 36 | * Allocates the requested @size bytes of memory and returns a pointer to it. 37 | * On failure, returns NULL. 38 | */ 39 | void *tlsf_malloc(tlsf_t *, size_t size); 40 | void *tlsf_realloc(tlsf_t *, void *, size_t); 41 | 42 | /** 43 | * Releases the previously allocated memory, given the pointer. 44 | */ 45 | void tlsf_free(tlsf_t *, void *); 46 | 47 | #ifdef TLSF_ENABLE_CHECK 48 | void tlsf_check(tlsf_t *); 49 | #else 50 | static inline void tlsf_check(tlsf_t *t) 51 | { 52 | (void) t; 53 | } 54 | #endif 55 | 56 | #ifdef __cplusplus 57 | } 58 | #endif 59 | --------------------------------------------------------------------------------