├── .github └── workflows │ └── verifier.yml ├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── adaptabuild_artifacts.mak ├── adaptabuild_config.mak ├── adaptabuild_module.mak ├── adaptabuild_product.mak ├── cpputest ├── main.cpp ├── support_umm_malloc.cpp ├── support_umm_malloc.h ├── test_FirstMalloc.cpp ├── test_Free.cpp ├── test_Metrics.cpp ├── test_MultiMalloc.cpp ├── test_Poison.cpp ├── test_Realloc.cpp ├── test_Stress.cpp └── test_TooBigMalloc.cpp ├── multitest.sh ├── src ├── options │ ├── default.h │ ├── enable_critical_depth_check.h │ ├── enable_first_fit.h │ ├── enable_inline_metrics.h │ ├── enable_integrity_check.h │ ├── enable_poison_check.h │ └── enable_umm_info.h ├── umm_info.c ├── umm_integrity.c ├── umm_malloc.c ├── umm_malloc.h ├── umm_malloc_cfg.h └── umm_poison.c ├── uncrustify.cfg └── unittest └── config └── host └── umm_malloc_cfgport.h /.github/workflows/verifier.yml: -------------------------------------------------------------------------------- 1 | name: verifyer 2 | on: [pull_request, workflow_dispatch] 3 | jobs: 4 | multi_test: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@main 8 | 9 | - name: Install cpputest and lcov ... 10 | run: | 11 | sudo apt install --no-install-recommends -y cpputest lcov 12 | sudo pip install junit2html 13 | 14 | - name: Directory test 15 | run: | 16 | echo ${HOME} 17 | echo ${PWD} 18 | mkdir -p ${HOME}/work/umm_malloc/src 19 | find ${HOME}/work/umm_malloc -type d 20 | 21 | - name: Link umm_malloc to src 22 | run: | 23 | ln -s ${HOME}/work/umm_malloc/umm_malloc ${HOME}/work/umm_malloc/src/umm_malloc 24 | ls -al ${HOME}/work/umm_malloc/src/umm_malloc 25 | 26 | - name: Link unittest to src 27 | run: | 28 | ln -s ${HOME}/work/umm_malloc/umm_malloc/unittest ${HOME}/work/umm_malloc/src/unittest 29 | ls -al ${HOME}/work/umm_malloc/src/unittest 30 | 31 | - name: Copy adaptabuild config files 32 | run: | 33 | cp ${HOME}/work/umm_malloc/umm_malloc/adaptabuild/* ${HOME}/work/umm_malloc 34 | ls -al ${HOME}/work/umm_malloc 35 | 36 | - name: Clone adaptabuild 37 | run: | 38 | git clone https://github.com/rhempel/adaptabuild.git ${HOME}/work/umm_malloc/adaptabuild 39 | ls -al ${HOME}/work/umm_malloc 40 | 41 | - name: Run the unit tests 42 | run: | 43 | make -f ${HOME}/work/umm_malloc/adaptabuild.mak MCU=host PRODUCT=unittest unittest 44 | ls -al ${HOME}/work/umm_malloc/artifacts 45 | find ${HOME}/work/umm_malloc/artifacts 46 | 47 | - name: Archive test and coverage results 48 | uses: actions/upload-artifact@main 49 | with: 50 | name: Test and Coverage Results 51 | path: ~/work/umm_malloc/artifacts/unittest/host/umm_malloc 52 | 53 | 54 | # ls -al unittest 55 | # ls -al unittest/artifacts/unittest/host/umm_malloc/ 56 | # ls -al ~ 57 | # ls -al ${PWD}/unittest/artifacts/unittest/host/umm_malloc 58 | # echo ${PWD} 59 | # echo ${HOME} 60 | # find ${HOME} -type d 61 | 62 | # - name: Archive test and coverage results 63 | # uses: actions/upload-artifact@main 64 | # with: 65 | # name: Test and Coverage Results 66 | # path: ${HOME}/work/unittest/artifacts/unittest/host/umm_malloc 67 | 68 | # cd unittest && cp src/umm_malloc/adaptabuild/* . 
69 | # cd unittest && make -f adaptabuild.mak MCU=host PRODUCT=unittest unittest 70 | # ls -al unittest 71 | 72 | # - name: Link unittest source up one level 73 | # run: sudo apt install --no-install-recommends -y cpputest 74 | 75 | # - name: Run multi-configuration tests 76 | # run: ./multitest.sh 77 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary editor files 2 | *.swp 3 | 4 | # Object files 5 | *.o 6 | 7 | # Other files 8 | .DS_Store 9 | 10 | # Unity Test Framework output directory 11 | build 12 | artifacts 13 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "adaptabuild"] 2 | path = adaptabuild 3 | url = https://github.com/rhempel/adaptabuild.git 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Ralph Hempel 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![verifyer](https://github.com/rhempel/umm_malloc/actions/workflows/verifier.yml/badge.svg?branch=master)](https://github.com/rhempel/umm_malloc/actions/workflows/verifier.yml) 2 | 3 | # umm_malloc - Memory Manager For Small(ish) Microprocessors 4 | 5 | This is a memory management library specifically designed to work with the 6 | ARM7 embedded processor, but it should work on many other 32 bit processors, 7 | as well as 16 and 8 bit devices. 8 | 9 | You can even use it on a bigger project where a single process might want 10 | to manage a large number of smaller objects, and using the system heap 11 | might get expensive. 12 | 13 | ## Acknowledgements 14 | 15 | Joerg Wunsch and the avr-libc provided the first `malloc()` implementation 16 | that I examined in detail. 17 | 18 | `http://www.nongnu.org/avr-libc` 19 | 20 | Doug Lea's paper on malloc() was another excellent reference and provides 21 | a lot of detail on advanced memory management techniques such as binning. 
22 | 
23 | `http://gee.cs.oswego.edu/dl/html/malloc.html`
24 | 
25 | Bill Dittman provided excellent suggestions, including macros to support
26 | using these functions in critical sections, and for optimizing `realloc()`
27 | further by checking to see if the previous block was free and could be
28 | used for the new block size. This can help to reduce heap fragmentation
29 | significantly.
30 | 
31 | Yaniv Ankin suggested that a way to dump the current heap condition
32 | might be useful. I combined this with an idea from plarroy to also
33 | allow checking a free pointer to make sure it's valid.
34 | 
35 | Dimitry Frank contributed many helpful additions to make things more
36 | robust, including a user specified config file and a method of testing
37 | the integrity of the data structures.
38 | 
39 | GitHub user @devyte provided useful feedback on the nesting of functions
40 | as well as a fix for the problem that separates out the core free and
41 | malloc functionality.
42 | 
43 | GitHub users @d-a-v and @devyte provided great input on establishing
44 | a heap fragmentation metric, which they graciously allowed to be used
45 | in umm_malloc.
46 | 
47 | Katherine Whitlock (@stellar-aria) extended the library for usage in
48 | scenarios where more than one heap or memory space is needed.
49 | 
50 | ## Usage
51 | 
52 | This library is designed to be included in your application as a
53 | submodule that has a default configuration which can be overridden
54 | as needed by your application code.
55 | 
56 | The `umm_malloc` library can be initialized in two ways. The first is
57 | at link time:
58 | 
59 | - Set `UMM_MALLOC_CFG_HEAP_ADDR` to the symbol representing
60 | the starting address of the heap. The heap must be
61 | aligned on the natural boundary size of the processor.
62 | - Set `UMM_MALLOC_CFG_HEAP_SIZE` to the size of the heap in bytes.
63 | The heap size must be a multiple of the natural boundary size of
64 | the processor.
65 | 
66 | This is how the `umm_init()` call handles initializing the heap.
67 | 
68 | We can also call `umm_init_heap(void *pheap, size_t size)` where the
69 | heap details are passed in manually. This is useful in systems where
70 | you can allocate a block of memory at run time - for example in Rust.
71 | 
72 | ### Multiple heaps
73 | 
74 | For usage in a scenario that requires multiple heaps, the heap type
75 | `umm_heap` is exposed. All API functions (`malloc`, `free`, `realloc`, etc.)
76 | have corresponding `umm_multi_*` variants that take a pointer to this
77 | type as their first parameter.
78 | 
79 | Much like standard initialization, there are two methods:
80 | - `umm_multi_init(umm_heap *heap)`, which initializes a given heap
81 | using linker symbols
82 | - `umm_multi_init_heap(umm_heap *heap, void *ptr, size_t size)`, which
83 | will initialize a given heap using a known address and size.
84 | 
85 | ## Automated Testing
86 | 
87 | `umm_malloc` is designed to be testable in standalone
88 | mode using `ceedling`. To run the test suite, just make sure you have
89 | `ceedling` installed and then run:
90 | 
91 | ```
92 | ceedling clean
93 | ceedling test:all
94 | ```
95 | 
96 | ## Configuration
97 | 
98 | > :warning: **You MUST provide a file called `umm_malloc_cfgport.h`
99 | > somewhere in your app, even if it's blank**
100 | 
101 | The reason for this is the way the configuration override hierarchy
102 | works. The priority for configuration overrides is as follows:
103 | 
104 | 1. Command line defines using `-D UMM_xxx`
105 | 2. A custom config filename using `-D UMM_MALLOC_CFGFILE="<your_config_file.h>"`
106 | 3. The default config filename `path/to/config/umm_malloc_cfgport.h`
107 | 4. The default configuration in `src/umm_malloc_cfg.h`
108 | 
109 | 
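For example, a minimal `umm_malloc_cfgport.h` might look like the sketch below. The two options shown are the ones documented in this section; which ones you set (if any) is up to your application:

```c
/* umm_malloc_cfgport.h - a minimal example override (an empty file is valid too) */
#ifndef _UMM_MALLOC_CFGPORT_H
#define _UMM_MALLOC_CFGPORT_H

#define UMM_FIRST_FIT   /* use the first-fit allocator instead of best-fit */
#define UMM_INFO        /* include the heap dumping support code           */

#endif
```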
110 | The following `#define`s are set to useful defaults in
111 | `src/umm_malloc_cfg.h` and can be overridden as needed.
112 | 
113 | The fit algorithm is defined as either:
114 | 
115 | - `UMM_BEST_FIT` which scans the entire free list and looks
116 | for either an exact fit or the smallest block that will
117 | satisfy the request. This is the default fit method.
118 | - `UMM_FIRST_FIT` which scans the free list and takes
119 | the first block that satisfies the request.
120 | 
121 | The following `#define`s are disabled by default and should
122 | remain disabled for production use. They are helpful when
123 | testing allocation errors (which are normally due to bugs in
124 | the application code) or for running the test suite when
125 | making changes to the code.
126 | 
127 | - `UMM_INFO` is used to include code that allows dumping
128 | the entire heap structure (helpful when there's a problem).
129 | 
130 | - `UMM_INTEGRITY_CHECK` is used to include code that
131 | performs an integrity check on the heap structure. It's
132 | up to you to call the `umm_integrity_check()` function.
133 | 
134 | - `UMM_POISON_CHECK` is used to include code that
135 | adds some bytes around the memory being allocated that
136 | are filled with known data. If the data is not intact
137 | when the block is checked, then someone has written outside
138 | of the memory block they have been allocated. It is up
139 | to you to call the `umm_poison_check()` function.
140 | 
141 | ## API
142 | 
143 | The following functions are available for your application:
144 | 
145 | ```c
146 | void *umm_malloc(size_t size)
147 | void *umm_calloc(size_t num, size_t size)
148 | void *umm_realloc(void *ptr, size_t size)
149 | void umm_free(void *ptr)
150 | ```
151 | 
152 | They have exactly the same semantics as the corresponding standard library
153 | functions.
154 | 
155 | To initialize the library there are two options:
156 | 
157 | ```c
158 | void umm_init(void)
159 | void umm_init_heap(void *ptr, size_t size)
160 | ```
161 | 
162 | ### Multi-Heap API
163 | 
164 | For the case of multiple heaps, corresponding `umm_multi_*` functions are provided.
165 | 
166 | ```c
167 | void *umm_multi_malloc(umm_heap *heap, size_t size)
168 | void *umm_multi_calloc(umm_heap *heap, size_t num, size_t size)
169 | void *umm_multi_realloc(umm_heap *heap, void *ptr, size_t size)
170 | void umm_multi_free(umm_heap *heap, void *ptr)
171 | ```
172 | 
173 | As with the standard API, there are two options for initialization:
174 | 
175 | ```c
176 | void umm_multi_init(umm_heap *heap)
177 | void umm_multi_init_heap(umm_heap *heap, void *ptr, size_t size)
178 | ```
179 | 
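Putting the basic API together, a minimal run-time setup might look like this sketch. The arena size and alignment below are arbitrary example values, not library requirements beyond the natural-boundary rule described above:

```c
#include "umm_malloc.h"

/* A 16 KiB arena for the example, aligned on an 8 byte boundary */
static char my_heap_arena[0x4000] __attribute__((aligned(8)));

void app_init(void) {
    umm_init_heap(my_heap_arena, sizeof(my_heap_arena));

    char *buf = (char *)umm_malloc(128);
    if (buf != NULL) {
        char *bigger = (char *)umm_realloc(buf, 256);
        if (bigger != NULL) {
            buf = bigger;   /* realloc may move the data */
        }
        umm_free(buf);
    }
}
```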
This memory 193 | manager keeps track of both free and used blocks in a doubly linked list. 194 | 195 | Most memory managers use a list structure made up of pointers 196 | to keep track of used - and sometimes free - blocks of memory. In an 197 | embedded system, this can get pretty expensive as each pointer can use 198 | up to 32 bits. 199 | 200 | In most embedded systems there is no need for managing a large quantity 201 | of memory block dynamically, so a full 32 bit pointer based data structure 202 | for the free and used block lists is wasteful. A block of memory on 203 | the free list would use 16 bytes just for the pointers! 204 | 205 | This memory management library sees the heap as an array of blocks, 206 | and uses block numbers to keep track of locations. The block numbers are 207 | 15 bits - which allows for up to 32767 blocks of memory. The high order 208 | bit marks a block as being either free or in use, which will be explained 209 | later. 210 | 211 | The result is that a block of memory on the free list uses just 8 bytes 212 | instead of 16. 213 | 214 | In fact, we go even one step futher when we realize that the free block 215 | index values are available to store data when the block is allocated. 216 | 217 | The overhead of an allocated block is therefore just 4 bytes. 218 | 219 | Each memory block holds 8 bytes, and there are up to 32767 blocks 220 | available, for about 256K of heap space. If that's not enough, you 221 | can always add more data bytes to the body of the memory block 222 | at the expense of free block size overhead. 223 | 224 | There are a lot of little features and optimizations in this memory 225 | management system that makes it especially suited to small systems, and 226 | the best way to appreciate them is to review the data structures and 227 | algorithms used, so let's get started. 228 | 229 | ## Detailed Description 230 | 231 | We have a general notation for a block that we'll use to describe the 232 | different scenarios that our memory allocation algorithm must deal with: 233 | 234 | ``` 235 | +----+----+----+----+ 236 | c |* n | p | nf | pf | 237 | +----+----+----+----+ 238 | ``` 239 | 240 | Where: 241 | 242 | - c is the index of this block 243 | - * is the indicator for a free block 244 | - n is the index of the next block in the heap 245 | - p is the index of the previous block in the heap 246 | - nf is the index of the next block in the free list 247 | - pf is the index of the previous block in the free list 248 | 249 | The fact that we have forward and backward links in the block descriptors 250 | means that malloc() and free() operations can be very fast. It's easy 251 | to either allocate the whole free item to a new block or to allocate part 252 | of the free item and leave the rest on the free list without traversing 253 | the list from front to back first. 254 | 255 | The entire block of memory used by the heap is assumed to be initialized 256 | to 0. The very first block in the heap is special - it't the head of the 257 | free block list. It is never assimilated with a free block (more on this 258 | later). 259 | 260 | Once a block has been allocated to the application, it looks like this: 261 | 262 | ``` 263 | +----+----+----+----+ 264 | c | n | p | ... 
229 | ## Detailed Description
230 | 
231 | We have a general notation for a block that we'll use to describe the
232 | different scenarios that our memory allocation algorithm must deal with:
233 | 
234 | ```
235 | +----+----+----+----+
236 | c |* n | p | nf | pf |
237 | +----+----+----+----+
238 | ```
239 | 
240 | Where:
241 | 
242 | - c is the index of this block
243 | - * is the indicator for a free block
244 | - n is the index of the next block in the heap
245 | - p is the index of the previous block in the heap
246 | - nf is the index of the next block in the free list
247 | - pf is the index of the previous block in the free list
248 | 
249 | The fact that we have forward and backward links in the block descriptors
250 | means that malloc() and free() operations can be very fast. It's easy
251 | to either allocate the whole free item to a new block or to allocate part
252 | of the free item and leave the rest on the free list without traversing
253 | the list from front to back first.
254 | 
255 | The entire block of memory used by the heap is assumed to be initialized
256 | to 0. The very first block in the heap is special - it's the head of the
257 | free block list. It is never assimilated with a free block (more on this
258 | later).
259 | 
260 | Once a block has been allocated to the application, it looks like this:
261 | 
262 | ```
263 | +----+----+----+----+
264 | c | n | p | ... |
265 | +----+----+----+----+
266 | ```
267 | 
268 | Where:
269 | 
270 | - c is the index of this block
271 | - n is the index of the next block in the heap
272 | - p is the index of the previous block in the heap
273 | 
274 | Note that the free list information is gone because it's now
275 | being used to store actual data for the application. If we had
276 | even 500 items in use, that would be 2,000 bytes for
277 | free list information. We simply can't afford to waste that much.
278 | 
279 | The address of the `...` area is what is returned to the application
280 | for data storage.
281 | 
282 | The following sections describe the scenarios encountered during the
283 | operation of the library. There are two additional notation conventions:
284 | 
285 | `??` inside a pointer block means that the data is irrelevant. We don't care
286 | about it because we don't read or modify it in the scenario being
287 | described.
288 | 
289 | `...` between memory blocks indicates zero or more additional blocks are
290 | allocated for use by the upper block.
291 | 
292 | While we're talking about "upper" and "lower" blocks, we should make
293 | a comment about addresses. In the diagrams, a block higher up in the
294 | picture is at a lower address. And as the blocks grow downwards, their
295 | block index increases, as does their physical address.
296 | 
297 | Finally, there's one very important characteristic of the individual
298 | blocks that make up the heap - there can never be two consecutive free
299 | memory blocks, but there can be consecutive used memory blocks.
300 | 
301 | The reason is that we always want to have a short free list of the
302 | largest possible block sizes. By always assimilating a newly freed block
303 | with adjacent free blocks, we maximize the size of each free memory area.
304 | 
305 | ### Operation of malloc right after system startup
306 | 
307 | As part of the system startup code, all of the heap has been cleared.
308 | 
309 | During the very first malloc operation, we start traversing the free list
310 | starting at index 0. The index of the next free block is 0, which means
311 | we're at the end of the list!
312 | 
313 | At this point, the malloc has a special test that checks if the current
314 | block index is 0, which it is. This special case initializes the free
315 | list to point at block index 1 and then points block 1 to the
316 | last block (lf) on the heap.
317 | 
318 | ```
319 | BEFORE AFTER
320 | 
321 | +----+----+----+----+ +----+----+----+----+
322 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 |
323 | +----+----+----+----+ +----+----+----+----+
324 | +----+----+----+----+
325 | 1 |*lf | 0 | 0 | 0 |
326 | +----+----+----+----+
327 | ...
328 | +----+----+----+----+
329 | lf | 0 | 1 | 0 | 0 |
330 | +----+----+----+----+
331 | ```
332 | 
333 | The heap is now ready to complete the first malloc operation.
334 | 
335 | ### Operation of malloc when we have reached the end of the free list and there is no block large enough to accommodate the request.
336 | 
337 | This happens at the very first malloc operation, or any time the free
338 | list is traversed and no free block large enough for the request is
339 | found.
340 | 
341 | The current block pointer will be at the end of the free list, and we
342 | know we're at the end of the list because the nf index is 0, like this:
343 | 
344 | ```
345 | BEFORE AFTER
346 | 
347 | +----+----+----+----+ +----+----+----+----+
348 | pf |*?? | ?? | cf | ?? | pf |*?? | ?? | lf | ?? |
349 | +----+----+----+----+ +----+----+----+----+
350 | ... ...
351 | +----+----+----+----+ +----+----+----+----+
352 | p | cf | ?? | ... | p | cf | ?? | ... |
353 | +----+----+----+----+ +----+----+----+----+
354 | +----+----+----+----+ +----+----+----+----+
355 | cf | 0 | p | 0 | pf | c | lf | p | ... |
356 | +----+----+----+----+ +----+----+----+----+
357 | +----+----+----+----+
358 | lf | 0 | cf | 0 | pf |
359 | +----+----+----+----+
360 | ```
361 | 
362 | As we walk the free list looking for a block of size b or larger, we get
363 | to cf, which is the last item in the free list. We know this because the
364 | next index is 0.
365 | 
366 | So we're going to turn cf into the new block of memory, and then create
367 | a new block that represents the last free entry (lf) and adjust the prev
368 | index of lf to point at the block we just created. We also need to adjust
369 | the next index of the new block (c) to point to the last free block.
370 | 
371 | Note that the next free index of the pf block must point to the new lf
372 | because cf is no longer a free block!
373 | 
374 | ### Operation of malloc when we have found a block (cf) that will fit the current request of b units exactly
375 | 
376 | This one is pretty easy, just clear the free list bit in the current
377 | block and unhook it from the free list.
378 | 
379 | ```
380 | BEFORE AFTER
381 | 
382 | +----+----+----+----+ +----+----+----+----+
383 | pf |*?? | ?? | cf | ?? | pf |*?? | ?? | nf | ?? |
384 | +----+----+----+----+ +----+----+----+----+
385 | ... ...
386 | +----+----+----+----+ +----+----+----+----+
387 | p | cf | ?? | ... | p | cf | ?? | ... |
388 | +----+----+----+----+ +----+----+----+----+
389 | +----+----+----+----+ +----+----+----+----+ Clear the free
390 | cf |* n | p | nf | pf | cf | n | p | .. | list bit here
391 | +----+----+----+----+ +----+----+----+----+
392 | +----+----+----+----+ +----+----+----+----+
393 | n | ?? | cf | ... | n | ?? | cf | ... |
394 | +----+----+----+----+ +----+----+----+----+
395 | ... ...
396 | +----+----+----+----+ +----+----+----+----+
397 | nf |*?? | ?? | ?? | cf | nf | ?? | ?? | ?? | pf |
398 | +----+----+----+----+ +----+----+----+----+
399 | ```
400 | 
401 | Unhooking from the free list is accomplished by adjusting the next and
402 | prev free list index values in the pf and nf blocks.
403 | 
404 | ### Operation of malloc when we have found a block that will fit the current request of b units with some left over
405 | 
406 | We'll allocate the new block at the END of the current free block so we
407 | don't have to change ANY free list pointers.
408 | 
409 | ```
410 | BEFORE AFTER
411 | 
412 | +----+----+----+----+ +----+----+----+----+
413 | pf |*?? | ?? | cf | ?? | pf |*?? | ?? | cf | ?? |
414 | +----+----+----+----+ +----+----+----+----+
415 | ... ...
416 | +----+----+----+----+ +----+----+----+----+
417 | p | cf | ?? | ... | p | cf | ?? | ... |
418 | +----+----+----+----+ +----+----+----+----+
419 | +----+----+----+----+ +----+----+----+----+
420 | cf |* n | p | nf | pf | cf |* c | p | nf | pf |
421 | +----+----+----+----+ +----+----+----+----+
422 | +----+----+----+----+ This is the new
423 | c | n | cf | .. | block at cf+b
424 | +----+----+----+----+
425 | +----+----+----+----+ +----+----+----+----+
426 | n | ?? | cf | ... | n | ?? | c | ... |
427 | +----+----+----+----+ +----+----+----+----+
428 | ... ...
429 | +----+----+----+----+ +----+----+----+----+
430 | nf |*?? | ?? | ?? | pf |
431 | +----+----+----+----+ +----+----+----+----+
432 | ```
433 | 
434 | This one is pretty easy too, except we don't need to mess with the
435 | free list indexes at all because we'll allocate the new block at the
436 | end of the current free block. We do, however, have to adjust the
437 | indexes in cf, c, and n.
438 | 
439 | That covers the initialization and all possible malloc scenarios, so now
440 | we need to cover the free operation possibilities...
441 | 
442 | ### Free Scenarios
443 | 
444 | The operation of free depends on the position of the current block being
445 | freed relative to free list items immediately above or below it. The code
446 | works like this (a C sketch follows the pseudocode):
447 | 
448 | ```
449 | if next block is free
450 |     assimilate with next block already on free list
451 | if prev block is free
452 |     assimilate with prev block already on free list
453 | else
454 |     put current block at head of free list
455 | ```
456 | 
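The same decision tree, sketched in C. All of the helper names here are hypothetical stand-ins for the index manipulations described in the three steps that follow - this is not the library's code:

```c
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical helpers standing in for the index manipulations
 * described below - declarations only, for illustration. */
extern bool     is_free(uint16_t b);
extern uint16_t next_block(uint16_t b);
extern uint16_t prev_block(uint16_t b);
extern void     unlink_free(uint16_t b);
extern void     assimilate_up(uint16_t b);
extern void     assimilate_down(uint16_t b);
extern void     insert_free_head(uint16_t b);

void sketch_free(uint16_t c) {
    if (is_free(next_block(c))) {
        /* step 1: pull the next block off the free list and
         * fold its space into c                              */
        unlink_free(next_block(c));
        assimilate_up(c);
    }
    if (is_free(prev_block(c))) {
        /* step 2: c is absorbed by the free previous block,
         * which is already on the free list                  */
        assimilate_down(c);
    } else {
        /* step 3: no free neighbour below, so c becomes the
         * new head of the free list (block 0 is the head)    */
        insert_free_head(c);
    }
}
```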
457 | Step 1 of the free operation checks if the next block is free, and if it
458 | is, we assimilate the next block with this one.
459 | 
460 | Note that c is the block we are freeing up, and cf is the free block that
461 | follows it.
462 | 
463 | ```
464 | BEFORE AFTER
465 | 
466 | +----+----+----+----+ +----+----+----+----+
467 | pf |*?? | ?? | cf | ?? | pf |*?? | ?? | nf | ?? |
468 | +----+----+----+----+ +----+----+----+----+
469 | ... ...
470 | +----+----+----+----+ +----+----+----+----+
471 | p | c | ?? | ... | p | c | ?? | ... |
472 | +----+----+----+----+ +----+----+----+----+
473 | +----+----+----+----+ +----+----+----+----+ This block is
474 | c | cf | p | ... | c | nn | p | ... | disconnected
475 | +----+----+----+----+ +----+----+----+----+ from free list,
476 | +----+----+----+----+ assimilated with
477 | cf |*nn | c | nf | pf | the next, and
478 | +----+----+----+----+ ready for step 2
479 | +----+----+----+----+ +----+----+----+----+
480 | nn | ?? | cf | ?? | ?? | nn | ?? | c | ... |
481 | +----+----+----+----+ +----+----+----+----+
482 | ... ...
483 | +----+----+----+----+ +----+----+----+----+
484 | nf |*?? | ?? | ?? | cf | nf |*?? | ?? | ?? | pf |
485 | +----+----+----+----+ +----+----+----+----+
486 | ```
487 | 
488 | Take special note that the newly assimilated block (c) is completely
489 | disconnected from the free list, and it does not have its free list
490 | bit set. This is important as we move on to step 2 of the procedure...
491 | 
492 | Step 2 of the free operation checks if the prev block is free, and if it
493 | is, then we assimilate it with this block.
494 | 
495 | Note that c is the block we are freeing up, and pf is the free block that
496 | precedes it.
497 | 
498 | ```
499 | BEFORE AFTER
500 | 
501 | +----+----+----+----+ +----+----+----+----+ This block has
502 | pf |* c | ?? | nf | ?? | pf |* n | ?? | nf | ?? | assimilated the
503 | +----+----+----+----+ +----+----+----+----+ current block
504 | +----+----+----+----+
505 | c | n | pf | ... |
506 | +----+----+----+----+
507 | +----+----+----+----+ +----+----+----+----+
508 | n | ?? | c | ... | n | ?? | pf | ?? | ?? |
509 | +----+----+----+----+ +----+----+----+----+
510 | ... ...
511 | +----+----+----+----+ +----+----+----+----+
512 | nf |*?? | ?? | ?? | pf | nf |*?? | ?? | ?? | pf |
513 | +----+----+----+----+ +----+----+----+----+
514 | ```
515 | 
516 | Nothing magic here, except that when we're done, the current block (c)
517 | is gone since it's been absorbed into the previous free block. Note that
518 | the previous step guarantees that the next block (n) is not free.
519 | 
520 | Step 3 of the free operation only runs if the previous block is not free.
521 | It just inserts the current block at the head of the free list.
522 | 
523 | Remember, 0 is always the first block in the memory heap, and it's always
524 | the head of the free list!
525 | 
526 | ```
527 | BEFORE AFTER
528 | 
529 | +----+----+----+----+ +----+----+----+----+
530 | 0 | ?? | ?? | nf | 0 | 0 | ?? | ?? | c | 0 |
531 | +----+----+----+----+ +----+----+----+----+
532 | ... ...
533 | +----+----+----+----+ +----+----+----+----+
534 | p | c | ?? | ... | p | c | ?? | ... |
535 | +----+----+----+----+ +----+----+----+----+
536 | +----+----+----+----+ +----+----+----+----+
537 | c | n | p | .. | c |* n | p | nf | 0 |
538 | +----+----+----+----+ +----+----+----+----+
539 | +----+----+----+----+ +----+----+----+----+
540 | n | ?? | c | ... | n | ?? | c | ... |
541 | +----+----+----+----+ +----+----+----+----+
542 | ... ...
543 | +----+----+----+----+ +----+----+----+----+
544 | nf |*?? | ?? | ?? | 0 | nf |*?? | ?? | ?? | c |
545 | +----+----+----+----+ +----+----+----+----+
546 | ```
547 | 
548 | Again, nothing spectacular here, we're simply adjusting a few pointers
549 | to make the most recently freed block the first item in the free list.
550 | 
551 | That's because finding the previous free block would mean a reverse
552 | traversal of blocks until we found a free one, and it's just easier to
553 | put it at the head of the list. No traversal is needed.
554 | 
555 | ### Realloc Scenarios
556 | 
557 | Finally, we can cover realloc, which has the following basic operation.
558 | 
559 | The first thing we do is assimilate up with the next free block of
560 | memory if possible. This step might help if we're resizing to a bigger
561 | block of memory. It also helps if we're downsizing and creating a new
562 | free block with the leftover memory.
563 | 
564 | First we check to see if the next block is free, and we assimilate it
565 | to this block if it is. If the previous block is also free, and if
566 | combining it with the current block would satisfy the request, then we
567 | assimilate with that block and move the current data down to the new
568 | location.
569 | 
570 | Assimilating with the previous free block and moving the data works
571 | like this:
572 | 
573 | ```
574 | BEFORE AFTER
575 | 
576 | +----+----+----+----+ +----+----+----+----+
577 | pf |*?? | ?? | cf | ?? | pf |*?? | ?? | nf | ?? |
578 | +----+----+----+----+ +----+----+----+----+
579 | ... ...
580 | +----+----+----+----+ +----+----+----+----+
581 | cf |* c | ?? | nf | pf | c | n | ?? | ... | The data gets
582 | +----+----+----+----+ +----+----+----+----+ moved from c to
583 | +----+----+----+----+ the new data area
584 | c | n | cf | ... | in cf, then c is
585 | +----+----+----+----+ adjusted to cf
586 | +----+----+----+----+ +----+----+----+----+
587 | n | ?? | c | ... | n | ?? | c | ?? | ?? |
588 | +----+----+----+----+ +----+----+----+----+
589 | ... ...
590 | +----+----+----+----+ +----+----+----+----+
591 | nf |*?? | ?? | ?? | cf | nf |*?? | ?? | ?? | pf |
592 | +----+----+----+----+ +----+----+----+----+
593 | ```
594 | 
595 | Once that's done, there are three scenarios to consider (a usage sketch
follows this list):
596 | 
597 | 1. The current block size is exactly the right size, so no more work is
598 | needed.
599 | 
600 | 2. The current block is bigger than the new required size, so carve off
601 | the excess and add it to the free list.
602 | 
603 | 3. The current block is still smaller than the required size, so malloc
604 | a new block of the correct size and copy the current data into the new
605 | block before freeing the current block.
606 | 
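A hedged usage sketch of the three cases (the literal sizes are arbitrary examples, and error handling is kept minimal):

```c
#include "umm_malloc.h"

void realloc_scenarios(void) {
    void *p = umm_malloc(100);
    if (p == NULL) {
        return;
    }

    p = umm_realloc(p, 100); /* 1: size already fits - block reused in place    */
    p = umm_realloc(p, 40);  /* 2: smaller - the excess is carved off and freed */
    p = umm_realloc(p, 400); /* 3: larger - may malloc/copy/free, so the
                              *    pointer can change (or be NULL on failure)   */
    umm_free(p);
}
```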
607 | The only one of these scenarios that involves an operation that has not
608 | yet been described is the second one, and it's shown below:
609 | 
610 | ```
611 | BEFORE AFTER
612 | 
613 | +----+----+----+----+ +----+----+----+----+
614 | p | c | ?? | ... | p | c | ?? | ... |
615 | +----+----+----+----+ +----+----+----+----+
616 | +----+----+----+----+ +----+----+----+----+
617 | c | n | p | ... | c | s | p | ... |
618 | +----+----+----+----+ +----+----+----+----+
619 | +----+----+----+----+ This is the
620 | s | n | c | .. | new block at
621 | +----+----+----+----+ c+blocks
622 | +----+----+----+----+ +----+----+----+----+
623 | n | ?? | c | ... | n | ?? | s | ... |
624 | +----+----+----+----+ +----+----+----+----+
625 | ```
626 | 
627 | Then we call free() with the address of the data portion of the new
628 | block (s), which adds it to the free list.
629 | 
--------------------------------------------------------------------------------
/adaptabuild_artifacts.mak:
--------------------------------------------------------------------------------
1 | # ----------------------------------------------------------------------------
2 | # adaptabuild_artifacts.mak - product specific libraries file
3 | #
4 | # Here is where you specify the libraries or other artifacts your product
5 | # needs to have built.
6 | # ----------------------------------------------------------------------------
7 | 
8 | ifeq (host,$(MCU))
9 | # Do nothing - we want the standard library for host builds
10 | else
11 | # Do nothing
12 | endif
13 | 
14 | include $(SRC_PATH)/adaptabuild_module.mak
15 | 
--------------------------------------------------------------------------------
/adaptabuild_config.mak:
--------------------------------------------------------------------------------
1 | # ----------------------------------------------------------------------------
2 | # Do NOT move these functions - they must live in the top level makefile
3 | #
4 | # ROOT_PATH is the path to the project relative to the directory that you called
5 | # this makefile from. The adaptabuild system needs to know this so that it can
6 | # include the files it needs.
7 | #
8 | ROOT_PATH := $(dir $(firstword $(MAKEFILE_LIST)))
9 | 
10 | #ROOT_PATH := $(patsubst %/,%,$(dir $(firstword $(MAKEFILE_LIST))))
11 | 
12 | # The adaptabuild path MUST be at the root level - unfortunately there is
13 | # currently no obvious (to me) way to move this boilerplate into an include file.
14 | #
15 | ADAPTABUILD_PATH := $(ROOT_PATH)/adaptabuild
16 | 
17 | # ----------------------------------------------------------------------------
18 | # Now that we have ADAPTABUILD_PATH set we can import the log utilities
19 | #
20 | LOG_WARNING ?= x
21 | LOG_NOTICE ?= x
22 | LOG_INFO ?= x
23 | LOG_DEBUG ?= x
24 | 
25 | include $(ADAPTABUILD_PATH)/make/log.mak
26 | 
27 | # There can be only one top level source directory - all of the artifacts to
28 | # be built with adaptabuild must live under this directory!
29 | #
30 | # SRC_PATH is always specified relative to the ROOT_PATH. It doesn't
31 | # really matter how complex the path is, the adaptabuild system will
32 | # normalize it relative to the ROOT_PATH internally.
33 | #
34 | # For example, if you have organized all the artifact source code under a
35 | # directory called "src" you would write:
36 | #
37 | # SRC_PATH := $(ROOT_PATH)/src
38 | #
39 | # If the artifacts are at the same level as the ROOT_PATH, the following values
40 | # can be used and are equivalent:
41 | #
42 | # SRC_PATH := $(ROOT_PATH)
43 | # SRC_PATH := $(ROOT_PATH)/
44 | # SRC_PATH := $(ROOT_PATH)/.
45 | #
46 | SRC_PATH := $(ROOT_PATH)/.
47 | 
48 | # ---------------------------------------------------------------------------
49 | # Do NOT move this include - it MUST be before the definition of MCU_MAK
50 | # and after the definition of SRC_PATH
51 | #
52 | include $(ADAPTABUILD_PATH)/make/adaptabuild.mak
--------------------------------------------------------------------------------
/adaptabuild_module.mak:
--------------------------------------------------------------------------------
1 | # ----------------------------------------------------------------------------
2 | # umm_malloc makefile for adaptabuild
3 | #
4 | # This is designed to be included as part of a make system designed
5 | # to be expandable and maintainable using techniques found in:
6 | #
7 | # Managing Projects with GNU Make - Robert Mecklenburg - ISBN 0-596-00610-1
8 | # ----------------------------------------------------------------------------
9 | 
10 | MODULE := umm_malloc
11 | 
12 | MODULE_PATH := $(call make_current_module_path)
13 | $(call log_debug,MODULE_PATH is $(MODULE_PATH))
14 | 
15 | $(MODULE)_PATH := $(MODULE_PATH)
16 | $(call log_debug,$(MODULE)_PATH is $($(MODULE)_PATH))
17 | 
18 | # ----------------------------------------------------------------------------
19 | # Source file lists go here, C dependencies are automatically generated
20 | # by the compiler using the -M option
21 | #
22 | # You can set up a common source path later in the file
23 | #
24 | # Note that each module gets its own, privately scoped variable for building
25 | # ----------------------------------------------------------------------------
26 | 
27 | # We need both, else a previous definition is used :-) Can we make this an include?
28 | 
29 | SRC_C :=
30 | SRC_ASM :=
31 | SRC_TEST :=
32 | 
33 | # Here is where we begin to add files to the list of sources
34 | 
35 | SRC_C += src/umm_malloc.c
36 | 
37 | SRC_TEST += cpputest/test_FirstMalloc.cpp
38 | SRC_TEST += cpputest/test_TooBigMalloc.cpp
39 | SRC_TEST += cpputest/test_Free.cpp
40 | SRC_TEST += cpputest/test_Realloc.cpp
41 | SRC_TEST += cpputest/test_MultiMalloc.cpp
42 | SRC_TEST += cpputest/test_Metrics.cpp
43 | SRC_TEST += cpputest/test_Poison.cpp
44 | SRC_TEST += cpputest/test_Stress.cpp
45 | SRC_TEST += cpputest/support_umm_malloc.cpp
46 | SRC_TEST += cpputest/main.cpp
47 | 
48 | # ----------------------------------------------------------------------------
49 | # Set up the module level include path
50 | 
51 | $(MODULE)_INCPATH :=
52 | 
53 | ifeq (unittest,$(MAKECMDGOALS))
54 | $(MODULE)_INCPATH += $(MODULE_PATH)/src
55 | $(MODULE)_INCPATH += $(MODULE_PATH)/cpputest
56 | endif
57 | 
58 | # ----------------------------------------------------------------------------
59 | # NOTE: The default config file must be created somehow - it is normally
60 | # up to the developer to specify which defines are needed and how they
61 | # are to be configured.
62 | #
63 | # By convention we place config files in $(PRODUCT)/config/$(MCU) because
64 | # that's an easy place to leave things like HAL config, linker scripts etc.
65 | 
66 | $(MODULE)_INCPATH += $(PRODUCT)/config/$(MCU)
67 | 
68 | # ----------------------------------------------------------------------------
69 | # Set any module level compile time defaults here
70 | 
71 | $(MODULE)_CDEFS :=
72 | $(MODULE)_CDEFS +=
73 | 
74 | $(call log_notice,UMM_MALLOC_CFGFILE is $(UMM_MALLOC_CFGFILE))
75 | 
76 | ifneq (,$(UMM_MALLOC_CFGFILE))
77 | $(MODULE)_CDEFS += UMM_MALLOC_CFGFILE=$(UMM_MALLOC_CFGFILE)
78 | endif
79 | 
80 | $(MODULE)_CFLAGS :=
81 | $(MODULE)_CFLAGS +=
82 | 
83 | ifeq (unittest,$(MAKECMDGOALS))
84 | $(MODULE)_CDEFS +=
85 | $(MODULE)_test_main := cpputest/main.o
86 | endif
87 | 
88 | # ----------------------------------------------------------------------------
89 | # Include the adaptabuild library makefile - must be done for each module!
90 | 
91 | include $(ADAPTABUILD_PATH)/make/library.mak
92 | 
93 | # # ----------------------------------------------------------------------------
94 | # # Include the unit test framework makefile that works for this module
95 | # # if the target is cpputest
96 | #
97 | # ifeq (unittest,$(MAKECMDGOALS))
98 | # TESTABLE_MODULES += $(MODULE)_UNITTEST
99 | # $(MODULE)_test_main := cpputest/main.o
100 | # include $(ADAPTABUILD_PATH)/make/test/cpputest.mak
101 | # endif
102 | 
103 | # ----------------------------------------------------------------------------
--------------------------------------------------------------------------------
/adaptabuild_product.mak:
--------------------------------------------------------------------------------
1 | # ----------------------------------------------------------------------------
2 | # adaptabuild_product.mak - product specific include file
3 | #
4 | # Here is where you specify your product options
5 | # ----------------------------------------------------------------------------
6 | 
7 | # The umm_malloc library can be built with a number of different options, and
8 | # we control the build by setting the UMM_MALLOC_CFGFILE on the command line
9 | #
10 | # The following configurations can be built as separate products, each one has
11 | # a corresponding .h file
12 | #
13 | # default
14 | # enable_critical_depth_check
15 | # enable_info
16 | # enable_inline_metrics
17 | # enable_integrity_check
18 | # enable_poison_check
19 | # enable_first_fit
20 | #
21 | PRODUCT_LIST := default \
22 | enable_info \
23 | enable_critical_depth_check \
24 | enable_inline_metrics \
25 | enable_integrity_check \
26 | enable_poison_check \
27 | enable_first_fit
28 | 
29 | ifneq ($(filter $(PRODUCT),$(PRODUCT_LIST)),)
30 | # Set PRODUCT_MAIN here based on $(PRODUCT)
31 | # The unittest product typically does not need PRODUCT_MAIN
32 | ifeq (default,$(PRODUCT))
33 | UMM_MALLOC_CFGFILE := \"options/default.h\"
34 | else ifeq (enable_info,$(PRODUCT))
35 | UMM_MALLOC_CFGFILE := \"options/enable_umm_info.h\"
36 | else ifeq (enable_critical_depth_check,$(PRODUCT))
37 | UMM_MALLOC_CFGFILE := \"options/enable_critical_depth_check.h\"
38 | else ifeq (enable_inline_metrics,$(PRODUCT))
39 | UMM_MALLOC_CFGFILE := \"options/enable_inline_metrics.h\"
40 | else ifeq (enable_integrity_check,$(PRODUCT))
41 | UMM_MALLOC_CFGFILE := \"options/enable_integrity_check.h\"
42 | else ifeq (enable_poison_check,$(PRODUCT))
43 | UMM_MALLOC_CFGFILE := \"options/enable_poison_check.h\"
44 | else ifeq (enable_first_fit,$(PRODUCT))
45 | UMM_MALLOC_CFGFILE := \"options/enable_first_fit.h\"
46 | endif
47 | else
48 | $(error PRODUCT must be one of $(PRODUCT_LIST))
49 | endif
50 | 
51 | 
52 | 
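On the C side, the `UMM_MALLOC_CFGFILE` define selected above is typically consumed with an include-selection pattern along these lines (a sketch of the idea, not a copy of `src/umm_malloc_cfg.h`):

```c
/* Inside a config header: prefer the file named on the command line,
 * fall back to the application's (possibly empty) override file. */
#ifdef UMM_MALLOC_CFGFILE
#include UMM_MALLOC_CFGFILE
#else
#include "umm_malloc_cfgport.h"
#endif
```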
\"options/enable_first_fit.h\" 46 | endif 47 | else 48 | $(error PRODUCT must be one of $(PRODUCT_LIST)) 49 | endif 50 | 51 | 52 | -------------------------------------------------------------------------------- /cpputest/main.cpp: -------------------------------------------------------------------------------- 1 | #include "CppUTest/CommandLineTestRunner.h" 2 | 3 | IMPORT_TEST_GROUP (FirstMalloc); 4 | IMPORT_TEST_GROUP (TooBigMalloc); 5 | IMPORT_TEST_GROUP (Free); 6 | IMPORT_TEST_GROUP (Realloc); 7 | IMPORT_TEST_GROUP (MultiMalloc); 8 | IMPORT_TEST_GROUP (Metrics); 9 | IMPORT_TEST_GROUP (Poison); 10 | IMPORT_TEST_GROUP (Stress); 11 | 12 | int main(int ac, char** av) 13 | { 14 | return CommandLineTestRunner::RunAllTests(ac, av); 15 | } -------------------------------------------------------------------------------- /cpputest/support_umm_malloc.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include "umm_malloc_cfg.h" 8 | #include "umm_malloc.h" 9 | 10 | #include "support_umm_malloc.h" 11 | 12 | #include "CppUTest/TestHarness.h" 13 | 14 | /* Start addresses and the size of the heap */ 15 | uint32_t UMM_MALLOC_CFG_HEAP_SIZE = SUPPORT_UMM_MALLOC_HEAP_SIZE; 16 | 17 | char test_umm_heap[0x2000][UMM_BLOCK_BODY_SIZE]; 18 | void *UMM_MALLOC_CFG_HEAP_ADDR = &test_umm_heap; 19 | 20 | int umm_max_critical_depth; 21 | int umm_critical_depth; 22 | 23 | bool check_all_bytes(uint8_t *p, size_t s, uint8_t v) { 24 | while ((*p == v) && s) { 25 | ++p; 26 | --s; 27 | } 28 | 29 | return s == 0; 30 | } 31 | 32 | // Note, the get_xxx() functions depend on knowledge of the internals 33 | // of umm_malloc.c which are not to be exposed to the user of the library 34 | // 35 | // Specifically, the array offsets represent the block indexes 36 | // within the umm_block_t as follows: 37 | // 38 | // 0 - header.used.next (plus free flag) 39 | // 1 - header.used.prev 40 | // 2 - body.free.next 41 | // 3 - body.free.prev 42 | 43 | bool get_block_is_free(int b) { 44 | return (((uint16_t *)(&test_umm_heap[b]))[0] & 0x8000) == 0x8000; 45 | } 46 | 47 | uint16_t get_block_next(int b) { 48 | return ((uint16_t *)(&test_umm_heap[b]))[0] & 0x7FFF; 49 | } 50 | 51 | uint16_t get_block_prev(int b) { 52 | return ((uint16_t *)(&test_umm_heap[b]))[1]; 53 | } 54 | 55 | uint16_t get_block_next_free(int b) { 56 | return ((uint16_t *)(&test_umm_heap[b]))[2]; 57 | } 58 | 59 | uint16_t get_block_prev_free(int b) { 60 | return ((uint16_t *)(&test_umm_heap[b]))[3]; 61 | } 62 | 63 | char block_test_msg[TEST_MSG_LEN]; 64 | char block_actual_msg[TEST_MSG_LEN]; 65 | char test_msg[256]; 66 | 67 | bool check_block(struct block_test_values *t) { 68 | snprintf(block_test_msg, TEST_MSG_LEN, "\nTest__: Block %04d f %d n %04d p %04d nf %04d pf %04d", t->block 69 | , t->is_free 70 | , t->next 71 | , t->prev 72 | , t->next_free 73 | , t->prev_free); 74 | snprintf(block_actual_msg, TEST_MSG_LEN, "\nActual: Block %04d f %d n %04d p %04d nf %04d pf %04d\n", t->block 75 | , get_block_is_free(t->block) 76 | , get_block_next(t->block) 77 | , get_block_prev(t->block) 78 | , get_block_next_free(t->block) 79 | , get_block_prev_free(t->block)); 80 | strncpy(test_msg, block_test_msg, 256); 81 | strncat(test_msg, block_actual_msg, 256); 82 | 83 | CHECK_EQUAL_TEXT(t->is_free, get_block_is_free(t->block), test_msg); 84 | CHECK_EQUAL_TEXT(t->next, get_block_next(t->block), test_msg); 85 | CHECK_EQUAL_TEXT(t->prev, get_block_prev(t->block), test_msg); 86 | 
CHECK_EQUAL_TEXT(t->next_free, get_block_next_free(t->block), test_msg); 87 | CHECK_EQUAL_TEXT(t->prev_free, get_block_prev_free(t->block), test_msg); 88 | 89 | return true; 90 | } 91 | 92 | size_t normalize_allocation_size(size_t s) { 93 | size_t first_block; 94 | size_t full_blocks; 95 | size_t extra_bytes; 96 | 97 | // This function normalizes the number of bytes to allocate so that all 98 | // the test cases work the same. The original test cases were designed 99 | // for the smallest block body size, so we need to calculate: 100 | // 101 | // 1. If we need only the initial block 102 | // 2. Number of additional full blocks 103 | // 3. Any extra bytes 104 | 105 | if (s <= (UMM_MIN_BLOCK_BODY_SIZE - UMM_BLOCK_HEADER_SIZE)) { 106 | first_block = 0; 107 | full_blocks = 0; 108 | extra_bytes = s; 109 | } else { 110 | first_block = 1; 111 | full_blocks = ((s - (UMM_MIN_BLOCK_BODY_SIZE - UMM_BLOCK_HEADER_SIZE)) / UMM_MIN_BLOCK_BODY_SIZE); 112 | extra_bytes = (s - (UMM_MIN_BLOCK_BODY_SIZE - UMM_BLOCK_HEADER_SIZE) - (full_blocks * UMM_MIN_BLOCK_BODY_SIZE)); 113 | } 114 | 115 | return first_block * UMM_FIRST_BLOCK_BODY_SIZE + full_blocks * UMM_BLOCK_BODY_SIZE + extra_bytes; 116 | } 117 | 118 | bool check_blocks(struct block_test_values *t, size_t n) { 119 | int i; 120 | for (i = 0; i < n; ++i) { 121 | CHECK_TRUE(check_block(&t[i])); 122 | } 123 | return true; 124 | } 125 | 126 | static uint64_t seed = 0; 127 | 128 | void srand32(uint32_t s) { 129 | seed = s; 130 | } 131 | 132 | uint32_t rand32(void) { 133 | seed = (uint32_t)(((uint64_t)2862933555777941757 * seed) + 3037000493); 134 | 135 | return seed >> 10; 136 | } 137 | 138 | #define STRESS_TEST_ENTRIES (256) 139 | 140 | #ifdef UMM_POISON_CHECK 141 | struct umm_test_functions umm_test_functions = { 142 | umm_poison_malloc, 143 | umm_poison_calloc, 144 | umm_poison_realloc, 145 | umm_poison_free, 146 | umm_poison_check, 147 | }; 148 | #else 149 | bool umm_check(void) { 150 | return true; 151 | } 152 | 153 | struct umm_test_functions umm_test_functions = { 154 | umm_malloc, 155 | umm_calloc, 156 | umm_realloc, 157 | umm_free, 158 | umm_check, 159 | }; 160 | #endif 161 | 162 | #define UMM_TEST_GETTIME(a) (clock_gettime(CLOCK_REALTIME, &a)) 163 | 164 | #define UMM_TEST_DIFFTIME(a,b) ((b.tv_sec - a.tv_sec) * (uint64_t)(1000 * 1000 * 1000) \ 165 | + (b.tv_nsec - a.tv_nsec + 100)) 166 | 167 | uint64_t stress_test(int iterations, struct umm_test_functions *f) { 168 | void *p[STRESS_TEST_ENTRIES]; 169 | int i,j,k; 170 | size_t s; 171 | 172 | uint64_t umm_malloc_time = 0; 173 | 174 | struct timespec start, end; 175 | 176 | srand32(0); 177 | 178 | for (j = 0; j < STRESS_TEST_ENTRIES; ++j) { 179 | p[j] = (void *)NULL; 180 | } 181 | 182 | for (i = 0; i < iterations; ++i) { 183 | j = rand32() % STRESS_TEST_ENTRIES; 184 | 185 | switch (rand32() % 16) { 186 | 187 | case 0: 188 | case 1: 189 | case 2: 190 | case 3: 191 | case 4: 192 | case 5: 193 | case 6: { 194 | UMM_TEST_GETTIME(start); 195 | p[j] = f->umm_test_realloc(p[j], 0); 196 | UMM_TEST_GETTIME(end); 197 | umm_malloc_time += UMM_TEST_DIFFTIME(start,end); 198 | 199 | // TEST_ASSERT_NULL(p[j]); 200 | break; 201 | } 202 | case 7: 203 | case 8: { 204 | s = normalize_allocation_size(rand32() % 64); 205 | // UMM_TEST_GETTIME(start); 206 | p[j] = f->umm_test_realloc(p[j], s); 207 | // UMM_TEST_GETTIME(end); 208 | umm_malloc_time += UMM_TEST_DIFFTIME(start,end); 209 | 210 | if (s) { 211 | // TEST_ASSERT_NOT_NULL(p[j]); 212 | memset(p[j], 0xfe, s); 213 | } else { 214 | // TEST_ASSERT_NULL(p[j]); 215 | } 216 | 
break;
217 | }
218 | 
219 | case 9:
220 | case 10:
221 | case 11:
222 | case 12: {
223 | s = normalize_allocation_size(rand32() % 100);
224 | // UMM_TEST_GETTIME(start);
225 | p[j] = f->umm_test_realloc(p[j], s);
226 | // UMM_TEST_GETTIME(end);
227 | umm_malloc_time += UMM_TEST_DIFFTIME(start,end);
228 | 
229 | if (s) {
230 | // TEST_ASSERT_NOT_NULL(p[j]);
231 | memset(p[j], 0xfe, s);
232 | } else {
233 | // TEST_ASSERT_NULL(p[j]);
234 | }
235 | break;
236 | }
237 | 
238 | case 13:
239 | case 14: {
240 | s = normalize_allocation_size(rand32() % 200);
241 | // UMM_TEST_GETTIME(start);
242 | f->umm_test_free(p[j]);
243 | p[j] = f->umm_test_calloc(1, s);
244 | // UMM_TEST_GETTIME(end);
245 | umm_malloc_time += UMM_TEST_DIFFTIME(start,end);
246 | 
247 | if (s) {
248 | // TEST_ASSERT_NOT_NULL(p[j]);
249 | // TEST_ASSERT_TRUE(check_all_bytes(p[j], s, 0x00));
250 | memset(p[j], 0xfe, s);
251 | } else {
252 | // TEST_ASSERT_NULL(p[j]);
253 | }
254 | break;
255 | }
256 | 
257 | default: {
258 | s = normalize_allocation_size(rand32() % 400);
259 | // UMM_TEST_GETTIME(start);
260 | f->umm_test_free(p[j]);
261 | p[j] = f->umm_test_malloc(s);
262 | // UMM_TEST_GETTIME(end);
263 | umm_malloc_time += UMM_TEST_DIFFTIME(start,end);
264 | 
265 | if (s) {
266 | // TEST_ASSERT_NOT_NULL(p[j]);
267 | memset(p[j], 0xfe, s);
268 | } else {
269 | // TEST_ASSERT_NULL(p[j]);
270 | }
271 | break;
272 | }
273 | }
274 | 
275 | // TEST_ASSERT_NOT_EQUAL(0, INTEGRITY_CHECK());
276 | // TEST_ASSERT_NOT_EQUAL(0, POISON_CHECK());
277 | }
278 | 
279 | return umm_malloc_time;
280 | }
281 | 
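For reference, a hypothetical harness built on the pieces above - the `umm_test_functions` table lets the same stress loop drive either the poison-checking API or the plain API, `stress_test()` seeds the PRNG itself so runs are repeatable, and the iteration count is arbitrary:

```c
#include <stdio.h>
#include "umm_malloc.h"
#include "support_umm_malloc.h"

void run_stress(void) {
    umm_init();
    uint64_t ns = stress_test(1000000, &umm_test_functions);
    printf("time spent in the allocator: %llu ns\n", (unsigned long long)ns);
}
```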
--------------------------------------------------------------------------------
/cpputest/support_umm_malloc.h:
--------------------------------------------------------------------------------
1 | #ifndef _SUPPORT_UMM_MALLOC_H
2 | #define _SUPPORT_UMM_MALLOC_H
3 | 
4 | #include <stdint.h>
5 | #include <stdbool.h>
6 | 
7 | #include <umm_malloc_cfg.h>
8 | /* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */
9 | 
10 | #ifdef UMM_DBGLOG_ENABLE
11 | #include "dbglog/dbglog.h"
12 | #endif
13 | 
14 | #define UMM_BLOCK_HEADER_SIZE (4)
15 | #define UMM_FIRST_BLOCK_BODY_SIZE (UMM_BLOCK_BODY_SIZE - UMM_BLOCK_HEADER_SIZE)
16 | 
17 | extern char test_umm_heap[][UMM_BLOCK_BODY_SIZE];
18 | extern int umm_max_critical_depth;
19 | extern int umm_critical_depth;
20 | 
21 | struct block_test_values {
22 | uint16_t block;
23 | bool is_free;
24 | uint16_t next;
25 | uint16_t prev;
26 | uint16_t next_free;
27 | uint16_t prev_free;
28 | };
29 | 
30 | extern size_t normalize_allocation_size(size_t);
31 | extern bool check_block(struct block_test_values *);
32 | extern bool check_blocks(struct block_test_values *, size_t);
33 | 
34 | extern void srand32(uint32_t);
35 | extern uint32_t rand32(void);
36 | 
37 | // Note, the block size calculation depends on knowledge of the internals
38 | // of umm_malloc.c which are not to be exposed to the user of the library
39 | //
40 | // We now can configure the block body size - to make the tests pass
41 | // without changing the ptr indexes we scale the UMM_MALLOC_HEAP_SIZE
42 | // to the UMM_BLOCK_BODY_SIZE
43 | 
44 | #define ARRAYELEMENTCOUNT(x) (sizeof (x) / sizeof (x)[0])
45 | 
46 | #define SUPPORT_UMM_MALLOC_BLOCKS (0x2000)
47 | #define SUPPORT_UMM_MALLOC_HEAP_SIZE (SUPPORT_UMM_MALLOC_BLOCKS * UMM_BLOCK_BODY_SIZE)
48 | #define UMM_LASTBLOCK ((SUPPORT_UMM_MALLOC_HEAP_SIZE / UMM_BLOCK_BODY_SIZE) - 1)
49 | 
50 | #define TEST_MSG_LEN (132)
51 | 
52 | struct umm_test_functions {
53 | void *(*umm_test_malloc)(size_t);
54 | void *(*umm_test_calloc)(size_t, size_t);
55 | void *(*umm_test_realloc)(void *, size_t);
56 | void (*umm_test_free)(void *);
57 | bool (*umm_test_check)(void);
58 | };
59 | 
60 | extern struct umm_test_functions umm_test_functions;
61 | 
62 | extern uint64_t stress_test(int, struct umm_test_functions *);
63 | 
64 | #endif // _SUPPORT_UMM_MALLOC_H
65 | 
--------------------------------------------------------------------------------
/cpputest/test_FirstMalloc.cpp:
--------------------------------------------------------------------------------
1 | #include <umm_malloc.h>
2 | #include <support_umm_malloc.h>
3 | 
4 | ///* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */
5 | //
6 | //#define DBGLOG_LEVEL 0
7 | //
8 | //#ifdef DBGLOG_ENABLE
9 | // #include "dbglog/dbglog.h"
10 | //#endif
11 | 
12 | #include "CppUTest/TestHarness.h"
13 | 
14 | TEST_GROUP(FirstMalloc)
15 | {
16 | void setup(void) {
17 | umm_init();
18 | umm_critical_depth = 0;
19 | umm_max_critical_depth = 0;
20 | }
21 | 
22 | void teardown(void) {
23 | CHECK_COMPARE(1, >=, umm_max_critical_depth);
24 | }
25 | };
26 | 
27 | struct block_test_values Initialization_test_values[] =
28 | {
29 | {0, false, 1, 0, 1, 1}
30 | , {1, true, UMM_LASTBLOCK, 0, 0, 0}
31 | , {UMM_LASTBLOCK, false, 0, 1, 0, 0}
32 | };
33 | 
34 | TEST(FirstMalloc, testHeapInitialization)
35 | {
36 | // DBGLOG_FORCE(true, "support heapsize %08x\n", SUPPORT_UMM_MALLOC_HEAP_SIZE);
37 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values)));
38 | }
39 | 
40 | TEST(FirstMalloc, testHeapFirstMalloc0Bytes)
41 | {
42 | POINTERS_EQUAL((void *)NULL, (umm_malloc(0)));
43 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values)));
44 | }
45 | 
46 | struct block_test_values MallocFirstBlock_test_values[] =
47 | { {0, false, 1, 0, 2, 2}
48 | , {1, false, 2, 0, 0, 0}
49 | , {2, true, UMM_LASTBLOCK, 1, 0, 0}
50 | , {UMM_LASTBLOCK, false, 0, 2, 0, 0}
51 | };
52 | 
53 | TEST(FirstMalloc, testHeapFirstMalloc1Bytes)
54 | {
55 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(1)));
56 | CHECK_TRUE(check_blocks(MallocFirstBlock_test_values, ARRAYELEMENTCOUNT(MallocFirstBlock_test_values)));
57 | }
58 | 
59 | TEST(FirstMalloc, testHeapFirstMalloc2Bytes)
60 | {
61 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(2)));
62 | CHECK_TRUE(check_blocks(MallocFirstBlock_test_values, ARRAYELEMENTCOUNT(MallocFirstBlock_test_values)));
63 | }
64 | 
65 | TEST(FirstMalloc, testHeapMallocFirstBlockBodyMinusOneBytes)
66 | {
67 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE - 1)));
68 | CHECK_TRUE(check_blocks(MallocFirstBlock_test_values, ARRAYELEMENTCOUNT(MallocFirstBlock_test_values)));
69 | }
70 | 
71 | TEST(FirstMalloc, testHeapMallocFirstBlockBodyBytes)
72 | {
73 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE)));
74 | CHECK_TRUE(check_blocks(MallocFirstBlock_test_values, ARRAYELEMENTCOUNT(MallocFirstBlock_test_values)));
75 | }
76 | 
77 | struct block_test_values MallocSecondBlock_test_values[] =
78 | { {0, false, 1, 0, 3, 3}
79 | , {1, false, 3, 0, 0, 0}
80 | , {3, true, UMM_LASTBLOCK, 1, 0, 0}
81 | , {UMM_LASTBLOCK, false, 0, 3, 0, 0}
82 | };
83 | 
84 | TEST(FirstMalloc, testHeapMallocFirstBlockBodyPlusOneBytes)
85 | {
86 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + 1)));
87 | CHECK_TRUE(check_blocks(MallocSecondBlock_test_values, ARRAYELEMENTCOUNT(MallocSecondBlock_test_values)));
88 | }
89 | 
90 | TEST(FirstMalloc, testHeapMallocFirstBlockBodyPlusFullBlockBytes)
91 | {
92 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE)));
93 | CHECK_TRUE(check_blocks(MallocSecondBlock_test_values, ARRAYELEMENTCOUNT(MallocSecondBlock_test_values)));
94 | }
95 | 
96 | struct block_test_values MallocThirdBlock_test_values[] =
97 | { {0, false, 1, 0, 4, 4}
98 | , {1, false, 4, 0, 0, 0}
99 | , {4, true, UMM_LASTBLOCK, 1, 0, 0}
100 | , {UMM_LASTBLOCK, false, 0, 4, 0, 0}
101 | };
102 | 
103 | TEST(FirstMalloc, testHeapMallocFirstAndSecondBlockBodyPlusOneBytes)
104 | {
105 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE + 1)));
106 | CHECK_TRUE(check_blocks(MallocThirdBlock_test_values, ARRAYELEMENTCOUNT(MallocThirdBlock_test_values)));
107 | }
108 | 
109 | struct block_test_values MallocFirstBlockBodyPlus500Blocks_test_values[] =
110 | { {0, false, 1, 0, 502, 502}
111 | , {1, false, 502, 0, 0, 0}
112 | , {502, true, UMM_LASTBLOCK, 1, 0, 0}
113 | , {UMM_LASTBLOCK, false, 0, 502, 0, 0}
114 | };
115 | 
116 | TEST(FirstMalloc, testHeapMallocFirstBlockBodyPlus500Blocks)
117 | {
118 | // Note that this test will actually allocate 501 blocks as the first
119 | // block contains the prev/next pointer pair.
120 | //
121 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE * 500)));
122 | CHECK_TRUE(check_blocks(MallocFirstBlockBodyPlus500Blocks_test_values, ARRAYELEMENTCOUNT(MallocFirstBlockBodyPlus500Blocks_test_values)));
123 | }
124 | 
125 | struct block_test_values MallocAllBlocks_test_values[] =
126 | { {0, false, 1, 0, 0, 0}
127 | , {1, false, UMM_LASTBLOCK, 0, 0, 0}
128 | , {UMM_LASTBLOCK, false, 0, 1, 0, 0}
129 | };
130 | 
131 | TEST(FirstMalloc, testHeapAllBlocks)
132 | {
133 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE * (SUPPORT_UMM_MALLOC_BLOCKS - 3))));
134 | CHECK_TRUE(check_blocks(MallocAllBlocks_test_values, ARRAYELEMENTCOUNT(MallocAllBlocks_test_values)));
135 | }
136 | 
137 | TEST(FirstMalloc, testHeapAllBlocksPlusOneByte)
138 | {
139 | POINTERS_EQUAL((void *)NULL, (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE * (SUPPORT_UMM_MALLOC_BLOCKS - 3) + 1)));
140 | }
141 | 
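The block arithmetic behind the expected indexes in these tests can be written out explicitly. A hypothetical helper, consistent with `normalize_allocation_size()` in `cpputest/support_umm_malloc.cpp`:

```c
#include <stddef.h>
#include "support_umm_malloc.h"

/* Hypothetical helper: how many blocks a request of s bytes consumes.
 * The first block gives up UMM_BLOCK_HEADER_SIZE bytes to the header;
 * every additional block carries a full UMM_BLOCK_BODY_SIZE of data. */
size_t blocks_needed(size_t s) {
    if (s <= UMM_FIRST_BLOCK_BODY_SIZE) {
        return 1;
    }
    return 1 + (s - UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE - 1) / UMM_BLOCK_BODY_SIZE;
}

/* e.g. blocks_needed(UMM_FIRST_BLOCK_BODY_SIZE)     == 1 -> next free block is 2
 *      blocks_needed(UMM_FIRST_BLOCK_BODY_SIZE + 1) == 2 -> next free block is 3 */
```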
--------------------------------------------------------------------------------
/cpputest/test_Free.cpp:
--------------------------------------------------------------------------------
1 | #include <umm_malloc.h>
2 | #include <support_umm_malloc.h>
3 | 
4 | ///* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */
5 | //
6 | //#define DBGLOG_LEVEL 0
7 | //
8 | //#ifdef DBGLOG_ENABLE
9 | // #include "dbglog/dbglog.h"
10 | //#endif
11 | 
12 | #include "CppUTest/TestHarness.h"
13 | 
14 | void *p[5];
15 | 
16 | TEST_GROUP(Free)
17 | {
18 | void setup(void) {
19 | umm_init();
20 | umm_critical_depth = 0;
21 | umm_max_critical_depth = 0;
22 | 
23 | p[0] = umm_malloc(4);
24 | p[1] = umm_malloc(4);
25 | p[2] = umm_malloc(4);
26 | p[3] = umm_malloc(4);
27 | p[4] = umm_malloc(4);
28 | }
29 | 
30 | void teardown(void) {
31 | CHECK_COMPARE(1, >=, umm_max_critical_depth);
32 | }
33 | };
34 | 
35 | struct block_test_values FreeDoNothing_test_values[] =
36 | {
37 | {0, false, 1, 0, 6, 6}
38 | , {1, false, 2, 0, 0, 0}
39 | , {2, false, 3, 1, 0, 0}
40 | , {3, false, 4, 2, 0, 0}
41 | , {4, false, 5, 3, 0, 0}
42 | 
, {5, false, 6, 4, 0, 0} 43 | , {6, true, UMM_LASTBLOCK, 5, 0, 0} 44 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 45 | }; 46 | 47 | TEST(Free, testFreeNullPtr) 48 | { 49 | umm_free((void *)NULL); 50 | 51 | CHECK_TRUE(check_blocks(FreeDoNothing_test_values, ARRAYELEMENTCOUNT(FreeDoNothing_test_values))); 52 | 53 | } 54 | 55 | TEST(Free, testFreeLowPtr) 56 | { 57 | umm_free((test_umm_heap - 1)); 58 | 59 | CHECK_TRUE(check_blocks(FreeDoNothing_test_values, ARRAYELEMENTCOUNT(FreeDoNothing_test_values))); 60 | } 61 | 62 | TEST(Free, testFreeHighPtr) 63 | { 64 | umm_free(test_umm_heap + SUPPORT_UMM_MALLOC_HEAP_SIZE); 65 | 66 | CHECK_TRUE(check_blocks(FreeDoNothing_test_values, ARRAYELEMENTCOUNT(FreeDoNothing_test_values))); 67 | } 68 | 69 | struct block_test_values FreeFirst_test_values[] = { 70 | {0, false, 1, 0, 1, 1} 71 | , {1, true, UMM_LASTBLOCK, 0, 0, 0} 72 | , {UMM_LASTBLOCK, false, 0, 1, 0, 0} 73 | }; 74 | 75 | TEST(Free, testFreeFirst) 76 | { 77 | umm_init(); 78 | umm_free(umm_malloc(4)); 79 | 80 | CHECK_TRUE(check_blocks(FreeFirst_test_values, ARRAYELEMENTCOUNT(FreeFirst_test_values))); 81 | } 82 | 83 | struct block_test_values FreeLast_test_values[] = { 84 | {0, false, 1, 0, 5, 5} 85 | , {1, false, 2, 0, 0, 0} 86 | , {2, false, 3, 1, 0, 0} 87 | , {3, false, 4, 2, 0, 0} 88 | , {4, false, 5, 3, 0, 0} 89 | , {5, true, UMM_LASTBLOCK, 4, 0, 0} 90 | , {UMM_LASTBLOCK, false, 0, 5, 0, 0} 91 | }; 92 | 93 | TEST(Free, testFreeLast) 94 | { 95 | umm_free(p[4]); 96 | 97 | CHECK_TRUE(check_blocks(FreeLast_test_values, ARRAYELEMENTCOUNT(FreeLast_test_values))); 98 | } 99 | 100 | struct block_test_values FreeSecondLast_test_values[] = { 101 | {0, false, 1, 0, 4, 6} 102 | , {1, false, 2, 0, 0, 0} 103 | , {2, false, 3, 1, 0, 0} 104 | , {3, false, 4, 2, 0, 0} 105 | , {4, true, 5, 3, 6, 0} 106 | , {5, false, 6, 4, 0, 0} 107 | , {6, true, UMM_LASTBLOCK, 5, 0, 4} 108 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0}}; 109 | 110 | TEST(Free, testFreeSecondLast) 111 | { 112 | umm_free(p[3]); 113 | 114 | CHECK_TRUE(check_blocks(FreeSecondLast_test_values, ARRAYELEMENTCOUNT(FreeSecondLast_test_values))); 115 | } 116 | 117 | struct block_test_values FreeAssimilateUp_test_values[] = { 118 | {0, false, 1, 0, 3, 6} 119 | , {1, false, 2, 0, 0, 0} 120 | , {2, false, 3, 1, 0, 0} 121 | , {3, true, 5, 2, 6, 0} 122 | , {5, false, 6, 3, 0, 0} 123 | , {6, true, UMM_LASTBLOCK, 5, 0, 3} 124 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 125 | }; 126 | 127 | TEST(Free, testFreeAssimilateUp) 128 | { 129 | umm_free(p[3]); 130 | umm_free(p[2]); 131 | 132 | CHECK_TRUE(check_blocks(FreeAssimilateUp_test_values, ARRAYELEMENTCOUNT(FreeAssimilateUp_test_values))); 133 | } 134 | 135 | struct block_test_values FreeAssimilateDown_test_values[] = { 136 | {0, false, 1, 0, 3, 6} 137 | , {1, false, 2, 0, 0, 0} 138 | , {2, false, 3, 1, 0, 0} 139 | , {3, true, 5, 2, 6, 0} 140 | , {5, false, 6, 3, 0, 0} 141 | , {6, true, UMM_LASTBLOCK, 5, 0, 3} 142 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 143 | }; 144 | 145 | TEST(Free, testFreeAssimilateDown) 146 | { 147 | umm_free(p[2]); 148 | umm_free(p[3]); 149 | 150 | CHECK_TRUE(check_blocks(FreeAssimilateDown_test_values, ARRAYELEMENTCOUNT(FreeAssimilateDown_test_values))); 151 | } 152 | 153 | struct block_test_values FreeAssimilateUpDown_test_values[] = { 154 | {0, false, 1, 0, 2, 6} 155 | , {1, false, 2, 0, 0, 0} 156 | , {2, true, 5, 1, 6, 0} 157 | , {5, false, 6, 2, 0, 0} 158 | , {6, true, UMM_LASTBLOCK, 5, 0, 2} 159 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 160 | }; 161 | 162 | TEST(Free, testFreeAssimilateUpDown) 163 | { 164 | 
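    // Free the blocks out of order: p[3] and p[1] first, so that freeing
    // p[2] last must assimilate both up and down, collapsing blocks 2..4
    // into the single free entry expected in the table above.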
umm_free(p[3]); 165 | umm_free(p[1]); 166 | umm_free(p[2]); 167 | 168 | CHECK_TRUE(check_blocks(FreeAssimilateUpDown_test_values, ARRAYELEMENTCOUNT(FreeAssimilateUpDown_test_values))); 169 | } 170 | 171 | struct block_test_values FreeAssimilateDownUp_test_values[] = { 172 | {0, false, 1, 0, 2, 6} 173 | , {1, false, 2, 0, 0, 0} 174 | , {2, true, 5, 1, 6, 0} 175 | , {5, false, 6, 2, 0, 0} 176 | , {6, true, UMM_LASTBLOCK, 5, 0, 2} 177 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 178 | }; 179 | 180 | TEST(Free, testFreeAssimilateDownUp) 181 | { 182 | umm_free(p[2]); 183 | umm_free(p[1]); 184 | umm_free(p[3]); 185 | 186 | CHECK_TRUE(check_blocks(FreeAssimilateDownUp_test_values, ARRAYELEMENTCOUNT(FreeAssimilateDownUp_test_values))); 187 | } 188 | 189 | struct block_test_values FreeAssimilateFirst_test_values[] = { 190 | {0, false, 1, 0, 1, 6} 191 | , {1, true, 3, 0, 6, 0} 192 | , {3, false, 4, 1, 0, 0} 193 | , {4, false, 5, 3, 0, 0} 194 | , {5, false, 6, 4, 0, 0} 195 | , {6, true, UMM_LASTBLOCK, 5, 0, 1} 196 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 197 | }; 198 | 199 | TEST(Free, testFreeAssimilateFirst) 200 | { 201 | umm_free(p[1]); 202 | umm_free(p[0]); 203 | 204 | CHECK_TRUE(check_blocks(FreeAssimilateFirst_test_values, ARRAYELEMENTCOUNT(FreeAssimilateFirst_test_values))); 205 | } 206 | 207 | struct block_test_values FreeAssimilateLast_test_values[] = { 208 | {0, false, 1, 0, 4, 4} 209 | , {1, false, 2, 0, 0, 0} 210 | , {2, false, 3, 1, 0, 0} 211 | , {3, false, 4, 2, 0, 0} 212 | , {4, true, UMM_LASTBLOCK, 3, 0, 0} 213 | , {UMM_LASTBLOCK, false, 0, 4, 0, 0} 214 | }; 215 | 216 | TEST(Free, testFreeAssimilateLast) 217 | { 218 | umm_free(p[3]); 219 | umm_free(p[4]); 220 | 221 | CHECK_TRUE(check_blocks(FreeAssimilateLast_test_values, ARRAYELEMENTCOUNT(FreeAssimilateLast_test_values))); 222 | } 223 | 224 | 225 | struct block_test_values FreeHiLo_test_values[] = { 226 | {0, false, 1, 0, 2, 6} 227 | , {1, false, 2, 0, 0, 0} 228 | , {2, true, 3, 1, 4, 0} 229 | , {3, false, 4, 2, 0, 0} 230 | , {4, true, 5, 3, 6, 2} 231 | , {5, false, 6, 4, 0, 0} 232 | , {6, true, UMM_LASTBLOCK, 5, 0, 4} 233 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 234 | }; 235 | 236 | TEST(Free, testFreeHiLo) 237 | { 238 | umm_free(p[3]); 239 | umm_free(p[1]); 240 | 241 | CHECK_TRUE(check_blocks(FreeHiLo_test_values, ARRAYELEMENTCOUNT(FreeHiLo_test_values))); 242 | } 243 | 244 | struct block_test_values FreeLoHi_test_values[] = { 245 | {0, false, 1, 0, 4, 6} 246 | , {1, false, 2, 0, 0, 0} 247 | , {2, true, 3, 1, 6, 4} 248 | , {3, false, 4, 2, 0, 0} 249 | , {4, true, 5, 3, 2, 0} 250 | , {5, false, 6, 4, 0, 0} 251 | , {6, true, UMM_LASTBLOCK, 5, 0, 2} 252 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 253 | }; 254 | 255 | TEST(Free, testFreeLoHi) { 256 | umm_free(p[1]); 257 | umm_free(p[3]); 258 | 259 | CHECK_TRUE(check_blocks(FreeLoHi_test_values, ARRAYELEMENTCOUNT(FreeLoHi_test_values))); 260 | } 261 | -------------------------------------------------------------------------------- /cpputest/test_Metrics.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | ///* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */ 5 | // 6 | //#define DBGLOG_LEVEL 0 7 | // 8 | //#ifdef DBGLOG_ENABLE 9 | // #include "dbglog/dbglog.h" 10 | //#endif 11 | 12 | #include "CppUTest/TestHarness.h" 13 | 14 | TEST_GROUP(Metrics) 15 | { 16 | void setup(void) { 17 | umm_init(); 18 | umm_critical_depth = 0; 19 | umm_max_critical_depth = 0; 20 | } 21 | 22 | void teardown(void) { 23 | CHECK_COMPARE(1, >= 
,umm_max_critical_depth); 24 | } 25 | }; 26 | 27 | TEST(Metrics, testMetricsRandom) 28 | { 29 | int p[1000]; 30 | int i; 31 | 32 | for (i = 0; i < 1000; ++i) { 33 | p[i] = 0; 34 | } 35 | 36 | srand32(0); 37 | 38 | for (i = 0; i < (1000 * 1000 * 1000); ++i) { 39 | p[rand32() % 1000]++; 40 | } 41 | 42 | for (i = 0; i < 1000; ++i) { 43 | CHECK_COMPARE(1 * 1000 * 1000 + 3600, >=, p[i]); 44 | CHECK_COMPARE(1 * 1000 * 1000 - 3600, <=, p[i]); 45 | } 46 | } 47 | 48 | TEST(Metrics, testMetricsEmpty) 49 | { 50 | #ifdef UMM_INLINE_METRICS 51 | CHECK_COMPARE(0, ==, umm_fragmentation_metric()); 52 | #endif 53 | #ifdef UMM_INFO 54 | umm_info(0, false); 55 | CHECK_COMPARE(0, ==, umm_fragmentation_metric()); 56 | #endif 57 | } 58 | 59 | TEST(Metrics, testMetricsFull) 60 | { 61 | void *p[UMM_LASTBLOCK]; 62 | int i; 63 | 64 | memset(p, 0, sizeof(p)); 65 | 66 | for (i = 0; i < (UMM_LASTBLOCK - 1); ++i) { 67 | p[i] = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 68 | } 69 | 70 | #ifdef UMM_INLINE_METRICS 71 | CHECK_COMPARE(0, ==, umm_fragmentation_metric()); 72 | #endif 73 | #ifdef UMM_INFO 74 | umm_info(0, false); 75 | CHECK_COMPARE(0, ==, umm_fragmentation_metric()); 76 | #endif 77 | } 78 | 79 | TEST(Metrics, testMetricsSparseFull) 80 | { 81 | void *p[UMM_LASTBLOCK]; 82 | int i; 83 | 84 | memset(p, 0, sizeof(p)); 85 | 86 | for (i = 0; i < (UMM_LASTBLOCK); ++i) { 87 | p[i] = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 88 | } 89 | 90 | for (i = 1; i < (UMM_LASTBLOCK); i += 2) { 91 | umm_free(p[i]); 92 | } 93 | 94 | #ifdef UMM_INLINE_METRICS 95 | CHECK_COMPARE(99, ==, umm_fragmentation_metric()); 96 | #endif 97 | #ifdef UMM_INFO 98 | umm_info(0, false); 99 | CHECK_COMPARE(99, ==, umm_fragmentation_metric()); 100 | #endif 101 | } 102 | 103 | TEST(Metrics, testMetricsSparse7of8) 104 | { 105 | void *p[UMM_LASTBLOCK]; 106 | int i; 107 | 108 | memset(p, 0, sizeof(p)); 109 | 110 | for (i = 0; i < ((UMM_LASTBLOCK * 7) / 8); ++i) { 111 | p[i] = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 112 | } 113 | 114 | for (i = 1; i < ((UMM_LASTBLOCK * 7) / 8); i += 2) { 115 | umm_free(p[i]); 116 | } 117 | 118 | #ifdef UMM_INLINE_METRICS 119 | CHECK_COMPARE(78, ==, umm_fragmentation_metric()); 120 | #endif 121 | #ifdef UMM_INFO 122 | umm_info(0, false); 123 | CHECK_COMPARE(78, ==, umm_fragmentation_metric()); 124 | #endif 125 | } 126 | 127 | TEST(Metrics, testMetricsSparse3of4) 128 | { 129 | void *p[UMM_LASTBLOCK]; 130 | int i; 131 | 132 | memset(p, 0, sizeof(p)); 133 | 134 | for (i = 0; i < ((UMM_LASTBLOCK * 3) / 4); ++i) { 135 | p[i] = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 136 | } 137 | 138 | for (i = 1; i < ((UMM_LASTBLOCK * 3) / 4); i += 2) { 139 | umm_free(p[i]); 140 | } 141 | 142 | #ifdef UMM_INLINE_METRICS 143 | CHECK_COMPARE(61, ==, umm_fragmentation_metric()); 144 | #endif 145 | #ifdef UMM_INFO 146 | umm_info(0, false); 147 | CHECK_COMPARE(61, ==, umm_fragmentation_metric()); 148 | #endif 149 | } 150 | 151 | TEST(Metrics, testMetricsSparse1of2) 152 | { 153 | void *p[UMM_LASTBLOCK]; 154 | int i; 155 | 156 | memset(p, 0, sizeof(p)); 157 | 158 | for (i = 0; i < ((UMM_LASTBLOCK * 1) / 2); ++i) { 159 | p[i] = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 160 | } 161 | 162 | for (i = 1; i < ((UMM_LASTBLOCK * 1) / 2); i += 2) { 163 | umm_free(p[i]); 164 | } 165 | 166 | #ifdef UMM_INLINE_METRICS 167 | CHECK_COMPARE(34, ==, umm_fragmentation_metric()); 168 | #endif 169 | #ifdef UMM_INFO 170 | umm_info(0, false); 171 | CHECK_COMPARE(34, ==, umm_fragmentation_metric()); 172 | #endif 173 | } 174 | 175 | TEST(Metrics, testMetricsSparse1of4) 176 | { 177 |
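    // The expected values in these sparse tests (99, 78, 61, 34, 15, 7)
    // follow from the fragmentation metric computed in umm_info.c:
    // 100 - (100 * sqrt(sum(free_i^2)) / sum(free_i)), where free_i are
    // the sizes of the individual free regions.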
void *p[UMM_LASTBLOCK]; 178 | int i; 179 | 180 | memset(p, 0, sizeof(p)); 181 | 182 | for (i = 0; i < ((UMM_LASTBLOCK * 1) / 4); ++i) { 183 | p[i] = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 184 | } 185 | 186 | for (i = 1; i < ((UMM_LASTBLOCK * 1) / 4); i += 2) { 187 | umm_free(p[i]); 188 | } 189 | 190 | #ifdef UMM_INLINE_METRICS 191 | CHECK_COMPARE(15, ==, umm_fragmentation_metric()); 192 | #endif 193 | #ifdef UMM_INFO 194 | umm_info(0, false); 195 | CHECK_COMPARE(15, ==, umm_fragmentation_metric()); 196 | #endif 197 | } 198 | 199 | TEST(Metrics, testMetricsSparse1of8) 200 | { 201 | void *p[UMM_LASTBLOCK]; 202 | int i; 203 | 204 | memset(p, 0, sizeof(p)); 205 | 206 | for (i = 0; i < ((UMM_LASTBLOCK * 1) / 8); ++i) { 207 | p[i] = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 208 | } 209 | 210 | for (i = 1; i < ((UMM_LASTBLOCK * 1) / 8); i += 2) { 211 | umm_free(p[i]); 212 | } 213 | 214 | #ifdef UMM_INLINE_METRICS 215 | CHECK_COMPARE(7, ==, umm_fragmentation_metric()); 216 | #endif 217 | #ifdef UMM_INFO 218 | umm_info(0, false); 219 | CHECK_COMPARE(7, ==, umm_fragmentation_metric()); 220 | #endif 221 | } 222 | -------------------------------------------------------------------------------- /cpputest/test_MultiMalloc.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | ///* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */ 5 | // 6 | //#define DBGLOG_LEVEL 0 7 | 8 | //#ifdef DBGLOG_ENABLE 9 | // #include "dbglog/dbglog.h" 10 | //#endif 11 | 12 | #include "CppUTest/TestHarness.h" 13 | 14 | static void *p[5]; 15 | 16 | TEST_GROUP(MultiMalloc) 17 | { 18 | void setup(void) { 19 | umm_init(); 20 | umm_critical_depth = 0; 21 | umm_max_critical_depth = 0; 22 | } 23 | 24 | void teardown(void) { 25 | CHECK_COMPARE(1, >= ,umm_max_critical_depth); 26 | } 27 | }; 28 | 29 | IGNORE_TEST(MultiMalloc, testMultiMallocWithMultipleHeaps) 30 | { 31 | FAIL("Currently this only tests the interface to umm_malloc, not multiple heaps"); 32 | } 33 | 34 | struct block_test_values MultiMallocManySmall_test_values[] = { 35 | {0, false, 1, 0, 6, 6} 36 | , {1, false, 2, 0, 0, 0} 37 | , {2, false, 3, 1, 0, 0} 38 | , {3, false, 4, 2, 0, 0} 39 | , {4, false, 5, 3, 0, 0} 40 | , {5, false, 6, 4, 0, 0} 41 | , {6, true, UMM_LASTBLOCK, 5, 0, 0} 42 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 43 | }; 44 | 45 | TEST(MultiMalloc, testMultiMallocManySmall) 46 | { 47 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE)); 48 | POINTERS_EQUAL((void *)&test_umm_heap[2][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE)); 49 | POINTERS_EQUAL((void *)&test_umm_heap[3][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE)); 50 | POINTERS_EQUAL((void *)&test_umm_heap[4][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE)); 51 | POINTERS_EQUAL((void *)&test_umm_heap[5][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE)); 52 | 53 | CHECK_TRUE(check_blocks(MultiMallocManySmall_test_values, ARRAYELEMENTCOUNT(MultiMallocManySmall_test_values))); 54 | } 55 | 56 | struct block_test_values MultiMallocManyMed_test_values[] = { 57 | { 0, false, 1, 0, 2501, 2501} 58 | , { 1, false, 501, 0, 0, 0} 59 | , { 501, false, 1001, 1, 0, 0} 60 | , {1001, false, 1501, 501, 0, 0} 61 | , {1501, false, 2001, 1001, 0, 0} 62 | , {2001, false, 2501, 1501, 0, 0} 63 | , {2501, true, UMM_LASTBLOCK, 2001, 0, 0} 64 | , {UMM_LASTBLOCK, false, 0, 2501, 0, 0} 65 | }; 66 | 67 | TEST(MultiMalloc, testMultiMallocManyMed) 68 | { 69 |
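    // Each request below is sized so that it apparently rounds up to exactly
    // 500 blocks (the first block of an allocation has a slightly smaller
    // usable body than the rest), which is why successive allocations land
    // at block indexes 1, 501, 1001, 1501 and 2001.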
POINTERS_EQUAL((void *)&test_umm_heap[ 1][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_BLOCK_BODY_SIZE * 500 - UMM_FIRST_BLOCK_BODY_SIZE)); 70 | POINTERS_EQUAL((void *)&test_umm_heap[ 501][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_BLOCK_BODY_SIZE * 500 - UMM_FIRST_BLOCK_BODY_SIZE)); 71 | POINTERS_EQUAL((void *)&test_umm_heap[1001][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_BLOCK_BODY_SIZE * 500 - UMM_FIRST_BLOCK_BODY_SIZE)); 72 | POINTERS_EQUAL((void *)&test_umm_heap[1501][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_BLOCK_BODY_SIZE * 500 - UMM_FIRST_BLOCK_BODY_SIZE)); 73 | POINTERS_EQUAL((void *)&test_umm_heap[2001][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_BLOCK_BODY_SIZE * 500 - UMM_FIRST_BLOCK_BODY_SIZE)); 74 | 75 | CHECK_TRUE(check_blocks(MultiMallocManyMed_test_values, ARRAYELEMENTCOUNT(MultiMallocManyMed_test_values))); 76 | } 77 | 78 | struct block_test_values MultiMallocManyLarge_test_values[] = { 79 | { 0, false, 1, 0, 7501, 7501} 80 | , { 1, false, 2501, 0, 0, 0} 81 | , { 2501, false, 5001, 1, 0, 0} 82 | , { 5001, false, 7501, 2501, 0, 0} 83 | , { 7501, true, UMM_LASTBLOCK, 5001, 0, 0} 84 | , {UMM_LASTBLOCK, false, 0, 7501, 0, 0} 85 | }; 86 | 87 | TEST(MultiMalloc, testMultiMallocManyLarge) 88 | { 89 | POINTERS_EQUAL((void *)&test_umm_heap[ 1][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_BLOCK_BODY_SIZE * 2500 - UMM_FIRST_BLOCK_BODY_SIZE)); 90 | POINTERS_EQUAL((void *)&test_umm_heap[2501][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_BLOCK_BODY_SIZE * 2500 - UMM_FIRST_BLOCK_BODY_SIZE)); 91 | POINTERS_EQUAL((void *)&test_umm_heap[5001][UMM_BLOCK_HEADER_SIZE], umm_malloc(UMM_BLOCK_BODY_SIZE * 2500 - UMM_FIRST_BLOCK_BODY_SIZE)); 92 | POINTERS_EQUAL((void *)NULL, umm_malloc(UMM_BLOCK_BODY_SIZE * 2500 - UMM_FIRST_BLOCK_BODY_SIZE)); 93 | 94 | CHECK_TRUE(check_blocks(MultiMallocManyLarge_test_values, ARRAYELEMENTCOUNT(MultiMallocManyLarge_test_values))); 95 | } 96 | -------------------------------------------------------------------------------- /cpputest/test_Poison.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #warning "Add support for the DBGLOG macros - consider a more generic log that plugs into Linux or Segger" 5 | ///* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */ 6 | // 7 | //#define DBGLOG_LEVEL 0 8 | 9 | //#ifdef DBGLOG_ENABLE 10 | // #include "dbglog/dbglog.h" 11 | //#endif 12 | 13 | #include "CppUTest/TestHarness.h" 14 | 15 | TEST_GROUP(Poison) 16 | { 17 | void setup(void) { 18 | umm_init(); 19 | umm_critical_depth = 0; 20 | umm_max_critical_depth = 0; 21 | } 22 | 23 | void teardown(void) { 24 | CHECK_COMPARE(1, >= ,umm_max_critical_depth); 25 | } 26 | }; 27 | 28 | #ifdef UMM_POISON_CHECK 29 | TEST(Poison, testPoisonFirst) 30 | { 31 | CHECK_COMPARE((long int)NULL, !=, umm_test_functions.umm_test_malloc(UMM_FIRST_BLOCK_BODY_SIZE)); 32 | } 33 | 34 | TEST(Poison, testPoisonClobberLeading) 35 | { 36 | void *p = umm_test_functions.umm_test_malloc(UMM_BLOCK_BODY_SIZE * 8); 37 | 38 | p = (char *)p - 1; 39 | *(char *)p = 0x00; 40 | 41 | CHECK_COMPARE(0, ==, POISON_CHECK()); 42 | } 43 | 44 | TEST(Poison, testPoisonClobberTrailing) 45 | { 46 | void *p = umm_test_functions.umm_test_malloc(UMM_BLOCK_BODY_SIZE * 8); 47 | 48 | p = (char *)p + UMM_BLOCK_BODY_SIZE * 8; 49 | *(char *)p = 0x00; 50 | 51 | CHECK_COMPARE(0, ==, POISON_CHECK()); 52 | } 53 | #endif 54 | 55 | TEST(Poison, testPoisonRandom) 56 | { 57 | void *p[100]; 58 | int i,j; 59 | size_t s; 60 | 61 | srand32(0); 62 | 63 | for (i = 0; i < 100; ++i) { 64 | p[i] = (void *)NULL; 65 | }
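    // Churn the heap: each pass first verifies the whole heap with the
    // integrity and poison checks, then frees a random slot and immediately
    // reallocates it with a random size, so any corruption is caught within
    // one iteration of the operation that caused it.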
66 | 67 | for (i = 0; i < 100000; ++i) { 68 | 69 | CHECK_COMPARE(0, !=, INTEGRITY_CHECK()); 70 | CHECK_COMPARE(0, !=, POISON_CHECK()); 71 | 72 | j = rand32() % 100; 73 | s = rand32() % 64; 74 | 75 | if (p[j]) { 76 | umm_test_functions.umm_test_free(p[j]); 77 | } 78 | 79 | p[j] = umm_test_functions.umm_test_malloc(normalize_allocation_size(s)); 80 | 81 | if (0 == s) { 82 | CHECK_COMPARE((long int)NULL, ==, p[j]); 83 | } else { 84 | CHECK_COMPARE((long int)NULL, !=, p[j]); 85 | } 86 | } 87 | } 88 | 89 | TEST(Poison, testPoisonStress) 90 | { 91 | uint64_t t = stress_test(100 * 1000, &umm_test_functions); 92 | 93 | umm_info(0, true); 94 | DBGLOG_FORCE(true, "Free Heap Size: %d\n", umm_free_heap_size()); 95 | DBGLOG_FORCE(true, "Typical Time (usec): %f\n", (double)t / ((100 * 1000))); 96 | } 97 | 98 | TEST(Poison, testPoisonStressLoop) 99 | { 100 | int i; 101 | uint64_t t = 0; 102 | uint64_t total = 0; 103 | 104 | for (i = 0; i < 4; ++i) { 105 | umm_init(); 106 | t = stress_test(100 * 1000, &umm_test_functions); 107 | umm_info(0, false); 108 | DBGLOG_FORCE(true, "Free Heap Size: %d\n", umm_free_heap_size()); 109 | DBGLOG_FORCE(true, "Typical Time (usec): %f\n", (double)t / ((100 * 1000))); 110 | total += t; 111 | } 112 | 113 | DBGLOG_FORCE(true, "Typical Time (usec): %f\n", (double)total / (4 * (100 * 1000))); 114 | } 115 | -------------------------------------------------------------------------------- /cpputest/test_Realloc.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | ///* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */ 5 | // 6 | //#define DBGLOG_LEVEL 0 7 | // 8 | //#ifdef DBGLOG_ENABLE 9 | // #include "dbglog/dbglog.h" 10 | //#endif 11 | 12 | #include "CppUTest/TestHarness.h" 13 | 14 | TEST_GROUP(Realloc) 15 | { 16 | void setup(void) { 17 | umm_init(); 18 | umm_critical_depth = 0; 19 | umm_max_critical_depth = 0; 20 | } 21 | 22 | void teardown(void) { 23 | CHECK_COMPARE(1, >= ,umm_max_critical_depth); 24 | } 25 | }; 26 | 27 | struct block_test_values ReallocTooBig_test_values[] = { 28 | {0, false, 1, 0, 2, 2} 29 | , {1, false, 2, 0, 0, 0} 30 | , {2, true, UMM_LASTBLOCK, 1, 0, 0} 31 | , {UMM_LASTBLOCK, false, 0, 2, 0, 0} 32 | }; 33 | 34 | TEST(Realloc, testReallocTooBig) 35 | { 36 | void *foo = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 37 | 38 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], foo); 39 | CHECK_TRUE(check_blocks(ReallocTooBig_test_values, ARRAYELEMENTCOUNT(ReallocTooBig_test_values))); 40 | 41 | // Realloc with a request that is too big should return NULL and leave the original memory untouched. 42 | 43 | POINTERS_EQUAL((void *)NULL, (umm_realloc(foo,UMM_BLOCK_BODY_SIZE * (SUPPORT_UMM_MALLOC_BLOCKS - 2)))); 44 | CHECK_TRUE(check_blocks(ReallocTooBig_test_values, ARRAYELEMENTCOUNT(ReallocTooBig_test_values))); 45 | } 46 | 47 | struct block_test_values ReallocSameSize_test_values[] = { 48 | {0, false, 1, 0, 2, 2} 49 | , {1, false, 2, 0, 0, 0} 50 | , {2, true, UMM_LASTBLOCK, 1, 0, 0} 51 | , {UMM_LASTBLOCK, false, 0, 2, 0, 0} 52 | }; 53 | 54 | TEST(Realloc, testReallocSameSize) 55 | { 56 | void *foo = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE / 2); 57 | 58 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], foo); 59 | CHECK_TRUE(check_blocks(ReallocSameSize_test_values, ARRAYELEMENTCOUNT(ReallocSameSize_test_values))); 60 | 61 | // Realloc with a request that is same size or block size should leave the original memory untouched. 
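    // (umm_realloc() works in whole blocks, so any request that still fits
    // in the blocks already owned by the allocation is satisfied in place.)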
62 | 63 | POINTERS_EQUAL((void *)foo, (umm_realloc(foo, UMM_FIRST_BLOCK_BODY_SIZE / 2))); 64 | CHECK_TRUE(check_blocks(ReallocSameSize_test_values, ARRAYELEMENTCOUNT(ReallocSameSize_test_values))); 65 | 66 | // Realloc with a request that is same size or block size should leave the original memory untouched. 67 | 68 | POINTERS_EQUAL((void *)foo, (umm_realloc(foo, 1))); 69 | CHECK_TRUE(check_blocks(ReallocSameSize_test_values, ARRAYELEMENTCOUNT(ReallocSameSize_test_values))); 70 | 71 | // Realloc with a request that is same size or block size should leave the original memory untouched. 72 | 73 | POINTERS_EQUAL((void *)foo, (umm_realloc(foo, UMM_FIRST_BLOCK_BODY_SIZE))); 74 | CHECK_TRUE(check_blocks(ReallocSameSize_test_values, ARRAYELEMENTCOUNT(ReallocSameSize_test_values))); 75 | } 76 | 77 | struct block_test_values ReallocFree_test_values[] = { 78 | {0, false, 1, 0, 1, 1} 79 | , {1, true, UMM_LASTBLOCK, 0, 0, 0} 80 | , {UMM_LASTBLOCK, false, 0, 1, 0, 0} 81 | }; 82 | 83 | TEST(Realloc, testReallocFree) 84 | { 85 | void *foo = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE / 2); 86 | 87 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], foo); 88 | 89 | // Realloc with a request that is 0 size should free the block 90 | 91 | POINTERS_EQUAL((void *)NULL, (umm_realloc(foo, 0))); 92 | CHECK_TRUE(check_blocks(ReallocFree_test_values, ARRAYELEMENTCOUNT(ReallocFree_test_values))); 93 | } 94 | 95 | struct block_test_values ReallocFreeRealloc_test_values[] = { 96 | {0, false, 1, 0, 2, 2} 97 | , {1, false, 2, 0, 0, 0} 98 | , {2, true, UMM_LASTBLOCK, 1, 0, 0} 99 | , {UMM_LASTBLOCK, false, 0, 2, 0, 0} 100 | }; 101 | 102 | TEST(Realloc, testReallocFreeRealloc) 103 | { 104 | void *foo = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE / 2); 105 | 106 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], foo); 107 | CHECK_TRUE(check_blocks(ReallocFreeRealloc_test_values, ARRAYELEMENTCOUNT(ReallocFreeRealloc_test_values))); 108 | 109 | // Realloc with a request that is 0 size should free the block 110 | 111 | POINTERS_EQUAL((void *)NULL, (umm_realloc(foo, 0))); 112 | 113 | // Realloc with a request that is same size or block size should leave the original memory untouched. 
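    // (Note that umm_realloc() is passed NULL here, so it acts like
    // umm_malloc() and hands back the block just freed - which is why the
    // result is compared against the stale pointer foo.)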
114 | 115 | POINTERS_EQUAL((void *)foo, (umm_realloc(NULL, UMM_FIRST_BLOCK_BODY_SIZE))); 116 | CHECK_TRUE(check_blocks(ReallocSameSize_test_values, ARRAYELEMENTCOUNT(ReallocSameSize_test_values))); 117 | } 118 | 119 | struct block_test_values ReallocAssimilateUpExact[] = { 120 | {0, false, 1, 0, 5, 5} 121 | , {1, false, 2, 0, 0, 0} 122 | , {2, false, 4, 1, 0, 0} 123 | , {4, false, 5, 2, 0, 0} 124 | , {5, true, UMM_LASTBLOCK, 4, 0, 0} 125 | , {UMM_LASTBLOCK, false, 0, 5, 0, 0} 126 | }; 127 | 128 | TEST(Realloc, testReallocAssimilateUpExact) 129 | { 130 | void *mem0 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 131 | void *mem1 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 132 | void *mem2 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 133 | void *mem3 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 134 | 135 | POINTERS_EQUAL((void *)&test_umm_heap[ 1][UMM_BLOCK_HEADER_SIZE], mem0); 136 | POINTERS_EQUAL((void *)&test_umm_heap[ 2][UMM_BLOCK_HEADER_SIZE], mem1); 137 | POINTERS_EQUAL((void *)&test_umm_heap[ 3][UMM_BLOCK_HEADER_SIZE], mem2); 138 | POINTERS_EQUAL((void *)&test_umm_heap[ 4][UMM_BLOCK_HEADER_SIZE], mem3); 139 | 140 | // Free a middle block and then realloc the first block to use it 141 | 142 | umm_free(mem2); 143 | 144 | POINTERS_EQUAL((void *)mem1, (umm_realloc(mem1, UMM_FIRST_BLOCK_BODY_SIZE + 1))); 145 | CHECK_TRUE(check_blocks(ReallocAssimilateUpExact, ARRAYELEMENTCOUNT(ReallocAssimilateUpExact))); 146 | } 147 | 148 | struct block_test_values ReallocAssimilateUp[] = { 149 | {0, false, 1, 0, 4, 6} 150 | , {1, false, 2, 0, 0, 0} 151 | , {2, false, 4, 1, 0, 0} 152 | , {4, true, 5, 2, 6, 0} 153 | , {5, false, 6, 4, 0, 0} 154 | , {6, true, UMM_LASTBLOCK, 5, 0, 4} 155 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 156 | }; 157 | 158 | TEST(Realloc, testReallocAssimilateUp) 159 | { 160 | void *mem0 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 161 | void *mem1 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 162 | void *mem2 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 163 | void *mem3 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 164 | void *mem4 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 165 | 166 | POINTERS_EQUAL((void *)&test_umm_heap[ 1][UMM_BLOCK_HEADER_SIZE], mem0); 167 | POINTERS_EQUAL((void *)&test_umm_heap[ 2][UMM_BLOCK_HEADER_SIZE], mem1); 168 | POINTERS_EQUAL((void *)&test_umm_heap[ 3][UMM_BLOCK_HEADER_SIZE], mem2); 169 | POINTERS_EQUAL((void *)&test_umm_heap[ 4][UMM_BLOCK_HEADER_SIZE], mem3); 170 | POINTERS_EQUAL((void *)&test_umm_heap[ 5][UMM_BLOCK_HEADER_SIZE], mem4); 171 | 172 | // Free two middle block and then realloc the first block to use one of them 173 | 174 | umm_free(mem2); 175 | umm_free(mem3); 176 | 177 | POINTERS_EQUAL((void *)mem1, (umm_realloc(mem1, UMM_FIRST_BLOCK_BODY_SIZE + 8))); 178 | CHECK_TRUE(check_blocks(ReallocAssimilateUp, ARRAYELEMENTCOUNT(ReallocAssimilateUp))); 179 | } 180 | 181 | struct block_test_values ReallocAssimilateDown[] = { 182 | {0, false, 1, 0, 4, 4} 183 | , {1, false, 3, 0, 0, 0} 184 | , {3, false, 4, 1, 0, 0} 185 | , {4, true, UMM_LASTBLOCK, 3, 0, 0} 186 | , {UMM_LASTBLOCK, false, 0, 4, 0, 0} 187 | }; 188 | 189 | TEST(Realloc, testReallocAssimilateDown) 190 | { 191 | void *mem0 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 192 | void *mem1 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 193 | void *mem2 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 194 | 195 | POINTERS_EQUAL((void *)&test_umm_heap[ 1][UMM_BLOCK_HEADER_SIZE], mem0); 196 | POINTERS_EQUAL((void *)&test_umm_heap[ 2][UMM_BLOCK_HEADER_SIZE], mem1); 197 | POINTERS_EQUAL((void *)&test_umm_heap[ 3][UMM_BLOCK_HEADER_SIZE], mem2); 198 | 199 | // 
Free the first block and then realloc the middle block to use it 200 | 201 | umm_free(mem0); 202 | 203 | POINTERS_EQUAL((void *)mem0, (umm_realloc(mem1, UMM_FIRST_BLOCK_BODY_SIZE + 1))); 204 | CHECK_TRUE(check_blocks(ReallocAssimilateDown, ARRAYELEMENTCOUNT(ReallocAssimilateDown))); 205 | } 206 | 207 | struct block_test_values ReallocAssimilateUpDown[] = { 208 | {0, false, 1, 0, 5, 5} 209 | , {1, false, 4, 0, 0, 0} 210 | , {4, false, 5, 1, 0, 0} 211 | , {5, true, UMM_LASTBLOCK, 4, 0, 0} 212 | , {UMM_LASTBLOCK, false, 0, 5, 0, 0} 213 | }; 214 | 215 | TEST(Realloc, testReallocAssimilateUpDown) 216 | { 217 | void *mem0 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 218 | void *mem1 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 219 | void *mem2 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 220 | void *mem3 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 221 | 222 | POINTERS_EQUAL((void *)&test_umm_heap[ 1][UMM_BLOCK_HEADER_SIZE], mem0); 223 | POINTERS_EQUAL((void *)&test_umm_heap[ 2][UMM_BLOCK_HEADER_SIZE], mem1); 224 | POINTERS_EQUAL((void *)&test_umm_heap[ 3][UMM_BLOCK_HEADER_SIZE], mem2); 225 | POINTERS_EQUAL((void *)&test_umm_heap[ 4][UMM_BLOCK_HEADER_SIZE], mem3); 226 | 227 | // Free the first and third block and then realloc the middle block to use both 228 | 229 | umm_free(mem0); 230 | umm_free(mem2); 231 | 232 | POINTERS_EQUAL((void *)mem0, (umm_realloc(mem1, UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE * 2))); 233 | CHECK_TRUE(check_blocks(ReallocAssimilateUpDown, ARRAYELEMENTCOUNT(ReallocAssimilateUpDown))); 234 | } 235 | 236 | struct block_test_values ReallocAssimilateForceDown[] = { 237 | {0, false, 1, 0, 3, 7} 238 | , {1, false, 3, 0, 0, 0} 239 | , {3, true, 6, 1, 7, 0} 240 | , {6, false, 7, 3, 0, 0} 241 | , {7, true, UMM_LASTBLOCK, 6, 0, 3} 242 | , {UMM_LASTBLOCK, false, 0, 7, 0, 0} 243 | }; 244 | 245 | TEST(Realloc, testReallocAssimilateForceDown) 246 | { 247 | void *mem0 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE); 248 | void *mem1 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 249 | void *mem2 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE); 250 | void *mem3 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 251 | 252 | POINTERS_EQUAL((void *)&test_umm_heap[ 1][UMM_BLOCK_HEADER_SIZE], mem0); 253 | POINTERS_EQUAL((void *)&test_umm_heap[ 3][UMM_BLOCK_HEADER_SIZE], mem1); 254 | POINTERS_EQUAL((void *)&test_umm_heap[ 4][UMM_BLOCK_HEADER_SIZE], mem2); 255 | POINTERS_EQUAL((void *)&test_umm_heap[ 6][UMM_BLOCK_HEADER_SIZE], mem3); 256 | 257 | // Free the first and third blocks and then realloc the 258 | // middle block to use the freed space - force down 259 | 260 | umm_free(mem0); 261 | umm_free(mem2); 262 | 263 | POINTERS_EQUAL((void *)mem0, (umm_realloc(mem1, UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE))); 264 | CHECK_TRUE(check_blocks(ReallocAssimilateForceDown, ARRAYELEMENTCOUNT(ReallocAssimilateForceDown))); 265 | } 266 | 267 | struct block_test_values ReallocNewBlock[] = { 268 | {0, false, 1, 0, 2, 6} 269 | , {1, false, 2, 0, 0, 0} 270 | , {2, true, 3, 1, 6, 0} 271 | , {3, false, 4, 2, 0, 0} 272 | , {4, false, 6, 3, 0, 0} 273 | , {6, true, UMM_LASTBLOCK, 4, 0, 2} 274 | , {UMM_LASTBLOCK, false, 0, 6, 0, 0} 275 | }; 276 | 277 | TEST(Realloc, testReallocNewBlock) 278 | { 279 | void *mem0 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 280 | void *mem1 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 281 | void *mem2 = umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE); 282 | 283 | POINTERS_EQUAL((void *)&test_umm_heap[ 1][UMM_BLOCK_HEADER_SIZE], mem0); 284 | POINTERS_EQUAL((void *)&test_umm_heap[ 
2][UMM_BLOCK_HEADER_SIZE], mem1); 285 | POINTERS_EQUAL((void *)&test_umm_heap[ 3][UMM_BLOCK_HEADER_SIZE], mem2); 286 | 287 | /* Realloc the middle block - should need a totally new block */ 288 | 289 | POINTERS_EQUAL((void *)&test_umm_heap[ 4][UMM_BLOCK_HEADER_SIZE], (umm_realloc(mem1, UMM_FIRST_BLOCK_BODY_SIZE + UMM_BLOCK_BODY_SIZE))); 290 | CHECK_TRUE(check_blocks(ReallocNewBlock, ARRAYELEMENTCOUNT(ReallocNewBlock))); 291 | } 292 | -------------------------------------------------------------------------------- /cpputest/test_Stress.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | ///* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */ 5 | // 6 | //#define DBGLOG_LEVEL 0 7 | 8 | //#ifdef DBGLOG_ENABLE 9 | // #include "dbglog/dbglog.h" 10 | //#endif 11 | 12 | #include "CppUTest/TestHarness.h" 13 | 14 | TEST_GROUP(Stress) 15 | { 16 | void setup(void) { 17 | umm_init(); 18 | umm_critical_depth = 0; 19 | umm_max_critical_depth = 0; 20 | } 21 | 22 | void teardown(void) { 23 | CHECK_COMPARE(1, >= ,umm_max_critical_depth); 24 | } 25 | }; 26 | 27 | TEST(Stress, testStress) 28 | { 29 | uint64_t t = stress_test(100 * 1/*000*/, &umm_test_functions); 30 | 31 | umm_info(0, true); 32 | DBGLOG_FORCE(true, "Free Heap Size: %d\n", umm_free_heap_size()); 33 | DBGLOG_FORCE(true, "Typical Time (usec): %f\n", (double)t / ((100 * 1/*000*/))); 34 | } 35 | 36 | TEST(Stress, testStressLoop) 37 | { 38 | int i; 39 | uint64_t t = 0; 40 | uint64_t total = 0; 41 | 42 | for (i = 0; i < 4; ++i) { 43 | umm_init(); 44 | t = stress_test(100 * 1/*000*/, &umm_test_functions); 45 | umm_info(0, false); 46 | DBGLOG_FORCE(true, "Free Heap Size: %d\n", umm_free_heap_size()); 47 | DBGLOG_FORCE(true, "Typical Time (usec): %f\n", (double)t / ((100 * 1/*000*/))); 48 | total += t; 49 | } 50 | 51 | DBGLOG_FORCE(true, "Typical Time (usec): %f\n", (double)total / (4 * (100 * 1/*000*/))); 52 | } 53 | -------------------------------------------------------------------------------- /cpputest/test_TooBigMalloc.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | ///* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */ 5 | // 6 | //#define DBGLOG_LEVEL 0 7 | 8 | //#ifdef DBGLOG_ENABLE 9 | // #include "dbglog/dbglog.h" 10 | //#endif 11 | 12 | #include "CppUTest/TestHarness.h" 13 | 14 | TEST_GROUP(TooBigMalloc) 15 | { 16 | void setup(void) { 17 | umm_init(); 18 | umm_critical_depth = 0; 19 | umm_max_critical_depth = 0; 20 | } 21 | 22 | void teardown(void) { 23 | CHECK_COMPARE(1, >= ,umm_max_critical_depth); 24 | } 25 | }; 26 | 27 | static struct block_test_values Initialization_test_values[] = { 28 | {0, false, 1, 0, 1, 1} 29 | , {1, true, UMM_LASTBLOCK, 0, 0, 0} 30 | , {UMM_LASTBLOCK, false, 0, 1, 0, 0} 31 | }; 32 | 33 | TEST(TooBigMalloc, testHeapFirstMallocMaxHeapBlocks) 34 | { 35 | POINTERS_EQUAL((void *)NULL, (umm_malloc((SUPPORT_UMM_MALLOC_BLOCKS) * UMM_BLOCK_BODY_SIZE))); 36 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 37 | } 38 | 39 | TEST(TooBigMalloc, testHeapFirstMallocMaxHeapBlocksMinus1) 40 | { 41 | POINTERS_EQUAL((void *)NULL, (umm_malloc((SUPPORT_UMM_MALLOC_BLOCKS-1) * UMM_BLOCK_BODY_SIZE))); 42 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 43 | } 44 | 45 | TEST(TooBigMalloc, testHeapFirstMallocMaxHeapBlocksMinus2) 46 | { 47 | POINTERS_EQUAL((void *)NULL,
(umm_malloc((SUPPORT_UMM_MALLOC_BLOCKS-2) * UMM_BLOCK_BODY_SIZE))); 48 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 49 | } 50 | 51 | TEST(TooBigMalloc, testHeapFirstMallocInsaneBigMalloc) 52 | { 53 | POINTERS_EQUAL((void *)NULL, (umm_malloc((INT16_MAX + 1) * UMM_BLOCK_BODY_SIZE))); 54 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 55 | } 56 | 57 | struct block_test_values MallocMaxHeapBlocksMinus3_test_values[] = { 58 | {0, false, 1, 0, 0, 0} 59 | , {1, false, UMM_LASTBLOCK, 0, 0, 0} 60 | , {UMM_LASTBLOCK, false, 0, 1, 0, 0} 61 | }; 62 | 63 | TEST(TooBigMalloc, testHeapFirstMallocMaxHeapBlocksMinus3) 64 | { 65 | // This is a fairly complex test, so we will break it down 66 | // 67 | // First allocate the largest block possible ... 68 | // 69 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc((SUPPORT_UMM_MALLOC_BLOCKS-3) * UMM_BLOCK_BODY_SIZE))); 70 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksMinus3_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksMinus3_test_values))); 71 | 72 | // Then free it ... 73 | umm_free((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE]); 74 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 75 | } 76 | 77 | struct block_test_values MallocMaxHeapBlocksBig_test_values[] = { 78 | {0, false, 1, 0, UMM_LASTBLOCK-1, UMM_LASTBLOCK-1} 79 | , {1, false, UMM_LASTBLOCK-1, 0, 0, 0} 80 | , {UMM_LASTBLOCK-1, true, UMM_LASTBLOCK, 1, 0, 0} 81 | , {UMM_LASTBLOCK, false, 0, UMM_LASTBLOCK-1, 0, 0} 82 | }; 83 | 84 | struct block_test_values MallocMaxHeapBlocksBigThenSmall_test_values[] = { 85 | {0, false, 1, 0, 0, 0} 86 | , {1, false, UMM_LASTBLOCK-1, 0, 0, 0} 87 | , {UMM_LASTBLOCK-1, 0, UMM_LASTBLOCK, 1, 0, 0} 88 | , {UMM_LASTBLOCK, false, 0, UMM_LASTBLOCK-1, 0, 0} 89 | }; 90 | 91 | TEST(TooBigMalloc, testHeapTooBigMalloc_BigThenSmallMax) 92 | { 93 | // This is a fairly complex test, so we will break it down 94 | // 95 | // First allocate the largest block possible that leaves exactly one block free ... 96 | // 97 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc((SUPPORT_UMM_MALLOC_BLOCKS-4) * UMM_BLOCK_BODY_SIZE))); 98 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksBig_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksBig_test_values))); 99 | 100 | // Then allocate exactly one more block ... 101 | // 102 | POINTERS_EQUAL((void *)&test_umm_heap[UMM_LASTBLOCK-1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(1))); 103 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksBigThenSmall_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksBigThenSmall_test_values))); 104 | 105 | // Then allocate exactly one more block ... which should fail 106 | // 107 | POINTERS_EQUAL((void *)NULL, (umm_malloc(1))); 108 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksBigThenSmall_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksBigThenSmall_test_values))); 109 | 110 | // Then free the last block ... 111 | umm_free((void *)&test_umm_heap[UMM_LASTBLOCK-1][UMM_BLOCK_HEADER_SIZE]); 112 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksBig_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksBig_test_values))); 113 | 114 | // Then free the first block ... 
which should get us back to the initialized state 115 | umm_free((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE]); 116 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 117 | } 118 | 119 | struct block_test_values MallocMaxHeapBlocksSmall_test_values[] = { 120 | {0, false, 1, 0, 2, 2} 121 | , {1, false, 2, 0, 0, 0} 122 | , {2, true, UMM_LASTBLOCK, 1, 0, 0} 123 | , {UMM_LASTBLOCK, false, 0, 2, 0, 0} 124 | }; 125 | 126 | struct block_test_values MallocMaxHeapBlocksSmallThenBig_test_values[] = { 127 | {0, false, 1, 0, 0, 0} 128 | , {1, false, 2, 0, 0, 0} 129 | , {2, 0, UMM_LASTBLOCK, 1, 0, 0} 130 | , {UMM_LASTBLOCK, false, 0, 2, 0, 0} 131 | }; 132 | 133 | TEST(TooBigMalloc, testHeapTooBigMalloc_SmallThenBigMax) 134 | { 135 | // This is a fairly complex test, so we will break it down 136 | // 137 | // First allocate the smallest block possible that leaves a large block free ... 138 | // 139 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(1))); 140 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksSmall_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksSmall_test_values))); 141 | 142 | // Then allocate the largest possible leftover block ... 143 | // 144 | POINTERS_EQUAL((void *)&test_umm_heap[2][UMM_BLOCK_HEADER_SIZE], (umm_malloc((SUPPORT_UMM_MALLOC_BLOCKS-4) * UMM_BLOCK_BODY_SIZE))); 145 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksSmallThenBig_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksSmallThenBig_test_values))); 146 | 147 | // Then allocate exactly one more block ... which should fail 148 | // 149 | POINTERS_EQUAL((void *)NULL, (umm_malloc(1))); 150 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksSmallThenBig_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksSmallThenBig_test_values))); 151 | 152 | // Then free the large block ... 153 | umm_free((void *)&test_umm_heap[2][UMM_BLOCK_HEADER_SIZE]); 154 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksSmall_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksSmall_test_values))); 155 | 156 | // Then free the small block ... which should get us back to the initialized state 157 | umm_free((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE]); 158 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 159 | } 160 | 161 | struct block_test_values MallocMaxHeapBlocksSmallThenBig_ReverseFreetest_values[] = { 162 | {0, false, 1, 0, 1, 1} 163 | , {1, true, 2, 0, 0, 0} 164 | , {2, 0, UMM_LASTBLOCK, 1, 0, 0} 165 | , {UMM_LASTBLOCK, false, 0, 2, 0, 0} 166 | }; 167 | 168 | TEST(TooBigMalloc, testHeapTooBigMalloc_SmallThenBigMax_ReverseFree) 169 | { 170 | // This is a fairly complex test, so we will break it down 171 | // 172 | // First allocate the smallest block possible that leaves a large block free ... 173 | // 174 | POINTERS_EQUAL((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE], (umm_malloc(1))); 175 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksSmall_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksSmall_test_values))); 176 | 177 | // Then allocate the largest possible leftover block ... 178 | // 179 | POINTERS_EQUAL((void *)&test_umm_heap[2][UMM_BLOCK_HEADER_SIZE], (umm_malloc((SUPPORT_UMM_MALLOC_BLOCKS-4) * UMM_BLOCK_BODY_SIZE))); 180 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksSmallThenBig_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksSmallThenBig_test_values))); 181 | 182 | // Then allocate exactly one more block ... 
which should fail 183 | // 184 | POINTERS_EQUAL((void *)NULL, (umm_malloc(1))); 185 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksSmallThenBig_test_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksSmallThenBig_test_values))); 186 | 187 | // Then free the small block ... 188 | umm_free((void *)&test_umm_heap[1][UMM_BLOCK_HEADER_SIZE]); 189 | CHECK_TRUE(check_blocks(MallocMaxHeapBlocksSmallThenBig_ReverseFreetest_values, ARRAYELEMENTCOUNT(MallocMaxHeapBlocksSmallThenBig_ReverseFreetest_values))); 190 | 191 | // Then free the large block ... 192 | umm_free((void *)&test_umm_heap[2][UMM_BLOCK_HEADER_SIZE]); 193 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 194 | } 195 | 196 | TEST(TooBigMalloc, testHeapFirstMallocMaxNumBlocks_Minus1) 197 | { 198 | POINTERS_EQUAL((void *)NULL, (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + ((INT16_MAX-2) * 500)))); 199 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 200 | } 201 | 202 | TEST(TooBigMalloc, testHeapFirstMallocMaxNumBlocks) 203 | { 204 | POINTERS_EQUAL((void *)NULL, (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + ((INT16_MAX-1) * 500)))); 205 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 206 | } 207 | 208 | TEST(TooBigMalloc, testHeapFirstMallocMaxNumBlocks_Plus1) 209 | { 210 | POINTERS_EQUAL((void *)NULL, (umm_malloc(UMM_FIRST_BLOCK_BODY_SIZE + ((INT16_MAX-0) * 500)))); 211 | CHECK_TRUE(check_blocks(Initialization_test_values, ARRAYELEMENTCOUNT(Initialization_test_values))); 212 | } 213 | -------------------------------------------------------------------------------- /multitest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Iterate through the files in test/options and pass them to ceedling 4 | 5 | for filename in test/options/*; do 6 | [ -e "$filename" ] || continue 7 | name=$(basename $filename) 8 | printf "# ---------------------------------------------\n" 9 | printf "# ceedling options:%s clean test\n" ${name%.yml} 10 | printf "# ---------------------------------------------\n" 11 | ceedling options:${name%.yml} clean test 12 | done 13 | -------------------------------------------------------------------------------- /src/options/default.h: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rhempel/umm_malloc/ed68d0b5f40d83b34fd14cbbd99c4fc1e8aad77d/src/options/default.h -------------------------------------------------------------------------------- /src/options/enable_critical_depth_check.h: -------------------------------------------------------------------------------- 1 | #define UMM_MAX_CRITICAL_DEPTH_CHECK -------------------------------------------------------------------------------- /src/options/enable_first_fit.h: -------------------------------------------------------------------------------- 1 | #define UMM_FIRST_FIT -------------------------------------------------------------------------------- /src/options/enable_inline_metrics.h: -------------------------------------------------------------------------------- 1 | #define UMM_INLINE_METRICS -------------------------------------------------------------------------------- /src/options/enable_integrity_check.h: -------------------------------------------------------------------------------- 1 | #define UMM_INTEGRITY_CHECK -------------------------------------------------------------------------------- 
/src/options/enable_poison_check.h: -------------------------------------------------------------------------------- 1 | #define UMM_POISON_CHECK -------------------------------------------------------------------------------- /src/options/enable_umm_info.h: -------------------------------------------------------------------------------- 1 | #define UMM_INFO -------------------------------------------------------------------------------- /src/umm_info.c: -------------------------------------------------------------------------------- 1 | #ifdef UMM_INFO 2 | 3 | #include 4 | #include 5 | #include 6 | 7 | #include 8 | 9 | /* ---------------------------------------------------------------------------- 10 | * One of the coolest things about this little library is that it's VERY 11 | * easy to get debug information about the memory heap by simply iterating 12 | * through all of the memory blocks. 13 | * 14 | * As you go through all the blocks, you can check to see if it's a free 15 | * block by looking at the high order bit of the next block index. You can 16 | * also see how big the block is by subtracting the next block index from 17 | * the current block number. 18 | * 19 | * The umm_info function does all of that and makes the results available 20 | * in the ummHeapInfo structure. 21 | * ---------------------------------------------------------------------------- 22 | */ 23 | 24 | UMM_HEAP_INFO ummHeapInfo; 25 | 26 | extern umm_heap umm_heap_current; 27 | 28 | void compute_usage_metric(void) 29 | { 30 | if (0 == ummHeapInfo.freeBlocks) { 31 | ummHeapInfo.usage_metric = -1; // No free blocks! 32 | } else { 33 | ummHeapInfo.usage_metric = (int)((ummHeapInfo.usedBlocks * 100) / (ummHeapInfo.freeBlocks)); 34 | } 35 | } 36 | 37 | void compute_fragmentation_metric(void) 38 | { 39 | if (0 == ummHeapInfo.freeBlocks) { 40 | ummHeapInfo.fragmentation_metric = 0; // No free blocks ... so no fragmentation either! 41 | } else { 42 | ummHeapInfo.fragmentation_metric = 100 - (((uint32_t)(sqrtf(ummHeapInfo.freeBlocksSquared)) * 100) / (ummHeapInfo.freeBlocks)); 43 | } 44 | } 45 | 46 | void *umm_info(void *ptr, bool force) { 47 | return umm_multi_info(&umm_heap_current, ptr, force); 48 | } 49 | 50 | void *umm_multi_info(umm_heap* heap, void *ptr, bool force) { 51 | uint16_t blockNo = 0; 52 | 53 | UMM_CRITICAL_DECL(id_info); 54 | 55 | UMM_CHECK_INITIALIZED(); 56 | 57 | /* Protect the critical section... */ 58 | UMM_CRITICAL_ENTRY(id_info); 59 | 60 | /* 61 | * Clear out all of the entries in the ummHeapInfo structure before doing 62 | * any calculations.. 63 | */ 64 | memset(&ummHeapInfo, 0, sizeof(ummHeapInfo)); 65 | 66 | DBGLOG_FORCE(force, "\n"); 67 | DBGLOG_FORCE(force, "+----------+-------+--------+--------+-------+--------+--------+\n"); 68 | DBGLOG_FORCE(force, "|0x%08x|B %5i|NB %5i|PB %5i|Z %5i|NF %5i|PF %5i|\n", 69 | DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)), 70 | blockNo, 71 | UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK, 72 | UMM_PBLOCK(blockNo), 73 | (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK) - blockNo, 74 | UMM_NFREE(blockNo), 75 | UMM_PFREE(blockNo)); 76 | 77 | /* 78 | * Now loop through the block lists, and keep track of the number and size 79 | * of used and free blocks. The terminating condition is an nb pointer with 80 | * a value of zero... 
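 *
 * (The last block's next index is 0 - see the UMM_LASTBLOCK rows in the
 * unit test tables - so masking off UMM_FREELIST_MASK and testing the
 * result for zero is enough to stop the scan.)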
81 | */ 82 | 83 | blockNo = UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK; 84 | 85 | while (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK) { 86 | size_t curBlocks = (UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK) - blockNo; 87 | 88 | ++ummHeapInfo.totalEntries; 89 | ummHeapInfo.totalBlocks += curBlocks; 90 | 91 | /* Is this a free block? */ 92 | 93 | if (UMM_NBLOCK(blockNo) & UMM_FREELIST_MASK) { 94 | ++ummHeapInfo.freeEntries; 95 | ummHeapInfo.freeBlocks += curBlocks; 96 | ummHeapInfo.freeBlocksSquared += (curBlocks * curBlocks); 97 | 98 | if (ummHeapInfo.maxFreeContiguousBlocks < curBlocks) { 99 | ummHeapInfo.maxFreeContiguousBlocks = curBlocks; 100 | } 101 | 102 | DBGLOG_FORCE(force, "|0x%08x|B %5i|NB %5i|PB %5i|Z %5u|NF %5i|PF %5i|\n", 103 | DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)), 104 | blockNo, 105 | UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK, 106 | UMM_PBLOCK(blockNo), 107 | (uint16_t)curBlocks, 108 | UMM_NFREE(blockNo), 109 | UMM_PFREE(blockNo)); 110 | 111 | /* Does this block address match the ptr we may be trying to free? */ 112 | 113 | if (ptr == &UMM_BLOCK(blockNo)) { 114 | 115 | /* Release the critical section... */ 116 | UMM_CRITICAL_EXIT(id_info); 117 | 118 | return ptr; 119 | } 120 | } else { 121 | ++ummHeapInfo.usedEntries; 122 | ummHeapInfo.usedBlocks += curBlocks; 123 | 124 | DBGLOG_FORCE(force, "|0x%08x|B %5i|NB %5i|PB %5i|Z %5u| |\n", 125 | DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)), 126 | blockNo, 127 | UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK, 128 | UMM_PBLOCK(blockNo), 129 | (uint16_t)curBlocks); 130 | } 131 | 132 | blockNo = UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK; 133 | } 134 | 135 | /* 136 | * The very last block is used as a placeholder to indicate that 137 | * there are no more blocks in the heap, so it cannot be used 138 | * for anything - at the same time, the size of this block must 139 | * ALWAYS be exactly 1 ! 140 | */ 141 | 142 | DBGLOG_FORCE(force, "|0x%08x|B %5i|NB %5i|PB %5i|Z %5i|NF %5i|PF %5i|\n", 143 | DBGLOG_32_BIT_PTR(&UMM_BLOCK(blockNo)), 144 | blockNo, 145 | UMM_NBLOCK(blockNo) & UMM_BLOCKNO_MASK, 146 | UMM_PBLOCK(blockNo), 147 | UMM_NUMBLOCKS - blockNo, 148 | UMM_NFREE(blockNo), 149 | UMM_PFREE(blockNo)); 150 | 151 | DBGLOG_FORCE(force, "+----------+-------+--------+--------+-------+--------+--------+\n"); 152 | 153 | DBGLOG_FORCE(force, "Total Entries %5i Used Entries %5i Free Entries %5i\n", 154 | ummHeapInfo.totalEntries, 155 | ummHeapInfo.usedEntries, 156 | ummHeapInfo.freeEntries); 157 | 158 | DBGLOG_FORCE(force, "Total Blocks %5i Used Blocks %5i Free Blocks %5i\n", 159 | ummHeapInfo.totalBlocks, 160 | ummHeapInfo.usedBlocks, 161 | ummHeapInfo.freeBlocks); 162 | 163 | DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n"); 164 | 165 | compute_usage_metric(); 166 | DBGLOG_FORCE(force, "Usage Metric: %5i\n", ummHeapInfo.usage_metric); 167 | 168 | compute_fragmentation_metric(); 169 | DBGLOG_FORCE(force, "Fragmentation Metric: %5i\n", ummHeapInfo.fragmentation_metric); 170 | 171 | DBGLOG_FORCE(force, "+--------------------------------------------------------------+\n"); 172 | 173 | /* Release the critical section... 
*/ 174 | UMM_CRITICAL_EXIT(id_info); 175 | 176 | return NULL; 177 | } 178 | 179 | /* ------------------------------------------------------------------------ */ 180 | 181 | size_t umm_free_heap_size(void) { 182 | return umm_multi_free_heap_size(&umm_heap_current); 183 | } 184 | 185 | size_t umm_multi_free_heap_size(umm_heap *heap) { 186 | #ifndef UMM_INLINE_METRICS 187 | umm_multi_info(heap, NULL, false); 188 | #endif 189 | return (size_t)ummHeapInfo.freeBlocks * UMM_BLOCKSIZE; 190 | } 191 | 192 | size_t umm_max_free_block_size(void) { 193 | return umm_multi_max_free_block_size(&umm_heap_current); 194 | } 195 | 196 | size_t umm_multi_max_free_block_size(umm_heap *heap) { 197 | umm_multi_info(heap, NULL, false); 198 | return ummHeapInfo.maxFreeContiguousBlocks * sizeof(umm_block); 199 | } 200 | 201 | int umm_usage_metric(void) { 202 | return umm_multi_usage_metric(&umm_heap_current); 203 | } 204 | 205 | int umm_multi_usage_metric(umm_heap *heap) { 206 | #ifdef UMM_INLINE_METRICS 207 | compute_usage_metric(); 208 | #else 209 | umm_multi_info(heap, NULL, false); 210 | #endif 211 | DBGLOG_DEBUG("usedBlocks %i totalBlocks %i\n", ummHeapInfo.usedBlocks, ummHeapInfo.totalBlocks); 212 | 213 | return ummHeapInfo.usage_metric; 214 | } 215 | 216 | int umm_fragmentation_metric(void) { 217 | return umm_multi_fragmentation_metric(&umm_heap_current); 218 | } 219 | 220 | int umm_multi_fragmentation_metric(umm_heap *heap) { 221 | #ifdef UMM_INLINE_METRICS 222 | compute_fragmentation_metric(); 223 | #else 224 | umm_multi_info(heap, NULL, false); 225 | #endif 226 | DBGLOG_DEBUG("freeBlocks %i freeBlocksSquared %i\n", ummHeapInfo.freeBlocks, ummHeapInfo.freeBlocksSquared); 227 | 228 | return ummHeapInfo.fragmentation_metric; 229 | } 230 | 231 | #ifdef UMM_INLINE_METRICS 232 | static void umm_multi_fragmentation_metric_init(umm_heap *heap) { 233 | ummHeapInfo.freeBlocks = UMM_NUMBLOCKS - 2; 234 | ummHeapInfo.freeBlocksSquared = ummHeapInfo.freeBlocks * ummHeapInfo.freeBlocks; 235 | } 236 | 237 | static void umm_fragmentation_metric_init(void) { 238 | umm_multi_fragmentation_metric_init(&umm_heap_current); 239 | } 240 | 241 | static void umm_multi_fragmentation_metric_add(umm_heap *heap, uint16_t c) { 242 | uint16_t blocks = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) - c; 243 | DBGLOG_DEBUG("Add block %i size %i to free metric\n", c, blocks); 244 | ummHeapInfo.freeBlocks += blocks; 245 | ummHeapInfo.freeBlocksSquared += (blocks * blocks); 246 | } 247 | 248 | static void umm_fragmentation_metric_add(uint16_t c) { 249 | umm_multi_fragmentation_metric_add(&umm_heap_current, c); 250 | } 251 | 252 | static void umm_multi_fragmentation_metric_remove(umm_heap *heap, uint16_t c) { 253 | uint16_t blocks = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) - c; 254 | DBGLOG_DEBUG("Remove block %i size %i from free metric\n", c, blocks); 255 | ummHeapInfo.freeBlocks -= blocks; 256 | ummHeapInfo.freeBlocksSquared -= (blocks * blocks); 257 | } 258 | 259 | static void umm_fragmentation_metric_remove(uint16_t c) { 260 | umm_multi_fragmentation_metric_remove(&umm_heap_current, c); 261 | } 262 | #endif // UMM_INLINE_METRICS 263 | 264 | /* ------------------------------------------------------------------------ */ 265 | #endif 266 | -------------------------------------------------------------------------------- /src/umm_integrity.c: -------------------------------------------------------------------------------- 1 | /* integrity check (UMM_INTEGRITY_CHECK) {{{ */ 2 | #ifdef UMM_INTEGRITY_CHECK 3 | 4 | #include 5 | #include 6 | 7 | extern umm_heap 
umm_heap_current; 8 | 9 | /* 10 | * Perform integrity check of the whole heap data. Returns 1 in case of 11 | * success, 0 otherwise. 12 | * 13 | * First of all, iterate through all free blocks, and check that all backlinks 14 | * match (i.e. if block X has next free block Y, then the block Y should have 15 | * previous free block set to X). 16 | * 17 | * Additionally, we check that each free block is correctly marked with 18 | * `UMM_FREELIST_MASK` on the `next` pointer: during iteration through free 19 | * list, we mark each free block by the same flag `UMM_FREELIST_MASK`, but 20 | * on `prev` pointer. We'll check and unmark it later. 21 | * 22 | * Then, we iterate through all blocks in the heap, and similarly check that 23 | * all backlinks match (i.e. if block X has next block Y, then the block Y 24 | * should have previous block set to X). 25 | * 26 | * But before checking each backlink, we check that the `next` and `prev` 27 | * pointers are both marked with `UMM_FREELIST_MASK`, or both unmarked. 28 | * This way, we ensure that the free flag is in sync with the free pointers 29 | * chain. 30 | */ 31 | bool umm_integrity_check(void) { 32 | return umm_multi_integrity_check(&umm_heap_current); 33 | } 34 | 35 | bool umm_multi_integrity_check(umm_heap* heap) { 36 | UMM_CRITICAL_DECL(id_integrity); 37 | bool ok = true; 38 | uint16_t prev; 39 | uint16_t cur; 40 | 41 | UMM_CHECK_INITIALIZED(); 42 | 43 | /* Iterate through all free blocks */ 44 | prev = 0; 45 | UMM_CRITICAL_ENTRY(id_integrity); 46 | while (1) { 47 | cur = UMM_NFREE(prev); 48 | 49 | /* Check that next free block number is valid */ 50 | if (cur >= UMM_NUMBLOCKS) { 51 | DBGLOG_CRITICAL("Heap integrity broken: too large next free num: %d " 52 | "(in block %d, addr 0x%08x)\n", 53 | cur, prev, DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev))); 54 | ok = false; 55 | goto clean; 56 | } 57 | if (cur == 0) { 58 | /* No more free blocks */ 59 | break; 60 | } 61 | 62 | /* Check if prev free block number matches */ 63 | if (UMM_PFREE(cur) != prev) { 64 | DBGLOG_CRITICAL("Heap integrity broken: free links don't match: " 65 | "%d -> %d, but %d -> %d\n", 66 | prev, cur, cur, UMM_PFREE(cur)); 67 | ok = false; 68 | goto clean; 69 | } 70 | 71 | UMM_PBLOCK(cur) |= UMM_FREELIST_MASK; 72 | 73 | prev = cur; 74 | } 75 | 76 | /* Iterate through all blocks */ 77 | prev = 0; 78 | while (1) { 79 | cur = UMM_NBLOCK(prev) & UMM_BLOCKNO_MASK; 80 | 81 | /* Check that next block number is valid */ 82 | if (cur >= UMM_NUMBLOCKS) { 83 | DBGLOG_CRITICAL("Heap integrity broken: too large next block num: %d " 84 | "(in block %d, addr 0x%08x)\n", 85 | cur, prev, DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev))); 86 | ok = false; 87 | goto clean; 88 | } 89 | if (cur == 0) { 90 | /* No more blocks */ 91 | break; 92 | } 93 | 94 | /* make sure the free mark is appropriate, and unmark it */ 95 | if ((UMM_NBLOCK(cur) & UMM_FREELIST_MASK) 96 | != (UMM_PBLOCK(cur) & UMM_FREELIST_MASK)) { 97 | DBGLOG_CRITICAL("Heap integrity broken: mask wrong at addr 0x%08x: n=0x%x, p=0x%x\n", 98 | DBGLOG_32_BIT_PTR(&UMM_NBLOCK(cur)), 99 | (UMM_NBLOCK(cur) & UMM_FREELIST_MASK), 100 | (UMM_PBLOCK(cur) & UMM_FREELIST_MASK)); 101 | ok = false; 102 | goto clean; 103 | } 104 | 105 | /* make sure the block list is sequential */ 106 | if (cur <= prev) { 107 | DBGLOG_CRITICAL("Heap integrity broken: next block %d is before prev this one " 108 | "(in block %d, addr 0x%08x)\n", 109 | cur, prev, DBGLOG_32_BIT_PTR(&UMM_NBLOCK(prev))); 110 | ok = false; 111 | goto clean; 112 | } 113 | 114 | /* unmark */ 115 | UMM_PBLOCK(cur) 
&= UMM_BLOCKNO_MASK;
116 | 
117 |         /* Check if prev block number matches */
118 |         if (UMM_PBLOCK(cur) != prev) {
119 |             DBGLOG_CRITICAL("Heap integrity broken: block links don't match: "
120 |                 "%d -> %d, but %d -> %d\n",
121 |                 prev, cur, cur, UMM_PBLOCK(cur));
122 |             ok = false;
123 |             goto clean;
124 |         }
125 | 
126 |         prev = cur;
127 |     }
128 | 
129 | clean:
130 |     UMM_CRITICAL_EXIT(id_integrity);
131 |     if (!ok) {
132 |         UMM_HEAP_CORRUPTION_CB();
133 |     }
134 |     return ok;
135 | }
136 | 
137 | #endif
138 | /* }}} */
139 | 
--------------------------------------------------------------------------------
/src/umm_malloc.c:
--------------------------------------------------------------------------------
1 | /* ----------------------------------------------------------------------------
2 |  * umm_malloc.c - a memory allocator for embedded systems (microcontrollers)
3 |  *
4 |  * See LICENSE for copyright notice
5 |  * See README.md for acknowledgements and description of internals
6 |  * ----------------------------------------------------------------------------
7 |  *
8 |  * R.Hempel 2007-09-22 - Original
9 |  * R.Hempel 2008-12-11 - Added MIT License boilerplate
10 |  *                     - realloc() now looks to see if previous block is free
11 |  *                     - made common operations functions
12 |  * R.Hempel 2009-03-02 - Added macros to disable tasking
13 |  *                     - Added function to dump heap and check for valid free
14 |  *                       pointer
15 |  * R.Hempel 2009-03-09 - Changed name to umm_malloc to avoid conflicts with
16 |  *                       the mm_malloc() library functions
17 |  *                     - Added some test code to assimilate a free block
18 |  *                       with the very next block if possible. Complicated and
19 |  *                       not worth the grief.
20 |  * D.Frank 2014-04-02  - Fixed heap configuration when UMM_TEST_MAIN is NOT set,
21 |  *                       added user-dependent configuration file umm_malloc_cfg.h
22 |  * R.Hempel 2016-12-04 - Add support for Unity test framework
23 |  *                     - Reorganize source files to avoid redundant content
24 |  *                     - Move integrity and poison checking to separate file
25 |  * R.Hempel 2017-12-29 - Fix bug in realloc when requesting a new block that
26 |  *                       results in OOM error - see Issue 11
27 |  * R.Hempel 2019-09-07 - Separate the malloc() and free() functionality into
28 |  *                       wrappers that use critical section protection macros
29 |  *                       and static core functions that assume they are
30 |  *                       running in a protected context. Thanks @devyte
31 |  * R.Hempel 2020-01-07 - Add support for Fragmentation metric - See Issue 14
32 |  * R.Hempel 2020-01-12 - Use explicitly sized values from stdint.h - See Issue 15
33 |  * R.Hempel 2020-01-20 - Move metric functions back to umm_info - See Issue 29
34 |  * R.Hempel 2020-02-01 - Macro functions are uppercased - See Issue 34
35 |  * R.Hempel 2020-06-20 - Support alternate body size - See Issue 42
36 |  * R.Hempel 2021-05-02 - Support explicit memory umm_init_heap() - See Issue 53
37 |  * K.Whitlock 2023-07-06 - Add support for multiple heaps
38 |  * ----------------------------------------------------------------------------
39 |  */
40 | 
41 | #include <stdint.h>
42 | #include <stddef.h>
43 | #include <stdbool.h>
44 | #include <string.h>
45 | 
46 | #include "umm_malloc_cfg.h" // Override with umm_malloc_cfg_xxx.h
47 | #include "umm_malloc.h"
48 | 
49 | /* Use the default DBGLOG_LEVEL and DBGLOG_FUNCTION */
50 | 
51 | // #define DBGLOG_ENABLE
52 | 
53 | #define DBGLOG_LEVEL 0
54 | 
55 | #ifdef DBGLOG_ENABLE
56 | #include "dbglog/dbglog.h"
57 | #endif
58 | 
59 | extern void *UMM_MALLOC_CFG_HEAP_ADDR;
60 | extern uint32_t UMM_MALLOC_CFG_HEAP_SIZE;
61 | 
62 | /* ------------------------------------------------------------------------- */
63 | 
64 | UMM_H_ATTPACKPRE typedef struct umm_ptr_t {
65 |     uint16_t next;
66 |     uint16_t prev;
67 | } UMM_H_ATTPACKSUF umm_ptr;
68 | 
69 | UMM_H_ATTPACKPRE typedef struct umm_block_t {
70 |     union {
71 |         umm_ptr used;
72 |     } header;
73 |     union {
74 |         umm_ptr free;
75 |         uint8_t data[UMM_BLOCK_BODY_SIZE - sizeof(struct umm_ptr_t)];
76 |     } body;
77 | } UMM_H_ATTPACKSUF umm_block;
78 | 
79 | #define UMM_FREELIST_MASK ((uint16_t)(0x8000))
80 | #define UMM_BLOCKNO_MASK ((uint16_t)(0x7FFF))
81 | 
82 | /* ------------------------------------------------------------------------- */
83 | 
84 | #define UMM_HEAP ((umm_block *)heap->pheap)
85 | #define UMM_HEAPSIZE (heap->heap_size)
86 | #define UMM_NUMBLOCKS (heap->numblocks)
87 | 
88 | #define UMM_BLOCKSIZE (sizeof(umm_block))
89 | #define UMM_BLOCK_LAST (UMM_NUMBLOCKS - 1)
90 | 
91 | /* -------------------------------------------------------------------------
92 |  * These macros evaluate to the address of the block and data respectively
93 |  */
94 | 
95 | #define UMM_BLOCK(b) (UMM_HEAP[b])
96 | #define UMM_DATA(b) (UMM_BLOCK(b).body.data)
97 | 
98 | /* -------------------------------------------------------------------------
99 |  * These macros evaluate to the index of the block - NOT the address!!!
100 |  */
101 | 
102 | #define UMM_NBLOCK(b) (UMM_BLOCK(b).header.used.next)
103 | #define UMM_PBLOCK(b) (UMM_BLOCK(b).header.used.prev)
104 | #define UMM_NFREE(b) (UMM_BLOCK(b).body.free.next)
105 | #define UMM_PFREE(b) (UMM_BLOCK(b).body.free.prev)
106 | 
107 | /* -------------------------------------------------------------------------
108 |  * There are additional files that may be included here - normally it's
109 |  * not a good idea to include .c files but in this case it keeps the
110 |  * main umm_malloc file clear and prevents issues with exposing internal
111 |  * data structures to other programs.
112 |  * -------------------------------------------------------------------------
113 |  */
114 | 
115 | #include "umm_integrity.c"
116 | #include "umm_poison.c"
117 | #include "umm_info.c"
118 | 
119 | /* ------------------------------------------------------------------------ */
120 | 
121 | static uint16_t umm_blocks(size_t size) {
122 | 
123 |     /*
124 |      * The calculation of the block size is not too difficult, but there are
125 |      * a few little things that we need to be mindful of.
126 |      *
127 |      * When a block is removed from the free list, the space used by the free
128 |      * pointers is available for data. That's what the first calculation
129 |      * of size is doing.
130 |      *
131 |      * We don't check for the special case of (size == 0) here as this needs
132 |      * special handling in the caller depending on context. For example when we
133 |      * realloc() a block to size 0 it should simply be freed.
134 |      *
135 |      * We do NOT need to check for allocating more blocks than the heap can
136 |      * possibly hold - the allocator figures this out for us.
137 |      *
138 |      * There are only two cases left to consider:
139 |      *
140 |      * 1. (size <= body)    Obviously this is just one block
141 |      * 2. (blocks > (2^15)) This should return ((2^15)-1) to force a
142 |      *                      failure when the allocator runs
143 |      *
144 |      * If the requested size is greater than 32767-2 blocks (max block index
145 |      * minus the overhead of the top and bottom bookkeeping blocks) then we
146 |      * will return an incorrectly truncated value when the result is cast to
147 |      * a uint16_t.
148 |      */
149 | 
150 |     if (size <= (sizeof(((umm_block *)0)->body))) {
151 |         return 1;
152 |     }
153 | 
154 |     /*
155 |      * If it's for more than that, then we need to figure out the number of
156 |      * additional whole blocks the size of an umm_block that are required, so
157 |      * reduce the size request by the number of bytes in the body of the
158 |      * first block.
159 |      */
160 | 
161 |     size -= (sizeof(((umm_block *)0)->body));
162 | 
163 |     /* NOTE WELL that we take advantage of the fact that INT16_MAX is the
164 |      * number of blocks that we can index in 15 bits :-)
165 |      *
166 |      * The below expression looks weird, but it's right. Assuming a body
167 |      * size of 4 bytes and a block size of 8 bytes:
168 |      *
169 |      * BYTES (BYTES-BODY) (BYTES-BODY-1)/BLOCKSIZE BLOCKS
170 |      *     1         n/a                       n/a      1
171 |      *     5           1                         0      2
172 |      *    12           8                         0      2
173 |      *    13           9                         1      3
174 |      */
175 | 
176 |     size_t blocks = (2 + ((size - 1) / (UMM_BLOCKSIZE)));
177 | 
178 |     if (blocks > (INT16_MAX)) {
179 |         blocks = INT16_MAX;
180 |     }
181 | 
182 |     return (uint16_t)blocks;
183 | }
184 | 
185 | /* ------------------------------------------------------------------------ */
186 | /*
187 |  * Split the block `c` into two blocks: `c` and `c + blocks`.
188 |  *
189 |  * - `new_freemask` should be `0` if `c + blocks` is used, or `UMM_FREELIST_MASK`
190 |  *   otherwise.
191 |  *
192 |  * Note that free pointers are NOT modified by this function.
193 |  */
194 | static void umm_split_block(umm_heap *heap,
195 |     uint16_t c,
196 |     uint16_t blocks,
197 |     uint16_t new_freemask) {
198 | 
199 |     UMM_NBLOCK(c + blocks) = (UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) | new_freemask;
200 |     UMM_PBLOCK(c + blocks) = c;
201 | 
202 |     UMM_PBLOCK(UMM_NBLOCK(c) & UMM_BLOCKNO_MASK) = (c + blocks);
203 |     UMM_NBLOCK(c) = (c + blocks);
204 | }
205 | 
206 | /* ------------------------------------------------------------------------ */
207 | 
208 | static void umm_disconnect_from_free_list(umm_heap *heap, uint16_t c) {
209 |     /* Disconnect this block from the FREE list */
210 | 
211 |     UMM_NFREE(UMM_PFREE(c)) = UMM_NFREE(c);
212 |     UMM_PFREE(UMM_NFREE(c)) = UMM_PFREE(c);
213 | 
214 |     /* And clear the free block indicator */
215 | 
216 |     UMM_NBLOCK(c) &= (~UMM_FREELIST_MASK);
217 | }
218 | 
219 | /* ------------------------------------------------------------------------
220 |  * The umm_assimilate_up() function does not assume that UMM_NBLOCK(c)
221 |  * has the UMM_FREELIST_MASK bit set. It only assimilates up if the
222 |  * next block is free.
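 *
 * A worked example (illustrative values, not taken from a real trace):
 * suppose c == 5, UMM_NBLOCK(5) == 9, and block 9 is free with
 * UMM_NBLOCK(9) == (12 | UMM_FREELIST_MASK). Block 9 is unlinked from the
 * free list, UMM_PBLOCK(12) is pointed back at 5, and UMM_NBLOCK(5) becomes
 * 12, so the used block at 5 now spans blocks 5..11.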
223 | */ 224 | 225 | static void umm_assimilate_up(umm_heap *heap, uint16_t c) { 226 | 227 | if (UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK) { 228 | 229 | UMM_FRAGMENTATION_METRIC_REMOVE(UMM_NBLOCK(c)); 230 | 231 | /* 232 | * The next block is a free block, so assimilate up and remove it from 233 | * the free list 234 | */ 235 | 236 | DBGLOG_DEBUG("Assimilate up to next block, which is FREE\n"); 237 | 238 | /* Disconnect the next block from the FREE list */ 239 | 240 | umm_disconnect_from_free_list(heap, UMM_NBLOCK(c)); 241 | 242 | /* Assimilate the next block with this one */ 243 | 244 | UMM_PBLOCK(UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK) = c; 245 | UMM_NBLOCK(c) = UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK; 246 | } 247 | } 248 | 249 | /* ------------------------------------------------------------------------ 250 | * The umm_assimilate_down() function assumes that UMM_NBLOCK(c) does NOT 251 | * have the UMM_FREELIST_MASK bit set. In other words, try to assimilate 252 | * up before assimilating down. 253 | */ 254 | 255 | static uint16_t umm_assimilate_down(umm_heap *heap, uint16_t c, uint16_t freemask) { 256 | 257 | // We are going to assimilate down to the previous block because 258 | // it was free, so remove it from the fragmentation metric 259 | 260 | UMM_FRAGMENTATION_METRIC_REMOVE(UMM_PBLOCK(c)); 261 | 262 | UMM_NBLOCK(UMM_PBLOCK(c)) = UMM_NBLOCK(c) | freemask; 263 | UMM_PBLOCK(UMM_NBLOCK(c)) = UMM_PBLOCK(c); 264 | 265 | if (freemask) { 266 | // We are going to free the entire assimilated block 267 | // so add it to the fragmentation metric. A good 268 | // compiler will optimize away the empty if statement 269 | // when UMM_INFO is not defined, so don't worry about 270 | // guarding it. 271 | 272 | UMM_FRAGMENTATION_METRIC_ADD(UMM_PBLOCK(c)); 273 | } 274 | 275 | return UMM_PBLOCK(c); 276 | } 277 | 278 | /* ------------------------------------------------------------------------- */ 279 | 280 | void umm_multi_init_heap(umm_heap *heap, void *ptr, size_t size) { 281 | /* init heap pointer and size, and memset it to 0 */ 282 | heap->pheap = ptr; 283 | UMM_HEAPSIZE = size; 284 | UMM_NUMBLOCKS = (UMM_HEAPSIZE / UMM_BLOCKSIZE); 285 | memset(UMM_HEAP, 0x00, UMM_HEAPSIZE); 286 | 287 | /* setup initial blank heap structure */ 288 | UMM_FRAGMENTATION_METRIC_INIT(); 289 | 290 | /* Set up umm_block[0], which just points to umm_block[1] */ 291 | UMM_NBLOCK(0) = 1; 292 | UMM_NFREE(0) = 1; 293 | UMM_PFREE(0) = 1; 294 | 295 | /* 296 | * Now, we need to set the whole heap space as a huge free block. We should 297 | * not touch umm_block[0], since it's special: umm_block[0] is the head of 298 | * the free block list. It's a part of the heap invariant. 299 | * 300 | * See the detailed explanation at the beginning of the file. 301 | * 302 | * umm_block[1] has pointers: 303 | * 304 | * - next `umm_block`: the last one umm_block[n] 305 | * - prev `umm_block`: umm_block[0] 306 | * 307 | * Plus, it's a free `umm_block`, so we need to apply `UMM_FREELIST_MASK` 308 | * 309 | * And it's the last free block, so the next free block is 0 which marks 310 | * the end of the list. The previous block and free block pointer are 0 311 | * too, there is no need to initialize these values due to the init code 312 | * that memsets the entire umm_ space to 0. 313 | */ 314 | UMM_NBLOCK(1) = UMM_BLOCK_LAST | UMM_FREELIST_MASK; 315 | 316 | /* 317 | * Last umm_block[n] has the next block index at 0, meaning it's 318 | * the end of the list, and the previous block is umm_block[1]. 
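 *
 * Putting the whole invariant together, a freshly initialized heap looks
 * like this (an illustrative sketch, writing N for UMM_BLOCK_LAST):
 *
 *   block 0:  nblock = 1, nfree = 1, pfree = 1   (list head, never allocated)
 *   block 1:  nblock = N | UMM_FREELIST_MASK, pblock/nfree/pfree = 0
 *   block N:  nblock = 0, pblock = 1             (end marker, never allocated)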
319 | * 320 | * The last block is a special block and can never be part of the 321 | * free list, so its pointers are left at 0 too. 322 | */ 323 | 324 | UMM_PBLOCK(UMM_BLOCK_LAST) = 1; 325 | 326 | // DBGLOG_FORCE(true, "nblock(0) %04x pblock(0) %04x nfree(0) %04x pfree(0) %04x\n", UMM_NBLOCK(0) & UMM_BLOCKNO_MASK, UMM_PBLOCK(0), UMM_NFREE(0), UMM_PFREE(0)); 327 | // DBGLOG_FORCE(true, "nblock(1) %04x pblock(1) %04x nfree(1) %04x pfree(1) %04x\n", UMM_NBLOCK(1) & UMM_BLOCKNO_MASK, UMM_PBLOCK(1), UMM_NFREE(1), UMM_PFREE(1)); 328 | 329 | } 330 | 331 | void umm_multi_init(umm_heap *heap) { 332 | /* Initialize the heap from linker supplied values */ 333 | 334 | umm_multi_init_heap(heap, UMM_MALLOC_CFG_HEAP_ADDR, UMM_MALLOC_CFG_HEAP_SIZE); 335 | } 336 | 337 | /* ------------------------------------------------------------------------ 338 | * Must be called only from within critical sections guarded by 339 | * UMM_CRITICAL_ENTRY(id) and UMM_CRITICAL_EXIT(id). 340 | */ 341 | 342 | static void umm_free_core(umm_heap *heap, void *ptr) { 343 | 344 | uint16_t c; 345 | 346 | /* 347 | * FIXME: At some point it might be a good idea to add a check to make sure 348 | * that the pointer we're being asked to free up is actually within 349 | * the umm_heap! 350 | * 351 | * NOTE: See the new umm_info() function that you can use to see if a ptr is 352 | * on the free list! 353 | */ 354 | 355 | /* Figure out which block we're in. Note the use of truncated division... */ 356 | 357 | c = (((uint8_t *)ptr) - (uint8_t *)(&(UMM_HEAP[0]))) / UMM_BLOCKSIZE; 358 | 359 | DBGLOG_DEBUG("Freeing block %6i\n", c); 360 | 361 | /* Now let's assimilate this block with the next one if possible. */ 362 | 363 | umm_assimilate_up(heap, c); 364 | 365 | /* Then assimilate with the previous block if possible */ 366 | 367 | if (UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK) { 368 | 369 | DBGLOG_DEBUG("Assimilate down to previous block, which is FREE\n"); 370 | 371 | c = umm_assimilate_down(heap, c, UMM_FREELIST_MASK); 372 | } else { 373 | /* 374 | * The previous block is not a free block, so add this one to the head 375 | * of the free list 376 | */ 377 | UMM_FRAGMENTATION_METRIC_ADD(c); 378 | 379 | DBGLOG_DEBUG("Just add to head of free list\n"); 380 | 381 | UMM_PFREE(UMM_NFREE(0)) = c; 382 | UMM_NFREE(c) = UMM_NFREE(0); 383 | UMM_PFREE(c) = 0; 384 | UMM_NFREE(0) = c; 385 | 386 | UMM_NBLOCK(c) |= UMM_FREELIST_MASK; 387 | } 388 | } 389 | 390 | /* ------------------------------------------------------------------------ */ 391 | 392 | void umm_multi_free(umm_heap *heap, void *ptr) { 393 | UMM_CRITICAL_DECL(id_free); 394 | 395 | UMM_CHECK_INITIALIZED(); 396 | 397 | /* If we're being asked to free a NULL pointer, well that's just silly! 
*/ 398 | 399 | if ((void *)0 == ptr) { 400 | DBGLOG_DEBUG("free a null pointer -> do nothing\n"); 401 | 402 | return; 403 | } 404 | 405 | /* If we're being asked to free an out of range pointer - do nothing */ 406 | /* TODO: remove the check for NULL pointer later */ 407 | 408 | if ((ptr < heap->pheap) || ((size_t)ptr >= (size_t)heap->pheap + heap->heap_size)) { 409 | DBGLOG_DEBUG("free an out of range pointer -> do nothing\n"); 410 | 411 | return; 412 | } 413 | 414 | /* Free the memory withing a protected critical section */ 415 | 416 | UMM_CRITICAL_ENTRY(id_free); 417 | 418 | umm_free_core(heap, ptr); 419 | 420 | UMM_CRITICAL_EXIT(id_free); 421 | } 422 | 423 | /* ------------------------------------------------------------------------ 424 | * Must be called only from within critical sections guarded by 425 | * UMM_CRITICAL_ENTRY(id) and UMM_CRITICAL_EXIT(id). 426 | */ 427 | 428 | static void *umm_malloc_core(umm_heap *heap, size_t size) { 429 | uint16_t blocks; 430 | uint16_t blockSize = 0; 431 | 432 | uint16_t bestSize; 433 | uint16_t bestBlock; 434 | 435 | uint16_t cf; 436 | 437 | blocks = umm_blocks(size); 438 | 439 | /* 440 | * Now we can scan through the free list until we find a space that's big 441 | * enough to hold the number of blocks we need. 442 | * 443 | * This part may be customized to be a best-fit, worst-fit, or first-fit 444 | * algorithm 445 | */ 446 | 447 | cf = UMM_NFREE(0); 448 | 449 | bestBlock = UMM_NFREE(0); 450 | bestSize = 0x7FFF; 451 | 452 | while (cf) { 453 | blockSize = (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK) - cf; 454 | 455 | DBGLOG_TRACE("Looking at block %6i size %6i\n", cf, blockSize); 456 | 457 | #if defined UMM_BEST_FIT 458 | if ((blockSize >= blocks) && (blockSize < bestSize)) { 459 | bestBlock = cf; 460 | bestSize = blockSize; 461 | } 462 | #elif defined UMM_FIRST_FIT 463 | /* This is the first block that fits! */ 464 | if ((blockSize >= blocks)) { 465 | break; 466 | } 467 | #else 468 | #error "No UMM_*_FIT is defined - check umm_malloc_cfg.h" 469 | #endif 470 | 471 | cf = UMM_NFREE(cf); 472 | } 473 | 474 | if (0x7FFF != bestSize) { 475 | cf = bestBlock; 476 | blockSize = bestSize; 477 | } 478 | 479 | if (UMM_NBLOCK(cf) & UMM_BLOCKNO_MASK && blockSize >= blocks) { 480 | 481 | UMM_FRAGMENTATION_METRIC_REMOVE(cf); 482 | 483 | /* 484 | * This is an existing block in the memory heap, we just need to split off 485 | * what we need, unlink it from the free list and mark it as in use, and 486 | * link the rest of the block back into the freelist as if it was a new 487 | * block on the free list... 488 | */ 489 | 490 | if (blockSize == blocks) { 491 | /* It's an exact fit and we don't neet to split off a block. */ 492 | DBGLOG_DEBUG("Allocating %6i blocks starting at %6i - exact\n", blocks, cf); 493 | 494 | /* Disconnect this block from the FREE list */ 495 | 496 | umm_disconnect_from_free_list(heap, cf); 497 | } else { 498 | 499 | /* It's not an exact fit and we need to split off a block. */ 500 | DBGLOG_DEBUG("Allocating %6i blocks starting at %6i - existing\n", blocks, cf); 501 | 502 | /* 503 | * split current free block `cf` into two blocks. The first one will be 504 | * returned to user, so it's not free, and the second one will be free. 
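 *
 * For example (illustrative numbers): if cf == 20 heads a free region with
 * UMM_NBLOCK(20) == (30 | UMM_FREELIST_MASK) and blocks == 3, the split
 * leaves blocks 20..22 allocated to the caller and creates a new free block
 * at 23 spanning 23..29; the free pointer fix-ups below then splice block
 * 23 into the free list where block 20 used to be.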
505 | */ 506 | umm_split_block(heap, cf, blocks, UMM_FREELIST_MASK /*new block is free*/); 507 | 508 | UMM_FRAGMENTATION_METRIC_ADD(UMM_NBLOCK(cf)); 509 | 510 | /* 511 | * `umm_split_block()` does not update the free pointers (it affects 512 | * only free flags), but effectively we've just moved beginning of the 513 | * free block from `cf` to `cf + blocks`. So we have to adjust pointers 514 | * to and from adjacent free blocks. 515 | */ 516 | 517 | /* previous free block */ 518 | UMM_NFREE(UMM_PFREE(cf)) = cf + blocks; 519 | UMM_PFREE(cf + blocks) = UMM_PFREE(cf); 520 | 521 | /* next free block */ 522 | UMM_PFREE(UMM_NFREE(cf)) = cf + blocks; 523 | UMM_NFREE(cf + blocks) = UMM_NFREE(cf); 524 | } 525 | 526 | } else { 527 | /* Out of memory */ 528 | 529 | DBGLOG_DEBUG("Can't allocate %5i blocks\n", blocks); 530 | 531 | return (void *)NULL; 532 | } 533 | 534 | return (void *)&UMM_DATA(cf); 535 | } 536 | 537 | /* ------------------------------------------------------------------------ */ 538 | 539 | void *umm_multi_malloc(umm_heap *heap, size_t size) { 540 | UMM_CRITICAL_DECL(id_malloc); 541 | 542 | void *ptr = NULL; 543 | 544 | UMM_CHECK_INITIALIZED(); 545 | 546 | /* 547 | * the very first thing we do is figure out if we're being asked to allocate 548 | * a size of 0 - and if we are we'll simply return a null pointer. if not 549 | * then reduce the size by 1 byte so that the subsequent calculations on 550 | * the number of blocks to allocate are easier... 551 | */ 552 | 553 | if (0 == size) { 554 | DBGLOG_DEBUG("malloc a block of 0 bytes -> do nothing\n"); 555 | 556 | return ptr; 557 | } 558 | 559 | /* Allocate the memory withing a protected critical section */ 560 | 561 | UMM_CRITICAL_ENTRY(id_malloc); 562 | 563 | ptr = umm_malloc_core(heap, size); 564 | 565 | UMM_CRITICAL_EXIT(id_malloc); 566 | 567 | return ptr; 568 | } 569 | 570 | /* ------------------------------------------------------------------------ */ 571 | 572 | void *umm_multi_realloc(umm_heap *heap, void *ptr, size_t size) { 573 | UMM_CRITICAL_DECL(id_realloc); 574 | 575 | uint16_t blocks; 576 | uint16_t blockSize; 577 | uint16_t prevBlockSize = 0; 578 | uint16_t nextBlockSize = 0; 579 | 580 | uint16_t c; 581 | 582 | size_t curSize; 583 | 584 | UMM_CHECK_INITIALIZED(); 585 | 586 | /* 587 | * This code looks after the case of a NULL value for ptr. The ANSI C 588 | * standard says that if ptr is NULL and size is non-zero, then we've 589 | * got to work the same a malloc(). If size is also 0, then our version 590 | * of malloc() returns a NULL pointer, which is OK as far as the ANSI C 591 | * standard is concerned. 592 | */ 593 | 594 | if (((void *)NULL == ptr)) { 595 | DBGLOG_DEBUG("realloc the NULL pointer - call malloc()\n"); 596 | 597 | return umm_multi_malloc(heap, size); 598 | } 599 | 600 | /* 601 | * Now we're sure that we have a non_NULL ptr, but we're not sure what 602 | * we should do with it. If the size is 0, then the ANSI C standard says that 603 | * we should operate the same as free. 604 | */ 605 | 606 | if (0 == size) { 607 | DBGLOG_DEBUG("realloc to 0 size, just free the block\n"); 608 | 609 | umm_multi_free(heap, ptr); 610 | 611 | return (void *)NULL; 612 | } 613 | 614 | /* 615 | * Otherwise we need to actually do a reallocation. A naiive approach 616 | * would be to malloc() a new block of the correct size, copy the old data 617 | * to the new block, and then free the old block. 618 | * 619 | * While this will work, we end up doing a lot of possibly unnecessary 620 | * copying. 
So first, let's figure out how many blocks we'll need. 621 | */ 622 | 623 | blocks = umm_blocks(size); 624 | 625 | /* Figure out which block we're in. Note the use of truncated division... */ 626 | 627 | c = (((uint8_t *)ptr) - (uint8_t *)(&(UMM_HEAP[0]))) / UMM_BLOCKSIZE; 628 | 629 | /* Figure out how big this block is ... the free bit is not set :-) */ 630 | 631 | blockSize = (UMM_NBLOCK(c) - c); 632 | 633 | /* Figure out how many bytes are in this block */ 634 | 635 | curSize = (blockSize * UMM_BLOCKSIZE) - (sizeof(((umm_block *)0)->header)); 636 | 637 | /* Protect the critical section... */ 638 | UMM_CRITICAL_ENTRY(id_realloc); 639 | 640 | /* Now figure out if the previous and/or next blocks are free as well as 641 | * their sizes - this will help us to minimize special code later when we 642 | * decide if it's possible to use the adjacent blocks. 643 | * 644 | * We set prevBlockSize and nextBlockSize to non-zero values ONLY if they 645 | * are free! 646 | */ 647 | 648 | if ((UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_FREELIST_MASK)) { 649 | nextBlockSize = (UMM_NBLOCK(UMM_NBLOCK(c)) & UMM_BLOCKNO_MASK) - UMM_NBLOCK(c); 650 | } 651 | 652 | if ((UMM_NBLOCK(UMM_PBLOCK(c)) & UMM_FREELIST_MASK)) { 653 | prevBlockSize = (c - UMM_PBLOCK(c)); 654 | } 655 | 656 | DBGLOG_DEBUG("realloc blocks %i blockSize %i nextBlockSize %i prevBlockSize %i\n", blocks, blockSize, nextBlockSize, prevBlockSize); 657 | 658 | /* 659 | * Ok, now that we're here we know how many blocks we want and the current 660 | * blockSize. The prevBlockSize and nextBlockSize are set and we can figure 661 | * out the best strategy for the new allocation as follows: 662 | * 663 | * 1. If the new block is the same size or smaller than the current block do 664 | * nothing. 665 | * 2. If the next block is free and adding it to the current block gives us 666 | * EXACTLY enough memory, assimilate the next block. This avoids unwanted 667 | * fragmentation of free memory. 668 | * 669 | * The following cases may be better handled with memory copies to reduce 670 | * fragmentation 671 | * 672 | * 3. If the previous block is NOT free and the next block is free and 673 | * adding it to the current block gives us enough memory, assimilate 674 | * the next block. This may introduce a bit of fragmentation. 675 | * 4. If the prev block is free and adding it to the current block gives us 676 | * enough memory, remove the previous block from the free list, assimilate 677 | * it, copy to the new block. 678 | * 5. If the prev and next blocks are free and adding them to the current 679 | * block gives us enough memory, assimilate the next block, remove the 680 | * previous block from the free list, assimilate it, copy to the new block. 681 | * 6. Otherwise try to allocate an entirely new block of memory. If the 682 | * allocation works free the old block and return the new pointer. If 683 | * the allocation fails, return NULL and leave the old block intact. 684 | * 685 | * TODO: Add some conditional code to optimise for less fragmentation 686 | * by simply allocating new memory if we need to copy anyways. 687 | * 688 | * All that's left to do is decide if the fit was exact or not. If the fit 689 | * was not exact, then split the memory block so that we use only the requested 690 | * number of blocks and add what's left to the free list. 
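 *
 * As a concrete walk-through of case 4 (illustrative sizes): if ptr lives
 * in a 2-block allocation, the previous block is a free 3-block region, and
 * the new size needs 4 blocks, then the previous block is unlinked from the
 * free list, umm_assimilate_down() merges it (c moves down by 3 blocks),
 * the old contents are memmove()d to the new start of the block, and the
 * final split-and-free below returns the one leftover block to the free list.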
691 |  */
692 | 
693 |     // Case 1 - block is same size or smaller
694 |     if (blockSize >= blocks) {
695 |         DBGLOG_DEBUG("realloc the same or smaller size block - %i, do nothing\n", blocks);
696 |         /* This space intentionally left blank */
697 | 
698 |         // Case 2 - block + next block fits EXACTLY
699 |     } else if ((blockSize + nextBlockSize) == blocks) {
700 |         DBGLOG_DEBUG("exact realloc using next block - %i\n", blocks);
701 |         umm_assimilate_up(heap, c);
702 |         blockSize += nextBlockSize;
703 | 
704 |         // Case 3 - prev block NOT free and block + next block fits
705 |     } else if ((0 == prevBlockSize) && (blockSize + nextBlockSize) >= blocks) {
706 |         DBGLOG_DEBUG("realloc using next block - %i\n", blocks);
707 |         umm_assimilate_up(heap, c);
708 |         blockSize += nextBlockSize;
709 | 
710 |         // Case 4 - prev block + block fits
711 |     } else if ((prevBlockSize + blockSize) >= blocks) {
712 |         DBGLOG_DEBUG("realloc using prev block - %i\n", blocks);
713 |         umm_disconnect_from_free_list(heap, UMM_PBLOCK(c));
714 |         c = umm_assimilate_down(heap, c, 0);
715 |         memmove((void *)&UMM_DATA(c), ptr, curSize);
716 |         ptr = (void *)&UMM_DATA(c);
717 |         blockSize += prevBlockSize;
718 | 
719 |         // Case 5 - prev block + block + next block fits
720 |     } else if ((prevBlockSize + blockSize + nextBlockSize) >= blocks) {
721 |         DBGLOG_DEBUG("realloc using prev and next block - %i\n", blocks);
722 |         umm_assimilate_up(heap, c);
723 |         umm_disconnect_from_free_list(heap, UMM_PBLOCK(c));
724 |         c = umm_assimilate_down(heap, c, 0);
725 |         memmove((void *)&UMM_DATA(c), ptr, curSize);
726 |         ptr = (void *)&UMM_DATA(c);
727 |         blockSize += (prevBlockSize + nextBlockSize);
728 | 
729 |         // Case 6 - default is we need to realloc a new block
730 |     } else {
731 |         DBGLOG_DEBUG("realloc a completely new block %i\n", blocks);
732 |         void *oldptr = ptr;
733 |         if ((ptr = umm_malloc_core(heap, size))) {
734 |             DBGLOG_DEBUG("realloc %i to a bigger block %i, copy, and free the old\n", blockSize, blocks);
735 |             memcpy(ptr, oldptr, curSize);
736 |             umm_free_core(heap, oldptr);
737 |         } else {
738 |             DBGLOG_DEBUG("realloc %i to a bigger block %i failed - return NULL and leave the old block!\n", blockSize, blocks);
739 |             /* This space intentionally left blank */
740 |         }
741 |         blockSize = blocks;
742 |     }
743 | 
744 |     /* Now all we need to do is figure out if the block fit exactly or if we
745 |      * need to split and free ...
746 |      */
747 | 
748 |     if (blockSize > blocks) {
749 |         DBGLOG_DEBUG("split and free %i blocks from %i\n", blocks, blockSize);
750 |         umm_split_block(heap, c, blocks, 0);
751 |         umm_free_core(heap, (void *)&UMM_DATA(c + blocks));
752 |     }
753 | 
754 |     /* Release the critical section...
 */
755 |     UMM_CRITICAL_EXIT(id_realloc);
756 | 
757 |     return ptr;
758 | }
759 | 
760 | /* ------------------------------------------------------------------------ */
761 | 
762 | void *umm_multi_calloc(umm_heap *heap, size_t num, size_t item_size) {
763 |     void *ret;
764 | 
765 |     ret = umm_multi_malloc(heap, (size_t)(item_size * num));
766 | 
767 |     if (ret) {
768 |         memset(ret, 0x00, (size_t)(item_size * num));
769 |     }
770 | 
771 |     return ret;
772 | }
773 | 
774 | /* ------------------------------------------------------------------------ */
775 | 
776 | /* Single-heap functions */
777 | 
778 | struct umm_heap_config umm_heap_current; // The global heap for single-heap use
779 | 
780 | void umm_init_heap(void *ptr, size_t size) {
781 |     umm_multi_init_heap(&umm_heap_current, ptr, size);
782 | }
783 | 
784 | void umm_init(void) {
785 |     umm_multi_init(&umm_heap_current);
786 | }
787 | 
788 | void *umm_malloc(size_t size) {
789 |     return umm_multi_malloc(&umm_heap_current, size);
790 | }
791 | 
792 | void *umm_calloc(size_t num, size_t size) {
793 |     return umm_multi_calloc(&umm_heap_current, num, size);
794 | }
795 | 
796 | void *umm_realloc(void *ptr, size_t size) {
797 |     return umm_multi_realloc(&umm_heap_current, ptr, size);
798 | }
799 | 
800 | void umm_free(void *ptr) {
801 |     umm_multi_free(&umm_heap_current, ptr);
802 | }
--------------------------------------------------------------------------------
/src/umm_malloc.h:
--------------------------------------------------------------------------------
1 | /* ----------------------------------------------------------------------------
2 |  * umm_malloc.h - a memory allocator for embedded systems (microcontrollers)
3 |  *
4 |  * See LICENSE for copyright notice
5 |  * ----------------------------------------------------------------------------
6 |  */
7 | 
8 | #ifndef UMM_MALLOC_H
9 | #define UMM_MALLOC_H
10 | 
11 | #include <stdint.h>
12 | #include <stddef.h>
13 | 
14 | #ifdef __cplusplus
15 | extern "C" {
16 | #endif
17 | 
18 | /* ------------------------------------------------------------------------ */
19 | 
20 | typedef struct umm_heap_config {
21 |     void *pheap;
22 |     size_t heap_size;
23 |     uint16_t numblocks;
24 | } umm_heap;
25 | 
26 | extern void umm_multi_init_heap(umm_heap *heap, void *ptr, size_t size);
27 | extern void umm_multi_init(umm_heap *heap);
28 | 
29 | extern void *umm_multi_malloc(umm_heap *heap, size_t size);
30 | extern void *umm_multi_calloc(umm_heap *heap, size_t num, size_t size);
31 | extern void *umm_multi_realloc(umm_heap *heap, void *ptr, size_t size);
32 | extern void umm_multi_free(umm_heap *heap, void *ptr);
33 | 
34 | /* ------------------------------------------------------------------------ */
35 | 
36 | extern void umm_init_heap(void *ptr, size_t size);
37 | extern void umm_init(void);
38 | 
39 | extern void *umm_malloc(size_t size);
40 | extern void *umm_calloc(size_t num, size_t size);
41 | extern void *umm_realloc(void *ptr, size_t size);
42 | extern void umm_free(void *ptr);
43 | 
44 | /* ------------------------------------------------------------------------ */
45 | 
46 | #ifdef __cplusplus
47 | }
48 | #endif
49 | 
50 | #endif /* UMM_MALLOC_H */
51 | 
--------------------------------------------------------------------------------
/src/umm_malloc_cfg.h:
--------------------------------------------------------------------------------
1 | /*
2 |  * Configuration for umm_malloc - DO NOT EDIT THIS FILE BY HAND!
3 |  *
4 |  * NOTE WELL: Your project MUST have a umm_malloc_cfgport.h - even if
5 |  *            it's empty!!!
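 *
 * For a concrete starting point: the unit test port file shipped in
 * unittest/config/host/umm_malloc_cfgport.h boils down to just
 *
 *    #define UMM_INFO
 *    #define UMM_INLINE_METRICS
 *    #define UMM_POISON_CHECK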
6 |  *
7 |  * Refer to the notes below for details on the umm_malloc configuration
8 |  * options.
9 |  */
10 | 
11 | #ifndef _UMM_MALLOC_CFG_H
12 | #define _UMM_MALLOC_CFG_H
13 | 
14 | #include <stdint.h>
15 | #include <stddef.h>
16 | #include <stdbool.h>
17 | 
18 | /*
19 |  * There are a number of defines you can set at compile time that affect how
20 |  * the memory allocator will operate.
21 |  *
22 |  * You should NOT edit this file, it may be changed from time to time in
23 |  * the upstream project. Instead, you can do one of the following (in order
24 |  * of priority):
25 |  *
26 |  * 1. Pass in the override values on the command line using -D UMM_xxx
27 |  * 2. Pass in the filename holding override values using -D UMM_MALLOC_CFGFILE
28 |  * 3. Set up defaults in a file called umm_malloc_cfgport.h
29 |  *
30 |  * NOTE WELL: For the command line -D options to take highest priority, your
31 |  *            project level override file must check that the UMM_xxx
32 |  *            value is not already defined before overriding
33 |  *
34 |  * Unless otherwise noted, the default state of these values is #undef-ined!
35 |  *
36 |  * As this is the top level configuration file, it is responsible for making
37 |  * sure that the configuration makes sense. For example the UMM_BLOCK_BODY_SIZE
38 |  * is a minimum of 8 and a multiple of 4.
39 |  *
40 |  * UMM_BLOCK_BODY_SIZE
41 |  *
42 |  * Defines the umm_block[].body size - it is 8 by default
43 |  *
44 |  * This assumes umm_ptr is a pair of uint16_t values
45 |  * which is 4 bytes plus the data[] array which is another 4 bytes
46 |  * for a total of 8.
47 |  *
48 |  * NOTE WELL that the umm_block[].body size must be a multiple of
49 |  *           the natural access size of the host machine to ensure
50 |  *           that accesses are efficient.
51 |  *
52 |  * We have not verified the checks below for 64 bit machines
53 |  * because this library is targeted for 32 bit machines.
54 |  *
55 |  * UMM_NUM_HEAPS
56 |  *
57 |  * Set to the maximum number of heaps that can be defined by the
58 |  * application - defaults to 1.
59 |  *
60 |  * UMM_BEST_FIT (default)
61 |  *
62 |  * Set this if you want to use a best-fit algorithm for allocating new blocks.
63 |  * On by default, turned off by UMM_FIRST_FIT
64 |  *
65 |  * UMM_FIRST_FIT
66 |  *
67 |  * Set this if you want to use a first-fit algorithm for allocating new blocks.
68 |  * Faster than UMM_BEST_FIT but can result in higher fragmentation.
69 |  *
70 |  * UMM_INFO
71 |  *
72 |  * Set if you want the ability to calculate metrics on demand
73 |  *
74 |  * UMM_INLINE_METRICS
75 |  *
76 |  * Set this if you want to have access to a minimal set of heap metrics that
77 |  * can be used to gauge heap health.
78 |  * Setting this at compile time will automatically set UMM_INFO.
79 |  * Note that enabling this define will add a slight runtime penalty.
80 |  *
81 |  * UMM_CHECK_INITIALIZED
82 |  *
83 |  * Set if you want to be able to verify that the heap is initialized
84 |  * before any operation - the default is no check. You may set the
85 |  * UMM_CHECK_INITIALIZED macro to the following provided macros, or
86 |  * write your own handler:
87 |  *
88 |  *    UMM_INIT_IF_UNINITIALIZED
89 |  *    UMM_HANG_IF_UNINITIALIZED
90 |  *
91 |  * UMM_INTEGRITY_CHECK
92 |  *
93 |  * Set if you want to be able to verify that the heap is semantically correct
94 |  * before or after any heap operation - all of the block indexes in the heap
95 |  * make sense.
96 |  * Slows execution dramatically but catches errors really quickly.
97 |  *
98 |  * UMM_POISON_CHECK
99 |  *
100 |  * Set if you want to be able to leave a poison buffer around each allocation.
101 | * Note this uses an extra 8 bytes per allocation, but you get the benefit of 102 | * being able to detect if your program is writing past an allocated buffer. 103 | * 104 | * DBGLOG_ENABLE 105 | * 106 | * Set if you want to enable logging - the default is to use printf() but 107 | * if you have any special requirements such as thread safety or a custom 108 | * logging routine - you are free to everride the default 109 | * 110 | * DBGLOG_LEVEL=n 111 | * 112 | * Set n to a value from 0 to 6 depending on how verbose you want the debug 113 | * log to be 114 | * 115 | * UMM_MAX_CRITICAL_DEPTH_CHECK=n 116 | * 117 | * Set this if you want to compile in code to verify that the critical 118 | * section maximum depth is not exceeded. If set, the value must be greater 119 | * than 0. 120 | * 121 | * The critical depth checking is only needed if your target environment 122 | * does not support reading and writing the current interrupt enable state. 123 | * 124 | * Support for this library in a multitasking environment is provided when 125 | * you add bodies to the UMM_CRITICAL_ENTRY and UMM_CRITICAL_EXIT macros 126 | * (see below) 127 | * 128 | * ---------------------------------------------------------------------------- 129 | */ 130 | 131 | #ifdef UMM_MALLOC_CFGFILE 132 | #include UMM_MALLOC_CFGFILE 133 | #else 134 | #include 135 | #endif 136 | 137 | #ifdef __cplusplus 138 | extern "C" { 139 | #endif 140 | 141 | /* Forward declaration of umm_heap_config */ 142 | struct umm_heap_config; 143 | 144 | /* A couple of macros to make packing structures less compiler dependent */ 145 | 146 | #ifndef UMM_H_ATTPACKPRE 147 | #define UMM_H_ATTPACKPRE 148 | #endif 149 | #ifndef UMM_H_ATTPACKSUF 150 | #define UMM_H_ATTPACKSUF __attribute__((__packed__)) 151 | #endif 152 | 153 | /* -------------------------------------------------------------------------- */ 154 | 155 | #ifndef UMM_INIT_IF_UNINITIALIZED 156 | #define UMM_INIT_IF_UNINITIALIZED() do { if (UMM_HEAP == NULL) { umm_init(); } } while(0) 157 | #endif 158 | 159 | #ifndef UMM_HANG_IF_UNINITIALIZED 160 | #define UMM_HANG_IF_UNINITIALIZED() do { if (UMM_HEAP == NULL) { while(1) {} } } while(0) 161 | #endif 162 | 163 | #ifndef UMM_CHECK_INITIALIZED 164 | #define UMM_CHECK_INITIALIZED() 165 | #endif 166 | 167 | /* -------------------------------------------------------------------------- */ 168 | 169 | #ifndef UMM_BLOCK_BODY_SIZE 170 | #define UMM_BLOCK_BODY_SIZE (8) 171 | #endif 172 | 173 | #define UMM_MIN_BLOCK_BODY_SIZE (8) 174 | 175 | #if (UMM_BLOCK_BODY_SIZE < UMM_MIN_BLOCK_BODY_SIZE) 176 | #error UMM_BLOCK_BODY_SIZE must be at least 8! 177 | #endif 178 | 179 | #if ((UMM_BLOCK_BODY_SIZE % 4) != 0) 180 | #error UMM_BLOCK_BODY_SIZE must be multiple of 4! 181 | #endif 182 | 183 | /* -------------------------------------------------------------------------- */ 184 | 185 | #ifndef UMM_NUM_HEAPS 186 | #define UMM_NUM_HEAPS (1) 187 | #endif 188 | 189 | #if (UMM_NUM_HEAPS < 1) 190 | #error UMM_NUM_HEAPS must be at least 1! 191 | #endif 192 | 193 | /* -------------------------------------------------------------------------- */ 194 | 195 | #ifdef UMM_BEST_FIT 196 | #ifdef UMM_FIRST_FIT 197 | #error Both UMM_BEST_FIT and UMM_FIRST_FIT are defined - pick one! 
198 | #endif 199 | #else /* UMM_BEST_FIT is not defined */ 200 | #ifndef UMM_FIRST_FIT 201 | #define UMM_BEST_FIT 202 | #endif 203 | #endif 204 | 205 | /* -------------------------------------------------------------------------- */ 206 | 207 | #ifdef UMM_INLINE_METRICS 208 | #define UMM_MULTI_FRAGMENTATION_METRIC_INIT(h) umm_multi_fragmentation_metric_init(h) 209 | #define UMM_MULTI_FRAGMENTATION_METRIC_ADD(h,c) umm_multi_fragmentation_metric_add(h,c) 210 | #define UMM_MULTI_FRAGMENTATION_METRIC_REMOVE(h,c) umm_multi_fragmentation_metric_remove(h,c) 211 | #define UMM_FRAGMENTATION_METRIC_INIT() umm_fragmentation_metric_init() 212 | #define UMM_FRAGMENTATION_METRIC_ADD(c) umm_fragmentation_metric_add(c) 213 | #define UMM_FRAGMENTATION_METRIC_REMOVE(c) umm_fragmentation_metric_remove(c) 214 | #ifndef UMM_INFO 215 | #define UMM_INFO 216 | #endif 217 | #else 218 | #define UMM_FRAGMENTATION_METRIC_INIT() 219 | #define UMM_FRAGMENTATION_METRIC_ADD(c) 220 | #define UMM_FRAGMENTATION_METRIC_REMOVE(c) 221 | #endif // UMM_INLINE_METRICS 222 | 223 | /* -------------------------------------------------------------------------- */ 224 | 225 | #ifdef UMM_INFO 226 | typedef struct UMM_HEAP_INFO_t { 227 | unsigned int totalEntries; 228 | unsigned int usedEntries; 229 | unsigned int freeEntries; 230 | 231 | unsigned int totalBlocks; 232 | unsigned int usedBlocks; 233 | unsigned int freeBlocks; 234 | unsigned int freeBlocksSquared; 235 | 236 | unsigned int maxFreeContiguousBlocks; 237 | 238 | int usage_metric; 239 | int fragmentation_metric; 240 | } 241 | UMM_HEAP_INFO; 242 | 243 | extern UMM_HEAP_INFO ummHeapInfo; 244 | 245 | extern void *umm_multi_info(struct umm_heap_config *heap, void *ptr, bool force); 246 | extern size_t umm_multi_free_heap_size(struct umm_heap_config *heap); 247 | extern size_t umm_multi_max_free_block_size(struct umm_heap_config *heap); 248 | extern int umm_multi_usage_metric(struct umm_heap_config *heap); 249 | extern int umm_multi_fragmentation_metric(struct umm_heap_config *heap); 250 | extern void *umm_info(void *ptr, bool force); 251 | extern size_t umm_free_heap_size(void); 252 | extern size_t umm_max_free_block_size(void); 253 | extern int umm_usage_metric(void); 254 | extern int umm_fragmentation_metric(void); 255 | #else 256 | #define umm_multi_info(h,p,b) 257 | #define umm_multi_free_heap_size(h) (0) 258 | #define umm_multi_max_free_block_size(h) (0) 259 | #define umm_multi_usage_metric(h) (0) 260 | #define umm_multi_fragmentation_metric(h) (0) 261 | #define umm_info(p,b) 262 | #define umm_free_heap_size() (0) 263 | #define umm_max_free_block_size() (0) 264 | #define umm_usage_metric() (0) 265 | #define umm_fragmentation_metric() (0) 266 | #endif 267 | 268 | /* 269 | * Three macros to make it easier to protect the memory allocator in a 270 | * multitasking system. You should set these macros up to use whatever your 271 | * system uses for this purpose. You can disable interrupts entirely, or just 272 | * disable task switching - it's up to you 273 | * 274 | * If needed, UMM_CRITICAL_DECL can be used to declare or initialize 275 | * synchronization elements before their use. "tag" can be used to add context 276 | * uniqueness to the declaration. 277 | * exp. #define UMM_CRITICAL_DECL(tag) uint32_t _saved_ps_##tag 278 | * Another possible use for "tag", activity identifier when profiling time 279 | * spent in UMM_CRITICAL. The "tag" values used are id_malloc, id_realloc, 280 | * id_free, id_poison, id_integrity, and id_info. 
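 *
 * A minimal sketch for a single-core ARM Cortex-M port - assuming the CMSIS
 * PRIMASK helpers are available, and shown only as an illustration:
 *
 *   #define UMM_CRITICAL_DECL(tag)  uint32_t _saved_ps_##tag
 *   #define UMM_CRITICAL_ENTRY(tag) do { _saved_ps_##tag = __get_PRIMASK(); \
 *                                        __disable_irq(); } while (0)
 *   #define UMM_CRITICAL_EXIT(tag)  __set_PRIMASK(_saved_ps_##tag)
 *
 * Because every tag saves its own interrupt state, this pattern nests safely.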
281 | * 282 | * NOTE WELL that these macros MUST be allowed to nest, because umm_free() is 283 | * called from within umm_malloc() 284 | */ 285 | 286 | #ifndef UMM_CRITICAL_DECL 287 | #define UMM_CRITICAL_DECL(tag) 288 | #endif 289 | 290 | #ifdef UMM_MAX_CRITICAL_DEPTH_CHECK 291 | extern int umm_critical_depth; 292 | extern int umm_max_critical_depth; 293 | #ifndef UMM_CRITICAL_ENTRY 294 | #define UMM_CRITICAL_ENTRY(tag) { \ 295 | ++umm_critical_depth; \ 296 | if (umm_critical_depth > umm_max_critical_depth) { \ 297 | umm_max_critical_depth = umm_critical_depth; \ 298 | } \ 299 | } 300 | #endif 301 | #ifndef UMM_CRITICAL_EXIT 302 | #define UMM_CRITICAL_EXIT(tag) (umm_critical_depth--) 303 | #endif 304 | #else 305 | #ifndef UMM_CRITICAL_ENTRY 306 | #define UMM_CRITICAL_ENTRY(tag) 307 | #endif 308 | #ifndef UMM_CRITICAL_EXIT 309 | #define UMM_CRITICAL_EXIT(tag) 310 | #endif 311 | #endif 312 | 313 | /* 314 | * Enables heap integrity check before any heap operation. It affects 315 | * performance, but does NOT consume extra memory. 316 | * 317 | * If integrity violation is detected, the message is printed and user-provided 318 | * callback is called: `UMM_HEAP_CORRUPTION_CB()` 319 | * 320 | * Note that not all buffer overruns are detected: each buffer is aligned by 321 | * 4 bytes, so there might be some trailing "extra" bytes which are not checked 322 | * for corruption. 323 | */ 324 | 325 | #ifdef UMM_INTEGRITY_CHECK 326 | extern bool umm_multi_integrity_check(struct umm_heap_config *heap); 327 | extern bool umm_integrity_check(void); 328 | #define INTEGRITY_CHECK() umm_integrity_check() 329 | extern void umm_corruption(void); 330 | #define UMM_HEAP_CORRUPTION_CB() printf("Heap Corruption!") 331 | #else 332 | #define INTEGRITY_CHECK() (1) 333 | #endif 334 | 335 | /* 336 | * Enables heap poisoning: add predefined value (poison) before and after each 337 | * allocation, and check before each heap operation that no poison is 338 | * corrupted. 339 | * 340 | * Other than the poison itself, we need to store exact user-requested length 341 | * for each buffer, so that overrun by just 1 byte will be always noticed. 342 | * 343 | * Customizations: 344 | * 345 | * UMM_POISON_SIZE_BEFORE: 346 | * Number of poison bytes before each block, e.g. 4 347 | * UMM_POISON_SIZE_AFTER: 348 | * Number of poison bytes after each block e.g. 4 349 | * UMM_POISONED_BLOCK_LEN_TYPE 350 | * Type of the exact buffer length, e.g. `uint16_t` 351 | * 352 | * NOTE: each allocated buffer is aligned by 4 bytes. But when poisoning is 353 | * enabled, actual pointer returned to user is shifted by 354 | * `(sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE)`. 355 | * 356 | * It's your responsibility to make resulting pointers aligned appropriately. 
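 *
 * With the default sizes and length type defined below, each allocation is
 * laid out like this (an illustrative sketch):
 *
 *   | len (2 bytes) | 4 poison bytes | user data ... | 4 poison bytes |
 *                                    ^-- pointer returned to the caller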
357 | * 358 | * If poison corruption is detected, the message is printed and user-provided 359 | * callback is called: `UMM_HEAP_CORRUPTION_CB()` 360 | */ 361 | 362 | #ifdef UMM_POISON_CHECK 363 | #define UMM_POISON_SIZE_BEFORE (4) 364 | #define UMM_POISON_SIZE_AFTER (4) 365 | #define UMM_POISONED_BLOCK_LEN_TYPE uint16_t 366 | 367 | extern void *umm_multi_poison_malloc(struct umm_heap_config *heap, size_t size); 368 | extern void *umm_multi_poison_calloc(struct umm_heap_config *heap, size_t num, size_t size); 369 | extern void *umm_multi_poison_realloc(struct umm_heap_config *heap, void *ptr, size_t size); 370 | extern void umm_multi_poison_free(struct umm_heap_config *heap, void *ptr); 371 | extern bool umm_multi_poison_check(struct umm_heap_config *heap); 372 | 373 | extern void *umm_poison_malloc(size_t size); 374 | extern void *umm_poison_calloc(size_t num, size_t size); 375 | extern void *umm_poison_realloc(void *ptr, size_t size); 376 | extern void umm_poison_free(void *ptr); 377 | extern bool umm_poison_check(void); 378 | 379 | #define POISON_CHECK() umm_poison_check() 380 | #else 381 | #define POISON_CHECK() (1) 382 | #endif 383 | 384 | /* 385 | * Add blank macros for DBGLOG_xxx() - if you want to override these on 386 | * a per-source module basis, you must define DBGLOG_LEVEL and then 387 | * #include "dbglog.h" 388 | */ 389 | 390 | #define DBGLOG_TRACE(format, ...) 391 | #define DBGLOG_DEBUG(format, ...) 392 | #define DBGLOG_CRITICAL(format, ...) 393 | #define DBGLOG_ERROR(format, ...) 394 | #define DBGLOG_WARNING(format, ...) 395 | #define DBGLOG_INFO(format, ...) 396 | #define DBGLOG_FORCE(format, ...) 397 | 398 | #ifdef __cplusplus 399 | } 400 | #endif 401 | 402 | #endif /* _UMM_MALLOC_CFG_H */ 403 | -------------------------------------------------------------------------------- /src/umm_poison.c: -------------------------------------------------------------------------------- 1 | /* poisoning (UMM_POISON_CHECK) {{{ */ 2 | #if defined(UMM_POISON_CHECK) 3 | #define POISON_BYTE (0xa5) 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | extern umm_heap umm_heap_current; 10 | 11 | /* 12 | * Yields a size of the poison for the block of size `s`. 13 | * If `s` is 0, returns 0. 14 | */ 15 | static size_t poison_size(size_t s) { 16 | return s ? (UMM_POISON_SIZE_BEFORE + 17 | sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + 18 | UMM_POISON_SIZE_AFTER) 19 | : 0; 20 | } 21 | 22 | /* 23 | * Print memory contents starting from given `ptr` 24 | */ 25 | static void dump_mem(const void *ptr, size_t len) { 26 | while (len--) { 27 | DBGLOG_ERROR(" 0x%.2x", (*(uint8_t *)ptr++)); 28 | } 29 | } 30 | 31 | /* 32 | * Put poison data at given `ptr` and `poison_size` 33 | */ 34 | static void put_poison(void *ptr, size_t poison_size) { 35 | memset(ptr, POISON_BYTE, poison_size); 36 | } 37 | 38 | /* 39 | * Check poison data at given `ptr` and `poison_size`. `where` is a pointer to 40 | * a string, either "before" or "after", meaning, before or after the block. 41 | * 42 | * If poison is there, returns 1. 43 | * Otherwise, prints the appropriate message, and returns 0. 
44 | */ 45 | static bool check_poison(const void *ptr, size_t poison_size, 46 | const void *where) { 47 | size_t i; 48 | bool ok = true; 49 | 50 | for (i = 0; i < poison_size; i++) { 51 | if (((uint8_t *)ptr)[i] != POISON_BYTE) { 52 | ok = false; 53 | break; 54 | } 55 | } 56 | 57 | if (!ok) { 58 | DBGLOG_ERROR("No poison %s block at: 0x%08x, actual data:", (char *)where, DBGLOG_32_BIT_PTR(ptr)); 59 | dump_mem(ptr, poison_size); 60 | DBGLOG_ERROR("\n"); 61 | } 62 | 63 | return ok; 64 | } 65 | 66 | /* 67 | * Check if a block is properly poisoned. Must be called only for non-free 68 | * blocks. 69 | */ 70 | static bool check_poison_block(umm_block *pblock) { 71 | bool ok = true; 72 | 73 | if (pblock->header.used.next & UMM_FREELIST_MASK) { 74 | DBGLOG_ERROR("check_poison_block is called for free block 0x%08x\n", DBGLOG_32_BIT_PTR(pblock)); 75 | } else { 76 | /* the block is used; let's check poison */ 77 | char *pc = (char *)pblock->body.data; 78 | void *pc_cur; 79 | 80 | pc_cur = pc + sizeof(UMM_POISONED_BLOCK_LEN_TYPE); 81 | if (!check_poison(pc_cur, UMM_POISON_SIZE_BEFORE, "before")) { 82 | ok = false; 83 | goto clean; 84 | } 85 | 86 | pc_cur = pc + *((UMM_POISONED_BLOCK_LEN_TYPE *)pc) - UMM_POISON_SIZE_AFTER; 87 | if (!check_poison(pc_cur, UMM_POISON_SIZE_AFTER, "after")) { 88 | ok = false; 89 | goto clean; 90 | } 91 | } 92 | 93 | clean: 94 | return ok; 95 | } 96 | 97 | /* 98 | * Takes a pointer returned by actual allocator function (`umm_malloc` or 99 | * `umm_realloc`), puts appropriate poison, and returns adjusted pointer that 100 | * should be returned to the user. 101 | * 102 | * `size_w_poison` is a size of the whole block, including a poison. 103 | */ 104 | static void *get_poisoned(void *ptr, size_t size_w_poison) { 105 | if (size_w_poison != 0 && ptr != NULL) { 106 | 107 | /* Poison beginning and the end of the allocated chunk */ 108 | put_poison((char *)ptr + sizeof(UMM_POISONED_BLOCK_LEN_TYPE), 109 | UMM_POISON_SIZE_BEFORE); 110 | put_poison((char *)ptr + size_w_poison - UMM_POISON_SIZE_AFTER, 111 | UMM_POISON_SIZE_AFTER); 112 | 113 | /* Put exact length of the user's chunk of memory */ 114 | *(UMM_POISONED_BLOCK_LEN_TYPE *)ptr = (UMM_POISONED_BLOCK_LEN_TYPE)size_w_poison; 115 | 116 | /* Return pointer at the first non-poisoned byte */ 117 | return (char *)ptr + sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE; 118 | } else { 119 | return ptr; 120 | } 121 | } 122 | 123 | /* 124 | * Takes "poisoned" pointer (i.e. pointer returned from `get_poisoned()`), 125 | * and checks that the poison of this particular block is still there. 126 | * 127 | * Returns unpoisoned pointer, i.e. actual pointer to the allocated memory. 128 | */ 129 | static void *get_unpoisoned(umm_heap *heap, void *ptr) { 130 | if (ptr != NULL) { 131 | uint16_t c; 132 | 133 | ptr = (char *)ptr - (sizeof(UMM_POISONED_BLOCK_LEN_TYPE) + UMM_POISON_SIZE_BEFORE); 134 | 135 | /* Figure out which block we're in. Note the use of truncated division... 
*/ 136 | c = (((char *)ptr) - (char *)(&(UMM_HEAP[0]))) / UMM_BLOCKSIZE; 137 | 138 | check_poison_block(&UMM_BLOCK(c)); 139 | } 140 | 141 | return ptr; 142 | } 143 | 144 | /* }}} */ 145 | 146 | /* ------------------------------------------------------------------------ */ 147 | 148 | void *umm_poison_malloc(size_t size) { 149 | return umm_multi_poison_malloc(&umm_heap_current, size); 150 | } 151 | 152 | void *umm_multi_poison_malloc(umm_heap *heap, size_t size) { 153 | void *ret; 154 | 155 | size += poison_size(size); 156 | 157 | ret = umm_malloc(size); 158 | 159 | ret = get_poisoned(ret, size); 160 | 161 | return ret; 162 | } 163 | 164 | /* ------------------------------------------------------------------------ */ 165 | 166 | void *umm_poison_calloc(size_t num, size_t item_size) { 167 | return umm_multi_poison_calloc(&umm_heap_current, num, item_size); 168 | } 169 | 170 | void *umm_multi_poison_calloc(umm_heap *heap, size_t num, size_t item_size) { 171 | void *ret; 172 | size_t size = item_size * num; 173 | 174 | size += poison_size(size); 175 | 176 | ret = umm_malloc(size); 177 | 178 | if (NULL != ret) { 179 | memset(ret, 0x00, size); 180 | } 181 | 182 | ret = get_poisoned(ret, size); 183 | 184 | return ret; 185 | } 186 | 187 | /* ------------------------------------------------------------------------ */ 188 | 189 | void *umm_poison_realloc(void *ptr, size_t size) { 190 | return umm_multi_poison_realloc(&umm_heap_current, ptr, size); 191 | } 192 | 193 | void *umm_multi_poison_realloc(umm_heap *heap, void *ptr, size_t size) { 194 | void *ret; 195 | 196 | ptr = get_unpoisoned(heap, ptr); 197 | 198 | size += poison_size(size); 199 | ret = umm_realloc(ptr, size); 200 | 201 | ret = get_poisoned(ret, size); 202 | 203 | return ret; 204 | } 205 | 206 | /* ------------------------------------------------------------------------ */ 207 | 208 | void umm_poison_free(void *ptr) { 209 | umm_multi_poison_free(&umm_heap_current, ptr); 210 | } 211 | 212 | void umm_multi_poison_free(umm_heap *heap, void *ptr) { 213 | 214 | ptr = get_unpoisoned(heap, ptr); 215 | 216 | umm_free(ptr); 217 | } 218 | 219 | /* 220 | * Iterates through all blocks in the heap, and checks poison for all used 221 | * blocks. 
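 *
 * An illustrative way to use this in a UMM_POISON_CHECK build is to call it
 * periodically, e.g. from an idle task, and treat a false return as heap
 * corruption:
 *
 *   if (!umm_poison_check()) {
 *       DBGLOG_CRITICAL("poison trampled - heap is corrupt\n");
 *   }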
222 | */ 223 | 224 | bool umm_poison_check(void) { 225 | return umm_multi_poison_check(&umm_heap_current); 226 | } 227 | 228 | bool umm_multi_poison_check(umm_heap *heap) { 229 | UMM_CRITICAL_DECL(id_poison); 230 | 231 | bool ok = true; 232 | unsigned short int cur; 233 | 234 | UMM_CHECK_INITIALIZED(); 235 | 236 | UMM_CRITICAL_ENTRY(id_poison); 237 | 238 | /* Now iterate through the blocks list */ 239 | cur = UMM_NBLOCK(0) & UMM_BLOCKNO_MASK; 240 | 241 | while (UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK) { 242 | if (!(UMM_NBLOCK(cur) & UMM_FREELIST_MASK)) { 243 | /* This is a used block (not free), so, check its poison */ 244 | ok = check_poison_block(&UMM_BLOCK(cur)); 245 | if (!ok) { 246 | break; 247 | } 248 | } 249 | 250 | cur = UMM_NBLOCK(cur) & UMM_BLOCKNO_MASK; 251 | } 252 | UMM_CRITICAL_EXIT(id_poison); 253 | 254 | return ok; 255 | } 256 | 257 | /* ------------------------------------------------------------------------ */ 258 | 259 | #endif 260 | -------------------------------------------------------------------------------- /unittest/config/host/umm_malloc_cfgport.h: -------------------------------------------------------------------------------- 1 | #warning Find a way to set these on the make command line to avoid looping in make 2 | 3 | #define UMM_INFO 4 | #define UMM_INLINE_METRICS 5 | #define UMM_POISON_CHECK --------------------------------------------------------------------------------
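--------------------------------------------------------------------------------

An illustrative usage sketch of the public API declared in src/umm_malloc.h
follows. It is not part of the repository: the file name, buffer size, and
contents are invented for the example, and it assumes the sources are built
with a (possibly empty) umm_malloc_cfgport.h on the include path.

/* example_basic.c - exercise the single-heap umm_malloc API end to end */
#include <stdint.h>
#include <string.h>

#include "umm_malloc.h"

/* Any suitably aligned buffer can back the heap */
static uint32_t heap_store[4096 / sizeof(uint32_t)];

int main(void) {
    /* Explicit init - the default build does not self-initialize */
    umm_init_heap(heap_store, sizeof(heap_store));

    char *msg = umm_malloc(16);
    if (msg != NULL) {
        strcpy(msg, "hello, umm");

        /* Grows in place when a neighboring free block allows it */
        char *bigger = umm_realloc(msg, 64);
        if (bigger != NULL) {
            msg = bigger;
        }
    }

    umm_free(msg); /* freeing NULL is a documented no-op */

    return 0;
}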