├── .vscode └── settings.json ├── README.md ├── images ├── v1-fiveThread.png ├── v1-oneThread.png ├── v1-twoThread.png ├── v1.jpg ├── v2-functionalTest.png ├── v2-performanceTest.png ├── v2.png ├── v3-functionalTest.png └── v3-performanceTest.png ├── v1 ├── CMakeLists.txt ├── include │ └── MemoryPool.h ├── src │ └── MemoryPool.cpp └── tests │ └── UnitTest.cpp ├── v2 ├── CMakeLists.txt ├── include │ ├── CentralCache.h │ ├── Common.h │ ├── MemoryPool.h │ ├── PageCache.h │ └── ThreadCache.h ├── src │ ├── CentralCache.cpp │ ├── PageCache.cpp │ └── ThreadCache.cpp └── tests │ ├── PerformanceTest.cpp │ └── UnitTest.cpp └── v3 ├── CMakeLists.txt ├── include ├── CentralCache.h ├── Common.h ├── MemoryPool.h ├── PageCache.h └── ThreadCache.h ├── src ├── CentralCache.cpp ├── PageCache.cpp └── ThreadCache.cpp └── tests ├── PerformanceTest.cpp └── UnitTest.cpp /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "cctype": "cpp", 4 | "clocale": "cpp", 5 | "cmath": "cpp", 6 | "cstdarg": "cpp", 7 | "cstddef": "cpp", 8 | "cstdio": "cpp", 9 | "cstdlib": "cpp", 10 | "cstring": "cpp", 11 | "ctime": "cpp", 12 | "cwchar": "cpp", 13 | "cwctype": "cpp", 14 | "array": "cpp", 15 | "atomic": "cpp", 16 | "*.tcc": "cpp", 17 | "chrono": "cpp", 18 | "condition_variable": "cpp", 19 | "cstdint": "cpp", 20 | "deque": "cpp", 21 | "unordered_map": "cpp", 22 | "vector": "cpp", 23 | "exception": "cpp", 24 | "algorithm": "cpp", 25 | "functional": "cpp", 26 | "iterator": "cpp", 27 | "map": "cpp", 28 | "memory": "cpp", 29 | "memory_resource": "cpp", 30 | "numeric": "cpp", 31 | "optional": "cpp", 32 | "random": "cpp", 33 | "ratio": "cpp", 34 | "string": "cpp", 35 | "string_view": "cpp", 36 | "system_error": "cpp", 37 | "tuple": "cpp", 38 | "type_traits": "cpp", 39 | "utility": "cpp", 40 | "fstream": "cpp", 41 | "initializer_list": "cpp", 42 | "iomanip": "cpp", 43 | "iosfwd": "cpp", 44 | "iostream": "cpp", 45 | 
"istream": "cpp", 46 | "limits": "cpp", 47 | "mutex": "cpp", 48 | "new": "cpp", 49 | "ostream": "cpp", 50 | "sstream": "cpp", 51 | "stdexcept": "cpp", 52 | "streambuf": "cpp", 53 | "thread": "cpp", 54 | "cinttypes": "cpp", 55 | "typeinfo": "cpp" 56 | } 57 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kama-memoryPool 2 | 【代码随想录知识星球】项目分享:Kama-memoryPool 3 | 4 | >⭐️ 本项目为[【代码随想录知识星球】](https://programmercarl.com/other/kstar.html) 教学项目 5 | >⭐️ 在 [内存池文档](https://www.programmercarl.com/other/project_neicun.html) 里详细讲解:项目前置知识 + 项目细节 + 代码解读 + 项目难点 + 面试题与回答 + 简历写法 + 项目拓展。 全面帮助你用这个项目求职面试! 6 | ## 项目介绍 7 | 本项目是基于 C++ 实现的自定义内存池框架,旨在提高内存分配和释放的效率,特别是在多线程环境下。 8 | 该项目中实现的内存池主要分为两个版本,分别是目录 v1 和 v2 ( v3 在 v2 基础上进行了一定优化),这两个版本的内存池设计思路大不相同。 9 | ### v1 介绍 10 | 基于哈希映射的多种定长内存分配器,可用于替换 new 和 delete 等内存申请释放的系统调用。包含以下主要功能: 11 | - 内存分配:提供 allocate 方法,从内存池中分配内存块。 12 | - 内存释放:提供 deallocate 方法,将内存块归还到内存池。 13 | - 内存块管理:通过 allocateNewBlock 方法管理内存块的分配和释放。 14 | - 自由链表:使用无锁的自由链表管理空闲内存块,提高并发性能。 15 | 16 | 项目架构图如下: 17 | ![alt text](images/v1.jpg) 18 | 19 | ### v2、v3 介绍 20 | 该项目包括以下主要功能: 21 | - 线程本地缓存(ThreadCache):每个线程维护自己的内存块链表,减少线程间的锁竞争,提高内存分配效率。 22 | - 中心缓存(CentralCache):用于管理多个线程共享的内存块,支持批量分配和回收,优化内存利用率。 23 | - 页面缓存(PageCache):负责从操作系统申请和释放大块内存,支持内存块的合并和分割,减少内存碎片。 24 | - 自旋锁和原子操作:在多线程环境下使用自旋锁和原子操作,确保线程安全的同时减少锁的开销。 25 | 26 | 项目架构图如下: 27 | ![alt text](images/v2.png) 28 | 29 | ## 编译 30 | 先进入 v1 或 v2 或 v3 项目目录 31 | ``` 32 | cd v1 33 | ``` 34 | 在项目目录下创建build目录,并进入该目录 35 | ``` 36 | mkdir build 37 | cd build 38 | ``` 39 | 执行 cmake 命令 40 | ``` 41 | cmake .. 
42 | ``` 43 | 执行 make 命令 44 | ``` 45 | make 46 | ``` 47 | 删除编译生成的可执行文件: 48 | ``` 49 | make clean 50 | ``` 51 | ## 运行 52 | ``` 53 | ./可执行文件名 54 | ``` 55 | ## 测试结果 56 | ### v1 57 | #### 单个线程下的测试情况: 58 | ![alt text](images/v1-oneThread.png) 59 | #### 两个线程下的测试情况: 60 | ![alt text](images/v1-twoThread.png) 61 | 62 | #### 五个线程下的测试情况: 63 | ![alt text](images/v1-fiveThread.png) 64 | 通过上述测试结果大家可以看出该内存池的性能甚至不如直接使用系统调用 malloc 和 free,所以就有了内存池v2、v3。 65 | 66 | ### v2 67 | #### 功能测试结果 68 | ![alt text](images/v2-functionalTest.png) 69 | #### 性能测试结果 70 | ![alt text](images/v2-performanceTest.png) 71 | 72 | ### v3 73 | #### 功能测试结果 74 | ![alt text](images/v3-functionalTest.png) 75 | #### 性能测试结果 76 | 测试结果表明内存池v3的性能要略好于内存池v2。 77 | ![alt text](images/v3-performanceTest.png) 78 | -------------------------------------------------------------------------------- /images/v1-fiveThread.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/memory-pool/2477e80085b21a155aee3c4bf34f88b26e7d1a6b/images/v1-fiveThread.png -------------------------------------------------------------------------------- /images/v1-oneThread.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/memory-pool/2477e80085b21a155aee3c4bf34f88b26e7d1a6b/images/v1-oneThread.png -------------------------------------------------------------------------------- /images/v1-twoThread.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/memory-pool/2477e80085b21a155aee3c4bf34f88b26e7d1a6b/images/v1-twoThread.png -------------------------------------------------------------------------------- /images/v1.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/youngyangyang04/memory-pool/2477e80085b21a155aee3c4bf34f88b26e7d1a6b/images/v1.jpg -------------------------------------------------------------------------------- /images/v2-functionalTest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/memory-pool/2477e80085b21a155aee3c4bf34f88b26e7d1a6b/images/v2-functionalTest.png -------------------------------------------------------------------------------- /images/v2-performanceTest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/memory-pool/2477e80085b21a155aee3c4bf34f88b26e7d1a6b/images/v2-performanceTest.png -------------------------------------------------------------------------------- /images/v2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/memory-pool/2477e80085b21a155aee3c4bf34f88b26e7d1a6b/images/v2.png -------------------------------------------------------------------------------- /images/v3-functionalTest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/memory-pool/2477e80085b21a155aee3c4bf34f88b26e7d1a6b/images/v3-functionalTest.png -------------------------------------------------------------------------------- /images/v3-performanceTest.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/youngyangyang04/memory-pool/2477e80085b21a155aee3c4bf34f88b26e7d1a6b/images/v3-performanceTest.png -------------------------------------------------------------------------------- /v1/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # CMake minimum version 2 | cmake_minimum_required(VERSION 3.10) 3 | 4 | # Project name 5 | 
project(MemoryPoolProject) 6 | 7 | # Set C++ standard 8 | set(CMAKE_CXX_STANDARD 11) 9 | set(CMAKE_CXX_STANDARD_REQUIRED True) 10 | 11 | # Include directories 12 | include_directories(include) 13 | 14 | # Source files 15 | file(GLOB SRC_FILES "src/*.cpp") 16 | file(GLOB TEST_FILES "tests/*.cpp") 17 | 18 | # Add the executable for the main target 19 | add_executable(${PROJECT_NAME} ${SRC_FILES} ${TEST_FILES}) 20 | 21 | # Add compile options 22 | target_compile_options(${PROJECT_NAME} PRIVATE -g -pthread) 23 | 24 | # Link libraries 25 | target_link_libraries(${PROJECT_NAME} pthread) 26 | 27 | # Link libraries (if any) 28 | # target_link_libraries(${PROJECT_NAME} ) -------------------------------------------------------------------------------- /v1/include/MemoryPool.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | namespace Kama_memoryPool 11 | { 12 | #define MEMORY_POOL_NUM 64 13 | #define SLOT_BASE_SIZE 8 14 | #define MAX_SLOT_SIZE 512 15 | 16 | 17 | /* 具体内存池的槽大小没法确定,因为每个内存池的槽大小不同(8的倍数) 18 | 所以这个槽结构体的sizeof 不是实际的槽大小 */ 19 | struct Slot 20 | { 21 | std::atomic next; // 原子指针 22 | }; 23 | 24 | class MemoryPool 25 | { 26 | public: 27 | MemoryPool(size_t BlockSize = 4096); 28 | ~MemoryPool(); 29 | 30 | void init(size_t); 31 | 32 | void* allocate(); 33 | void deallocate(void*); 34 | private: 35 | void allocateNewBlock(); 36 | size_t padPointer(char* p, size_t align); 37 | 38 | // 使用CAS操作进行无锁入队和出队 39 | bool pushFreeList(Slot* slot); 40 | Slot* popFreeList(); 41 | private: 42 | int BlockSize_; // 内存块大小 43 | int SlotSize_; // 槽大小 44 | Slot* firstBlock_; // 指向内存池管理的首个实际内存块 45 | Slot* curSlot_; // 指向当前未被使用过的槽 46 | std::atomic freeList_; // 指向空闲的槽(被使用过后又被释放的槽) 47 | Slot* lastSlot_; // 作为当前内存块中最后能够存放元素的位置标识(超过该位置需申请新的内存块) 48 | //std::mutex mutexForFreeList_; // 保证freeList_在多线程中操作的原子性 49 | std::mutex mutexForBlock_; // 
保证多线程情况下避免不必要的重复开辟内存导致的浪费行为 50 | }; 51 | 52 | class HashBucket 53 | { 54 | public: 55 | static void initMemoryPool(); 56 | static MemoryPool& getMemoryPool(int index); 57 | 58 | static void* useMemory(size_t size) 59 | { 60 | if (size <= 0) 61 | return nullptr; 62 | if (size > MAX_SLOT_SIZE) // 大于512字节的内存,则使用new 63 | return operator new(size); 64 | 65 | // 相当于size / 8 向上取整(因为分配内存只能大不能小 66 | return getMemoryPool(((size + 7) / SLOT_BASE_SIZE) - 1).allocate(); 67 | } 68 | 69 | static void freeMemory(void* ptr, size_t size) 70 | { 71 | if (!ptr) 72 | return; 73 | if (size > MAX_SLOT_SIZE) 74 | { 75 | operator delete(ptr); 76 | return; 77 | } 78 | 79 | getMemoryPool(((size + 7) / SLOT_BASE_SIZE) - 1).deallocate(ptr); 80 | } 81 | 82 | template 83 | friend T* newElement(Args&&... args); 84 | 85 | template 86 | friend void deleteElement(T* p); 87 | }; 88 | 89 | template 90 | T* newElement(Args&&... args) 91 | { 92 | T* p = nullptr; 93 | // 根据元素大小选取合适的内存池分配内存 94 | if ((p = reinterpret_cast(HashBucket::useMemory(sizeof(T)))) != nullptr) 95 | // 在分配的内存上构造对象 96 | new(p) T(std::forward(args)...); 97 | 98 | return p; 99 | } 100 | 101 | template 102 | void deleteElement(T* p) 103 | { 104 | // 对象析构 105 | if (p) 106 | { 107 | p->~T(); 108 | // 内存回收 109 | HashBucket::freeMemory(reinterpret_cast(p), sizeof(T)); 110 | } 111 | } 112 | 113 | } // namespace memoryPool 114 | -------------------------------------------------------------------------------- /v1/src/MemoryPool.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/MemoryPool.h" 2 | 3 | namespace Kama_memoryPool 4 | { 5 | MemoryPool::MemoryPool(size_t BlockSize) 6 | : BlockSize_ (BlockSize) 7 | , SlotSize_ (0) 8 | , firstBlock_ (nullptr) 9 | , curSlot_ (nullptr) 10 | , freeList_ (nullptr) 11 | , lastSlot_ (nullptr) 12 | {} 13 | 14 | MemoryPool::~MemoryPool() 15 | { 16 | // 把连续的block删除 17 | Slot* cur = firstBlock_; 18 | while (cur) 19 | { 20 | Slot* next = cur->next; 21 | 
// 等同于 free(reinterpret_cast(firstBlock_)); 22 | // 转化为 void 指针,因为 void 类型不需要调用析构函数,只释放空间 23 | operator delete(reinterpret_cast(cur)); 24 | cur = next; 25 | } 26 | } 27 | 28 | void MemoryPool::init(size_t size) 29 | { 30 | assert(size > 0); 31 | SlotSize_ = size; 32 | firstBlock_ = nullptr; 33 | curSlot_ = nullptr; 34 | freeList_ = nullptr; 35 | lastSlot_ = nullptr; 36 | } 37 | 38 | void* MemoryPool::allocate() 39 | { 40 | // 优先使用空闲链表中的内存槽 41 | Slot* slot = popFreeList(); 42 | if (slot != nullptr) 43 | return slot; 44 | 45 | Slot* temp; 46 | { 47 | std::lock_guard lock(mutexForBlock_); 48 | if (curSlot_ >= lastSlot_) 49 | { 50 | // 当前内存块已无内存槽可用,开辟一块新的内存 51 | allocateNewBlock(); 52 | } 53 | 54 | temp = curSlot_; 55 | // 这里不能直接 curSlot_ += SlotSize_ 因为curSlot_是Slot*类型,所以需要除以SlotSize_再加1 56 | curSlot_ += SlotSize_ / sizeof(Slot); 57 | } 58 | 59 | return temp; 60 | } 61 | 62 | void MemoryPool::deallocate(void* ptr) 63 | { 64 | if (!ptr) return; 65 | 66 | Slot* slot = reinterpret_cast(ptr); 67 | pushFreeList(slot); 68 | } 69 | 70 | void MemoryPool::allocateNewBlock() 71 | { 72 | //std::cout << "申请一块内存块,SlotSize: " << SlotSize_ << std::endl; 73 | // 头插法插入新的内存块 74 | void* newBlock = operator new(BlockSize_); 75 | reinterpret_cast(newBlock)->next = firstBlock_; 76 | firstBlock_ = reinterpret_cast(newBlock); 77 | 78 | char* body = reinterpret_cast(newBlock) + sizeof(Slot*); 79 | size_t paddingSize = padPointer(body, SlotSize_); // 计算对齐需要填充内存的大小 80 | curSlot_ = reinterpret_cast(body + paddingSize); 81 | 82 | // 超过该标记位置,则说明该内存块已无内存槽可用,需向系统申请新的内存块 83 | lastSlot_ = reinterpret_cast(reinterpret_cast(newBlock) + BlockSize_ - SlotSize_ + 1); 84 | 85 | freeList_ = nullptr; 86 | } 87 | 88 | // 让指针对齐到槽大小的倍数位置 89 | size_t MemoryPool::padPointer(char* p, size_t align) 90 | { 91 | // align 是槽大小 92 | return align - (reinterpret_cast(p) % align); 93 | } 94 | 95 | // 实现无锁入队操作 96 | bool MemoryPool::pushFreeList(Slot* slot) 97 | { 98 | while (true) 99 | { 100 | // 获取当前头节点 101 | Slot* oldHead 
= freeList_.load(std::memory_order_relaxed); 102 | // 将新节点的 next 指向当前头节点 103 | slot->next.store(oldHead, std::memory_order_relaxed); 104 | 105 | // 尝试将新节点设置为头节点 106 | if (freeList_.compare_exchange_weak(oldHead, slot, 107 | std::memory_order_release, std::memory_order_relaxed)) 108 | { 109 | return true; 110 | } 111 | // 失败:说明另一个线程可能已经修改了 freeList_ 112 | // CAS 失败则重试 113 | } 114 | } 115 | 116 | // 实现无锁出队操作 117 | Slot* MemoryPool::popFreeList() 118 | { 119 | while (true) 120 | { 121 | Slot* oldHead = freeList_.load(std::memory_order_acquire); 122 | if (oldHead == nullptr) 123 | return nullptr; // 队列为空 124 | 125 | // 在访问 newHead 之前再次验证 oldHead 的有效性 126 | Slot* newHead = nullptr; 127 | try 128 | { 129 | newHead = oldHead->next.load(std::memory_order_relaxed); 130 | } 131 | catch(...) 132 | { 133 | // 如果返回失败,则continue重新尝试申请内存 134 | continue; 135 | } 136 | 137 | // 尝试更新头结点 138 | // 原子性地尝试将 freeList_ 从 oldHead 更新为 newHead 139 | if (freeList_.compare_exchange_weak(oldHead, newHead, 140 | std::memory_order_acquire, std::memory_order_relaxed)) 141 | { 142 | return oldHead; 143 | } 144 | // 失败:说明另一个线程可能已经修改了 freeList_ 145 | // CAS 失败则重试 146 | } 147 | } 148 | 149 | 150 | void HashBucket::initMemoryPool() 151 | { 152 | for (int i = 0; i < MEMORY_POOL_NUM; i++) 153 | { 154 | getMemoryPool(i).init((i + 1) * SLOT_BASE_SIZE); 155 | } 156 | } 157 | 158 | // 单例模式 159 | MemoryPool& HashBucket::getMemoryPool(int index) 160 | { 161 | static MemoryPool memoryPool[MEMORY_POOL_NUM]; 162 | return memoryPool[index]; 163 | } 164 | 165 | } // namespace memoryPool 166 | 167 | -------------------------------------------------------------------------------- /v1/tests/UnitTest.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "../include/MemoryPool.h" 6 | 7 | using namespace Kama_memoryPool; 8 | 9 | // 测试用例 10 | class P1 11 | { 12 | int id_; 13 | }; 14 | 15 | class P2 16 | { 17 | int id_[5]; 18 | }; 19 | 
20 | class P3 21 | { 22 | int id_[10]; 23 | }; 24 | 25 | class P4 26 | { 27 | int id_[20]; 28 | }; 29 | 30 | // 单轮次申请释放次数 线程数 轮次 31 | void BenchmarkMemoryPool(size_t ntimes, size_t nworks, size_t rounds) 32 | { 33 | std::vector vthread(nworks); // 线程池 34 | size_t total_costtime = 0; 35 | for (size_t k = 0; k < nworks; ++k) // 创建 nworks 个线程 36 | { 37 | vthread[k] = std::thread([&]() { 38 | for (size_t j = 0; j < rounds; ++j) 39 | { 40 | size_t begin1 = clock(); 41 | for (size_t i = 0; i < ntimes; i++) 42 | { 43 | P1* p1 = newElement(); // 内存池对外接口 44 | deleteElement(p1); 45 | P2* p2 = newElement(); 46 | deleteElement(p2); 47 | P3* p3 = newElement(); 48 | deleteElement(p3); 49 | P4* p4 = newElement(); 50 | deleteElement(p4); 51 | } 52 | size_t end1 = clock(); 53 | 54 | total_costtime += end1 - begin1; 55 | } 56 | }); 57 | } 58 | for (auto& t : vthread) 59 | { 60 | t.join(); 61 | } 62 | printf("%lu个线程并发执行%lu轮次,每轮次newElement&deleteElement %lu次,总计花费:%lu ms\n", nworks, rounds, ntimes, total_costtime); 63 | } 64 | 65 | void BenchmarkNew(size_t ntimes, size_t nworks, size_t rounds) 66 | { 67 | std::vector vthread(nworks); 68 | size_t total_costtime = 0; 69 | for (size_t k = 0; k < nworks; ++k) 70 | { 71 | vthread[k] = std::thread([&]() { 72 | for (size_t j = 0; j < rounds; ++j) 73 | { 74 | size_t begin1 = clock(); 75 | for (size_t i = 0; i < ntimes; i++) 76 | { 77 | P1* p1 = new P1; 78 | delete p1; 79 | P2* p2 = new P2; 80 | delete p2; 81 | P3* p3 = new P3; 82 | delete p3; 83 | P4* p4 = new P4; 84 | delete p4; 85 | } 86 | size_t end1 = clock(); 87 | 88 | total_costtime += end1 - begin1; 89 | } 90 | }); 91 | } 92 | for (auto& t : vthread) 93 | { 94 | t.join(); 95 | } 96 | printf("%lu个线程并发执行%lu轮次,每轮次malloc&free %lu次,总计花费:%lu ms\n", nworks, rounds, ntimes, total_costtime); 97 | } 98 | 99 | int main() 100 | { 101 | HashBucket::initMemoryPool(); // 使用内存池接口前一定要先调用该函数 102 | BenchmarkMemoryPool(100, 1, 10); // 测试内存池 103 | std::cout << 
"===========================================================================" << std::endl; 104 | std::cout << "===========================================================================" << std::endl; 105 | BenchmarkNew(100, 1, 10); // 测试 new delete 106 | 107 | return 0; 108 | } -------------------------------------------------------------------------------- /v2/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10) 2 | project(memory_pool) 3 | 4 | # 设置C++标准 5 | set(CMAKE_CXX_STANDARD 17) 6 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 7 | 8 | # 编译选项 9 | add_compile_options(-Wall -O2) 10 | 11 | # 查找pthread库 12 | find_package(Threads REQUIRED) 13 | 14 | # 设置目录 15 | set(SRC_DIR ${CMAKE_SOURCE_DIR}/src) 16 | set(INC_DIR ${CMAKE_SOURCE_DIR}/include) 17 | set(TEST_DIR ${CMAKE_SOURCE_DIR}/tests) 18 | 19 | # 源文件 20 | file(GLOB SOURCES "${SRC_DIR}/*.cpp") 21 | 22 | # 添加头文件目录 23 | include_directories(${INC_DIR}) 24 | 25 | # 创建单元测试可执行文件 26 | add_executable(unit_test 27 | ${SOURCES} 28 | ${TEST_DIR}/UnitTest.cpp 29 | ) 30 | 31 | # 创建性能测试可执行文件 32 | add_executable(perf_test 33 | ${SOURCES} 34 | ${TEST_DIR}/PerformanceTest.cpp 35 | ) 36 | 37 | # 链接pthread库 38 | target_link_libraries(unit_test PRIVATE Threads::Threads) 39 | target_link_libraries(perf_test PRIVATE Threads::Threads) 40 | 41 | # 添加测试命令 42 | add_custom_target(test 43 | COMMAND ./unit_test 44 | DEPENDS unit_test 45 | ) 46 | 47 | add_custom_target(perf 48 | COMMAND ./perf_test 49 | DEPENDS perf_test 50 | ) -------------------------------------------------------------------------------- /v2/include/CentralCache.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "Common.h" 3 | #include 4 | 5 | namespace Kama_memoryPool 6 | { 7 | 8 | class CentralCache 9 | { 10 | public: 11 | static CentralCache& getInstance() 12 | { 13 | static CentralCache instance; 14 | return instance; 15 | } 16 | 
17 | void* fetchRange(size_t index); 18 | void returnRange(void* start, size_t size, size_t bytes); 19 | 20 | private: 21 | // 相互是还所有原子指针为nullptr 22 | CentralCache() 23 | { 24 | for (auto& ptr : centralFreeList_) 25 | { 26 | ptr.store(nullptr, std::memory_order_relaxed); 27 | } 28 | // 初始化所有锁 29 | for (auto& lock : locks_) 30 | { 31 | lock.clear(); 32 | } 33 | } 34 | // 从页缓存获取内存 35 | void* fetchFromPageCache(size_t size); 36 | 37 | private: 38 | // 中心缓存的自由链表 39 | std::array, FREE_LIST_SIZE> centralFreeList_; 40 | 41 | // 用于同步的自旋锁 42 | std::array locks_; 43 | }; 44 | 45 | } // namespace memoryPool -------------------------------------------------------------------------------- /v2/include/Common.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | 6 | namespace Kama_memoryPool 7 | { 8 | // 对齐数和大小定义 9 | constexpr size_t ALIGNMENT = 8; 10 | constexpr size_t MAX_BYTES = 256 * 1024; // 256KB 11 | constexpr size_t FREE_LIST_SIZE = MAX_BYTES / ALIGNMENT; // ALIGNMENT等于指针void*的大小 12 | 13 | // 内存块头部信息 14 | struct BlockHeader 15 | { 16 | size_t size; // 内存块大小 17 | bool inUse; // 使用标志 18 | BlockHeader* next; // 指向下一个内存块 19 | }; 20 | 21 | // 大小类管理 22 | class SizeClass 23 | { 24 | public: 25 | static size_t roundUp(size_t bytes) 26 | { 27 | return (bytes + ALIGNMENT - 1) & ~(ALIGNMENT - 1); 28 | } 29 | 30 | static size_t getIndex(size_t bytes) 31 | { 32 | // 确保bytes至少为ALIGNMENT 33 | bytes = std::max(bytes, ALIGNMENT); 34 | // 向上取整后-1 35 | return (bytes + ALIGNMENT - 1) / ALIGNMENT - 1; 36 | } 37 | }; 38 | 39 | } // namespace memoryPool -------------------------------------------------------------------------------- /v2/include/MemoryPool.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "ThreadCache.h" 3 | 4 | namespace Kama_memoryPool 5 | { 6 | 7 | class MemoryPool 8 | { 9 | public: 10 | static void* allocate(size_t size) 11 
| { 12 | return ThreadCache::getInstance()->allocate(size); 13 | } 14 | 15 | static void deallocate(void* ptr, size_t size) 16 | { 17 | ThreadCache::getInstance()->deallocate(ptr, size); 18 | } 19 | }; 20 | 21 | } // namespace memoryPool -------------------------------------------------------------------------------- /v2/include/PageCache.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "Common.h" 3 | #include 4 | #include 5 | 6 | namespace Kama_memoryPool 7 | { 8 | 9 | class PageCache 10 | { 11 | public: 12 | static const size_t PAGE_SIZE = 4096; // 4K页大小 13 | 14 | static PageCache& getInstance() 15 | { 16 | static PageCache instance; 17 | return instance; 18 | } 19 | 20 | // 分配指定页数的span 21 | void* allocateSpan(size_t numPages); 22 | 23 | // 释放span 24 | void deallocateSpan(void* ptr, size_t numPages); 25 | 26 | private: 27 | PageCache() = default; 28 | 29 | // 向系统申请内存 30 | void* systemAlloc(size_t numPages); 31 | private: 32 | struct Span 33 | { 34 | void* pageAddr; // 页起始地址 35 | size_t numPages; // 页数 36 | Span* next; // 链表指针 37 | }; 38 | 39 | // 按页数管理空闲span,不同页数对应不同Span链表 40 | std::map freeSpans_; 41 | // 页号到span的映射,用于回收 42 | std::map spanMap_; 43 | std::mutex mutex_; 44 | }; 45 | 46 | } // namespace memoryPool -------------------------------------------------------------------------------- /v2/include/ThreadCache.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "Common.h" 3 | 4 | namespace Kama_memoryPool 5 | { 6 | 7 | // 线程本地缓存 8 | class ThreadCache 9 | { 10 | public: 11 | static ThreadCache* getInstance() 12 | { 13 | static thread_local ThreadCache instance; 14 | return &instance; 15 | } 16 | 17 | void* allocate(size_t size); 18 | void deallocate(void* ptr, size_t size); 19 | private: 20 | ThreadCache() 21 | { 22 | // 初始化自由链表和大小统计 23 | freeList_.fill(nullptr); 24 | freeListSize_.fill(0); 25 | } 26 | 27 | // 从中心缓存获取内存 28 | void* 
fetchFromCentralCache(size_t index); 29 | // 归还内存到中心缓存 30 | void returnToCentralCache(void* start, size_t size); 31 | 32 | bool shouldReturnToCentralCache(size_t index); 33 | private: 34 | // 每个线程的自由链表数组 35 | std::array freeList_; 36 | std::array freeListSize_; // 自由链表大小统计 37 | }; 38 | 39 | } // namespace memoryPool -------------------------------------------------------------------------------- /v2/src/CentralCache.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/CentralCache.h" 2 | #include "../include/PageCache.h" 3 | #include 4 | #include 5 | 6 | namespace Kama_memoryPool 7 | { 8 | 9 | // 每次从PageCache获取span大小(以页为单位) 10 | static const size_t SPAN_PAGES = 8; 11 | 12 | void* CentralCache::fetchRange(size_t index) 13 | { 14 | // 索引检查,当索引大于等于FREE_LIST_SIZE时,说明申请内存过大应直接向系统申请 15 | if (index >= FREE_LIST_SIZE) 16 | return nullptr; 17 | 18 | // 自旋锁保护 19 | while (locks_[index].test_and_set(std::memory_order_acquire)) 20 | { 21 | std::this_thread::yield(); // 添加线程让步,避免忙等待,避免过度消耗CPU 22 | } 23 | 24 | void* result = nullptr; 25 | try 26 | { 27 | // 尝试从中心缓存获取内存块 28 | result = centralFreeList_[index].load(std::memory_order_relaxed); 29 | 30 | if (!result) 31 | { 32 | // 如果中心缓存为空,从页缓存获取新的内存块 33 | size_t size = (index + 1) * ALIGNMENT; 34 | result = fetchFromPageCache(size); 35 | 36 | if (!result) 37 | { 38 | locks_[index].clear(std::memory_order_release); 39 | return nullptr; 40 | } 41 | 42 | // 将获取的内存块切分成小块 43 | char* start = static_cast(result); 44 | size_t blockNum = (SPAN_PAGES * PageCache::PAGE_SIZE) / size; 45 | 46 | if (blockNum > 1) 47 | { // 确保至少有两个块才构建链表 48 | for (size_t i = 1; i < blockNum; ++i) 49 | { 50 | void* current = start + (i - 1) * size; 51 | void* next = start + i * size; 52 | *reinterpret_cast(current) = next; 53 | } 54 | *reinterpret_cast(start + (blockNum - 1) * size) = nullptr; 55 | 56 | // 保存result的下一个节点 57 | void* next = *reinterpret_cast(result); 58 | // 将result与链表断开 59 | 
*reinterpret_cast(result) = nullptr; 60 | // 更新中心缓存 61 | centralFreeList_[index].store( 62 | next, 63 | std::memory_order_release 64 | ); 65 | } 66 | } 67 | else 68 | { 69 | // 保存result的下一个节点 70 | void* next = *reinterpret_cast(result); 71 | // 将result与链表断开 72 | *reinterpret_cast(result) = nullptr; 73 | 74 | // 更新中心缓存 75 | centralFreeList_[index].store(next, std::memory_order_release); 76 | } 77 | } 78 | catch (...) 79 | { 80 | locks_[index].clear(std::memory_order_release); 81 | throw; 82 | } 83 | 84 | // 释放锁 85 | locks_[index].clear(std::memory_order_release); 86 | return result; 87 | } 88 | 89 | void CentralCache::returnRange(void* start, size_t size, size_t index) 90 | { 91 | // 当索引大于等于FREE_LIST_SIZE时,说明内存过大应直接向系统归还 92 | if (!start || index >= FREE_LIST_SIZE) 93 | return; 94 | 95 | while (locks_[index].test_and_set(std::memory_order_acquire)) 96 | { 97 | std::this_thread::yield(); 98 | } 99 | 100 | try 101 | { 102 | // 找到要归还的链表的最后一个节点 103 | void* end = start; 104 | size_t count = 1; 105 | while (*reinterpret_cast(end) != nullptr && count < size) { 106 | end = *reinterpret_cast(end); 107 | count++; 108 | } 109 | 110 | // 将归还的链表连接到中心缓存的链表头部 111 | void* current = centralFreeList_[index].load(std::memory_order_relaxed); 112 | *reinterpret_cast(end) = current; // 将原链表头接到归还链表的尾部 113 | centralFreeList_[index].store(start, std::memory_order_release); // 将归还的链表头设为新的链表头 114 | } 115 | catch (...) 116 | { 117 | locks_[index].clear(std::memory_order_release); 118 | throw; 119 | } 120 | 121 | locks_[index].clear(std::memory_order_release); 122 | } 123 | 124 | void* CentralCache::fetchFromPageCache(size_t size) 125 | { 126 | // 1. 计算实际需要的页数 127 | size_t numPages = (size + PageCache::PAGE_SIZE - 1) / PageCache::PAGE_SIZE; 128 | 129 | // 2. 
根据大小决定分配策略 130 | if (size <= SPAN_PAGES * PageCache::PAGE_SIZE) 131 | { 132 | // 小于等于32KB的请求,使用固定8页 133 | return PageCache::getInstance().allocateSpan(SPAN_PAGES); 134 | } 135 | else 136 | { 137 | // 大于32KB的请求,按实际需求分配 138 | return PageCache::getInstance().allocateSpan(numPages); 139 | } 140 | } 141 | 142 | } // namespace memoryPool -------------------------------------------------------------------------------- /v2/src/PageCache.cpp: -------------------------------------------------------------------------------- 1 | #include "PageCache.h" 2 | #include 3 | #include 4 | 5 | namespace Kama_memoryPool 6 | { 7 | 8 | void* PageCache::allocateSpan(size_t numPages) 9 | { 10 | std::lock_guard lock(mutex_); 11 | 12 | // 查找合适的空闲span 13 | // lower_bound函数返回第一个大于等于numPages的元素的迭代器 14 | auto it = freeSpans_.lower_bound(numPages); 15 | if (it != freeSpans_.end()) 16 | { 17 | Span* span = it->second; 18 | 19 | // 将取出的span从原有的空闲链表freeSpans_[it->first]中移除 20 | if (span->next) 21 | { 22 | freeSpans_[it->first] = span->next; 23 | } 24 | else 25 | { 26 | freeSpans_.erase(it); 27 | } 28 | 29 | // 如果span大于需要的numPages则进行分割 30 | if (span->numPages > numPages) 31 | { 32 | Span* newSpan = new Span; 33 | newSpan->pageAddr = static_cast(span->pageAddr) + 34 | numPages * PAGE_SIZE; 35 | newSpan->numPages = span->numPages - numPages; 36 | newSpan->next = nullptr; 37 | 38 | // 将超出部分放回空闲Span*列表头部 39 | auto& list = freeSpans_[newSpan->numPages]; 40 | newSpan->next = list; 41 | list = newSpan; 42 | 43 | span->numPages = numPages; 44 | } 45 | 46 | // 记录span信息用于回收 47 | spanMap_[span->pageAddr] = span; 48 | return span->pageAddr; 49 | } 50 | 51 | // 没有合适的span,向系统申请 52 | void* memory = systemAlloc(numPages); 53 | if (!memory) return nullptr; 54 | 55 | // 创建新的span 56 | Span* span = new Span; 57 | span->pageAddr = memory; 58 | span->numPages = numPages; 59 | span->next = nullptr; 60 | 61 | // 记录span信息用于回收 62 | spanMap_[memory] = span; 63 | return memory; 64 | } 65 | 66 | void 
PageCache::deallocateSpan(void* ptr, size_t numPages) 67 | { 68 | std::lock_guard lock(mutex_); 69 | 70 | // 查找对应的span,没找到代表不是PageCache分配的内存,直接返回 71 | auto it = spanMap_.find(ptr); 72 | if (it == spanMap_.end()) return; 73 | 74 | Span* span = it->second; 75 | 76 | // 尝试合并相邻的span 77 | void* nextAddr = static_cast(ptr) + numPages * PAGE_SIZE; 78 | auto nextIt = spanMap_.find(nextAddr); 79 | 80 | if (nextIt != spanMap_.end()) 81 | { 82 | Span* nextSpan = nextIt->second; 83 | 84 | // 1. 首先检查nextSpan是否在空闲链表中 85 | bool found = false; 86 | auto& nextList = freeSpans_[nextSpan->numPages]; 87 | 88 | // 检查是否是头节点 89 | if (nextList == nextSpan) 90 | { 91 | nextList = nextSpan->next; 92 | found = true; 93 | } 94 | else if (nextList) // 只有在链表非空时才遍历 95 | { 96 | Span* prev = nextList; 97 | while (prev->next) 98 | { 99 | if (prev->next == nextSpan) 100 | { 101 | // 将nextSpan从空闲链表中移除 102 | prev->next = nextSpan->next; 103 | found = true; 104 | break; 105 | } 106 | prev = prev->next; 107 | } 108 | } 109 | 110 | // 2. 
只有在找到nextSpan的情况下才进行合并 111 | if (found) 112 | { 113 | // 合并span 114 | span->numPages += nextSpan->numPages; 115 | spanMap_.erase(nextAddr); 116 | delete nextSpan; 117 | } 118 | } 119 | 120 | // 将合并后的span通过头插法插入空闲列表 121 | auto& list = freeSpans_[span->numPages]; 122 | span->next = list; 123 | list = span; 124 | } 125 | 126 | void* PageCache::systemAlloc(size_t numPages) 127 | { 128 | size_t size = numPages * PAGE_SIZE; 129 | 130 | // 使用mmap分配内存 131 | void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, 132 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 133 | if (ptr == MAP_FAILED) return nullptr; 134 | 135 | // 清零内存 136 | memset(ptr, 0, size); 137 | return ptr; 138 | } 139 | 140 | } // namespace memoryPool -------------------------------------------------------------------------------- /v2/src/ThreadCache.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/ThreadCache.h" 2 | #include "../include/CentralCache.h" 3 | 4 | namespace Kama_memoryPool 5 | { 6 | 7 | void* ThreadCache::allocate(size_t size) 8 | { 9 | // 处理0大小的分配请求 10 | if (size == 0) 11 | { 12 | size = ALIGNMENT; // 至少分配一个对齐大小 13 | } 14 | 15 | if (size > MAX_BYTES) 16 | { 17 | // 大对象直接从系统分配 18 | return malloc(size); 19 | } 20 | 21 | size_t index = SizeClass::getIndex(size); 22 | 23 | // 更新自由链表大小 24 | freeListSize_[index]--; 25 | 26 | // 检查线程本地自由链表 27 | // 如果 freeList_[index] 不为空,表示该链表中有可用内存块 28 | if (void* ptr = freeList_[index]) 29 | { 30 | freeList_[index] = *reinterpret_cast(ptr); // 将freeList_[index]指向的内存块的下一个内存块地址(取决于内存块的实现) 31 | return ptr; 32 | } 33 | 34 | // 如果线程本地自由链表为空,则从中心缓存获取一批内存 35 | return fetchFromCentralCache(index); 36 | } 37 | 38 | void ThreadCache::deallocate(void* ptr, size_t size) 39 | { 40 | if (size > MAX_BYTES) 41 | { 42 | free(ptr); 43 | return; 44 | } 45 | 46 | size_t index = SizeClass::getIndex(size); 47 | 48 | // 插入到线程本地自由链表 49 | *reinterpret_cast(ptr) = freeList_[index]; 50 | freeList_[index] = ptr; 51 | 52 | // 更新自由链表大小 53 | 
freeListSize_[index]++; // 增加对应大小类的自由链表大小 54 | 55 | // 判断是否需要将部分内存回收给中心缓存 56 | if (shouldReturnToCentralCache(index)) 57 | { 58 | returnToCentralCache(freeList_[index], size); 59 | } 60 | } 61 | 62 | // 判断是否需要将内存回收给中心缓存 63 | bool ThreadCache::shouldReturnToCentralCache(size_t index) 64 | { 65 | // 设定阈值,例如:当自由链表的大小超过一定数量时 66 | size_t threshold = 64; // 例如,64个内存块 67 | return (freeListSize_[index] > threshold); 68 | } 69 | 70 | void* ThreadCache::fetchFromCentralCache(size_t index) 71 | { 72 | // 从中心缓存批量获取内存 73 | void* start = CentralCache::getInstance().fetchRange(index); 74 | if (!start) return nullptr; 75 | 76 | // 取一个返回,其余放入自由链表 77 | void* result = start; 78 | freeList_[index] = *reinterpret_cast(start); 79 | 80 | // 更新自由链表大小 81 | size_t batchNum = 0; 82 | void* current = start; // 从start开始遍历 83 | 84 | // 计算从中心缓存获取的内存块数量 85 | while (current != nullptr) 86 | { 87 | batchNum++; 88 | current = *reinterpret_cast(current); // 遍历下一个内存块 89 | } 90 | 91 | // 更新freeListSize_,增加获取的内存块数量 92 | freeListSize_[index] += batchNum; 93 | 94 | return result; 95 | } 96 | 97 | void ThreadCache::returnToCentralCache(void* start, size_t size) 98 | { 99 | // 根据大小计算对应的索引 100 | size_t index = SizeClass::getIndex(size); 101 | 102 | // 获取对齐后的实际块大小 103 | size_t alignedSize = SizeClass::roundUp(size); 104 | 105 | // 计算要归还内存块数量 106 | size_t batchNum = freeListSize_[index]; 107 | if (batchNum <= 1) return; // 如果只有一个块,则不归还 108 | 109 | // 保留一部分在ThreadCache中(比如保留1/4) 110 | size_t keepNum = std::max(batchNum / 4, size_t(1)); 111 | size_t returnNum = batchNum - keepNum; 112 | 113 | // 将内存块串成链表 114 | char* current = static_cast(start); 115 | // 使用对齐后的大小计算分割点 116 | char* splitNode = current; 117 | for (size_t i = 0; i < keepNum - 1; ++i) 118 | { 119 | splitNode = reinterpret_cast(*reinterpret_cast(splitNode)); 120 | if (splitNode == nullptr) 121 | { 122 | // 如果链表提前结束,更新实际的返回数量 123 | returnNum = batchNum - (i + 1); 124 | break; 125 | } 126 | } 127 | 128 | if (splitNode != nullptr) 129 | { 130 | // 
将要返回的部分和要保留的部分断开 131 | void* nextNode = *reinterpret_cast(splitNode); 132 | *reinterpret_cast(splitNode) = nullptr; // 断开连接 133 | 134 | // 更新ThreadCache的空闲链表 135 | freeList_[index] = start; 136 | 137 | // 更新自由链表大小 138 | freeListSize_[index] = keepNum; 139 | 140 | // 将剩余部分返回给CentralCache 141 | if (returnNum > 0 && nextNode != nullptr) 142 | { 143 | CentralCache::getInstance().returnRange(nextNode, returnNum * alignedSize, index); 144 | } 145 | } 146 | } 147 | 148 | 149 | } // namespace memoryPool -------------------------------------------------------------------------------- /v2/tests/PerformanceTest.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/MemoryPool.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | using namespace Kama_memoryPool; 10 | using namespace std::chrono; 11 | 12 | // 计时器类 13 | class Timer 14 | { 15 | high_resolution_clock::time_point start; 16 | public: 17 | Timer() : start(high_resolution_clock::now()) {} 18 | 19 | double elapsed() 20 | { 21 | auto end = high_resolution_clock::now(); 22 | return duration_cast(end - start).count() / 1000.0; // 转换为毫秒 23 | } 24 | }; 25 | 26 | // 性能测试类 27 | class PerformanceTest 28 | { 29 | private: 30 | // 测试统计信息 31 | struct TestStats 32 | { 33 | double memPoolTime{0.0}; 34 | double systemTime{0.0}; 35 | size_t totalAllocs{0}; 36 | size_t totalBytes{0}; 37 | }; 38 | 39 | public: 40 | // 1. 
系统预热 41 | static void warmup() 42 | { 43 | std::cout << "Warming up memory systems...\n"; 44 | // 使用 pair 来存储指针和对应的大小 45 | std::vector> warmupPtrs; 46 | 47 | // 预热内存池 48 | for (int i = 0; i < 1000; ++i) 49 | { 50 | for (size_t size : {32, 64, 128, 256, 512}) { 51 | void* p = MemoryPool::allocate(size); 52 | warmupPtrs.emplace_back(p, size); // 存储指针和对应的大小 53 | } 54 | } 55 | 56 | // 释放预热内存 57 | for (const auto& [ptr, size] : warmupPtrs) 58 | { 59 | MemoryPool::deallocate(ptr, size); // 使用实际分配的大小进行释放 60 | } 61 | 62 | std::cout << "Warmup complete.\n\n"; 63 | } 64 | 65 | // 2. 小对象分配测试 66 | static void testSmallAllocation() 67 | { 68 | constexpr size_t NUM_ALLOCS = 100000; 69 | constexpr size_t SMALL_SIZE = 32; 70 | 71 | std::cout << "\nTesting small allocations (" << NUM_ALLOCS << " allocations of " 72 | << SMALL_SIZE << " bytes):" << std::endl; 73 | 74 | // 测试内存池 75 | { 76 | Timer t; 77 | std::vector ptrs; 78 | ptrs.reserve(NUM_ALLOCS); 79 | 80 | for (size_t i = 0; i < NUM_ALLOCS; ++i) 81 | { 82 | ptrs.push_back(MemoryPool::allocate(SMALL_SIZE)); 83 | 84 | // 模拟真实使用:部分立即释放 85 | if (i % 4 == 0) 86 | { 87 | MemoryPool::deallocate(ptrs.back(), SMALL_SIZE); 88 | ptrs.pop_back(); 89 | } 90 | } 91 | 92 | for (void* ptr : ptrs) 93 | { 94 | MemoryPool::deallocate(ptr, SMALL_SIZE); 95 | } 96 | 97 | std::cout << "Memory Pool: " << std::fixed << std::setprecision(3) 98 | << t.elapsed() << " ms" << std::endl; 99 | } 100 | 101 | // 测试new/delete 102 | { 103 | Timer t; 104 | std::vector ptrs; 105 | ptrs.reserve(NUM_ALLOCS); 106 | 107 | for (size_t i = 0; i < NUM_ALLOCS; ++i) 108 | { 109 | ptrs.push_back(new char[SMALL_SIZE]); 110 | 111 | if (i % 4 == 0) 112 | { 113 | delete[] static_cast(ptrs.back()); 114 | ptrs.pop_back(); 115 | } 116 | } 117 | 118 | for (void* ptr : ptrs) 119 | { 120 | delete[] static_cast(ptr); 121 | } 122 | 123 | std::cout << "New/Delete: " << std::fixed << std::setprecision(3) 124 | << t.elapsed() << " ms" << std::endl; 125 | } 126 | } 127 | 128 | // 3. 
多线程测试 129 | static void testMultiThreaded() 130 | { 131 | constexpr size_t NUM_THREADS = 4; 132 | constexpr size_t ALLOCS_PER_THREAD = 25000; 133 | constexpr size_t MAX_SIZE = 256; 134 | 135 | std::cout << "\nTesting multi-threaded allocations (" << NUM_THREADS 136 | << " threads, " << ALLOCS_PER_THREAD << " allocations each):" 137 | << std::endl; 138 | 139 | auto threadFunc = [](bool useMemPool) 140 | { 141 | std::random_device rd; 142 | std::mt19937 gen(rd()); 143 | std::uniform_int_distribution<> dis(8, MAX_SIZE); 144 | std::vector> ptrs; 145 | ptrs.reserve(ALLOCS_PER_THREAD); 146 | 147 | for (size_t i = 0; i < ALLOCS_PER_THREAD; ++i) 148 | { 149 | size_t size = dis(gen); 150 | void* ptr = useMemPool ? MemoryPool::allocate(size) 151 | : new char[size]; 152 | ptrs.push_back({ptr, size}); 153 | 154 | // 随机释放一些内存 155 | if (rand() % 100 < 75) 156 | { // 75%的概率释放 157 | size_t index = rand() % ptrs.size(); 158 | if (useMemPool) 159 | { 160 | MemoryPool::deallocate(ptrs[index].first, ptrs[index].second); 161 | } 162 | else 163 | { 164 | delete[] static_cast(ptrs[index].first); 165 | } 166 | ptrs[index] = ptrs.back(); 167 | ptrs.pop_back(); 168 | } 169 | } 170 | 171 | // 清理剩余内存 172 | for (const auto& [ptr, size] : ptrs) 173 | { 174 | if (useMemPool) 175 | { 176 | MemoryPool::deallocate(ptr, size); 177 | } 178 | else 179 | { 180 | delete[] static_cast(ptr); 181 | } 182 | } 183 | }; 184 | 185 | // 测试内存池 186 | { 187 | Timer t; 188 | std::vector threads; 189 | 190 | for (size_t i = 0; i < NUM_THREADS; ++i) 191 | { 192 | threads.emplace_back(threadFunc, true); 193 | } 194 | 195 | for (auto& thread : threads) 196 | { 197 | thread.join(); 198 | } 199 | 200 | std::cout << "Memory Pool: " << std::fixed << std::setprecision(3) 201 | << t.elapsed() << " ms" << std::endl; 202 | } 203 | 204 | // 测试new/delete 205 | { 206 | Timer t; 207 | std::vector threads; 208 | 209 | for (size_t i = 0; i < NUM_THREADS; ++i) 210 | { 211 | threads.emplace_back(threadFunc, false); 212 | } 213 | 214 | 
for (auto& thread : threads) 215 | { 216 | thread.join(); 217 | } 218 | 219 | std::cout << "New/Delete: " << std::fixed << std::setprecision(3) 220 | << t.elapsed() << " ms" << std::endl; 221 | } 222 | } 223 | 224 | // 4. 混合大小测试 225 | static void testMixedSizes() 226 | { 227 | constexpr size_t NUM_ALLOCS = 50000; 228 | const size_t SIZES[] = {16, 32, 64, 128, 256, 512, 1024, 2048}; 229 | 230 | std::cout << "\nTesting mixed size allocations (" << NUM_ALLOCS 231 | << " allocations):" << std::endl; 232 | 233 | // 测试内存池 234 | { 235 | Timer t; 236 | std::vector> ptrs; 237 | ptrs.reserve(NUM_ALLOCS); 238 | 239 | for (size_t i = 0; i < NUM_ALLOCS; ++i) 240 | { 241 | size_t size = SIZES[rand() % 8]; 242 | void* p = MemoryPool::allocate(size); 243 | ptrs.emplace_back(p, size); 244 | 245 | // 批量释放 246 | if (i % 100 == 0 && !ptrs.empty()) 247 | { 248 | size_t releaseCount = std::min(ptrs.size(), size_t(20)); 249 | for (size_t j = 0; j < releaseCount; ++j) 250 | { 251 | MemoryPool::deallocate(ptrs.back().first, ptrs.back().second); 252 | ptrs.pop_back(); 253 | } 254 | } 255 | } 256 | 257 | for (const auto& [ptr, size] : ptrs) 258 | { 259 | MemoryPool::deallocate(ptr, size); 260 | } 261 | 262 | std::cout << "Memory Pool: " << std::fixed << std::setprecision(3) 263 | << t.elapsed() << " ms" << std::endl; 264 | } 265 | 266 | // 测试new/delete 267 | { 268 | Timer t; 269 | std::vector> ptrs; 270 | ptrs.reserve(NUM_ALLOCS); 271 | 272 | for (size_t i = 0; i < NUM_ALLOCS; ++i) 273 | { 274 | size_t size = SIZES[rand() % 8]; 275 | void* p = new char[size]; 276 | ptrs.emplace_back(p, size); 277 | 278 | if (i % 100 == 0 && !ptrs.empty()) 279 | { 280 | size_t releaseCount = std::min(ptrs.size(), size_t(20)); 281 | for (size_t j = 0; j < releaseCount; ++j) 282 | { 283 | delete[] static_cast(ptrs.back().first); 284 | ptrs.pop_back(); 285 | } 286 | } 287 | } 288 | 289 | for (const auto& [ptr, size] : ptrs) 290 | { 291 | delete[] static_cast(ptr); 292 | } 293 | 294 | std::cout << "New/Delete: " 
<< std::fixed << std::setprecision(3) 295 | << t.elapsed() << " ms" << std::endl; 296 | } 297 | } 298 | }; 299 | 300 | int main() 301 | { 302 | std::cout << "Starting performance tests..." << std::endl; 303 | 304 | // 预热系统 305 | PerformanceTest::warmup(); 306 | 307 | // 运行测试 308 | PerformanceTest::testSmallAllocation(); 309 | PerformanceTest::testMultiThreaded(); 310 | PerformanceTest::testMixedSizes(); 311 | 312 | return 0; 313 | } -------------------------------------------------------------------------------- /v2/tests/UnitTest.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/MemoryPool.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | using namespace Kama_memoryPool; 12 | 13 | // 基础分配测试 14 | void testBasicAllocation() 15 | { 16 | std::cout << "Running basic allocation test..." << std::endl; 17 | 18 | // 测试小内存分配 19 | void* ptr1 = MemoryPool::allocate(8); 20 | assert(ptr1 != nullptr); 21 | MemoryPool::deallocate(ptr1, 8); 22 | 23 | // 测试中等大小内存分配 24 | void* ptr2 = MemoryPool::allocate(1024); 25 | assert(ptr2 != nullptr); 26 | MemoryPool::deallocate(ptr2, 1024); 27 | 28 | // 测试大内存分配(超过MAX_BYTES) 29 | void* ptr3 = MemoryPool::allocate(1024 * 1024); 30 | assert(ptr3 != nullptr); 31 | MemoryPool::deallocate(ptr3, 1024 * 1024); 32 | 33 | std::cout << "Basic allocation test passed!" << std::endl; 34 | } 35 | 36 | // 内存写入测试 37 | void testMemoryWriting() 38 | { 39 | std::cout << "Running memory writing test..." 
<< std::endl; 40 | 41 | // 分配并写入数据 42 | const size_t size = 128; 43 | char* ptr = static_cast(MemoryPool::allocate(size)); 44 | assert(ptr != nullptr); 45 | 46 | // 写入数据 47 | for (size_t i = 0; i < size; ++i) 48 | { 49 | ptr[i] = static_cast(i % 256); 50 | } 51 | 52 | // 验证数据 53 | for (size_t i = 0; i < size; ++i) 54 | { 55 | assert(ptr[i] == static_cast(i % 256)); 56 | } 57 | 58 | MemoryPool::deallocate(ptr, size); 59 | std::cout << "Memory writing test passed!" << std::endl; 60 | } 61 | 62 | // 多线程测试 63 | void testMultiThreading() 64 | { 65 | std::cout << "Running multi-threading test..." << std::endl; 66 | 67 | const int NUM_THREADS = 4; 68 | const int ALLOCS_PER_THREAD = 1000; 69 | std::atomic has_error{false}; 70 | 71 | auto threadFunc = [&has_error]() 72 | { 73 | try 74 | { 75 | std::vector> allocations; 76 | allocations.reserve(ALLOCS_PER_THREAD); 77 | 78 | for (int i = 0; i < ALLOCS_PER_THREAD && !has_error; ++i) 79 | { 80 | size_t size = (rand() % 256 + 1) * 8; 81 | void* ptr = MemoryPool::allocate(size); 82 | 83 | if (!ptr) 84 | { 85 | std::cerr << "Allocation failed for size: " << size << std::endl; 86 | has_error = true; 87 | break; 88 | } 89 | 90 | allocations.push_back({ptr, size}); 91 | 92 | if (rand() % 2 && !allocations.empty()) 93 | { 94 | size_t index = rand() % allocations.size(); 95 | MemoryPool::deallocate(allocations[index].first, 96 | allocations[index].second); 97 | allocations.erase(allocations.begin() + index); 98 | } 99 | } 100 | 101 | for (const auto& alloc : allocations) 102 | { 103 | MemoryPool::deallocate(alloc.first, alloc.second); 104 | } 105 | } 106 | catch (const std::exception& e) 107 | { 108 | std::cerr << "Thread exception: " << e.what() << std::endl; 109 | has_error = true; 110 | } 111 | }; 112 | 113 | std::vector threads; 114 | for (int i = 0; i < NUM_THREADS; ++i) 115 | { 116 | threads.emplace_back(threadFunc); 117 | } 118 | 119 | for (auto& thread : threads) 120 | { 121 | thread.join(); 122 | } 123 | 124 | std::cout << 
"Multi-threading test passed!" << std::endl; 125 | } 126 | 127 | // 边界测试 128 | void testEdgeCases() 129 | { 130 | std::cout << "Running edge cases test..." << std::endl; 131 | 132 | // 测试0大小分配 133 | void* ptr1 = MemoryPool::allocate(0); 134 | assert(ptr1 != nullptr); 135 | MemoryPool::deallocate(ptr1, 0); 136 | 137 | // 测试最小对齐大小 138 | void* ptr2 = MemoryPool::allocate(1); 139 | assert(ptr2 != nullptr); 140 | assert((reinterpret_cast(ptr2) & (ALIGNMENT - 1)) == 0); 141 | MemoryPool::deallocate(ptr2, 1); 142 | 143 | // 测试最大大小边界 144 | void* ptr3 = MemoryPool::allocate(MAX_BYTES); 145 | assert(ptr3 != nullptr); 146 | MemoryPool::deallocate(ptr3, MAX_BYTES); 147 | 148 | // 测试超过最大大小 149 | void* ptr4 = MemoryPool::allocate(MAX_BYTES + 1); 150 | assert(ptr4 != nullptr); 151 | MemoryPool::deallocate(ptr4, MAX_BYTES + 1); 152 | 153 | std::cout << "Edge cases test passed!" << std::endl; 154 | } 155 | 156 | // 压力测试 157 | void testStress() 158 | { 159 | std::cout << "Running stress test..." << std::endl; 160 | 161 | const int NUM_ITERATIONS = 10000; 162 | std::vector> allocations; 163 | allocations.reserve(NUM_ITERATIONS); 164 | 165 | for (int i = 0; i < NUM_ITERATIONS; ++i) 166 | { 167 | size_t size = (rand() % 1024 + 1) * 8; 168 | void* ptr = MemoryPool::allocate(size); 169 | assert(ptr != nullptr); 170 | allocations.push_back({ptr, size}); 171 | } 172 | 173 | // 随机顺序释放 174 | std::random_device rd; 175 | std::mt19937 g(rd()); 176 | std::shuffle(allocations.begin(), allocations.end(), g); 177 | for (const auto& alloc : allocations) 178 | { 179 | MemoryPool::deallocate(alloc.first, alloc.second); 180 | } 181 | 182 | std::cout << "Stress test passed!" << std::endl; 183 | } 184 | 185 | int main() 186 | { 187 | try 188 | { 189 | std::cout << "Starting memory pool tests..." << std::endl; 190 | 191 | testBasicAllocation(); 192 | testMemoryWriting(); 193 | testMultiThreading(); 194 | testEdgeCases(); 195 | testStress(); 196 | 197 | std::cout << "All tests passed successfully!" 
<< std::endl; 198 | return 0; 199 | } 200 | catch (const std::exception& e) 201 | { 202 | std::cerr << "Test failed with exception: " << e.what() << std::endl; 203 | return 1; 204 | } 205 | } -------------------------------------------------------------------------------- /v3/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 3.10) 2 | project(memory_pool) 3 | 4 | # 设置C++标准 5 | set(CMAKE_CXX_STANDARD 17) 6 | set(CMAKE_CXX_STANDARD_REQUIRED ON) 7 | 8 | # 编译选项 9 | add_compile_options(-Wall -O2) 10 | 11 | # 查找pthread库 12 | find_package(Threads REQUIRED) 13 | 14 | # 设置目录 15 | set(SRC_DIR ${CMAKE_SOURCE_DIR}/src) 16 | set(INC_DIR ${CMAKE_SOURCE_DIR}/include) 17 | set(TEST_DIR ${CMAKE_SOURCE_DIR}/tests) 18 | 19 | # 源文件 20 | file(GLOB SOURCES "${SRC_DIR}/*.cpp") 21 | 22 | # 添加头文件目录 23 | include_directories(${INC_DIR}) 24 | 25 | # 创建单元测试可执行文件 26 | add_executable(unit_test 27 | ${SOURCES} 28 | ${TEST_DIR}/UnitTest.cpp 29 | ) 30 | 31 | # 创建性能测试可执行文件 32 | add_executable(perf_test 33 | ${SOURCES} 34 | ${TEST_DIR}/PerformanceTest.cpp 35 | ) 36 | 37 | # 链接pthread库 38 | target_link_libraries(unit_test PRIVATE Threads::Threads) 39 | target_link_libraries(perf_test PRIVATE Threads::Threads) 40 | 41 | # 添加测试命令 42 | add_custom_target(test 43 | COMMAND ./unit_test 44 | DEPENDS unit_test 45 | ) 46 | 47 | add_custom_target(perf 48 | COMMAND ./perf_test 49 | DEPENDS perf_test 50 | ) -------------------------------------------------------------------------------- /v3/include/CentralCache.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "Common.h" 3 | #include 4 | 5 | namespace Kama_memoryPool 6 | { 7 | 8 | class CentralCache 9 | { 10 | public: 11 | static CentralCache& getInstance() 12 | { 13 | static CentralCache instance; 14 | return instance; 15 | } 16 | 17 | void* fetchRange(size_t index, size_t batchNum); 18 | void returnRange(void* start, 
size_t size, size_t bytes); 19 | 20 | private: 21 | // Constructor: initialize every atomic free-list head pointer to nullptr 22 | CentralCache() 23 | { 24 | for (auto& ptr : centralFreeList_) 25 | { 26 | ptr.store(nullptr, std::memory_order_relaxed); 27 | } 28 | // Initialize all spinlocks to the unlocked state 29 | for (auto& lock : locks_) 30 | { 31 | lock.clear(); 32 | } 33 | } 34 | // Fetch a new span of memory from the PageCache 35 | void* fetchFromPageCache(size_t size); 36 | 37 | private: 38 | // Central cache free lists, one per size class 39 | std::array, FREE_LIST_SIZE> centralFreeList_; 40 | 41 | // Spinlocks used for per-size-class synchronization 42 | std::array locks_; 43 | }; 44 | 45 | } // namespace memoryPool -------------------------------------------------------------------------------- /v3/include/Common.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | #include 5 | 6 | namespace Kama_memoryPool 7 | { 8 | // Alignment and size-limit constants 9 | constexpr size_t ALIGNMENT = 8; 10 | constexpr size_t MAX_BYTES = 256 * 1024; // 256KB 11 | constexpr size_t FREE_LIST_SIZE = MAX_BYTES / ALIGNMENT; // ALIGNMENT equals sizeof(void*) 12 | 13 | // Per-block header metadata 14 | struct BlockHeader 15 | { 16 | size_t size; // block size in bytes 17 | bool inUse; // in-use flag 18 | BlockHeader* next; // next block in the list 19 | }; 20 | 21 | // Size-class helpers 22 | class SizeClass 23 | { 24 | public: 25 | static size_t roundUp(size_t bytes) 26 | { 27 | return (bytes + ALIGNMENT - 1) & ~(ALIGNMENT - 1); 28 | } 29 | 30 | static size_t getIndex(size_t bytes) 31 | { 32 | // ensure bytes is at least ALIGNMENT 33 | bytes = std::max(bytes, ALIGNMENT); 34 | // round up, then subtract one for a zero-based index 35 | return (bytes + ALIGNMENT - 1) / ALIGNMENT - 1; 36 | } 37 | }; 38 | 39 | } // namespace memoryPool -------------------------------------------------------------------------------- /v3/include/MemoryPool.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "ThreadCache.h" 3 | 4 | namespace Kama_memoryPool 5 | { 6 | 7 | class MemoryPool 8 | { 9 | public: 10 | static void* allocate(size_t size) 11 | { 12 | return ThreadCache::getInstance()->allocate(size); 13 | } 14 |
15 | static void deallocate(void* ptr, size_t size) 16 | { 17 | ThreadCache::getInstance()->deallocate(ptr, size); 18 | } 19 | }; 20 | 21 | } // namespace memoryPool -------------------------------------------------------------------------------- /v3/include/PageCache.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "Common.h" 3 | #include 4 | #include 5 | 6 | namespace Kama_memoryPool 7 | { 8 | 9 | class PageCache 10 | { 11 | public: 12 | static const size_t PAGE_SIZE = 4096; // 4K页大小 13 | 14 | static PageCache& getInstance() 15 | { 16 | static PageCache instance; 17 | return instance; 18 | } 19 | 20 | // 分配指定页数的span 21 | void* allocateSpan(size_t numPages); 22 | 23 | // 释放span 24 | void deallocateSpan(void* ptr, size_t numPages); 25 | 26 | private: 27 | PageCache() = default; 28 | 29 | // 向系统申请内存 30 | void* systemAlloc(size_t numPages); 31 | private: 32 | struct Span 33 | { 34 | void* pageAddr; // 页起始地址 35 | size_t numPages; // 页数 36 | Span* next; // 链表指针 37 | }; 38 | 39 | // 按页数管理空闲span,不同页数对应不同Span链表 40 | std::map freeSpans_; 41 | // 页号到span的映射,用于回收 42 | std::map spanMap_; 43 | std::mutex mutex_; 44 | }; 45 | 46 | } // namespace memoryPool -------------------------------------------------------------------------------- /v3/include/ThreadCache.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include "Common.h" 3 | 4 | namespace Kama_memoryPool 5 | { 6 | 7 | // 线程本地缓存 8 | class ThreadCache 9 | { 10 | public: 11 | static ThreadCache* getInstance() 12 | { 13 | static thread_local ThreadCache instance; 14 | return &instance; 15 | } 16 | 17 | void* allocate(size_t size); 18 | void deallocate(void* ptr, size_t size); 19 | private: 20 | ThreadCache() = default; 21 | // 从中心缓存获取内存 22 | void* fetchFromCentralCache(size_t index); 23 | // 归还内存到中心缓存 24 | void returnToCentralCache(void* start, size_t size); 25 | // 计算批量获取内存块的数量 26 | size_t 
getBatchNum(size_t size); 27 | // 判断是否需要归还内存给中心缓存 28 | bool shouldReturnToCentralCache(size_t index); 29 | private: 30 | // 每个线程的自由链表数组 31 | std::array freeList_; 32 | std::array freeListSize_; // 自由链表大小统计 33 | }; 34 | 35 | } // namespace memoryPool -------------------------------------------------------------------------------- /v3/src/CentralCache.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/CentralCache.h" 2 | #include "../include/PageCache.h" 3 | #include 4 | #include 5 | 6 | namespace Kama_memoryPool 7 | { 8 | 9 | // 每次从PageCache获取span大小(以页为单位) 10 | static const size_t SPAN_PAGES = 8; 11 | 12 | void* CentralCache::fetchRange(size_t index, size_t batchNum) 13 | { 14 | // 索引检查,当索引大于等于FREE_LIST_SIZE时,说明申请内存过大应直接向系统申请 15 | if (index >= FREE_LIST_SIZE || batchNum == 0) 16 | return nullptr; 17 | 18 | // 自旋锁保护 19 | while (locks_[index].test_and_set(std::memory_order_acquire)) 20 | { 21 | std::this_thread::yield(); // 添加线程让步,避免忙等待,避免过度消耗CPU 22 | } 23 | 24 | void* result = nullptr; 25 | try 26 | { 27 | // 尝试从中心缓存获取内存块 28 | result = centralFreeList_[index].load(std::memory_order_relaxed); 29 | 30 | if (!result) 31 | { 32 | // 如果中心缓存为空,从页缓存获取新的内存块 33 | size_t size = (index + 1) * ALIGNMENT; 34 | result = fetchFromPageCache(size); 35 | 36 | if (!result) 37 | { 38 | locks_[index].clear(std::memory_order_release); 39 | return nullptr; 40 | } 41 | 42 | // 将从PageCache获取的内存块切分成小块 43 | char* start = static_cast(result); 44 | size_t totalBlocks = (SPAN_PAGES * PageCache::PAGE_SIZE) / size; 45 | size_t allocBlocks = std::min(batchNum, totalBlocks); 46 | 47 | // 构建返回给ThreadCache的内存块链表 48 | if (allocBlocks > 1) 49 | { 50 | // 确保至少有两个块才构建链表 51 | // 构建链表 52 | for (size_t i = 1; i < allocBlocks; ++i) 53 | { 54 | void* current = start + (i - 1) * size; 55 | void* next = start + i * size; 56 | *reinterpret_cast(current) = next; 57 | } 58 | *reinterpret_cast(start + (allocBlocks - 1) * size) = nullptr; 59 | } 60 | 61 | // 
构建保留在CentralCache的链表 62 | if (totalBlocks > allocBlocks) 63 | { 64 | void* remainStart = start + allocBlocks * size; 65 | for (size_t i = allocBlocks + 1; i < totalBlocks; ++i) 66 | { 67 | void* current = start + (i - 1) * size; 68 | void* next = start + i * size; 69 | *reinterpret_cast(current) = next; 70 | } 71 | *reinterpret_cast(start + (totalBlocks - 1) * size) = nullptr; 72 | 73 | centralFreeList_[index].store(remainStart, std::memory_order_release); 74 | } 75 | } 76 | else // 如果中心缓存有index对应大小的内存块 77 | { 78 | // 从现有链表中获取指定数量的块 79 | void* current = result; 80 | void* prev = nullptr; 81 | size_t count = 0; 82 | 83 | while (current && count < batchNum) 84 | { 85 | prev = current; 86 | current = *reinterpret_cast(current); 87 | count++; 88 | } 89 | 90 | if (prev) // 当前centralFreeList_[index]链表上的内存块大于batchNum时需要用到 91 | { 92 | *reinterpret_cast(prev) = nullptr; 93 | } 94 | 95 | centralFreeList_[index].store(current, std::memory_order_release); 96 | } 97 | } 98 | catch (...) 99 | { 100 | locks_[index].clear(std::memory_order_release); 101 | throw; 102 | } 103 | 104 | // 释放锁 105 | locks_[index].clear(std::memory_order_release); 106 | return result; 107 | } 108 | 109 | void CentralCache::returnRange(void* start, size_t size, size_t index) 110 | { 111 | // 当索引大于等于FREE_LIST_SIZE时,说明内存过大应直接向系统归还 112 | if (!start || index >= FREE_LIST_SIZE) 113 | return; 114 | 115 | while (locks_[index].test_and_set(std::memory_order_acquire)) 116 | { 117 | std::this_thread::yield(); 118 | } 119 | 120 | try 121 | { 122 | // 找到要归还的链表的最后一个节点 123 | void* end = start; 124 | size_t count = 1; 125 | while (*reinterpret_cast(end) != nullptr && count < size) { 126 | end = *reinterpret_cast(end); 127 | count++; 128 | } 129 | 130 | // 将归还的链表连接到中心缓存的链表头部 131 | void* current = centralFreeList_[index].load(std::memory_order_relaxed); 132 | *reinterpret_cast(end) = current; // 将原链表头接到归还链表的尾部 133 | centralFreeList_[index].store(start, std::memory_order_release); // 将归还的链表头设为新的链表头 134 | } 135 | catch 
(...) 136 | { 137 | locks_[index].clear(std::memory_order_release); 138 | throw; 139 | } 140 | 141 | locks_[index].clear(std::memory_order_release); 142 | } 143 | 144 | void* CentralCache::fetchFromPageCache(size_t size) 145 | { 146 | // 1. 计算实际需要的页数 147 | size_t numPages = (size + PageCache::PAGE_SIZE - 1) / PageCache::PAGE_SIZE; 148 | 149 | // 2. 根据大小决定分配策略 150 | if (size <= SPAN_PAGES * PageCache::PAGE_SIZE) 151 | { 152 | // 小于等于32KB的请求,使用固定8页 153 | return PageCache::getInstance().allocateSpan(SPAN_PAGES); 154 | } 155 | else 156 | { 157 | // 大于32KB的请求,按实际需求分配 158 | return PageCache::getInstance().allocateSpan(numPages); 159 | } 160 | } 161 | 162 | } // namespace memoryPool -------------------------------------------------------------------------------- /v3/src/PageCache.cpp: -------------------------------------------------------------------------------- 1 | #include "PageCache.h" 2 | #include 3 | #include 4 | 5 | namespace Kama_memoryPool 6 | { 7 | 8 | void* PageCache::allocateSpan(size_t numPages) 9 | { 10 | std::lock_guard lock(mutex_); 11 | 12 | // 查找合适的空闲span 13 | // lower_bound函数返回第一个大于等于numPages的元素的迭代器 14 | auto it = freeSpans_.lower_bound(numPages); 15 | if (it != freeSpans_.end()) 16 | { 17 | Span* span = it->second; 18 | 19 | // 将取出的span从原有的空闲链表freeSpans_[it->first]中移除 20 | if (span->next) 21 | { 22 | freeSpans_[it->first] = span->next; 23 | } 24 | else 25 | { 26 | freeSpans_.erase(it); 27 | } 28 | 29 | // 如果span大于需要的numPages则进行分割 30 | if (span->numPages > numPages) 31 | { 32 | Span* newSpan = new Span; 33 | newSpan->pageAddr = static_cast(span->pageAddr) + 34 | numPages * PAGE_SIZE; 35 | newSpan->numPages = span->numPages - numPages; 36 | newSpan->next = nullptr; 37 | 38 | // 将超出部分放回空闲Span*列表头部 39 | auto& list = freeSpans_[newSpan->numPages]; 40 | newSpan->next = list; 41 | list = newSpan; 42 | 43 | span->numPages = numPages; 44 | } 45 | 46 | // 记录span信息用于回收 47 | spanMap_[span->pageAddr] = span; 48 | return span->pageAddr; 49 | } 50 | 51 | // 
没有合适的span,向系统申请 52 | void* memory = systemAlloc(numPages); 53 | if (!memory) return nullptr; 54 | 55 | // 创建新的span 56 | Span* span = new Span; 57 | span->pageAddr = memory; 58 | span->numPages = numPages; 59 | span->next = nullptr; 60 | 61 | // 记录span信息用于回收 62 | spanMap_[memory] = span; 63 | return memory; 64 | } 65 | 66 | void PageCache::deallocateSpan(void* ptr, size_t numPages) 67 | { 68 | std::lock_guard lock(mutex_); 69 | 70 | // 查找对应的span,没找到代表不是PageCache分配的内存,直接返回 71 | auto it = spanMap_.find(ptr); 72 | if (it == spanMap_.end()) return; 73 | 74 | Span* span = it->second; 75 | 76 | // 尝试合并相邻的span 77 | void* nextAddr = static_cast(ptr) + numPages * PAGE_SIZE; 78 | auto nextIt = spanMap_.find(nextAddr); 79 | 80 | if (nextIt != spanMap_.end()) 81 | { 82 | Span* nextSpan = nextIt->second; 83 | 84 | // 1. 首先检查nextSpan是否在空闲链表中 85 | bool found = false; 86 | auto& nextList = freeSpans_[nextSpan->numPages]; 87 | 88 | // 检查是否是头节点 89 | if (nextList == nextSpan) 90 | { 91 | nextList = nextSpan->next; 92 | found = true; 93 | } 94 | else if (nextList) // 只有在链表非空时才遍历 95 | { 96 | Span* prev = nextList; 97 | while (prev->next) 98 | { 99 | if (prev->next == nextSpan) 100 | { 101 | // 将nextSpan从空闲链表中移除 102 | prev->next = nextSpan->next; 103 | found = true; 104 | break; 105 | } 106 | prev = prev->next; 107 | } 108 | } 109 | 110 | // 2. 
只有在找到nextSpan的情况下才进行合并 111 | if (found) 112 | { 113 | // 合并span 114 | span->numPages += nextSpan->numPages; 115 | spanMap_.erase(nextAddr); 116 | delete nextSpan; 117 | } 118 | } 119 | 120 | // 将合并后的span通过头插法插入空闲列表 121 | auto& list = freeSpans_[span->numPages]; 122 | span->next = list; 123 | list = span; 124 | } 125 | 126 | void* PageCache::systemAlloc(size_t numPages) 127 | { 128 | size_t size = numPages * PAGE_SIZE; 129 | 130 | // 使用mmap分配内存 131 | void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, 132 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 133 | if (ptr == MAP_FAILED) return nullptr; 134 | 135 | // 清零内存 136 | memset(ptr, 0, size); 137 | return ptr; 138 | } 139 | 140 | } // namespace memoryPool -------------------------------------------------------------------------------- /v3/src/ThreadCache.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/ThreadCache.h" 2 | #include "../include/CentralCache.h" 3 | 4 | namespace Kama_memoryPool 5 | { 6 | 7 | void* ThreadCache::allocate(size_t size) 8 | { 9 | // 处理0大小的分配请求 10 | if (size == 0) 11 | { 12 | size = ALIGNMENT; // 至少分配一个对齐大小 13 | } 14 | 15 | if (size > MAX_BYTES) 16 | { 17 | // 大对象直接从系统分配 18 | return malloc(size); 19 | } 20 | 21 | size_t index = SizeClass::getIndex(size); 22 | 23 | // 更新自由链表大小 24 | freeListSize_[index]--; 25 | 26 | // 检查线程本地自由链表 27 | // 如果 freeList_[index] 不为空,表示该链表中有可用内存块 28 | if (void* ptr = freeList_[index]) 29 | { 30 | freeList_[index] = *reinterpret_cast(ptr); // 将freeList_[index]指向的内存块的下一个内存块地址(取决于内存块的实现) 31 | return ptr; 32 | } 33 | 34 | // 如果线程本地自由链表为空,则从中心缓存获取一批内存 35 | return fetchFromCentralCache(index); 36 | } 37 | 38 | void ThreadCache::deallocate(void* ptr, size_t size) 39 | { 40 | if (size > MAX_BYTES) 41 | { 42 | free(ptr); 43 | return; 44 | } 45 | 46 | size_t index = SizeClass::getIndex(size); 47 | 48 | // 插入到线程本地自由链表 49 | *reinterpret_cast(ptr) = freeList_[index]; 50 | freeList_[index] = ptr; 51 | 52 | // 更新自由链表大小 53 | 
freeListSize_[index]++; // 增加对应大小类的自由链表大小 54 | 55 | // 判断是否需要将部分内存回收给中心缓存 56 | if (shouldReturnToCentralCache(index)) 57 | { 58 | returnToCentralCache(freeList_[index], size); 59 | } 60 | } 61 | 62 | // 判断是否需要将内存回收给中心缓存 63 | bool ThreadCache::shouldReturnToCentralCache(size_t index) 64 | { 65 | // 设定阈值,例如:当自由链表的大小超过一定数量时 66 | size_t threshold = 64; // 例如,64个内存块 67 | return (freeListSize_[index] > threshold); 68 | } 69 | 70 | void* ThreadCache::fetchFromCentralCache(size_t index) 71 | { 72 | size_t size = (index + 1) * ALIGNMENT; 73 | // 根据对象内存大小计算批量获取的数量 74 | size_t batchNum = getBatchNum(size); 75 | // 从中心缓存批量获取内存 76 | void* start = CentralCache::getInstance().fetchRange(index, batchNum); 77 | if (!start) return nullptr; 78 | 79 | // 更新自由链表大小 80 | freeListSize_[index] += batchNum; // 增加对应大小类的自由链表大小 81 | 82 | // 取一个返回,其余放入线程本地自由链表 83 | void* result = start; 84 | if (batchNum > 1) 85 | { 86 | freeList_[index] = *reinterpret_cast(start); 87 | } 88 | 89 | return result; 90 | } 91 | 92 | void ThreadCache::returnToCentralCache(void* start, size_t size) 93 | { 94 | // 根据大小计算对应的索引 95 | size_t index = SizeClass::getIndex(size); 96 | 97 | // 获取对齐后的实际块大小 98 | size_t alignedSize = SizeClass::roundUp(size); 99 | 100 | // 计算要归还内存块数量 101 | size_t batchNum = freeListSize_[index]; 102 | if (batchNum <= 1) return; // 如果只有一个块,则不归还 103 | 104 | // 保留一部分在ThreadCache中(比如保留1/4) 105 | size_t keepNum = std::max(batchNum / 4, size_t(1)); 106 | size_t returnNum = batchNum - keepNum; 107 | 108 | // 将内存块串成链表 109 | char* current = static_cast(start); 110 | // 使用对齐后的大小计算分割点 111 | char* splitNode = current; 112 | for (size_t i = 0; i < keepNum - 1; ++i) 113 | { 114 | splitNode = reinterpret_cast(*reinterpret_cast(splitNode)); 115 | if (splitNode == nullptr) 116 | { 117 | // 如果链表提前结束,更新实际的返回数量 118 | returnNum = batchNum - (i + 1); 119 | break; 120 | } 121 | } 122 | 123 | if (splitNode != nullptr) 124 | { 125 | // 将要返回的部分和要保留的部分断开 126 | void* nextNode = *reinterpret_cast(splitNode); 127 | 
*reinterpret_cast(splitNode) = nullptr; // sever the kept part from the returned part 128 | 129 | // Keep the front part on the ThreadCache free list 130 | freeList_[index] = start; 131 | 132 | // Update the free-list block count 133 | freeListSize_[index] = keepNum; 134 | 135 | // Hand the remainder back to the CentralCache 136 | if (returnNum > 0 && nextNode != nullptr) 137 | { 138 | CentralCache::getInstance().returnRange(nextNode, returnNum * alignedSize, index); 139 | } 140 | } 141 | } 142 | 143 | // Compute how many blocks to fetch from the central cache in one batch 144 | size_t ThreadCache::getBatchNum(size_t size) 145 | { 146 | // Baseline: fetch at most 4KB of memory per batch 147 | constexpr size_t MAX_BATCH_SIZE = 4 * 1024; // 4KB 148 | 149 | // Pick a sensible base batch count for the object size 150 | size_t baseNum; 151 | if (size <= 32) baseNum = 64; // 64 * 32 = 2KB 152 | else if (size <= 64) baseNum = 32; // 32 * 64 = 2KB 153 | else if (size <= 128) baseNum = 16; // 16 * 128 = 2KB 154 | else if (size <= 256) baseNum = 8; // 8 * 256 = 2KB 155 | else if (size <= 512) baseNum = 4; // 4 * 512 = 2KB 156 | else if (size <= 1024) baseNum = 2; // 2 * 1024 = 2KB 157 | else baseNum = 1; // objects larger than 1024B are fetched one at a time 158 | 159 | // Upper bound implied by the 4KB batch budget 160 | size_t maxNum = std::max(size_t(1), MAX_BATCH_SIZE / size); 161 | 162 | // Take the smaller of the two bounds, but always return at least 1. Bug fix: this was std::max(sizeof(1), ...); sizeof(1) == sizeof(int) (typically 4), which silently forced a minimum batch of 4 even where baseNum is 1. size_t(1) is the intended floor. 163 | return std::max(size_t(1), std::min(maxNum, baseNum)); 164 | } 165 | 166 | } // namespace memoryPool -------------------------------------------------------------------------------- /v3/tests/PerformanceTest.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/MemoryPool.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | using namespace Kama_memoryPool; 10 | using namespace std::chrono; 11 | 12 | // Timer helper 13 | class Timer 14 | { 15 | high_resolution_clock::time_point start; 16 | public: 17 | Timer() : start(high_resolution_clock::now()) {} 18 | 19 | double elapsed() 20 | { 21 | auto end = high_resolution_clock::now(); 22 | return duration_cast(end - start).count() / 1000.0; // convert to milliseconds 23 | } 24 | }; 25 | 26 | // Performance test driver 27 | class PerformanceTest 28 | { 29 | private: 30 |
// 测试统计信息 31 | struct TestStats 32 | { 33 | double memPoolTime{0.0}; 34 | double systemTime{0.0}; 35 | size_t totalAllocs{0}; 36 | size_t totalBytes{0}; 37 | }; 38 | 39 | public: 40 | // 1. 系统预热 41 | static void warmup() 42 | { 43 | std::cout << "Warming up memory systems...\n"; 44 | // 使用 pair 来存储指针和对应的大小 45 | std::vector> warmupPtrs; 46 | 47 | // 预热内存池 48 | for (int i = 0; i < 1000; ++i) 49 | { 50 | for (size_t size : {32, 64, 128, 256, 512}) { 51 | void* p = MemoryPool::allocate(size); 52 | warmupPtrs.emplace_back(p, size); // 存储指针和对应的大小 53 | } 54 | } 55 | 56 | // 释放预热内存 57 | for (const auto& [ptr, size] : warmupPtrs) 58 | { 59 | MemoryPool::deallocate(ptr, size); // 使用实际分配的大小进行释放 60 | } 61 | 62 | std::cout << "Warmup complete.\n\n"; 63 | } 64 | 65 | // 2. 小对象分配测试 66 | static void testSmallAllocation() 67 | { 68 | constexpr size_t NUM_ALLOCS = 100000; 69 | constexpr size_t SMALL_SIZE = 32; 70 | 71 | std::cout << "\nTesting small allocations (" << NUM_ALLOCS << " allocations of " 72 | << SMALL_SIZE << " bytes):" << std::endl; 73 | 74 | // 测试内存池 75 | { 76 | Timer t; 77 | std::vector ptrs; 78 | ptrs.reserve(NUM_ALLOCS); 79 | 80 | for (size_t i = 0; i < NUM_ALLOCS; ++i) 81 | { 82 | ptrs.push_back(MemoryPool::allocate(SMALL_SIZE)); 83 | 84 | // 模拟真实使用:部分立即释放 85 | if (i % 4 == 0) 86 | { 87 | MemoryPool::deallocate(ptrs.back(), SMALL_SIZE); 88 | ptrs.pop_back(); 89 | } 90 | } 91 | 92 | for (void* ptr : ptrs) 93 | { 94 | MemoryPool::deallocate(ptr, SMALL_SIZE); 95 | } 96 | 97 | std::cout << "Memory Pool: " << std::fixed << std::setprecision(3) 98 | << t.elapsed() << " ms" << std::endl; 99 | } 100 | 101 | // 测试new/delete 102 | { 103 | Timer t; 104 | std::vector ptrs; 105 | ptrs.reserve(NUM_ALLOCS); 106 | 107 | for (size_t i = 0; i < NUM_ALLOCS; ++i) 108 | { 109 | ptrs.push_back(new char[SMALL_SIZE]); 110 | 111 | if (i % 4 == 0) 112 | { 113 | delete[] static_cast(ptrs.back()); 114 | ptrs.pop_back(); 115 | } 116 | } 117 | 118 | for (void* ptr : ptrs) 119 | { 120 | delete[] 
static_cast(ptr); 121 | } 122 | 123 | std::cout << "New/Delete: " << std::fixed << std::setprecision(3) 124 | << t.elapsed() << " ms" << std::endl; 125 | } 126 | } 127 | 128 | // 3. 多线程测试 129 | static void testMultiThreaded() 130 | { 131 | constexpr size_t NUM_THREADS = 4; 132 | constexpr size_t ALLOCS_PER_THREAD = 25000; 133 | constexpr size_t MAX_SIZE = 256; 134 | 135 | std::cout << "\nTesting multi-threaded allocations (" << NUM_THREADS 136 | << " threads, " << ALLOCS_PER_THREAD << " allocations each):" 137 | << std::endl; 138 | 139 | auto threadFunc = [](bool useMemPool) 140 | { 141 | std::random_device rd; 142 | std::mt19937 gen(rd()); 143 | std::uniform_int_distribution<> dis(8, MAX_SIZE); 144 | std::vector> ptrs; 145 | ptrs.reserve(ALLOCS_PER_THREAD); 146 | 147 | for (size_t i = 0; i < ALLOCS_PER_THREAD; ++i) 148 | { 149 | size_t size = dis(gen); 150 | void* ptr = useMemPool ? MemoryPool::allocate(size) 151 | : new char[size]; 152 | ptrs.push_back({ptr, size}); 153 | 154 | // 随机释放一些内存 155 | if (rand() % 100 < 75) 156 | { // 75%的概率释放 157 | size_t index = rand() % ptrs.size(); 158 | if (useMemPool) { 159 | MemoryPool::deallocate(ptrs[index].first, ptrs[index].second); 160 | } else { 161 | delete[] static_cast(ptrs[index].first); 162 | } 163 | ptrs[index] = ptrs.back(); 164 | ptrs.pop_back(); 165 | } 166 | } 167 | 168 | // 清理剩余内存 169 | for (const auto& [ptr, size] : ptrs) 170 | { 171 | if (useMemPool) 172 | { 173 | MemoryPool::deallocate(ptr, size); 174 | } 175 | else 176 | { 177 | delete[] static_cast(ptr); 178 | } 179 | } 180 | }; 181 | 182 | // 测试内存池 183 | { 184 | Timer t; 185 | std::vector threads; 186 | 187 | for (size_t i = 0; i < NUM_THREADS; ++i) 188 | { 189 | threads.emplace_back(threadFunc, true); 190 | } 191 | 192 | for (auto& thread : threads) 193 | { 194 | thread.join(); 195 | } 196 | 197 | std::cout << "Memory Pool: " << std::fixed << std::setprecision(3) 198 | << t.elapsed() << " ms" << std::endl; 199 | } 200 | 201 | // 测试new/delete 202 | { 203 | 
Timer t; 204 | std::vector threads; 205 | 206 | for (size_t i = 0; i < NUM_THREADS; ++i) 207 | { 208 | threads.emplace_back(threadFunc, false); 209 | } 210 | 211 | for (auto& thread : threads) 212 | { 213 | thread.join(); 214 | } 215 | 216 | std::cout << "New/Delete: " << std::fixed << std::setprecision(3) 217 | << t.elapsed() << " ms" << std::endl; 218 | } 219 | } 220 | 221 | // 4. 混合大小测试 222 | static void testMixedSizes() 223 | { 224 | constexpr size_t NUM_ALLOCS = 50000; 225 | const size_t SIZES[] = {16, 32, 64, 128, 256, 512, 1024, 2048}; 226 | 227 | std::cout << "\nTesting mixed size allocations (" << NUM_ALLOCS 228 | << " allocations):" << std::endl; 229 | 230 | // 测试内存池 231 | { 232 | Timer t; 233 | std::vector> ptrs; 234 | ptrs.reserve(NUM_ALLOCS); 235 | 236 | for (size_t i = 0; i < NUM_ALLOCS; ++i) 237 | { 238 | size_t size = SIZES[rand() % 8]; 239 | void* p = MemoryPool::allocate(size); 240 | ptrs.emplace_back(p, size); 241 | 242 | // 批量释放 243 | if (i % 100 == 0 && !ptrs.empty()) 244 | { 245 | size_t releaseCount = std::min(ptrs.size(), size_t(20)); 246 | for (size_t j = 0; j < releaseCount; ++j) 247 | { 248 | MemoryPool::deallocate(ptrs.back().first, ptrs.back().second); 249 | ptrs.pop_back(); 250 | } 251 | } 252 | } 253 | 254 | for (const auto& [ptr, size] : ptrs) 255 | { 256 | MemoryPool::deallocate(ptr, size); 257 | } 258 | 259 | std::cout << "Memory Pool: " << std::fixed << std::setprecision(3) 260 | << t.elapsed() << " ms" << std::endl; 261 | } 262 | 263 | // 测试new/delete 264 | { 265 | Timer t; 266 | std::vector> ptrs; 267 | ptrs.reserve(NUM_ALLOCS); 268 | 269 | for (size_t i = 0; i < NUM_ALLOCS; ++i) 270 | { 271 | size_t size = SIZES[rand() % 8]; 272 | void* p = new char[size]; 273 | ptrs.emplace_back(p, size); 274 | 275 | if (i % 100 == 0 && !ptrs.empty()) 276 | { 277 | size_t releaseCount = std::min(ptrs.size(), size_t(20)); 278 | for (size_t j = 0; j < releaseCount; ++j) 279 | { 280 | delete[] static_cast(ptrs.back().first); 281 | 
ptrs.pop_back(); 282 | } 283 | } 284 | } 285 | 286 | for (const auto& [ptr, size] : ptrs) 287 | { 288 | delete[] static_cast(ptr); 289 | } 290 | 291 | std::cout << "New/Delete: " << std::fixed << std::setprecision(3) 292 | << t.elapsed() << " ms" << std::endl; 293 | } 294 | } 295 | }; 296 | 297 | int main() 298 | { 299 | std::cout << "Starting performance tests..." << std::endl; 300 | 301 | // 预热系统 302 | PerformanceTest::warmup(); 303 | 304 | // 运行测试 305 | PerformanceTest::testSmallAllocation(); 306 | PerformanceTest::testMultiThreaded(); 307 | PerformanceTest::testMixedSizes(); 308 | 309 | return 0; 310 | } -------------------------------------------------------------------------------- /v3/tests/UnitTest.cpp: -------------------------------------------------------------------------------- 1 | #include "../include/MemoryPool.h" 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | using namespace Kama_memoryPool; 12 | 13 | // 基础分配测试 14 | void testBasicAllocation() 15 | { 16 | std::cout << "Running basic allocation test..." << std::endl; 17 | 18 | // 测试小内存分配 19 | void* ptr1 = MemoryPool::allocate(8); 20 | assert(ptr1 != nullptr); 21 | MemoryPool::deallocate(ptr1, 8); 22 | 23 | // 测试中等大小内存分配 24 | void* ptr2 = MemoryPool::allocate(1024); 25 | assert(ptr2 != nullptr); 26 | MemoryPool::deallocate(ptr2, 1024); 27 | 28 | // 测试大内存分配(超过MAX_BYTES) 29 | void* ptr3 = MemoryPool::allocate(1024 * 1024); 30 | assert(ptr3 != nullptr); 31 | MemoryPool::deallocate(ptr3, 1024 * 1024); 32 | 33 | std::cout << "Basic allocation test passed!" << std::endl; 34 | } 35 | 36 | // 内存写入测试 37 | void testMemoryWriting() 38 | { 39 | std::cout << "Running memory writing test..." 
<< std::endl; 40 | 41 | // 分配并写入数据 42 | const size_t size = 128; 43 | char* ptr = static_cast(MemoryPool::allocate(size)); 44 | assert(ptr != nullptr); 45 | 46 | // 写入数据 47 | for (size_t i = 0; i < size; ++i) 48 | { 49 | ptr[i] = static_cast(i % 256); 50 | } 51 | 52 | // 验证数据 53 | for (size_t i = 0; i < size; ++i) 54 | { 55 | assert(ptr[i] == static_cast(i % 256)); 56 | } 57 | 58 | MemoryPool::deallocate(ptr, size); 59 | std::cout << "Memory writing test passed!" << std::endl; 60 | } 61 | 62 | // 多线程测试 63 | void testMultiThreading() 64 | { 65 | std::cout << "Running multi-threading test..." << std::endl; 66 | 67 | const int NUM_THREADS = 4; 68 | const int ALLOCS_PER_THREAD = 1000; 69 | std::atomic has_error{false}; 70 | 71 | auto threadFunc = [&has_error]() 72 | { 73 | try 74 | { 75 | std::vector> allocations; 76 | allocations.reserve(ALLOCS_PER_THREAD); 77 | 78 | for (int i = 0; i < ALLOCS_PER_THREAD && !has_error; ++i) 79 | { 80 | size_t size = (rand() % 256 + 1) * 8; 81 | void* ptr = MemoryPool::allocate(size); 82 | 83 | if (!ptr) 84 | { 85 | std::cerr << "Allocation failed for size: " << size << std::endl; 86 | has_error = true; 87 | break; 88 | } 89 | 90 | allocations.push_back({ptr, size}); 91 | 92 | if (rand() % 2 && !allocations.empty()) 93 | { 94 | size_t index = rand() % allocations.size(); 95 | MemoryPool::deallocate(allocations[index].first, 96 | allocations[index].second); 97 | allocations.erase(allocations.begin() + index); 98 | } 99 | } 100 | 101 | for (const auto& alloc : allocations) 102 | { 103 | MemoryPool::deallocate(alloc.first, alloc.second); 104 | } 105 | } 106 | catch (const std::exception& e) 107 | { 108 | std::cerr << "Thread exception: " << e.what() << std::endl; 109 | has_error = true; 110 | } 111 | }; 112 | 113 | std::vector threads; 114 | for (int i = 0; i < NUM_THREADS; ++i) 115 | { 116 | threads.emplace_back(threadFunc); 117 | } 118 | 119 | for (auto& thread : threads) 120 | { 121 | thread.join(); 122 | } 123 | 124 | std::cout << 
"Multi-threading test passed!" << std::endl; 125 | } 126 | 127 | // 边界测试 128 | void testEdgeCases() 129 | { 130 | std::cout << "Running edge cases test..." << std::endl; 131 | 132 | // 测试0大小分配 133 | void* ptr1 = MemoryPool::allocate(0); 134 | assert(ptr1 != nullptr); 135 | MemoryPool::deallocate(ptr1, 0); 136 | 137 | // 测试最小对齐大小 138 | void* ptr2 = MemoryPool::allocate(1); 139 | assert(ptr2 != nullptr); 140 | assert((reinterpret_cast(ptr2) & (ALIGNMENT - 1)) == 0); 141 | MemoryPool::deallocate(ptr2, 1); 142 | 143 | // 测试最大大小边界 144 | void* ptr3 = MemoryPool::allocate(MAX_BYTES); 145 | assert(ptr3 != nullptr); 146 | MemoryPool::deallocate(ptr3, MAX_BYTES); 147 | 148 | // 测试超过最大大小 149 | void* ptr4 = MemoryPool::allocate(MAX_BYTES + 1); 150 | assert(ptr4 != nullptr); 151 | MemoryPool::deallocate(ptr4, MAX_BYTES + 1); 152 | 153 | std::cout << "Edge cases test passed!" << std::endl; 154 | } 155 | 156 | // 压力测试 157 | void testStress() 158 | { 159 | std::cout << "Running stress test..." << std::endl; 160 | 161 | const int NUM_ITERATIONS = 10000; 162 | std::vector> allocations; 163 | allocations.reserve(NUM_ITERATIONS); 164 | 165 | for (int i = 0; i < NUM_ITERATIONS; ++i) 166 | { 167 | size_t size = (rand() % 1024 + 1) * 8; 168 | void* ptr = MemoryPool::allocate(size); 169 | assert(ptr != nullptr); 170 | allocations.push_back({ptr, size}); 171 | } 172 | 173 | // 随机顺序释放 174 | std::random_device rd; 175 | std::mt19937 g(rd()); 176 | std::shuffle(allocations.begin(), allocations.end(), g); 177 | for (const auto& alloc : allocations) 178 | { 179 | MemoryPool::deallocate(alloc.first, alloc.second); 180 | } 181 | 182 | std::cout << "Stress test passed!" << std::endl; 183 | } 184 | 185 | int main() 186 | { 187 | try 188 | { 189 | std::cout << "Starting memory pool tests..." << std::endl; 190 | 191 | testBasicAllocation(); 192 | testMemoryWriting(); 193 | testMultiThreading(); 194 | testEdgeCases(); 195 | testStress(); 196 | 197 | std::cout << "All tests passed successfully!" 
<< std::endl; 198 | return 0; 199 | } 200 | catch (const std::exception& e) 201 | { 202 | std::cerr << "Test failed with exception: " << e.what() << std::endl; 203 | return 1; 204 | } 205 | } --------------------------------------------------------------------------------