// main.cpp

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <deque>
#include <functional>
#include <iostream>
#include <iterator>
#include <limits>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

// ----------------------------------------------------------------------------------------------------------
// ------------------------------- < allocator implementation later > ---------------------------------------
// ----------------------------------------------------------------------------------------------------------

namespace details
{

// Returns the number of bytes that must be added to 'not_aligned_address'
// to reach the next address that is a multiple of 'alignment'.
std::size_t getAlignmentPadding(std::size_t not_aligned_address, std::size_t alignment)
{
    if ( (alignment != 0u) && (not_aligned_address % alignment != 0u) )
    {
        const std::size_t multiplier = (not_aligned_address / alignment) + 1u;
        const std::size_t aligned_address = multiplier * alignment;
        return aligned_address - not_aligned_address;
    }

    return 0u;
}

// The current chunk implementation works only with sizes that are
// multiples of 4 bytes, because HEADER_SIZE is also 4 bytes.
// You can change HEADER_SIZE for your own purposes without problems.

template <std::size_t CHUNK_SIZE>
class Chunk
{
    static constexpr std::size_t HEADER_SIZE = 4u;
    static_assert(CHUNK_SIZE % HEADER_SIZE == 0, "CHUNK_SIZE must be a multiple of four");
    static_assert(CHUNK_SIZE > HEADER_SIZE, "CHUNK_SIZE must be greater than HEADER_SIZE");
public:
    Chunk()
    {
        m_blocks.resize(CHUNK_SIZE);
        std::uint32_t* init_header = reinterpret_cast<std::uint32_t*>(m_blocks.data());
        *init_header = CHUNK_SIZE - HEADER_SIZE;
        m_max_block = init_header;
        m_free_blocks.insert(init_header);
    }

    bool isInside(const std::uint8_t* address) const noexcept
    {
        const std::uint8_t* start_chunk_address = m_blocks.data();
        const std::uint8_t* end_chunk_address = start_chunk_address + CHUNK_SIZE;
        return (start_chunk_address <= address) && (address < end_chunk_address);
    }
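    // Memory layout inside a chunk, as the constructor above and
    // tryReserveBlock() below imply:
    //
    //     [ header | payload ][ header | payload ] ... [ header | payload ]
    //       4 bytes: a std::uint32_t that stores the payload size in bytes
    //
    // 'm_free_blocks' keeps the headers of all free blocks ordered by address,
    // which is what makes the linear coalescing in defragment() possible,
    // while 'm_max_block' caches the largest free block so that an impossible
    // request is rejected in O(1) before any search.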
    std::uint8_t* tryReserveBlock(std::size_t allocation_size)
    {
        const std::size_t not_aligned_address = reinterpret_cast<std::size_t>(m_max_block) + allocation_size;
        const std::size_t alignment_padding = getAlignmentPadding(not_aligned_address, HEADER_SIZE);
        const std::uint32_t allocation_size_with_alignment = static_cast<std::uint32_t>(allocation_size + alignment_padding);
        if ( (!m_max_block) || (allocation_size_with_alignment > *m_max_block) ) // check that there is enough memory for the allocation
        {
            return nullptr;
        }

        // Find the smallest free block that still fits the request (best fit)
        const auto min_it = std::min_element(m_free_blocks.cbegin(), m_free_blocks.cend(),
            [allocation_size_with_alignment] (const std::uint32_t* lhs, const std::uint32_t* rhs)
        {
            if (*rhs < allocation_size_with_alignment)
            {
                return true;
            }

            return (*lhs < *rhs) && (*lhs >= allocation_size_with_alignment);
        });

        assert(min_it != m_free_blocks.cend() && "Internal logic error with reserve block, something wrong in implementation...");
        assert(**min_it >= allocation_size_with_alignment && "Internal logic error with reserve block, something wrong in implementation...");

        std::uint32_t* header_address = *min_it;
        std::uint32_t* new_header_address =
            reinterpret_cast<std::uint32_t*>(reinterpret_cast<std::size_t>(header_address) + HEADER_SIZE + allocation_size_with_alignment);
        if (m_free_blocks.find(new_header_address) == m_free_blocks.cend()) // make sure a free block does not already start at the split position
        {
            const std::uint32_t old_block_size = *header_address;
            const std::uint32_t difference = old_block_size - HEADER_SIZE;
            if (difference >= allocation_size_with_alignment) // check if there is enough space for another block
            {
                const std::uint32_t new_block_size = difference - allocation_size_with_alignment;
                *new_header_address = new_block_size;
                m_free_blocks.insert(new_header_address);
            }
        }

        m_free_blocks.erase(header_address);
        *header_address = allocation_size_with_alignment; // store the padded size, so coalescing on release sees the real block extent
        if (header_address == m_max_block) // if the maximum block was consumed, the maximum must be found again
        {
            // Find the largest block by size
            const auto max_it = std::max_element(m_free_blocks.cbegin(), m_free_blocks.cend(),
                [] (const std::uint32_t* lhs, const std::uint32_t* rhs)
            {
                return (*lhs) < (*rhs);
            });

            // If there are no free blocks, the memory in this chunk is exhausted
            m_max_block = (max_it != m_free_blocks.cend()) ? (*max_it) : (nullptr);
        }

        return reinterpret_cast<std::uint8_t*>(header_address) + HEADER_SIZE;
    }

    void releaseBlock(std::uint8_t* block_ptr)
    {
        std::uint32_t* header_address = reinterpret_cast<std::uint32_t*>(block_ptr - HEADER_SIZE);
        const std::uint32_t size_released_block = *header_address;
        if ( (!m_max_block) || (size_released_block > *m_max_block) ) // if the released block is larger than the maximum, it becomes the new maximum
        {
            m_max_block = header_address;
        }

        const auto released_it = m_free_blocks.insert(header_address).first;
        // Merge the released block with its free neighbours at higher addresses,
        // then try to merge the free block just before it with the result
        defragment(released_it);
        if (released_it != m_free_blocks.begin())
        {
            defragment(std::prev(released_it));
        }
    }
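    // A worked example of the release path above, with HEADER_SIZE == 4:
    // freeing a block whose header sits at offset 0 with payload size 12
    // makes it end at offset 16; if another free block's header starts
    // exactly at offset 16 with payload size 8, defragment() fuses the two
    // into a single free block of size 12 + 4 + 8 = 24 stored in the header
    // at offset 0.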
private:
    void defragment(std::set<std::uint32_t*>::iterator start)
    {
        // Primitive defragmentation algorithm - merges the free block at
        // 'start' with every directly neighbouring free block after it,
        // with linear complexity (the free list is ordered by address)
        std::uint32_t* prev_header_address = *start;
        for (auto it = std::next(start); it != m_free_blocks.end();)
        {
            std::uint32_t* current_header_address = *it;
            const std::uint32_t prev_block_size = *prev_header_address;
            const std::uint32_t* available_current_block_address =
                reinterpret_cast<const std::uint32_t*>(reinterpret_cast<std::size_t>(prev_header_address) + HEADER_SIZE + prev_block_size);
            if (available_current_block_address != current_header_address)
            {
                return;
            }

            const std::uint32_t current_block_size = *current_header_address;
            const std::uint32_t new_prev_block_size = prev_block_size + HEADER_SIZE + current_block_size;
            *prev_header_address = new_prev_block_size;
            if ( (!m_max_block) || (new_prev_block_size > *m_max_block) )
            {
                m_max_block = prev_header_address;
            }

            it = m_free_blocks.erase(it);
        }
    }
public:
    std::vector<std::uint8_t> m_blocks;
    std::set<std::uint32_t*> m_free_blocks;
    std::uint32_t* m_max_block;
};

} // namespace details

// Strategy for manipulating memory chunks, like
// a primitive malloc allocator.
//
// Warning: if you try to deallocate some random block
// of memory, it will most likely be undefined behaviour,
// because the current implementation doesn't check for
// this situation.

template <std::size_t CHUNK_SIZE>
class CustomAllocationStrategy
{
    static_assert(CHUNK_SIZE != 0u, "Chunk size must be greater than zero");
    static_assert(CHUNK_SIZE <= std::numeric_limits<std::uint32_t>::max(),
                  "Chunk size must be less than or equal to the max value of std::uint32_t");
public:
    void* allocate(std::size_t size)
    {
        assert(size < CHUNK_SIZE && "Requested size does not fit into a single chunk");

        if (size == 0u)
        {
            return nullptr;
        }

        for (auto& chunk : m_chunks)
        {
            void* allocated_block = chunk.tryReserveBlock(size);
            if (allocated_block) // a null block means that this chunk has run out of memory
            {
                return allocated_block;
            }
        }

        // All existing chunks are full, so service the request from a new one
        m_chunks.push_back(details::Chunk<CHUNK_SIZE>{});
        auto& chunk = m_chunks.back();
        std::uint8_t* allocated_block = chunk.tryReserveBlock(size);
        return allocated_block;
    }

    void deallocate(void* memory_ptr, std::size_t size)
    {
        if ( (!memory_ptr) || (size == 0u) )
        {
            return;
        }

        std::uint8_t* deallocation_ptr = static_cast<std::uint8_t*>(memory_ptr);
        for (auto& chunk : m_chunks)
        {
            if (chunk.isInside(deallocation_ptr))
            {
                chunk.releaseBlock(deallocation_ptr);
                return;
            }
        }
    }
private:
    std::deque<details::Chunk<CHUNK_SIZE>> m_chunks{ 1u };
};
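// A minimal raw usage sketch of the strategy on its own (the sizes 1024 and
// 64 are arbitrary illustration values, not taken from the code below):
//
//     CustomAllocationStrategy<1024u> strategy{};
//     void* block = strategy.allocate(64u);  // best-fit block from some chunk
//     strategy.deallocate(block, 64u);       // releases the block and coalesces neighbours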
// Common interface for interaction with STL
// containers and algorithms. You can manually change
// the allocation algorithm by providing a different 'AllocationStrategy'.
//
// The optional 'address' and 'max_size' functions are not
// needed here and therefore not implemented.

template <typename T, typename AllocationStrategy>
class Allocator
{
    static_assert(!std::is_same_v<T, void>, "Type of the allocator can not be void");
public:
    using value_type = T;

    template <typename U, typename Strategy>
    friend class Allocator;

    template <typename T1, typename T2, typename Strategy>
    friend bool operator==(const Allocator<T1, Strategy>& lhs, const Allocator<T2, Strategy>& rhs);

    template <typename U>
    struct rebind
    {
        using other = Allocator<U, AllocationStrategy>;
    };
public:
    Allocator() = default;

    explicit Allocator(AllocationStrategy& strategy) noexcept
        : m_allocation_strategy(&strategy) {}

    Allocator(const Allocator& other) noexcept
        : m_allocation_strategy(other.m_allocation_strategy) {}

    template <typename U>
    Allocator(const Allocator<U, AllocationStrategy>& other) noexcept
        : m_allocation_strategy(other.m_allocation_strategy) {}

    T* allocate(std::size_t count_objects)
    {
        assert(m_allocation_strategy && "Not initialized allocation strategy");
        return static_cast<T*>(m_allocation_strategy->allocate(count_objects * sizeof(T)));
    }

    void deallocate(void* memory_ptr, std::size_t count_objects)
    {
        assert(m_allocation_strategy && "Not initialized allocation strategy");
        m_allocation_strategy->deallocate(memory_ptr, count_objects * sizeof(T));
    }

    template <typename U, typename... Args>
    void construct(U* ptr, Args&&... args)
    {
        new (reinterpret_cast<void*>(ptr)) U { std::forward<Args>(args)... };
    }

    template <typename U>
    void destroy(U* ptr)
    {
        ptr->~U();
    }
private:
    AllocationStrategy* m_allocation_strategy = nullptr;
};

template <typename T, typename U, typename AllocationStrategy>
bool operator==(const Allocator<T, AllocationStrategy>& lhs, const Allocator<U, AllocationStrategy>& rhs)
{
    return lhs.m_allocation_strategy == rhs.m_allocation_strategy;
}

template <typename T, typename U, typename AllocationStrategy>
bool operator!=(const Allocator<T, AllocationStrategy>& lhs, const Allocator<U, AllocationStrategy>& rhs)
{
    return !(lhs == rhs);
}
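// Two allocators compare equal exactly when they share the same strategy
// instance, so memory allocated through one may be returned through the
// other. A small sketch of what this allows (1024 is an arbitrary chunk
// size chosen only for the example):
//
//     CustomAllocationStrategy<1024u> strategy{};
//     Allocator<int, CustomAllocationStrategy<1024u>> int_allocator{ strategy };
//     Allocator<char, CustomAllocationStrategy<1024u>> char_allocator{ int_allocator }; // rebound copy
//     assert(int_allocator == char_allocator); // same strategy -> interchangeable memory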
// ----------------------------------------------------------------------------------------------------------
// -------------------------------------- < usage aliases later > -------------------------------------------
// ----------------------------------------------------------------------------------------------------------

// Default chunk size for the aliases below; any value that satisfies the
// static_asserts of CustomAllocationStrategy works here.
static constexpr std::size_t DEFAULT_CHUNK_SIZE = 16384u;

template <typename T>
using CustomAllocator = Allocator<T, CustomAllocationStrategy<DEFAULT_CHUNK_SIZE>>;

// Both size-parameterized aliases map to the same vector-backed strategy;
// a Chunk variant with std::array storage could back the "stack chunks"
// alias instead.
template <typename T, std::size_t CHUNK_SIZE>
using CustomAllocatorWithStackChunks = Allocator<T, CustomAllocationStrategy<CHUNK_SIZE>>;

template <typename T, std::size_t CHUNK_SIZE>
using CustomAllocatorWithHeapChunks = Allocator<T, CustomAllocationStrategy<CHUNK_SIZE>>;

template <typename T>
using custom_vector = std::vector<T, CustomAllocator<T>>;

template <typename T>
using custom_list = std::list<T, CustomAllocator<T>>;

template <typename T>
using custom_set = std::set<T, std::less<T>, CustomAllocator<T>>;

template <typename T>
using custom_unordered_set = std::unordered_set<T, std::hash<T>, std::equal_to<T>, CustomAllocator<T>>;

template <typename K, typename V>
using custom_map = std::map<K, V, std::less<K>, CustomAllocator<std::pair<const K, V>>>;

template <typename K, typename V>
using custom_unordered_map = std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, CustomAllocator<std::pair<const K, V>>>;

using custom_string = std::basic_string<char, std::char_traits<char>, CustomAllocator<char>>;

// ----------------------------------------------------------------------------------------------------------
// ------------------------------ < usage helpers for unique_ptr later > ------------------------------------
// ----------------------------------------------------------------------------------------------------------

template <typename T>
using custom_unique_ptr = std::unique_ptr<T, std::function<void(T*)>>;

template <typename T, typename Allocator, typename... Args>
custom_unique_ptr<T> make_custom_unique(Allocator allocator, Args&&... args)
{
    const auto custom_deleter = [allocator](T* ptr) mutable
    {
        allocator.destroy(ptr);
        allocator.deallocate(ptr, 1u);
    };

    void* memory_block = allocator.allocate(1u);
    if (memory_block)
    {
        T* object_block = static_cast<T*>(memory_block);
        allocator.construct(object_block, std::forward<Args>(args)...);
        return custom_unique_ptr<T>{ object_block, custom_deleter };
    }

    return nullptr;
}

// ----------------------------------------------------------------------------------------------------------
// -------------------------------- < usage example later > -------------------------------------------------
// ----------------------------------------------------------------------------------------------------------

int main(int argc, char** argv)
{
    CustomAllocationStrategy<DEFAULT_CHUNK_SIZE> allocation_area{};

    CustomAllocator<int> custom_int_allocator{ allocation_area };
    custom_vector<int> vector{ custom_int_allocator };
    for (int i = 0; i < 100; ++i)
    {
        vector.push_back(i);
        std::cout << vector.at(i) << " ";
    }

    vector.resize(16u);
    for (int val : vector)
    {
        std::cout << val << " ";
    }

    CustomAllocator<int> custom_int_allocator_copy = vector.get_allocator();
    custom_unique_ptr<int> ptr1 = make_custom_unique<int>(custom_int_allocator_copy, 100);
    custom_unique_ptr<int> ptr2 = make_custom_unique<int>(custom_int_allocator_copy, 500);
    custom_unique_ptr<int> ptr3 = make_custom_unique<int>(custom_int_allocator_copy, 1000);
    custom_unique_ptr<int> ptr4 = make_custom_unique<int>(custom_int_allocator_copy, 1500);
    std::cout << *ptr1 << " " << *ptr2 << " " << *ptr3 << " " << *ptr4 << " ";

    CustomAllocator<float> custom_float_allocator{ custom_int_allocator };
    custom_list<float> list{ { 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f }, custom_float_allocator };
    for (float val : list)
    {
        std::cout << val << " ";
    }

    CustomAllocator<std::pair<const double, double>> custom_pair_allocator{ allocation_area };
    custom_map<double, double> map{ { { 1.0, 100.0 }, { 2.0, 200.0 } }, custom_pair_allocator };
    for (const auto& it : map)
    {
        std::cout << "{" << it.first << " : " << it.second << "} ";
    }

    CustomAllocator<double> custom_double_allocator{ allocation_area };
    custom_set<double> set{ { 1000.0, 2000.0, 3000.0 }, custom_double_allocator };
    for (double val : set)
    {
        std::cout << val << " ";
    }

    CustomAllocator<char> custom_char_allocator{ allocation_area };
    custom_string string1{ "First allocated string without SBO ", custom_char_allocator };
    custom_string string2{ "Second allocated string without SBO ", custom_char_allocator };
    custom_string string3{ "Third allocated string without SBO ", custom_char_allocator };
    custom_string result_string = string1 + string2 + string3;
    std::cout << result_string;

    return EXIT_SUCCESS;
}
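// A possible build line (assumption: any C++17-capable compiler works,
// the output name is arbitrary):
//
//     g++ -std=c++17 -Wall -Wextra -o allocator_demo main.cpp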