├── .gitignore ├── .travis.yml ├── README.md ├── dub.sdl ├── meson.build ├── source └── stdx │ └── allocator │ ├── building_blocks │ ├── affix_allocator.d │ ├── allocator_list.d │ ├── bitmapped_block.d │ ├── bucketizer.d │ ├── fallback_allocator.d │ ├── free_list.d │ ├── free_tree.d │ ├── kernighan_ritchie.d │ ├── null_allocator.d │ ├── package.d │ ├── quantizer.d │ ├── region.d │ ├── scoped_allocator.d │ ├── segregator.d │ └── stats_collector.d │ ├── common.d │ ├── gc_allocator.d │ ├── internal.d │ ├── mallocator.d │ ├── mmap_allocator.d │ ├── package.d │ ├── showcase.d │ └── typed.d └── subprojects └── mir-core.wrap /.gitignore: -------------------------------------------------------------------------------- 1 | .dub 2 | docs.json 3 | __dummy.html 4 | docs/ 5 | allocator.so 6 | allocator.dylib 7 | allocator.dll 8 | allocator.lib 9 | allocator-test-* 10 | stdx-allocator-test-* 11 | *.a 12 | *.exe 13 | *.o 14 | *.obj 15 | *.lst 16 | dub.selections.json 17 | subprojects/mir-core 18 | build 19 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | language: d 3 | 4 | os: 5 | - linux 6 | - osx 7 | 8 | d: 9 | - ldc 10 | - ldc-beta 11 | - dmd 12 | - dmd-beta 13 | 14 | env: 15 | - ARCH="x86_64" 16 | 17 | branches: 18 | only: 19 | - master 20 | - stable 21 | 22 | matrix: 23 | include: 24 | - {os: linux, d: ldc-beta, env: ARCH="x86", addons: {apt: {packages: [[gcc-multilib]]}}} 25 | - {os: linux, d: ldc, env: ARCH="x86", addons: {apt: {packages: [[gcc-multilib]]}}} 26 | - {os: linux, d: dmd-beta, env: ARCH="x86", addons: {apt: {packages: [[gcc-multilib]]}}} 27 | - {os: linux, d: dmd, env: ARCH="x86", addons: {apt: {packages: [[gcc-multilib]]}}} 28 | 29 | install: 30 | - | 31 | if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then 32 | curl -L "https://github.com/ninja-build/ninja/releases/download/v1.7.2/ninja-linux.zip" -o ninja-linux.zip 33 | sudo unzip ninja-linux.zip -d /usr/local/bin 34 | sudo chmod 755 /usr/local/bin/ninja 35 | sudo add-apt-repository -y ppa:deadsnakes/ppa 36 | sudo apt-get -y update 37 | sudo apt-get -y install python3.6 38 | curl https://bootstrap.pypa.io/get-pip.py | sudo python3.6 39 | sudo pip3 install meson 40 | fi 41 | 42 | packages: 43 | - pkg-config 44 | 45 | script: 46 | - echo "$ARCH" 47 | - dub test --arch "$ARCH" 48 | - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then meson build --default-library=static && cd build && ninja -j4 && ninja -j4 test -v && cd ..; fi # TODO: 32bit meson test 49 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | stdx.allocator 2 | =============== 3 | 4 | [![CI status](https://travis-ci.org/dlang-community/stdx-allocator.svg?branch=master)](https://travis-ci.org/dlang-community/stdx-allocator/) 5 | 6 | Extracted std.experimental.allocator for usage via DUB 7 | -------------------------------------------------------------------------------- /dub.sdl: -------------------------------------------------------------------------------- 1 | name "stdx-allocator" 2 | description "Extracted std.experimental.allocator" 3 | authors "Team Phobos" 4 | copyright "Copyright © 2017, Team Phobos" 5 | license "BSL-1.0" 6 | dependency "mir-core" version=">=0.0.5 <0.3.0" 7 | 8 | configuration "unittest" { 9 | } 10 | 11 | configuration "wasm" { 12 | dflags "-mtriple=wasm32-unknown-unknown-wasm" "-betterC" 
13 | targetType "sourceLibrary" 14 | } 15 | -------------------------------------------------------------------------------- /meson.build: -------------------------------------------------------------------------------- 1 | project('stdx-allocator', 'd', 2 | meson_version: '>=0.45', 3 | license: 'BSL-1.0', 4 | version: '3.0.1' 5 | ) 6 | 7 | project_soversion = '0' 8 | 9 | mir_core_dep = dependency('mir-core', fallback : ['mir-core', 'mir_core_dep']) 10 | 11 | required_deps = [mir_core_dep] 12 | 13 | pkgc = import('pkgconfig') 14 | 15 | # 16 | # Sources 17 | # 18 | allocator_src = [ 19 | 'source/stdx/allocator/internal.d', 20 | 'source/stdx/allocator/building_blocks/null_allocator.d', 21 | 'source/stdx/allocator/building_blocks/fallback_allocator.d', 22 | 'source/stdx/allocator/building_blocks/bitmapped_block.d', 23 | 'source/stdx/allocator/building_blocks/stats_collector.d', 24 | 'source/stdx/allocator/building_blocks/package.d', 25 | 'source/stdx/allocator/building_blocks/affix_allocator.d', 26 | 'source/stdx/allocator/building_blocks/free_list.d', 27 | 'source/stdx/allocator/building_blocks/bucketizer.d', 28 | 'source/stdx/allocator/building_blocks/free_tree.d', 29 | 'source/stdx/allocator/building_blocks/kernighan_ritchie.d', 30 | 'source/stdx/allocator/building_blocks/allocator_list.d', 31 | 'source/stdx/allocator/building_blocks/segregator.d', 32 | 'source/stdx/allocator/building_blocks/scoped_allocator.d', 33 | 'source/stdx/allocator/building_blocks/region.d', 34 | 'source/stdx/allocator/building_blocks/quantizer.d', 35 | 'source/stdx/allocator/typed.d', 36 | 'source/stdx/allocator/mmap_allocator.d', 37 | 'source/stdx/allocator/showcase.d', 38 | 'source/stdx/allocator/mallocator.d', 39 | 'source/stdx/allocator/package.d', 40 | 'source/stdx/allocator/common.d', 41 | 'source/stdx/allocator/gc_allocator.d', 42 | ] 43 | 44 | src_dir = include_directories('source/') 45 | 46 | # 47 | # Targets 48 | # 49 | allocator_lib = library('stdx-allocator', 50 | [allocator_src], 51 | include_directories: [src_dir], 52 | install: true, 53 | version: meson.project_version(), 54 | soversion: project_soversion, 55 | dependencies: required_deps, 56 | ) 57 | 58 | # 59 | # Tests 60 | # 61 | allocator_test_exe = executable(meson.project_name() + '-test', 62 | allocator_src, 63 | include_directories: src_dir, 64 | d_unittest: true, 65 | link_args: '-main', 66 | dependencies: required_deps, 67 | 68 | ) 69 | 70 | # for use by Vibe.d and others which embed this as subproject 71 | allocator_dep = declare_dependency( 72 | link_with: allocator_lib, 73 | include_directories: src_dir, 74 | dependencies: required_deps, 75 | ) 76 | 77 | # 78 | # Install 79 | # 80 | install_subdir('source/stdx/', install_dir: 'include/d/stdx-allocator/') 81 | 82 | 83 | pkgc.generate(name: 'stdx-allocator', 84 | libraries: allocator_lib, 85 | requires: ['mir-core'], 86 | subdirs: 'd/' + meson.project_name(), 87 | version: meson.project_version(), 88 | description: 'High-level interface for allocators for D, extracted from Phobos.' 
89 | ) 90 | 91 | test(meson.project_name() + '-test', allocator_test_exe) 92 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/affix_allocator.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.building_blocks.affix_allocator; 3 | 4 | /** 5 | 6 | Allocator that adds some extra data before (of type $(D Prefix)) and/or after 7 | (of type $(D Suffix)) any allocation made with its parent allocator. This is 8 | useful for uses where additional allocation-related information is needed, such 9 | as mutexes, reference counts, or walls for debugging memory corruption errors. 10 | 11 | If $(D Prefix) is not $(D void), $(D Allocator) must guarantee an alignment at 12 | least as large as $(D Prefix.alignof). 13 | 14 | Suffixes are slower to get at because of alignment rounding, so prefixes should 15 | be preferred. However, small prefixes blunt the alignment so if a large 16 | alignment with a small affix is needed, suffixes should be chosen. 17 | 18 | The following methods are defined if $(D Allocator) defines them, and forward to it: $(D deallocateAll), $(D empty), $(D owns). 19 | */ 20 | struct AffixAllocator(Allocator, Prefix, Suffix = void) 21 | { 22 | import mir.utility : min; 23 | import mir.conv : emplace; 24 | import stdx.allocator : IAllocator, theAllocator; 25 | import stdx.allocator.common : stateSize, forwardToMember, 26 | roundUpToMultipleOf, alignedAt, alignDownTo, roundUpToMultipleOf, 27 | hasStaticallyKnownAlignment; 28 | import stdx.allocator.internal : isPowerOf2; 29 | import stdx.allocator.internal : Ternary; 30 | 31 | static if (hasStaticallyKnownAlignment!Allocator) 32 | { 33 | static assert( 34 | !stateSize!Prefix || Allocator.alignment >= Prefix.alignof, 35 | "AffixAllocator does not work with allocators offering a smaller" 36 | ~ " alignment than the prefix alignment."); 37 | } 38 | static assert(alignment % Suffix.alignof == 0, 39 | "This restriction could be relaxed in the future."); 40 | 41 | /** 42 | If $(D Prefix) is $(D void), the alignment is that of the parent. Otherwise, the alignment is the same as the $(D Prefix)'s alignment. 43 | */ 44 | static if (hasStaticallyKnownAlignment!Allocator) 45 | { 46 | enum uint alignment = isPowerOf2(stateSize!Prefix) 47 | ? min(stateSize!Prefix, Allocator.alignment) 48 | : (stateSize!Prefix ? Prefix.alignof : Allocator.alignment); 49 | } 50 | else static if (is(Prefix == void)) 51 | { 52 | enum uint alignment = platformAlignment; 53 | } 54 | else 55 | { 56 | enum uint alignment = Prefix.alignof; 57 | } 58 | 59 | /** 60 | If the parent allocator $(D Allocator) is stateful, an instance of it is 61 | stored as a member. Otherwise, $(D AffixAllocator) uses 62 | `Allocator.instance`. In either case, the name $(D _parent) is uniformly 63 | used for accessing the parent allocator. 
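A minimal sketch of the stateful case (mirroring the unittests further below;
`theAllocator` supplies the `IAllocator` parent at run time):
---
auto a = AffixAllocator!(IAllocator, size_t)(theAllocator);
auto b = a.allocate(16);
a.prefix(b) = 42; // the prefix word lives just before the block
---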
64 | */ 65 | static if (stateSize!Allocator) 66 | { 67 | Allocator _parent; 68 | static if (is(Allocator == IAllocator)) 69 | { 70 | Allocator parent() 71 | { 72 | if (_parent is null) _parent = theAllocator; 73 | assert(alignment <= _parent.alignment); 74 | return _parent; 75 | } 76 | } 77 | else 78 | { 79 | alias parent = _parent; 80 | } 81 | } 82 | else 83 | { 84 | alias parent = Allocator.instance; 85 | } 86 | 87 | private template Impl(bool isStatic) 88 | { 89 | 90 | size_t goodAllocSize(size_t s) 91 | { 92 | import stdx.allocator.common : goodAllocSize; 93 | auto a = actualAllocationSize(s); 94 | return roundUpToMultipleOf(parent.goodAllocSize(a) 95 | - stateSize!Prefix - stateSize!Suffix, 96 | this.alignment); 97 | } 98 | 99 | static if (isStatic) 100 | private size_t actualAllocationSize(size_t s) 101 | { 102 | assert(s > 0); 103 | static if (!stateSize!Suffix) 104 | { 105 | return s + stateSize!Prefix; 106 | } 107 | else 108 | { 109 | return 110 | roundUpToMultipleOf(s + stateSize!Prefix, Suffix.alignof) 111 | + stateSize!Suffix; 112 | } 113 | } 114 | else 115 | private size_t actualAllocationSize(size_t s) const 116 | { 117 | assert(s > 0); 118 | static if (!stateSize!Suffix) 119 | { 120 | return s + stateSize!Prefix; 121 | } 122 | else 123 | { 124 | return 125 | roundUpToMultipleOf(s + stateSize!Prefix, Suffix.alignof) 126 | + stateSize!Suffix; 127 | } 128 | } 129 | 130 | static if (isStatic) 131 | private void[] actualAllocation(void[] b) 132 | { 133 | assert(b !is null); 134 | return (b.ptr - stateSize!Prefix) 135 | [0 .. actualAllocationSize(b.length)]; 136 | } 137 | else 138 | private void[] actualAllocation(void[] b) const 139 | { 140 | assert(b !is null); 141 | return (b.ptr - stateSize!Prefix) 142 | [0 .. actualAllocationSize(b.length)]; 143 | } 144 | 145 | void[] allocate(size_t bytes) 146 | { 147 | if (!bytes) return null; 148 | auto result = parent.allocate(actualAllocationSize(bytes)); 149 | if (result is null) return null; 150 | static if (stateSize!Prefix) 151 | { 152 | assert(result.ptr.alignedAt(Prefix.alignof)); 153 | emplace!Prefix(cast(Prefix*) result.ptr); 154 | } 155 | static if (stateSize!Suffix) 156 | { 157 | auto suffixP = result.ptr + result.length - Suffix.sizeof; 158 | assert(suffixP.alignedAt(Suffix.alignof)); 159 | emplace!Suffix(cast(Suffix*)(suffixP)); 160 | } 161 | return result[stateSize!Prefix .. stateSize!Prefix + bytes]; 162 | } 163 | 164 | static if (__traits(hasMember, Allocator, "allocateAll")) 165 | void[] allocateAll() 166 | { 167 | auto result = parent.allocateAll(); 168 | if (result is null) return null; 169 | if (result.length < actualAllocationSize(1)) 170 | { 171 | deallocate(result); 172 | return null; 173 | } 174 | static if (stateSize!Prefix) 175 | { 176 | assert(result.length > stateSize!Prefix); 177 | emplace!Prefix(cast(Prefix*) result.ptr); 178 | result = result[stateSize!Prefix .. $]; 179 | } 180 | static if (stateSize!Suffix) 181 | { 182 | assert(result.length > stateSize!Suffix); 183 | // Ehm, find a properly aligned place for the suffix 184 | auto p = (result.ptr + result.length - stateSize!Suffix) 185 | .alignDownTo(Suffix.alignof); 186 | assert(p > result.ptr); 187 | emplace!Suffix(cast(Suffix*) p); 188 | result = result[0 .. 
p - result.ptr]; 189 | } 190 | return result; 191 | } 192 | 193 | static if (__traits(hasMember, Allocator, "owns")) 194 | Ternary owns(void[] b) 195 | { 196 | if (b is null) return Ternary.no; 197 | return parent.owns(actualAllocation(b)); 198 | } 199 | 200 | static if (__traits(hasMember, Allocator, "resolveInternalPointer")) 201 | Ternary resolveInternalPointer(const void* p, ref void[] result) 202 | { 203 | void[] p1; 204 | Ternary r = parent.resolveInternalPointer(p, p1); 205 | if (r != Ternary.yes || p1 is null) 206 | return r; 207 | p1 = p1[stateSize!Prefix .. $]; 208 | auto p2 = (p1.ptr + p1.length - stateSize!Suffix) 209 | .alignDownTo(Suffix.alignof); 210 | result = p1[0 .. p2 - p1.ptr]; 211 | return Ternary.yes; 212 | } 213 | 214 | static if (!stateSize!Suffix && __traits(hasMember, Allocator, "expand")) 215 | bool expand(ref void[] b, size_t delta) 216 | { 217 | if (!b.ptr) return delta == 0; 218 | auto t = actualAllocation(b); 219 | const result = parent.expand(t, delta); 220 | if (!result) return false; 221 | b = b.ptr[0 .. b.length + delta]; 222 | return true; 223 | } 224 | 225 | static if (__traits(hasMember, Allocator, "reallocate")) 226 | bool reallocate(ref void[] b, size_t s) 227 | { 228 | if (b is null) 229 | { 230 | b = allocate(s); 231 | return b.length == s; 232 | } 233 | auto t = actualAllocation(b); 234 | const result = parent.reallocate(t, actualAllocationSize(s)); 235 | if (!result) return false; // no harm done 236 | b = t.ptr[stateSize!Prefix .. stateSize!Prefix + s]; 237 | return true; 238 | } 239 | 240 | static if (__traits(hasMember, Allocator, "deallocate")) 241 | bool deallocate(void[] b) 242 | { 243 | if (!b.ptr) return true; 244 | return parent.deallocate(actualAllocation(b)); 245 | } 246 | 247 | /* The following methods are defined if $(D ParentAllocator) defines 248 | them, and forward to it: $(D deallocateAll), $(D empty).*/ 249 | mixin(forwardToMember("parent", 250 | "deallocateAll", "empty")); 251 | 252 | // Computes suffix type given buffer type 253 | private template Payload2Affix(Payload, Affix) 254 | { 255 | static if (is(Payload[] : void[])) 256 | alias Payload2Affix = Affix; 257 | else static if (is(Payload[] : shared(void)[])) 258 | alias Payload2Affix = shared Affix; 259 | else static if (is(Payload[] : immutable(void)[])) 260 | alias Payload2Affix = shared Affix; 261 | else static if (is(Payload[] : const(shared(void))[])) 262 | alias Payload2Affix = shared Affix; 263 | else static if (is(Payload[] : const(void)[])) 264 | alias Payload2Affix = const Affix; 265 | else 266 | static assert(0, "Internal error for type " ~ Payload.stringof); 267 | } 268 | 269 | // Extra functions 270 | static if (stateSize!Prefix) 271 | { 272 | static auto ref prefix(T)(T[] b) 273 | { 274 | assert(b.ptr && b.ptr.alignedAt(Prefix.alignof)); 275 | return (cast(Payload2Affix!(T, Prefix)*) b.ptr)[-1]; 276 | } 277 | } 278 | static if (stateSize!Suffix) 279 | auto ref suffix(T)(T[] b) 280 | { 281 | assert(b.ptr); 282 | auto p = b.ptr - stateSize!Prefix 283 | + actualAllocationSize(b.length); 284 | assert(p && p.alignedAt(Suffix.alignof)); 285 | return (cast(Payload2Affix!(T, Suffix)*) p)[-1]; 286 | } 287 | } 288 | 289 | version (StdDdoc) 290 | { 291 | /** 292 | Standard allocator methods. Each is defined if and only if the parent 293 | allocator defines the homonym method (except for $(D goodAllocSize), 294 | which may use the global default). Also, the methods will be $(D 295 | shared) if the parent allocator defines them as such. 
296 | */
297 | size_t goodAllocSize(size_t);
298 | /// Ditto
299 | void[] allocate(size_t);
300 | /// Ditto
301 | Ternary owns(void[]);
302 | /// Ditto
303 | bool expand(ref void[] b, size_t delta);
304 | /// Ditto
305 | bool reallocate(ref void[] b, size_t s);
306 | /// Ditto
307 | bool deallocate(void[] b);
308 | /// Ditto
309 | bool deallocateAll();
310 | /// Ditto
311 | Ternary empty();
312 | 
313 | /**
314 | The `instance` singleton is defined if and only if the parent allocator
315 | has no state and defines its own `instance` object.
316 | */
317 | static AffixAllocator instance;
318 | 
319 | /**
320 | Affix access functions offering references to the affixes of a
321 | block `b` previously allocated with this allocator. `b` may not be null.
322 | They are defined if and only if the corresponding affix is not `void`.
323 | 
324 | The qualifiers of the affix are not always the same as the qualifiers
325 | of the argument. This is because the affixes are not part of the data
326 | itself, but instead are just $(I associated) with the data and known
327 | to the allocator. The table below documents the type of `prefix(b)` and
328 | `suffix(b)` depending on the type of `b`.
329 | 
330 | $(BOOKTABLE Result of `prefix`/`suffix` depending on argument (`U` is
331 | any unqualified type, `Affix` is `Prefix` or `Suffix`),
332 | $(TR $(TH Argument$(NBSP)Type) $(TH Return) $(TH Comments))
333 | 
334 | $(TR $(TD `shared(U)[]`) $(TD `ref shared Affix`)
335 | $(TD Data is shared across threads and the affix follows suit.))
336 | 
337 | $(TR $(TD `immutable(U)[]`) $(TD `ref shared Affix`)
338 | $(TD Although the data is immutable, the allocator "knows" the
339 | underlying memory is mutable, so `immutable` is elided for the affix
340 | which is independent from the data itself. However, the result is
341 | `shared` because `immutable` is implicitly shareable so multiple
342 | threads may access and manipulate the affix for the same data.))
343 | 
344 | $(TR $(TD `const(shared(U))[]`) $(TD `ref shared Affix`)
345 | $(TD The data is always shareable across threads. Even if the data
346 | is `const`, the affix is modifiable by the same reasoning as for
347 | `immutable`.))
348 | 
349 | $(TR $(TD `const(U)[]`) $(TD `ref const Affix`)
350 | $(TD The input may have originated from `U[]` or `immutable(U)[]`,
351 | so it may be actually shared or not. Returning an unqualified affix
352 | may result in race conditions, whereas returning a `shared` affix
353 | may result in inadvertent sharing of mutable thread-local data
354 | across multiple threads. So the returned type is conservatively
355 | `ref const`.))
356 | 
357 | $(TR $(TD `U[]`) $(TD `ref Affix`)
358 | $(TD Unqualified data has unqualified affixes.))
359 | )
360 | 
361 | Precondition: `b !is null` and `b` must have been allocated with
362 | this allocator.
363 | */
364 | static ref auto prefix(T)(T[] b);
365 | /// Ditto
366 | ref auto suffix(T)(T[] b);
367 | }
368 | else static if (is(typeof(Allocator.instance) == shared))
369 | { // for backward compatibility
370 | enum shared AffixAllocator instance = AffixAllocator();
371 | static { mixin Impl!true; }
372 | }
373 | else
374 | {
375 | static if (stateSize!Allocator == 0)
376 | {
377 | enum AffixAllocator instance = AffixAllocator();
378 | static { mixin Impl!true; }
379 | }
380 | else
381 | {
382 | mixin Impl!false;
383 | }
384 | }
385 | }
386 | 
387 | ///
388 | @system unittest
389 | {
390 | import stdx.allocator.mallocator : Mallocator;
391 | // One word before and after each allocation.
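// prefix and suffix return a ref to the stored word, so it is assignable in place.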
392 | alias A = AffixAllocator!(Mallocator, size_t, size_t); 393 | auto b = A.instance.allocate(11); 394 | A.instance.prefix(b) = 0xCAFE_BABE; 395 | A.instance.suffix(b) = 0xDEAD_BEEF; 396 | assert(A.instance.prefix(b) == 0xCAFE_BABE 397 | && A.instance.suffix(b) == 0xDEAD_BEEF); 398 | } 399 | 400 | @system unittest 401 | { 402 | import stdx.allocator.gc_allocator : GCAllocator; 403 | import stdx.allocator : theAllocator, IAllocator; 404 | 405 | // One word before and after each allocation. 406 | auto A = AffixAllocator!(IAllocator, size_t, size_t)(theAllocator); 407 | auto a = A.allocate(11); 408 | A.prefix(a) = 0xCAFE_BABE; 409 | A.suffix(a) = 0xDEAD_BEEF; 410 | assert(A.prefix(a) == 0xCAFE_BABE 411 | && A.suffix(a) == 0xDEAD_BEEF); 412 | 413 | // One word before and after each allocation. 414 | auto B = AffixAllocator!(IAllocator, size_t, size_t)(); 415 | auto b = B.allocate(11); 416 | B.prefix(b) = 0xCAFE_BABE; 417 | B.suffix(b) = 0xDEAD_BEEF; 418 | assert(B.prefix(b) == 0xCAFE_BABE 419 | && B.suffix(b) == 0xDEAD_BEEF); 420 | } 421 | 422 | @system unittest 423 | { 424 | import stdx.allocator.building_blocks.bitmapped_block 425 | : BitmappedBlock; 426 | import stdx.allocator.common : testAllocator; 427 | testAllocator!({ 428 | auto a = AffixAllocator!(BitmappedBlock!128, ulong, ulong) 429 | (BitmappedBlock!128(new ubyte[128 * 4096])); 430 | return a; 431 | }); 432 | } 433 | 434 | @system unittest 435 | { 436 | import stdx.allocator.mallocator : Mallocator; 437 | alias A = AffixAllocator!(Mallocator, size_t); 438 | auto b = A.instance.allocate(10); 439 | A.instance.prefix(b) = 10; 440 | assert(A.instance.prefix(b) == 10); 441 | 442 | import stdx.allocator.building_blocks.null_allocator 443 | : NullAllocator; 444 | alias B = AffixAllocator!(NullAllocator, size_t); 445 | b = B.instance.allocate(100); 446 | assert(b is null); 447 | } 448 | 449 | @system unittest 450 | { 451 | import stdx.allocator; 452 | import stdx.allocator.gc_allocator; 453 | import stdx.allocator.internal : Ternary; 454 | alias MyAllocator = AffixAllocator!(GCAllocator, uint); 455 | auto a = MyAllocator.instance.makeArray!(shared int)(100); 456 | static assert(is(typeof(&MyAllocator.instance.prefix(a)) == shared(uint)*)); 457 | auto b = MyAllocator.instance.makeArray!(shared const int)(100); 458 | static assert(is(typeof(&MyAllocator.instance.prefix(b)) == shared(uint)*)); 459 | auto c = MyAllocator.instance.makeArray!(immutable int)(100); 460 | static assert(is(typeof(&MyAllocator.instance.prefix(c)) == shared(uint)*)); 461 | auto d = MyAllocator.instance.makeArray!(int)(100); 462 | static assert(is(typeof(&MyAllocator.instance.prefix(d)) == uint*)); 463 | auto e = MyAllocator.instance.makeArray!(const int)(100); 464 | static assert(is(typeof(&MyAllocator.instance.prefix(e)) == const(uint)*)); 465 | 466 | void[] p; 467 | assert(MyAllocator.instance.resolveInternalPointer(null, p) == Ternary.no); 468 | Ternary r = MyAllocator.instance.resolveInternalPointer(d.ptr, p); 469 | assert(p.ptr is d.ptr && p.length >= d.length); 470 | } 471 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/allocator_list.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.building_blocks.allocator_list; 3 | 4 | import stdx.allocator.building_blocks.null_allocator; 5 | import stdx.allocator.common; 6 | import stdx.allocator.gc_allocator; 7 | version(unittest) import std.stdio; 8 | 9 | // Turn this on for 
debugging
10 | // debug = allocator_list;
11 | 
12 | /**
13 | 
14 | Given an $(LINK2 https://en.wikipedia.org/wiki/Factory_(object-oriented_programming),
15 | object factory) of type `Factory` or a factory function
16 | `factoryFunction`, and optionally also `BookkeepingAllocator` as a supplemental
17 | allocator for bookkeeping, `AllocatorList` creates an allocator that lazily
18 | creates as many allocators as are needed to satisfy client allocation requests.
19 | 
20 | An embedded list builds a most-recently-used strategy: the most recent
21 | allocators used in calls to either `allocate`, `owns` (successful calls
22 | only), or `deallocate` are tried for new allocations in order of their most
23 | recent use. Thus, although core operations take in theory $(BIGOH k) time for
24 | $(D k) allocators in current use, in many workloads the factor is sublinear.
25 | Details of the actual strategy may change in future releases.
26 | 
27 | `AllocatorList` is primarily intended for coarse-grained handling of
28 | allocators, i.e. the number of allocators in the list is expected to be
29 | relatively small compared to the number of allocations handled by each
30 | allocator. However, the per-allocator overhead is small so using
31 | `AllocatorList` with a large number of allocators should be satisfactory as long
32 | as the most-recently-used strategy is fast enough for the application.
33 | 
34 | `AllocatorList` makes an effort to return allocated memory when it is no
35 | longer used. It does so by destroying empty allocators. However, in order to
36 | avoid thrashing (excessive creation/destruction of allocators under certain use
37 | patterns), it keeps unused allocators for a while.
38 | 
39 | Params:
40 | factoryFunction = A function or template function (including function literals).
41 | New allocators are created by calling `factoryFunction(n)` with strictly
42 | positive numbers `n`. Delegates that capture their environment are not accepted
43 | because of concerns regarding garbage creation for the environment. When the factory
44 | needs state, a `Factory` object should be used.
45 | 
46 | BookkeepingAllocator = Allocator used for storing bookkeeping data. The size of
47 | bookkeeping data is proportional to the number of allocators. If $(D
48 | BookkeepingAllocator) is $(D NullAllocator), then $(D AllocatorList) is
49 | "ouroboros-style", i.e. it keeps the bookkeeping data in memory obtained from
50 | the allocators themselves. Note that for ouroboros-style management, the size
51 | $(D n) passed to $(D make) will be occasionally different from the size
52 | requested by client code.
53 | 
54 | Factory = Type of a factory object that returns new allocators on a need
55 | basis. For an object $(D sweatshop) of type $(D Factory), `sweatshop(n)` should
56 | return an allocator able to allocate at least `n` bytes (i.e. `Factory` must
57 | define `opCall(size_t)` to return an allocator object). Usually the capacity of
58 | allocators created should be much larger than $(D n) such that an allocator can
59 | be used for many subsequent allocations. $(D n) is passed only to ensure the
60 | minimum necessary for the next allocation. The factory object is allowed to hold
61 | state, which will be stored inside `AllocatorList` as a direct `public` member
62 | called `factory`.
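A minimal sketch of a stateful factory (`RegionFactory` is a hypothetical
name used for illustration, not part of this package); its state is stored
inside the list as the public member `factory`:
---
static struct RegionFactory
{
    size_t minSize; // factory state
    auto opCall(size_t n)
    {
        import mir.utility : max;
        import stdx.allocator.building_blocks.region : Region;
        import stdx.allocator.gc_allocator : GCAllocator;
        return Region!GCAllocator(max(n, minSize));
    }
}
auto list = AllocatorList!RegionFactory(RegionFactory(4 * 1024 * 1024));
auto blk = list.allocate(100); // allocators are created lazily as needed
---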
63 | 
64 | */
65 | struct AllocatorList(Factory, BookkeepingAllocator = GCAllocator)
66 | {
67 | import mir.conv : emplace;
68 | import stdx.allocator.building_blocks.stats_collector
69 | : StatsCollector, Options;
70 | import stdx.allocator.internal : Ternary;
71 | 
72 | private enum ouroboros = is(BookkeepingAllocator == NullAllocator);
73 | 
74 | /**
75 | Alias for `typeof(Factory()(1))`, i.e. the type of the individual
76 | allocators.
77 | */
78 | alias Allocator = typeof(Factory.init(size_t(1)));
79 | // Allocator used internally
80 | private alias SAllocator = StatsCollector!(Allocator, Options.bytesUsed);
81 | 
82 | private static struct Node
83 | {
84 | // Allocator in this node
85 | SAllocator a;
86 | Node* next;
87 | 
88 | @disable this(this);
89 | 
90 | // Is this node unused?
91 | void setUnused() { next = &this; }
92 | bool unused() const { return next is &this; }
93 | 
94 | // Just forward everything to the allocator
95 | alias a this;
96 | }
97 | 
98 | /**
99 | If $(D BookkeepingAllocator) is not $(D NullAllocator), $(D bkalloc) is
100 | defined and accessible.
101 | */
102 | 
103 | // State is stored in an array, but it has a list threaded through it by
104 | // means of the `next` pointers.
105 | 
106 | // state
107 | static if (!ouroboros)
108 | {
109 | static if (stateSize!BookkeepingAllocator) BookkeepingAllocator bkalloc;
110 | else alias bkalloc = BookkeepingAllocator.instance;
111 | }
112 | static if (stateSize!Factory)
113 | {
114 | Factory factory;
115 | }
116 | private Node[] allocators;
117 | private Node* root;
118 | 
119 | static if (stateSize!Factory)
120 | {
121 | private auto make(size_t n) { return factory(n); }
122 | }
123 | else
124 | {
125 | private auto make(size_t n) { Factory f; return f(n); }
126 | }
127 | 
128 | /**
129 | Constructs an `AllocatorList` given a factory object. This constructor is
130 | defined only if `Factory` has state.
131 | */
132 | static if (stateSize!Factory)
133 | this(ref Factory plant)
134 | {
135 | factory = plant;
136 | }
137 | /// Ditto
138 | static if (stateSize!Factory)
139 | this(Factory plant)
140 | {
141 | factory = plant;
142 | }
143 | 
144 | static if (__traits(hasMember, Allocator, "deallocateAll")
145 | && __traits(hasMember, Allocator, "owns"))
146 | ~this()
147 | {
148 | deallocateAll;
149 | }
150 | 
151 | /**
152 | The alignment offered.
153 | */
154 | enum uint alignment = Allocator.alignment;
155 | 
156 | /**
157 | Allocate a block of size $(D s). First tries to allocate from the existing
158 | list of already-created allocators. If none of them can satisfy the request,
159 | creates a new allocator by calling $(D make(s)) and delegates the request
160 | to it. However, if the allocation fresh off a newly created allocator
161 | fails, subsequent calls to $(D allocate) will not cause more calls to $(D
162 | make).
163 | */
164 | void[] allocate(size_t s)
165 | {
166 | for (auto p = &root, n = *p; n; p = &n.next, n = *p)
167 | {
168 | auto result = n.allocate(s);
169 | if (result.length != s) continue;
170 | // Bring to front if not already
171 | if (root != n)
172 | {
173 | *p = n.next;
174 | n.next = root;
175 | root = n;
176 | }
177 | return result;
178 | }
179 | // Can't allocate from the current pool. Check if we just added a new
180 | // allocator, in that case it won't do any good to add yet another.
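// (A root that is still empty after failing this request indicates that a
// freshly created allocator has already proven unable to serve it, so
// creating yet another one would not help.)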
181 | if (root && root.empty == Ternary.yes) 182 | { 183 | // no can do 184 | return null; 185 | } 186 | // Add a new allocator 187 | if (auto a = addAllocator(s)) 188 | { 189 | auto result = a.allocate(s); 190 | assert(owns(result) == Ternary.yes || !result.ptr); 191 | return result; 192 | } 193 | return null; 194 | } 195 | 196 | private void moveAllocators(void[] newPlace) 197 | { 198 | assert(newPlace.ptr.alignedAt(Node.alignof)); 199 | assert(newPlace.length % Node.sizeof == 0); 200 | auto newAllocators = cast(Node[]) newPlace; 201 | assert(allocators.length <= newAllocators.length); 202 | 203 | // Move allocators 204 | foreach (i, ref e; allocators) 205 | { 206 | if (e.unused) 207 | { 208 | newAllocators[i].setUnused; 209 | continue; 210 | } 211 | import core.stdc.string : memcpy; 212 | memcpy(&newAllocators[i].a, &e.a, e.a.sizeof); 213 | if (e.next) 214 | { 215 | newAllocators[i].next = newAllocators.ptr 216 | + (e.next - allocators.ptr); 217 | } 218 | else 219 | { 220 | newAllocators[i].next = null; 221 | } 222 | } 223 | 224 | // Mark the unused portion as unused 225 | foreach (i; allocators.length .. newAllocators.length) 226 | { 227 | newAllocators[i].setUnused; 228 | } 229 | auto toFree = allocators; 230 | 231 | // Change state 232 | root = newAllocators.ptr + (root - allocators.ptr); 233 | allocators = newAllocators; 234 | 235 | // Free the olden buffer 236 | static if (ouroboros) 237 | { 238 | static if (__traits(hasMember, Allocator, "deallocate") 239 | && __traits(hasMember, Allocator, "owns")) 240 | deallocate(toFree); 241 | } 242 | else 243 | { 244 | bkalloc.deallocate(toFree); 245 | } 246 | } 247 | 248 | static if (ouroboros) 249 | private Node* addAllocator(size_t atLeastBytes) 250 | { 251 | void[] t = allocators; 252 | static if (__traits(hasMember, Allocator, "expand") 253 | && __traits(hasMember, Allocator, "owns")) 254 | { 255 | immutable bool expanded = t && this.expand(t, Node.sizeof); 256 | } 257 | else 258 | { 259 | enum expanded = false; 260 | } 261 | if (expanded) 262 | { 263 | import core.stdc.string : memcpy; 264 | assert(t.length % Node.sizeof == 0); 265 | assert(t.ptr.alignedAt(Node.alignof)); 266 | allocators = cast(Node[]) t; 267 | allocators[$ - 1].setUnused; 268 | auto newAlloc = SAllocator(make(atLeastBytes)); 269 | memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof); 270 | emplace(&newAlloc); 271 | } 272 | else 273 | { 274 | immutable toAlloc = (allocators.length + 1) * Node.sizeof 275 | + atLeastBytes + 128; 276 | auto newAlloc = SAllocator(make(toAlloc)); 277 | auto newPlace = newAlloc.allocate( 278 | (allocators.length + 1) * Node.sizeof); 279 | if (!newPlace) return null; 280 | moveAllocators(newPlace); 281 | import core.stdc.string : memcpy; 282 | memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof); 283 | emplace(&newAlloc); 284 | assert(allocators[$ - 1].owns(allocators) == Ternary.yes); 285 | } 286 | // Insert as new root 287 | if (root != &allocators[$ - 1]) 288 | { 289 | allocators[$ - 1].next = root; 290 | root = &allocators[$ - 1]; 291 | } 292 | else 293 | { 294 | // This is the first one 295 | root.next = null; 296 | } 297 | assert(!root.unused); 298 | return root; 299 | } 300 | 301 | static if (!ouroboros) 302 | private Node* addAllocator(size_t atLeastBytes) 303 | { 304 | void[] t = allocators; 305 | static if (__traits(hasMember, BookkeepingAllocator, "expand")) 306 | immutable bool expanded = bkalloc.expand(t, Node.sizeof); 307 | else 308 | immutable bool expanded = false; 309 | if (expanded) 310 | { 311 | assert(t.length % 
Node.sizeof == 0); 312 | assert(t.ptr.alignedAt(Node.alignof)); 313 | allocators = cast(Node[]) t; 314 | allocators[$ - 1].setUnused; 315 | } 316 | else 317 | { 318 | // Could not expand, create a new block 319 | t = bkalloc.allocate((allocators.length + 1) * Node.sizeof); 320 | assert(t.length % Node.sizeof == 0); 321 | if (!t.ptr) return null; 322 | moveAllocators(t); 323 | } 324 | assert(allocators[$ - 1].unused); 325 | auto newAlloc = SAllocator(make(atLeastBytes)); 326 | import core.stdc.string : memcpy; 327 | memcpy(&allocators[$ - 1].a, &newAlloc, newAlloc.sizeof); 328 | emplace(&newAlloc); 329 | // Creation succeeded, insert as root 330 | if (allocators.length == 1) 331 | allocators[$ - 1].next = null; 332 | else 333 | allocators[$ - 1].next = root; 334 | assert(allocators[$ - 1].a.bytesUsed == 0); 335 | root = &allocators[$ - 1]; 336 | return root; 337 | } 338 | 339 | /** 340 | Defined only if `Allocator` defines `owns`. Tries each allocator in 341 | turn, in most-recently-used order. If the owner is found, it is moved to 342 | the front of the list as a side effect under the assumption it will be used 343 | soon. 344 | 345 | Returns: `Ternary.yes` if one allocator was found to return `Ternary.yes`, 346 | `Ternary.no` if all component allocators returned `Ternary.no`, and 347 | `Ternary.unknown` if no allocator returned `Ternary.yes` and at least one 348 | returned `Ternary.unknown`. 349 | */ 350 | static if (__traits(hasMember, Allocator, "owns")) 351 | Ternary owns(void[] b) 352 | { 353 | auto result = Ternary.no; 354 | for (auto p = &root, n = *p; n; p = &n.next, n = *p) 355 | { 356 | immutable t = n.owns(b); 357 | if (t != Ternary.yes) 358 | { 359 | if (t == Ternary.unknown) result = t; 360 | continue; 361 | } 362 | // Move the owner to front, speculating it'll be used 363 | if (n != root) 364 | { 365 | *p = n.next; 366 | n.next = root; 367 | root = n; 368 | } 369 | return Ternary.yes; 370 | } 371 | return result; 372 | } 373 | 374 | /** 375 | Defined only if $(D Allocator.expand) is defined. Finds the owner of $(D b) 376 | and calls $(D expand) for it. The owner is not brought to the head of the 377 | list. 378 | */ 379 | static if (__traits(hasMember, Allocator, "expand") 380 | && __traits(hasMember, Allocator, "owns")) 381 | bool expand(ref void[] b, size_t delta) 382 | { 383 | if (!b.ptr) return delta == 0; 384 | for (auto p = &root, n = *p; n; p = &n.next, n = *p) 385 | { 386 | if (n.owns(b) == Ternary.yes) return n.expand(b, delta); 387 | } 388 | return false; 389 | } 390 | 391 | /** 392 | Defined only if $(D Allocator.reallocate) is defined. Finds the owner of 393 | $(D b) and calls $(D reallocate) for it. If that fails, calls the global 394 | $(D reallocate), which allocates a new block and moves memory. 395 | */ 396 | static if (__traits(hasMember, Allocator, "reallocate")) 397 | bool reallocate(ref void[] b, size_t s) 398 | { 399 | // First attempt to reallocate within the existing node 400 | if (!b.ptr) 401 | { 402 | b = allocate(s); 403 | return b.length == s; 404 | } 405 | for (auto p = &root, n = *p; n; p = &n.next, n = *p) 406 | { 407 | if (n.owns(b) == Ternary.yes) return n.reallocate(b, s); 408 | } 409 | // Failed, but we may find new memory in a new node. 410 | return .reallocate(this, b, s); 411 | } 412 | 413 | /** 414 | Defined if $(D Allocator.deallocate) and $(D Allocator.owns) are defined. 
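As a side effect, the allocator owning `b` is moved to the front of the list.
To avoid thrashing, the allocator that just became empty is kept alive;
instead, if another empty allocator already exists in the list, that one is
destroyed and its slot marked unused.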
415 | */
416 | static if (__traits(hasMember, Allocator, "deallocate")
417 | && __traits(hasMember, Allocator, "owns"))
418 | bool deallocate(void[] b)
419 | {
420 | if (!b.ptr) return true;
421 | assert(allocators.length);
422 | assert(owns(b) == Ternary.yes);
423 | bool result;
424 | for (auto p = &root, n = *p; ; p = &n.next, n = *p)
425 | {
426 | assert(n);
427 | if (n.owns(b) != Ternary.yes) continue;
428 | result = n.deallocate(b);
429 | // Bring to front
430 | if (n != root)
431 | {
432 | *p = n.next;
433 | n.next = root;
434 | root = n;
435 | }
436 | if (n.empty != Ternary.yes) return result;
437 | break;
438 | }
439 | // Should we return this allocator back to the wild? Policy: if there
440 | // are TWO empty allocators, we can release ONE. This is to avoid
441 | // thrashing.
442 | // Note that the loop starts from the second element.
443 | for (auto p = &root.next, n = *p; n; p = &n.next, n = *p)
444 | {
445 | if (n.unused || n.empty != Ternary.yes) continue;
446 | // Used and empty baby, nuke it!
447 | n.a.destroy;
448 | *p = n.next;
449 | n.setUnused;
450 | break;
451 | }
452 | return result;
453 | }
454 | 
455 | /**
456 | Defined only if $(D Allocator.owns) and $(D Allocator.deallocateAll) are
457 | defined.
458 | */
459 | static if (ouroboros && __traits(hasMember, Allocator, "deallocateAll")
460 | && __traits(hasMember, Allocator, "owns"))
461 | bool deallocateAll()
462 | {
463 | Node* special;
464 | foreach (ref n; allocators)
465 | {
466 | if (n.unused) continue;
467 | if (n.owns(allocators) == Ternary.yes)
468 | {
469 | special = &n;
470 | continue;
471 | }
472 | n.a.deallocateAll;
473 | n.a.destroy;
474 | }
475 | assert(special || !allocators.ptr);
476 | if (special)
477 | {
478 | special.deallocate(allocators);
479 | }
480 | allocators = null;
481 | root = null;
482 | return true;
483 | }
484 | 
485 | static if (!ouroboros && __traits(hasMember, Allocator, "deallocateAll")
486 | && __traits(hasMember, Allocator, "owns"))
487 | bool deallocateAll()
488 | {
489 | foreach (ref n; allocators)
490 | {
491 | if (n.unused) continue;
492 | n.a.deallocateAll;
493 | n.a.destroy;
494 | }
495 | bkalloc.deallocate(allocators);
496 | allocators = null;
497 | root = null;
498 | return true;
499 | }
500 | 
501 | /**
502 | Returns `Ternary.yes` if no allocators are currently active,
503 | `Ternary.no` otherwise. This method never returns `Ternary.unknown`.
504 | */ 505 | Ternary empty() const 506 | { 507 | return Ternary(!allocators.length); 508 | } 509 | } 510 | 511 | /// Ditto 512 | template AllocatorList(alias factoryFunction, 513 | BookkeepingAllocator = GCAllocator) 514 | { 515 | alias A = typeof(factoryFunction(size_t(1))); 516 | static assert( 517 | // is a template function (including literals) 518 | is(typeof({A function(size_t) @system x = factoryFunction!size_t;})) 519 | || 520 | // or a function (including literals) 521 | is(typeof({A function(size_t) @system x = factoryFunction;})) 522 | , 523 | "Only function names and function literals that take size_t" 524 | ~ " and return an allocator are accepted, not " 525 | ~ typeof(factoryFunction).stringof 526 | ); 527 | static struct Factory 528 | { 529 | A opCall(size_t n) { return factoryFunction(n); } 530 | } 531 | alias AllocatorList = .AllocatorList!(Factory, BookkeepingAllocator); 532 | } 533 | 534 | /// 535 | version(Posix) @system unittest 536 | { 537 | import mir.utility : max; 538 | import stdx.allocator.building_blocks.free_list : ContiguousFreeList; 539 | import stdx.allocator.building_blocks.null_allocator : NullAllocator; 540 | import stdx.allocator.building_blocks.region : Region; 541 | import stdx.allocator.building_blocks.segregator : Segregator; 542 | import stdx.allocator.gc_allocator : GCAllocator; 543 | import stdx.allocator.mmap_allocator : MmapAllocator; 544 | 545 | // Ouroboros allocator list based upon 4MB regions, fetched directly from 546 | // mmap. All memory is released upon destruction. 547 | alias A1 = AllocatorList!((n) => Region!MmapAllocator(max(n, 1024u * 4096u)), 548 | NullAllocator); 549 | 550 | // Allocator list based upon 4MB regions, fetched from the garbage 551 | // collector. All memory is released upon destruction. 552 | alias A2 = AllocatorList!((n) => Region!GCAllocator(max(n, 1024u * 4096u))); 553 | 554 | // Ouroboros allocator list based upon 4MB regions, fetched from the garbage 555 | // collector. Memory is left to the collector. 556 | alias A3 = AllocatorList!( 557 | (n) => Region!NullAllocator(new ubyte[max(n, 1024u * 4096u)]), 558 | NullAllocator); 559 | 560 | // Allocator list that creates one freelist for all objects 561 | alias A4 = 562 | Segregator!( 563 | 64, AllocatorList!( 564 | (n) => ContiguousFreeList!(NullAllocator, 0, 64)( 565 | cast(ubyte[])(GCAllocator.instance.allocate(4096)))), 566 | GCAllocator); 567 | 568 | A4 a; 569 | auto small = a.allocate(64); 570 | assert(small); 571 | a.deallocate(small); 572 | auto b1 = a.allocate(1024 * 8192); 573 | assert(b1 !is null); // still works due to overdimensioning 574 | b1 = a.allocate(1024 * 10); 575 | assert(b1.length == 1024 * 10); 576 | } 577 | 578 | @system unittest 579 | { 580 | // Create an allocator based upon 4MB regions, fetched from the GC heap. 581 | import mir.utility : max; 582 | import stdx.allocator.building_blocks.region : Region; 583 | AllocatorList!((n) => Region!GCAllocator(new ubyte[max(n, 1024u * 4096u)]), 584 | NullAllocator) a; 585 | const b1 = a.allocate(1024 * 8192); 586 | assert(b1 !is null); // still works due to overdimensioning 587 | const b2 = a.allocate(1024 * 10); 588 | assert(b2.length == 1024 * 10); 589 | a.deallocateAll(); 590 | } 591 | 592 | @system unittest 593 | { 594 | // Create an allocator based upon 4MB regions, fetched from the GC heap. 
595 | import mir.utility : max; 596 | import stdx.allocator.building_blocks.region : Region; 597 | AllocatorList!((n) => Region!()(new ubyte[max(n, 1024u * 4096u)])) a; 598 | auto b1 = a.allocate(1024 * 8192); 599 | assert(b1 !is null); // still works due to overdimensioning 600 | b1 = a.allocate(1024 * 10); 601 | assert(b1.length == 1024 * 10); 602 | a.deallocateAll(); 603 | } 604 | 605 | @system unittest 606 | { 607 | import mir.utility : max; 608 | import stdx.allocator.building_blocks.region : Region; 609 | import stdx.allocator.internal : Ternary; 610 | AllocatorList!((n) => Region!()(new ubyte[max(n, 1024u * 4096u)])) a; 611 | auto b1 = a.allocate(1024 * 8192); 612 | assert(b1 !is null); 613 | b1 = a.allocate(1024 * 10); 614 | assert(b1.length == 1024 * 10); 615 | a.allocate(1024 * 4095); 616 | a.deallocateAll(); 617 | assert(a.empty == Ternary.yes); 618 | } 619 | 620 | @system unittest 621 | { 622 | import stdx.allocator.building_blocks.region : Region; 623 | enum bs = GCAllocator.alignment; 624 | AllocatorList!((n) => Region!GCAllocator(256 * bs)) a; 625 | auto b1 = a.allocate(192 * bs); 626 | assert(b1.length == 192 * bs); 627 | assert(a.allocators.length == 1); 628 | auto b2 = a.allocate(64 * bs); 629 | assert(b2.length == 64 * bs); 630 | assert(a.allocators.length == 1); 631 | auto b3 = a.allocate(192 * bs); 632 | assert(b3.length == 192 * bs); 633 | assert(a.allocators.length == 2); 634 | a.deallocate(b1); 635 | b1 = a.allocate(64 * bs); 636 | assert(b1.length == 64 * bs); 637 | assert(a.allocators.length == 2); 638 | a.deallocateAll(); 639 | } 640 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/bucketizer.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.building_blocks.bucketizer; 3 | 4 | /** 5 | 6 | A $(D Bucketizer) uses distinct allocators for handling allocations of sizes in 7 | the intervals $(D [min, min + step - 1]), $(D [min + step, min + 2 * step - 1]), 8 | $(D [min + 2 * step, min + 3 * step - 1]), $(D ...), $(D [max - step + 1, max]). 9 | 10 | $(D Bucketizer) holds a fixed-size array of allocators and dispatches calls to 11 | them appropriately. The size of the array is $(D (max + 1 - min) / step), which 12 | must be an exact division. 13 | 14 | Allocations for sizes smaller than $(D min) or larger than $(D max) are illegal 15 | for $(D Bucketizer). To handle them separately, $(D Segregator) may be of use. 16 | 17 | */ 18 | struct Bucketizer(Allocator, size_t min, size_t max, size_t step) 19 | { 20 | import common = stdx.allocator.common : roundUpToMultipleOf; 21 | import stdx.allocator.internal : Ternary; 22 | 23 | static assert((max - (min - 1)) % step == 0, 24 | "Invalid limits when instantiating " ~ Bucketizer.stringof); 25 | 26 | // state 27 | /** 28 | The array of allocators is publicly available for e.g. initialization and 29 | inspection. 30 | */ 31 | Allocator[(max + 1 - min) / step] buckets; 32 | 33 | private Allocator* allocatorFor(size_t n) 34 | { 35 | const i = (n - min) / step; 36 | return i < buckets.length ? buckets.ptr + i : null; 37 | } 38 | 39 | /** 40 | The alignment offered is the same as $(D Allocator.alignment). 41 | */ 42 | enum uint alignment = Allocator.alignment; 43 | 44 | /** 45 | Rounds up to the maximum size of the bucket in which $(D bytes) falls. 
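For example (a sketch using the same limits as the unit test below; `A`
stands for any allocator type):
---
// Bucketizer!(A, 65, 512, 64) has buckets [65 .. 128], [129 .. 192], ...
// goodAllocSize(65)  == 128
// goodAllocSize(100) == 128
// goodAllocSize(129) == 192
// goodAllocSize(512) == 512
---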
46 | */
47 | size_t goodAllocSize(size_t bytes) const
48 | {
49 | // round up bytes such that bytes - min + 1 is a multiple of step
50 | assert(bytes >= min);
51 | const min_1 = min - 1;
52 | return min_1 + roundUpToMultipleOf(bytes - min_1, step);
53 | }
54 | 
55 | /**
56 | Directs the call to one of the $(D buckets) allocators.
57 | */
58 | void[] allocate(size_t bytes)
59 | {
60 | if (!bytes) return null;
61 | if (auto a = allocatorFor(bytes))
62 | {
63 | const actual = goodAllocSize(bytes);
64 | auto result = a.allocate(actual);
65 | return result.ptr ? result.ptr[0 .. bytes] : null;
66 | }
67 | return null;
68 | }
69 | 
70 | /**
71 | Directs the call to one of the $(D buckets) allocators. Defined only
72 | if `Allocator` defines `alignedAllocate`.
73 | */
74 | static if (__traits(hasMember, Allocator, "alignedAllocate"))
75 | void[] alignedAllocate(size_t bytes, uint a)
76 | {
77 | if (!bytes) return null;
78 | if (auto dest = allocatorFor(bytes))
79 | {
80 | const actual = goodAllocSize(bytes);
81 | auto result = dest.alignedAllocate(actual, a);
82 | return result.ptr ? result.ptr[0 .. bytes] : null;
83 | }
84 | return null;
85 | }
86 | 
87 | /**
88 | This method allows expansion within the respective bucket range. It succeeds
89 | if both $(D b.length) and $(D b.length + delta) fall in a range of the form
90 | $(D [min + k * step, min + (k + 1) * step - 1]).
91 | */
92 | bool expand(ref void[] b, size_t delta)
93 | {
94 | if (!b.ptr) return delta == 0;
95 | assert(b.length >= min && b.length <= max);
96 | const available = goodAllocSize(b.length);
97 | const desired = b.length + delta;
98 | if (available < desired) return false;
99 | b = b.ptr[0 .. desired];
100 | return true;
101 | }
102 | 
103 | /**
104 | This method allows reallocation within the respective bucket range. If both
105 | $(D b.length) and $(D size) fall in a range of the form $(D [min + k *
106 | step, min + (k + 1) * step - 1]), then reallocation is in place. Otherwise,
107 | reallocation with moving is attempted.
108 | */
109 | bool reallocate(ref void[] b, size_t size)
110 | {
111 | if (size == 0)
112 | {
113 | deallocate(b);
114 | b = null;
115 | return true;
116 | }
117 | if (size >= b.length)
118 | {
119 | return expand(b, size - b.length);
120 | }
121 | assert(b.length >= min && b.length <= max);
122 | if (goodAllocSize(size) == goodAllocSize(b.length))
123 | {
124 | b = b.ptr[0 .. size];
125 | return true;
126 | }
127 | // Move cross buckets
128 | return common.reallocate(this, b, size);
129 | }
130 | 
131 | /**
132 | Similar to `reallocate`, with alignment. Defined only if `Allocator`
133 | defines `alignedReallocate`.
134 | */
135 | static if (__traits(hasMember, Allocator, "alignedReallocate"))
136 | bool alignedReallocate(ref void[] b, size_t size, uint a)
137 | {
138 | if (size == 0)
139 | {
140 | deallocate(b);
141 | b = null;
142 | return true;
143 | }
144 | if (size >= b.length)
145 | {
146 | return expand(b, size - b.length);
147 | }
148 | assert(b.length >= min && b.length <= max);
149 | if (goodAllocSize(size) == goodAllocSize(b.length))
150 | {
151 | b = b.ptr[0 .. size];
152 | return true;
153 | }
154 | // Move cross buckets
155 | return .alignedReallocate(this, b, size, a);
156 | }
157 | 
158 | /**
159 | Defined only if `Allocator` defines `owns`. Finds the owner of `b` and forwards the call to it.
160 | */ 161 | static if (__traits(hasMember, Allocator, "owns")) 162 | Ternary owns(void[] b) 163 | { 164 | if (!b.ptr) return Ternary.no; 165 | if (auto a = allocatorFor(b.length)) 166 | { 167 | const actual = goodAllocSize(b.length); 168 | return a.owns(b.ptr[0 .. actual]); 169 | } 170 | return Ternary.no; 171 | } 172 | 173 | /** 174 | This method is only defined if $(D Allocator) defines $(D deallocate). 175 | */ 176 | static if (__traits(hasMember, Allocator, "deallocate")) 177 | bool deallocate(void[] b) 178 | { 179 | if (!b.ptr) return true; 180 | if (auto a = allocatorFor(b.length)) 181 | { 182 | a.deallocate(b.ptr[0 .. goodAllocSize(b.length)]); 183 | } 184 | return true; 185 | } 186 | 187 | /** 188 | This method is only defined if all allocators involved define $(D 189 | deallocateAll), and calls it for each bucket in turn. Returns `true` if all 190 | allocators could deallocate all. 191 | */ 192 | static if (__traits(hasMember, Allocator, "deallocateAll")) 193 | bool deallocateAll() 194 | { 195 | bool result = true; 196 | foreach (ref a; buckets) 197 | { 198 | if (!a.deallocateAll()) result = false; 199 | } 200 | return result; 201 | } 202 | 203 | /** 204 | This method is only defined if all allocators involved define $(D 205 | resolveInternalPointer), and tries it for each bucket in turn. 206 | */ 207 | static if (__traits(hasMember, Allocator, "resolveInternalPointer")) 208 | Ternary resolveInternalPointer(const void* p, ref void[] result) 209 | { 210 | foreach (ref a; buckets) 211 | { 212 | Ternary r = a.resolveInternalPointer(p, result); 213 | if (r == Ternary.yes) return r; 214 | } 215 | return Ternary.no; 216 | } 217 | } 218 | 219 | /// 220 | @system unittest 221 | { 222 | import mir.utility : max; 223 | import stdx.allocator.building_blocks.allocator_list : AllocatorList; 224 | import stdx.allocator.building_blocks.free_list : FreeList; 225 | import stdx.allocator.building_blocks.region : Region; 226 | import stdx.allocator.common : unbounded; 227 | import stdx.allocator.mallocator : Mallocator; 228 | import stdx.allocator.internal : Ternary; 229 | Bucketizer!( 230 | FreeList!( 231 | AllocatorList!( 232 | (size_t n) => Region!Mallocator(max(n, 1024u * 1024))), 233 | 0, unbounded), 234 | 65, 512, 64) a; 235 | auto b = a.allocate(400); 236 | assert(b.length == 400); 237 | assert(a.owns(b) == Ternary.yes); 238 | void[] p; 239 | a.deallocate(b); 240 | } 241 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/fallback_allocator.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.building_blocks.fallback_allocator; 3 | 4 | import stdx.allocator.common; 5 | 6 | /** 7 | $(D FallbackAllocator) is the allocator equivalent of an "or" operator in 8 | algebra. An allocation request is first attempted with the $(D Primary) 9 | allocator. If that returns $(D null), the request is forwarded to the $(D 10 | Fallback) allocator. All other requests are dispatched appropriately to one of 11 | the two allocators. 12 | 13 | In order to work, $(D FallbackAllocator) requires that $(D Primary) defines the 14 | $(D owns) method. This is needed in order to decide which allocator was 15 | responsible for a given allocation. 16 | 17 | $(D FallbackAllocator) is useful for fast, special-purpose allocators backed up 18 | by general-purpose allocators. The example below features a stack region backed 19 | up by the $(D GCAllocator). 
20 | */ 21 | struct FallbackAllocator(Primary, Fallback) 22 | { 23 | import mir.utility : min; 24 | import stdx.allocator.internal : Ternary; 25 | 26 | @system unittest 27 | { 28 | testAllocator!(() => FallbackAllocator()); 29 | } 30 | 31 | /// The primary allocator. 32 | static if (stateSize!Primary) Primary primary; 33 | else alias primary = Primary.instance; 34 | 35 | /// The fallback allocator. 36 | static if (stateSize!Fallback) Fallback fallback; 37 | else alias fallback = Fallback.instance; 38 | 39 | /** 40 | If both $(D Primary) and $(D Fallback) are stateless, $(D FallbackAllocator) 41 | defines a static instance called `instance`. 42 | */ 43 | static if (!stateSize!Primary && !stateSize!Fallback) 44 | { 45 | enum FallbackAllocator instance = FallbackAllocator(); 46 | } 47 | 48 | /** 49 | The alignment offered is the minimum of the two allocators' alignment. 50 | */ 51 | enum uint alignment = min(Primary.alignment, Fallback.alignment); 52 | 53 | /** 54 | Allocates memory trying the primary allocator first. If it returns $(D 55 | null), the fallback allocator is tried. 56 | */ 57 | void[] allocate(size_t s) 58 | { 59 | auto result = primary.allocate(s); 60 | return result.length == s ? result : fallback.allocate(s); 61 | } 62 | 63 | /** 64 | $(D FallbackAllocator) offers $(D alignedAllocate) iff at least one of the 65 | allocators also offers it. It attempts to allocate using either or both. 66 | */ 67 | static if (__traits(hasMember, Primary, "alignedAllocate") 68 | || __traits(hasMember, Fallback, "alignedAllocate")) 69 | void[] alignedAllocate(size_t s, uint a) 70 | { 71 | static if (__traits(hasMember, Primary, "alignedAllocate")) 72 | {{ 73 | auto result = primary.alignedAllocate(s, a); 74 | if (result.length == s) return result; 75 | }} 76 | static if (__traits(hasMember, Fallback, "alignedAllocate")) 77 | {{ 78 | auto result = fallback.alignedAllocate(s, a); 79 | if (result.length == s) return result; 80 | }} 81 | return null; 82 | } 83 | 84 | /** 85 | 86 | $(D expand) is defined if and only if at least one of the allocators 87 | defines $(D expand). It works as follows. If $(D primary.owns(b)), then the 88 | request is forwarded to $(D primary.expand) if it is defined, or fails 89 | (returning $(D false)) otherwise. If $(D primary) does not own $(D b), then 90 | the request is forwarded to $(D fallback.expand) if it is defined, or fails 91 | (returning $(D false)) otherwise. 92 | 93 | */ 94 | static if (__traits(hasMember, Primary, "owns") 95 | && (__traits(hasMember, Primary, "expand") || __traits(hasMember, Fallback, "expand"))) 96 | bool expand(ref void[] b, size_t delta) 97 | { 98 | if (!delta) return true; 99 | if (!b.ptr) return false; 100 | if (primary.owns(b) == Ternary.yes) 101 | { 102 | static if (__traits(hasMember, Primary, "expand")) 103 | return primary.expand(b, delta); 104 | else 105 | return false; 106 | } 107 | static if (__traits(hasMember, Fallback, "expand")) 108 | return fallback.expand(b, delta); 109 | else 110 | return false; 111 | } 112 | 113 | /** 114 | 115 | $(D reallocate) works as follows. If $(D primary.owns(b)), then $(D 116 | primary.reallocate(b, newSize)) is attempted. If it fails, an attempt is 117 | made to move the allocation from $(D primary) to $(D fallback). 118 | 119 | If $(D primary) does not own $(D b), then $(D fallback.reallocate(b, 120 | newSize)) is attempted. If that fails, an attempt is made to move the 121 | allocation from $(D fallback) to $(D primary). 
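An illustrative sketch (`Stack` stands for a hypothetical primary that
defines $(D owns) and $(D reallocate)):
---
FallbackAllocator!(Stack, GCAllocator) a;
auto b = a.allocate(100);     // assume this is served by the primary
a.reallocate(b, 1024 * 1024); // if the primary cannot grow the block in
                              // place, its contents are copied into a new
                              // block from the fallback and b is rebound
---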
122 | 123 | */ 124 | static if (__traits(hasMember, Primary, "owns")) 125 | bool reallocate(ref void[] b, size_t newSize) 126 | { 127 | bool crossAllocatorMove(From, To)(auto ref From from, auto ref To to) 128 | { 129 | auto b1 = to.allocate(newSize); 130 | if (b1.length != newSize) return false; 131 | if (b.length < newSize) b1[0 .. b.length] = b[]; 132 | else b1[] = b[0 .. newSize]; 133 | static if (__traits(hasMember, From, "deallocate")) 134 | from.deallocate(b); 135 | b = b1; 136 | return true; 137 | } 138 | 139 | if (b is null || primary.owns(b) == Ternary.yes) 140 | { 141 | return primary.reallocate(b, newSize) 142 | // Move from primary to fallback 143 | || crossAllocatorMove(primary, fallback); 144 | } 145 | return fallback.reallocate(b, newSize) 146 | // Interesting. Move from fallback to primary. 147 | || crossAllocatorMove(fallback, primary); 148 | } 149 | 150 | static if (__traits(hasMember, Primary, "owns") 151 | && (__traits(hasMember, Primary, "alignedAllocate") 152 | || __traits(hasMember, Fallback, "alignedAllocate"))) 153 | bool alignedReallocate(ref void[] b, size_t newSize, uint a) 154 | { 155 | bool crossAllocatorMove(From, To)(auto ref From from, auto ref To to) 156 | { 157 | static if (!__traits(hasMember, To, "alignedAllocate")) 158 | { 159 | return false; 160 | } 161 | else 162 | { 163 | auto b1 = to.alignedAllocate(newSize, a); 164 | if (b1.length != newSize) return false; 165 | if (b.length < newSize) b1[0 .. b.length] = b[]; 166 | else b1[] = b[0 .. newSize]; 167 | static if (__traits(hasMember, From, "deallocate")) 168 | from.deallocate(b); 169 | b = b1; 170 | return true; 171 | } 172 | } 173 | 174 | static if (__traits(hasMember, Primary, "alignedAllocate")) 175 | { 176 | if (b is null || primary.owns(b) == Ternary.yes) 177 | { 178 | return primary.alignedReallocate(b, newSize, a) 179 | || crossAllocatorMove(primary, fallback); 180 | } 181 | } 182 | static if (__traits(hasMember, Fallback, "alignedAllocate")) 183 | { 184 | return fallback.alignedReallocate(b, newSize, a) 185 | || crossAllocatorMove(fallback, primary); 186 | } 187 | else 188 | { 189 | return false; 190 | } 191 | } 192 | 193 | /** 194 | $(D owns) is defined if and only if both allocators define $(D owns). 195 | Returns $(D primary.owns(b) | fallback.owns(b)). 196 | */ 197 | static if (__traits(hasMember, Primary, "owns") && __traits(hasMember, Fallback, "owns")) 198 | Ternary owns(void[] b) 199 | { 200 | return primary.owns(b) | fallback.owns(b); 201 | } 202 | 203 | /** 204 | $(D resolveInternalPointer) is defined if and only if both allocators 205 | define it. 206 | */ 207 | static if (__traits(hasMember, Primary, "resolveInternalPointer") 208 | && __traits(hasMember, Fallback, "resolveInternalPointer")) 209 | Ternary resolveInternalPointer(const void* p, ref void[] result) 210 | { 211 | Ternary r = primary.resolveInternalPointer(p, result); 212 | return r == Ternary.no ? fallback.resolveInternalPointer(p, result) : r; 213 | } 214 | 215 | /** 216 | $(D deallocate) is defined if and only if at least one of the allocators 217 | define $(D deallocate). It works as follows. If $(D primary.owns(b)), 218 | then the request is forwarded to $(D primary.deallocate) if it is defined, 219 | or is a no-op otherwise. If $(D primary) does not own $(D b), then the 220 | request is forwarded to $(D fallback.deallocate) if it is defined, or is a 221 | no-op otherwise. 
222 | */ 223 | static if (__traits(hasMember, Primary, "owns") && 224 | (__traits(hasMember, Primary, "deallocate") 225 | || __traits(hasMember, Fallback, "deallocate"))) 226 | bool deallocate(void[] b) 227 | { 228 | if (primary.owns(b) == Ternary.yes) 229 | { 230 | static if (__traits(hasMember, Primary, "deallocate")) 231 | return primary.deallocate(b); 232 | else 233 | return false; 234 | } 235 | else 236 | { 237 | static if (__traits(hasMember, Fallback, "deallocate")) 238 | return fallback.deallocate(b); 239 | else 240 | return false; 241 | } 242 | } 243 | 244 | /** 245 | $(D empty) is defined if both allocators also define it. 246 | 247 | Returns: $(D primary.empty & fallback.empty) 248 | */ 249 | static if (__traits(hasMember, Primary, "empty") && __traits(hasMember, Fallback, "empty")) 250 | Ternary empty() 251 | { 252 | return primary.empty & fallback.empty; 253 | } 254 | } 255 | 256 | @system unittest 257 | { 258 | import std.conv : text; 259 | import stdx.allocator.building_blocks.region : InSituRegion; 260 | import stdx.allocator.gc_allocator : GCAllocator; 261 | import stdx.allocator.internal : Ternary; 262 | FallbackAllocator!(InSituRegion!16_384, GCAllocator) a; 263 | // This allocation uses the stack 264 | auto b1 = a.allocate(1024); 265 | assert(b1.length == 1024, text(b1.length)); 266 | assert(a.primary.owns(b1) == Ternary.yes); 267 | // This large allocation exceeds the region's capacity, so it goes to the GCAllocator fallback 268 | auto b2 = a.allocate(1024 * 1024); 269 | assert(a.primary.owns(b2) == Ternary.no); 270 | a.deallocate(b1); 271 | a.deallocate(b2); 272 | } 273 | 274 | /** 275 | Convenience function that uses type deduction to return the appropriate 276 | $(D FallbackAllocator) instance. To initialize with allocators that don't have 277 | state, use their $(D instance) static member. 278 | */ 279 | FallbackAllocator!(Primary, Fallback) 280 | fallbackAllocator(Primary, Fallback)(auto ref Primary p, auto ref Fallback f) 281 | { 282 | import mir.functional: forward; 283 | 284 | alias R = FallbackAllocator!(Primary, Fallback); 285 | 286 | static if (stateSize!Primary) 287 | static if (stateSize!Fallback) 288 | return R(forward!p, forward!f); 289 | else 290 | return R(forward!p); 291 | else 292 | static if (stateSize!Fallback) 293 | return R(forward!f); 294 | else 295 | return R(); 296 | } 297 | 298 | /// 299 | @system unittest 300 | { 301 | import stdx.allocator.building_blocks.region : Region; 302 | import stdx.allocator.gc_allocator : GCAllocator; 303 | import stdx.allocator.internal : Ternary; 304 | auto a = fallbackAllocator(Region!GCAllocator(1024), GCAllocator.instance); 305 | auto b1 = a.allocate(1020); 306 | assert(b1.length == 1020); 307 | assert(a.primary.owns(b1) == Ternary.yes); 308 | auto b2 = a.allocate(10); 309 | assert(b2.length == 10); 310 | assert(a.primary.owns(b2) == Ternary.no); 311 | } 312 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/free_tree.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.building_blocks.free_tree; 3 | 4 | import stdx.allocator.common; 5 | 6 | //debug = std_experimental_allocator_free_tree; 7 | 8 | /** 9 | 10 | The Free Tree allocator, stackable on top of any other allocator, bears 11 | similarity to the free list allocator. Instead of a singly-linked list of 12 | previously freed blocks, it maintains a binary search tree.
This allows the 13 | Free Tree allocator to manage blocks of arbitrary lengths and search them 14 | efficiently. 15 | 16 | Common uses of $(D FreeTree) include: 17 | 18 | $(UL 19 | $(LI Adding $(D deallocate) capability to an allocator that lacks it (such as simple regions).) 20 | $(LI Getting the benefits of multiple adaptable freelists that do not need to 21 | be tuned for one specific size but instead automatically adapt themselves to 22 | frequently used sizes.) 23 | ) 24 | 25 | The free tree has special handling of duplicates (a singly-linked list per 26 | node) in anticipation of a large number of duplicates. Allocation time from the 27 | free tree is expected to be $(BIGOH log n) where $(D n) is the number of 28 | distinct sizes (not total nodes) kept in the free tree. 29 | 30 | Allocation requests first search the tree for a buffer of suitable size 31 | deallocated in the past. If a match is found, the node is removed from the tree 32 | and the memory is returned. Otherwise, the allocation is directed to $(D 33 | ParentAllocator). If at this point $(D ParentAllocator) also fails to allocate, 34 | $(D FreeTree) frees everything and then tries the parent allocator again. 35 | 36 | Upon deallocation, the deallocated block is inserted in the internally 37 | maintained free tree (not returned to the parent). The free tree is not kept 38 | balanced. Instead, it has a last-in-first-out flavor because newly inserted 39 | blocks are rotated to the root of the tree. That way allocations are cache 40 | friendly and also frequently used sizes are more likely to be found quickly, 41 | whereas seldom used sizes migrate to the leaves of the tree. 42 | 43 | $(D FreeTree) rounds up small allocations to at least $(D 4 * size_t.sizeof), 44 | which on 64-bit systems is one cache line. If very small objects need to 45 | be efficiently allocated, the $(D FreeTree) should be fronted with an 46 | appropriate small object allocator. 47 | 48 | The following methods are defined if $(D ParentAllocator) defines them, and forward to it: $(D allocateAll), $(D expand), $(D owns), $(D reallocate). 49 | */ 50 | struct FreeTree(ParentAllocator) 51 | { 52 | static assert(ParentAllocator.alignment % size_t.alignof == 0, 53 | "FreeTree must be on top of a word-aligned allocator"); 54 | 55 | import mir.utility : min, max; 56 | 57 | // State 58 | static if (stateSize!ParentAllocator) private ParentAllocator parent; 59 | else private alias parent = ParentAllocator.instance; 60 | private Node* root; // that's the entire added state 61 | 62 | private struct Node 63 | { 64 | Node*[2] kid; 65 | Node* sibling; 66 | size_t size; 67 | ref Node* left() { return kid[0]; } 68 | ref Node* right() { return kid[1]; } 69 | } 70 | 71 | // Removes "which" from the tree, returns the memory it occupied 72 | private void[] remove(ref Node* which) 73 | { 74 | assert(which); 75 | assert(!which.sibling); 76 | auto result = (cast(ubyte*) which)[0 ..
which.size]; 77 | if (!which.right) which = which.left; 78 | else if (!which.left) which = which.right; 79 | else 80 | { 81 | // which has two kids 82 | static bool toggler; 83 | // Crude randomization: alternate left/right choices 84 | toggler = !toggler; 85 | auto newRoot = which.kid[toggler], orphan = which.kid[!toggler]; 86 | which = newRoot; 87 | for (Node* n = void; (n = newRoot.kid[!toggler]) !is null; ) 88 | { 89 | newRoot = n; 90 | } 91 | newRoot.kid[!toggler] = orphan; 92 | } 93 | return result; 94 | } 95 | 96 | private void[] findAndRemove(ref Node* n, size_t s) 97 | { 98 | if (!n) return null; 99 | if (s == n.size) 100 | { 101 | if (auto sis = n.sibling) 102 | { 103 | // Nice, give away one from the freelist 104 | auto result = (cast(ubyte*) sis)[0 .. sis.size]; 105 | n.sibling = sis.sibling; 106 | return result; 107 | } 108 | return remove(n); 109 | } 110 | return findAndRemove(n.kid[s > n.size], s); 111 | } 112 | 113 | debug(std_experimental_allocator_free_tree) 114 | private void dump()() 115 | { 116 | import std.stdio : writef, writefln, writeln; 117 | writeln(typeof(this).stringof, "@", &this, " {"); 118 | scope(exit) writeln("}"); 119 | 120 | if (!root) return; 121 | 122 | static void recurse(Node* n, uint indent = 4) 123 | { 124 | if (!n) 125 | { 126 | writefln("%*s(null)", indent, ""); 127 | return; 128 | } 129 | for (auto sis = n; sis; sis = sis.sibling) 130 | { 131 | writef("%*s%x (%s bytes) ", indent, "", 132 | cast(void*) sis, sis.size); 133 | } 134 | writeln; 135 | if (!n.left && !n.right) return; 136 | recurse(n.left, indent + 4); 137 | recurse(n.right, indent + 4); 138 | } 139 | recurse(root); 140 | } 141 | 142 | private static void rotate(ref Node* parent, bool toRight) 143 | { 144 | assert(parent); 145 | auto opposing = parent.kid[!toRight]; 146 | if (!opposing) return; 147 | parent.kid[!toRight] = opposing.kid[toRight]; 148 | opposing.kid[toRight] = parent; 149 | parent = opposing; 150 | } 151 | 152 | // Inserts which into the tree, making it the new root 153 | private void insertAsRoot(Node* which) 154 | { 155 | assert(which); 156 | debug(std_experimental_allocator_free_tree) 157 | { 158 | assertValid; 159 | scope(exit) assertValid; 160 | } 161 | 162 | static void recurse(ref Node* where, Node* which) 163 | { 164 | if (!where) 165 | { 166 | where = which; 167 | which.left = null; 168 | which.right = null; 169 | which.sibling = null; 170 | return; 171 | } 172 | if (which.size == where.size) 173 | { 174 | // Special handling of duplicates 175 | which.sibling = where.sibling; 176 | where.sibling = which; 177 | which.left = null; 178 | which.right = null; 179 | return; 180 | } 181 | bool goRight = which.size > where.size; 182 | recurse(where.kid[goRight], which); 183 | rotate(where, !goRight); 184 | } 185 | recurse(root, which); 186 | } 187 | 188 | private void assertValid() 189 | { 190 | debug(std_experimental_allocator_free_tree) 191 | { 192 | static bool isBST(Node* n, size_t lb = 0, size_t ub = size_t.max) 193 | { 194 | if (!n) return true; 195 | for (auto sis = n.sibling; sis; sis = sis.sibling) 196 | { 197 | assert(n.size == sis.size); 198 | assert(sis.left is null); 199 | assert(sis.right is null); 200 | } 201 | return lb < n.size && n.size <= ub 202 | && isBST(n.left, lb, min(ub, n.size)) 203 | && isBST(n.right, max(lb, n.size), ub); 204 | } 205 | if (isBST(root)) return; 206 | dump; 207 | assert(0); 208 | } 209 | } 210 | 211 | /** 212 | The $(D FreeTree) is word aligned.
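For example (illustrative):

----
static assert(FreeTree!GCAllocator.alignment == size_t.alignof);
----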
213 | */ 214 | enum uint alignment = size_t.alignof; 215 | 216 | /** 217 | The $(D FreeTree) allocator is noncopyable. 218 | */ 219 | this(this) @disable; 220 | 221 | /** 222 | The destructor of $(D FreeTree) releases all memory back to the parent 223 | allocator. 224 | */ 225 | static if (__traits(hasMember, ParentAllocator, "deallocate")) 226 | ~this() 227 | { 228 | clear; 229 | } 230 | 231 | /** 232 | Returns $(D parent.goodAllocSize(max(Node.sizeof, s))). 233 | */ 234 | static if (stateSize!ParentAllocator) 235 | size_t goodAllocSize(size_t s) 236 | { 237 | return parent.goodAllocSize(max(Node.sizeof, s)); 238 | } 239 | else 240 | static size_t goodAllocSize(size_t s) 241 | { 242 | return parent.goodAllocSize(max(Node.sizeof, s)); 243 | } 244 | 245 | /** 246 | 247 | Allocates $(D n) bytes of memory. First consults the free tree, and returns 248 | from it if a suitably sized block is found. Otherwise, the parent allocator 249 | is tried. If allocation from the parent succeeds, the allocated block is 250 | returned. Otherwise, the free tree tries an alternate strategy: If $(D 251 | ParentAllocator) defines $(D deallocate), $(D FreeTree) releases all of its 252 | contents and tries again. 253 | 254 | TODO: Splitting and coalescing should be implemented if $(D ParentAllocator) does not define $(D deallocate). 255 | 256 | */ 257 | void[] allocate(size_t n) 258 | { 259 | assertValid; 260 | if (n == 0) return null; 261 | 262 | immutable s = goodAllocSize(n); 263 | 264 | // Consult the free tree. 265 | auto result = findAndRemove(root, s); 266 | if (result.ptr) return result.ptr[0 .. n]; 267 | 268 | // No block found, try the parent allocator. 269 | result = parent.allocate(s); 270 | if (result.ptr) return result.ptr[0 .. n]; 271 | 272 | // Parent ran out of juice, desperation mode on 273 | static if (__traits(hasMember, ParentAllocator, "deallocate")) 274 | { 275 | clear; 276 | // Try parent allocator again. 277 | result = parent.allocate(s); 278 | if (result.ptr) return result.ptr[0 .. n]; 279 | return null; 280 | } 281 | else 282 | { 283 | // TODO: get smart here 284 | return null; 285 | } 286 | } 287 | 288 | // Forwarding methods 289 | mixin(forwardToMember("parent", 290 | "allocateAll", "expand", "owns", "reallocate")); 291 | 292 | /** Places $(D b) into the free tree. */ 293 | bool deallocate(void[] b) 294 | { 295 | if (!b.ptr) return true; 296 | auto which = cast(Node*) b.ptr; 297 | which.size = goodAllocSize(b.length); 298 | // deliberately don't initialize which.left and which.right 299 | assert(which.size >= Node.sizeof); 300 | insertAsRoot(which); 301 | return true; 302 | } 303 | 304 | @system unittest // build a complex free tree 305 | { 306 | import stdx.allocator.gc_allocator, std.range; 307 | FreeTree!GCAllocator a; 308 | uint[] sizes = [3008,704,1856,576,1632,672,832,1856,1120,2656,1216,672, 309 | 448,992,2400,1376,2688,2656,736,1440]; 310 | void[][] allocs; 311 | foreach (s; sizes) 312 | allocs ~= a.allocate(s); 313 | foreach_reverse (b; allocs) 314 | { 315 | assert(b.ptr); 316 | a.deallocate(b); 317 | } 318 | a.assertValid; 319 | allocs = null; 320 | foreach (s; sizes) 321 | allocs ~= a.allocate(s); 322 | assert(a.root is null); 323 | a.assertValid; 324 | } 325 | 326 | /** Defined if $(D ParentAllocator.deallocate) exists, and returns to it 327 | all memory held in the free tree.
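A minimal lifecycle sketch (the choice of $(D Mallocator) as parent is illustrative):

----
FreeTree!Mallocator a;
auto b = a.allocate(100);
a.deallocate(b); // the block is now cached in the tree...
a.clear;         // ...and handed back to Mallocator here
----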
*/ 328 | static if (__traits(hasMember, ParentAllocator, "deallocate")) 329 | void clear() 330 | { 331 | void recurse(Node* n) 332 | { 333 | if (!n) return; 334 | recurse(n.left); 335 | recurse(n.right); 336 | parent.deallocate((cast(ubyte*) n)[0 .. n.size]); 337 | } 338 | recurse(root); 339 | root = null; 340 | } 341 | 342 | /** 343 | 344 | Defined if $(D ParentAllocator.deallocateAll) exists, and forwards to it. 345 | Also nullifies the free tree (it's assumed the parent frees all memory 346 | still managed by the free tree). 347 | 348 | */ 349 | static if (__traits(hasMember, ParentAllocator, "deallocateAll")) 350 | bool deallocateAll() 351 | { 352 | // This is easy, just nuke the root and deallocate all from the 353 | // parent 354 | root = null; 355 | return parent.deallocateAll; 356 | } 357 | } 358 | 359 | @system unittest 360 | { 361 | import stdx.allocator.gc_allocator; 362 | testAllocator!(() => FreeTree!GCAllocator()); 363 | } 364 | 365 | @system unittest // issue 16506 366 | { 367 | import stdx.allocator.gc_allocator : GCAllocator; 368 | import stdx.allocator.mallocator : Mallocator; 369 | 370 | static void f(ParentAllocator)(size_t sz) 371 | { 372 | static FreeTree!ParentAllocator myAlloc; 373 | byte[] _payload = cast(byte[]) myAlloc.allocate(sz); 374 | assert(_payload, "_payload is null"); 375 | _payload[] = 0; 376 | myAlloc.deallocate(_payload); 377 | } 378 | 379 | f!Mallocator(33); 380 | f!Mallocator(43); 381 | f!GCAllocator(1); 382 | } 383 | 384 | @system unittest // issue 16507 385 | { 386 | static struct MyAllocator 387 | { 388 | byte dummy; 389 | static bool alive = true; 390 | void[] allocate(size_t s) { return new byte[](s); } 391 | bool deallocate(void[] ) { if (alive) assert(false); return true; } 392 | enum alignment = size_t.sizeof; 393 | } 394 | 395 | FreeTree!MyAllocator ft; 396 | void[] x = ft.allocate(1); 397 | ft.deallocate(x); 398 | ft.allocate(1000); 399 | MyAllocator.alive = false; 400 | } 401 | 402 | @system unittest // "desperation mode" 403 | { 404 | uint myDeallocCounter = 0; 405 | 406 | struct MyAllocator 407 | { 408 | byte[] allocation; 409 | void[] allocate(size_t s) 410 | { 411 | if (allocation.ptr) return null; 412 | allocation = new byte[](s); 413 | return allocation; 414 | } 415 | bool deallocate(void[] ) 416 | { 417 | ++myDeallocCounter; 418 | allocation = null; 419 | return true; 420 | } 421 | enum alignment = size_t.sizeof; 422 | } 423 | 424 | FreeTree!MyAllocator ft; 425 | void[] x = ft.allocate(1); 426 | ft.deallocate(x); 427 | assert(myDeallocCounter == 0); 428 | x = ft.allocate(1000); // Triggers "desperation mode". 429 | assert(myDeallocCounter == 1); 430 | assert(x.ptr); 431 | void[] y = ft.allocate(1000); /* Triggers "desperation mode" but there's 432 | nothing to deallocate so MyAllocator can't deliver. */ 433 | assert(myDeallocCounter == 1); 434 | assert(y.ptr is null); 435 | } 436 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/null_allocator.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.building_blocks.null_allocator; 3 | 4 | /** 5 | $(D NullAllocator) is an emphatically empty implementation of the allocator 6 | interface. Although it has no direct use, it is useful as a "terminator" in 7 | composite allocators. 8 | */ 9 | struct NullAllocator 10 | { 11 | import stdx.allocator.internal : Ternary; 12 | /** 13 | $(D NullAllocator) advertises a relatively large _alignment equal to 64 KB.
14 | This is because $(D NullAllocator) never actually needs to honor this 15 | alignment and because composite allocators using $(D NullAllocator) 16 | shouldn't be unnecessarily constrained. 17 | */ 18 | enum uint alignment = 64 * 1024; 19 | // /// Returns $(D n). 20 | //size_t goodAllocSize(size_t n) shared const 21 | //{ return .goodAllocSize(this, n); } 22 | /// Always returns $(D null). 23 | static void[] allocate()(size_t) { return null; } 24 | /// Always returns $(D null). 25 | static void[] alignedAllocate()(size_t, uint) { return null; } 26 | /// Always returns $(D null). 27 | static void[] allocateAll()() { return null; } 28 | /** 29 | These methods return $(D false) ($(D expand) succeeds only for $(D s == 0)). 30 | Precondition: $(D b is null). This is because there is no other possible 31 | legitimate input. 32 | */ 33 | static bool expand()(ref void[] b, size_t s) 34 | { assert(b is null); return s == 0; } 35 | /// Ditto 36 | static bool reallocate()(ref void[] b, size_t) 37 | { assert(b is null); return false; } 38 | /// Ditto 39 | static bool alignedReallocate()(ref void[] b, size_t, uint) 40 | { assert(b is null); return false; } 41 | /// Returns $(D Ternary.no). 42 | static Ternary owns()(void[]) { return Ternary.no; } 43 | /** 44 | Returns $(D Ternary.no). 45 | */ 46 | static Ternary resolveInternalPointer()(const void*, ref void[]) 47 | { return Ternary.no; } 48 | /** 49 | No-op. 50 | Precondition: $(D b is null) 51 | */ 52 | static bool deallocate()(void[] b) { assert(b is null); return true; } 53 | /** 54 | No-op. 55 | */ 56 | static bool deallocateAll()() { return true; } 57 | /** 58 | Returns $(D Ternary.yes). 59 | */ 60 | static Ternary empty()() { return Ternary.yes; } 61 | /** 62 | Returns the $(D static) global instance of the $(D NullAllocator). 63 | */ 64 | enum NullAllocator instance = NullAllocator(); 65 | } 66 | 67 | @system unittest 68 | { 69 | assert(NullAllocator.instance.alignedAllocate(100, 0) is null); 70 | assert(NullAllocator.instance.allocateAll() is null); 71 | auto b = NullAllocator.instance.allocate(100); 72 | assert(b is null); 73 | assert(NullAllocator.instance.expand(b, 0)); 74 | assert(!NullAllocator.instance.expand(b, 42)); 75 | assert(!NullAllocator.instance.reallocate(b, 42)); 76 | assert(!NullAllocator.instance.alignedReallocate(b, 42, 0)); 77 | NullAllocator.instance.deallocate(b); 78 | assert(NullAllocator.instance.deallocateAll() == true); 79 | 80 | import stdx.allocator.internal : Ternary; 81 | assert(NullAllocator.instance.empty() == Ternary.yes); 82 | assert(NullAllocator.instance.owns(null) == Ternary.no); 83 | void[] p; 84 | assert(NullAllocator.instance.resolveInternalPointer(null, p) == Ternary.no); 85 | } 86 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/package.d: -------------------------------------------------------------------------------- 1 | /** 2 | $(H2 Assembling Your Own Allocator) 3 | 4 | In addition to defining the interfaces above, this package also implements 5 | untyped composable memory allocators. They are $(I untyped) because they deal 6 | exclusively in $(D void[]) and have no notion of what type the memory allocated 7 | would be destined for. They are $(I composable) because the included allocators 8 | are building blocks that can be assembled into complex, nontrivial allocators.
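$(P For example, composing two of the blocks documented later in this module yields an allocator in its own right (a minimal sketch):)

----
FallbackAllocator!(InSituRegion!16_384, GCAllocator) a;
void[] b = a.allocate(100); // untyped memory; the client tracks the size
assert(b.length == 100);
----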
9 | 10 | $(P Unlike the allocators for the C and C++ programming languages, which manage 11 | the allocated size internally, these allocators require that the client 12 | maintains (or knows $(I a priori)) the allocation size for each piece of memory 13 | allocated. Put simply, the client must pass the allocated size upon 14 | deallocation. Storing the size in the _allocator has significant negative 15 | performance implications, and is virtually always redundant because client code 16 | needs knowledge of the allocated size in order to avoid buffer overruns. (See 17 | more discussion in a 18 | $(HTTP open-std.org/JTC1/SC22/WG21/docs/papers/2013/n3536.html, proposal) for sized 19 | deallocation in C++.) For this reason, allocators herein traffic in $(D void[]) 20 | as opposed to $(D void*).) 21 | 22 | $(P In order to be usable as an _allocator, a type should implement the 23 | following methods with their respective semantics. Only $(D alignment) and $(D 24 | allocate) are required. If any of the other methods is missing, the _allocator 25 | is assumed to not have that capability (for example some allocators do not offer 26 | manual deallocation of memory). Allocators should NOT implement 27 | unsupported methods as stubs that always fail. For example, an allocator that lacks the 28 | capability to implement `alignedAllocate` should not define it at all (as 29 | opposed to defining it to always return `null` or throw an exception). The 30 | missing implementation statically informs other components about the 31 | allocator's capabilities and allows them to make design decisions accordingly.) 32 | 33 | $(BOOKTABLE , 34 | $(TR $(TH Method name) $(TH Semantics)) 35 | 36 | $(TR $(TDC uint alignment;, $(POST $(RES) > 0)) $(TD Returns the minimum 37 | alignment of all data returned by the allocator. An allocator may implement $(D 38 | alignment) as a statically-known $(D enum) value only. Applications that need 39 | dynamically-chosen alignment values should use the $(D alignedAllocate) and $(D 40 | alignedReallocate) APIs.)) 41 | 42 | $(TR $(TDC size_t goodAllocSize(size_t n);, $(POST $(RES) >= n)) $(TD Allocators 43 | customarily allocate memory in discretely-sized chunks. Therefore, a request for 44 | $(D n) bytes may result in a larger allocation. The extra memory allocated goes 45 | unused and adds to the so-called $(HTTP goo.gl/YoKffF,internal fragmentation). 46 | The function $(D goodAllocSize(n)) returns the actual number of bytes that would 47 | be allocated upon a request for $(D n) bytes. This module defines a default 48 | implementation that returns $(D n) rounded up to a multiple of the allocator's 49 | alignment.)) 50 | 51 | $(TR $(TDC void[] allocate(size_t s);, $(POST $(RES) is null || $(RES).length == 52 | s)) $(TD If $(D s == 0), the call may return any empty slice (including $(D 53 | null)). Otherwise, the call allocates $(D s) bytes of memory and returns the 54 | allocated block, or $(D null) if the request could not be satisfied.)) 55 | 56 | $(TR $(TDC void[] alignedAllocate(size_t s, uint a);, $(POST $(RES) is null || 57 | $(RES).length == s)) $(TD Similar to `allocate`, with the additional 58 | guarantee that the memory returned is aligned to at least `a` bytes. `a` 59 | must be a power of 2.)) 60 | 61 | $(TR $(TDC void[] allocateAll();) $(TD Offers all of allocator's memory to the 62 | caller, so it's usually defined by fixed-size allocators.
If the allocator is 63 | currently NOT managing any memory, then $(D allocateAll()) shall allocate and 64 | return all memory available to the allocator, and subsequent calls to all 65 | allocation primitives should not succeed (e.g. $(D allocate) shall return $(D 66 | null) etc). Otherwise, $(D allocateAll) only works on a best-effort basis, and 67 | the allocator is allowed to return $(D null) even if does have available memory. 68 | Memory allocated with $(D allocateAll) is not otherwise special (e.g. can be 69 | reallocated or deallocated with the usual primitives, if defined).)) 70 | 71 | $(TR $(TDC bool expand(ref void[] b, size_t delta);, $(POST !$(RES) || b.length 72 | == $(I old)(b).length + delta)) $(TD Expands $(D b) by $(D delta) bytes. If $(D 73 | delta == 0), succeeds without changing $(D b). If $(D b is null), returns 74 | `false` (the null pointer cannot be expanded in place). Otherwise, $(D 75 | b) must be a buffer previously allocated with the same allocator. If expansion 76 | was successful, $(D expand) changes $(D b)'s length to $(D b.length + delta) and 77 | returns $(D true). Upon failure, the call effects no change upon the allocator 78 | object, leaves $(D b) unchanged, and returns $(D false).)) 79 | 80 | $(TR $(TDC bool reallocate(ref void[] b, size_t s);, $(POST !$(RES) || b.length 81 | == s)) $(TD Reallocates $(D b) to size $(D s), possibly moving memory around. 82 | $(D b) must be $(D null) or a buffer allocated with the same allocator. If 83 | reallocation was successful, $(D reallocate) changes $(D b) appropriately and 84 | returns $(D true). Upon failure, the call effects no change upon the allocator 85 | object, leaves $(D b) unchanged, and returns $(D false). An allocator should 86 | implement $(D reallocate) if it can derive some advantage from doing so; 87 | otherwise, this module defines a $(D reallocate) free function implemented in 88 | terms of $(D expand), $(D allocate), and $(D deallocate).)) 89 | 90 | $(TR $(TDC bool alignedReallocate(ref void[] b,$(BR) size_t s, uint a);, $(POST 91 | !$(RES) || b.length == s)) $(TD Similar to $(D reallocate), but guarantees the 92 | reallocated memory is aligned at $(D a) bytes. The buffer must have been 93 | originated with a call to $(D alignedAllocate). $(D a) must be a power of 2 94 | greater than $(D (void*).sizeof). An allocator should implement $(D 95 | alignedReallocate) if it can derive some advantage from doing so; otherwise, 96 | this module defines a $(D alignedReallocate) free function implemented in terms 97 | of $(D expand), $(D alignedAllocate), and $(D deallocate).)) 98 | 99 | $(TR $(TDC Ternary owns(void[] b);) $(TD Returns `Ternary.yes` if `b` has been 100 | allocated with this allocator. An allocator should define this method only if it 101 | can decide on ownership precisely and fast (in constant time, logarithmic time, 102 | or linear time with a low multiplication factor). Traditional allocators such as 103 | the C heap do not define such functionality. If $(D b is null), the allocator 104 | shall return `Ternary.no`, i.e. no allocator owns the `null` slice.)) 105 | 106 | $(TR $(TDC Ternary resolveInternalPointer(void* p, ref void[] result);) $(TD If 107 | `p` is a pointer somewhere inside a block allocated with this allocator, 108 | `result` holds a pointer to the beginning of the allocated block and returns 109 | `Ternary.yes`. Otherwise, `result` holds `null` and returns `Ternary.no`. 
110 | If the pointer points immediately after an allocated block, the result is 111 | implementation defined.)) 112 | 113 | $(TR $(TDC bool deallocate(void[] b);) $(TD If $(D b is null), does 114 | nothing and returns `true`. Otherwise, deallocates memory previously allocated 115 | with this allocator and returns `true` if successful, `false` otherwise. An 116 | implementation that would not support deallocation (i.e. would always return 117 | `false`) should not define this primitive at all.)) 118 | 119 | $(TR $(TDC bool deallocateAll();, $(POST empty)) $(TD Deallocates all memory 120 | allocated with this allocator. If an allocator implements this method, it must 121 | specify whether its destructor calls it, too.)) 122 | 123 | $(TR $(TDC Ternary empty();) $(TD Returns `Ternary.yes` if and only if the 124 | allocator holds no memory (i.e. no allocation has occurred, or all allocations 125 | have been deallocated).)) 126 | 127 | $(TR $(TDC static Allocator instance;, $(POST instance $(I is a valid) 128 | Allocator $(I object))) $(TD Some allocators are $(I monostate), i.e. have only 129 | one instance and hold only global state. (Notable examples are C's own 130 | `malloc`-based allocator and D's garbage-collected heap.) Such allocators must 131 | define a static $(D instance) member that serves as the symbolic placeholder 132 | for the global instance of the allocator. An allocator should not hold state 133 | and define `instance` simultaneously. Depending on whether the allocator is 134 | thread-safe or not, this instance may be $(D shared).)) 135 | ) 136 | 137 | $(H2 Sample Assembly) 138 | 139 | The example below features an _allocator modeled after $(HTTP goo.gl/m7329l, 140 | jemalloc), which uses a battery of free-list allocators spaced so as to keep 141 | internal fragmentation to a minimum. The $(D FList) definitions specify no 142 | bounds for the freelist because the $(D Segregator) does all size selection in 143 | advance. 144 | 145 | Sizes through 3584 bytes are handled via freelists of staggered sizes. Sizes 146 | from 3585 bytes through 4072 KB are handled by a $(D BitmappedBlock) with a 147 | block size of 4 KB. Sizes above that are passed directly to the $(D GCAllocator). 148 | 149 | ---- 150 | alias FList = FreeList!(GCAllocator, 0, unbounded); 151 | alias A = Segregator!( 152 | 8, FreeList!(GCAllocator, 0, 8), 153 | 128, Bucketizer!(FList, 1, 128, 16), 154 | 256, Bucketizer!(FList, 129, 256, 32), 155 | 512, Bucketizer!(FList, 257, 512, 64), 156 | 1024, Bucketizer!(FList, 513, 1024, 128), 157 | 2048, Bucketizer!(FList, 1025, 2048, 256), 158 | 3584, Bucketizer!(FList, 2049, 3584, 512), 159 | 4072 * 1024, AllocatorList!( 160 | () => BitmappedBlock!(GCAllocator, 4096)(4072 * 1024)), 161 | GCAllocator 162 | ); 163 | A tuMalloc; 164 | auto b = tuMalloc.allocate(500); 165 | assert(b.length == 500); 166 | auto c = tuMalloc.allocate(113); 167 | assert(c.length == 113); 168 | assert(tuMalloc.expand(c, 14)); 169 | tuMalloc.deallocate(b); 170 | tuMalloc.deallocate(c); 171 | ---- 172 | 173 | $(H2 Allocating memory for sharing across threads) 174 | 175 | One allocation pattern used in multithreaded applications is to share memory 176 | across threads, and to deallocate blocks in a different thread than the one that 177 | allocated it. 178 | 179 | All allocators in this module accept and return $(D void[]) (as opposed to 180 | $(D shared void[])).
This is because at the time of allocation, deallocation, or 181 | reallocation, the memory is effectively not $(D shared) (if it were, it would 182 | reveal a bug at the application level). 183 | 184 | The issue remains of calling $(D a.deallocate(b)) from a different thread than 185 | the one that allocated $(D b). It follows that both threads must have access to 186 | the same instance $(D a) of the respective allocator type. By definition of D, 187 | this is possible only if $(D a) has the $(D shared) qualifier. It follows that 188 | the allocator type must implement $(D allocate) and $(D deallocate) as $(D 189 | shared) methods. That way, the allocator commits to allowing usable $(D shared) 190 | instances. 191 | 192 | Conversely, allocating memory with one non-$(D shared) allocator, passing it 193 | across threads (by casting the obtained buffer to $(D shared)), and later 194 | deallocating it in a different thread (either with a different allocator object 195 | or with the same allocator object after casting it to $(D shared)) is illegal. 196 | 197 | $(H2 Building Blocks) 198 | 199 | $(P The table below gives a synopsis of predefined allocator building blocks, 200 | with their respective modules. Either `import` the needed modules individually, 201 | or `import` `stdx.allocator.building_blocks`, which imports them all 202 | `public`ly. The building blocks can be assembled in unbounded ways and also 203 | combined with your own. For a collection of typical and useful preassembled 204 | allocators and for inspiration in defining more such assemblies, refer to 205 | $(MREF std,experimental,allocator,showcase).) 206 | 207 | $(BOOKTABLE, 208 | $(TR $(TH Allocator$(BR)) $(TH Description)) 209 | 210 | $(TR $(TDC2 NullAllocator, null_allocator) $(TD Very good at doing absolutely nothing. A good 211 | starting point for defining other allocators or for studying the API.)) 212 | 213 | $(TR $(TDC3 GCAllocator, gc_allocator) $(TD The system-provided garbage-collector allocator. 214 | This should be the default fallback allocator tapping into system memory. It 215 | offers manual $(D free) and dutifully collects litter.)) 216 | 217 | $(TR $(TDC3 Mallocator, mallocator) $(TD The C heap _allocator, a.k.a. $(D 218 | malloc)/$(D realloc)/$(D free). Use sparingly and only for code that is unlikely 219 | to leak.)) 220 | 221 | $(TR $(TDC3 AlignedMallocator, mallocator) $(TD Interface to OS-specific _allocators that 222 | support specifying alignment: 223 | $(HTTP man7.org/linux/man-pages/man3/posix_memalign.3.html, $(D posix_memalign)) 224 | on Posix and $(HTTP msdn.microsoft.com/en-us/library/fs9stz4e(v=vs.80).aspx, 225 | $(D __aligned_xxx)) on Windows.)) 226 | 227 | $(TR $(TDC2 AffixAllocator, affix_allocator) $(TD Allocator that allows and manages allocating 228 | extra prefix and/or suffix bytes for each block allocated.)) 229 | 230 | $(TR $(TDC2 BitmappedBlock, bitmapped_block) $(TD Organizes one contiguous chunk of memory in 231 | equal-size blocks and tracks allocation status at the cost of one bit per 232 | block.)) 233 | 234 | $(TR $(TDC2 FallbackAllocator, fallback_allocator) $(TD Allocator that combines two other allocators: 235 | primary and fallback. Allocation requests are first tried with primary, and 236 | upon failure are passed to the fallback. Useful for small and fast allocators 237 | fronting general-purpose ones.)) 238 | 239 | $(TR $(TDC2 FreeList, free_list) $(TD Allocator that implements a $(HTTP 240 | wikipedia.org/wiki/Free_list, free list) on top of any other allocator.
The 241 | preferred size, tolerance, and maximum elements are configurable at compile time and 242 | run time.)) 243 | 244 | $(TR $(TDC2 SharedFreeList, free_list) $(TD Same features as $(D FreeList), but packaged as 245 | a $(D shared) structure that is accessible to several threads.)) 246 | 247 | $(TR $(TDC2 FreeTree, free_tree) $(TD Allocator similar to $(D FreeList) that uses a 248 | binary search tree to adaptively store not one, but many free lists.)) 249 | 250 | $(TR $(TDC2 Region, region) $(TD Region allocator organizes a chunk of memory as a 251 | simple bump-the-pointer allocator.)) 252 | 253 | $(TR $(TDC2 InSituRegion, region) $(TD Region holding its own allocation, most often on 254 | the stack. Has statically-determined size.)) 255 | 256 | $(TR $(TDC2 SbrkRegion, region) $(TD Region using $(D $(LINK2 https://en.wikipedia.org/wiki/Sbrk, 257 | sbrk)) for allocating memory.)) 258 | 259 | $(TR $(TDC3 MmapAllocator, mmap_allocator) $(TD Allocator using 260 | $(D $(LINK2 https://en.wikipedia.org/wiki/Mmap, mmap)) directly.)) 261 | 262 | $(TR $(TDC2 StatsCollector, stats_collector) $(TD Collect statistics about any other 263 | allocator.)) 264 | 265 | $(TR $(TDC2 Quantizer, quantizer) $(TD Allocates in coarse-grained quanta, thus 266 | improving performance of reallocations by often reallocating in place. The drawback is higher memory consumption caused by slack (allocated but unused) memory.)) 267 | 268 | $(TR $(TDC2 AllocatorList, allocator_list) $(TD Given an allocator factory, lazily creates as 269 | many allocators as needed to satisfy allocation requests. The allocators are 270 | stored in a linked list. Requests for allocation are satisfied by searching the 271 | list in a linear manner.)) 272 | 273 | $(TR $(TDC2 Segregator, segregator) $(TD Segregates allocation requests by size 274 | and dispatches them to distinct allocators.)) 275 | 276 | $(TR $(TDC2 Bucketizer, bucketizer) $(TD Divides allocation sizes into discrete buckets and 277 | uses an array of allocators, one per bucket, to satisfy requests.)) 278 | 279 | $(COMMENT $(TR $(TDC2 InternalPointersTree) $(TD Adds support for resolving internal 280 | pointers on top of another allocator.))) 281 | ) 282 | 283 | Macros: 284 | MYREF2 = $(REF_SHORT $1, std,experimental,allocator,building_blocks,$2) 285 | MYREF3 = $(REF_SHORT $1, std,experimental,allocator,$2) 286 | TDC = $(TDNW $(D $1)$+) 287 | TDC2 = $(TDNW $(D $(MYREF2 $1,$+))$(BR)$(SMALL 288 | $(D stdx.allocator.building_blocks.$2))) 289 | TDC3 = $(TDNW $(D $(MYREF3 $1,$+))$(BR)$(SMALL 290 | $(D stdx.allocator.$2))) 291 | RES = $(I result) 292 | POST = $(BR)$(SMALL $(I Post:) $(BLUE $(D $0))) 293 | */ 294 | 295 | module stdx.allocator.building_blocks; 296 | 297 | public import 298 | stdx.allocator.building_blocks.affix_allocator, 299 | stdx.allocator.building_blocks.allocator_list, 300 | stdx.allocator.building_blocks.bucketizer, 301 | stdx.allocator.building_blocks.fallback_allocator, 302 | stdx.allocator.building_blocks.free_list, 303 | stdx.allocator.building_blocks.free_tree, 304 | stdx.allocator.gc_allocator, 305 | stdx.allocator.building_blocks.bitmapped_block, 306 | stdx.allocator.building_blocks.kernighan_ritchie, 307 | stdx.allocator.mallocator, 308 | stdx.allocator.mmap_allocator, 309 | stdx.allocator.building_blocks.null_allocator, 310 | stdx.allocator.building_blocks.quantizer, 311 | stdx.allocator.building_blocks.region, 312 | stdx.allocator.building_blocks.segregator, 313 | stdx.allocator.building_blocks.stats_collector; 314 | 
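// Illustrative smoke test (an addition, not part of the original module):
// thanks to the public imports above, the building blocks can be composed
// directly after importing this single package.
@system unittest
{
    import stdx.allocator.internal : Ternary;
    FallbackAllocator!(InSituRegion!1024, GCAllocator) a;
    auto b = a.allocate(42);
    assert(b.length == 42);
    assert(a.primary.owns(b) == Ternary.yes);
    a.deallocate(b);
}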
-------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/quantizer.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.building_blocks.quantizer; 3 | 4 | import stdx.allocator.common; 5 | 6 | /** 7 | This allocator sits on top of $(D ParentAllocator) and quantizes allocation 8 | sizes, usually from arbitrary positive numbers to a small set of round numbers 9 | (e.g. powers of two, page sizes etc). This technique is commonly used to: 10 | 11 | $(UL 12 | $(LI Preallocate more memory than requested such that later on, when 13 | reallocation is needed (e.g. to grow an array), expansion can be done quickly 14 | in place. Reallocation to smaller sizes is also fast (in-place) when the new 15 | size requested is within the same quantum as the existing size. Code that's 16 | reallocation-heavy can therefore benefit from fronting a generic allocator 17 | with a $(D Quantizer). These advantages are present even if 18 | $(D ParentAllocator) does not support reallocation at all.) 19 | $(LI Improve behavior of allocators sensitive to allocation sizes, such as $(D 20 | FreeList) and $(D FreeTree). Rounding allocation requests up makes for smaller 21 | free lists/trees at the cost of slack memory (internal fragmentation).) 22 | ) 23 | 24 | The following methods are forwarded to the parent allocator if present: 25 | $(D allocateAll), $(D owns), $(D deallocateAll), $(D empty). 26 | 27 | Preconditions: $(D roundingFunction) must satisfy three constraints. These are 28 | not enforced (save for the use of $(D assert)) for the sake of efficiency. 29 | $(OL 30 | $(LI $(D roundingFunction(n) >= n) for all $(D n) of type $(D size_t);) 31 | $(LI $(D roundingFunction) must be monotonically increasing, i.e. $(D 32 | roundingFunction(n1) <= roundingFunction(n2)) for all $(D n1 < n2);) 33 | $(LI $(D roundingFunction) must be $(D pure), i.e. always return the same 34 | value for a given $(D n).) 35 | ) 36 | */ 37 | struct Quantizer(ParentAllocator, alias roundingFunction) 38 | { 39 | 40 | /** 41 | The parent allocator. Depending on whether $(D ParentAllocator) holds state 42 | or not, this is a member variable or an alias for 43 | `ParentAllocator.instance`. 44 | */ 45 | static if (stateSize!ParentAllocator) 46 | { 47 | ParentAllocator parent; 48 | } 49 | else 50 | { 51 | alias parent = ParentAllocator.instance; 52 | enum Quantizer instance = Quantizer(); 53 | } 54 | 55 | /** 56 | Returns $(D roundingFunction(n)). 57 | */ 58 | size_t goodAllocSize(size_t n) 59 | { 60 | auto result = roundingFunction(n); 61 | assert(result >= n); 62 | return result; 63 | } 64 | 65 | /** 66 | Alignment is identical to that of the parent. 67 | */ 68 | enum alignment = ParentAllocator.alignment; 69 | 70 | /** 71 | Gets a larger buffer $(D buf) by calling 72 | $(D parent.allocate(goodAllocSize(n))). If $(D buf) is $(D null), returns 73 | $(D null). Otherwise, returns $(D buf[0 .. n]). 74 | */ 75 | void[] allocate(size_t n) 76 | { 77 | auto result = parent.allocate(goodAllocSize(n)); 78 | return result.ptr ? result.ptr[0 .. n] : null; 79 | } 80 | 81 | /** 82 | Defined only if $(D parent.alignedAllocate) exists and works similarly to 83 | $(D allocate) by forwarding to 84 | $(D parent.alignedAllocate(goodAllocSize(n), a)). 
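For instance (a sketch; $(D q) stands for a $(D Quantizer) whose rounding function rounds up to multiples of 4096):

----
auto buf = q.alignedAllocate(100, 64);
// 4096 bytes were requested from the parent; the returned slice is
// trimmed back to the 100 bytes asked for.
assert(buf.length == 100);
----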
85 | */ 86 | static if (__traits(hasMember, ParentAllocator, "alignedAllocate")) 87 | void[] alignedAllocate(size_t n, uint a) 88 | { 89 | auto result = parent.alignedAllocate(goodAllocSize(n), a); 90 | return result.ptr ? result.ptr[0 .. n] : null; 91 | } 92 | 93 | /** 94 | First checks whether there's enough slack memory preallocated for $(D b) 95 | by evaluating $(D b.length + delta <= goodAllocSize(b.length)). If that's 96 | the case, expands $(D b) in place. Otherwise, attempts to use 97 | $(D parent.expand) appropriately if present. 98 | */ 99 | bool expand(ref void[] b, size_t delta) 100 | { 101 | if (!b.ptr) return delta == 0; 102 | immutable allocated = goodAllocSize(b.length), 103 | needed = b.length + delta, 104 | neededAllocation = goodAllocSize(needed); 105 | assert(b.length <= allocated); 106 | assert(needed <= neededAllocation); 107 | assert(allocated <= neededAllocation); 108 | // Second test needed because expand must work for null pointers, too. 109 | if (allocated == neededAllocation) 110 | { 111 | // Nice! 112 | b = b.ptr[0 .. needed]; 113 | return true; 114 | } 115 | // Hail Mary 116 | static if (__traits(hasMember, ParentAllocator, "expand")) 117 | { 118 | // Expand to the appropriate quantum 119 | auto original = b.ptr[0 .. allocated]; 120 | assert(goodAllocSize(needed) >= allocated); 121 | if (!parent.expand(original, neededAllocation - allocated)) 122 | return false; 123 | // Dial back the size 124 | b = original.ptr[0 .. needed]; 125 | return true; 126 | } 127 | else 128 | { 129 | return false; 130 | } 131 | } 132 | 133 | /** 134 | Expands or shrinks allocated block to an allocated size of $(D 135 | goodAllocSize(s)). Expansion occurs in place under the conditions required 136 | by $(D expand). Shrinking occurs in place if $(D goodAllocSize(b.length) 137 | == goodAllocSize(s)). 138 | */ 139 | bool reallocate(ref void[] b, size_t s) 140 | { 141 | if (!b.ptr) 142 | { 143 | b = allocate(s); 144 | return b.length == s; 145 | } 146 | if (s >= b.length && expand(b, s - b.length)) return true; 147 | immutable toAllocate = goodAllocSize(s), 148 | allocated = goodAllocSize(b.length); 149 | // Are the lengths within the same quantum? 150 | if (allocated == toAllocate) 151 | { 152 | // Reallocation (whether up or down) will be done in place 153 | b = b.ptr[0 .. s]; 154 | return true; 155 | } 156 | // Defer to parent (or global) with quantized size 157 | auto original = b.ptr[0 .. allocated]; 158 | if (!parent.reallocate(original, toAllocate)) return false; 159 | b = original.ptr[0 .. s]; 160 | return true; 161 | } 162 | 163 | /** 164 | Defined only if $(D ParentAllocator.alignedAllocate) exists. Expansion 165 | occurs in place under the conditions required by $(D expand). Shrinking 166 | occurs in place if $(D goodAllocSize(b.length) == goodAllocSize(s)). 167 | */ 168 | static if (__traits(hasMember, ParentAllocator, "alignedAllocate")) 169 | bool alignedReallocate(ref void[] b, size_t s, uint a) 170 | { 171 | if (!b.ptr) 172 | { 173 | b = alignedAllocate(s, a); 174 | return b.length == s; 175 | } 176 | if (s >= b.length && expand(b, s - b.length)) return true; 177 | immutable toAllocate = goodAllocSize(s), 178 | allocated = goodAllocSize(b.length); 179 | // Are the lengths within the same quantum? 180 | if (allocated == toAllocate) 181 | { 182 | assert(b.ptr); // code above must have caught this 183 | // Reallocation (whether up or down) will be done in place 184 | b = b.ptr[0 ..
s]; 185 | return true; 186 | } 187 | // Defer to parent (or global) with quantized size 188 | auto original = b.ptr[0 .. allocated]; 189 | if (!parent.alignedReallocate(original, toAllocate, a)) return false; 190 | b = original.ptr[0 .. s]; 191 | return true; 192 | } 193 | 194 | /** 195 | Defined if $(D ParentAllocator.deallocate) exists and forwards to 196 | $(D parent.deallocate(b.ptr[0 .. goodAllocSize(b.length)])). 197 | */ 198 | static if (__traits(hasMember, ParentAllocator, "deallocate")) 199 | bool deallocate(void[] b) 200 | { 201 | if (!b.ptr) return true; 202 | return parent.deallocate(b.ptr[0 .. goodAllocSize(b.length)]); 203 | } 204 | 205 | // Forwarding methods 206 | mixin(forwardToMember("parent", 207 | "allocateAll", "owns", "deallocateAll", "empty")); 208 | } 209 | 210 | /// 211 | @system unittest 212 | { 213 | import stdx.allocator.building_blocks.free_tree : FreeTree; 214 | import stdx.allocator.gc_allocator : GCAllocator; 215 | 216 | size_t roundUpToMultipleOf(size_t s, uint base) 217 | { 218 | auto rem = s % base; 219 | return rem ? s + base - rem : s; 220 | } 221 | 222 | // Quantize small allocations to a multiple of cache line, large ones to a 223 | // multiple of page size 224 | alias MyAlloc = Quantizer!( 225 | FreeTree!GCAllocator, 226 | n => roundUpToMultipleOf(n, n <= 16_384 ? 64 : 4096)); 227 | MyAlloc alloc; 228 | const buf = alloc.allocate(256); 229 | assert(buf.ptr); 230 | } 231 | 232 | @system unittest 233 | { 234 | import stdx.allocator.gc_allocator : GCAllocator; 235 | alias MyAlloc = Quantizer!(GCAllocator, 236 | (size_t n) => n.roundUpToMultipleOf(64)); 237 | testAllocator!(() => MyAlloc()); 238 | } 239 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/scoped_allocator.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.building_blocks.scoped_allocator; 3 | 4 | import stdx.allocator.common; 5 | 6 | /** 7 | 8 | $(D ScopedAllocator) delegates all allocation requests to $(D ParentAllocator). 9 | When destroyed, the $(D ScopedAllocator) object automatically calls $(D 10 | deallocate) for all memory allocated through its lifetime. (The $(D 11 | deallocateAll) function is also implemented with the same semantics.) 12 | 13 | $(D deallocate) is also supported, which is where most implementation effort 14 | and overhead of $(D ScopedAllocator) go. If $(D deallocate) is not needed, a 15 | simpler design combining $(D AllocatorList) with $(D Region) is recommended. 16 | 17 | */ 18 | struct ScopedAllocator(ParentAllocator) 19 | { 20 | @system unittest 21 | { 22 | testAllocator!(() => ScopedAllocator()); 23 | } 24 | 25 | import stdx.allocator.building_blocks.affix_allocator 26 | : AffixAllocator; 27 | import stdx.allocator.internal : Ternary; 28 | 29 | private struct Node 30 | { 31 | Node* prev; 32 | Node* next; 33 | size_t length; 34 | } 35 | 36 | alias Allocator = AffixAllocator!(ParentAllocator, Node); 37 | 38 | // state 39 | /** 40 | If $(D ParentAllocator) is stateful, $(D parent) is a property giving access 41 | to an $(D AffixAllocator!ParentAllocator). Otherwise, $(D parent) is an alias for `AffixAllocator!ParentAllocator.instance`. 42 | */ 43 | static if (stateSize!ParentAllocator) 44 | { 45 | Allocator parent; 46 | } 47 | else 48 | { 49 | alias parent = Allocator.instance; 50 | } 51 | private Node* root; 52 | 53 | /** 54 | $(D ScopedAllocator) is not copyable. 
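For example:

----
ScopedAllocator!Mallocator a;
// auto b = a; // does not compile: copying is disabled
----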
55 | */ 56 | @disable this(this); 57 | 58 | /** 59 | $(D ScopedAllocator)'s destructor releases all memory allocated during its 60 | lifetime. 61 | */ 62 | ~this() 63 | { 64 | deallocateAll; 65 | } 66 | 67 | /// Alignment offered 68 | enum alignment = Allocator.alignment; 69 | 70 | /** 71 | Forwards to $(D parent.goodAllocSize) (which accounts for the management 72 | overhead). 73 | */ 74 | size_t goodAllocSize(size_t n) 75 | { 76 | return parent.goodAllocSize(n); 77 | } 78 | 79 | /** 80 | Allocates memory. For management it actually allocates extra memory from 81 | the parent. 82 | */ 83 | void[] allocate(size_t n) 84 | { 85 | auto b = parent.allocate(n); 86 | if (!b.ptr) return b; 87 | Node* toInsert = & parent.prefix(b); 88 | toInsert.prev = null; 89 | toInsert.next = root; 90 | toInsert.length = n; 91 | assert(!root || !root.prev); 92 | if (root) root.prev = toInsert; 93 | root = toInsert; 94 | return b; 95 | } 96 | 97 | /** 98 | Forwards to $(D parent.expand(b, delta)). 99 | */ 100 | static if (__traits(hasMember, Allocator, "expand")) 101 | bool expand(ref void[] b, size_t delta) 102 | { 103 | auto result = parent.expand(b, delta); 104 | if (result && b.ptr) 105 | { 106 | parent.prefix(b).length = b.length; 107 | } 108 | return result; 109 | } 110 | 111 | /** 112 | Reallocates $(D b) to new size $(D s). 113 | */ 114 | bool reallocate(ref void[] b, size_t s) 115 | { 116 | // Remove from list 117 | if (b.ptr) 118 | { 119 | Node* n = & parent.prefix(b); 120 | if (n.prev) n.prev.next = n.next; 121 | else root = n.next; 122 | if (n.next) n.next.prev = n.prev; 123 | } 124 | auto result = parent.reallocate(b, s); 125 | // Add back to list 126 | if (b.ptr) 127 | { 128 | Node* n = & parent.prefix(b); 129 | n.prev = null; 130 | n.next = root; 131 | n.length = s; 132 | if (root) root.prev = n; 133 | root = n; 134 | } 135 | return result; 136 | } 137 | 138 | /** 139 | Forwards to $(D parent.owns(b)). 140 | */ 141 | static if (__traits(hasMember, Allocator, "owns")) 142 | Ternary owns(void[] b) 143 | { 144 | return parent.owns(b); 145 | } 146 | 147 | /** 148 | Deallocates $(D b). 149 | */ 150 | static if (__traits(hasMember, Allocator, "deallocate")) 151 | bool deallocate(void[] b) 152 | { 153 | // Remove from list 154 | if (b.ptr) 155 | { 156 | Node* n = & parent.prefix(b); 157 | if (n.prev) n.prev.next = n.next; 158 | else root = n.next; 159 | if (n.next) n.next.prev = n.prev; 160 | } 161 | return parent.deallocate(b); 162 | } 163 | 164 | /** 165 | Deallocates all memory allocated. 166 | */ 167 | bool deallocateAll() 168 | { 169 | bool result = true; 170 | for (auto n = root; n; ) 171 | { 172 | void* p = n + 1; 173 | auto length = n.length; 174 | n = n.next; 175 | if (!parent.deallocate(p[0 .. length])) 176 | result = false; 177 | } 178 | root = null; 179 | return result; 180 | } 181 | 182 | /** 183 | Returns `Ternary.yes` if this allocator is not responsible for any memory, 184 | `Ternary.no` otherwise. (Never returns `Ternary.unknown`.) 
185 | */ 186 | Ternary empty() const 187 | { 188 | return Ternary(root is null); 189 | } 190 | } 191 | 192 | /// 193 | @system unittest 194 | { 195 | import stdx.allocator.mallocator : Mallocator; 196 | import stdx.allocator.internal : Ternary; 197 | ScopedAllocator!Mallocator alloc; 198 | assert(alloc.empty == Ternary.yes); 199 | const b = alloc.allocate(10); 200 | assert(b.length == 10); 201 | assert(alloc.empty == Ternary.no); 202 | } 203 | 204 | @system unittest 205 | { 206 | import stdx.allocator.gc_allocator : GCAllocator; 207 | testAllocator!(() => ScopedAllocator!GCAllocator()); 208 | } 209 | 210 | @system unittest // https://issues.dlang.org/show_bug.cgi?id=16046 211 | { 212 | import stdx.allocator; 213 | import stdx.allocator.mallocator; 214 | ScopedAllocator!Mallocator alloc; 215 | auto foo = alloc.make!int(1); 216 | auto bar = alloc.make!int(2); 217 | assert(foo); 218 | assert(bar); 219 | alloc.dispose(foo); 220 | alloc.dispose(bar); // segfault here 221 | } 222 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/segregator.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.building_blocks.segregator; 3 | 4 | import stdx.allocator.common; 5 | 6 | /** 7 | Dispatches allocations (and deallocations) between two allocators ($(D 8 | SmallAllocator) and $(D LargeAllocator)) depending on the size allocated, as 9 | follows. All allocations smaller than or equal to $(D threshold) will be 10 | dispatched to $(D SmallAllocator). The others will go to $(D LargeAllocator). 11 | 12 | If both allocators are $(D shared), the $(D Segregator) will also offer $(D 13 | shared) methods. 14 | */ 15 | struct Segregator(size_t threshold, SmallAllocator, LargeAllocator) 16 | { 17 | import mir.utility : min; 18 | import stdx.allocator.internal : Ternary; 19 | 20 | static if (stateSize!SmallAllocator) private SmallAllocator _small; 21 | else private alias _small = SmallAllocator.instance; 22 | static if (stateSize!LargeAllocator) private LargeAllocator _large; 23 | else private alias _large = LargeAllocator.instance; 24 | 25 | version (StdDdoc) 26 | { 27 | /** 28 | The alignment offered is the minimum of the two allocators' alignment. 29 | */ 30 | enum uint alignment; 31 | /** 32 | This method is defined only if at least one of the allocators defines 33 | it. The good allocation size is obtained from $(D SmallAllocator) if $(D 34 | s <= threshold), or $(D LargeAllocator) otherwise. (If one of the 35 | allocators does not define $(D goodAllocSize), the default 36 | implementation in this module applies.) 37 | */ 38 | static size_t goodAllocSize(size_t s); 39 | /** 40 | The memory is obtained from $(D SmallAllocator) if $(D s <= threshold), 41 | or $(D LargeAllocator) otherwise. 42 | */ 43 | void[] allocate(size_t); 44 | /** 45 | This method is defined if both allocators define it, and forwards to 46 | $(D SmallAllocator) or $(D LargeAllocator) appropriately. 47 | */ 48 | void[] alignedAllocate(size_t, uint); 49 | /** 50 | This method is defined only if at least one of the allocators defines 51 | it. If $(D SmallAllocator) defines $(D expand) and $(D b.length + 52 | delta <= threshold), the call is forwarded to $(D SmallAllocator). If $(D 53 | LargeAllocator) defines $(D expand) and $(D b.length > threshold), the 54 | call is forwarded to $(D LargeAllocator). Otherwise, the call returns 55 | $(D false). 
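A sketch of the threshold logic (a hypothetical pairing; it assumes, as in this package, that $(D Mallocator) lacks $(D expand) while $(D GCAllocator) defines it):

----
Segregator!(1024, Mallocator, GCAllocator) a;
auto b = a.allocate(1000); // served by the small allocator
// Growing to 1100 bytes would cross the threshold, so expand refuses
// rather than migrate the block:
assert(!a.expand(b, 100));
----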
56 | */ 57 | bool expand(ref void[] b, size_t delta); 58 | /** 59 | This method is defined only if at least one of the allocators defines 60 | it. If $(D SmallAllocator) defines $(D reallocate) and $(D b.length <= 61 | threshold && s <= threshold), the call is forwarded to $(D 62 | SmallAllocator). If $(D LargeAllocator) defines $(D reallocate) and $(D 63 | b.length > threshold && s > threshold), the call is forwarded to $(D 64 | LargeAllocator). Otherwise, a cross-allocator move via this package's $(D reallocate) free function is attempted. 65 | */ 66 | bool reallocate(ref void[] b, size_t s); 67 | /** 68 | This method is defined only if at least one of the allocators defines 69 | it, and works similarly to $(D reallocate). 70 | */ 71 | bool alignedReallocate(ref void[] b, size_t s, uint a); 72 | /** 73 | This method is defined only if both allocators define it. The call is 74 | forwarded to $(D SmallAllocator) if $(D b.length <= threshold), or $(D 75 | LargeAllocator) otherwise. 76 | */ 77 | Ternary owns(void[] b); 78 | /** 79 | This function is defined only if both allocators define it, and forwards 80 | appropriately depending on $(D b.length). 81 | */ 82 | bool deallocate(void[] b); 83 | /** 84 | This function is defined only if both allocators define it, and calls 85 | $(D deallocateAll) for them in turn. 86 | */ 87 | bool deallocateAll(); 88 | /** 89 | This function is defined only if both allocators define it, and returns 90 | the conjunction of $(D empty) calls for the two. 91 | */ 92 | Ternary empty(); 93 | } 94 | 95 | /** 96 | Composite allocators involving nested instantiations of $(D Segregator) make 97 | it difficult to access individual sub-allocators stored within. $(D 98 | allocatorForSize) simplifies the task by supplying the allocator nested 99 | inside a $(D Segregator) that is responsible for a specific size $(D s). 100 | 101 | Example: 102 | ---- 103 | alias A = Segregator!(300, 104 | Segregator!(200, A1, A2), 105 | A3); 106 | A a; 107 | static assert(is(typeof(a.allocatorForSize!10) == A1)); 108 | static assert(is(typeof(a.allocatorForSize!250) == A2)); 109 | static assert(is(typeof(a.allocatorForSize!301) == A3)); 110 | ---- 111 | */ 112 | ref auto allocatorForSize(size_t s)() 113 | { 114 | static if (s <= threshold) 115 | static if (is(SmallAllocator == Segregator!(Args), Args...)) 116 | return _small.allocatorForSize!s; 117 | else return _small; 118 | else 119 | static if (is(LargeAllocator == Segregator!(Args), Args...)) 120 | return _large.allocatorForSize!s; 121 | else return _large; 122 | } 123 | 124 | enum uint alignment = min(SmallAllocator.alignment, 125 | LargeAllocator.alignment); 126 | 127 | private template Impl() 128 | { 129 | size_t goodAllocSize(size_t s) 130 | { 131 | return s <= threshold 132 | ? _small.goodAllocSize(s) 133 | : _large.goodAllocSize(s); 134 | } 135 | 136 | void[] allocate(size_t s) 137 | { 138 | return s <= threshold ? _small.allocate(s) : _large.allocate(s); 139 | } 140 | 141 | static if (__traits(hasMember, SmallAllocator, "alignedAllocate") 142 | && __traits(hasMember, LargeAllocator, "alignedAllocate")) 143 | void[] alignedAllocate(size_t s, uint a) 144 | { 145 | return s <= threshold 146 | ?
146 |                 ? _small.alignedAllocate(s, a)
147 |                 : _large.alignedAllocate(s, a);
148 |         }
149 | 
150 |         static if (__traits(hasMember, SmallAllocator, "expand")
151 |                 || __traits(hasMember, LargeAllocator, "expand"))
152 |         bool expand(ref void[] b, size_t delta)
153 |         {
154 |             if (!delta) return true;
155 |             if (b.length + delta <= threshold)
156 |             {
157 |                 // Old and new allocations handled by _small
158 |                 static if (__traits(hasMember, SmallAllocator, "expand"))
159 |                     return _small.expand(b, delta);
160 |                 else
161 |                     return false;
162 |             }
163 |             if (b.length > threshold)
164 |             {
165 |                 // Old and new allocations handled by _large
166 |                 static if (__traits(hasMember, LargeAllocator, "expand"))
167 |                     return _large.expand(b, delta);
168 |                 else
169 |                     return false;
170 |             }
171 |             // Oops, cross-allocator transgression
172 |             return false;
173 |         }
174 | 
175 |         static if (__traits(hasMember, SmallAllocator, "reallocate")
176 |                 || __traits(hasMember, LargeAllocator, "reallocate"))
177 |         bool reallocate(ref void[] b, size_t s)
178 |         {
179 |             static if (__traits(hasMember, SmallAllocator, "reallocate"))
180 |                 if (b.length <= threshold && s <= threshold)
181 |                 {
182 |                     // Old and new allocations handled by _small
183 |                     return _small.reallocate(b, s);
184 |                 }
185 |             static if (__traits(hasMember, LargeAllocator, "reallocate"))
186 |                 if (b.length > threshold && s > threshold)
187 |                 {
188 |                     // Old and new allocations handled by _large
189 |                     return _large.reallocate(b, s);
190 |                 }
191 |             // Cross-allocator transgression
192 |             static if (!__traits(hasMember, typeof(this), "instance"))
193 |                 return .reallocate(this, b, s);
194 |             else
195 |                 return .reallocate(instance, b, s);
196 |         }
197 | 
198 |         static if (__traits(hasMember, SmallAllocator, "alignedReallocate")
199 |                 || __traits(hasMember, LargeAllocator, "alignedReallocate"))
200 |         bool alignedReallocate(ref void[] b, size_t s, uint a)
201 |         {
202 |             static if (__traits(hasMember, SmallAllocator, "alignedReallocate"))
203 |                 if (b.length <= threshold && s <= threshold)
204 |                 {
205 |                     // Old and new allocations handled by _small
206 |                     return _small.alignedReallocate(b, s, a);
207 |                 }
208 |             static if (__traits(hasMember, LargeAllocator, "alignedReallocate"))
209 |                 if (b.length > threshold && s > threshold)
210 |                 {
211 |                     // Old and new allocations handled by _large
212 |                     return _large.alignedReallocate(b, s, a);
213 |                 }
214 |             // Cross-allocator transgression
215 |             static if (!__traits(hasMember, typeof(this), "instance"))
216 |                 return .alignedReallocate(this, b, s, a);
217 |             else
218 |                 return .alignedReallocate(instance, b, s, a);
219 |         }
220 | 
221 |         static if (__traits(hasMember, SmallAllocator, "owns")
222 |                 && __traits(hasMember, LargeAllocator, "owns"))
223 |         Ternary owns(void[] b)
224 |         {
225 |             return Ternary(b.length <= threshold
226 |                 ? _small.owns(b) : _large.owns(b));
227 |         }
228 | 
229 |         static if (__traits(hasMember, SmallAllocator, "deallocate")
230 |                 && __traits(hasMember, LargeAllocator, "deallocate"))
231 |         bool deallocate(void[] data)
232 |         {
233 |             return data.length <= threshold
234 |                 ? _small.deallocate(data)
235 |                 : _large.deallocate(data);
236 |         }
237 | 
238 |         static if (__traits(hasMember, SmallAllocator, "deallocateAll")
239 |                 && __traits(hasMember, LargeAllocator, "deallocateAll"))
240 |         bool deallocateAll()
241 |         {
242 |             // Use & instead of && to evaluate both
243 |             return _small.deallocateAll() & _large.deallocateAll();
244 |         }
245 | 
246 |         static if (__traits(hasMember, SmallAllocator, "empty")
247 |                 && __traits(hasMember, LargeAllocator, "empty"))
248 |         Ternary empty()
249 |         {
250 |             return _small.empty & _large.empty;
251 |         }
252 | 
253 |         static if (__traits(hasMember, SmallAllocator, "resolveInternalPointer")
254 |                 && __traits(hasMember, LargeAllocator, "resolveInternalPointer"))
255 |         Ternary resolveInternalPointer(const void* p, ref void[] result)
256 |         {
257 |             Ternary r = _small.resolveInternalPointer(p, result);
258 |             return r == Ternary.no ? _large.resolveInternalPointer(p, result) : r;
259 |         }
260 |     }
261 | 
262 |     private enum sharedMethods =
263 |         !stateSize!SmallAllocator
264 |         && !stateSize!LargeAllocator
265 |         && is(typeof(SmallAllocator.instance) == shared)
266 |         && is(typeof(LargeAllocator.instance) == shared);
267 | 
268 |     static if (sharedMethods)
269 |     { // for backward compatibility
270 |         enum shared Segregator instance = Segregator();
271 |         static { mixin Impl!(); }
272 |     }
273 |     else
274 |     {
275 |         static if (!stateSize!SmallAllocator && !stateSize!LargeAllocator)
276 |         {
277 |             enum shared Segregator instance = Segregator();
278 |             static { mixin Impl!(); }
279 |         }
280 |         else
281 |         {
282 |             mixin Impl!();
283 |         }
284 |     }
285 | }
286 | 
287 | ///
288 | @system unittest
289 | {
290 |     import stdx.allocator.building_blocks.free_list : FreeList;
291 |     import stdx.allocator.gc_allocator : GCAllocator;
292 |     import stdx.allocator.mallocator : Mallocator;
293 |     alias A =
294 |         Segregator!(
295 |             1024 * 4,
296 |             Segregator!(
297 |                 128, FreeList!(Mallocator, 0, 128),
298 |                 GCAllocator),
299 |             Segregator!(
300 |                 1024 * 1024, Mallocator,
301 |                 GCAllocator)
302 |         );
303 |     A a;
304 |     auto b = a.allocate(200);
305 |     assert(b.length == 200);
306 |     a.deallocate(b);
307 | }
308 | 
309 | /**
310 | A $(D Segregator) with more than three arguments expands to a composition of
311 | elemental $(D Segregator)s, as illustrated by the following example:
312 | 
313 | ----
314 | alias A =
315 |     Segregator!(
316 |         n1, A1,
317 |         n2, A2,
318 |         n3, A3,
319 |         A4
320 |     );
321 | ----
322 | 
323 | With this definition, allocation requests for $(D n1) bytes or less are directed
324 | to $(D A1); requests between $(D n1 + 1) and $(D n2) bytes (inclusive) are
325 | directed to $(D A2); requests between $(D n2 + 1) and $(D n3) bytes (inclusive)
326 | are directed to $(D A3); and requests for more than $(D n3) bytes are directed
327 | to $(D A4). If some particular range should not be handled, $(D NullAllocator)
328 | may be used appropriately.
329 | 
330 | */
331 | template Segregator(Args...)
332 | if (Args.length > 3)
333 | {
334 |     // Binary search
335 |     private enum cutPoint = ((Args.length - 2) / 4) * 2;
336 |     static if (cutPoint >= 2)
337 |     {
338 |         alias Segregator = .Segregator!(
339 |             Args[cutPoint],
340 |             .Segregator!(Args[0 .. cutPoint], Args[cutPoint + 1]),
341 |             .Segregator!(Args[cutPoint + 2 .. $])
342 |         );
343 |     }
344 |     else
345 |     {
346 |         // Favor small sizes
347 |         alias Segregator = .Segregator!(
348 |             Args[0],
349 |             Args[1],
350 |             .Segregator!(Args[2 ..
$]) 351 | ); 352 | } 353 | } 354 | 355 | /// 356 | @system unittest 357 | { 358 | import stdx.allocator.building_blocks.free_list : FreeList; 359 | import stdx.allocator.gc_allocator : GCAllocator; 360 | import stdx.allocator.mallocator : Mallocator; 361 | alias A = 362 | Segregator!( 363 | 128, FreeList!(Mallocator, 0, 128), 364 | 1024 * 4, GCAllocator, 365 | 1024 * 1024, Mallocator, 366 | GCAllocator 367 | ); 368 | A a; 369 | auto b = a.allocate(201); 370 | assert(b.length == 201); 371 | a.deallocate(b); 372 | } 373 | -------------------------------------------------------------------------------- /source/stdx/allocator/building_blocks/stats_collector.d: -------------------------------------------------------------------------------- 1 | // Written in the D programming language. 2 | /** 3 | Allocator that collects useful statistics about allocations, both global and per 4 | calling point. The statistics collected can be configured statically by choosing 5 | combinations of `Options` appropriately. 6 | 7 | Example: 8 | ---- 9 | import stdx.allocator.gc_allocator : GCAllocator; 10 | import stdx.allocator.building_blocks.free_list : FreeList; 11 | alias Allocator = StatsCollector!(GCAllocator, Options.bytesUsed); 12 | ---- 13 | */ 14 | module stdx.allocator.building_blocks.stats_collector; 15 | 16 | import stdx.allocator.common; 17 | 18 | /** 19 | _Options for $(D StatsCollector) defined below. Each enables during 20 | compilation one specific counter, statistic, or other piece of information. 21 | */ 22 | enum Options : ulong 23 | { 24 | /** 25 | Counts the number of calls to $(D owns). 26 | */ 27 | numOwns = 1u << 0, 28 | /** 29 | Counts the number of calls to $(D allocate). All calls are counted, 30 | including requests for zero bytes or failed requests. 31 | */ 32 | numAllocate = 1u << 1, 33 | /** 34 | Counts the number of calls to $(D allocate) that succeeded, i.e. they 35 | returned a block as large as requested. (N.B. requests for zero bytes count 36 | as successful.) 37 | */ 38 | numAllocateOK = 1u << 2, 39 | /** 40 | Counts the number of calls to $(D expand), regardless of arguments or 41 | result. 42 | */ 43 | numExpand = 1u << 3, 44 | /** 45 | Counts the number of calls to $(D expand) that resulted in a successful 46 | expansion. 47 | */ 48 | numExpandOK = 1u << 4, 49 | /** 50 | Counts the number of calls to $(D reallocate), regardless of arguments or 51 | result. 52 | */ 53 | numReallocate = 1u << 5, 54 | /** 55 | Counts the number of calls to $(D reallocate) that succeeded. 56 | (Reallocations to zero bytes count as successful.) 57 | */ 58 | numReallocateOK = 1u << 6, 59 | /** 60 | Counts the number of calls to $(D reallocate) that resulted in an in-place 61 | reallocation (no memory moved). If this number is close to the total number 62 | of reallocations, that indicates the allocator finds room at the current 63 | block's end in a large fraction of the cases, but also that internal 64 | fragmentation may be high (the size of the unit of allocation is large 65 | compared to the typical allocation size of the application). 66 | */ 67 | numReallocateInPlace = 1u << 7, 68 | /** 69 | Counts the number of calls to $(D deallocate). 70 | */ 71 | numDeallocate = 1u << 8, 72 | /** 73 | Counts the number of calls to $(D deallocateAll). 74 | */ 75 | numDeallocateAll = 1u << 9, 76 | /** 77 | Chooses all $(D numXxx) flags. 78 | */ 79 | numAll = (1u << 10) - 1, 80 | /** 81 | Tracks bytes currently allocated by this allocator. 
This number goes up
82 |     and down as memory is allocated and deallocated, and is zero if the
83 |     allocator currently has no active allocation.
84 |     */
85 |     bytesUsed = 1u << 10,
86 |     /**
87 |     Tracks total cumulative bytes allocated by means of $(D allocate),
88 |     $(D expand), and $(D reallocate) (when resulting in an expansion). This
89 |     number always grows and indicates allocation traffic. To compute bytes
90 |     deallocated cumulatively, subtract $(D bytesUsed) from $(D bytesAllocated).
91 |     */
92 |     bytesAllocated = 1u << 11,
93 |     /**
94 |     Tracks the sum of all $(D delta) values in calls of the form
95 |     $(D expand(b, delta)) that succeed (return $(D true)).
96 |     */
97 |     bytesExpanded = 1u << 12,
98 |     /**
99 |     Tracks the sum of all $(D b.length - s) with $(D b.length > s) in calls of
100 |     the form $(D realloc(b, s)) that succeed (return $(D true)). In per-call
101 |     statistics, also unambiguously counts the bytes deallocated with
102 |     $(D deallocate).
103 |     */
104 |     bytesContracted = 1u << 13,
105 |     /**
106 |     Tracks the sum of all bytes moved as a result of calls to $(D realloc) that
107 |     were unable to reallocate in place. A large number (relative to $(D
108 |     bytesAllocated)) indicates that the application should use larger
109 |     preallocations.
110 |     */
111 |     bytesMoved = 1u << 14,
112 |     /**
113 |     Tracks the sum of all bytes NOT moved as a result of calls to $(D realloc)
114 |     that managed to reallocate in place. A large number (relative to $(D
115 |     bytesAllocated)) indicates that the application is expansion-intensive and
116 |     is saving a good amount of moves. However, if this number is relatively
117 |     small and $(D bytesSlack) is high, it means the application is
118 |     overallocating for little benefit.
119 |     */
120 |     bytesNotMoved = 1u << 15,
121 |     /**
122 |     Measures the sum of extra bytes allocated beyond the bytes requested, i.e.
123 |     the $(HTTP goo.gl/YoKffF, internal fragmentation). This is the current
124 |     effective number of slack bytes, and it goes up and down with time.
125 |     */
126 |     bytesSlack = 1u << 16,
127 |     /**
128 |     Measures the maximum bytes allocated over time. This is useful for
129 |     dimensioning allocators.
130 |     */
131 |     bytesHighTide = 1u << 17,
132 |     /**
133 |     Chooses all $(D bytesXxx) flags.
134 |     */
135 |     bytesAll = ((1u << 18) - 1) & ~numAll,
136 |     /**
137 |     Combines all flags above.
138 |     */
139 |     all = (1u << 18) - 1
140 | }
141 | 
142 | /**
143 | 
144 | Allocator that collects extra data about allocations. Since each piece of
145 | information adds size and time overhead, statistics can be individually enabled
146 | or disabled through compile-time $(D flags).
147 | 
148 | All stats of the form $(D numXxx) record counts of events occurring, such as
149 | calls to functions and specific results. The stats of the form $(D bytesXxx)
150 | collect cumulative sizes.
151 | 
152 | In addition, the data $(D callerSize), $(D callerModule), $(D callerFile), $(D
153 | callerLine), and $(D callerTime) is associated with each specific allocation.
154 | This data prefixes each allocation.
155 | 
156 | */
157 | struct StatsCollector(Allocator, ulong flags = Options.all,
158 |     ulong perCallFlags = 0)
159 | {
160 | private:
161 |     import stdx.allocator.internal : Ternary;
162 | 
163 |     enum define = (string type, string[] names...)
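    // For each name in `names`, this CTFE lambda generates a private field
    // `_name` of the given `type` plus a public read-only accessor, each
    // compiled in only when the corresponding Options flag is set.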
164 |     {
165 |         string result;
166 |         foreach (v; names)
167 |             result ~= "static if (flags & Options."~v~") {"
168 |                 ~ "private "~type~" _"~v~";"
169 |                 ~ "public const("~type~") "~v~"() const { return _"~v~"; }"
170 |                 ~ "}";
171 |         return result;
172 |     };
173 | 
174 |     void add(string counter)(sizediff_t n)
175 |     {
176 |         mixin("static if (flags & Options." ~ counter
177 |             ~ ") _" ~ counter ~ " += n;");
178 |         static if (counter == "bytesUsed" && (flags & Options.bytesHighTide))
179 |         {
180 |             if (bytesHighTide < bytesUsed) _bytesHighTide = bytesUsed;
181 |         }
182 |     }
183 | 
184 |     void up(string counter)() { add!counter(1); }
185 |     void down(string counter)() { add!counter(-1); }
186 | 
187 |     version (StdDdoc)
188 |     {
189 |         /**
190 |         Read-only properties enabled by the homonym $(D flags) chosen by the
191 |         user.
192 | 
193 |         Example:
194 |         ----
195 |         StatsCollector!(Mallocator,
196 |             Options.bytesUsed | Options.bytesAllocated) a;
197 |         auto d1 = a.allocate(10);
198 |         auto d2 = a.allocate(11);
199 |         a.deallocate(d1);
200 |         assert(a.bytesAllocated == 21);
201 |         assert(a.bytesUsed == 11);
202 |         a.deallocate(d2);
203 |         assert(a.bytesAllocated == 21);
204 |         assert(a.bytesUsed == 0);
205 |         ----
206 |         */
207 |         @property ulong numOwns() const;
208 |         /// Ditto
209 |         @property ulong numAllocate() const;
210 |         /// Ditto
211 |         @property ulong numAllocateOK() const;
212 |         /// Ditto
213 |         @property ulong numExpand() const;
214 |         /// Ditto
215 |         @property ulong numExpandOK() const;
216 |         /// Ditto
217 |         @property ulong numReallocate() const;
218 |         /// Ditto
219 |         @property ulong numReallocateOK() const;
220 |         /// Ditto
221 |         @property ulong numReallocateInPlace() const;
222 |         /// Ditto
223 |         @property ulong numDeallocate() const;
224 |         /// Ditto
225 |         @property ulong numDeallocateAll() const;
226 |         /// Ditto
227 |         @property ulong bytesUsed() const;
228 |         /// Ditto
229 |         @property ulong bytesAllocated() const;
230 |         /// Ditto
231 |         @property ulong bytesExpanded() const;
232 |         /// Ditto
233 |         @property ulong bytesContracted() const;
234 |         /// Ditto
235 |         @property ulong bytesMoved() const;
236 |         /// Ditto
237 |         @property ulong bytesNotMoved() const;
238 |         /// Ditto
239 |         @property ulong bytesSlack() const;
240 |         /// Ditto
241 |         @property ulong bytesHighTide() const;
242 |     }
243 | 
244 | public:
245 |     /**
246 |     The parent allocator is publicly accessible either as a direct member if it
247 |     holds state, or as an alias to `Allocator.instance` otherwise. One may use
248 |     it for making calls that won't count toward statistics collection.
249 |     */
250 |     static if (stateSize!Allocator) Allocator parent;
251 |     else alias parent = Allocator.instance;
252 | 
253 | private:
254 |     // Per-allocator state
255 |     mixin(define("ulong",
256 |         "numOwns",
257 |         "numAllocate",
258 |         "numAllocateOK",
259 |         "numExpand",
260 |         "numExpandOK",
261 |         "numReallocate",
262 |         "numReallocateOK",
263 |         "numReallocateInPlace",
264 |         "numDeallocate",
265 |         "numDeallocateAll",
266 |         "bytesUsed",
267 |         "bytesAllocated",
268 |         "bytesExpanded",
269 |         "bytesContracted",
270 |         "bytesMoved",
271 |         "bytesNotMoved",
272 |         "bytesSlack",
273 |         "bytesHighTide",
274 |     ));
275 | 
276 | public:
277 | 
278 |     /// Alignment offered is equal to $(D Allocator.alignment).
279 |     alias alignment = Allocator.alignment;
280 | 
281 |     /**
282 |     Increments $(D numOwns) (per instance and per call) and forwards to $(D
283 |     parent.owns(b)).
284 |     */
285 |     static if (__traits(hasMember, Allocator, "owns"))
286 |     {
287 |         static if ((perCallFlags & Options.numOwns) == 0)
288 |         Ternary owns(void[] b)
289 |         { return ownsImpl(b); }
290 |         else
291 |         Ternary owns(string f = __FILE__, uint n = __LINE__)(void[] b)
292 |         { return ownsImpl!(f, n)(b); }
293 |     }
294 | 
295 |     private Ternary ownsImpl(string f = null, uint n = 0)(void[] b)
296 |     {
297 |         up!"numOwns";
298 |         addPerCall!(f, n, "numOwns")(1);
299 |         return parent.owns(b);
300 |     }
301 | 
302 |     /**
303 |     Forwards to $(D parent.allocate). Affects per instance: $(D numAllocate),
304 |     $(D bytesUsed), $(D bytesAllocated), $(D bytesSlack), $(D numAllocateOK),
305 |     and $(D bytesHighTide). Affects per call: $(D numAllocate), $(D
306 |     numAllocateOK), and $(D bytesAllocated).
307 |     */
308 |     static if (!(perCallFlags
309 |         & (Options.numAllocate | Options.numAllocateOK
310 |             | Options.bytesAllocated)))
311 |     {
312 |         void[] allocate(size_t n)
313 |         { return allocateImpl(n); }
314 |     }
315 |     else
316 |     {
317 |         void[] allocate(string f = __FILE__, ulong n = __LINE__)
318 |             (size_t bytes)
319 |         { return allocateImpl!(f, n)(bytes); }
320 |     }
321 | 
322 |     private void[] allocateImpl(string f = null, ulong n = 0)(size_t bytes)
323 |     {
324 |         auto result = parent.allocate(bytes);
325 |         add!"bytesUsed"(result.length);
326 |         add!"bytesAllocated"(result.length);
327 |         immutable slack = this.goodAllocSize(result.length) - result.length;
328 |         add!"bytesSlack"(slack);
329 |         up!"numAllocate";
330 |         add!"numAllocateOK"(result.length == bytes); // allocating 0 bytes is OK
331 |         addPerCall!(f, n, "numAllocate", "numAllocateOK", "bytesAllocated")
332 |             (1, result.length == bytes, result.length);
333 |         return result;
334 |     }
335 | 
336 |     /**
337 |     Defined whether or not $(D Allocator.expand) is defined. Affects
338 |     per instance: $(D numExpand), $(D numExpandOK), $(D bytesExpanded),
339 |     $(D bytesSlack), $(D bytesAllocated), and $(D bytesUsed). Affects per call:
340 |     $(D numExpand), $(D numExpandOK), $(D bytesExpanded), and
341 |     $(D bytesAllocated).
342 |     */
343 |     static if (!(perCallFlags
344 |         & (Options.numExpand | Options.numExpandOK | Options.bytesExpanded)))
345 |     {
346 |         bool expand(ref void[] b, size_t delta)
347 |         { return expandImpl(b, delta); }
348 |     }
349 |     else
350 |     {
351 |         bool expand(string f = __FILE__, uint n = __LINE__)
352 |             (ref void[] b, size_t delta)
353 |         { return expandImpl!(f, n)(b, delta); }
354 |     }
355 | 
356 |     private bool expandImpl(string f = null, uint n = 0)(ref void[] b, size_t s)
357 |     {
358 |         up!"numExpand";
359 |         sizediff_t slack = 0;
360 |         static if (!__traits(hasMember, Allocator, "expand"))
361 |         {
362 |             auto result = s == 0;
363 |         }
364 |         else
365 |         {
366 |             immutable bytesSlackB4 = this.goodAllocSize(b.length) - b.length;
367 |             auto result = parent.expand(b, s);
368 |             if (result)
369 |             {
370 |                 up!"numExpandOK";
371 |                 add!"bytesUsed"(s);
372 |                 add!"bytesAllocated"(s);
373 |                 add!"bytesExpanded"(s);
374 |                 slack = sizediff_t(this.goodAllocSize(b.length) - b.length
375 |                     - bytesSlackB4);
376 |                 add!"bytesSlack"(slack);
377 |             }
378 |         }
379 |         immutable xtra = result ? s : 0;
380 |         addPerCall!(f, n, "numExpand", "numExpandOK", "bytesExpanded",
381 |             "bytesAllocated")
382 |             (1, result, xtra, xtra);
383 |         return result;
384 |     }
385 | 
386 |     /**
387 |     Defined whether or not $(D Allocator.reallocate) is defined. Affects
388 |     per instance: $(D numReallocate), $(D numReallocateOK), $(D
389 |     numReallocateInPlace), $(D bytesNotMoved), $(D bytesAllocated), $(D
390 |     bytesSlack), $(D bytesExpanded), and $(D bytesContracted). Affects per call:
391 |     $(D numReallocate), $(D numReallocateOK), $(D numReallocateInPlace),
392 |     $(D bytesNotMoved), $(D bytesExpanded), $(D bytesContracted), and
393 |     $(D bytesMoved).
394 |     */
395 |     static if (!(perCallFlags
396 |         & (Options.numReallocate | Options.numReallocateOK
397 |             | Options.numReallocateInPlace | Options.bytesNotMoved
398 |             | Options.bytesExpanded | Options.bytesContracted
399 |             | Options.bytesMoved)))
400 |     {
401 |         bool reallocate(ref void[] b, size_t s)
402 |         { return reallocateImpl(b, s); }
403 |     }
404 |     else
405 |     {
406 |         bool reallocate(string f = __FILE__, ulong n = __LINE__)
407 |             (ref void[] b, size_t s)
408 |         { return reallocateImpl!(f, n)(b, s); }
409 |     }
410 | 
411 |     private bool reallocateImpl(string f = null, uint n = 0)
412 |         (ref void[] b, size_t s)
413 |     {
414 |         up!"numReallocate";
415 |         const bytesSlackB4 = this.goodAllocSize(b.length) - b.length;
416 |         const oldB = b.ptr;
417 |         const oldLength = b.length;
418 | 
419 |         const result = parent.reallocate(b, s);
420 | 
421 |         sizediff_t slack = 0;
422 |         bool wasInPlace = false;
423 |         sizediff_t delta = 0;
424 | 
425 |         if (result)
426 |         {
427 |             up!"numReallocateOK";
428 |             slack = (this.goodAllocSize(b.length) - b.length) - bytesSlackB4;
429 |             add!"bytesSlack"(slack);
430 |             add!"bytesUsed"(sizediff_t(b.length - oldLength));
431 |             if (oldB == b.ptr)
432 |             {
433 |                 // This was an in-place reallocation, yay
434 |                 wasInPlace = true;
435 |                 up!"numReallocateInPlace";
436 |                 add!"bytesNotMoved"(oldLength);
437 |                 delta = b.length - oldLength;
438 |                 if (delta >= 0)
439 |                 {
440 |                     // Expansion
441 |                     add!"bytesAllocated"(delta);
442 |                     add!"bytesExpanded"(delta);
443 |                 }
444 |                 else
445 |                 {
446 |                     // Contraction
447 |                     add!"bytesContracted"(-delta);
448 |                 }
449 |             }
450 |             else
451 |             {
452 |                 // This was an allocate-move-deallocate cycle
453 |                 add!"bytesAllocated"(b.length);
454 |                 add!"bytesMoved"(oldLength);
455 |             }
456 |         }
457 |         addPerCall!(f, n, "numReallocate", "numReallocateOK",
458 |             "numReallocateInPlace", "bytesNotMoved",
459 |             "bytesExpanded", "bytesContracted", "bytesMoved")
460 |             (1, result, wasInPlace, wasInPlace ? oldLength : 0,
461 |                 delta >= 0 ? delta : 0, delta < 0 ? -delta : 0,
462 |                 wasInPlace ? 0 : oldLength);
463 |         return result;
464 |     }
465 | 
466 |     /**
467 |     Defined whether or not $(D Allocator.deallocate) is defined. Affects
468 |     per instance: $(D numDeallocate), $(D bytesUsed), and $(D bytesSlack).
469 |     Affects per call: $(D numDeallocate) and $(D bytesContracted).
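    For example (a minimal sketch, assuming $(D Mallocator) as the parent
    allocator):
    ----
    StatsCollector!(Mallocator, Options.numDeallocate | Options.bytesUsed) a;
    auto b = a.allocate(100);
    a.deallocate(b);
    assert(a.numDeallocate == 1);
    assert(a.bytesUsed == 0); // every byte allocated has been returned
    ----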
470 |     */
471 |     static if (!(perCallFlags &
472 |         (Options.numDeallocate | Options.bytesContracted)))
473 |     bool deallocate(void[] b)
474 |     { return deallocateImpl(b); }
475 |     else
476 |     bool deallocate(string f = __FILE__, uint n = __LINE__)(void[] b)
477 |     { return deallocateImpl!(f, n)(b); }
478 | 
479 |     private bool deallocateImpl(string f = null, uint n = 0)(void[] b)
480 |     {
481 |         up!"numDeallocate";
482 |         add!"bytesUsed"(-sizediff_t(b.length));
483 |         add!"bytesSlack"(-(this.goodAllocSize(b.length) - b.length));
484 |         addPerCall!(f, n, "numDeallocate", "bytesContracted")(1, b.length);
485 |         static if (__traits(hasMember, Allocator, "deallocate"))
486 |             return parent.deallocate(b);
487 |         else
488 |             return false;
489 |     }
490 | 
491 |     static if (__traits(hasMember, Allocator, "deallocateAll"))
492 |     {
493 |         /**
494 |         Defined only if $(D Allocator.deallocateAll) is defined. Affects
495 |         per instance and per call $(D numDeallocateAll).
496 |         */
497 |         static if (!(perCallFlags & Options.numDeallocateAll))
498 |         bool deallocateAll()
499 |         { return deallocateAllImpl(); }
500 |         else
501 |         bool deallocateAll(string f = __FILE__, uint n = __LINE__)()
502 |         { return deallocateAllImpl!(f, n)(); }
503 | 
504 |         private bool deallocateAllImpl(string f = null, uint n = 0)()
505 |         {
506 |             up!"numDeallocateAll";
507 |             addPerCall!(f, n, "numDeallocateAll")(1);
508 |             static if ((flags & Options.bytesUsed))
509 |                 _bytesUsed = 0;
510 |             return parent.deallocateAll();
511 |         }
512 |     }
513 | 
514 |     /**
515 |     Defined only if $(D Options.bytesUsed) is defined. Returns $(D bytesUsed ==
516 |     0).
517 |     */
518 |     static if (flags & Options.bytesUsed)
519 |     Ternary empty()
520 |     {
521 |         return Ternary(_bytesUsed == 0);
522 |     }
523 | 
524 |     /**
525 |     Reports per instance statistics to $(D output) (e.g. $(D stdout)). The
526 |     format is simple: one kind and value per line, separated by a colon, e.g.
527 |     $(D bytesAllocated:7395404)
528 |     */
529 |     void reportStatistics(R)(auto ref R output)
530 |     {
531 |         foreach (member; __traits(allMembers, Options))
532 |         {{
533 |             enum e = __traits(getMember, Options, member);
534 |             static if ((flags & e) && e != Options.numAll
535 |                 && e != Options.bytesAll && e != Options.all)
536 |                 output.write(member, ":", mixin(member), '\n');
537 |         }}
538 |     }
539 | 
540 |     static if (perCallFlags)
541 |     {
542 |         /**
543 |         Defined if $(D perCallFlags) is nonzero.
544 |         */
545 |         struct PerCallStatistics
546 |         {
547 |             /// The file and line of the call.
548 |             string file;
549 |             /// Ditto
550 |             uint line;
551 |             /// The options corresponding to the statistics collected.
552 |             Options[] opts;
553 |             /// The values of the statistics. Has the same length as $(D opts).
554 |             ulong[] values;
555 |             // Next in the chain.
556 |             private PerCallStatistics* next;
557 | 
558 |             /**
559 |             Format to a string such as:
560 |             $(D mymodule.d(655): [numAllocate:21, numAllocateOK:21, bytesAllocated:324202]).
561 |             */
562 |             string toString()() const
563 |             {
564 |                 import std.conv : text, to;
565 |                 auto result = text(file, "(", line, "): [");
566 |                 foreach (i, opt; opts)
567 |                 {
568 |                     if (i) result ~= ", ";
569 |                     result ~= opt.to!string;
570 |                     result ~= ':';
571 |                     result ~= values[i].to!string;
572 |                 }
573 |                 return result ~= "]";
574 |             }
575 |         }
576 |         private static PerCallStatistics* root;
577 | 
578 |         /**
579 |         Defined if $(D perCallFlags) is nonzero. Iterates all monitored
580 |         file/line instances.
The order of iteration is not meaningful (items 581 | are inserted at the front of a list upon the first call), so 582 | preprocessing the statistics after collection might be appropriate. 583 | */ 584 | static auto byFileLine() 585 | { 586 | static struct Voldemort 587 | { 588 | PerCallStatistics* current; 589 | bool empty() { return !current; } 590 | ref PerCallStatistics front() { return *current; } 591 | void popFront() { current = current.next; } 592 | auto save() { return this; } 593 | } 594 | return Voldemort(root); 595 | } 596 | 597 | /** 598 | Defined if $(D perCallFlags) is nonzero. Outputs (e.g. to a $(D File)) 599 | a simple report of the collected per-call statistics. 600 | */ 601 | static void reportPerCallStatistics(R)(auto ref R output) 602 | { 603 | output.write("Stats for: ", StatsCollector.stringof, '\n'); 604 | foreach (ref stat; byFileLine) 605 | { 606 | output.write(stat, '\n'); 607 | } 608 | } 609 | 610 | private PerCallStatistics* statsAt(string f, uint n, opts...)() 611 | { 612 | static PerCallStatistics s = { f, n, [ opts ], new ulong[opts.length] }; 613 | static bool inserted; 614 | 615 | if (!inserted) 616 | { 617 | // Insert as root 618 | s.next = root; 619 | root = &s; 620 | inserted = true; 621 | } 622 | return &s; 623 | } 624 | 625 | private void addPerCall(string f, uint n, names...)(ulong[] values...) 626 | { 627 | import std.array : join; 628 | enum uint mask = mixin("Options."~[names].join("|Options.")); 629 | static if (perCallFlags & mask) 630 | { 631 | // Per allocation info 632 | auto ps = mixin("statsAt!(f, n," 633 | ~ "Options."~[names].join(", Options.") 634 | ~")"); 635 | foreach (i; 0 .. names.length) 636 | { 637 | ps.values[i] += values[i]; 638 | } 639 | } 640 | } 641 | } 642 | else 643 | { 644 | private void addPerCall(string f, uint n, names...)(ulong[]...) 
645 | { 646 | } 647 | } 648 | } 649 | 650 | /// 651 | @system unittest 652 | { 653 | import stdx.allocator.building_blocks.free_list : FreeList; 654 | import stdx.allocator.gc_allocator : GCAllocator; 655 | alias Allocator = StatsCollector!(GCAllocator, Options.all, Options.all); 656 | 657 | Allocator alloc; 658 | auto b = alloc.allocate(10); 659 | alloc.reallocate(b, 20); 660 | alloc.deallocate(b); 661 | 662 | static if (__VERSION__ >= 2073) 663 | { 664 | import std.file : deleteme, remove; 665 | import std.range : walkLength; 666 | import std.stdio : File; 667 | 668 | auto f = deleteme ~ "-dlang.stdx.allocator.stats_collector.txt"; 669 | scope(exit) remove(f); 670 | Allocator.reportPerCallStatistics(File(f, "w")); 671 | alloc.reportStatistics(File(f, "a")); 672 | assert(File(f).byLine.walkLength == 22); 673 | } 674 | } 675 | 676 | @system unittest 677 | { 678 | void test(Allocator)() 679 | { 680 | import std.range : walkLength; 681 | import std.stdio : writeln; 682 | Allocator a; 683 | auto b1 = a.allocate(100); 684 | assert(a.numAllocate == 1); 685 | assert(a.expand(b1, 0)); 686 | assert(a.reallocate(b1, b1.length + 1)); 687 | auto b2 = a.allocate(101); 688 | assert(a.numAllocate == 2); 689 | assert(a.bytesAllocated == 202); 690 | assert(a.bytesUsed == 202); 691 | auto b3 = a.allocate(202); 692 | assert(a.numAllocate == 3); 693 | assert(a.bytesAllocated == 404); 694 | 695 | a.deallocate(b2); 696 | assert(a.numDeallocate == 1); 697 | a.deallocate(b1); 698 | assert(a.numDeallocate == 2); 699 | a.deallocate(b3); 700 | assert(a.numDeallocate == 3); 701 | assert(a.numAllocate == a.numDeallocate); 702 | assert(a.bytesUsed == 0); 703 | } 704 | 705 | import stdx.allocator.building_blocks.free_list : FreeList; 706 | import stdx.allocator.gc_allocator : GCAllocator; 707 | test!(StatsCollector!(GCAllocator, Options.all, Options.all)); 708 | test!(StatsCollector!(FreeList!(GCAllocator, 128), Options.all, 709 | Options.all)); 710 | } 711 | 712 | @system unittest 713 | { 714 | void test(Allocator)() 715 | { 716 | import std.range : walkLength; 717 | import std.stdio : writeln; 718 | Allocator a; 719 | auto b1 = a.allocate(100); 720 | assert(a.expand(b1, 0)); 721 | assert(a.reallocate(b1, b1.length + 1)); 722 | auto b2 = a.allocate(101); 723 | auto b3 = a.allocate(202); 724 | 725 | a.deallocate(b2); 726 | a.deallocate(b1); 727 | a.deallocate(b3); 728 | } 729 | import stdx.allocator.building_blocks.free_list : FreeList; 730 | import stdx.allocator.gc_allocator : GCAllocator; 731 | test!(StatsCollector!(GCAllocator, 0, 0)); 732 | } 733 | -------------------------------------------------------------------------------- /source/stdx/allocator/common.d: -------------------------------------------------------------------------------- 1 | /** 2 | Utility and ancillary artifacts of `stdx.allocator`. This module 3 | shouldn't be used directly; its functionality will be migrated into more 4 | appropriate parts of `std`. 5 | 6 | Authors: $(HTTP erdani.com, Andrei Alexandrescu), Timon Gehr (`Ternary`) 7 | */ 8 | module stdx.allocator.common; 9 | import mir.utility; 10 | import std.traits; 11 | 12 | /** 13 | Returns the size in bytes of the state that needs to be allocated to hold an 14 | object of type $(D T). $(D stateSize!T) is zero for $(D struct)s that are not 15 | nested and have no nonstatic member variables. 
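Allocators whose $(D stateSize) is zero are stateless; the building blocks in
this package use such allocators through their global `instance` member instead
of storing them. A quick illustration (using `Mallocator`, which has no fields):
----
import stdx.allocator.mallocator : Mallocator;
static assert(stateSize!Mallocator == 0);
----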
16 | */ 17 | template stateSize(T) 18 | { 19 | static if (is(T == class) || is(T == interface)) 20 | enum stateSize = __traits(classInstanceSize, T); 21 | else static if (is(T == struct) || is(T == union)) 22 | enum stateSize = Fields!T.length || isNested!T ? T.sizeof : 0; 23 | else static if (is(T == void)) 24 | enum size_t stateSize = 0; 25 | else 26 | enum stateSize = T.sizeof; 27 | } 28 | 29 | @safe @nogc nothrow pure 30 | unittest 31 | { 32 | static assert(stateSize!void == 0); 33 | struct A {} 34 | static assert(stateSize!A == 0); 35 | struct B { int x; } 36 | static assert(stateSize!B == 4); 37 | interface I1 {} 38 | //static assert(stateSize!I1 == 2 * size_t.sizeof); 39 | class C1 {} 40 | static assert(stateSize!C1 == 3 * size_t.sizeof); 41 | class C2 { char c; } 42 | static assert(stateSize!C2 == 4 * size_t.sizeof); 43 | static class C3 { char c; } 44 | static assert(stateSize!C3 == 2 * size_t.sizeof + char.sizeof); 45 | } 46 | 47 | /** 48 | Returns `true` if the `Allocator` has the alignment known at compile time; 49 | otherwise it returns `false`. 50 | */ 51 | template hasStaticallyKnownAlignment(Allocator) 52 | { 53 | enum hasStaticallyKnownAlignment = __traits(compiles, 54 | {enum x = Allocator.alignment;}); 55 | } 56 | 57 | /** 58 | $(D chooseAtRuntime) is a compile-time constant of type $(D size_t) that several 59 | parameterized structures in this module recognize to mean deferral to runtime of 60 | the exact value. For example, $(D BitmappedBlock!(Allocator, 4096)) (described in 61 | detail below) defines a block allocator with block size of 4096 bytes, whereas 62 | $(D BitmappedBlock!(Allocator, chooseAtRuntime)) defines a block allocator that has a 63 | field storing the block size, initialized by the user. 64 | */ 65 | enum chooseAtRuntime = size_t.max - 1; 66 | 67 | /** 68 | $(D unbounded) is a compile-time constant of type $(D size_t) that several 69 | parameterized structures in this module recognize to mean "infinite" bounds for 70 | the parameter. For example, $(D Freelist) (described in detail below) accepts a 71 | $(D maxNodes) parameter limiting the number of freelist items. If $(D unbounded) 72 | is passed for $(D maxNodes), then there is no limit and no checking for the 73 | number of nodes. 74 | */ 75 | enum unbounded = size_t.max; 76 | 77 | /** 78 | The alignment that is guaranteed to accommodate any D object allocation on the 79 | current platform. 80 | */ 81 | enum uint platformAlignment = mir.utility.max(double.alignof, real.alignof); 82 | 83 | /** 84 | The default good size allocation is deduced as $(D n) rounded up to the 85 | allocator's alignment. 86 | */ 87 | size_t goodAllocSize(A)(auto ref A a, size_t n) 88 | { 89 | return n.roundUpToMultipleOf(a.alignment); 90 | } 91 | 92 | /** 93 | Returns s rounded up to a multiple of base. 94 | */ 95 | @safe @nogc nothrow pure 96 | size_t roundUpToMultipleOf()(size_t s, uint base) 97 | { 98 | assert(base); 99 | auto rem = s % base; 100 | return rem ? s + base - rem : s; 101 | } 102 | 103 | @safe @nogc nothrow pure 104 | unittest 105 | { 106 | assert(10.roundUpToMultipleOf(11) == 11); 107 | assert(11.roundUpToMultipleOf(11) == 11); 108 | assert(12.roundUpToMultipleOf(11) == 22); 109 | assert(118.roundUpToMultipleOf(11) == 121); 110 | } 111 | 112 | /** 113 | Returns `n` rounded up to a multiple of alignment, which must be a power of 2. 
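(The power-of-2 requirement is what allows the implementation to compute the
slack with a single mask, `n & (alignment - 1)`, instead of a division.)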
114 | */
115 | @safe @nogc nothrow pure
116 | size_t roundUpToAlignment()(size_t n, uint alignment)
117 | {
118 |     import stdx.allocator.internal : isPowerOf2;
119 |     assert(alignment.isPowerOf2);
120 |     immutable uint slack = cast(uint) n & (alignment - 1);
121 |     const result = slack
122 |         ? n + alignment - slack
123 |         : n;
124 |     assert(result >= n);
125 |     return result;
126 | }
127 | 
128 | @safe @nogc nothrow pure
129 | unittest
130 | {
131 |     assert(10.roundUpToAlignment(4) == 12);
132 |     assert(11.roundUpToAlignment(2) == 12);
133 |     assert(12.roundUpToAlignment(8) == 16);
134 |     assert(118.roundUpToAlignment(64) == 128);
135 | }
136 | 
137 | /**
138 | Returns `n` rounded down to a multiple of alignment, which must be a power of 2.
139 | */
140 | @safe @nogc nothrow pure
141 | size_t roundDownToAlignment()(size_t n, uint alignment)
142 | {
143 |     import stdx.allocator.internal : isPowerOf2;
144 |     assert(alignment.isPowerOf2);
145 |     return n & ~size_t(alignment - 1);
146 | }
147 | 
148 | @safe @nogc nothrow pure
149 | unittest
150 | {
151 |     assert(10.roundDownToAlignment(4) == 8);
152 |     assert(11.roundDownToAlignment(2) == 10);
153 |     assert(12.roundDownToAlignment(8) == 8);
154 |     assert(63.roundDownToAlignment(64) == 0);
155 | }
156 | 
157 | /**
158 | Advances the beginning of `b` to start at alignment `a`. The resulting buffer
159 | may therefore be shorter. Returns the adjusted buffer, or null if obtaining a
160 | non-empty buffer is impossible.
161 | */
162 | @nogc nothrow pure
163 | void[] roundUpToAlignment()(void[] b, uint a)
164 | {
165 |     auto e = b.ptr + b.length;
166 |     auto p = cast(void*) roundUpToAlignment(cast(size_t) b.ptr, a);
167 |     if (e <= p) return null;
168 |     return p[0 .. e - p];
169 | }
170 | 
171 | @nogc nothrow pure
172 | @system unittest
173 | {
174 |     void[] empty;
175 |     assert(roundUpToAlignment(empty, 4) == null);
176 |     char[128] buf;
177 |     // At least one pointer inside buf is 128-aligned
178 |     assert(roundUpToAlignment(buf, 128) !is null);
179 | }
180 | 
181 | /**
182 | Like `a / b` but rounds the result up, not down.
183 | */
184 | @safe @nogc nothrow pure
185 | size_t divideRoundUp()(size_t a, size_t b)
186 | {
187 |     assert(b);
188 |     return (a + b - 1) / b;
189 | }
190 | 
191 | /**
192 | Advances the beginning of the buffer `s` to the next multiple of `base`; the resulting buffer may be shorter.
193 | */
194 | @nogc nothrow pure
195 | void[] roundStartToMultipleOf()(void[] s, uint base)
196 | {
197 |     assert(base);
198 |     auto p = cast(void*) roundUpToMultipleOf(
199 |         cast(size_t) s.ptr, base);
200 |     auto end = s.ptr + s.length;
201 |     return p[0 .. end - p];
202 | }
203 | 
204 | nothrow pure
205 | @system unittest
206 | {
207 |     void[] p;
208 |     assert(roundStartToMultipleOf(p, 16) is null);
209 |     p = new ulong[10];
210 |     assert(roundStartToMultipleOf(p, 16) is p);
211 | }
212 | 
213 | /**
214 | Returns $(D s) rounded up to the nearest power of 2.
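The implementation decrements `s`, ORs it with progressively wider right-shifts
of itself so that every bit below the highest set bit becomes 1, and then adds
1 back; for example, `s == 12` gives `11 == 0b1011`, which smears to `0b1111`,
and adding 1 yields `16`.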
215 | */ 216 | @safe @nogc nothrow pure 217 | size_t roundUpToPowerOf2()(size_t s) 218 | { 219 | import std.meta : AliasSeq; 220 | assert(s <= (size_t.max >> 1) + 1); 221 | --s; 222 | static if (size_t.sizeof == 4) 223 | alias Shifts = AliasSeq!(1, 2, 4, 8, 16); 224 | else 225 | alias Shifts = AliasSeq!(1, 2, 4, 8, 16, 32); 226 | foreach (i; Shifts) 227 | { 228 | s |= s >> i; 229 | } 230 | return s + 1; 231 | } 232 | 233 | @safe @nogc nothrow pure 234 | unittest 235 | { 236 | assert(0.roundUpToPowerOf2 == 0); 237 | assert(1.roundUpToPowerOf2 == 1); 238 | assert(2.roundUpToPowerOf2 == 2); 239 | assert(3.roundUpToPowerOf2 == 4); 240 | assert(7.roundUpToPowerOf2 == 8); 241 | assert(8.roundUpToPowerOf2 == 8); 242 | assert(10.roundUpToPowerOf2 == 16); 243 | assert(11.roundUpToPowerOf2 == 16); 244 | assert(12.roundUpToPowerOf2 == 16); 245 | assert(118.roundUpToPowerOf2 == 128); 246 | assert((size_t.max >> 1).roundUpToPowerOf2 == (size_t.max >> 1) + 1); 247 | assert(((size_t.max >> 1) + 1).roundUpToPowerOf2 == (size_t.max >> 1) + 1); 248 | } 249 | 250 | /** 251 | Returns the number of trailing zeros of $(D x). 252 | */ 253 | @safe @nogc nothrow pure 254 | uint trailingZeros()(ulong x) 255 | { 256 | uint result; 257 | while (result < 64 && !(x & (1UL << result))) 258 | { 259 | ++result; 260 | } 261 | return result; 262 | } 263 | 264 | @safe @nogc nothrow pure 265 | unittest 266 | { 267 | assert(trailingZeros(0) == 64); 268 | assert(trailingZeros(1) == 0); 269 | assert(trailingZeros(2) == 1); 270 | assert(trailingZeros(3) == 0); 271 | assert(trailingZeros(4) == 2); 272 | } 273 | 274 | /** 275 | Returns `true` if `ptr` is aligned at `alignment`. 276 | */ 277 | @nogc nothrow pure 278 | bool alignedAt(T)(T* ptr, uint alignment) 279 | { 280 | return cast(size_t) ptr % alignment == 0; 281 | } 282 | 283 | /** 284 | Returns the effective alignment of `ptr`, i.e. the largest power of two that is 285 | a divisor of `ptr`. 286 | */ 287 | @nogc nothrow pure 288 | uint effectiveAlignment()(void* ptr) 289 | { 290 | return 1U << trailingZeros(cast(size_t) ptr); 291 | } 292 | 293 | @nogc nothrow pure 294 | @system unittest 295 | { 296 | int x; 297 | assert(effectiveAlignment(&x) >= int.alignof); 298 | } 299 | 300 | /** 301 | Aligns a pointer down to a specified alignment. The resulting pointer is less 302 | than or equal to the given pointer. 303 | */ 304 | @nogc nothrow pure 305 | void* alignDownTo()(void* ptr, uint alignment) 306 | { 307 | import stdx.allocator.internal : isPowerOf2; 308 | assert(alignment.isPowerOf2); 309 | return cast(void*) (cast(size_t) ptr & ~(alignment - 1UL)); 310 | } 311 | 312 | /** 313 | Aligns a pointer up to a specified alignment. The resulting pointer is greater 314 | than or equal to the given pointer. 315 | */ 316 | @nogc nothrow pure 317 | void* alignUpTo()(void* ptr, uint alignment) 318 | { 319 | import stdx.allocator.internal : isPowerOf2; 320 | assert(alignment.isPowerOf2); 321 | immutable uint slack = cast(size_t) ptr & (alignment - 1U); 322 | return slack ? ptr + alignment - slack : ptr; 323 | } 324 | 325 | @safe @nogc nothrow pure 326 | bool isGoodStaticAlignment()(uint x) 327 | { 328 | import stdx.allocator.internal : isPowerOf2; 329 | return x.isPowerOf2; 330 | } 331 | 332 | @safe @nogc nothrow pure 333 | bool isGoodDynamicAlignment()(uint x) 334 | { 335 | import stdx.allocator.internal : isPowerOf2; 336 | return x.isPowerOf2 && x >= (void*).sizeof; 337 | } 338 | 339 | /** 340 | The default $(D reallocate) function first attempts to use $(D expand). 
If $(D
341 | Allocator.expand) is not defined or returns $(D false), $(D reallocate)
342 | allocates a new block of memory of appropriate size and copies data from the old
343 | block to the new block. Finally, if $(D Allocator) defines $(D deallocate), $(D
344 | reallocate) uses it to free the old memory block.
345 | 
346 | $(D reallocate) does not attempt to use $(D Allocator.reallocate) even if
347 | defined. This is deliberate so allocators may use it internally within their own
348 | implementation of $(D reallocate).
349 | 
350 | */
351 | bool reallocate(Allocator)(auto ref Allocator a, ref void[] b, size_t s)
352 | {
353 |     if (b.length == s) return true;
354 |     static if (__traits(hasMember, Allocator, "expand"))
355 |     {
356 |         if (b.length <= s && a.expand(b, s - b.length)) return true;
357 |     }
358 |     auto newB = a.allocate(s);
359 |     if (newB.length != s) return false;
360 |     if (newB.length <= b.length) newB[] = b[0 .. newB.length];
361 |     else newB[0 .. b.length] = b[];
362 |     static if (__traits(hasMember, Allocator, "deallocate"))
363 |         a.deallocate(b);
364 |     b = newB;
365 |     return true;
366 | }
367 | 
368 | /**
369 | 
370 | The default $(D alignedReallocate) function first attempts to use $(D expand).
371 | If $(D Allocator.expand) is not defined or returns $(D false), $(D
372 | alignedReallocate) allocates a new block of memory of appropriate size and
373 | copies data from the old block to the new block. Finally, if $(D Allocator)
374 | defines $(D deallocate), $(D alignedReallocate) uses it to free the old memory
375 | block.
376 | 
377 | $(D alignedReallocate) does not attempt to use $(D Allocator.reallocate) even if
378 | defined. This is deliberate so allocators may use it internally within their own
379 | implementation of $(D alignedReallocate).
380 | 
381 | */
382 | bool alignedReallocate(Allocator)(auto ref Allocator alloc,
383 |     ref void[] b, size_t s, uint a)
384 | {
385 |     static if (__traits(hasMember, Allocator, "expand"))
386 |     {
387 |         if (b.length <= s && b.ptr.alignedAt(a)
388 |             && alloc.expand(b, s - b.length)) return true;
389 |     }
390 |     else
391 |     {
392 |         if (b.length == s) return true;
393 |     }
394 |     auto newB = alloc.alignedAllocate(s, a); if (newB.length != s) return false;
395 |     if (newB.length <= b.length) newB[] = b[0 .. newB.length];
396 |     else newB[0 .. b.length] = b[];
397 |     static if (__traits(hasMember, Allocator, "deallocate"))
398 |         alloc.deallocate(b);
399 |     b = newB;
400 |     return true;
401 | }
402 | 
403 | /**
404 | Forwards each of the methods in `funs` (if defined) to `member`.
405 | */
406 | enum forwardToMember = (string member, string[] funs...)
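// This CTFE lambda emits, for every function name in `funs` that `member`
// actually defines, a forwarding method with a matching parameter list, so
// wrapper allocators can delegate wholesale.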
407 | { 408 | string result = " import std.traits : Parameters;\n"; 409 | foreach (fun; funs) 410 | { 411 | result ~= " 412 | static if (__traits(hasMember, typeof("~member~"), `"~fun~"`)) 413 | { 414 | static if (__traits(isTemplate, "~member~"."~fun~")) 415 | auto ref "~fun~"(Parameters!(typeof("~member~"."~fun~"!())) args) 416 | { 417 | return "~member~"."~fun~"(args); 418 | } 419 | else 420 | auto ref "~fun~"(Parameters!(typeof("~member~"."~fun~")) args) 421 | { 422 | return "~member~"."~fun~"(args); 423 | } 424 | }\n"; 425 | } 426 | return result; 427 | }; 428 | 429 | version(unittest) 430 | { 431 | import stdx.allocator : IAllocator, ISharedAllocator; 432 | 433 | package void testAllocator(alias make)() 434 | { 435 | import std.conv : text; 436 | import stdx.allocator.internal : isPowerOf2; 437 | import std.stdio : writeln, stderr; 438 | import stdx.allocator.internal : Ternary; 439 | alias A = typeof(make()); 440 | scope(failure) stderr.writeln("testAllocator failed for ", A.stringof); 441 | 442 | auto a = make(); 443 | 444 | // Test alignment 445 | static assert(A.alignment.isPowerOf2); 446 | 447 | // Test goodAllocSize 448 | assert(a.goodAllocSize(1) >= A.alignment, 449 | text(a.goodAllocSize(1), " < ", A.alignment)); 450 | assert(a.goodAllocSize(11) >= 11.roundUpToMultipleOf(A.alignment)); 451 | assert(a.goodAllocSize(111) >= 111.roundUpToMultipleOf(A.alignment)); 452 | 453 | // Test allocate 454 | assert(a.allocate(0) is null); 455 | 456 | auto b1 = a.allocate(1); 457 | assert(b1.length == 1); 458 | auto b2 = a.allocate(2); 459 | assert(b2.length == 2); 460 | assert(b2.ptr + b2.length <= b1.ptr || b1.ptr + b1.length <= b2.ptr); 461 | 462 | // Test alignedAllocate 463 | static if (__traits(hasMember, A, "alignedAllocate")) 464 | {{ 465 | auto b3 = a.alignedAllocate(1, 256); 466 | assert(b3.length <= 1); 467 | assert(b3.ptr.alignedAt(256)); 468 | assert(a.alignedReallocate(b3, 2, 512)); 469 | assert(b3.ptr.alignedAt(512)); 470 | static if (__traits(hasMember, A, "alignedDeallocate")) 471 | { 472 | a.alignedDeallocate(b3); 473 | } 474 | }} 475 | else 476 | { 477 | static assert(!__traits(hasMember, A, "alignedDeallocate")); 478 | // This seems to be a bug in the compiler: 479 | //static assert(!__traits(hasMember, A, "alignedReallocate"), A.stringof); 480 | } 481 | 482 | static if (__traits(hasMember, A, "allocateAll")) 483 | {{ 484 | auto aa = make(); 485 | if (aa.allocateAll().ptr) 486 | { 487 | // Can't get any more memory 488 | assert(!aa.allocate(1).ptr); 489 | } 490 | auto ab = make(); 491 | const b4 = ab.allocateAll(); 492 | assert(b4.length); 493 | // Can't get any more memory 494 | assert(!ab.allocate(1).ptr); 495 | }} 496 | 497 | static if (__traits(hasMember, A, "expand")) 498 | {{ 499 | assert(a.expand(b1, 0)); 500 | auto len = b1.length; 501 | if (a.expand(b1, 102)) 502 | { 503 | assert(b1.length == len + 102, text(b1.length, " != ", len + 102)); 504 | } 505 | auto aa = make(); 506 | void[] b5 = null; 507 | assert(aa.expand(b5, 0)); 508 | assert(b5 is null); 509 | assert(!aa.expand(b5, 1)); 510 | assert(b5.length == 0); 511 | }} 512 | 513 | void[] b6 = null; 514 | assert(a.reallocate(b6, 0)); 515 | assert(b6.length == 0); 516 | assert(a.reallocate(b6, 1)); 517 | assert(b6.length == 1, text(b6.length)); 518 | assert(a.reallocate(b6, 2)); 519 | assert(b6.length == 2); 520 | 521 | // Test owns 522 | static if (__traits(hasMember, A, "owns")) 523 | {{ 524 | assert(a.owns(null) == Ternary.no); 525 | assert(a.owns(b1) == Ternary.yes); 526 | assert(a.owns(b2) == Ternary.yes); 
527 | assert(a.owns(b6) == Ternary.yes); 528 | }} 529 | 530 | static if (__traits(hasMember, A, "resolveInternalPointer")) 531 | {{ 532 | void[] p; 533 | assert(a.resolveInternalPointer(null, p) == Ternary.no); 534 | Ternary r = a.resolveInternalPointer(b1.ptr, p); 535 | assert(p.ptr is b1.ptr && p.length >= b1.length); 536 | r = a.resolveInternalPointer(b1.ptr + b1.length / 2, p); 537 | assert(p.ptr is b1.ptr && p.length >= b1.length); 538 | r = a.resolveInternalPointer(b2.ptr, p); 539 | assert(p.ptr is b2.ptr && p.length >= b2.length); 540 | r = a.resolveInternalPointer(b2.ptr + b2.length / 2, p); 541 | assert(p.ptr is b2.ptr && p.length >= b2.length); 542 | r = a.resolveInternalPointer(b6.ptr, p); 543 | assert(p.ptr is b6.ptr && p.length >= b6.length); 544 | r = a.resolveInternalPointer(b6.ptr + b6.length / 2, p); 545 | assert(p.ptr is b6.ptr && p.length >= b6.length); 546 | static int[10] b7 = [ 1, 2, 3 ]; 547 | assert(a.resolveInternalPointer(b7.ptr, p) == Ternary.no); 548 | assert(a.resolveInternalPointer(b7.ptr + b7.length / 2, p) == Ternary.no); 549 | assert(a.resolveInternalPointer(b7.ptr + b7.length, p) == Ternary.no); 550 | int[3] b8 = [ 1, 2, 3 ]; 551 | assert(a.resolveInternalPointer(b8.ptr, p) == Ternary.no); 552 | assert(a.resolveInternalPointer(b8.ptr + b8.length / 2, p) == Ternary.no); 553 | assert(a.resolveInternalPointer(b8.ptr + b8.length, p) == Ternary.no); 554 | }} 555 | } 556 | 557 | package void testAllocatorObject(AllocInterface)(AllocInterface a) 558 | if (is(AllocInterface : IAllocator) 559 | || is (AllocInterface : shared ISharedAllocator)) 560 | { 561 | import std.conv : text; 562 | import stdx.allocator.internal : isPowerOf2; 563 | import std.stdio : writeln, stderr; 564 | import stdx.allocator.internal : Ternary; 565 | scope(failure) stderr.writeln("testAllocatorObject failed for ", 566 | AllocInterface.stringof); 567 | 568 | assert(a); 569 | 570 | // Test alignment 571 | assert(a.alignment.isPowerOf2); 572 | 573 | // Test goodAllocSize 574 | assert(a.goodAllocSize(1) >= a.alignment, 575 | text(a.goodAllocSize(1), " < ", a.alignment)); 576 | assert(a.goodAllocSize(11) >= 11.roundUpToMultipleOf(a.alignment)); 577 | assert(a.goodAllocSize(111) >= 111.roundUpToMultipleOf(a.alignment)); 578 | 579 | // Test empty 580 | assert(a.empty != Ternary.no); 581 | 582 | // Test allocate 583 | assert(a.allocate(0) is null); 584 | 585 | auto b1 = a.allocate(1); 586 | assert(b1.length == 1); 587 | auto b2 = a.allocate(2); 588 | assert(b2.length == 2); 589 | assert(b2.ptr + b2.length <= b1.ptr || b1.ptr + b1.length <= b2.ptr); 590 | 591 | // Test alignedAllocate 592 | { 593 | // If not implemented it will return null, so those should pass 594 | auto b3 = a.alignedAllocate(1, 256); 595 | assert(b3.length <= 1); 596 | assert(b3.ptr.alignedAt(256)); 597 | if (a.alignedReallocate(b3, 1, 256)) 598 | { 599 | // If it is false, then the wrapped allocator did not implement 600 | // this 601 | assert(a.alignedReallocate(b3, 2, 512)); 602 | assert(b3.ptr.alignedAt(512)); 603 | } 604 | } 605 | 606 | // Test allocateAll 607 | { 608 | auto aa = a.allocateAll(); 609 | if (aa.ptr) 610 | { 611 | // Can't get any more memory 612 | assert(!a.allocate(1).ptr); 613 | a.deallocate(aa); 614 | } 615 | const b4 = a.allocateAll(); 616 | if (b4.ptr) 617 | { 618 | // Can't get any more memory 619 | assert(!a.allocate(1).ptr); 620 | } 621 | } 622 | 623 | // Test expand 624 | { 625 | assert(a.expand(b1, 0)); 626 | auto len = b1.length; 627 | if (a.expand(b1, 102)) 628 | { 629 | assert(b1.length == len + 
102, text(b1.length, " != ", len + 102)); 630 | } 631 | } 632 | 633 | void[] b6 = null; 634 | assert(a.reallocate(b6, 0)); 635 | assert(b6.length == 0); 636 | assert(a.reallocate(b6, 1)); 637 | assert(b6.length == 1, text(b6.length)); 638 | assert(a.reallocate(b6, 2)); 639 | assert(b6.length == 2); 640 | 641 | // Test owns 642 | { 643 | if (a.owns(null) != Ternary.unknown) 644 | { 645 | assert(a.owns(null) == Ternary.no); 646 | assert(a.owns(b1) == Ternary.yes); 647 | assert(a.owns(b2) == Ternary.yes); 648 | assert(a.owns(b6) == Ternary.yes); 649 | } 650 | } 651 | 652 | // Test resolveInternalPointer 653 | { 654 | void[] p; 655 | if (a.resolveInternalPointer(null, p) != Ternary.unknown) 656 | { 657 | assert(a.resolveInternalPointer(null, p) == Ternary.no); 658 | Ternary r = a.resolveInternalPointer(b1.ptr, p); 659 | assert(p.ptr is b1.ptr && p.length >= b1.length); 660 | r = a.resolveInternalPointer(b1.ptr + b1.length / 2, p); 661 | assert(p.ptr is b1.ptr && p.length >= b1.length); 662 | r = a.resolveInternalPointer(b2.ptr, p); 663 | assert(p.ptr is b2.ptr && p.length >= b2.length); 664 | r = a.resolveInternalPointer(b2.ptr + b2.length / 2, p); 665 | assert(p.ptr is b2.ptr && p.length >= b2.length); 666 | r = a.resolveInternalPointer(b6.ptr, p); 667 | assert(p.ptr is b6.ptr && p.length >= b6.length); 668 | r = a.resolveInternalPointer(b6.ptr + b6.length / 2, p); 669 | assert(p.ptr is b6.ptr && p.length >= b6.length); 670 | static int[10] b7 = [ 1, 2, 3 ]; 671 | assert(a.resolveInternalPointer(b7.ptr, p) == Ternary.no); 672 | assert(a.resolveInternalPointer(b7.ptr + b7.length / 2, p) == Ternary.no); 673 | assert(a.resolveInternalPointer(b7.ptr + b7.length, p) == Ternary.no); 674 | int[3] b8 = [ 1, 2, 3 ]; 675 | assert(a.resolveInternalPointer(b8.ptr, p) == Ternary.no); 676 | assert(a.resolveInternalPointer(b8.ptr + b8.length / 2, p) == Ternary.no); 677 | assert(a.resolveInternalPointer(b8.ptr + b8.length, p) == Ternary.no); 678 | } 679 | } 680 | 681 | // Test deallocateAll 682 | { 683 | if (a.deallocateAll()) 684 | { 685 | if (a.empty != Ternary.unknown) 686 | { 687 | assert(a.empty == Ternary.yes); 688 | } 689 | } 690 | } 691 | } 692 | } 693 | -------------------------------------------------------------------------------- /source/stdx/allocator/gc_allocator.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.gc_allocator; 3 | import stdx.allocator.common; 4 | 5 | version (D_BetterC) { 6 | import stdx.allocator.building_blocks.null_allocator; 7 | alias GCAllocator = NullAllocator; 8 | } else 9 | version = HasDRuntime; 10 | 11 | version (HasDRuntime): 12 | 13 | /** 14 | D's built-in garbage-collected allocator. 15 | */ 16 | struct GCAllocator 17 | { 18 | import core.memory : GC; 19 | import stdx.allocator.internal : Ternary; 20 | @system unittest { testAllocator!(() => GCAllocator.instance); } 21 | 22 | /** 23 | The alignment is a static constant equal to $(D platformAlignment), which 24 | ensures proper alignment for any D data type. 25 | */ 26 | enum uint alignment = platformAlignment; 27 | 28 | /** 29 | Standard allocator methods per the semantics defined above. The $(D 30 | deallocate) and $(D reallocate) methods are $(D @system) because they may 31 | move memory around, leaving dangling pointers in user code. 32 | */ 33 | static pure nothrow @trusted void[] allocate()(size_t bytes) 34 | { 35 | if (!bytes) return null; 36 | auto p = GC.malloc(bytes); 37 | return p ? p[0 .. 
bytes] : null;
38 |     }
39 | 
40 |     /// Ditto
41 |     static @system bool expand()(ref void[] b, size_t delta)
42 |     {
43 |         if (delta == 0) return true;
44 |         if (b is null) return false;
45 |         immutable curLength = GC.sizeOf(b.ptr);
46 |         assert(curLength != 0); // we have a valid GC pointer here
47 |         immutable desired = b.length + delta;
48 |         if (desired > curLength) // check to see if the current block can't hold the data
49 |         {
50 |             immutable sizeRequest = desired - curLength;
51 |             immutable newSize = GC.extend(b.ptr, sizeRequest, sizeRequest);
52 |             if (newSize == 0)
53 |             {
54 |                 // expansion unsuccessful
55 |                 return false;
56 |             }
57 |             assert(newSize >= desired);
58 |         }
59 |         b = b.ptr[0 .. desired];
60 |         return true;
61 |     }
62 | 
63 |     /// Ditto
64 |     static pure nothrow @system bool reallocate()(ref void[] b, size_t newSize)
65 |     {
66 |         import core.exception : OutOfMemoryError;
67 |         try
68 |         {
69 |             auto p = cast(ubyte*) GC.realloc(b.ptr, newSize);
70 |             b = p[0 .. newSize];
71 |         }
72 |         catch (OutOfMemoryError)
73 |         {
74 |             // leave the block in place, tell caller
75 |             return false;
76 |         }
77 |         return true;
78 |     }
79 | 
80 |     /// Ditto
81 |     pure nothrow
82 |     static Ternary resolveInternalPointer()(const void* p, ref void[] result)
83 |     {
84 |         auto r = GC.addrOf(cast(void*) p);
85 |         if (!r) return Ternary.no;
86 |         result = r[0 .. GC.sizeOf(r)];
87 |         return Ternary.yes;
88 |     }
89 | 
90 |     /// Ditto
91 |     static pure nothrow @system bool deallocate()(void[] b)
92 |     {
93 |         GC.free(b.ptr);
94 |         return true;
95 |     }
96 | 
97 |     /// Ditto
98 |     static size_t goodAllocSize()(size_t n)
99 |     {
100 |         if (n == 0)
101 |             return 0;
102 |         if (n <= 16)
103 |             return 16;
104 | 
105 |         import core.bitop : bsr;
106 | 
107 |         auto largestBit = bsr(n-1) + 1;
108 |         if (largestBit <= 12) // 4096 or less
109 |             return size_t(1) << largestBit;
110 | 
111 |         // larger, we use a multiple of 4096.
112 |         return ((n + 4095) / 4096) * 4096;
113 |     }
114 | 
115 |     /**
116 |     Returns the global instance of this allocator type. The garbage collected allocator is
117 |     thread-safe, therefore all of its methods are $(D static) and `instance` itself is
118 |     $(D shared).
119 |     */
120 |     enum GCAllocator instance = GCAllocator();
121 | 
122 |     // Leave it undocumented for now.
123 |     static nothrow @trusted void collect()()
124 |     {
125 |         GC.collect();
126 |     }
127 | }
128 | 
129 | ///
130 | @system unittest
131 | {
132 |     auto buffer = GCAllocator.instance.allocate(1024 * 1024 * 4);
133 |     // deallocate upon scope's end (alternatively: leave it to collection)
134 |     scope(exit) GCAllocator.instance.deallocate(buffer);
135 |     //...
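    // Illustration: the block is ordinary mutable memory
    (cast(ubyte[]) buffer)[0] = 42;
    assert((cast(ubyte[]) buffer)[0] == 42);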
136 | } 137 | 138 | @system unittest 139 | { 140 | auto b = GCAllocator.instance.allocate(10_000); 141 | assert(GCAllocator.instance.expand(b, 1)); 142 | } 143 | 144 | @system unittest 145 | { 146 | import core.memory : GC; 147 | import stdx.allocator.internal : Ternary; 148 | 149 | // test allocation sizes 150 | assert(GCAllocator.instance.goodAllocSize(1) == 16); 151 | for (size_t s = 16; s <= 8192; s *= 2) 152 | { 153 | assert(GCAllocator.instance.goodAllocSize(s) == s); 154 | assert(GCAllocator.instance.goodAllocSize(s - (s / 2) + 1) == s); 155 | 156 | auto buffer = GCAllocator.instance.allocate(s); 157 | scope(exit) GCAllocator.instance.deallocate(buffer); 158 | 159 | void[] p; 160 | assert(GCAllocator.instance.resolveInternalPointer(null, p) == Ternary.no); 161 | Ternary r = GCAllocator.instance.resolveInternalPointer(buffer.ptr, p); 162 | assert(p.ptr is buffer.ptr && p.length >= buffer.length); 163 | 164 | assert(GC.sizeOf(buffer.ptr) == s); 165 | 166 | // the GC should provide powers of 2 as "good" sizes, but other sizes are allowed, too 167 | version(none) 168 | { 169 | auto buffer2 = GCAllocator.instance.allocate(s - (s / 2) + 1); 170 | scope(exit) GCAllocator.instance.deallocate(buffer2); 171 | assert(GC.sizeOf(buffer2.ptr) == s); 172 | } 173 | } 174 | 175 | // anything above a page is simply rounded up to the next multiple of the page size 176 | assert(GCAllocator.instance.goodAllocSize(4096 * 4 + 1) == 4096 * 5); 177 | } 178 | -------------------------------------------------------------------------------- /source/stdx/allocator/internal.d: -------------------------------------------------------------------------------- 1 | // Private parts of Phobos 2 | module stdx.allocator.internal; 3 | 4 | import std.traits; 5 | 6 | // Bulk of emplace unittests ends here 7 | 8 | static if (is(typeof({ import std.typecons : Ternary; }))) 9 | { 10 | public import std.typecons : Ternary; 11 | } 12 | else static if (is(typeof({ import std.experimental.allocator.common : Ternary; }))) 13 | { 14 | public import std.experimental.allocator.common : Ternary; 15 | } 16 | else static assert(0, "Oops, don't know how to find Ternary"); 17 | 18 | /** 19 | Check whether a number is an integer power of two. 20 | 21 | Note that only positive numbers can be integer powers of two. This 22 | function always returns `false` if `x` is negative or zero. 23 | 24 | Params: 25 | x = the number to test 26 | 27 | Returns: 28 | `true` if `x` is an integer power of two. 
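    Implementation note: for unsigned integers the test is branchless.
    `y & -y` isolates the lowest set bit of `y`, and it exceeds `y - 1`
    exactly when `y` has no other bits set, i.e. when `y` is nonzero and a
    single power of two.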
29 | */ 30 | bool isPowerOf2(X)(const X x) pure @safe nothrow @nogc 31 | if (isNumeric!X) 32 | { 33 | static if (isFloatingPoint!X) 34 | { 35 | import std.math : frexp; 36 | int exp; 37 | const X sig = frexp(x, exp); 38 | 39 | return (exp != int.min) && (sig is cast(X) 0.5L); 40 | } 41 | else 42 | { 43 | static if (isSigned!X) 44 | { 45 | auto y = cast(typeof(x + 0))x; 46 | return y > 0 && !(y & (y - 1)); 47 | } 48 | else 49 | { 50 | auto y = cast(typeof(x + 0u))x; 51 | return (y & -y) > (y - 1); 52 | } 53 | } 54 | } 55 | /// 56 | @safe unittest 57 | { 58 | import std.math : pow; 59 | 60 | assert( isPowerOf2(1.0L)); 61 | assert( isPowerOf2(2.0L)); 62 | assert( isPowerOf2(0.5L)); 63 | assert( isPowerOf2(pow(2.0L, 96))); 64 | assert( isPowerOf2(pow(2.0L, -77))); 65 | 66 | assert(!isPowerOf2(-2.0L)); 67 | assert(!isPowerOf2(-0.5L)); 68 | assert(!isPowerOf2(0.0L)); 69 | assert(!isPowerOf2(4.315)); 70 | assert(!isPowerOf2(1.0L / 3.0L)); 71 | 72 | assert(!isPowerOf2(real.nan)); 73 | assert(!isPowerOf2(real.infinity)); 74 | } 75 | /// 76 | @safe unittest 77 | { 78 | assert( isPowerOf2(1)); 79 | assert( isPowerOf2(2)); 80 | assert( isPowerOf2(1uL << 63)); 81 | 82 | assert(!isPowerOf2(-4)); 83 | assert(!isPowerOf2(0)); 84 | assert(!isPowerOf2(1337u)); 85 | } 86 | 87 | @safe unittest 88 | { 89 | import std.meta : AliasSeq; 90 | import std.math : pow; 91 | 92 | immutable smallP2 = pow(2.0L, -62); 93 | immutable bigP2 = pow(2.0L, 50); 94 | immutable smallP7 = pow(7.0L, -35); 95 | immutable bigP7 = pow(7.0L, 30); 96 | 97 | foreach (X; AliasSeq!(float, double, real)) 98 | { 99 | immutable min_sub = X.min_normal * X.epsilon; 100 | 101 | foreach (x; [smallP2, min_sub, X.min_normal, .25L, 0.5L, 1.0L, 102 | 2.0L, 8.0L, pow(2.0L, X.max_exp - 1), bigP2]) 103 | { 104 | assert( isPowerOf2(cast(X) x)); 105 | assert(!isPowerOf2(cast(X)-x)); 106 | } 107 | 108 | foreach (x; [0.0L, 3 * min_sub, smallP7, 0.1L, 1337.0L, bigP7, X.max, real.nan, real.infinity]) 109 | { 110 | assert(!isPowerOf2(cast(X) x)); 111 | assert(!isPowerOf2(cast(X)-x)); 112 | } 113 | } 114 | 115 | foreach (X; AliasSeq!(byte, ubyte, short, ushort, int, uint, long, ulong)) 116 | { 117 | foreach (x; [1, 2, 4, 8, (X.max >>> 1) + 1]) 118 | { 119 | assert( isPowerOf2(cast(X) x)); 120 | static if (isSigned!X) 121 | assert(!isPowerOf2(cast(X)-x)); 122 | } 123 | 124 | foreach (x; [0, 3, 5, 13, 77, X.min, X.max]) 125 | assert(!isPowerOf2(cast(X) x)); 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /source/stdx/allocator/mallocator.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.mallocator; 3 | import stdx.allocator.common; 4 | 5 | /** 6 | The C heap allocator. 7 | */ 8 | struct Mallocator 9 | { 10 | @system unittest { testAllocator!(() => Mallocator.instance); } 11 | 12 | /** 13 | The alignment is a static constant equal to $(D platformAlignment), which 14 | ensures proper alignment for any D data type. 15 | */ 16 | enum uint alignment = platformAlignment; 17 | 18 | /** 19 | Standard allocator methods per the semantics defined above. The 20 | $(D deallocate) and $(D reallocate) methods are $(D @system) because they 21 | may move memory around, leaving dangling pointers in user code. Somewhat 22 | paradoxically, $(D malloc) is $(D @safe) but that's only useful to safe 23 | programs that can afford to leak the memory they allocate. 
24 | */ 25 | @trusted @nogc nothrow 26 | static void[] allocate()(size_t bytes) 27 | { 28 | import core.stdc.stdlib : malloc; 29 | if (!bytes) return null; 30 | auto p = malloc(bytes); 31 | return p ? p[0 .. bytes] : null; 32 | } 33 | 34 | /// Ditto 35 | @system @nogc nothrow 36 | static bool deallocate()(void[] b) 37 | { 38 | import core.stdc.stdlib : free; 39 | free(b.ptr); 40 | return true; 41 | } 42 | 43 | /// Ditto 44 | @system @nogc nothrow 45 | static bool reallocate()(ref void[] b, size_t s) 46 | { 47 | import core.stdc.stdlib : realloc; 48 | if (!s) 49 | { 50 | // fuzzy area in the C standard, see http://goo.gl/ZpWeSE 51 | // so just deallocate and nullify the pointer 52 | deallocate(b); 53 | b = null; 54 | return true; 55 | } 56 | auto p = cast(ubyte*) realloc(b.ptr, s); 57 | if (!p) return false; 58 | b = p[0 .. s]; 59 | return true; 60 | } 61 | 62 | /** 63 | Returns the global instance of this allocator type. The C heap allocator is 64 | thread-safe, therefore all of its methods are $(D static) and `instance` itself is 65 | a compile-time $(D enum) constant. 66 | */ 67 | enum Mallocator instance = Mallocator(); 68 | } 69 | 70 | /// 71 | @nogc nothrow 72 | @system unittest 73 | { 74 | auto buffer = Mallocator.instance.allocate(1024 * 1024 * 4); 75 | scope(exit) Mallocator.instance.deallocate(buffer); 76 | //... 77 | } 78 | 79 | @nogc nothrow 80 | @system unittest 81 | { 82 | @nogc nothrow 83 | static void test(A)() 84 | { 85 | int* p = null; 86 | p = cast(int*) A.instance.allocate(int.sizeof); 87 | scope(exit) A.instance.deallocate(p[0 .. int.sizeof]); 88 | *p = 42; 89 | assert(*p == 42); 90 | } 91 | test!Mallocator(); 92 | } 93 | 94 | @nogc nothrow 95 | @system unittest 96 | { 97 | static void test(A)() 98 | { 99 | import stdx.allocator : make; 100 | Object p = null; 101 | p = A.instance.make!Object(); 102 | assert(p !is null); 103 | } 104 | 105 | test!Mallocator(); 106 | } 107 | 108 | version (Posix) 109 | @nogc nothrow 110 | private extern(C) int posix_memalign(void**, size_t, size_t); 111 | 112 | version (Windows) 113 | { 114 | // On 32-bit Windows, DMD's DigitalMars C standard library (snn.lib) lacks 115 | // the _aligned_xxx function family 116 | version(CRuntime_DigitalMars) 117 | { 118 | // Helper to access the header written just before the aligned pointer; 119 | // the header keeps track of the size (required by realloc) and of 120 | // the base pointer (required by free). 
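        // An illustrative layout sketch (the exact amount of padding depends
        // on the requested alignment):
        //
        //   basePtr                                      alignedPtr
        //   v                                            v
        //   [ padding... | AlignInfo { basePtr, size } ][ payload: size bytes ]
        //                  ^ at address alignedPtr - AlignInfo.sizeof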
121 | private struct AlignInfo 122 | { 123 | void* basePtr; 124 | size_t size; 125 | 126 | @nogc nothrow 127 | static AlignInfo* opCall()(void* ptr) 128 | { 129 | return cast(AlignInfo*) (ptr - AlignInfo.sizeof); 130 | } 131 | } 132 | 133 | @nogc nothrow 134 | private void* _aligned_malloc()(size_t size, size_t alignment) 135 | { 136 | import core.stdc.stdlib : malloc; 137 | size_t offset = alignment + size_t.sizeof * 2 - 1; 138 | 139 | // unaligned chunk 140 | void* basePtr = malloc(size + offset); 141 | if (!basePtr) return null; 142 | 143 | // get aligned location within the chunk 144 | void* alignedPtr = cast(void**)((cast(size_t)(basePtr) + offset) 145 | & ~(alignment - 1)); 146 | 147 | // write the header before the aligned pointer 148 | AlignInfo* head = AlignInfo(alignedPtr); 149 | head.basePtr = basePtr; 150 | head.size = size; 151 | 152 | return alignedPtr; 153 | } 154 | 155 | @nogc nothrow 156 | private void* _aligned_realloc()(void* ptr, size_t size, size_t alignment) 157 | { 158 | import core.stdc.stdlib : free; 159 | import core.stdc.string : memcpy; 160 | 161 | if (!ptr) return _aligned_malloc(size, alignment); 162 | 163 | // get the header from the existing pointer 164 | AlignInfo* head = AlignInfo(ptr); 165 | 166 | // get a new aligned block 167 | void* alignedPtr = _aligned_malloc(size, alignment); 168 | if (!alignedPtr) 169 | { 170 | // per https://msdn.microsoft.com/en-us/library/ms235462.aspx, section 171 | // "Return value": on failure the original block is left unchanged 172 | return null; 173 | } 174 | 175 | // copy existing data 176 | memcpy(alignedPtr, ptr, head.size); 177 | free(head.basePtr); 178 | 179 | return alignedPtr; 180 | } 181 | 182 | @nogc nothrow 183 | private void _aligned_free()(void *ptr) 184 | { 185 | import core.stdc.stdlib : free; 186 | if (!ptr) return; 187 | AlignInfo* head = AlignInfo(ptr); 188 | free(head.basePtr); 189 | } 190 | 191 | } 192 | // DMD Win 64 bit uses the Microsoft C standard library, which implements them 193 | else 194 | { 195 | @nogc nothrow private extern(C) void* _aligned_malloc(size_t, size_t); 196 | @nogc nothrow private extern(C) void _aligned_free(void *memblock); 197 | @nogc nothrow private extern(C) void* _aligned_realloc(void *, size_t, size_t); 198 | } 199 | } 200 | 201 | /** 202 | Aligned allocator using OS-specific primitives, under a uniform API. 203 | */ 204 | version (WebAssembly) {} else version = HasMemAlign; 205 | 206 | version (HasMemAlign) 207 | struct AlignedMallocator 208 | { 209 | @system unittest { testAllocator!(() => typeof(this).instance); } 210 | 211 | /** 212 | The default alignment is $(D platformAlignment). 213 | */ 214 | enum uint alignment = platformAlignment; 215 | 216 | /** 217 | Forwards to $(D alignedAllocate(bytes, platformAlignment)). 218 | */ 219 | @trusted @nogc nothrow 220 | static void[] allocate()(size_t bytes) 221 | { 222 | if (!bytes) return null; 223 | return alignedAllocate(bytes, alignment); 224 | } 225 | 226 | /** 227 | Uses $(HTTP man7.org/linux/man-pages/man3/posix_memalign.3.html, 228 | $(D posix_memalign)) on Posix and 229 | $(HTTP msdn.microsoft.com/en-us/library/8z34s9c6(v=vs.80).aspx, 230 | $(D _aligned_malloc)) on Windows. 
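    `posix_memalign` itself requires the requested alignment to be a power of
    two multiple of `(void*).sizeof`; the Posix implementation below asserts
    this precondition before making the call.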
231 | */ 232 | version(Posix) 233 | @trusted @nogc nothrow 234 | static void[] alignedAllocate()(size_t bytes, uint a) 235 | { 236 | import core.stdc.errno : ENOMEM, EINVAL; 237 | assert(a.isGoodDynamicAlignment); 238 | void* result; 239 | auto code = posix_memalign(&result, a, bytes); 240 | if (code == ENOMEM) 241 | return null; 242 | 243 | else if (code == EINVAL) 244 | { 245 | assert(0, "the requested alignment is not a power of two " 246 | ~"multiple of (void*).sizeof, as required by posix_memalign!"); 247 | } 248 | else if (code != 0) 249 | assert(0, "posix_memalign returned an unknown code!"); 250 | 251 | else 252 | return result[0 .. bytes]; 253 | } 254 | else version(Windows) 255 | @trusted @nogc nothrow 256 | static void[] alignedAllocate()(size_t bytes, uint a) 257 | { 258 | auto result = _aligned_malloc(bytes, a); 259 | return result ? result[0 .. bytes] : null; 260 | } 261 | else static assert(0); 262 | 263 | /** 264 | Calls $(D free(b.ptr)) on Posix and 265 | $(HTTP msdn.microsoft.com/en-US/library/17b5h8td(v=vs.80).aspx, 266 | $(D _aligned_free(b.ptr))) on Windows. 267 | */ 268 | version (Posix) 269 | @system @nogc nothrow 270 | static bool deallocate()(void[] b) 271 | { 272 | import core.stdc.stdlib : free; 273 | free(b.ptr); 274 | return true; 275 | } 276 | else version (Windows) 277 | @system @nogc nothrow 278 | static bool deallocate()(void[] b) 279 | { 280 | _aligned_free(b.ptr); 281 | return true; 282 | } 283 | else static assert(0); 284 | 285 | /** 286 | On Posix, forwards to $(D realloc). On Windows, forwards to 287 | $(D alignedReallocate(b, newSize, platformAlignment)). 288 | */ 289 | version (Posix) 290 | @system @nogc nothrow 291 | static bool reallocate()(ref void[] b, size_t newSize) 292 | { 293 | return Mallocator.instance.reallocate(b, newSize); 294 | } 295 | version (Windows) 296 | @system @nogc nothrow 297 | static bool reallocate()(ref void[] b, size_t newSize) 298 | { 299 | return alignedReallocate(b, newSize, alignment); 300 | } 301 | 302 | /** 303 | On Posix, uses $(D alignedAllocate) and copies data around because there is 304 | no realloc for aligned memory. On Windows, calls 305 | $(HTTP msdn.microsoft.com/en-US/library/y69db7sx(v=vs.80).aspx, 306 | $(D _aligned_realloc(b.ptr, newSize, a))). 307 | */ 308 | version (Windows) 309 | @system @nogc nothrow 310 | static bool alignedReallocate()(ref void[] b, size_t s, uint a) 311 | { 312 | if (!s) 313 | { 314 | deallocate(b); 315 | b = null; 316 | return true; 317 | } 318 | auto p = cast(ubyte*) _aligned_realloc(b.ptr, s, a); 319 | if (!p) return false; 320 | b = p[0 .. s]; 321 | return true; 322 | } 323 | 324 | /** 325 | Returns the global instance of this allocator type. The C heap allocator is 326 | thread-safe, therefore all of its methods are $(D static) and `instance` itself is 327 | a compile-time $(D enum) constant. 328 | */ 329 | enum AlignedMallocator instance = AlignedMallocator(); 330 | } 331 | 332 | /// 333 | @nogc nothrow 334 | @system unittest 335 | { 336 | auto buffer = AlignedMallocator.instance.alignedAllocate(1024 * 1024 * 4, 337 | 128); 338 | scope(exit) AlignedMallocator.instance.deallocate(buffer); 339 | //... 
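    // Extra check (added for illustration): the returned block honors the
    // requested 128-byte alignment, so the low seven address bits are zero.
    assert(buffer is null || (cast(size_t) buffer.ptr & 127) == 0);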
340 | } 341 | 342 | version(unittest) version(CRuntime_DigitalMars) 343 | @nogc nothrow 344 | size_t addr(ref void* ptr) { return cast(size_t) ptr; } 345 | 346 | version(CRuntime_DigitalMars) 347 | @nogc nothrow 348 | @system unittest 349 | { 350 | void* m; 351 | 352 | m = _aligned_malloc(16, 0x10); 353 | if (m) 354 | { 355 | assert((m.addr & 0xF) == 0); 356 | _aligned_free(m); 357 | } 358 | 359 | m = _aligned_malloc(16, 0x100); 360 | if (m) 361 | { 362 | assert((m.addr & 0xFF) == 0); 363 | _aligned_free(m); 364 | } 365 | 366 | m = _aligned_malloc(16, 0x1000); 367 | if (m) 368 | { 369 | assert((m.addr & 0xFFF) == 0); 370 | _aligned_free(m); 371 | } 372 | 373 | m = _aligned_malloc(16, 0x10); 374 | if (m) 375 | { 376 | assert((cast(size_t) m & 0xF) == 0); 377 | m = _aligned_realloc(m, 32, 0x10000); 378 | if (m) assert((m.addr & 0xFFFF) == 0); 379 | _aligned_free(m); 380 | } 381 | 382 | m = _aligned_malloc(8, 0x10); 383 | if (m) 384 | { 385 | *cast(ulong*) m = 0X01234567_89ABCDEF; 386 | m = _aligned_realloc(m, 0x800, 0x1000); 387 | if (m) assert(*cast(ulong*) m == 0X01234567_89ABCDEF); 388 | _aligned_free(m); 389 | } 390 | } 391 | -------------------------------------------------------------------------------- /source/stdx/allocator/mmap_allocator.d: -------------------------------------------------------------------------------- 1 | /// 2 | module stdx.allocator.mmap_allocator; 3 | 4 | // MmapAllocator 5 | /** 6 | 7 | Allocator (currently defined only for Posix and Windows) using 8 | $(D $(LINK2 https://en.wikipedia.org/wiki/Mmap, mmap)) 9 | and $(D $(LUCKY munmap)) directly (or their Windows equivalents). There is no 10 | additional structure: each call to $(D allocate(s)) issues a call to 11 | $(D mmap(null, s, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)), 12 | and each call to $(D deallocate(b)) issues $(D munmap(b.ptr, b.length)). 13 | So $(D MmapAllocator) is usually intended for allocating large chunks to be 14 | managed by fine-grained allocators. 15 | 16 | */ 17 | struct MmapAllocator 18 | { 19 | /// The one shared instance. 20 | enum MmapAllocator instance = MmapAllocator(); 21 | 22 | /** 23 | Alignment is page-size and hardcoded to 4096 (even though on certain systems 24 | it could be larger). 25 | */ 26 | enum size_t alignment = 4096; 27 | 28 | version(Posix) 29 | { 30 | /// Allocator API. 31 | static void[] allocate()(size_t bytes) 32 | { 33 | import core.sys.posix.sys.mman : mmap, MAP_ANON, PROT_READ, 34 | PROT_WRITE, MAP_PRIVATE, MAP_FAILED; 35 | if (!bytes) return null; 36 | auto p = mmap(null, bytes, PROT_READ | PROT_WRITE, 37 | MAP_PRIVATE | MAP_ANON, -1, 0); 38 | if (p is MAP_FAILED) return null; 39 | return p[0 .. bytes]; 40 | } 41 | 42 | /// Ditto 43 | static bool deallocate()(void[] b) 44 | { 45 | import core.sys.posix.sys.mman : munmap; 46 | if (b.ptr) munmap(b.ptr, b.length) == 0 || assert(0); 47 | return true; 48 | } 49 | } 50 | else version(Windows) 51 | { 52 | import core.sys.windows.windows : VirtualAlloc, VirtualFree, MEM_COMMIT, 53 | PAGE_READWRITE, MEM_RELEASE; 54 | 55 | /// Allocator API. 56 | static void[] allocate()(size_t bytes) 57 | { 58 | if (!bytes) return null; 59 | auto p = VirtualAlloc(null, bytes, MEM_COMMIT, PAGE_READWRITE); 60 | if (p == null) 61 | return null; 62 | return p[0 .. 
bytes]; 63 | } 64 | 65 | /// Ditto 66 | static bool deallocate()(void[] b) 67 | { 68 | return b.ptr is null || VirtualFree(b.ptr, 0, MEM_RELEASE) != 0; 69 | } 70 | } 71 | } 72 | 73 | @system unittest 74 | { 75 | alias alloc = MmapAllocator.instance; 76 | auto p = alloc.allocate(100); 77 | assert(p.length == 100); 78 | alloc.deallocate(p); 79 | } 80 | -------------------------------------------------------------------------------- /source/stdx/allocator/showcase.d: -------------------------------------------------------------------------------- 1 | /** 2 | 3 | Collection of typical and useful prebuilt allocators using the given 4 | components. User code would typically import this module and use its 5 | facilities, or import individual heap building blocks and assemble them. 6 | 7 | */ 8 | module stdx.allocator.showcase; 9 | 10 | version (D_BetterC) {} else version = HasDRuntime; 11 | 12 | version (HasDRuntime): 13 | 14 | import stdx.allocator.building_blocks.fallback_allocator, 15 | stdx.allocator.gc_allocator, 16 | stdx.allocator.building_blocks.region; 17 | 18 | /** 19 | 20 | Allocator that uses stack allocation for up to $(D stackSize) bytes and 21 | then falls back to $(D Allocator). Defined as: 22 | 23 | ---- 24 | alias StackFront(size_t stackSize, Allocator) = 25 | FallbackAllocator!( 26 | InSituRegion!(stackSize, Allocator.alignment, 27 | __traits(hasMember, Allocator, "deallocate") 28 | ? Yes.defineDeallocate 29 | : No.defineDeallocate), 30 | Allocator); 31 | ---- 32 | 33 | Choosing `stackSize` is, as always, a compromise. Too small a size exhausts the 34 | stack storage after a few allocations, after which there are no gains over the 35 | backup allocator. Too large a size increases the stack consumed by the thread 36 | and may end up performing worse because it touches cold portions of the stack. 37 | 38 | */ 39 | alias StackFront(size_t stackSize, Allocator = GCAllocator) = 40 | FallbackAllocator!( 41 | InSituRegion!(stackSize, Allocator.alignment), 42 | Allocator); 43 | 44 | /// 45 | @system unittest 46 | { 47 | StackFront!4096 a; 48 | auto b = a.allocate(4000); 49 | assert(b.length == 4000); 50 | auto c = a.allocate(4000); 51 | assert(c.length == 4000); 52 | a.deallocate(b); 53 | a.deallocate(c); 54 | } 55 | 56 | /** 57 | Creates a scalable `AllocatorList` of `Regions`, each having at least 58 | `bytesPerRegion` bytes. Allocation is very fast. This allocator does not offer 59 | `deallocate` but does free all regions in its destructor. It is recommended for 60 | short-lived batch applications that count on never running out of memory. 
61 | */ 62 | auto mmapRegionList(size_t bytesPerRegion) 63 | { 64 | static struct Factory 65 | { 66 | size_t bytesPerRegion; 67 | import mir.utility : max; 68 | import stdx.allocator.building_blocks.region 69 | : Region; 70 | import stdx.allocator.mmap_allocator 71 | : MmapAllocator; 72 | this(size_t n) 73 | { 74 | bytesPerRegion = n; 75 | } 76 | auto opCall(size_t n) 77 | { 78 | return Region!MmapAllocator(max(n, bytesPerRegion)); 79 | } 80 | } 81 | import stdx.allocator.building_blocks.allocator_list 82 | : AllocatorList; 83 | import stdx.allocator.building_blocks.null_allocator 84 | : NullAllocator; 85 | auto shop = Factory(bytesPerRegion); 86 | return AllocatorList!(Factory, NullAllocator)(shop); 87 | } 88 | 89 | /// 90 | @system unittest 91 | { 92 | auto alloc = mmapRegionList(1024 * 1024); 93 | const b = alloc.allocate(100); 94 | assert(b.length == 100); 95 | } 96 | -------------------------------------------------------------------------------- /source/stdx/allocator/typed.d: -------------------------------------------------------------------------------- 1 | /** 2 | This module defines `TypedAllocator`, a statically-typed allocator that 3 | aggregates multiple untyped allocators and uses them depending on the static 4 | properties of the types allocated. For example, distinct allocators may be used 5 | for thread-local vs. thread-shared data, or for fixed-size data (`struct`, 6 | `class` objects) vs. resizable data (arrays). 7 | 8 | Macros: 9 | T2=$(TR $(D $1) $(TD $(ARGS $+))) 10 | */ 11 | 12 | module stdx.allocator.typed; 13 | 14 | import stdx.allocator; 15 | import stdx.allocator.common; 16 | import std.range.primitives : isInputRange, isForwardRange, save, empty, 17 | front, popFront; 18 | import std.typecons : Flag, Yes, No; 19 | 20 | /** 21 | Allocation-related flags dictated by type characteristics. `TypedAllocator` 22 | deduces these flags from the type being allocated and uses the appropriate 23 | allocator accordingly. 24 | */ 25 | enum AllocFlag : uint 26 | { 27 | init = 0, 28 | /** 29 | Fixed-size allocation (unlikely to get reallocated later). Examples: `int`, 30 | `double`, any `struct` or `class` type. By default it is assumed that the 31 | allocation is variable-size, i.e. susceptible to later reallocation 32 | (for example all array types). This flag is advisory, i.e. in-place resizing 33 | may be attempted for `fixedSize` allocations and may succeed. The flag is 34 | just a hint that the allocator may use allocation strategies that work well 35 | with objects of fixed size. 36 | */ 37 | fixedSize = 1, 38 | /** 39 | The type being allocated embeds no pointers. Examples: `int`, `int[]`, $(D 40 | Tuple!(int, float)). The implicit conservative assumption is that the type 41 | has members with indirections so it needs to be scanned if garbage 42 | collected. Examples of types with pointers: `int*[]`, $(D Tuple!(int, 43 | string)). 44 | */ 45 | hasNoIndirections = 4, 46 | /** 47 | By default it is conservatively assumed that allocated memory may be `cast` 48 | to `shared`, passed across threads, and deallocated in a different thread 49 | than the one that allocated it. If that's not the case, there are two 50 | options. First, `immutableShared` means the memory is allocated for 51 | `immutable` data and will be deallocated in the same thread it was 52 | allocated in. Second, `threadLocal` means the memory is not to be shared 53 | across threads at all. The two flags cannot be simultaneously present. 
54 | */ 55 | immutableShared = 8, 56 | /// ditto 57 | threadLocal = 16, 58 | } 59 | 60 | /** 61 | `TypedAllocator` acts like a chassis on which several specialized allocators 62 | can be assembled. To let the system make a choice about a particular kind of 63 | allocation, use `Default` for the respective parameters. 64 | 65 | There is a hierarchy of allocation kinds. When an allocator is implemented for 66 | a given combination of flags, it is used. Otherwise, the next down the list is 67 | chosen. 68 | 69 | $(BOOKTABLE , 70 | 71 | $(TR $(TH `AllocFlag` combination) $(TH Description)) 72 | 73 | $(T2 AllocFlag.threadLocal |$(NBSP)AllocFlag.hasNoIndirections 74 | |$(NBSP)AllocFlag.fixedSize, 75 | This is the most specific allocation policy: the memory being allocated is 76 | thread local, has no indirections at all, and will not be reallocated. Examples 77 | of types fitting this description: `int`, `double`, $(D Tuple!(int, long)), but 78 | not $(D Tuple!(int, string)), which contains an indirection.) 79 | 80 | $(T2 AllocFlag.threadLocal |$(NBSP)AllocFlag.hasNoIndirections, 81 | As above, but may be reallocated later. Examples of types fitting this 82 | description are $(D int[]), $(D double[]), $(D Tuple!(int, long)[]), but not 83 | $(D Tuple!(int, string)[]), which contains an indirection.) 84 | 85 | $(T2 AllocFlag.threadLocal, 86 | As above, but may embed indirections. Examples of types fitting this 87 | description are $(D int*[]), $(D Object[]), $(D Tuple!(int, string)[]).) 88 | 89 | $(T2 AllocFlag.immutableShared |$(NBSP)AllocFlag.hasNoIndirections 90 | |$(NBSP)AllocFlag.fixedSize, 91 | The type being allocated is `immutable` and has no pointers. The thread that 92 | allocated it must also deallocate it. Example: `immutable(int)`.) 93 | 94 | $(T2 AllocFlag.immutableShared |$(NBSP)AllocFlag.hasNoIndirections, 95 | As above, but the type may be appended to in the future. Example: `string`.) 96 | 97 | $(T2 AllocFlag.immutableShared, 98 | As above, but the type may embed references. Example: `immutable(Object)[]`.) 99 | 100 | $(T2 AllocFlag.hasNoIndirections |$(NBSP)AllocFlag.fixedSize, 101 | The type being allocated may be shared across threads, embeds no indirections, 102 | and has fixed size.) 103 | 104 | $(T2 AllocFlag.hasNoIndirections, 105 | The type being allocated may be shared across threads, may embed indirections, 106 | and has variable size.) 107 | 108 | $(T2 AllocFlag.fixedSize, 109 | The type being allocated may be shared across threads, may embed indirections, 110 | and has fixed size.) 111 | 112 | $(T2 0, The most conservative/general allocation: memory may be shared, 113 | deallocated in a different thread, may or may not be resized, and may embed 114 | references.) 115 | ) 116 | 117 | Params: 118 | PrimaryAllocator = The default allocator. 119 | Policies = Zero or more pairs consisting of an `AllocFlag` and an allocator 120 | type. 121 | */ 122 | struct TypedAllocator(PrimaryAllocator, Policies...) 123 | { 124 | import std.algorithm.sorting : isSorted; 125 | import std.meta : AliasSeq; 126 | import std.typecons : Tuple; 127 | 128 | static assert(Policies.length == 0 || isSorted([Stride2!Policies])); 129 | 130 | private template Stride2(T...) 131 | { 132 | static if (T.length >= 2) 133 | { 134 | alias Stride2 = AliasSeq!(T[0], Stride2!(T[2 .. $])); 135 | } 136 | else 137 | { 138 | alias Stride2 = AliasSeq!(T[0 .. 
$]); 139 | } 140 | } 141 | 142 | // state 143 | static if (stateSize!PrimaryAllocator) private PrimaryAllocator primary; 144 | else alias primary = PrimaryAllocator.instance; 145 | static if (Policies.length > 0) 146 | private Tuple!(Stride2!(Policies[1 .. $])) extras; 147 | 148 | private static bool match(uint have, uint want) 149 | { 150 | enum uint maskAway = 151 | ~(AllocFlag.immutableShared | AllocFlag.threadLocal); 152 | // Do we offer thread local? 153 | if (have & AllocFlag.threadLocal) 154 | { 155 | if (want & AllocFlag.threadLocal) 156 | return match(have & maskAway, want & maskAway); 157 | return false; 158 | } 159 | if (have & AllocFlag.immutableShared) 160 | { 161 | // Okay to ask for either thread local or immutable shared 162 | if (want & (AllocFlag.threadLocal 163 | | AllocFlag.immutableShared)) 164 | return match(have & maskAway, want & maskAway); 165 | return false; 166 | } 167 | // From here on we have full-blown thread sharing. 168 | if (have & AllocFlag.hasNoIndirections) 169 | { 170 | if (want & AllocFlag.hasNoIndirections) 171 | return match(have & ~AllocFlag.hasNoIndirections, 172 | want & ~AllocFlag.hasNoIndirections); 173 | return false; 174 | } 175 | // Fixed size or variable size both match. 176 | return true; 177 | } 178 | 179 | /** 180 | Given `flags` as a combination of `AllocFlag` values, or a type `T`, returns 181 | the allocator that's a closest fit in capabilities. 182 | */ 183 | auto ref allocatorFor(uint flags)() 184 | { 185 | static if (Policies.length == 0 || !match(Policies[0], flags)) 186 | { 187 | return primary; 188 | } 189 | else static if (Policies.length && match(Policies[$ - 2], flags)) 190 | { 191 | return extras[$ - 1]; 192 | } 193 | else 194 | { 195 | foreach (i, choice; Stride2!Policies) 196 | { 197 | static if (!match(choice, flags)) 198 | { 199 | return extras[i - 1]; 200 | } 201 | } 202 | assert(0); 203 | } 204 | } 205 | 206 | /// ditto 207 | auto ref allocatorFor(T)() 208 | { 209 | static if (is(T == void[])) 210 | { 211 | return primary; 212 | } 213 | else 214 | { 215 | return allocatorFor!(type2flags!T)(); 216 | } 217 | } 218 | 219 | /** 220 | Given a type `T`, returns its allocation-related flags as a combination of 221 | `AllocFlag` values. 222 | */ 223 | static uint type2flags(T)() 224 | { 225 | uint result; 226 | static if (is(T == immutable)) 227 | result |= AllocFlag.immutableShared; 228 | // `shared` types need no flag of their own: full thread sharing is 229 | // the conservative default, i.e. flag value 0. 230 | static if (!is(T == U[], U)) 231 | result |= AllocFlag.fixedSize; 232 | import std.traits : hasIndirections; 233 | static if (!hasIndirections!T) 234 | result |= AllocFlag.hasNoIndirections; 235 | return result; 236 | } 237 | 238 | /** 239 | Dynamically allocates (using the appropriate allocator chosen with 240 | `allocatorFor!T`) and then creates in the memory allocated an object of 241 | type `T`, using `args` (if any) for its initialization. Initialization 242 | occurs in the memory allocated and is otherwise semantically the same as 243 | `T(args)`. (Note that using `make!(T[])` creates a pointer to an 244 | (empty) array of `T`s, not an array. To allocate and initialize an 245 | array, use `makeArray!T` described below.) 246 | 247 | Params: 248 | T = Type of the object being created. 249 | args = Optional arguments used for initializing the created object. If not 250 | present, the object is default constructed. 251 | 252 | Returns: If `T` is a class type, returns a reference to the created `T` 253 | object. 
Otherwise, returns a `T*` pointing to the created object. In all 254 | cases, returns `null` if allocation failed. 255 | 256 | Throws: If `T`'s constructor throws, deallocates the allocated memory and 257 | propagates the exception. 258 | */ 259 | auto make(T, A...)(auto ref A args) 260 | { 261 | return .make!T(allocatorFor!T, args); 262 | } 263 | 264 | /** 265 | Create an array of `T` with `length` elements. The array is either 266 | default-initialized, filled with copies of `init`, or initialized with 267 | values fetched from `range`. 268 | 269 | Params: 270 | T = element type of the array being created 271 | length = length of the newly created array 272 | init = element used for filling the array 273 | range = range used for initializing the array elements 274 | 275 | Returns: 276 | The newly-created array, or `null` if either `length` was `0` or 277 | allocation failed. 278 | 279 | Throws: 280 | The first two overloads throw only if the used allocator's primitives do. 281 | The overloads that involve copy initialization deallocate memory and propagate the exception if the copy operation throws. 282 | */ 283 | T[] makeArray(T)(size_t length) 284 | { 285 | return .makeArray!T(allocatorFor!(T[]), length); 286 | } 287 | 288 | /// Ditto 289 | T[] makeArray(T)(size_t length, auto ref T init) 290 | { 291 | return .makeArray!T(allocatorFor!(T[]), length, init); 292 | } 293 | 294 | /// Ditto 295 | T[] makeArray(T, R)(R range) 296 | if (isInputRange!R) 297 | { 298 | return .makeArray!T(allocatorFor!(T[]), range); 299 | } 300 | 301 | /** 302 | Grows `array` by appending `delta` more elements. The needed memory is 303 | allocated using the same allocator that was used for the array type. The 304 | extra elements added are either default-initialized, filled with copies of 305 | `init`, or initialized with values fetched from `range`. 306 | 307 | Params: 308 | T = element type of the array being created 309 | array = a reference to the array being grown 310 | delta = number of elements to add (upon success the new length of `array` 311 | is $(D array.length + delta)) 312 | init = element used for filling the array 313 | range = range used for initializing the array elements 314 | 315 | Returns: 316 | `true` upon success, `false` if memory could not be allocated. In the 317 | latter case `array` is left unaffected. 318 | 319 | Throws: 320 | The first two overloads throw only if the used allocator's primitives do. 321 | The overloads that involve copy initialization deallocate memory and 322 | propagate the exception if the copy operation throws. 323 | */ 324 | bool expandArray(T)(ref T[] array, size_t delta) 325 | { 326 | return .expandArray(allocatorFor!(T[]), array, delta); 327 | } 328 | /// Ditto 329 | bool expandArray(T)(ref T[] array, size_t delta, auto ref T init) 330 | { 331 | return .expandArray(allocatorFor!(T[]), array, delta, init); 332 | } 333 | /// Ditto 334 | bool expandArray(T, R)(ref T[] array, R range) 335 | if (isInputRange!R) 336 | { 337 | return .expandArray(allocatorFor!(T[]), array, range); 338 | } 339 | 340 | /** 341 | Shrinks an array by `delta` elements using `allocatorFor!(T[])`. 342 | 343 | If $(D arr.length < delta), does nothing and returns `false`. Otherwise, 344 | destroys the last $(D delta) elements in the array and then 345 | reallocates the array's buffer. If reallocation fails, fills the array with 346 | default-initialized data. 
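    For example, shrinking a 10-element array by 4 destroys the final 4
    elements and leaves `arr` with length 6.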
347 | 348 | Params: 349 | T = element type of the array being created 350 | arr = a reference to the array being shrunk 351 | delta = number of elements to remove (upon success the new length of 352 | `arr` is $(D arr.length - delta)) 353 | 354 | Returns: 355 | `true` upon success, `false` if memory could not be reallocated. In the 356 | latter case $(D arr[$ - delta .. $]) is left with default-initialized 357 | elements. 358 | 359 | Throws: 360 | Only if the used allocator's primitives throw; this function performs 361 | no copy initialization, so there are no copy-related exceptions to 362 | propagate. 363 | */ 364 | bool shrinkArray(T)(ref T[] arr, size_t delta) 365 | { 366 | return .shrinkArray(allocatorFor!(T[]), arr, delta); 367 | } 368 | 369 | /** 370 | Destroys and then deallocates (using `allocatorFor!T`) the object pointed 371 | to by a pointer, the class object referred to by a `class` or `interface` 372 | reference, or an entire array. It is assumed the respective entities had 373 | been allocated with the same allocator. 374 | */ 375 | void dispose(T)(T* p) 376 | { 377 | return .dispose(allocatorFor!T, p); 378 | } 379 | /// Ditto 380 | void dispose(T)(T p) 381 | if (is(T == class) || is(T == interface)) 382 | { 383 | return .dispose(allocatorFor!T, p); 384 | } 385 | /// Ditto 386 | void dispose(T)(T[] array) 387 | { 388 | return .dispose(allocatorFor!(T[]), array); 389 | } 390 | } 391 | 392 | /// 393 | @system unittest 394 | { 395 | import stdx.allocator.gc_allocator : GCAllocator; 396 | import stdx.allocator.mallocator : Mallocator; 397 | import stdx.allocator.mmap_allocator : MmapAllocator; 398 | alias MyAllocator = TypedAllocator!(GCAllocator, 399 | AllocFlag.fixedSize | AllocFlag.threadLocal, Mallocator, 400 | AllocFlag.fixedSize | AllocFlag.threadLocal 401 | | AllocFlag.hasNoIndirections, 402 | MmapAllocator, 403 | ); 404 | MyAllocator a; 405 | auto b = a.allocatorFor!0(); 406 | static assert(is(typeof(b) == GCAllocator)); 407 | enum f1 = AllocFlag.fixedSize | AllocFlag.threadLocal; 408 | auto c = a.allocatorFor!f1(); 409 | static assert(is(typeof(c) == Mallocator)); 410 | enum f2 = AllocFlag.fixedSize | AllocFlag.threadLocal; 411 | static assert(is(typeof(a.allocatorFor!f2()) == Mallocator)); 412 | // Partial match 413 | enum f3 = AllocFlag.threadLocal; 414 | static assert(is(typeof(a.allocatorFor!f3()) == Mallocator)); 415 | 416 | int* p = a.make!int; 417 | scope(exit) a.dispose(p); 418 | int[] arr = a.makeArray!int(42); 419 | scope(exit) a.dispose(arr); 420 | assert(a.expandArray(arr, 3)); 421 | assert(a.shrinkArray(arr, 4)); 422 | } 423 | -------------------------------------------------------------------------------- /subprojects/mir-core.wrap: -------------------------------------------------------------------------------- 1 | [wrap-git] 2 | directory=mir-core 3 | url=https://github.com/libmir/mir-core.git 4 | revision=head 5 | --------------------------------------------------------------------------------