// Written in the D programming language.

/**
Macros:
WIKI = Phobos/StdAllocator
MYREF = <font face='Consolas, "Bitstream Vera Sans Mono", "Andale Mono", Monaco,
"DejaVu Sans Mono", "Lucida Console", monospace'><a href="#$1">$1</a> </font>
TDC = <td nowrap>$(D $1)$(BR)$(SMALL $(I Post:) $(BLUE $(D $+)))</td>
TDC2 = <td nowrap>$(D $(LREF $0))</td>
RES = $(I result)

Copyright: Andrei Alexandrescu 2013-.

License: $(WEB boost.org/LICENSE_1_0.txt, Boost License 1.0).

Authors: $(WEB erdani.com, Andrei Alexandrescu)

Source: $(PHOBOSSRC std/_allocator.d)

This module implements untyped composable memory allocators. They are $(I
untyped) because they deal exclusively in $(D void[]) and have no notion of what
type the allocated memory is destined for. They are $(I composable) because the
included allocators are building blocks that can be assembled into complex,
nontrivial allocators.

$(P Unlike the allocators for the C and C++ programming languages, which manage
the allocated size internally, these allocators require that the client
maintains (or knows $(I a priori)) the allocation size for each piece of memory
allocated. Put simply, the client must pass the allocated size upon
deallocation. Storing the size in the _allocator has significant negative
performance implications, and is virtually always redundant because client code
needs knowledge of the allocated size in order to avoid buffer overruns. (See
more discussion in a
$(WEB open-std.org/JTC1/SC22/WG21/docs/papers/2013/n3536.html, proposal) for
sized deallocation in C++.) For this reason, allocators herein traffic in $(D
void[]) as opposed to $(D void*).)

$(P In order to be usable as an _allocator, a type should implement the
following methods with their respective semantics. Only $(D alignment) and $(D
allocate) are required. If any of the other methods is missing, the _allocator
is assumed not to have that capability (for example, some allocators do not
offer manual deallocation of memory).)

$(BOOKTABLE ,
$(TR $(TH Method name) $(TH Semantics))

$(TR $(TDC uint alignment;, $(RES) > 0) $(TD Returns the minimum alignment of
all data returned by the allocator. An allocator may implement $(D alignment) as
a statically-known $(D enum) value only. Applications that need
dynamically-chosen alignment values should use the $(D alignedAllocate) and $(D
alignedReallocate) APIs.))

$(TR $(TDC size_t goodAllocSize(size_t n);, $(RES) >= n) $(TD Allocators
customarily allocate memory in discretely-sized chunks. Therefore, a request for
$(D n) bytes may result in a larger allocation. The extra memory allocated goes
unused and adds to the so-called $(WEB goo.gl/YoKffF, internal fragmentation).
The function $(D goodAllocSize(n)) returns the actual number of bytes that would
be allocated upon a request for $(D n) bytes. This module defines a default
implementation that returns $(D n) rounded up to a multiple of the allocator's
alignment.))

$(TR $(TDC void[] allocate(size_t s);, $(RES) is null || $(RES).length == s)
$(TD If $(D s == 0), the call may return any empty slice (including $(D
null)).
Otherwise, the call allocates $(D s) bytes of memory and returns the allocated
block, or $(D null) if the request could not be satisfied.))

$(TR $(TDC void[] alignedAllocate(size_t s, uint a);, $(RES) is null ||
$(RES).length == s) $(TD Similar to $(D allocate), with the additional guarantee
that the memory returned is aligned to at least $(D a) bytes. $(D a) must be a
power of 2 greater than $(D (void*).sizeof).))

$(TR $(TDC void[] allocateAll();, n/a) $(TD This is a special function
indicating to wrapping allocators that $(D this) is a simple,
limited-capabilities allocator that invites customization. Fixed-size regions
fit this characterization. If called, the function allocates all memory
available to the allocator and returns it.))

$(TR $(TDC bool expand(ref void[] b, size_t delta);, !$(RES) || b.length == $(I
old)(b).length + delta) $(TD Expands $(D b) by $(D delta) bytes. If $(D b is
null), the call evaluates $(D b = allocate(delta)) and returns $(D b !is null).
Otherwise, $(D b) must be a buffer previously allocated with the same allocator.
If expansion was successful, $(D expand) changes $(D b)'s length to $(D b.length
+ delta) and returns $(D true). Upon failure, the call effects no change upon
the allocator object, leaves $(D b) unchanged, and returns $(D false).))

$(TR $(TDC bool reallocate(ref void[] b, size_t s);, !$(RES) || b.length == s)
$(TD Reallocates $(D b) to size $(D s), possibly moving memory around. $(D b)
must be $(D null) or a buffer allocated with the same allocator. If reallocation
was successful, $(D reallocate) changes $(D b) appropriately and returns $(D
true). Upon failure, the call effects no change upon the allocator object,
leaves $(D b) unchanged, and returns $(D false). An allocator should implement
$(D reallocate) if it can derive some advantage from doing so; otherwise, this
module defines a $(D reallocate) free function implemented in terms of $(D
expand), $(D allocate), and $(D deallocate).))

$(TR $(TDC bool alignedReallocate(ref void[] b, size_t s, uint a);, !$(RES) ||
b.length == s) $(TD Similar to $(D reallocate), but guarantees the reallocated
memory is aligned at $(D a) bytes. The buffer must have originated from a call
to $(D alignedAllocate). $(D a) must be a power of 2 greater than $(D
(void*).sizeof).))

$(TR $(TDC bool owns(void[] b);, n/a) $(TD Returns $(D true) if $(D b) has been
allocated with this allocator. An allocator should define this method only if it
can decide on ownership precisely and quickly (in constant time, logarithmic
time, or linear time with a low multiplication factor). Traditional allocators
such as the C heap do not define such functionality. If $(D b is null), the
allocator should return $(D true) if it may return $(D null) as a result of an
allocation with $(D size == 0).))

$(TR $(TDC void deallocate(void[] b);, n/a) $(TD If $(D b is null), does
nothing. Otherwise, deallocates memory previously allocated with this
allocator.))

$(TR $(TDC void deallocateAll();, n/a) $(TD Deallocates all memory allocated
with this allocator. If an allocator implements this method, it must specify
whether its destructor calls it, too.))

$(TR $(TDC static Allocator it;, it $(I is a valid) Allocator $(I object)) $(TD
Some allocators are $(I monostate), i.e. have only one instance and hold only
global state.
(Notable examples are C's own $(D malloc)-based allocator and D's
garbage-collected heap.) Such allocators must define a static $(D it) instance
that serves as the symbolic placeholder for the global instance of the
allocator. An allocator should not hold state and define $(D it) simultaneously.
Depending on whether the allocator is thread-safe or not, this instance may be
$(D shared).))

)

The example below features an allocator modeled after $(WEB goo.gl/m7329l,
jemalloc), which uses a battery of free-list allocators spaced so as to keep
internal fragmentation to a minimum. The $(D FList) definitions specify no
bounds for the freelist because the $(D Segregator) does all size selection in
advance.

Sizes through 3584 bytes are handled via freelists of staggered sizes. Sizes
from 3585 bytes through 4072 KB are handled by a $(D HeapBlock) with a
block size of 4 KB. Sizes above that are passed directly to the
$(D GCAllocator).

----
alias FList = Freelist!(GCAllocator, 0, unbounded);
alias A = Segregator!(
    8, Freelist!(GCAllocator, 0, 8),
    128, Bucketizer!(FList, 1, 128, 16),
    256, Bucketizer!(FList, 129, 256, 32),
    512, Bucketizer!(FList, 257, 512, 64),
    1024, Bucketizer!(FList, 513, 1024, 128),
    2048, Bucketizer!(FList, 1025, 2048, 256),
    3584, Bucketizer!(FList, 2049, 3584, 512),
    4072 * 1024, CascadingAllocator!(
        () => HeapBlock!(GCAllocator, 4096)(4072 * 1024)),
    GCAllocator
);
A tuMalloc;
auto b = tuMalloc.allocate(500);
assert(b.length == 500);
auto c = tuMalloc.allocate(113);
assert(c.length == 113);
assert(tuMalloc.expand(c, 14));
tuMalloc.deallocate(b);
tuMalloc.deallocate(c);
----

$(H2 Allocating memory for sharing across threads)

One allocation pattern used in multithreaded applications is to share memory
across threads, and to deallocate blocks in a different thread than the one that
allocated them.

All allocators in this module accept and return $(D void[]) (as opposed to
$(D shared void[])). This is because at the time of allocation, deallocation, or
reallocation, the memory is effectively not $(D shared) (if it were, it would
reveal a bug at the application level).

There remains the issue of calling $(D a.deallocate(b)) from a different thread
than the one that allocated $(D b). It follows that both threads must have
access to the same instance $(D a) of the respective allocator type. By the
rules of the D language, this is possible only if $(D a) has the $(D shared)
qualifier. It follows that the allocator type must implement $(D allocate) and
$(D deallocate) as $(D shared) methods. That way, the allocator commits to
allowing usable $(D shared) instances.

Conversely, allocating memory with one non-$(D shared) allocator, passing it
across threads (by casting the obtained buffer to $(D shared)), and later
deallocating it in a different thread (either with a different allocator object
or with the same allocator object after casting it to $(D shared)) is illegal.

$(BOOKTABLE $(BIG Synopsis of predefined _allocator building blocks),
$(TR $(TH Allocator) $(TH Description))

$(TR $(TDC2 NullAllocator) $(TD Very good at doing absolutely nothing. A good
starting point for defining other allocators or for studying the API.))

$(TR $(TDC2 GCAllocator) $(TD The system-provided garbage-collector allocator.
This should be the default fallback allocator tapping into system memory. It
offers manual $(D free) and dutifully collects litter.))

$(TR $(TDC2 Mallocator) $(TD The C heap _allocator, a.k.a. $(D
malloc)/$(D realloc)/$(D free). Use sparingly and only for code that is unlikely
to leak.))

$(TR $(TDC2 AlignedMallocator) $(TD Interface to OS-specific _allocators that
support specifying alignment:
$(WEB man7.org/linux/man-pages/man3/posix_memalign.3.html, $(D posix_memalign))
on Posix and $(WEB msdn.microsoft.com/en-us/library/fs9stz4e(v=vs.80).aspx,
$(D _aligned_xxx)) on Windows.))

$(TR $(TDC2 AffixAllocator) $(TD Allocator that allocates and manages an extra
prefix and/or suffix for each block allocated.))

$(TR $(TDC2 HeapBlock) $(TD Organizes one contiguous chunk of memory in
equal-size blocks and tracks allocation status at the cost of one bit per
block.))

$(TR $(TDC2 FallbackAllocator) $(TD Allocator that combines two other
allocators, primary and fallback. Allocation requests are first tried with
primary, and upon failure are passed to the fallback. Useful for small and fast
allocators fronting general-purpose ones.))

$(TR $(TDC2 Freelist) $(TD Allocator that implements a $(WEB
wikipedia.org/wiki/Free_list, free list) on top of any other allocator. The
preferred size, tolerance, and maximum elements are configurable at compile- and
run time.))

$(TR $(TDC2 SharedFreelist) $(TD Same features as $(D Freelist), but packaged as
a $(D shared) structure that is accessible to several threads.))

$(TR $(TDC2 Region) $(TD Region allocator organizes a chunk of memory as a
simple bump-the-pointer allocator.))

$(TR $(TDC2 InSituRegion) $(TD Region holding its own allocation, most often on
the stack. Has statically-determined size.))

$(TR $(TDC2 AllocatorWithStats) $(TD Collects statistics about any other
allocator.))

$(TR $(TDC2 CascadingAllocator) $(TD Given an allocator factory, lazily creates as
many allocators as needed to satisfy allocation requests. The allocators are
stored in a linked list.
Requests for allocation are satisfied by searching the 238 list in a linear manner.)) 239 240 $(TR $(TDC2 Segregator) $(TD Segregates allocation requests by size and 241 dispatches them to distinct allocators.)) 242 243 $(TR $(TDC2 Bucketizer) $(TD Divides allocation sizes in discrete buckets and 244 uses an array of allocators, one per bucket, to satisfy requests.)) 245 246 ) 247 */ 248 249 module std.allocator; 250 251 // Example in the synopsis above 252 unittest 253 { 254 alias FList = Freelist!(GCAllocator, 0, unbounded); 255 alias A = Segregator!( 256 8, Freelist!(GCAllocator, 0, 8), 257 128, Bucketizer!(FList, 1, 128, 16), 258 256, Bucketizer!(FList, 129, 256, 32), 259 512, Bucketizer!(FList, 257, 512, 64), 260 1024, Bucketizer!(FList, 513, 1024, 128), 261 2048, Bucketizer!(FList, 1025, 2048, 256), 262 3584, Bucketizer!(FList, 2049, 3584, 512), 263 4072 * 1024, CascadingAllocator!( 264 () => HeapBlock!(GCAllocator, 4096)(4072 * 1024)), 265 GCAllocator 266 ); 267 A tuMalloc; 268 auto b = tuMalloc.allocate(500); 269 assert(b.length == 500); 270 auto c = tuMalloc.allocate(113); 271 assert(c.length == 113); 272 assert(tuMalloc.expand(c, 14)); 273 tuMalloc.deallocate(b); 274 tuMalloc.deallocate(c); 275 } 276 277 import std.algorithm, std.conv, std.exception, std.range, std.traits, 278 std.typecons, std.typetuple; 279 version(unittest) import std.stdio; 280 281 /* 282 Ternary by Timon Gehr and Andrei Alexandrescu. 283 */ 284 private struct Ternary 285 { 286 private ubyte value = 6; 287 private static Ternary make(ubyte b) 288 { 289 Ternary r = void; 290 r.value = b; 291 return r; 292 } 293 294 enum no = make(0), yes = make(2), unknown = make(6); 295 296 this(bool b) { value = b << 1; } 297 298 void opAssign(bool b) { value = b << 1; } 299 300 Ternary opUnary(string s)() if (s == "~") 301 { 302 return make(386 >> value & 6); 303 } 304 305 Ternary opBinary(string s)(Ternary rhs) if (s == "|") 306 { 307 return make(25512 >> value + rhs.value & 6); 308 } 309 310 Ternary opBinary(string s)(Ternary rhs) if (s == "&") 311 { 312 return make(26144 >> value + rhs.value & 6); 313 } 314 315 Ternary opBinary(string s)(Ternary rhs) if (s == "^") 316 { 317 return make(26504 >> value + rhs.value & 6); 318 } 319 } 320 321 unittest 322 { 323 alias f = Ternary.no, t = Ternary.yes, u = Ternary.unknown; 324 auto truthTableAnd = 325 [ 326 t, t, t, 327 t, u, u, 328 t, f, f, 329 u, t, u, 330 u, u, u, 331 u, f, f, 332 f, t, f, 333 f, u, f, 334 f, f, f, 335 ]; 336 337 auto truthTableOr = 338 [ 339 t, t, t, 340 t, u, t, 341 t, f, t, 342 u, t, t, 343 u, u, u, 344 u, f, u, 345 f, t, t, 346 f, u, u, 347 f, f, f, 348 ]; 349 350 auto truthTableXor = 351 [ 352 t, t, f, 353 t, u, u, 354 t, f, t, 355 u, t, u, 356 u, u, u, 357 u, f, u, 358 f, t, t, 359 f, u, u, 360 f, f, f, 361 ]; 362 363 for (auto i = 0; i != truthTableAnd.length; i += 3) 364 { 365 assert((truthTableAnd[i] & truthTableAnd[i + 1]) 366 == truthTableAnd[i + 2]); 367 assert((truthTableOr[i] | truthTableOr[i + 1]) 368 == truthTableOr[i + 2]); 369 assert((truthTableXor[i] ^ truthTableXor[i + 1]) 370 == truthTableXor[i + 2]); 371 } 372 373 Ternary a; 374 assert(a == Ternary.unknown); 375 static assert(!is(typeof({ if (a) {} }))); 376 assert(!is(typeof({ auto b = Ternary(3); }))); 377 a = true; 378 assert(a == Ternary.yes); 379 a = false; 380 assert(a == Ternary.no); 381 a = Ternary.unknown; 382 assert(a == Ternary.unknown); 383 Ternary b; 384 b = a; 385 assert(b == a); 386 assert(~Ternary.yes == Ternary.no); 387 assert(~Ternary.no == Ternary.yes); 388 
assert(~Ternary.unknown == Ternary.unknown); 389 } 390 391 /** 392 Returns the size in bytes of the state that needs to be allocated to hold an 393 object of type $(D T). $(D stateSize!T) is zero for $(D struct)s that are not 394 nested and have no nonstatic member variables. 395 */ 396 private template stateSize(T) 397 { 398 static if (is(T == class) || is(T == interface)) 399 enum stateSize = __traits(classInstanceSize, T); 400 else static if (is(T == struct) || is(T == union)) 401 enum stateSize = FieldTypeTuple!T.length || isNested!T ? T.sizeof : 0; 402 else static if (is(T == void)) 403 enum size_t stateSize = 0; 404 else 405 enum stateSize = T.sizeof; 406 } 407 408 unittest 409 { 410 static assert(stateSize!void == 0); 411 struct A {} 412 static assert(stateSize!A == 0); 413 struct B { int x; } 414 static assert(stateSize!B == 4); 415 interface I1 {} 416 // static assert(stateSize!I1 == 2 * size_t.sizeof); 417 class C1 {} 418 static assert(stateSize!C1 == 3 * size_t.sizeof); 419 class C2 { char c; } 420 static assert(stateSize!C2 == 4 * size_t.sizeof); 421 static class C3 { char c; } 422 static assert(stateSize!C3 == 2 * size_t.sizeof + char.sizeof); 423 } 424 425 /** 426 * Shortcut that encapsulates a cast and a call to emplace() 427 * Params: 428 * a = the allocator to use 429 * args = the arguments to $(D T)'s constructor 430 * Returns: a pointer to an instance of $(D T). 431 */ 432 T* allocate(T, Allocator, Args...)(auto ref Allocator a, auto ref Args args) 433 @trusted if (is (T == struct)) 434 { 435 import std.conv : emplace; 436 void[] mem = a.allocate(T.sizeof); 437 return emplace(cast(T*) mem.ptr, args); 438 } 439 440 /// 441 unittest 442 { 443 auto allocator = Mallocator.it; 444 struct TestStruct { int x = 5; } 445 TestStruct* p = allocate!TestStruct(allocator); 446 assert (p !is null); 447 assert (p.x == 5); 448 Mallocator.it.deallocate((cast(void*) p)[0 .. TestStruct.sizeof]); 449 } 450 451 /** 452 * Shortcut that encapsulates a cast and a call to emplace() 453 * Params: 454 * a = the allocator to use 455 * args = the arguments to $(D T)'s constructor 456 * Returns: a reference to an instance of $(D T). 457 */ 458 T allocate(T, Allocator, Args...)(auto ref Allocator a, auto ref Args args) 459 @trusted if (is (T == class)) 460 { 461 import std.conv : emplace; 462 void[] mem = a.allocate(__traits(classInstanceSize, T)); 463 return emplace!T(mem, args); 464 } 465 466 /// 467 unittest 468 { 469 auto allocator = Mallocator.it; 470 class TestClass { int x; this(int x) { this.x = x; } } 471 TestClass tc = allocate!TestClass(allocator, 10); 472 assert (tc !is null); 473 assert (tc.x == 10); 474 Mallocator.it.deallocate((cast(void*) tc)[0 .. __traits(classInstanceSize, TestClass)]); 475 } 476 477 /** 478 * Encapsulates some casts and pointer slicing to deallocate $(D structInstance). 479 * This function does NOT call T's destructor. 480 */ 481 void deallocate(T, Allocator)(auto ref Allocator a, T* structInstance) 482 pure nothrow @trusted if (is (T == struct)) 483 { 484 a.deallocate((cast(void*) structInstance)[0 .. T.sizeof]); 485 } 486 487 /// 488 unittest 489 { 490 auto allocator = Mallocator.it; 491 bool d = false; 492 struct TestStruct { float f; } 493 TestStruct* ts = allocate!TestStruct(allocator); 494 deallocate(allocator, ts); 495 } 496 497 /** 498 * Encapsulates some casts and pointer slicing to deallocate $(D classInstance). 499 * This function does NOT call T's destructor. 
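 * If the destructor must run, invoke it explicitly before deallocating, for
 * example via $(D destroy) from $(D object). A sketch ($(D SomeClass) and
 * $(D allocator) are placeholder names):
 *
 * ----
 * auto obj = allocate!SomeClass(allocator);
 * // ... use obj ...
 * destroy(obj);               // runs the destructor; the memory stays allocated
 * deallocate(allocator, obj); // then return the memory to the allocator
 * ----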
500 */ 501 void deallocate(T, Allocator)(auto ref Allocator a, T classInstance) 502 pure nothrow @trusted if (is (T == class)) 503 { 504 a.deallocate((cast(void*) classInstance)[0 .. __traits(classInstanceSize, T)]); 505 } 506 507 /// 508 unittest 509 { 510 import std.math; 511 auto allocator = Mallocator.it; 512 class TestClass { double z; } 513 TestClass tc = allocate!TestClass(allocator); 514 assert (isnan(tc.z)); 515 deallocate(allocator, tc); 516 } 517 518 /** 519 $(D chooseAtRuntime) is a compile-time constant of type $(D size_t) that several 520 parameterized structures in this module recognize to mean deferral to runtime of 521 the exact value. For example, $(D HeapBlock!(Allocator, 4096)) (described in 522 detail below) defines a block allocator with block size of 4096 bytes, whereas 523 $(D HeapBlock!(Allocator, chooseAtRuntime)) defines a block allocator that has a 524 field storing the block size, initialized by the user. 525 */ 526 enum chooseAtRuntime = size_t.max - 1; 527 528 /** 529 $(D unbounded) is a compile-time constant of type $(D size_t) that several 530 parameterized structures in this module recognize to mean "infinite" bounds for 531 the parameter. For example, $(D Freelist) (described in detail below) accepts a 532 $(D maxNodes) parameter limiting the number of freelist items. If $(D unbounded) 533 is passed for $(D maxNodes), then there is no limit and no checking for the 534 number of nodes. 535 */ 536 enum unbounded = size_t.max; 537 538 /** 539 The alignment that is guaranteed to accommodate any D object allocation on the 540 current platform. 541 */ 542 enum uint platformAlignment = std.algorithm.max(double.alignof, real.alignof); 543 544 /** 545 The default good size allocation is deduced as $(D n) rounded up to the 546 allocator's alignment. 547 */ 548 size_t goodAllocSize(A)(auto ref A a, size_t n) pure nothrow 549 { 550 return n.roundUpToMultipleOf(a.alignment); 551 } 552 553 /** 554 The default $(D reallocate) function first attempts to use $(D expand). If $(D 555 Allocator.expand) is not defined or returns $(D false), $(D reallocate) 556 allocates a new block of memory of appropriate size and copies data from the old 557 block to the new block. Finally, if $(D Allocator) defines $(D deallocate), $(D 558 reallocate) uses it to free the old memory block. 559 560 $(D reallocate) does not attempt to use $(D Allocator.reallocate) even if 561 defined. This is deliberate so allocators may use it internally within their own 562 implementation of $(D reallocate). 563 564 */ 565 bool reallocate(Allocator)(ref Allocator a, ref void[] b, size_t s) 566 { 567 if (b.length == s) return true; 568 static if (hasMember!(Allocator, "expand")) 569 { 570 if (b.length <= s && a.expand(b, s - b.length)) return true; 571 } 572 auto newB = a.allocate(s); 573 if (newB.length <= b.length) newB[] = b[0 .. newB.length]; 574 else newB[0 .. b.length] = b[]; 575 static if (hasMember!(Allocator, "deallocate")) 576 a.deallocate(b); 577 b = newB; 578 return true; 579 } 580 581 /* 582 _ _ _ _ _ _ _ 583 | \ | | | | | /\ | | | | | 584 | \| |_ _| | | / \ | | | ___ ___ __ _| |_ ___ _ __ 585 | . ` | | | | | | / /\ \ | | |/ _ \ / __/ _` | __/ _ \| '__| 586 | |\ | |_| | | |/ ____ \| | | (_) | (_| (_| | || (_) | | 587 |_| \_|\__,_|_|_/_/ \_\_|_|\___/ \___\__,_|\__\___/|_| 588 */ 589 /** 590 $(D NullAllocator) is an emphatically empty implementation of the allocator interface. Although it has no direct use, it is useful as a "terminator" in composite allocators. 
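For instance, here is a sketch of a composite that serves only small requests;
the composition is hypothetical and uses $(D Segregator) and $(D Mallocator),
both described in the synopsis above. Requests above the threshold reach
$(D NullAllocator) and therefore fail:

----
alias SmallOnly = Segregator!(4096, Mallocator, NullAllocator);
SmallOnly a;
auto b = a.allocate(100);          // served by Mallocator
assert(b.length == 100);
assert(a.allocate(8192) is null);  // refused by the NullAllocator terminator
a.deallocate(b);
----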
591 */ 592 struct NullAllocator 593 { 594 /** 595 $(D NullAllocator) advertises a relatively large _alignment equal to 64 KB. 596 This is because $(D NullAllocator) never actually needs to honor this 597 alignment and because composite allocators using $(D NullAllocator) 598 shouldn't be unnecessarily constrained. 599 */ 600 enum uint alignment = 64 * 1024; 601 /// Always returns $(D null). 602 void[] allocate(size_t) shared { return null; } 603 /// Returns $(D b is null). 604 bool owns(void[] b) shared { return b is null; } 605 /** 606 These methods return $(D false). 607 Precondition: $(D b is null). This is because there is no other possible 608 legitimate input. 609 */ 610 bool expand(ref void[] b, size_t) shared 611 { assert(b is null); return false; } 612 /// Ditto 613 bool reallocate(ref void[] b, size_t) shared 614 { assert(b is null); return false; } 615 /** 616 No-op. 617 Precondition: $(D b is null) 618 */ 619 void deallocate(void[] b) shared { assert(b is null); } 620 /** 621 No-op. 622 */ 623 void deallocateAll() shared { } 624 625 static shared(NullAllocator) it() pure nothrow @property @trusted 626 { 627 return cast(typeof(return)) _it; 628 } 629 630 /** 631 Returns the $(D shared) global instance of the $(D NullAllocator). 632 */ 633 private static shared const NullAllocator _it; 634 } 635 636 unittest 637 { 638 auto b = NullAllocator.it.allocate(100); 639 assert(b is null); 640 NullAllocator.it.deallocate(b); 641 NullAllocator.it.deallocateAll(); 642 assert(NullAllocator.it.owns(null)); 643 } 644 645 /** 646 D's built-in garbage-collected allocator. 647 */ 648 struct GCAllocator 649 { 650 private import core.memory; 651 652 /** 653 The alignment is a static constant equal to $(D platformAlignment), which 654 ensures proper alignment for any D data type. 655 */ 656 enum uint alignment = platformAlignment; 657 658 /** 659 Standard allocator methods per the semantics defined above. The 660 $(D deallocate) and $(D reallocate) methods are $(D @system) because they 661 may move memory around, leaving dangling pointers in user code. 662 */ 663 @trusted void[] allocate(size_t bytes) shared nothrow pure 664 { 665 auto p = GC.malloc(bytes); 666 return p ? p[0 .. bytes] : null; 667 } 668 669 /// Ditto 670 @trusted bool expand(ref void[] b, size_t delta) shared nothrow pure 671 { 672 auto newSize = GC.extend(b.ptr, b.length + delta, 673 b.length + delta); 674 if (newSize == 0) 675 { 676 // expansion unsuccessful 677 return false; 678 } 679 assert(newSize >= b.length + delta); 680 b = b.ptr[0 .. newSize]; 681 return true; 682 } 683 684 /// Ditto 685 @system bool reallocate(ref void[] b, size_t newSize) shared nothrow pure 686 { 687 import core.exception : OutOfMemoryError; 688 try 689 { 690 auto p = cast(ubyte*) GC.realloc(b.ptr, newSize); 691 b = p[0 .. newSize]; 692 } 693 catch (OutOfMemoryError) 694 { 695 // leave the block in place, tell caller 696 return false; 697 } 698 return true; 699 } 700 701 /// Ditto 702 @system void deallocate(void[] b) shared nothrow pure 703 { 704 GC.free(b.ptr); 705 } 706 707 static shared(GCAllocator) it() pure nothrow @property @trusted 708 { 709 return cast(typeof(return)) _it; 710 } 711 712 /** 713 Returns the global instance of this allocator type. The garbage collected allocator is thread-safe, therefore all of its methods and $(D it) itself are $(D shared). 714 */ 715 private static shared const GCAllocator _it; 716 717 // Leave it undocummented for now. 
718 @trusted void collect() shared 719 { 720 GC.collect(); 721 } 722 } 723 724 /// 725 unittest 726 { 727 auto buffer = GCAllocator.it.allocate(1024 * 1024 * 4); 728 scope(exit) GCAllocator.it.deallocate(buffer); // or leave it to collection 729 //... 730 } 731 732 unittest 733 { 734 auto b = GCAllocator.it.allocate(10000); 735 assert(GCAllocator.it.expand(b, 1)); 736 } 737 738 private extern (C) 739 { 740 void* malloc(size_t) pure nothrow @trusted; 741 void free(void*) pure nothrow @trusted; 742 void* realloc(void*, size_t) pure nothrow @trusted; 743 } 744 745 /** 746 The C heap allocator. 747 */ 748 struct Mallocator 749 { 750 // private import core.stdc.stdlib; 751 752 /** 753 The alignment is a static constant equal to $(D platformAlignment), which ensures proper alignment for any D data type. 754 */ 755 enum uint alignment = platformAlignment; 756 757 /** 758 Standard allocator methods per the semantics defined above. The 759 $(D deallocate) and $(D reallocate) methods are $(D @system) because they 760 may move memory around, leaving dangling pointers in user code. Somewhat 761 paradoxically, $(D malloc) is $(D @safe) but that's only useful to safe 762 programs that can afford to leak memory allocated. 763 */ 764 void[] allocate(size_t bytes) shared pure nothrow @trusted 765 { 766 auto p = malloc(bytes); 767 return p ? p[0 .. bytes] : null; 768 } 769 770 /// Ditto 771 void deallocate(void[] b) shared pure nothrow @system 772 { 773 free(b.ptr); 774 } 775 776 /// Ditto 777 bool reallocate(ref void[] b, size_t s) shared pure nothrow @system 778 { 779 if (!s) 780 { 781 // fuzzy area in the C standard, see http://goo.gl/ZpWeSE 782 // so just deallocate and nullify the pointer 783 deallocate(b); 784 b = null; 785 return true; 786 } 787 auto p = cast(ubyte*) realloc(b.ptr, s); 788 if (!p) return false; 789 b = p[0 .. s]; 790 return true; 791 } 792 793 static shared(Mallocator) it() pure nothrow @property @trusted 794 { 795 return cast(typeof(return)) _it; 796 } 797 798 /** 799 Returns the global instance of this allocator type. The C heap allocator is thread-safe, therefore all of its methods and $(D it) itself are $(D shared). 800 */ 801 private static shared const Mallocator _it; 802 } 803 804 /// 805 unittest 806 { 807 auto buffer = Mallocator.it.allocate(1024 * 1024 * 4); 808 scope(exit) Mallocator.it.deallocate(buffer); 809 //... 810 } 811 812 unittest 813 { 814 static void test(A)() 815 { 816 int* p = null; 817 p = new int; 818 *p = 42; 819 assert(*p == 42); 820 } 821 822 test!GCAllocator(); 823 test!Mallocator(); 824 } 825 826 unittest 827 { 828 static void test(A)() 829 { 830 Object p = null; 831 p = new Object; 832 assert(p !is null); 833 } 834 835 test!GCAllocator(); 836 test!Mallocator(); 837 } 838 839 version (Posix) extern(C) int posix_memalign(void**, size_t, size_t); 840 version (Windows) 841 { 842 extern(C) void* _aligned_malloc(size_t, size_t); 843 extern(C) void _aligned_free(void *memblock); 844 extern(C) void* _aligned_realloc(void *, size_t, size_t); 845 } 846 847 /** 848 Aligned allocator using OS-specific primitives, under a uniform API. 849 */ 850 struct AlignedMallocator 851 { 852 private import core.stdc.stdlib; 853 854 /** 855 The default alignment is $(D platformAlignment). 856 */ 857 enum uint alignment = platformAlignment; 858 859 /** 860 Forwards to $(D alignedAllocate(bytes, platformAlignment)). 
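For example, a sketch using the global $(D it) instance (the explicit alignment
in the second call is illustrative):

----
// Default (platform) alignment:
auto a = AlignedMallocator.it.allocate(4096);
scope(exit) AlignedMallocator.it.deallocate(a);
// Explicit 64-byte alignment:
auto b = AlignedMallocator.it.alignedAllocate(4096, 64);
scope(exit) AlignedMallocator.it.deallocate(b);
assert(cast(size_t) b.ptr % 64 == 0);
----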
861 */ 862 @trusted void[] allocate(size_t bytes) shared 863 { 864 return alignedAllocate(bytes, alignment); 865 } 866 867 version (Posix) import core.stdc.errno, core.sys.posix.stdlib; 868 869 /** 870 Uses $(WEB man7.org/linux/man-pages/man3/posix_memalign.3.html, 871 $(D posix_memalign)) on Posix and 872 $(WEB msdn.microsoft.com/en-us/library/8z34s9c6(v=vs.80).aspx, 873 $(D __aligned_malloc)) on Windows. 874 */ 875 version(Posix) @trusted 876 void[] alignedAllocate(size_t bytes, uint a) shared 877 { 878 assert(a.isGoodDynamicAlignment); 879 void* result; 880 auto code = posix_memalign(&result, a, bytes); 881 if (code == ENOMEM) return null; 882 enforce(code == 0, text("Invalid alignment requested: ", a)); 883 return result[0 .. bytes]; 884 } 885 else version(Windows) @trusted 886 void[] alignedAllocate(size_t bytes, uint a) shared 887 { 888 auto result = _aligned_malloc(bytes, a); 889 return result ? result[0 .. bytes] : null; 890 } 891 else static assert(0); 892 893 /** 894 Calls $(D free(b.ptr)) on Posix and 895 $(WEB msdn.microsoft.com/en-US/library/17b5h8td(v=vs.80).aspx, 896 $(D __aligned_free(b.ptr))) on Windows. 897 */ 898 version (Posix) @system 899 void deallocate(void[] b) shared 900 { 901 free(b.ptr); 902 } 903 else version (Windows) @system 904 void deallocate(void[] b) shared 905 { 906 _aligned_free(b.ptr); 907 } 908 else static assert(0); 909 910 /** 911 On Posix, forwards to $(D realloc). On Windows, forwards to 912 $(D alignedReallocate(b, newSize, platformAlignment)). 913 */ 914 version (Posix) @system bool reallocate(ref void[] b, size_t newSize) shared 915 { 916 return Mallocator.it.reallocate(b, newSize); 917 } 918 version (Windows) @system 919 bool reallocate(ref void[] b, size_t newSize) shared 920 { 921 return alignedReallocate(b, newSize, alignment); 922 } 923 924 /** 925 On Posix, uses $(D alignedAllocate) and copies data around because there is 926 no realloc for aligned memory. On Windows, calls 927 $(WEB msdn.microsoft.com/en-US/library/y69db7sx(v=vs.80).aspx, 928 $(D __aligned_realloc(b.ptr, newSize, a))). 929 */ 930 version (Posix) @system 931 bool alignedReallocate(ref void[] b, size_t s, uint a) shared 932 { 933 if (!s) 934 { 935 deallocate(b); 936 b = null; 937 return true; 938 } 939 auto result = alignedAllocate(s, a); 940 if (!result) return false; 941 if (s < b.length) result[] = b[0 .. s]; 942 else result[0 .. b.length] = b[]; 943 deallocate(b); 944 b = result; 945 return true; 946 } 947 else version (Windows) @system 948 bool alignedReallocate(ref void[] b, size_t s, uint a) shared 949 { 950 if (!s) 951 { 952 deallocate(b); 953 b = null; 954 return true; 955 } 956 auto p = cast(ubyte*) _aligned_realloc(b.ptr, s, a); 957 if (!p) return false; 958 b = p[0 .. s]; 959 return true; 960 } 961 962 /** 963 Returns the global instance of this allocator type. The C heap allocator is thread-safe, therefore all of its methods and $(D it) itself are $(D shared). 964 */ 965 static shared AlignedMallocator it; 966 } 967 968 /// 969 unittest 970 { 971 auto buffer = AlignedMallocator.it.alignedAllocate(1024 * 1024 * 4, 128); 972 scope(exit) AlignedMallocator.it.deallocate(buffer); 973 //... 974 } 975 976 /** 977 Returns s rounded up to a multiple of base. 978 */ 979 private size_t roundUpToMultipleOf(size_t s, uint base) pure nothrow @safe 980 { 981 assert(base); 982 auto rem = s % base; 983 return rem ? 
        s + base - rem : s;
}

unittest
{
    assert(10.roundUpToMultipleOf(11) == 11);
    assert(11.roundUpToMultipleOf(11) == 11);
    assert(12.roundUpToMultipleOf(11) == 22);
    assert(118.roundUpToMultipleOf(11) == 121);
}

/**
Returns the given block with its start address rounded up to a multiple of base;
the end of the block is left unchanged.
*/
private void[] roundStartToMultipleOf(void[] s, uint base) pure nothrow @trusted
{
    assert(base);
    auto p = cast(void*) roundUpToMultipleOf(
        cast(size_t) s.ptr, base);
    auto end = s.ptr + s.length;
    return p[0 .. end - p];
}

unittest
{
    void[] p;
    assert(roundStartToMultipleOf(p, 16) is null);
    p = new ulong[10];
    assert(roundStartToMultipleOf(p, 16) is p);
}

/**
Returns $(D s) rounded up to the nearest power of 2.
*/
private size_t roundUpToPowerOf2(size_t s) pure nothrow @safe
{
    assert(s <= (size_t.max >> 1) + 1);
    --s;
    static if (size_t.sizeof == 4)
        alias Shifts = TypeTuple!(1, 2, 4, 8, 16);
    else
        alias Shifts = TypeTuple!(1, 2, 4, 8, 16, 32);
    foreach (i; Shifts)
    {
        s |= s >> i;
    }
    return s + 1;
}

unittest
{
    assert(0.roundUpToPowerOf2 == 0);
    assert(1.roundUpToPowerOf2 == 1);
    assert(2.roundUpToPowerOf2 == 2);
    assert(3.roundUpToPowerOf2 == 4);
    assert(7.roundUpToPowerOf2 == 8);
    assert(8.roundUpToPowerOf2 == 8);
    assert(10.roundUpToPowerOf2 == 16);
    assert(11.roundUpToPowerOf2 == 16);
    assert(12.roundUpToPowerOf2 == 16);
    assert(118.roundUpToPowerOf2 == 128);
    assert((size_t.max >> 1).roundUpToPowerOf2 == (size_t.max >> 1) + 1);
    assert(((size_t.max >> 1) + 1).roundUpToPowerOf2 == (size_t.max >> 1) + 1);
}

/**

Allocator that adds some extra data before (of type $(D Prefix)) and/or after
(of type $(D Suffix)) any allocation made with its parent allocator. This is
useful when additional allocation-related information is needed, such as
mutexes, reference counts, or walls for debugging memory corruption errors.

If $(D Prefix) is not $(D void), $(D Allocator) must guarantee an alignment at
least as large as $(D Prefix.alignof).

Suffixes are slower to get at because of alignment rounding, so prefixes should
be preferred. However, small prefixes blunt the alignment, so if a large
alignment with a small affix is needed, suffixes should be chosen.

*/
struct AffixAllocator(Allocator, Prefix, Suffix = void)
{
    static assert(
        !stateSize!Prefix || Allocator.alignment >= Prefix.alignof,
        "AffixAllocator does not work with allocators offering a smaller"
        ~ " alignment than the prefix alignment.");
    static assert(alignment % Suffix.alignof == 0,
        "This restriction could be relaxed in the future.");

    /**
    If $(D Prefix) is $(D void), the alignment is that of the parent. Otherwise,
    the alignment is the same as $(D Prefix)'s alignment.
    */
    enum uint alignment =
        stateSize!Prefix ? Prefix.alignof : Allocator.alignment;

    /**
    If the parent allocator $(D Allocator) is stateful, an instance of it is
    stored as a member. Otherwise, $(D AffixAllocator) uses $(D Allocator.it).
    In either case, the name $(D _parent) is uniformly used for accessing the
    parent allocator.
    */
    static if (stateSize!Allocator) Allocator parent;
    else alias parent = Allocator.it;

    template Impl()
    {
        size_t goodAllocSize(size_t s) pure nothrow const
        {
            return parent.goodAllocSize(actualAllocationSize(s));
        }

        private size_t actualAllocationSize(size_t s) const
        {
            static if (!stateSize!Suffix)
            {
                return s + stateSize!Prefix;
            }
            else
            {
                return roundUpToMultipleOf(
                    s + stateSize!Prefix,
                    Suffix.alignof) + stateSize!Suffix;
            }
        }

        private void[] actualAllocation(void[] b) const
        {
            assert(b !is null);
            return (b.ptr - stateSize!Prefix)
                [0 .. actualAllocationSize(b.length)];
        }

        void[] allocate(size_t bytes)
        {
            auto result = parent.allocate(actualAllocationSize(bytes));
            if (result is null) return null;
            static if (stateSize!Prefix)
                emplace!Prefix(cast(Prefix*)result.ptr);
            static if (stateSize!Suffix)
                emplace!Suffix(
                    cast(Suffix*)(result.ptr + result.length - Suffix.sizeof));
            return result[stateSize!Prefix .. stateSize!Prefix + bytes];
        }

        static if (hasMember!(Allocator, "owns"))
        bool owns(void[] b)
        {
            return b is null ? true : parent.owns(actualAllocation(b));
        }

        static if (!stateSize!Suffix && hasMember!(Allocator, "expand"))
        bool expand(ref void[] b, size_t delta)
        {
            auto t = actualAllocation(b);
            auto result = parent.expand(t, delta);
            if (!result) return false;
            b = b.ptr[0 .. b.length + delta];
            return true;
        }

        static if (hasMember!(Allocator, "reallocate"))
        bool reallocate(ref void[] b, size_t s)
        {
            auto t = actualAllocation(b);
            auto result = parent.reallocate(t, actualAllocationSize(s));
            if (!result) return false; // no harm done
            b = t.ptr[stateSize!Prefix .. stateSize!Prefix + s];
            return true;
        }

        static if (hasMember!(Allocator, "deallocate"))
        void deallocate(void[] b)
        {
            auto p = b.ptr - stateSize!Prefix;
            parent.deallocate(p[0 .. actualAllocationSize(b.length)]);
        }

        static if (hasMember!(Allocator, "deallocateAll"))
        void deallocateAll()
        {
            parent.deallocateAll();
        }

        // Extra functions
        static if (stateSize!Prefix)
        static ref Prefix prefix(void[] b)
        {
            return (cast(Prefix*)b.ptr)[-1];
        }
        static if (stateSize!Suffix)
        ref Suffix suffix(void[] b)
        {
            auto p = b.ptr - stateSize!Prefix
                + actualAllocationSize(b.length);
            return (cast(Suffix*) p)[-1];
        }
    }

    version (StdDdoc)
    {
        /**
        Standard allocator methods. Each is defined if and only if the parent
        allocator defines the homonym method (except for $(D goodAllocSize),
        which may use the global default). Also, the methods will be $(D
        shared) if the parent allocator defines them as such.
        */
        size_t goodAllocSize(size_t) pure nothrow const;
        /// Ditto
        void[] allocate(size_t) pure nothrow;
        /// Ditto
        bool owns(void[]) pure nothrow;
        /// Ditto
        bool expand(ref void[] b, size_t delta) pure nothrow;
        /// Ditto
        bool reallocate(ref void[] b, size_t s) pure nothrow;
        /// Ditto
        void deallocate(void[] b) pure nothrow;
        /// Ditto
        void deallocateAll() pure nothrow;

        /**
        The $(D it) singleton is defined if and only if the parent allocator has
        no state and defines its own $(D it) object.
        */
        static AffixAllocator it;

        /**
        Affix access functions offering mutable references to the affixes of a
        block previously allocated with this allocator. $(D b) may not be null.
        They are defined if and only if the corresponding affix is not $(D void).

        Precondition: $(D b !is null)
        */
        static ref Prefix prefix(void[] b);
        /// Ditto
        static ref Suffix suffix(void[] b);
    }
    else static if (is(typeof(Allocator.it) == shared))
    {
        static shared AffixAllocator it;
        shared { mixin Impl!(); }
    }
    else
    {
        mixin Impl!();
        static if (stateSize!Allocator == 0)
            static __gshared AffixAllocator it;
    }
}

///
unittest
{
    // One word before and after each allocation.
    alias A = AffixAllocator!(Mallocator, size_t, size_t);
    auto b = A.it.allocate(11);
    A.it.prefix(b) = 0xCAFE_BABE;
    A.it.suffix(b) = 0xDEAD_BEEF;
    assert(A.it.prefix(b) == 0xCAFE_BABE && A.it.suffix(b) == 0xDEAD_BEEF);
}

unittest
{
    alias A = AffixAllocator!(Mallocator, size_t);
    auto b = A.it.allocate(10);
    A.it.prefix(b) = 10;
    assert(A.it.prefix(b) == 10);

    alias B = AffixAllocator!(NullAllocator, size_t);
    b = B.it.allocate(100);
    assert(b is null);
}

/**
Returns the number of consecutive one bits starting at the most significant bit
of $(D x), i.e. the count of leading ones before the first zero bit. If $(D x)
contains no zero bits (i.e. is equal to $(D ulong.max)), returns 64.
*/
private uint leadingOnes(ulong x) pure nothrow @safe
{
    uint result = 0;
    while (cast(long) x < 0)
    {
        ++result;
        x <<= 1;
    }
    return result;
}

unittest
{
    assert(leadingOnes(0) == 0);
    assert(leadingOnes(~0UL) == 64);
    assert(leadingOnes(0xF000_0000_0000_0000) == 4);
    assert(leadingOnes(0xE400_0000_0000_0000) == 3);
    assert(leadingOnes(0xC700_0200_0000_0000) == 2);
    assert(leadingOnes(0x8000_0030_0000_0000) == 1);
    assert(leadingOnes(0x2000_0000_0000_0000) == 0);
}

/**
Finds a run of at least $(D n) contiguous one bits in $(D x) and returns its
starting index counting from the most significant bit (MSB has index 0). Returns
64 if no such run exists.
*/
private uint findContigOnes(ulong x, uint n) pure nothrow @safe
{
    while (n > 1)
    {
        immutable s = n >> 1;
        x &= x << s;
        n -= s;
    }
    return leadingOnes(~x);
}

unittest
{
    assert(findContigOnes(0x0000_0000_0000_0300, 2) == 54);

    assert(findContigOnes(~0UL, 1) == 0);
    assert(findContigOnes(~0UL, 2) == 0);
    assert(findContigOnes(~0UL, 32) == 0);
    assert(findContigOnes(~0UL, 64) == 0);
    assert(findContigOnes(0UL, 1) == 64);

    assert(findContigOnes(0x4000_0000_0000_0000, 1) == 1);
    assert(findContigOnes(0x0000_0F00_0000_0000, 4) == 20);
}

/**
Returns the number of trailing zeros of $(D x).
*/
private uint trailingZeros(ulong x) pure nothrow @safe
{
    uint result;
    while (result < 64 && !(x & (1UL << result)))
    {
        ++result;
    }
    return result;
}

unittest
{
    assert(trailingZeros(0) == 64);
    assert(trailingZeros(1) == 0);
    assert(trailingZeros(2) == 1);
    assert(trailingZeros(3) == 0);
    assert(trailingZeros(4) == 2);
}

/*
Unconditionally sets the bits from lsb through msb in w to one.
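For example, with w == 0b0110, setBits(w, 0, 3) makes w == 0b1111.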
1334 */ 1335 private void setBits(ref ulong w, uint lsb, uint msb) pure nothrow @safe 1336 { 1337 assert(lsb <= msb && msb < 64); 1338 const mask = (ulong.max << lsb) & (ulong.max >> (63 - msb)); 1339 w |= mask; 1340 } 1341 1342 unittest 1343 { 1344 ulong w; 1345 w = 0; setBits(w, 0, 63); assert(w == ulong.max); 1346 w = 0; setBits(w, 1, 63); assert(w == ulong.max - 1); 1347 w = 6; setBits(w, 0, 1); assert(w == 7); 1348 w = 6; setBits(w, 3, 3); assert(w == 14); 1349 } 1350 1351 /* Are bits from lsb through msb in w zero? If so, make then 1 1352 and return the resulting w. Otherwise, just return 0. 1353 */ 1354 private bool setBitsIfZero(ref ulong w, uint lsb, uint msb) pure nothrow @safe 1355 { 1356 assert(lsb <= msb && msb < 64); 1357 const mask = (ulong.max << lsb) & (ulong.max >> (63 - msb)); 1358 if (w & mask) return false; 1359 w |= mask; 1360 return true; 1361 } 1362 1363 unittest 1364 { 1365 // TODO 1366 } 1367 1368 // Assigns bits in w from lsb through msb to zero. 1369 private void resetBits(ref ulong w, uint lsb, uint msb) pure nothrow @safe 1370 { 1371 assert(lsb <= msb && msb < 64); 1372 const mask = (ulong.max << lsb) & (ulong.max >> (63 - msb)); 1373 w &= ~mask; 1374 } 1375 1376 unittest 1377 { 1378 // TODO 1379 } 1380 1381 /** 1382 1383 $(D HeapBlock) implements a simple heap consisting of one contiguous area 1384 of memory organized in blocks, each of size $(D theBlockSize). A block is a unit 1385 of allocation. A bitmap serves as bookkeeping data, more precisely one bit per 1386 block indicating whether that block is currently allocated or not. 1387 1388 There are advantages to storing bookkeeping data separated from the payload 1389 (as opposed to e.g. $(D AffixAllocator)). The layout is more compact, searching 1390 for a free block during allocation enjoys better cache locality, and 1391 deallocation does not touch memory around the payload being deallocated (which 1392 is often cold). 1393 1394 Allocation requests are handled on a first-fit basis. Although linear in 1395 complexity, allocation is in practice fast because of the compact bookkeeping 1396 representation, use of simple and fast bitwise routines, and memoization of the 1397 first available block position. A known issue with this general approach is 1398 fragmentation, partially mitigated by coalescing. Since $(D HeapBlock) does 1399 not maintain the allocated size, freeing memory implicitly coalesces free blocks 1400 together. Also, tuning $(D blockSize) has a considerable impact on both internal 1401 and external fragmentation. 1402 1403 The size of each block can be selected either during compilation or at run 1404 time. Statically-known block sizes are frequent in practice and yield slightly 1405 better performance. To choose a block size statically, pass it as the $(D 1406 blockSize) parameter as in $(D HeapBlock!(Allocator, 4096)). To choose a block 1407 size parameter, use $(D HeapBlock!(Allocator, chooseAtRuntime)) and pass the 1408 block size to the constructor. 1409 1410 TODO: implement $(D alignedAllocate) and $(D alignedReallocate). 1411 1412 */ 1413 struct HeapBlock(Allocator, size_t theBlockSize, 1414 size_t theAlignment = platformAlignment) 1415 { 1416 static assert(theBlockSize > 0 && theAlignment.isGoodStaticAlignment); 1417 1418 /** 1419 Parent allocator. If it has no state, $(D parent) is an alias for $(D 1420 Allocator.it). 
1421 */ 1422 static if (stateSize!Allocator) Allocator parent; 1423 else alias parent = Allocator.it; 1424 1425 /** 1426 If $(D blockSize == chooseAtRuntime), $(D HeapBlock) offers a read/write 1427 property $(D blockSize). It must be set to a power of two before any use 1428 of the allocator. Otherwise, $(D blockSize) is an alias for $(D 1429 theBlockSize). 1430 */ 1431 static if (theBlockSize != chooseAtRuntime) 1432 { 1433 alias blockSize = theBlockSize; 1434 } 1435 else 1436 { 1437 @property uint blockSize() { return _blockSize; } 1438 @property void blockSize(uint s) 1439 { 1440 assert(!_control && s % alignment == 0); 1441 _blockSize = s; 1442 } 1443 private uint _blockSize; 1444 } 1445 1446 /** 1447 The alignment offered is user-configurable statically through parameter 1448 $(D theAlignment), defaulted to $(D platformAlignment). 1449 */ 1450 alias alignment = theAlignment; 1451 1452 private uint _blocks; 1453 private ulong[] _control; 1454 private void[] _payload; 1455 private size_t _startIdx; 1456 1457 /** 1458 Constructs a block allocator given the total number of blocks. Only one $(D 1459 parent.allocate) call will be made, and the layout puts the bitmap at the 1460 front followed immediately by the payload. The constructor does not perform the allocation, however; allocation is done lazily upon the first call to 1461 $(D allocate). 1462 */ 1463 this(uint blocks) 1464 { 1465 _blocks = blocks; 1466 } 1467 1468 private void initialize() pure nothrow @trusted 1469 { 1470 assert(_blocks); 1471 const controlBytes = ((_blocks + 63) / 64) * 8; 1472 const controlBytesRounded = controlBytes.roundUpToMultipleOf( 1473 alignment); 1474 const payloadBytes = _blocks * blockSize; 1475 auto allocatedByUs = parent.allocate( 1476 controlBytesRounded // control bits 1477 + payloadBytes // payload 1478 ); 1479 auto m = cast(ulong[]) allocatedByUs; 1480 _control = m[0 .. controlBytes / 8]; 1481 _control[] = 0; 1482 _payload = m[controlBytesRounded / 8 .. $]; 1483 assert(_payload.length == _blocks * blockSize/+, 1484 text(_payload.length, " != ", _blocks * blockSize)+/); 1485 } 1486 1487 private void initialize(void[] store) pure nothrow @trusted 1488 { 1489 assert(store.length); 1490 // Round store to be ulong-aligned 1491 store = store.roundStartToMultipleOf(ulong.alignof); 1492 assert(store.length); 1493 /* Divide data between control and payload. The equation is (in real 1494 numbers, not integers): bs * x + x / 8 = store.length, where x is 1495 the number of blocks. 1496 */ 1497 double approxBlocks = (8.0 * store.length) / (8 * blockSize + 1); 1498 import std.math; 1499 auto blocks = cast(size_t) (approxBlocks + nextDown(1.0)); 1500 assert(blocks > 0); 1501 assert(blockSize); 1502 assert(blocks * blockSize + ((blocks + 63) / 64) * 8 >= store.length/+, 1503 text(approxBlocks, " ", blocks, " ", blockSize, " ", 1504 store.length)+/); 1505 while (blocks * blockSize + ((blocks + 63) / 64) * 8 > store.length) 1506 { 1507 --blocks; 1508 assert(blocks > 0); 1509 } 1510 auto control = cast(ulong[]) store[0 .. ((blocks + 63) / 64) * 8]; 1511 store = store[control.length * 8 .. $]; 1512 // Take into account data alignment necessities 1513 store = store.roundStartToMultipleOf(alignment); 1514 assert(store.length); 1515 while (blocks * blockSize > store.length) 1516 { 1517 --blocks; 1518 } 1519 auto payload = store[0 .. 
blocks * blockSize]; 1520 initialize(control, payload, blockSize); 1521 } 1522 1523 private void initialize(ulong[] control, void[] payload, size_t blockSize) 1524 pure nothrow @trusted 1525 { 1526 assert(payload.length % blockSize == 0); 1527 assert(payload.length / blockSize <= uint.max); 1528 _blocks = cast(uint) (payload.length / blockSize); 1529 const controlWords = (_blocks + 63) / 64; 1530 assert(controlWords == control.length); 1531 _control = control; 1532 assert(control.equal(repeat(0, control.length))); 1533 _payload = payload; 1534 } 1535 1536 /* 1537 Adjusts the memoized _startIdx to the leftmost control word that has at 1538 least one zero bit. Assumes all control words to the left of $(D 1539 _control[_startIdx]) are already occupied. 1540 */ 1541 private void adjustStartIdx() 1542 { 1543 while (_startIdx < _control.length && _control[_startIdx] == ulong.max) 1544 { 1545 ++_startIdx; 1546 } 1547 } 1548 1549 /* 1550 Returns the blocks corresponding to the control bits starting at word index 1551 wordIdx and bit index msbIdx (MSB=0) for a total of howManyBlocks. 1552 */ 1553 private void[] blocksFor(size_t wordIdx, uint msbIdx, size_t howManyBlocks) 1554 { 1555 assert(msbIdx <= 63); 1556 const start = (wordIdx * 64 + msbIdx) * blockSize; 1557 const end = start + blockSize * howManyBlocks; 1558 if (end <= _payload.length) return _payload[start .. end]; 1559 // This could happen if we have more control bits than available memory. 1560 // That's possible because the control bits are rounded up to fit in 1561 // 64-bit words. 1562 return null; 1563 } 1564 1565 /** 1566 Standard allocator methods per the semantics defined above. The $(D 1567 deallocate) and $(D reallocate) methods are $(D @system) because they may 1568 move memory around, leaving dangling pointers in user code. 1569 1570 BUGS: Neither $(D deallocateAll) nor the destructor free the original memory 1571 block. Either user code or the parent allocator should carry that. 1572 */ 1573 void[] allocate(const size_t s) pure nothrow @trusted 1574 { 1575 if (!_control) 1576 { 1577 // Lazy initialize 1578 if (!_blocks) 1579 static if (hasMember!(Allocator, "allocateAll")) 1580 initialize(parent.allocateAll); 1581 else 1582 return null; 1583 else 1584 initialize(); 1585 } 1586 assert(_blocks && _control && _payload); 1587 const blocks = (s + blockSize - 1) / blockSize; 1588 void[] result = void; 1589 1590 switcharoo: 1591 switch (blocks) 1592 { 1593 case 1: 1594 // inline code here for speed 1595 // find the next available block 1596 foreach (i; _startIdx .. _control.length) 1597 { 1598 const w = _control[i]; 1599 if (w == ulong.max) continue; 1600 uint j = leadingOnes(w); 1601 assert(j < 64); 1602 assert((_control[i] & ((1UL << 63) >> j)) == 0); 1603 _control[i] |= (1UL << 63) >> j; 1604 if (i == _startIdx) 1605 { 1606 adjustStartIdx(); 1607 } 1608 result = blocksFor(i, j, 1); 1609 break switcharoo; 1610 } 1611 goto case 0; // fall through 1612 case 0: 1613 return null; 1614 case 2: .. case 63: 1615 result = smallAlloc(cast(uint) blocks); 1616 break; 1617 default: 1618 result = hugeAlloc(blocks); 1619 break; 1620 } 1621 return result ? result.ptr[0 .. s] : null; 1622 } 1623 1624 /// Ditto 1625 bool owns(void[] b) const pure nothrow @trusted 1626 { 1627 return b.ptr >= _payload.ptr 1628 && b.ptr + b.length <= _payload.ptr + _payload.length 1629 || b is null; 1630 } 1631 1632 /* 1633 Tries to allocate "blocks" blocks at the exact position indicated by the 1634 position wordIdx/msbIdx (msbIdx counts from MSB, i.e. 
MSB has index 0). If 1635 it succeeds, fills "result" with the result and returns tuple(size_t.max, 1636 0). Otherwise, returns a tuple with the next position to search. 1637 */ 1638 private Tuple!(size_t, uint) allocateAt(size_t wordIdx, uint msbIdx, 1639 size_t blocks, ref void[] result) pure nothrow @trusted 1640 { 1641 assert(blocks > 0); 1642 assert(wordIdx < _control.length); 1643 assert(msbIdx <= 63); 1644 if (msbIdx + blocks <= 64) 1645 { 1646 // Allocation should fit this control word 1647 if (setBitsIfZero(_control[wordIdx], 1648 cast(uint) (64 - msbIdx - blocks), 63 - msbIdx)) 1649 { 1650 // Success 1651 result = blocksFor(wordIdx, msbIdx, blocks); 1652 return tuple(size_t.max, 0u); 1653 } 1654 // Can't allocate, make a suggestion 1655 return msbIdx + blocks == 64 1656 ? tuple(wordIdx + 1, 0u) 1657 : tuple(wordIdx, cast(uint) (msbIdx + blocks)); 1658 } 1659 // Allocation spans two control words or more 1660 auto mask = ulong.max >> msbIdx; 1661 if (_control[wordIdx] & mask) 1662 { 1663 // We can't allocate the rest of this control word, 1664 // return a suggestion. 1665 return tuple(wordIdx + 1, 0u); 1666 } 1667 // We can allocate the rest of this control word, but we first need to 1668 // make sure we can allocate the tail. 1669 if (wordIdx + 1 == _control.length) 1670 { 1671 // No more memory 1672 return tuple(_control.length, 0u); 1673 } 1674 auto hint = allocateAt(wordIdx + 1, 0, blocks - 64 + msbIdx, result); 1675 if (hint[0] == size_t.max) 1676 { 1677 // We did it! 1678 _control[wordIdx] |= mask; 1679 result = blocksFor(wordIdx, msbIdx, blocks); 1680 return tuple(size_t.max, 0u); 1681 } 1682 // Failed, return a suggestion that skips this whole run. 1683 return hint; 1684 } 1685 1686 /* Allocates as many blocks as possible at the end of the blocks indicated 1687 by wordIdx. Returns the number of blocks allocated. */ 1688 private uint allocateAtTail(size_t wordIdx) 1689 { 1690 assert(wordIdx < _control.length); 1691 const available = trailingZeros(_control[wordIdx]); 1692 _control[wordIdx] |= ulong.max >> available; 1693 return available; 1694 } 1695 1696 private void[] smallAlloc(uint blocks) pure nothrow @trusted 1697 { 1698 assert(blocks >= 2 && blocks <= 64/+, text(blocks)+/); 1699 foreach (i; _startIdx .. 
_control.length) 1700 { 1701 // Test within the current 64-bit word 1702 const v = _control[i]; 1703 if (v == ulong.max) continue; 1704 auto j = findContigOnes(~v, blocks); 1705 if (j < 64) 1706 { 1707 // yay, found stuff 1708 setBits(_control[i], 64 - j - blocks, 63 - j); 1709 return blocksFor(i, j, blocks); 1710 } 1711 // Next, try allocations that cross a word 1712 auto available = trailingZeros(v); 1713 if (available == 0) continue; 1714 if (i + 1 >= _control.length) break; 1715 assert(available < blocks); // otherwise we should have found it 1716 auto needed = blocks - available; 1717 assert(needed > 0 && needed < 64); 1718 if (allocateAtFront(i + 1, needed)) 1719 { 1720 // yay, found a block crossing two words 1721 _control[i] |= (1UL << available) - 1; 1722 return blocksFor(i, 64 - available, blocks); 1723 } 1724 } 1725 return null; 1726 } 1727 1728 private void[] hugeAlloc(size_t blocks) pure nothrow @trusted 1729 { 1730 assert(blocks > 64); 1731 void[] result; 1732 auto pos = tuple(_startIdx, 0); 1733 for (;;) 1734 { 1735 if (pos[0] >= _control.length) 1736 { 1737 // No more memory 1738 return null; 1739 } 1740 pos = allocateAt(pos[0], pos[1], blocks, result); 1741 if (pos[0] == size_t.max) 1742 { 1743 // Found and allocated 1744 return result; 1745 } 1746 } 1747 } 1748 1749 // Rounds sizeInBytes to a multiple of blockSize. 1750 private size_t bytes2blocks(size_t sizeInBytes) 1751 { 1752 return (sizeInBytes + blockSize - 1) / blockSize; 1753 } 1754 1755 /* Allocates given blocks at the beginning blocks indicated by wordIdx. 1756 Returns true if allocation was possible, false otherwise. */ 1757 private bool allocateAtFront(size_t wordIdx, uint blocks) 1758 { 1759 assert(wordIdx < _control.length && blocks >= 1 && blocks <= 64); 1760 const mask = (1UL << (64 - blocks)) - 1; 1761 if (_control[wordIdx] > mask) return false; 1762 // yay, works 1763 _control[wordIdx] |= ~mask; 1764 return true; 1765 } 1766 1767 /// Ditto 1768 bool expand(ref void[] b, size_t delta) pure nothrow @trusted 1769 { 1770 //debug writefln("expand(%s, %s, %s)", b, minDelta, desiredDelta); 1771 if (b is null) 1772 { 1773 b = allocate(delta); 1774 return b !is null; 1775 } 1776 1777 const blocksOld = bytes2blocks(b.length); 1778 const blocksNew = bytes2blocks(b.length + delta); 1779 assert(blocksOld <= blocksNew); 1780 1781 // Possibly we have enough slack at the end of the block! 1782 if (blocksOld == blocksNew) 1783 { 1784 b = b.ptr[0 .. b.length + delta]; 1785 return true; 1786 } 1787 1788 assert((b.ptr - _payload.ptr) % blockSize == 0); 1789 const blockIdx = (b.ptr - _payload.ptr) / blockSize; 1790 const blockIdxAfter = blockIdx + blocksOld; 1791 //writefln("blockIdx: %s, blockIdxAfter: %s", blockIdx, blockIdxAfter); 1792 1793 // Try the maximum 1794 const wordIdx = blockIdxAfter / 64, 1795 msbIdx = cast(uint) (blockIdxAfter % 64); 1796 void[] p; 1797 auto hint = allocateAt(wordIdx, msbIdx, blocksNew - blocksOld, p); 1798 if (hint[0] != size_t.max) 1799 { 1800 return false; 1801 } 1802 // Expansion successful 1803 assert(p.ptr == b.ptr + blocksOld * blockSize/+, 1804 text(p.ptr, " != ", b.ptr + blocksOld * blockSize)+/); 1805 b = b.ptr[0 .. b.length + delta]; 1806 return true; 1807 } 1808 1809 /// Ditto 1810 bool reallocate(ref void[] b, size_t newSize) pure nothrow @system 1811 { 1812 if (newSize == 0) 1813 { 1814 deallocate(b); 1815 b = null; 1816 return true; 1817 } 1818 if (newSize < b.length) 1819 { 1820 // Shrink. Will shrink in place by deallocating the trailing part. 
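            // bytes2blocks rounds up, so newCapacity stays a multiple of
            // blockSize and the tail passed to deallocate below starts on a
            // block boundary.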
1821 auto newCapacity = bytes2blocks(newSize) * blockSize; 1822 deallocate(b[newCapacity .. $]); 1823 b = b[0 .. newSize]; 1824 return true; 1825 } 1826 // Attempt an in-place expansion first 1827 const delta = newSize - b.length; 1828 if (expand(b, delta)) return true; 1829 // Go the slow route 1830 return .reallocate(this, b, newSize); 1831 } 1832 1833 /// Ditto 1834 void deallocate(void[] b) 1835 { 1836 // Round up size to multiple of block size 1837 auto blocks = (b.length + blockSize - 1) / blockSize; 1838 // Locate position 1839 auto pos = b.ptr - _payload.ptr; 1840 assert(pos % blockSize == 0); 1841 auto blockIdx = pos / blockSize; 1842 auto wordIdx = blockIdx / 64, msbIdx = cast(uint) (blockIdx % 64); 1843 if (_startIdx > wordIdx) _startIdx = wordIdx; 1844 1845 // Three stages: heading bits, full words, leftover bits 1846 if (msbIdx) 1847 { 1848 if (blocks + msbIdx <= 64) 1849 { 1850 resetBits(_control[wordIdx], cast(uint) (64 - msbIdx - blocks), 1851 63 - msbIdx); 1852 return; 1853 } 1854 else 1855 { 1856 _control[wordIdx] &= ulong.max << 64 - msbIdx; 1857 blocks -= 64 - msbIdx; 1858 ++wordIdx; 1859 msbIdx = 0; 1860 } 1861 } 1862 1863 // Stage 2: reset one word at a time 1864 for (; blocks >= 64; blocks -= 64) 1865 { 1866 _control[wordIdx++] = 0; 1867 } 1868 1869 // Stage 3: deal with leftover bits, if any 1870 assert(wordIdx <= _control.length); 1871 if (blocks) 1872 { 1873 _control[wordIdx] &= ulong.max >> blocks; 1874 } 1875 } 1876 1877 /// Ditto 1878 void deallocateAll() 1879 { 1880 static if (false && hasMember!(Allocator, "deallocate")) 1881 { 1882 parent.deallocate(_allocatedByUs); 1883 this = this.init; 1884 } 1885 else 1886 { 1887 _control[] = 0; 1888 _startIdx = 0; 1889 } 1890 } 1891 } 1892 1893 /// 1894 unittest 1895 { 1896 // Create a block allocator on top of a 10KB stack region. 1897 HeapBlock!(InSituRegion!(10240, 64), 64, 64) a; 1898 static assert(hasMember!(InSituRegion!(10240, 64), "allocateAll")); 1899 auto b = a.allocate(100); 1900 assert(b.length == 100); 1901 } 1902 1903 unittest 1904 { 1905 static void testAllocateAll(size_t bs)(uint blocks, uint blocksAtATime) 1906 { 1907 assert(bs); 1908 auto a = HeapBlock!(GCAllocator, bs)(blocks); 1909 assert(a._blocks || !blocks); 1910 1911 // test allocation of 0 bytes 1912 auto x = a.allocate(0); 1913 assert(x is null); 1914 // test allocation of 1 byte 1915 x = a.allocate(1); 1916 assert(x.length == 1 || blocks == 0, text(x.ptr, " ", x.length, " ", a)); 1917 a.deallocateAll(); 1918 1919 //writeln("Control words: ", a._control.length); 1920 //writeln("Payload bytes: ", a._payload.length); 1921 bool twice = true; 1922 1923 begin: 1924 foreach (i; 0 .. blocks / blocksAtATime) 1925 { 1926 auto b = a.allocate(bs * blocksAtATime); 1927 assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); 1928 } 1929 assert(a.allocate(bs * blocksAtATime) is null); 1930 assert(a.allocate(1) is null); 1931 1932 // Now deallocate all and do it again! 1933 a.deallocateAll(); 1934 1935 // Test deallocation 1936 1937 auto v = new void[][blocks / blocksAtATime]; 1938 foreach (i; 0 .. blocks / blocksAtATime) 1939 { 1940 auto b = a.allocate(bs * blocksAtATime); 1941 assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); 1942 v[i] = b; 1943 } 1944 assert(a.allocate(bs * blocksAtATime) is null); 1945 assert(a.allocate(1) is null); 1946 1947 foreach (i; 0 .. blocks / blocksAtATime) 1948 { 1949 a.deallocate(v[i]); 1950 } 1951 1952 foreach (i; 0 .. 
blocks / blocksAtATime) 1953 { 1954 auto b = a.allocate(bs * blocksAtATime); 1955 assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); 1956 v[i] = b; 1957 } 1958 1959 foreach (i; 0 .. v.length) 1960 { 1961 a.deallocate(v[i]); 1962 } 1963 1964 if (twice) 1965 { 1966 twice = false; 1967 goto begin; 1968 } 1969 1970 a.deallocateAll; 1971 1972 // test expansion 1973 if (blocks >= blocksAtATime) 1974 { 1975 foreach (i; 0 .. blocks / blocksAtATime - 1) 1976 { 1977 auto b = a.allocate(bs * blocksAtATime); 1978 assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); 1979 (cast(ubyte[]) b)[] = 0xff; 1980 a.expand(b, blocksAtATime * bs) 1981 || assert(0, text(i)); 1982 (cast(ubyte[]) b)[] = 0xfe; 1983 assert(b.length == bs * blocksAtATime * 2, text(i, ": ", b.length)); 1984 a.reallocate(b, blocksAtATime * bs) || assert(0); 1985 assert(b.length == bs * blocksAtATime, text(i, ": ", b.length)); 1986 } 1987 } 1988 } 1989 1990 testAllocateAll!(1)(0, 1); 1991 testAllocateAll!(1)(8, 1); 1992 testAllocateAll!(4096)(128, 1); 1993 1994 testAllocateAll!(1)(0, 2); 1995 testAllocateAll!(1)(128, 2); 1996 testAllocateAll!(4096)(128, 2); 1997 1998 testAllocateAll!(1)(0, 4); 1999 testAllocateAll!(1)(128, 4); 2000 testAllocateAll!(4096)(128, 4); 2001 2002 testAllocateAll!(1)(0, 3); 2003 testAllocateAll!(1)(24, 3); 2004 testAllocateAll!(3000)(100, 1); 2005 testAllocateAll!(3000)(100, 3); 2006 2007 testAllocateAll!(1)(0, 128); 2008 testAllocateAll!(1)(128 * 1, 128); 2009 testAllocateAll!(128 * 20)(13 * 128, 128); 2010 } 2011 2012 /** 2013 $(D FallbackAllocator) is the allocator equivalent of an "or" operator in 2014 algebra. An allocation request is first attempted with the $(D Primary) 2015 allocator. If that returns $(D null), the request is forwarded to the $(D 2016 Fallback) allocator. All other requests are dispatched appropriately to one of 2017 the two allocators. 2018 2019 In order to work, $(D FallbackAllocator) requires that $(D Primary) defines the 2020 $(D owns) method. This is needed in order to decide which allocator was 2021 responsible for a given allocation. 2022 2023 $(D FallbackAllocator) is useful for fast, special-purpose allocators backed up 2024 by general-purpose allocators. The example below features a stack region backed 2025 up by the $(D GCAllocator). 2026 */ 2027 struct FallbackAllocator(Primary, Fallback) 2028 { 2029 /// The primary allocator. 2030 static if (stateSize!Primary) Primary primary; 2031 else alias primary = Primary.it; 2032 2033 /// The fallback allocator. 2034 static if (stateSize!Fallback) Fallback fallback; 2035 else alias fallback = Fallback.it; 2036 2037 /** 2038 If both $(D Primary) and $(D Fallback) are stateless, $(D FallbackAllocator) 2039 defines a static instance $(D it). 2040 */ 2041 /+static if (!stateSize!Primary && !stateSize!Fallback) 2042 { 2043 static FallbackAllocator it; 2044 }+/ 2045 2046 /** 2047 The alignment offered is the minimum of the two allocators' alignment. 2048 */ 2049 enum uint alignment = min(Primary.alignment, Fallback.alignment); 2050 2051 /** 2052 Allocates memory trying the primary allocator first. If it returns $(D 2053 null), the fallback allocator is tried. 2054 */ 2055 void[] allocate(size_t s) pure nothrow @safe 2056 { 2057 auto result = primary.allocate(s); 2058 return result ? result : fallback.allocate(s); 2059 } 2060 2061 /** 2062 2063 $(D expand) is defined if and only if at least one of the allocators 2064 defines $(D expand). It works as follows. 
If $(D primary.owns(b)), then the
2065 request is forwarded to $(D primary.expand) if it is defined, or fails
2066 (returning $(D false)) otherwise. If $(D primary) does not own $(D b), then
2067 the request is forwarded to $(D fallback.expand) if it is defined, or fails
2068 (returning $(D false)) otherwise.
2069
2070 */
2071 static if (hasMember!(Primary, "expand") || hasMember!(Fallback, "expand"))
2072 bool expand(ref void[] b, size_t delta)
2073 {
2074 if (primary.owns(b))
2075 {
2076 static if (hasMember!(Primary, "expand"))
2077 return primary.expand(b, delta);
2078 else
2079 return false;
2080 }
2081 static if (hasMember!(Fallback, "expand"))
2082 return fallback.expand(b, delta);
2083 else
2084 return false;
2085 }
2086
2087 /**
2088
2089 $(D reallocate) works as follows. If $(D primary.owns(b)), then $(D
2090 primary.reallocate(b, newSize)) is attempted. If it fails, an attempt is
2091 made to move the allocation from $(D primary) to $(D fallback).
2092
2093 If $(D primary) does not own $(D b), then $(D fallback.reallocate(b,
2094 newSize)) is attempted. If that fails, an attempt is made to move the
2095 allocation from $(D fallback) to $(D primary).
2096
2097 */
2098 bool reallocate(ref void[] b, size_t newSize) pure nothrow @trusted
2099 {
2100 bool crossAllocatorMove(From, To)(auto ref From from, auto ref To to)
2101 {
2102 auto b1 = to.allocate(newSize);
2103 if (b1 is null) return false;
2104 if (b.length < newSize) b1[0 .. b.length] = b[];
2105 else b1[] = b[0 .. newSize];
2106 static if (hasMember!(From, "deallocate"))
2107 from.deallocate(b);
2108 b = b1;
2109 return true;
2110 }
2111
2112 if (primary.owns(b))
2113 {
2114 if (primary.reallocate(b, newSize)) return true;
2115 // Move from primary to fallback
2116 return crossAllocatorMove(primary, fallback);
2117 }
2118 if (fallback.reallocate(b, newSize)) return true;
2119 // Interesting. Move from fallback to primary.
2120 return crossAllocatorMove(fallback, primary);
2121 }
2122
2123 /**
2124 $(D owns) is defined if and only if both allocators define $(D owns).
2125 Returns $(D primary.owns(b) || fallback.owns(b)).
2126 */
2127 static if (hasMember!(Primary, "owns") && hasMember!(Fallback, "owns"))
2128 bool owns(void[] b)
2129 {
2130 return primary.owns(b) || fallback.owns(b);
2131 }
2132
2133 /**
2134 $(D deallocate) is defined if and only if at least one of the allocators
2135 defines $(D deallocate). It works as follows. If $(D primary.owns(b)),
2136 then the request is forwarded to $(D primary.deallocate) if it is defined,
2137 or is a no-op otherwise. If $(D primary) does not own $(D b), then the
2138 request is forwarded to $(D fallback.deallocate) if it is defined, or is a
2139 no-op otherwise.
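
For illustration, a minimal sketch of the dispatch (assuming the primary
defines $(D owns) but not $(D deallocate), as $(D InSituRegion) does, and that
the fallback defines $(D deallocate), as $(D GCAllocator) does):

----
FallbackAllocator!(InSituRegion!16_384, GCAllocator) a;
auto small = a.allocate(64);          // fits the region
auto large = a.allocate(1024 * 1024); // served by the GC fallback
a.deallocate(small); // owned by the primary, which has no deallocate: no-op
a.deallocate(large); // not owned by the primary: routed to the fallback
----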
2140 */
2141 static if (hasMember!(Primary, "deallocate")
2142 || hasMember!(Fallback, "deallocate"))
2143 void deallocate(void[] b) pure nothrow @trusted
2144 {
2145 if (primary.owns(b))
2146 {
2147 static if (hasMember!(Primary, "deallocate"))
2148 primary.deallocate(b);
2149 }
2150 else
2151 {
2152 static if (hasMember!(Fallback, "deallocate"))
2153 fallback.deallocate(b);
2154 }
2155 }
2156 }
2157
2158 ///
2159 unittest
2160 {
2161 FallbackAllocator!(InSituRegion!16_384, GCAllocator) a;
2162 // This allocation uses the stack
2163 auto b1 = a.allocate(1024);
2164 assert(b1.length == 1024, text(b1.length));
2165 assert(a.primary.owns(b1));
2166 // This large allocation will go to the GCAllocator
2167 auto b2 = a.allocate(1024 * 1024);
2168 assert(!a.primary.owns(b2));
2169 a.deallocate(b1);
2170 a.deallocate(b2);
2171 }
2172
2173 /**
2174
2175 $(WEB en.wikipedia.org/wiki/Free_list, Free list allocator), stackable on top of
2176 another allocator. Allocation requests between $(D min) and $(D max) bytes are
2177 rounded up to $(D max) and served from a singly-linked list of buffers
2178 deallocated in the past. All other allocations are directed to $(D
2179 ParentAllocator). Due to the simplicity of free list management, allocations
2180 from the free list are fast.
2181
2182 If a program makes many allocations in the interval $(D [minSize, maxSize]) and
2183 then frees most of them, the freelist may grow large, thus making memory
2184 inaccessible to requests of other sizes. To prevent that, the $(D maxNodes)
2185 parameter allows limiting the size of the free list. Alternatively, $(D
2186 deallocateAll) clears the free list.
2187
2188 $(D Freelist) attempts to reduce internal fragmentation and improve cache
2189 locality by allocating multiple nodes at once, under the control of the $(D
2190 batchCount) parameter. This makes $(D Freelist) an efficient front for small
2191 object allocation on top of a large-block allocator. The default value of $(D
2192 batchCount) is 8, which should make the cost of freelist management negligible
2193 in most cases.
2194
2195 One instantiation is of particular interest: $(D Freelist!(0,unbounded)) puts
2196 every deallocation in the freelist, and subsequently serves any allocation from
2197 the freelist (if not empty). There is no size-matching check, which would
2198 be incorrect for a freestanding allocator but is both correct and fast when an
2199 owning allocator (such as $(D Segregator)) sits on top of the free list
2200 allocator and is already in charge of size checking.
2201
2202 */
2203 struct Freelist(ParentAllocator,
2204 size_t minSize, size_t maxSize = minSize,
2205 uint batchCount = 8, size_t maxNodes = unbounded)
2206 {
2207 static assert(minSize != unbounded, "Use minSize = 0 for no low bound.");
2208 static assert(maxSize >= (void*).sizeof,
2209 "Maximum size must accommodate a pointer.");
2210
2211 static if (minSize != chooseAtRuntime)
2212 {
2213 alias min = minSize;
2214 }
2215 else
2216 {
2217 size_t _min = chooseAtRuntime;
2218 @property size_t min() const nothrow pure @safe
2219 {
2220 assert(_min != chooseAtRuntime);
2221 return _min;
2222 }
2223 @property void min(size_t x)
2224 {
2225 enforce(x <= _max);
2226 _min = x;
2227 }
2228 static if (maxSize == chooseAtRuntime)
2229 {
2230 // Both bounds can be set, provide one function for setting both in
2231 // one shot.
2232 void setBounds(size_t low, size_t high) 2233 { 2234 enforce(low <= high && high >= (void*).sizeof); 2235 _min = low; 2236 _max = high; 2237 } 2238 } 2239 } 2240 2241 private bool tooSmall(size_t n) const nothrow pure @safe 2242 { 2243 static if (minSize == 0) return false; 2244 else return n < min; 2245 } 2246 2247 static if (maxSize != chooseAtRuntime) 2248 { 2249 alias max = maxSize; 2250 } 2251 else 2252 { 2253 size_t _max; 2254 @property size_t max() const { return _max; } 2255 @property void max(size_t x) 2256 { 2257 enforce(x >= _min && x >= (void*).sizeof); 2258 _max = x; 2259 } 2260 } 2261 2262 private bool tooLarge(size_t n) const nothrow pure @safe 2263 { 2264 static if (maxSize == unbounded) return false; 2265 else return n > max; 2266 } 2267 2268 private bool inRange(size_t n) const nothrow pure @safe 2269 { 2270 static if (minSize == maxSize && minSize != chooseAtRuntime) 2271 return n == maxSize; 2272 else return !tooSmall(n) && !tooLarge(n); 2273 } 2274 2275 version (StdDdoc) 2276 { 2277 /** 2278 Properties for getting and setting bounds. Setting a bound is only 2279 possible if the respective compile-time parameter has been set to $(D 2280 chooseAtRuntime). $(D setBounds) is defined only if both $(D minSize) 2281 and $(D maxSize) are set to $(D chooseAtRuntime). 2282 */ 2283 @property size_t min(); 2284 /// Ditto 2285 @property void min(size_t newMinSize); 2286 /// Ditto 2287 @property size_t max(); 2288 /// Ditto 2289 @property void max(size_t newMaxSize); 2290 /// Ditto 2291 void setBounds(size_t newMin, size_t newMax); 2292 /// 2293 unittest 2294 { 2295 Freelist!(Mallocator, chooseAtRuntime, chooseAtRuntime) a; 2296 // Set the maxSize first so setting the minSize doesn't throw 2297 a.max = 128; 2298 a.min = 64; 2299 a.setBounds(64, 128); // equivalent 2300 assert(a.max == 128); 2301 assert(a.min == 64); 2302 } 2303 } 2304 2305 /** 2306 The parent allocator. Depending on whether $(D ParentAllocator) holds state 2307 or not, this is a member variable or an alias for $(D ParentAllocator.it). 2308 */ 2309 static if (stateSize!ParentAllocator) ParentAllocator parent; 2310 else alias parent = ParentAllocator.it; 2311 2312 private struct Node { Node* next; } 2313 static assert(ParentAllocator.alignment >= Node.alignof); 2314 private Node* _root; 2315 private uint nodesAtATime = batchCount; 2316 2317 static if (maxNodes != unbounded) 2318 { 2319 private size_t nodes; 2320 private void incNodes() { ++nodes; } 2321 private void decNodes() { assert(nodes); --nodes; } 2322 private bool nodesFull() { return nodes >= maxNodes; } 2323 } 2324 else 2325 { 2326 private static void incNodes() { } 2327 private static void decNodes() { } 2328 private enum bool nodesFull = false; 2329 } 2330 2331 /** 2332 Alignment is defined as $(D parent.alignment). However, if $(D 2333 parent.alignment > maxSize), objects returned from the freelist will have a 2334 smaller _alignment, namely $(D maxSize) rounded up to the nearest multiple 2335 of 2. This allows $(D Freelist) to minimize internal fragmentation by 2336 allocating several small objects within an allocated block. Also, there is 2337 no disruption because no object has smaller size than its _alignment. 2338 */ 2339 enum uint alignment = ParentAllocator.alignment; 2340 2341 /** 2342 Returns $(D max) for sizes in the interval $(D [min, max]), and $(D 2343 parent.goodAllocSize(bytes)) otherwise. 2344 */ 2345 size_t goodAllocSize(size_t bytes) const nothrow pure @safe 2346 { 2347 if (inRange(bytes)) return maxSize == unbounded ? 
bytes : max; 2348 return parent.goodAllocSize(bytes); 2349 } 2350 2351 /** 2352 Allocates memory either off of the free list or from the parent allocator. 2353 */ 2354 void[] allocate(size_t bytes) pure nothrow @trusted 2355 { 2356 assert(bytes < size_t.max / 2); 2357 if (!inRange(bytes)) return parent.allocate(bytes); 2358 // Round up allocation to max 2359 if (maxSize != unbounded) bytes = max; 2360 if (!_root) return allocateFresh(bytes); 2361 // Pop off the freelist 2362 auto result = (cast(ubyte*) _root)[0 .. bytes]; 2363 _root = _root.next; 2364 decNodes(); 2365 return result; 2366 } 2367 2368 private void[] allocateFresh(const size_t bytes) pure nothrow @trusted 2369 { 2370 assert(!_root); 2371 assert(bytes == max || max == unbounded); 2372 if (nodesAtATime == 1) 2373 { 2374 // Easy case, just get it over with 2375 return parent.allocate(bytes); 2376 } 2377 static if (maxSize != unbounded && maxSize != chooseAtRuntime) 2378 { 2379 static assert((parent.alignment + max) % Node.alignof == 0, 2380 text("(", parent.alignment, " + ", max, ") % ", 2381 Node.alignof)); 2382 } 2383 else 2384 { 2385 assert((parent.alignment + bytes) % Node.alignof == 0/+, 2386 text("(", parent.alignment, " + ", bytes, ") % ", 2387 Node.alignof)+/); 2388 } 2389 2390 auto data = parent.allocate(nodesAtATime * bytes); 2391 if (!data) return null; 2392 auto result = data[0 .. bytes]; 2393 auto n = data[bytes .. $]; 2394 _root = cast(Node*) n.ptr; 2395 for (;;) 2396 { 2397 if (n.length < bytes) 2398 { 2399 (cast(Node*) data.ptr).next = null; 2400 break; 2401 } 2402 (cast(Node*) data.ptr).next = cast(Node*) n.ptr; 2403 data = n; 2404 n = data[bytes .. $]; 2405 } 2406 return result; 2407 } 2408 2409 /** 2410 If $(D b.length) is in the interval $(D [min, max]), returns $(D true). 2411 Otherwise, if $(D Parent.owns) is defined, forwards to it. Otherwise, 2412 returns $(D false). This semantics is intended to have $(D 2413 Freelist) handle deallocations of objects of the appropriate size, 2414 even for allocators that don't support $(D owns) (such as $(D Mallocator)). 2415 */ 2416 bool owns(void[] b) const pure nothrow @safe 2417 { 2418 if (inRange(b.length)) return true; 2419 static if (hasMember!(ParentAllocator, "owns")) 2420 return parent.owns(b); 2421 else 2422 return false; 2423 } 2424 2425 /** 2426 Forwards to $(D parent). 2427 */ 2428 static if (hasMember!(ParentAllocator, "expand")) 2429 bool expand(void[] b, size_t s) 2430 { 2431 return parent.expand(b, s); 2432 } 2433 2434 /// Ditto 2435 static if (hasMember!(ParentAllocator, "reallocate")) 2436 bool reallocate(void[] b, size_t s) 2437 { 2438 return parent.reallocate(b, s); 2439 } 2440 2441 /** 2442 Intercepts deallocations and caches those of the appropriate size in the 2443 freelist. For all others, forwards to $(D parent.deallocate) or does nothing 2444 if $(D Parent) does not define $(D deallocate). 2445 */ 2446 void deallocate(void[] block) 2447 { 2448 if (!nodesFull && inRange(block.length)) 2449 { 2450 auto t = _root; 2451 _root = cast(Node*) block.ptr; 2452 _root.next = t; 2453 incNodes(); 2454 } 2455 else 2456 { 2457 static if (is(typeof(parent.deallocate(block)))) 2458 parent.deallocate(block); 2459 } 2460 } 2461 2462 /** 2463 If $(D ParentAllocator) defines $(D deallocateAll), just forwards to it and 2464 reset the freelist. Otherwise, walks the list and frees each object in turn. 
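
A minimal usage sketch (assuming $(D Mallocator) as the parent, which defines
$(D deallocate) but not $(D deallocateAll), so the list is walked):

----
Freelist!(Mallocator, 0, 16) fl;
auto b = fl.allocate(16);
fl.deallocate(b);   // the block is cached on the free list
fl.deallocateAll(); // each cached node is returned to Mallocator
----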
2465 */ 2466 void deallocateAll() 2467 { 2468 static if (hasMember!(ParentAllocator, "deallocateAll")) 2469 { 2470 parent.deallocateAll(); 2471 } 2472 else static if (hasMember!(ParentAllocator, "deallocate")) 2473 { 2474 for (auto n = _root; n; n = n.next) 2475 { 2476 parent.deallocate((cast(ubyte*)n)[0 .. max]); 2477 } 2478 } 2479 _root = null; 2480 } 2481 } 2482 2483 unittest 2484 { 2485 Freelist!(GCAllocator, 0, 8, 1) fl; 2486 assert(fl._root is null); 2487 auto b1 = fl.allocate(7); 2488 //assert(fl._root !is null); 2489 auto b2 = fl.allocate(8); 2490 assert(fl._root is null); 2491 fl.deallocate(b1); 2492 assert(fl._root !is null); 2493 auto b3 = fl.allocate(8); 2494 assert(fl._root is null); 2495 } 2496 2497 /** 2498 Freelist shared across threads. Allocation and deallocation are lock-free. The 2499 parameters have the same semantics as for $(D Freelist). 2500 */ 2501 struct SharedFreelist(ParentAllocator, 2502 size_t minSize, size_t maxSize = minSize, 2503 uint batchCount = 8, size_t maxNodes = unbounded) 2504 { 2505 static assert(minSize != unbounded, "Use minSize = 0 for no low bound."); 2506 static assert(maxSize >= (void*).sizeof, 2507 "Maximum size must accommodate a pointer."); 2508 2509 private import core.atomic; 2510 2511 static if (minSize != chooseAtRuntime) 2512 { 2513 alias min = minSize; 2514 } 2515 else 2516 { 2517 shared size_t _min = chooseAtRuntime; 2518 @property size_t min() const shared 2519 { 2520 assert(_min != chooseAtRuntime); 2521 return _min; 2522 } 2523 @property void min(size_t x) shared 2524 { 2525 enforce(x <= max); 2526 enforce(cas(&_min, chooseAtRuntime, x), 2527 "SharedFreelist.min must be initialized exactly once."); 2528 } 2529 static if (maxSize == chooseAtRuntime) 2530 { 2531 // Both bounds can be set, provide one function for setting both in 2532 // one shot. 
2533 void setBounds(size_t low, size_t high) shared 2534 { 2535 enforce(low <= high && high >= (void*).sizeof); 2536 enforce(cas(&_min, chooseAtRuntime, low), 2537 "SharedFreelist.min must be initialized exactly once."); 2538 enforce(cas(&_max, chooseAtRuntime, high), 2539 "SharedFreelist.max must be initialized exactly once."); 2540 } 2541 } 2542 } 2543 2544 private bool tooSmall(size_t n) const shared 2545 { 2546 static if (minSize == 0) return false; 2547 else static if (minSize == chooseAtRuntime) return n < _min; 2548 else return n < minSize; 2549 } 2550 2551 static if (maxSize != chooseAtRuntime) 2552 { 2553 alias max = maxSize; 2554 } 2555 else 2556 { 2557 shared size_t _max = chooseAtRuntime; 2558 @property size_t max() const shared { return _max; } 2559 @property void max(size_t x) shared 2560 { 2561 enforce(x >= _min && x >= (void*).sizeof); 2562 enforce(cas(&_max, chooseAtRuntime, x), 2563 "SharedFreelist.max must be initialized exactly once."); 2564 } 2565 } 2566 2567 private bool tooLarge(size_t n) const shared 2568 { 2569 static if (maxSize == unbounded) return false; 2570 else static if (maxSize == chooseAtRuntime) return n > _max; 2571 else return n > maxSize; 2572 } 2573 2574 private bool inRange(size_t n) const shared 2575 { 2576 static if (minSize == maxSize && minSize != chooseAtRuntime) 2577 return n == maxSize; 2578 else return !tooSmall(n) && !tooLarge(n); 2579 } 2580 2581 static if (maxNodes != unbounded) 2582 { 2583 private shared size_t nodes; 2584 private void incNodes() shared 2585 { 2586 atomicOp!("+=")(nodes, 1); 2587 } 2588 private void decNodes() shared 2589 { 2590 assert(nodes); 2591 atomicOp!("-=")(nodes, 1); 2592 } 2593 private bool nodesFull() shared 2594 { 2595 return nodes >= maxNodes; 2596 } 2597 } 2598 else 2599 { 2600 private static void incNodes() { } 2601 private static void decNodes() { } 2602 private enum bool nodesFull = false; 2603 } 2604 2605 version (StdDdoc) 2606 { 2607 /** 2608 Properties for getting (and possibly setting) the bounds. Setting bounds 2609 is allowed only once , and before any allocation takes place. Otherwise, 2610 the primitives have the same semantics as those of $(D Freelist). 2611 */ 2612 @property size_t min(); 2613 /// Ditto 2614 @property void min(size_t newMinSize); 2615 /// Ditto 2616 @property size_t max(); 2617 /// Ditto 2618 @property void max(size_t newMaxSize); 2619 /// Ditto 2620 void setBounds(size_t newMin, size_t newMax); 2621 /// 2622 unittest 2623 { 2624 Freelist!(Mallocator, chooseAtRuntime, chooseAtRuntime) a; 2625 // Set the maxSize first so setting the minSize doesn't throw 2626 a.max = 128; 2627 a.min = 64; 2628 a.setBounds(64, 128); // equivalent 2629 assert(a.max == 128); 2630 assert(a.min == 64); 2631 } 2632 } 2633 2634 /** 2635 The parent allocator. Depending on whether $(D ParentAllocator) holds state 2636 or not, this is a member variable or an alias for $(D ParentAllocator.it). 2637 */ 2638 static if (stateSize!ParentAllocator) shared ParentAllocator parent; 2639 else alias parent = ParentAllocator.it; 2640 2641 private struct Node { Node* next; } 2642 static assert(ParentAllocator.alignment >= Node.alignof); 2643 private Node* _root; 2644 private uint nodesAtATime = batchCount; 2645 2646 /// Standard primitives. 2647 enum uint alignment = ParentAllocator.alignment; 2648 2649 /// Ditto 2650 size_t goodAllocSize(size_t bytes) shared 2651 { 2652 if (inRange(bytes)) return maxSize == unbounded ? 
bytes : max; 2653 return parent.goodAllocSize(bytes); 2654 } 2655 2656 /// Ditto 2657 bool owns(void[] b) shared const 2658 { 2659 if (inRange(b.length)) return true; 2660 static if (hasMember!(ParentAllocator, "owns")) 2661 return parent.owns(b); 2662 else 2663 return false; 2664 } 2665 2666 /** 2667 Forwards to $(D parent), which must also support $(D shared) primitives. 2668 */ 2669 static if (hasMember!(ParentAllocator, "expand")) 2670 bool expand(void[] b, size_t s) 2671 { 2672 return parent.expand(b, s); 2673 } 2674 2675 /// Ditto 2676 static if (hasMember!(ParentAllocator, "reallocate")) 2677 bool reallocate(void[] b, size_t s) 2678 { 2679 return parent.reallocate(b, s); 2680 } 2681 2682 /// Ditto 2683 void[] allocate(size_t bytes) shared 2684 { 2685 assert(bytes < size_t.max / 2); 2686 if (!inRange(bytes)) return parent.allocate(bytes); 2687 if (maxSize != unbounded) bytes = max; 2688 if (!_root) return allocateFresh(bytes); 2689 // Pop off the freelist 2690 shared Node* oldRoot = void, next = void; 2691 do 2692 { 2693 oldRoot = _root; // atomic load 2694 next = oldRoot is null ? null : oldRoot.next; // atomic load 2695 } 2696 while (!cas(&_root, oldRoot, next)); 2697 // great, snatched the root 2698 decNodes(); 2699 return (cast(ubyte*) oldRoot)[0 .. bytes]; 2700 } 2701 2702 private void[] allocateFresh(const size_t bytes) shared 2703 { 2704 assert(bytes == max || max == unbounded); 2705 if (nodesAtATime == 1) 2706 { 2707 // Easy case, just get it over with 2708 return parent.allocate(bytes); 2709 } 2710 static if (maxSize != unbounded && maxSize != chooseAtRuntime) 2711 { 2712 static assert( 2713 (parent.alignment + max) % Node.alignof == 0, 2714 text("(", parent.alignment, " + ", max, ") % ", 2715 Node.alignof)); 2716 } 2717 else 2718 { 2719 assert((parent.alignment + bytes) % Node.alignof == 0, 2720 text("(", parent.alignment, " + ", bytes, ") % ", 2721 Node.alignof)); 2722 } 2723 2724 auto data = parent.allocate(nodesAtATime * bytes); 2725 if (!data) return null; 2726 auto result = data[0 .. bytes]; 2727 auto n = data[bytes .. $]; 2728 auto newRoot = cast(shared Node*) n.ptr; 2729 shared Node* lastNode; 2730 for (;;) 2731 { 2732 if (n.length < bytes) 2733 { 2734 lastNode = cast(shared Node*) data.ptr; 2735 break; 2736 } 2737 (cast(Node*) data.ptr).next = cast(Node*) n.ptr; 2738 data = n; 2739 n = data[bytes .. $]; 2740 } 2741 // Created the list, now wire the new nodes in considering another 2742 // thread might have also created some nodes. 2743 do 2744 { 2745 lastNode.next = _root; 2746 } 2747 while (!cas(&_root, lastNode.next, newRoot)); 2748 return result; 2749 } 2750 2751 /// Ditto 2752 void deallocate(void[] b) shared 2753 { 2754 if (!nodesFull && inRange(b.length)) 2755 { 2756 auto newRoot = cast(shared Node*) b.ptr; 2757 shared Node* oldRoot; 2758 do 2759 { 2760 oldRoot = _root; 2761 newRoot.next = oldRoot; 2762 } 2763 while (!cas(&_root, oldRoot, newRoot)); 2764 incNodes(); 2765 } 2766 else 2767 { 2768 static if (is(typeof(parent.deallocate(block)))) 2769 parent.deallocate(block); 2770 } 2771 } 2772 2773 /// Ditto 2774 void deallocateAll() shared 2775 { 2776 static if (hasMember!(ParentAllocator, "deallocateAll")) 2777 { 2778 parent.deallocateAll(); 2779 } 2780 else static if (hasMember!(ParentAllocator, "deallocate")) 2781 { 2782 for (auto n = _root; n; n = n.next) 2783 { 2784 parent.deallocate((cast(ubyte*)n)[0 .. 
max]); 2785 } 2786 } 2787 _root = null; 2788 } 2789 } 2790 2791 // This deadlocks 2792 version (none) unittest 2793 { 2794 import core.thread, std.concurrency; 2795 2796 static shared SharedFreelist!(Mallocator, 64, 128, 8, 100) a; 2797 2798 assert(a.goodAllocSize(1) == platformAlignment); 2799 2800 auto b = a.allocate(100); 2801 a.deallocate(b); 2802 2803 static void fun(Tid tid, int i) 2804 { 2805 scope(exit) tid.send(true); 2806 auto b = cast(ubyte[]) a.allocate(100); 2807 b[] = cast(ubyte) i; 2808 2809 assert(b.equal(repeat(cast(ubyte) i, b.length))); 2810 a.deallocate(b); 2811 } 2812 2813 Tid[] tids; 2814 foreach (i; 0 .. 1000) 2815 { 2816 tids ~= spawn(&fun, thisTid, i); 2817 } 2818 2819 foreach (i; 0 .. 1000) 2820 { 2821 assert(receiveOnly!bool); 2822 } 2823 } 2824 2825 unittest 2826 { 2827 shared SharedFreelist!(Mallocator, chooseAtRuntime, chooseAtRuntime, 2828 8, 100) a; 2829 auto b = a.allocate(64); 2830 } 2831 2832 /* 2833 (This type is not public.) 2834 2835 A $(D BasicRegion) allocator allocates memory straight from an externally- 2836 provided storage as backend. There is no deallocation, and once the region is 2837 full, allocation requests return $(D null). Therefore, $(D Region)s are often 2838 used in conjunction with freelists and a fallback general-purpose allocator. 2839 2840 The region only stores two words, corresponding to the current position in the 2841 store and the available length. One allocation entails rounding up the 2842 allocation size for alignment purposes, bumping the current pointer, and 2843 comparing it against the limit. 2844 2845 The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the 2846 sizes of all allocation requests are rounded up to a multiple of $(D minAlign). 2847 Applications aiming at maximum speed may want to choose $(D minAlign = 1) and 2848 control alignment externally. 2849 */ 2850 private struct BasicRegion(uint minAlign = platformAlignment) 2851 { 2852 static assert(minAlign.isGoodStaticAlignment); 2853 private void* _current, _end; 2854 2855 /** 2856 Constructs a region backed by a user-provided store. 2857 */ 2858 this(void[] store) pure nothrow @trusted 2859 { 2860 static if (minAlign > 1) 2861 { 2862 auto newStore = cast(void*) roundUpToMultipleOf( 2863 cast(ulong) store.ptr, 2864 alignment); 2865 assert(newStore <= store.ptr + store.length); 2866 _current = newStore; 2867 } 2868 else 2869 { 2870 _current = store; 2871 } 2872 _end = store.ptr + store.length; 2873 } 2874 2875 /** 2876 The postblit of $(D BasicRegion) is disabled because such objects should not 2877 be copied around naively. 2878 */ 2879 //@disable this(this); 2880 2881 /** 2882 Standard allocator primitives. 2883 */ 2884 enum uint alignment = minAlign; 2885 2886 /// Ditto 2887 void[] allocate(size_t bytes) pure nothrow @trusted 2888 { 2889 static if (minAlign > 1) 2890 const rounded = bytes.roundUpToMultipleOf(alignment); 2891 else 2892 alias rounded = bytes; 2893 auto newCurrent = _current + rounded; 2894 if (newCurrent > _end) return null; 2895 auto result = _current[0 .. 
bytes]; 2896 _current = newCurrent; 2897 assert(cast(ulong) result.ptr % alignment == 0); 2898 return result; 2899 } 2900 2901 /// Ditto 2902 void[] alignedAllocate(size_t bytes, uint a) 2903 { 2904 // Just bump the pointer to the next good allocation 2905 auto save = _current; 2906 _current = cast(void*) roundUpToMultipleOf( 2907 cast(ulong) _current, a); 2908 if (auto b = allocate(bytes)) return b; 2909 // Failed, rollback 2910 _current = save; 2911 return null; 2912 } 2913 2914 /// Allocates and returns all memory available to this region. 2915 void[] allocateAll() 2916 { 2917 auto result = _current[0 .. available]; 2918 _current = _end; 2919 return result; 2920 } 2921 /// Nonstandard property that returns bytes available for allocation. 2922 size_t available() const 2923 { 2924 return _end - _current; 2925 } 2926 } 2927 2928 /* 2929 For implementers' eyes: Region adds more capabilities on top of $(BasicRegion) 2930 at the cost of one extra word of storage. $(D Region) "remembers" the beginning 2931 of the region and therefore is able to provide implementations of $(D owns) and 2932 $(D deallocateAll). For most applications the performance distinction between 2933 $(D BasicRegion) and $(D Region) is unimportant, so the latter should be the 2934 default choice. 2935 */ 2936 2937 /** 2938 A $(D Region) allocator manages one block of memory provided at construction. 2939 There is no deallocation, and once the region is full, allocation requests 2940 return $(D null). Therefore, $(D Region)s are often used in conjunction 2941 with freelists, a fallback general-purpose allocator, or both. 2942 2943 The region stores three words corresponding to the start of the store, the 2944 current position in the store, and the end of the store. One allocation entails 2945 rounding up the allocation size for alignment purposes, bumping the current 2946 pointer, and comparing it against the limit. 2947 2948 The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the 2949 sizes of all allocation requests are rounded up to a multiple of $(D minAlign). 2950 Applications aiming at maximum speed may want to choose $(D minAlign = 1) and 2951 control alignment externally. 2952 */ 2953 struct Region(uint minAlign = platformAlignment) 2954 { 2955 static assert(minAlign.isGoodStaticAlignment); 2956 2957 private BasicRegion!(minAlign) base; 2958 private void* _begin; 2959 2960 /** 2961 Constructs a $(D Region) object backed by $(D buffer), which must be aligned 2962 to $(D minAlign). 2963 */ 2964 this(void[] buffer) pure nothrow @safe 2965 { 2966 base = BasicRegion!minAlign(buffer); 2967 assert(buffer.ptr !is &this); 2968 _begin = base._current; 2969 } 2970 2971 /** 2972 Standard primitives. 2973 */ 2974 enum uint alignment = minAlign; 2975 2976 /// Ditto 2977 void[] allocate(size_t bytes) 2978 { 2979 return base.allocate(bytes); 2980 } 2981 2982 /// Ditto 2983 void[] alignedAllocate(size_t bytes, uint a) 2984 { 2985 return base.alignedAllocate(bytes, a); 2986 } 2987 2988 /// Ditto 2989 bool owns(void[] b) const 2990 { 2991 return b.ptr >= _begin && b.ptr + b.length <= base._end 2992 || b is null; 2993 } 2994 2995 /// Ditto 2996 void deallocateAll() 2997 { 2998 base._current = _begin; 2999 } 3000 3001 /** 3002 Nonstandard function that gives away the initial buffer used by the range, 3003 and makes the range unavailable for further allocations. This is useful for 3004 deallocating the memory assigned to the region. 3005 */ 3006 void[] relinquish() 3007 { 3008 auto result = _begin[0 .. 
base._end - _begin]; 3009 base._current = base._end; 3010 return result; 3011 } 3012 } 3013 3014 /// 3015 unittest 3016 { 3017 auto reg = Region!()(Mallocator.it.allocate(1024 * 64)); 3018 scope(exit) Mallocator.it.deallocate(reg.relinquish); 3019 auto b = reg.allocate(101); 3020 assert(b.length == 101); 3021 } 3022 3023 /** 3024 3025 $(D InSituRegion) is a convenient region that carries its storage within itself 3026 (in the form of a statically-sized array). 3027 3028 The first template argument is the size of the region and the second is the 3029 needed alignment. Depending on the alignment requested and platform details, 3030 the actual available storage may be smaller than the compile-time parameter. To 3031 make sure that at least $(D n) bytes are available in the region, use 3032 $(D InSituRegion!(n + a - 1, a)). 3033 3034 */ 3035 struct InSituRegion(size_t size, size_t minAlign = platformAlignment) 3036 { 3037 static assert(minAlign.isGoodStaticAlignment); 3038 static assert(size >= minAlign); 3039 3040 // The store will be aligned to double.alignof, regardless of the requested 3041 // alignment. 3042 union 3043 { 3044 private ubyte[size] _store = void; 3045 private double _forAlignmentOnly = void; 3046 } 3047 private void* _crt, _end; 3048 3049 /** 3050 An alias for $(D minAlign), which must be a valid alignment (nonzero power 3051 of 2). The start of the region and all allocation requests will be rounded 3052 up to a multiple of the alignment. 3053 3054 ---- 3055 InSituRegion!(4096) a1; 3056 assert(a1.alignment == platformAlignment); 3057 InSituRegion!(4096, 64) a2; 3058 assert(a2.alignment == 64); 3059 ---- 3060 */ 3061 enum uint alignment = minAlign; 3062 3063 private void lazyInit() 3064 { 3065 assert(!_crt); 3066 _crt = cast(void*) roundUpToMultipleOf( 3067 cast(ulong) _store.ptr, alignment); 3068 _end = _store.ptr + _store.length; 3069 } 3070 3071 /** 3072 Allocates $(D bytes) and returns them, or $(D null) if the region cannot 3073 accommodate the request. For efficiency reasons, if $(D bytes == 0) the 3074 function returns an empty non-null slice. 3075 */ 3076 void[] allocate(size_t bytes) pure nothrow @trusted 3077 out (result) 3078 { 3079 assert (result is null || owns(result)); 3080 } 3081 body 3082 { 3083 // Oddity: we don't return null for null allocation. Instead, we return 3084 // an empty slice with a non-null ptr. 3085 const rounded = bytes.roundUpToMultipleOf(alignment); 3086 auto newCrt = _crt + rounded; 3087 assert(newCrt >= _crt); // big overflow 3088 again: 3089 if (newCrt <= _end) 3090 { 3091 assert(_crt); // this relies on null + size > null 3092 auto result = _crt[0 .. bytes]; 3093 _crt = newCrt; 3094 return result; 3095 } 3096 // slow path 3097 if (_crt) return null; 3098 // Lazy initialize _crt 3099 lazyInit(); 3100 newCrt = _crt + rounded; 3101 goto again; 3102 } 3103 3104 /** 3105 As above, but the memory allocated is aligned at $(D a) bytes. 3106 */ 3107 void[] alignedAllocate(size_t bytes, uint a) 3108 { 3109 // Just bump the pointer to the next good allocation 3110 auto save = _crt; 3111 _crt = cast(void*) roundUpToMultipleOf( 3112 cast(ulong) _crt, a); 3113 if (auto b = allocate(bytes)) return b; 3114 // Failed, rollback 3115 _crt = save; 3116 return null; 3117 } 3118 3119 /** 3120 Returns $(D true) if and only if $(D b) is the result of a successful 3121 allocation. For efficiency reasons, if $(D b is null) the function returns 3122 $(D false). 
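
A short illustration using only the primitives documented above:

----
InSituRegion!1024 r;
auto b = r.allocate(100);
assert(r.owns(b));
assert(!r.owns(null));
----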
3123 */
3124 bool owns(const void[] b) const nothrow pure @trusted
3125 {
3126 // No nullptr
3127 return b.ptr >= _store.ptr
3128 && b.ptr + b.length <= _store.ptr + _store.length;
3129 }
3130
3131 /**
3132 Deallocates all memory allocated with this allocator.
3133 */
3134 void deallocateAll() pure nothrow @safe
3135 {
3136 _crt = _store.ptr;
3137 }
3138
3139 /**
3140 Allocates all memory available with this allocator.
3141 */
3142 void[] allocateAll()
3143 {
3144 auto s = available;
3145 auto result = _crt[0 .. s];
3146 _crt = _end;
3147 return result;
3148 }
3149
3150 /**
3151 Nonstandard function that returns the bytes available for allocation.
3152 */
3153 size_t available()
3154 {
3155 if (!_crt) lazyInit();
3156 return _end - _crt;
3157 }
3158 }
3159
3160 ///
3161 unittest
3162 {
3163 // 128KB region, aligned to the x86 cache line (64 bytes)
3164 InSituRegion!(128 * 1024, 64) r1;
3165 auto a1 = r1.allocate(101);
3166 assert(a1.length == 101);
3167
3168 // 128KB region, with fallback to the garbage collector.
3169 FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
3170 auto a2 = r2.allocate(102);
3171 assert(a2.length == 102);
3172
3173 // Reap with GC fallback.
3174 InSituRegion!(128 * 1024) tmp3;
3175 FallbackAllocator!(HeapBlock!(InSituRegion!(128 * 1024), 64, 64),
3176 GCAllocator) r3;
3177 auto a3 = r3.allocate(103);
3178 assert(a3.length == 103);
3179
3180 // Reap/GC with a freelist for small objects up to 16 bytes.
3181 InSituRegion!(128 * 1024) tmp4;
3182 Freelist!(FallbackAllocator!(
3183 HeapBlock!(InSituRegion!(128 * 1024), 64, 64), GCAllocator), 0, 16) r4;
3184 auto a4 = r4.allocate(104);
3185 assert(a4.length == 104);
3186
3187 // Same as above, except the freelist only applies to the reap.
3188 InSituRegion!(128 * 1024) tmp5;
3189 FallbackAllocator!(Freelist!(HeapBlock!(InSituRegion!(128 * 1024), 64, 64), 0, 16), GCAllocator) r5;
3190 auto a5 = r5.allocate(105);
3191 assert(a5.length == 105);
3192 }
3193
3194 unittest
3195 {
3196 InSituRegion!(4096) r1;
3197 auto a = r1.allocate(2001);
3198 assert(a.length == 2001);
3199 assert(r1.available == 2080, text(r1.available));
3200
3201 InSituRegion!(65536, 1024*4) r2;
3202 assert(r2.available <= 65536);
3203 a = r2.allocate(2001);
3204 assert(a.length == 2001);
3205 }
3206
3207 /**
3208 _Options for $(D AllocatorWithStats) defined below. Each option enables, at
3209 compile time, one specific counter, statistic, or other piece of information.
3210 */
3211 enum Options : uint
3212 {
3213 /**
3214 Counts the number of calls to $(D owns).
3215 */
3216 numOwns = 1u << 0,
3217 /**
3218 Counts the number of calls to $(D allocate). All calls are counted,
3219 including requests for zero bytes or failed requests.
3220 */
3221 numAllocate = 1u << 1,
3222 /**
3223 Counts the number of calls to $(D allocate) that succeeded, i.e. they were
3224 for more than zero bytes and returned a non-null block.
3225 */
3226 numAllocateOK = 1u << 2,
3227 /**
3228 Counts the number of calls to $(D expand), regardless of arguments or
3229 result.
3230 */
3231 numExpand = 1u << 3,
3232 /**
3233 Counts the number of calls to $(D expand) that resulted in a successful
3234 expansion.
3235 */
3236 numExpandOK = 1u << 4,
3237 /**
3238 Counts the number of calls to $(D reallocate), regardless of arguments or
3239 result.
3240 */
3241 numReallocate = 1u << 5,
3242 /**
3243 Counts the number of calls to $(D reallocate) that succeeded. (Reallocations
3244 to zero bytes count as successful.)
3245 */ 3246 numReallocateOK = 1u << 6, 3247 /** 3248 Counts the number of calls to $(D reallocate) that resulted in an in-place 3249 reallocation (no memory moved). If this number is close to the total number 3250 of reallocations, that indicates the allocator finds room at the current 3251 block's end in a large fraction of the cases, but also that internal 3252 fragmentation may be high (the size of the unit of allocation is large 3253 compared to the typical allocation size of the application). 3254 */ 3255 numReallocateInPlace = 1u << 7, 3256 /** 3257 Counts the number of calls to $(D deallocate). 3258 */ 3259 numDeallocate = 1u << 8, 3260 /** 3261 Counts the number of calls to $(D deallocateAll). 3262 */ 3263 numDeallocateAll = 1u << 9, 3264 /** 3265 Chooses all $(D numXxx) flags. 3266 */ 3267 numAll = (1u << 10) - 1, 3268 /** 3269 Tracks total cumulative bytes allocated by means of $(D allocate), 3270 $(D expand), and $(D reallocate) (when resulting in an expansion). This 3271 number always grows and indicates allocation traffic. To compute bytes 3272 currently allocated, subtract $(D bytesDeallocated) (below) from 3273 $(D bytesAllocated). 3274 */ 3275 bytesAllocated = 1u << 10, 3276 /** 3277 Tracks total cumulative bytes deallocated by means of $(D deallocate) and 3278 $(D reallocate) (when resulting in a contraction). This number always grows 3279 and indicates deallocation traffic. 3280 */ 3281 bytesDeallocated = 1u << 11, 3282 /** 3283 Tracks the sum of all $(D delta) values in calls of the form 3284 $(D expand(b, delta)) that succeed (return $(D true)). 3285 */ 3286 bytesExpanded = 1u << 12, 3287 /** 3288 Tracks the sum of all $(D b.length - s) with $(D b.length > s) in calls of 3289 the form $(D realloc(b, s)) that succeed (return $(D true)). 3290 */ 3291 bytesContracted = 1u << 13, 3292 /** 3293 Tracks the sum of all bytes moved as a result of calls to $(D realloc) that 3294 were unable to reallocate in place. A large number (relative to $(D 3295 bytesAllocated)) indicates that the application should use larger 3296 preallocations. 3297 */ 3298 bytesMoved = 1u << 14, 3299 /** 3300 Measures the sum of extra bytes allocated beyond the bytes requested, i.e. 3301 the $(WEB goo.gl/YoKffF, internal fragmentation). This is the current 3302 effective number of slack bytes, and it goes up and down with time. 3303 */ 3304 bytesSlack = 1u << 15, 3305 /** 3306 Measures the maximum bytes allocated over the time. This is useful for 3307 dimensioning allocators. 3308 */ 3309 bytesHighTide = 1u << 16, 3310 /** 3311 Chooses all $(D byteXxx) flags. 3312 */ 3313 bytesAll = ((1u << 17) - 1) & ~numAll, 3314 /** 3315 Instructs $(D AllocatorWithStats) to store the size asked by the caller for 3316 each allocation. All per-allocation data is stored just before the actually 3317 allocation (see $(D AffixAllocator)). 3318 */ 3319 callerSize = 1u << 17, 3320 /** 3321 Instructs $(D AllocatorWithStats) to store the caller module for each 3322 allocation. 3323 */ 3324 callerModule = 1u << 18, 3325 /** 3326 Instructs $(D AllocatorWithStats) to store the caller's file for each 3327 allocation. 3328 */ 3329 callerFile = 1u << 19, 3330 /** 3331 Instructs $(D AllocatorWithStats) to store the caller $(D __FUNCTION__) for 3332 each allocation. 3333 */ 3334 callerFunction = 1u << 20, 3335 /** 3336 Instructs $(D AllocatorWithStats) to store the caller's line for each 3337 allocation. 3338 */ 3339 callerLine = 1u << 21, 3340 /** 3341 Instructs $(D AllocatorWithStats) to store the time of each allocation. 
3342 */ 3343 callerTime = 1u << 22, 3344 /** 3345 Chooses all $(D callerXxx) flags. 3346 */ 3347 callerAll = ((1u << 23) - 1) & ~numAll & ~bytesAll, 3348 /** 3349 Combines all flags above. 3350 */ 3351 all = (1u << 23) - 1 3352 } 3353 3354 /** 3355 3356 Allocator that collects extra data about allocations. Since each piece of 3357 information adds size and time overhead, statistics can be individually enabled 3358 or disabled through compile-time $(D flags). 3359 3360 All stats of the form $(D numXxx) record counts of events occurring, such as 3361 calls to functions and specific results. The stats of the form $(D bytesXxx) 3362 collect cumulative sizes. 3363 3364 In addition, the data $(D callerSize), $(D callerModule), $(D callerFile), $(D 3365 callerLine), and $(D callerTime) is associated with each specific allocation. 3366 This data prefixes each allocation. 3367 3368 */ 3369 struct AllocatorWithStats(Allocator, uint flags = Options.all) 3370 { 3371 private: 3372 // Per-allocator state 3373 mixin(define("ulong", 3374 "numOwns", 3375 "numAllocate", 3376 "numAllocateOK", 3377 "numExpand", 3378 "numExpandOK", 3379 "numReallocate", 3380 "numReallocateOK", 3381 "numReallocateInPlace", 3382 "numDeallocate", 3383 "numDeallocateAll", 3384 "bytesAllocated", 3385 "bytesDeallocated", 3386 "bytesExpanded", 3387 "bytesContracted", 3388 "bytesMoved", 3389 "bytesSlack", 3390 "bytesHighTide", 3391 )); 3392 3393 static string define(string type, string[] names...) 3394 { 3395 string result; 3396 foreach (v; names) 3397 result ~= "static if (flags & Options."~v~") {" 3398 "private "~type~" _"~v~";" 3399 "public const("~type~") "~v~"() const { return _"~v~"; }" 3400 "}"; 3401 return result; 3402 } 3403 3404 void add(string counter)(Signed!size_t n) 3405 { 3406 mixin("static if (flags & Options." ~ counter 3407 ~ ") _" ~ counter ~ " += n;"); 3408 } 3409 3410 void up(string counter)() { add!counter(1); } 3411 void down(string counter)() { add!counter(-1); } 3412 3413 version (StdDdoc) 3414 { 3415 /** 3416 Read-only properties enabled by the homonym $(D flags) chosen by the 3417 user. 3418 3419 Example: 3420 ---- 3421 AllocatorWithStats!(Mallocator, 3422 Options.bytesAllocated | Options.bytesDeallocated) a; 3423 auto d1 = a.allocate(10); 3424 auto d2 = a.allocate(11); 3425 a.deallocate(d1); 3426 assert(a.bytesAllocated == 21); 3427 assert(a.bytesDeallocated == 10); 3428 ---- 3429 */ 3430 @property ulong numOwns() const; 3431 /// Ditto 3432 @property ulong numAllocate() const; 3433 /// Ditto 3434 @property ulong numAllocateOK() const; 3435 /// Ditto 3436 @property ulong numExpand() const; 3437 /// Ditto 3438 @property ulong numExpandOK() const; 3439 /// Ditto 3440 @property ulong numReallocate() const; 3441 /// Ditto 3442 @property ulong numReallocateOK() const; 3443 /// Ditto 3444 @property ulong numReallocateInPlace() const; 3445 /// Ditto 3446 @property ulong numDeallocate() const; 3447 /// Ditto 3448 @property ulong numDeallocateAll() const; 3449 /// Ditto 3450 @property ulong bytesAllocated() const; 3451 /// Ditto 3452 @property ulong bytesDeallocated() const; 3453 /// Ditto 3454 @property ulong bytesExpanded() const; 3455 /// Ditto 3456 @property ulong bytesContracted() const; 3457 /// Ditto 3458 @property ulong bytesMoved() const; 3459 /// Ditto 3460 @property ulong bytesSlack() const; 3461 /// Ditto 3462 @property ulong bytesHighTide() const; 3463 } 3464 3465 // Do flags require any per allocation state? 
3466 enum hasPerAllocationState = flags & (Options.callerTime 3467 | Options.callerModule | Options.callerFile | Options.callerLine); 3468 3469 version (StdDdoc) 3470 { 3471 /** 3472 Per-allocation information that can be iterated upon by using 3473 $(D byAllocation). This only tracks live allocations and is useful for 3474 e.g. tracking memory leaks. 3475 3476 Example: 3477 ---- 3478 AllocatorWithStats!(Mallocator, Options.all) a; 3479 auto d1 = a.allocate(10); 3480 auto d2 = a.allocate(11); 3481 a.deallocate(d1); 3482 foreach (ref e; a.byAllocation) 3483 { 3484 writeln("Allocation module: ", e.callerModule); 3485 } 3486 ---- 3487 */ 3488 public struct AllocationInfo 3489 { 3490 /** 3491 Read-only property defined by the corresponding flag chosen in 3492 $(D options). 3493 */ 3494 @property size_t callerSize() const; 3495 /// Ditto 3496 @property string callerModule() const; 3497 /// Ditto 3498 @property string callerFile() const; 3499 /// Ditto 3500 @property uint callerLine() const; 3501 /// Ditto 3502 @property uint callerFunction() const; 3503 /// Ditto 3504 @property const(SysTime) callerTime() const; 3505 } 3506 } 3507 else static if (hasPerAllocationState) 3508 { 3509 public struct AllocationInfo 3510 { 3511 import std.datetime; 3512 mixin(define("string", "callerModule", "callerFile", 3513 "callerFunction")); 3514 mixin(define("uint", "callerLine")); 3515 mixin(define("size_t", "callerSize")); 3516 mixin(define("SysTime", "callerTime")); 3517 private AllocationInfo* _prev, _next; 3518 } 3519 AllocationInfo* _root; 3520 alias MyAllocator = AffixAllocator!(Allocator, AllocationInfo); 3521 3522 public auto byAllocation() 3523 { 3524 struct Voldemort 3525 { 3526 private AllocationInfo* _root; 3527 bool empty() { return _root is null; } 3528 ref AllocationInfo front() { return *_root; } 3529 void popFront() { _root = _root._next; } 3530 Voldemort save() { return this; } 3531 } 3532 return Voldemort(_root); 3533 } 3534 } 3535 else 3536 { 3537 alias MyAllocator = Allocator; 3538 } 3539 3540 public: 3541 // Parent allocator (publicly accessible) 3542 static if (stateSize!MyAllocator) MyAllocator parent; 3543 else alias parent = MyAllocator.it; 3544 3545 enum uint alignment = Allocator.alignment; 3546 3547 static if (hasMember!(Allocator, "owns")) 3548 bool owns(void[] b) 3549 { 3550 up!"numOwns"; 3551 return parent.owns(b); 3552 } 3553 3554 void[] allocate 3555 (string m = __MODULE__, string f = __FILE__, ulong n = __LINE__, 3556 string fun = __FUNCTION__) 3557 (size_t bytes) 3558 { 3559 up!"numAllocate"; 3560 auto result = parent.allocate(bytes); 3561 add!"bytesAllocated"(result.length); 3562 add!"bytesSlack"(this.goodAllocSize(result.length) - result.length); 3563 add!"numAllocateOK"(result || !bytes); // allocating 0 bytes is OK 3564 static if (flags & Options.bytesHighTide) 3565 { 3566 const bytesNow = bytesAllocated - bytesDeallocated; 3567 if (_bytesHighTide < bytesNow) _bytesHighTide = bytesNow; 3568 } 3569 static if (hasPerAllocationState) 3570 { 3571 auto p = &parent.prefix(result); 3572 static if (flags & Options.callerSize) 3573 p._callerSize = bytes; 3574 static if (flags & Options.callerModule) 3575 p._callerModule = m; 3576 static if (flags & Options.callerFile) 3577 p._callerFile = f; 3578 static if (flags & Options.callerFunction) 3579 p._callerFunction = fun; 3580 static if (flags & Options.callerLine) 3581 p._callerLine = n; 3582 static if (flags & Options.callerTime) 3583 { 3584 import std.datetime; 3585 p._callerTime = Clock.currTime; 3586 } 3587 // Wire the new info 
into the list 3588 assert(p._prev is null); 3589 p._next = _root; 3590 if (_root) _root._prev = p; 3591 _root = p; 3592 } 3593 return result; 3594 } 3595 3596 static if (hasMember!(Allocator, "expand")) 3597 bool expand(ref void[] b, size_t s) 3598 { 3599 up!"numExpand"; 3600 static if (flags & Options.bytesSlack) 3601 const bytesSlackB4 = goodAllocSize(b.length) - b.length; 3602 auto result = parent.expand(b, s); 3603 if (result) 3604 { 3605 up!"numExpandOK"; 3606 add!"bytesExpanded"(s); 3607 add!"bytesSlack"(goodAllocSize(b.length) - b.length - bytesSlackB4); 3608 } 3609 return result; 3610 } 3611 3612 bool reallocate(ref void[] b, size_t s) 3613 { 3614 up!"numReallocate"; 3615 static if (flags & Options.bytesSlack) 3616 const bytesSlackB4 = this.goodAllocSize(b.length) - b.length; 3617 static if (flags & Options.numReallocateInPlace) 3618 const oldB = b.ptr; 3619 static if (flags & Options.bytesMoved) 3620 const oldLength = b.length; 3621 static if (hasPerAllocationState) 3622 const reallocatingRoot = b && _root is &parent.prefix(b); 3623 if (!parent.reallocate(b, s)) return false; 3624 up!"numReallocateOK"; 3625 add!"bytesSlack"(this.goodAllocSize(b.length) - b.length 3626 - bytesSlackB4); 3627 if (oldB == b.ptr) 3628 { 3629 // This was an in-place reallocation, yay 3630 up!"numReallocateInPlace"; 3631 const Signed!size_t delta = b.length - oldLength; 3632 if (delta >= 0) 3633 { 3634 // Expansion 3635 add!"bytesAllocated"(delta); 3636 add!"bytesExpanded"(delta); 3637 } 3638 else 3639 { 3640 // Contraction 3641 add!"bytesDeallocated"(-delta); 3642 add!"bytesContracted"(-delta); 3643 } 3644 } 3645 else 3646 { 3647 // This was a allocate-move-deallocate cycle 3648 add!"bytesAllocated"(b.length); 3649 add!"bytesMoved"(oldLength); 3650 add!"bytesDeallocated"(oldLength); 3651 static if (hasPerAllocationState) 3652 { 3653 // Stitch the pointers again, ho-hum 3654 auto p = &parent.prefix(b); 3655 if (p._next) p._next._prev = p; 3656 if (p._prev) p._prev._next = p; 3657 if (reallocatingRoot) _root = p; 3658 } 3659 } 3660 return true; 3661 } 3662 3663 void deallocate(void[] b) 3664 { 3665 up!"numDeallocate"; 3666 add!"bytesDeallocated"(b.length); 3667 add!"bytesSlack"(-(this.goodAllocSize(b.length) - b.length)); 3668 // Remove the node from the list 3669 static if (hasPerAllocationState) 3670 { 3671 auto p = &parent.prefix(b); 3672 if (p._next) p._next._prev = p._prev; 3673 if (p._prev) p._prev._next = p._next; 3674 if (_root is p) _root = p._next; 3675 } 3676 parent.deallocate(b); 3677 } 3678 3679 static if (hasMember!(Allocator, "deallocateAll")) 3680 void deallocateAll() 3681 { 3682 up!"numDeallocateAll"; 3683 // Must force bytesDeallocated to match bytesAllocated 3684 static if ((flags & Options.bytesDeallocated) 3685 && (flags & Options.bytesDeallocated)) 3686 _bytesDeallocated = _bytesAllocated; 3687 parent.deallocateAll(); 3688 static if (hasPerAllocationState) _root = null; 3689 } 3690 } 3691 3692 unittest 3693 { 3694 void test(Allocator)() 3695 { 3696 Allocator a; 3697 auto b1 = a.allocate(100); 3698 assert(a.numAllocate == 1); 3699 auto b2 = a.allocate(101); 3700 assert(a.numAllocate == 2); 3701 assert(a.bytesAllocated == 201); 3702 auto b3 = a.allocate(202); 3703 assert(a.numAllocate == 3); 3704 assert(a.bytesAllocated == 403); 3705 3706 assert(walkLength(a.byAllocation) == 3); 3707 3708 foreach (ref e; a.byAllocation) 3709 { 3710 if (false) writeln(e); 3711 } 3712 3713 a.deallocate(b2); 3714 assert(a.numDeallocate == 1); 3715 a.deallocate(b1); 3716 assert(a.numDeallocate == 2); 3717 
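// Deallocate the last outstanding block; all counters should now balance.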
a.deallocate(b3); 3718 assert(a.numDeallocate == 3); 3719 assert(a.numAllocate == a.numDeallocate); 3720 assert(a.bytesDeallocated == 403); 3721 } 3722 3723 test!(AllocatorWithStats!Mallocator)(); 3724 test!(AllocatorWithStats!(Freelist!(Mallocator, 128)))(); 3725 } 3726 3727 //struct ArrayOfAllocators(alias make) 3728 //{ 3729 // alias Allocator = typeof(make()); 3730 // private Allocator[] allox; 3731 3732 // void[] allocate(size_t bytes) 3733 // { 3734 // void[] result = allocateNoGrow(bytes); 3735 // if (result) return result; 3736 // // Everything's full to the brim, create a new allocator. 3737 // auto newAlloc = make(); 3738 // assert(&newAlloc !is newAlloc.initial); 3739 // // Move the array to the new allocator 3740 // assert(Allocator.alignment % Allocator.alignof == 0); 3741 // const arrayBytes = (allox.length + 1) * Allocator.sizeof; 3742 // Allocator[] newArray = void; 3743 // do 3744 // { 3745 // if (arrayBytes < bytes) 3746 // { 3747 // // There is a chance we can find room in the existing allocator. 3748 // newArray = cast(Allocator[]) allocateNoGrow(arrayBytes); 3749 // if (newArray) break; 3750 // } 3751 // newArray = cast(Allocator[]) newAlloc.allocate(arrayBytes); 3752 // writeln(newArray.length); 3753 // assert(newAlloc.initial !is &newArray[$ - 1]); 3754 // if (!newArray) return null; 3755 // } while (false); 3756 3757 // assert(newAlloc.initial !is &newArray[$ - 1]); 3758 3759 // // Move data over to the new position 3760 // foreach (i, ref e; allox) 3761 // { 3762 // writeln(&e, " ", e.base.store_.ptr, " ", e.initial); 3763 // e.move(newArray[i]); 3764 // } 3765 // auto recoveredBytes = allox.length * Allocator.sizeof; 3766 // static if (hasMember!(Allocator, "deallocate")) 3767 // deallocate(allox); 3768 // allox = newArray; 3769 // assert(&allox[$ - 1] !is newAlloc.initial); 3770 // newAlloc.move(allox[$ - 1]); 3771 // assert(&allox[$ - 1] !is allox[$ - 1].initial); 3772 // if (recoveredBytes >= bytes) 3773 // { 3774 // // The new request may be served from the just-freed memory. Recurse 3775 // // and be bold. 3776 // return allocateNoGrow(bytes); 3777 // } 3778 // // Otherwise, we can't possibly fetch memory from anywhere else but the 3779 // // fresh new allocator. 
3780 // return allox.back.allocate(bytes); 3781 // } 3782 3783 // private void[] allocateNoGrow(size_t bytes) 3784 // { 3785 // void[] result; 3786 // foreach (ref a; allox) 3787 // { 3788 // result = a.allocate(bytes); 3789 // if (result) break; 3790 // } 3791 // return result; 3792 // } 3793 3794 // bool owns(void[] b) 3795 // { 3796 // foreach (i, ref a; allox) 3797 // { 3798 // if (a.owns(b)) return true; 3799 // } 3800 // return false; 3801 // } 3802 3803 // static if (hasMember!(Allocator, "deallocate")) 3804 // void deallocate(void[] b) 3805 // { 3806 // foreach (i, ref a; allox) 3807 // { 3808 // if (!a.owns(b)) continue; 3809 // a.deallocate(b); 3810 // break; 3811 // } 3812 // } 3813 //} 3814 // 3815 //version(none) unittest 3816 //{ 3817 // ArrayOfAllocators!({ return Region!()(new void[1024 * 4096]); }) a; 3818 // assert(a.allox.length == 0); 3819 // auto b1 = a.allocate(1024 * 8192); 3820 // assert(b1 is null); 3821 // b1 = a.allocate(1024 * 10); 3822 // assert(b1.length == 1024 * 10); 3823 // assert(a.allox.length == 1); 3824 // auto b2 = a.allocate(1024 * 4095); 3825 // assert(a.allox.length == 2); 3826 //} 3827 3828 3829 /** 3830 Given $(D make) as a function that returns fresh allocators, $(D 3831 CascadingAllocator) creates an allocator that lazily creates as many allocators 3832 are needed for satisfying client allocation requests. 3833 3834 The management data of the allocators is stored in memory obtained from the 3835 allocators themselves, in a private linked list. 3836 */ 3837 struct CascadingAllocator(alias make) 3838 { 3839 /// Alias for $(D typeof(make)). 3840 alias typeof(make()) Allocator; 3841 private struct Node 3842 { 3843 Allocator a; 3844 Node* next; 3845 bool nextIsInitialized; 3846 } 3847 private Node* _root; 3848 3849 /** 3850 Standard primitives. 3851 */ 3852 enum uint alignment = Allocator.alignment; 3853 3854 /// Ditto 3855 void[] allocate(size_t s) pure nothrow @trusted 3856 out (res) 3857 { 3858 import std.string; 3859 assert (res.length == 0 || res.length == s, 3860 "res.length = %d, s = %d".format(res.length, s)); 3861 } 3862 body 3863 { 3864 auto result = allocateNoGrow(s); 3865 if (result) return result; 3866 // Must create a new allocator object 3867 if (!_root) 3868 { 3869 // I mean _brand_ new allocator object 3870 auto newNodeStack = Node(make()); 3871 // Weird: store the new node inside its own allocated storage! 3872 _root = cast(Node*) newNodeStack.a.allocate(Node.sizeof).ptr; 3873 if (!_root) 3874 { 3875 // Are you serious? Not even the first allocation? 3876 return null; 3877 } 3878 newNodeStack.move(*_root); 3879 // Make sure we reserve room for the next next node 3880 _root.next = cast(Node*) _root.a.allocate(Node.sizeof).ptr; 3881 assert(_root.next); 3882 // root is set up, serve from it 3883 return allocateNoGrow(s); 3884 } 3885 // No room left, must append a new allocator 3886 auto n = _root; 3887 while (n.nextIsInitialized) n = n.next; 3888 if (!n.next) 3889 { 3890 // Resources truly exhausted, not much to do 3891 return null; 3892 } 3893 emplace(n.next, Node(make())); 3894 n.nextIsInitialized = true; 3895 // Reserve room for the next next allocator 3896 n.next.next = cast(Node*) allocateNoGrow(Node.sizeof).ptr; 3897 // Rare failure cases leave nextIsInitialized to false 3898 if (!n.next.next) n.nextIsInitialized = false; 3899 // TODO: would be nice to bring the new allocator to the front. 3900 // All done! 
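// Note: at this point the fresh allocator is reachable from the node list
// (unless even reserving its successor Node failed just above, in which case
// nextIsInitialized was reset), so the retry below can be served from it.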
3901 return allocateNoGrow(s); 3902 } 3903 3904 private void[] allocateNoGrow(size_t bytes) 3905 { 3906 void[] result; 3907 if (!_root) return result; 3908 for (auto n = _root; ; n = n.next) 3909 { 3910 result = n.a.allocate(bytes); 3911 if (result) break; 3912 if (!n.nextIsInitialized) break; 3913 } 3914 return result; 3915 } 3916 3917 /// Defined only if $(D Allocator.owns) is defined. 3918 static if (hasMember!(Allocator, "owns")) 3919 bool owns(void[] b) 3920 { 3921 if (!_root) return b is null; 3922 for (auto n = _root; ; n = n.next) 3923 { 3924 if (n.a.owns(b)) return true; 3925 if (!n.nextIsInitialized) break; 3926 } 3927 return false; 3928 } 3929 3930 /// Defined only if $(D Allocator.expand) is defined. 3931 static if (hasMember!(Allocator, "expand")) 3932 bool expand(ref void[] b, size_t delta) 3933 { 3934 if (!b) return (b = allocate(delta)) !is null; 3935 if (!_root) return false; 3936 for (auto n = _root; ; n = n.next) 3937 { 3938 if (n.a.owns(b)) return n.a.expand(b, delta); 3939 if (!n.nextIsInitialized) break; 3940 } 3941 return false; 3942 } 3943 3944 /// Allows moving data from one $(D Allocator) to another. 3945 bool reallocate(ref void[] b, size_t s) 3946 { 3947 if (!b) return (b = allocate(s)) !is null; 3948 // First attempt to reallocate within the existing node 3949 if (!_root) return false; 3950 for (auto n = _root; ; n = n.next) 3951 { 3952 static if (hasMember!(Allocator, "owns")) 3953 if (n.a.owns(b) && n.a.reallocate(b, s)) return true; 3954 if (!n.nextIsInitialized) break; 3955 } 3956 // Failed, but we may find new memory in a new node. 3957 auto newB = allocate(s); 3958 if (!newB) return false; 3959 newB[] = b[]; 3960 static if (hasMember!(Allocator, "deallocate")) 3961 deallocate(b); 3962 b = newB; 3963 return true; 3964 } 3965 3966 /// Defined only if $(D Allocator.deallocate) is defined. 3967 static if (hasMember!(Allocator, "deallocate")) 3968 void deallocate(void[] b) 3969 { 3970 if (!_root) 3971 { 3972 assert(b is null); 3973 return; 3974 } 3975 for (auto n = _root; ; n = n.next) 3976 { 3977 if (n.a.owns(b)) return n.a.deallocate(b); 3978 if (!n.nextIsInitialized) break; 3979 } 3980 assert(false); 3981 } 3982 3983 /// Defined only if $(D Allocator.deallocateAll) is defined. 3984 static if (hasMember!(Allocator, "deallocateAll")) 3985 void deallocateAll() 3986 { 3987 if (!_root) return; 3988 // This is tricky because the list of allocators is threaded through the 3989 // allocators themselves. Malloc to the rescue! 3990 // First compute the number of allocators 3991 uint k = 0; 3992 for (auto n = _root; ; n = n.next) 3993 { 3994 ++k; 3995 if (!n.nextIsInitialized) break; 3996 } 3997 auto nodes = 3998 cast(Node*[]) Mallocator.it.allocate(k * (Allocator*).sizeof); 3999 scope(exit) Mallocator.it.deallocate(nodes); 4000 foreach (ref n; nodes) 4001 { 4002 n = _root; 4003 _root = _root.next; 4004 } 4005 _root = null; 4006 // Now we can deallocate in peace 4007 foreach (n; nodes) 4008 { 4009 n.a.deallocateAll(); 4010 } 4011 } 4012 } 4013 4014 /// 4015 unittest 4016 { 4017 // Create an allocator based upon 4MB regions, fetched from the GC heap. 
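// (The argument to CascadingAllocator is a callable that builds a fresh
// allocator; it is invoked lazily, once per additional 4MB region needed.)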
4018 CascadingAllocator!(function() nothrow { return Region!()(new void[1024 * 4096]); }) a;
4019 auto b1 = a.allocate(1024 * 8192);
4020 assert(b1 is null); // can't allocate more than 4MB at a time
4021 b1 = a.allocate(1024 * 10);
4022 assert(b1.length == 1024 * 10);
4023 a.deallocateAll();
4024 }
4025
4026 unittest
4027 {
4028 CascadingAllocator!({ return Region!()(new void[1024 * 4096]); }) a;
4029 auto b1 = a.allocate(1024 * 8192);
4030 assert(b1 is null);
4031 assert(!a._root.nextIsInitialized);
4032 b1 = a.allocate(1024 * 10);
4033 assert(b1.length == 1024 * 10);
4034 auto b2 = a.allocate(1024 * 4095);
4035 assert(a._root.nextIsInitialized);
4036 a.deallocateAll();
4037 assert(!a._root);
4038 }
4039
4040 /**
4041 Dispatches allocations (and deallocations) between two allocators ($(D
4042 SmallAllocator) and $(D LargeAllocator)) depending on the size allocated, as
4043 follows. All allocations smaller than or equal to $(D threshold) will be
4044 dispatched to $(D SmallAllocator). The others will go to $(D LargeAllocator).
4045
4046 If both allocators are $(D shared), the $(D Segregator) will also offer $(D
4047 shared) methods.
4048 */
4049 struct Segregator(size_t threshold, SmallAllocator, LargeAllocator)
4050 {
4051 static if (stateSize!SmallAllocator) SmallAllocator _small;
4052 else alias SmallAllocator.it _small;
4053 static if (stateSize!LargeAllocator) LargeAllocator _large;
4054 else alias LargeAllocator.it _large;
4055
4056 version (StdDdoc)
4057 {
4058 /**
4059 The alignment offered is the minimum of the two allocators' alignments.
4060 */
4061 enum uint alignment;
4062 /**
4063 This method is defined only if at least one of the allocators defines
4064 it. The good allocation size is obtained from $(D SmallAllocator) if $(D
4065 s <= threshold), or $(D LargeAllocator) otherwise. (If one of the
4066 allocators does not define $(D goodAllocSize), the default
4067 implementation in this module applies.)
4068 */
4069 static size_t goodAllocSize(size_t s);
4070 /**
4071 The memory is obtained from $(D SmallAllocator) if $(D s <= threshold),
4072 or $(D LargeAllocator) otherwise.
4073 */
4074 void[] allocate(size_t);
4075 /**
4076 This method is defined only if both allocators define it. The call is
4077 forwarded to $(D SmallAllocator) if $(D b.length <= threshold), or $(D
4078 LargeAllocator) otherwise.
4079 */
4080 bool owns(void[] b);
4081 /**
4082 This method is defined only if at least one of the allocators defines
4083 it. If $(D SmallAllocator) defines $(D expand) and $(D b.length +
4084 delta <= threshold), the call is forwarded to $(D SmallAllocator). If
4085 $(D LargeAllocator) defines $(D expand) and $(D b.length > threshold), the
4086 call is forwarded to $(D LargeAllocator). Otherwise, the call returns
4087 $(D false).
4088 */
4089 bool expand(ref void[] b, size_t delta);
4090 /**
4091 This method is defined only if at least one of the allocators defines
4092 it. If $(D SmallAllocator) defines $(D reallocate) and $(D b.length <=
4093 threshold && s <= threshold), the call is forwarded to $(D
4094 SmallAllocator). If $(D LargeAllocator) defines $(D reallocate) and $(D
4095 b.length > threshold && s > threshold), the call is forwarded to $(D
4096 LargeAllocator). Otherwise, the call returns $(D false).
4097 */
4098 bool reallocate(ref void[] b, size_t s);
4099 /**
4100 This function is defined only if both allocators define it, and forwards
4101 appropriately depending on $(D b.length).
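
Example:
A minimal illustrative sketch (assuming both sub-allocators define $(D
deallocate), as $(D Freelist) and $(D Mallocator) do); dispatch is purely by
the block's length.
----
Segregator!(128, Freelist!(Mallocator, 0, 128), Mallocator) a;
auto small = a.allocate(100); // served by the Freelist
auto large = a.allocate(200); // served by Mallocator
a.deallocate(small);          // length 100 <= 128: forwarded to the Freelist
a.deallocate(large);          // length 200 > 128: forwarded to Mallocator
----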
4102 */
4103 void deallocate(void[] b);
4104 /**
4105 This function is defined only if both allocators define it, and calls
4106 $(D deallocateAll) for them in turn.
4107 */
4108 void deallocateAll();
4109 }
4110
4111 /**
4112 Composite allocators involving nested instantiations of $(D Segregator) make
4113 it difficult to access individual sub-allocators stored within. $(D
4114 allocatorForSize) simplifies the task by supplying the allocator nested
4115 inside a $(D Segregator) that is responsible for a specific size $(D s).
4116
4117 Example:
4118 ----
4119 alias A = Segregator!(300,
4120 Segregator!(200, A1, A2),
4121 A3);
4122 A a;
4123 static assert(is(typeof(a.allocatorForSize!10) == A1));
4124 static assert(is(typeof(a.allocatorForSize!250) == A2));
4125 static assert(is(typeof(a.allocatorForSize!301) == A3));
4126 ----
4127 */
4128 ref auto allocatorForSize(size_t s)()
4129 {
4130 static if (s <= threshold)
4131 static if (is(SmallAllocator == Segregator!(Args), Args...))
4132 return _small.allocatorForSize!s;
4133 else return _small;
4134 else
4135 static if (is(LargeAllocator == Segregator!(Args), Args...))
4136 return _large.allocatorForSize!s;
4137 else return _large;
4138 }
4139
4140 enum uint alignment = min(SmallAllocator.alignment,
4141 LargeAllocator.alignment);
4142
4143 template Impl()
4144 {
4145 void[] allocate(size_t s) nothrow pure @trusted
4146 {
4147 return s <= threshold ? _small.allocate(s) : _large.allocate(s);
4148 }
4149
4150 static if (hasMember!(SmallAllocator, "deallocate")
4151 && hasMember!(LargeAllocator, "deallocate"))
4152 void deallocate(void[] data) nothrow pure @trusted
4153 {
4154 data.length <= threshold
4155 ? _small.deallocate(data)
4156 : _large.deallocate(data);
4157 }
4158
4159 size_t goodAllocSize(size_t s) const nothrow pure @safe
4160 {
4161 return s <= threshold
4162 ? _small.goodAllocSize(s)
4163 : _large.goodAllocSize(s);
4164 }
4165
4166 static if (hasMember!(SmallAllocator, "owns")
4167 && hasMember!(LargeAllocator, "owns"))
4168 bool owns(void[] b) const nothrow pure @safe
4169 {
4170 return b.length <= threshold ?
_small.owns(b) : _large.owns(b); 4171 } 4172 4173 static if (hasMember!(SmallAllocator, "expand") 4174 || hasMember!(LargeAllocator, "expand")) 4175 bool expand(ref void[] b, size_t delta) 4176 { 4177 if (b.length + delta <= threshold) 4178 { 4179 // Old and new allocations handled by _small 4180 static if (hasMember!(SmallAllocator, "expand")) 4181 return _small.expand(b, delta); 4182 else 4183 return false; 4184 } 4185 if (b.length > threshold) 4186 { 4187 // Old and new allocations handled by _large 4188 static if (hasMember!(LargeAllocator, "expand")) 4189 return _large.expand(b, delta); 4190 else 4191 return false; 4192 } 4193 // Oops, cross-allocator transgression 4194 return false; 4195 } 4196 4197 static if (hasMember!(SmallAllocator, "reallocate") 4198 || hasMember!(LargeAllocator, "reallocate")) 4199 bool reallocate(ref void[] b, size_t s) 4200 { 4201 static if (hasMember!(SmallAllocator, "reallocate")) 4202 if (b.length <= threshold && s <= threshold) 4203 { 4204 // Old and new allocations handled by _small 4205 return _small.reallocate(b, s); 4206 } 4207 static if (hasMember!(LargeAllocator, "reallocate")) 4208 if (b.length > threshold && s > threshold) 4209 { 4210 // Old and new allocations handled by _large 4211 return _large.reallocate(b, s); 4212 } 4213 // Cross-allocator transgression 4214 return .reallocate(this, b, s); 4215 } 4216 4217 static if (hasMember!(SmallAllocator, "deallocateAll") 4218 && hasMember!(LargeAllocator, "deallocateAll")) 4219 void deallocateAll() 4220 { 4221 _small.deallocateAll(); 4222 _large.deallocateAll(); 4223 } 4224 } 4225 4226 enum sharedMethods = 4227 !stateSize!SmallAllocator 4228 && !stateSize!LargeAllocator 4229 && is(typeof(SmallAllocator.it) == shared) 4230 && is(typeof(LargeAllocator.it) == shared); 4231 //pragma(msg, sharedMethods); 4232 4233 static if (sharedMethods) 4234 { 4235 static shared(typeof(this)) it() pure nothrow @property @trusted 4236 { 4237 return cast(typeof(return)) _it; 4238 } 4239 private static const shared Segregator _it; 4240 shared { mixin Impl!(); } 4241 } 4242 else 4243 { 4244 static if (!stateSize!SmallAllocator && !stateSize!LargeAllocator) 4245 static __gshared Segregator it; 4246 mixin Impl!(); 4247 } 4248 } 4249 4250 /// 4251 unittest 4252 { 4253 alias A = 4254 Segregator!( 4255 1024 * 4, 4256 Segregator!( 4257 128, Freelist!(Mallocator, 0, 128), 4258 GCAllocator), 4259 Segregator!( 4260 1024 * 1024, Mallocator, 4261 GCAllocator) 4262 ); 4263 A a; 4264 auto b = a.allocate(200); 4265 assert(b.length == 200); 4266 a.deallocate(b); 4267 } 4268 4269 /** 4270 A $(D Segregator) with more than three arguments expands to a composition of 4271 elemental $(D Segregator)s, as illustrated by the following example: 4272 4273 ---- 4274 alias A = 4275 Segregator!( 4276 n1, A1, 4277 n2, A2, 4278 n3, A3, 4279 A4 4280 ); 4281 ---- 4282 4283 With this definition, allocation requests for $(D n1) bytes or less are directed 4284 to $(D A1); requests between $(D n1 + 1) and $(D n2) bytes (inclusive) are 4285 directed to $(D A2); requests between $(D n2 + 1) and $(D n3) bytes (inclusive) 4286 are directed to $(D A3); and requests for more than $(D n3) bytes are directed 4287 to $(D A4). If some particular range should not be handled, $(D NullAllocator) 4288 may be used appropriately. 4289 4290 */ 4291 template Segregator(Args...) 
if (Args.length > 3) 4292 { 4293 // Binary search 4294 private enum cutPoint = ((Args.length - 2) / 4) * 2; 4295 static if (cutPoint >= 2) 4296 { 4297 alias Segregator = .Segregator!( 4298 Args[cutPoint], 4299 .Segregator!(Args[0 .. cutPoint], Args[cutPoint + 1]), 4300 .Segregator!(Args[cutPoint + 2 .. $]) 4301 ); 4302 } 4303 else 4304 { 4305 // Favor small sizes 4306 alias Segregator = .Segregator!( 4307 Args[0], 4308 Args[1], 4309 .Segregator!(Args[2 .. $]) 4310 ); 4311 } 4312 4313 // Linear search 4314 //alias Segregator = .Segregator!( 4315 // Args[0], Args[1], 4316 // .Segregator!(Args[2 .. $]) 4317 //); 4318 } 4319 4320 /// 4321 unittest 4322 { 4323 alias A = 4324 Segregator!( 4325 128, Freelist!(Mallocator, 0, 128), 4326 1024 * 4, GCAllocator, 4327 1024 * 1024, Mallocator, 4328 GCAllocator 4329 ); 4330 A a; 4331 auto b = a.allocate(201); 4332 assert(b.length == 201); 4333 a.deallocate(b); 4334 } 4335 4336 /** 4337 4338 A $(D Bucketizer) uses distinct allocators for handling allocations of sizes in 4339 the intervals $(D [min, min + step - 1]), $(D [min + step, min + 2 * step - 1]), 4340 $(D [min + 2 * step, min + 3 * step - 1]), $(D ...), $(D [max - step + 1, max]). 4341 4342 $(D Bucketizer) holds a fixed-size array of allocators and dispatches calls to 4343 them appropriately. The size of the array is $(D (max + 1 - min) / step), which 4344 must be an exact division. 4345 4346 Allocations for sizes smaller than $(D min) or larger than $(D max) are illegal 4347 for $(D Bucketizer). To handle them separately, $(D Segregator) may be of use. 4348 4349 */ 4350 struct Bucketizer(Allocator, size_t min, size_t max, size_t step) 4351 { 4352 static assert((max - (min - 1)) % step == 0, 4353 "Invalid limits when instantiating " ~ Bucketizer.stringof); 4354 4355 //static if (min == chooseAtRuntime) size_t _min; 4356 //else alias _min = min; 4357 //static if (max == chooseAtRuntime) size_t _max; 4358 //else alias _max = max; 4359 //static if (step == chooseAtRuntime) size_t _step; 4360 //else alias _step = step; 4361 4362 /// The array of allocators is publicly available for e.g. initialization 4363 /// and inspection. 4364 Allocator buckets[(max - (min - 1)) / step]; 4365 4366 /** 4367 The alignment offered is the same as $(D Allocator.alignment). 4368 */ 4369 enum uint alignment = Allocator.alignment; 4370 4371 /** 4372 Rounds up to the maximum size of the bucket in which $(D bytes) falls. 4373 */ 4374 size_t goodAllocSize(size_t bytes) const 4375 { 4376 // round up bytes such that bytes - min + 1 is a multiple of step 4377 assert(bytes >= min); 4378 const min_1 = min - 1; 4379 return min_1 + roundUpToMultipleOf(bytes - min_1, step); 4380 } 4381 4382 /** 4383 Returns $(D b.length >= min && b.length <= max). 4384 */ 4385 bool owns(void[] b) const 4386 { 4387 return b.length >= min && b.length <= max; 4388 } 4389 4390 /** 4391 Directs the call to either one of the $(D buckets) allocators. 4392 */ 4393 void[] allocate(size_t bytes) 4394 { 4395 // Choose the appropriate allocator 4396 const i = (bytes - min) / step; 4397 assert(i < buckets.length); 4398 const actual = goodAllocSize(bytes); 4399 auto result = buckets[i].allocate(actual); 4400 return result.ptr[0 .. bytes]; 4401 } 4402 4403 /** 4404 This method allows expansion within the respective bucket range. It succeeds 4405 if both $(D b.length) and $(D b.length + delta) fall in a range of the form 4406 $(D [min + k * step, min + (k + 1) * step - 1]). 
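
Example:
An illustrative sketch, using the same parameters as the unittest further
below; expansion succeeds only while the new length stays within the block's
current bucket.
----
Bucketizer!(Freelist!(Mallocator, 0, unbounded), 65, 512, 64) a;
auto b = a.allocate(100); // falls in the [65, 128] bucket
assert(a.expand(b, 28));  // 128 bytes still fits that bucket
assert(b.length == 128);
assert(!a.expand(b, 1));  // 129 bytes would cross into the next bucket
----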
4407 */ 4408 bool expand(ref void[] b, size_t delta) 4409 { 4410 assert(b.length >= min && b.length <= max); 4411 const available = goodAllocSize(b.length); 4412 const desired = b.length + delta; 4413 if (available < desired) return false; 4414 b = b.ptr[0 .. desired]; 4415 return true; 4416 } 4417 4418 /** 4419 This method is only defined if $(D Allocator) defines $(D deallocate). 4420 */ 4421 static if (hasMember!(Allocator, "deallocate")) 4422 void deallocate(void[] b) 4423 { 4424 const i = (b.length - min) / step; 4425 assert(i < buckets.length); 4426 const actual = goodAllocSize(b.length); 4427 buckets.ptr[i].deallocate(b.ptr[0 .. actual]); 4428 } 4429 4430 /** 4431 This method is only defined if all allocators involved define $(D 4432 deallocateAll), and calls it for each bucket in turn. 4433 */ 4434 static if (hasMember!(Allocator, "deallocateAll")) 4435 void deallocateAll() 4436 { 4437 foreach (ref a; buckets) 4438 { 4439 a.deallocateAll(); 4440 } 4441 } 4442 } 4443 4444 /// 4445 unittest 4446 { 4447 Bucketizer!(Freelist!(Mallocator, 0, unbounded), 4448 65, 512, 64) a; 4449 auto b = a.allocate(400); 4450 assert(b.length == 400, text(b.length)); 4451 a.deallocate(b); 4452 } 4453 4454 /** 4455 Dynamic version of an allocator. This should be used wherever a uniform type is 4456 required for encapsulating various allocator implementations. 4457 4458 TODO: add support for $(D shared). 4459 */ 4460 class CAllocator 4461 { 4462 /// Returns the alignment offered. By default this method returns $(D 4463 /// platformAlignment). 4464 uint alignment() pure nothrow const @property 4465 { 4466 return platformAlignment; 4467 } 4468 4469 /** 4470 Sets the alignment and returns $(D true) on success, $(D false) if not 4471 supported. By default returns $(D false). An allocator implementation could 4472 throw an exception if it does allow setting the alignment but an invalid 4473 value is passed. 4474 */ 4475 bool alignment(uint) pure nothrow @property 4476 { 4477 return false; 4478 } 4479 4480 /** 4481 Returns the good allocation size that guarantees zero internal 4482 fragmentation. By default returns $(D s) rounded up to the nearest multiple 4483 of $(D alignment). 4484 */ 4485 size_t goodAllocSize(size_t s) pure nothrow const @property 4486 { 4487 return s.roundUpToMultipleOf(alignment); 4488 } 4489 4490 /** 4491 Allocates memory. 4492 */ 4493 abstract void[] allocate(size_t) pure nothrow @safe; 4494 4495 /** 4496 Returns $(D true) if the allocator supports $(D owns). By default returns 4497 $(D false). 4498 */ 4499 bool supportsOwns() 4500 { 4501 return false; 4502 } 4503 4504 /** 4505 Returns $(D true) if the allocator owns $(D b). By default issues $(D 4506 assert(false)). 4507 */ 4508 bool owns(void[] b) 4509 { 4510 assert(false); 4511 } 4512 4513 /// Expands a memory block in place. 4514 abstract bool expand(ref void[], size_t) pure nothrow; 4515 4516 /// Reallocates a memory block. 4517 abstract bool reallocate(ref void[] b, size_t) pure nothrow; 4518 4519 /// Deallocates a memory block. Returns $(D false) if deallocation is not 4520 /// supported. 4521 abstract bool deallocate(void[]) pure nothrow; 4522 4523 /// Deallocates all memory. Returns $(D false) if not supported. 4524 abstract bool deallocateAll() pure nothrow; 4525 4526 /// Returns $(D true) if allocator supports $(D allocateAll). By default 4527 /// returns $(D false). 4528 bool supportsAllocateAll() 4529 { 4530 return false; 4531 } 4532 4533 /** 4534 Allocates and returns all memory available to this allocator. 
By default 4535 issues $(D assert(false)). 4536 */ 4537 void[] allocateAll() 4538 { 4539 assert(false); 4540 } 4541 } 4542 4543 /** 4544 Implementation of $(D CAllocator) using $(D Allocator). This adapts a 4545 statically-built allocator type to a uniform dynamic interface that is directly 4546 usable by non-templated code. 4547 */ 4548 class CAllocatorImpl(Allocator) : CAllocator 4549 { 4550 /** 4551 The implementation is available as a public member. 4552 */ 4553 static if (stateSize!Allocator) Allocator impl; 4554 else alias impl = Allocator.it; 4555 4556 /// Returns $(D impl.alignment). 4557 override uint alignment() pure nothrow const @property 4558 { 4559 return impl.alignment; 4560 } 4561 4562 /** 4563 If $(D Allocator) supports alignment setting, performs it and returns $(D 4564 true). Otherwise, returns $(D false). 4565 */ 4566 override bool alignment(uint a) pure nothrow const @property 4567 { 4568 static if (is(typeof(impl.alignment = a))) 4569 { 4570 impl.alignment = a; 4571 return true; 4572 } 4573 else 4574 { 4575 return false; 4576 } 4577 } 4578 4579 /** 4580 Returns $(D impl.goodAllocSize(s)). 4581 */ 4582 override size_t goodAllocSize(size_t s) pure nothrow const @property 4583 { 4584 return impl.goodAllocSize(s); 4585 } 4586 4587 /** 4588 Returns $(D impl.allocate(s)). 4589 */ 4590 override void[] allocate(size_t s) pure nothrow @safe 4591 { 4592 return impl.allocate(s); 4593 } 4594 4595 /** 4596 Returns $(D true) if $(D Allocator) supports $(D owns). 4597 */ 4598 override bool supportsOwns() pure nothrow const @safe 4599 { 4600 return hasMember!(Allocator, "owns"); 4601 } 4602 4603 /** 4604 Overridden only if $(D Allocator) implements $(D owns). In that case, 4605 returns $(D impl.owns(b)). 4606 */ 4607 static if (hasMember!(Allocator, "owns")) 4608 override bool owns(void[] b) 4609 { 4610 return impl.owns(b); 4611 } 4612 4613 /// Returns $(D impl.expand(b, s)) if defined, $(D false) otherwise. 4614 override bool expand(ref void[] b, size_t s) pure nothrow 4615 { 4616 static if (hasMember!(Allocator, "expand")) 4617 return impl.expand(b, s); 4618 else 4619 return false; 4620 } 4621 4622 /// Returns $(D impl.reallocate(b, s)). 4623 override bool reallocate(ref void[] b, size_t s) 4624 { 4625 return impl.reallocate(b, s); 4626 } 4627 4628 /// Calls $(D impl.deallocate(b)) and returns $(D true) if defined, 4629 /// otherwise returns $(D false). 4630 override bool deallocate(void[] b) pure nothrow 4631 { 4632 static if (hasMember!(Allocator, "deallocate")) 4633 { 4634 impl.deallocate(b); 4635 return true; 4636 } 4637 else 4638 { 4639 return false; 4640 } 4641 } 4642 4643 /// Calls $(D impl.deallocateAll()) and returns $(D true) if defined, 4644 /// otherwise returns $(D false). 4645 override bool deallocateAll() pure nothrow 4646 { 4647 static if (hasMember!(Allocator, "deallocateAll")) 4648 { 4649 impl.deallocateAll(); 4650 return true; 4651 } 4652 else 4653 { 4654 return false; 4655 } 4656 } 4657 4658 /// Returns $(D true) if allocator supports $(D allocateAll). By default 4659 /// returns $(D false). 4660 override bool supportsAllocateAll() pure nothrow const 4661 { 4662 return hasMember!(Allocator, "allocateAll"); 4663 } 4664 4665 /** 4666 Overridden only if $(D Allocator) implements $(D allocateAll). In that case, 4667 returns $(D impl.allocateAll()). 
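
Example:
An illustrative sketch of use through the dynamic interface; it assumes the
chosen $(D Allocator), here $(D GCAllocator), does not define $(D
allocateAll), so the guarded call is skipped.
----
CAllocator a = new CAllocatorImpl!GCAllocator;
if (a.supportsAllocateAll())
{
    auto everything = a.allocateAll();
}
----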
4668 */ 4669 static if (hasMember!(Allocator, "allocateAll")) 4670 override void[] allocateAll() pure nothrow 4671 { 4672 return impl.allocateAll(); 4673 } 4674 } 4675 4676 /// 4677 unittest 4678 { 4679 /// Define an allocator bound to the built-in GC. 4680 CAllocator alloc = new CAllocatorImpl!GCAllocator; 4681 auto b = alloc.allocate(42); 4682 assert(b.length == 42); 4683 assert(alloc.deallocate(b)); 4684 4685 // Define an elaborate allocator and bind it to the class API. 4686 // Note that the same variable "alloc" is used. 4687 alias FList = Freelist!(GCAllocator, 0, unbounded); 4688 alias A = Segregator!( 4689 8, Freelist!(GCAllocator, 0, 8), 4690 128, Bucketizer!(FList, 1, 128, 16), 4691 256, Bucketizer!(FList, 129, 256, 32), 4692 512, Bucketizer!(FList, 257, 512, 64), 4693 1024, Bucketizer!(FList, 513, 1024, 128), 4694 2048, Bucketizer!(FList, 1025, 2048, 256), 4695 3584, Bucketizer!(FList, 2049, 3584, 512), 4696 4072 * 1024, CascadingAllocator!( 4697 () => HeapBlock!(GCAllocator, 4096)(4072 * 1024)), 4698 GCAllocator 4699 ); 4700 4701 alloc = new CAllocatorImpl!A; 4702 b = alloc.allocate(101); 4703 assert(alloc.deallocate(b)); 4704 } 4705 4706 private bool isPowerOf2(uint x) 4707 { 4708 return (x & (x - 1)) == 0; 4709 } 4710 4711 private bool isGoodStaticAlignment(uint x) 4712 { 4713 return x.isPowerOf2 && x > 0; 4714 } 4715 4716 private bool isGoodDynamicAlignment(uint x) 4717 { 4718 return x.isPowerOf2 && x >= (void*).sizeof; 4719 } 4720 4721 __EOF__ 4722 4723 version(none) struct TemplateAllocator 4724 { 4725 enum alignment = platformAlignment; 4726 static size_t goodAllocSize(size_t s) 4727 { 4728 } 4729 void[] allocate(size_t) 4730 { 4731 } 4732 bool owns(void[]) 4733 { 4734 } 4735 bool expand(ref void[] b, size_t) 4736 { 4737 } 4738 bool reallocate(ref void[] b, size_t) 4739 { 4740 } 4741 void deallocate(void[] b) 4742 { 4743 } 4744 void deallocateAll() 4745 { 4746 } 4747 void[] allocateAll() 4748 { 4749 } 4750 static shared TemplateAllocator it; 4751 }