1 /**
2 	Utility functions for memory management
3 
4 	Note that this module currently is a big sand box for testing allocation related stuff.
5 	Nothing here, including the interfaces, is final but rather a lot of experimentation.
6 
7 	Copyright: © 2012-2013 RejectedSoftware e.K.
8 	License: Subject to the terms of the MIT license, as written in the included LICENSE.txt file.
9 	Authors: Sönke Ludwig
10 */
11 module vutil.memory;
12 
13 import core.exception : OutOfMemoryError;
14 import core.stdc.stdlib;
15 import core.memory;
16 import std.conv;
17 import std.exception : enforceEx;
18 import std.traits;
19 import std.algorithm;
20 
/**
	Returns the process-wide default allocator.

	In VibeManualMemoryManagement builds this simply forwards to the
	malloc-based manual allocator chain; otherwise a lazily constructed,
	mutex-protected, GC-backed allocator is returned.
*/
Allocator defaultAllocator() nothrow
{
	version (VibeManualMemoryManagement) {
		return manualAllocator();
	} else {
		static __gshared Allocator s_defaultAlloc;
		if (s_defaultAlloc is null) {
			// NOTE(review): unsynchronized lazy init of a __gshared -
			// presumably the first call happens before threads spawn; verify.
			s_defaultAlloc = new GCAllocator;
			//s_defaultAlloc = new AutoFreeListAllocator(s_defaultAlloc);
			//s_defaultAlloc = new DebugAllocator(s_defaultAlloc);
			s_defaultAlloc = new LockAllocator(s_defaultAlloc);
		}
		return s_defaultAlloc;
	}
}
36 
/**
	Returns the shared allocator chain used for manual memory management.

	Lazily builds malloc -> auto free lists -> lock proxy on first use.
*/
Allocator manualAllocator() nothrow
{
	static __gshared Allocator s_manualAlloc;
	if (s_manualAlloc is null) {
		// NOTE(review): unsynchronized lazy init of a __gshared -
		// presumably the first call happens before threads spawn; verify.
		s_manualAlloc = new MallocAllocator;
		s_manualAlloc = new AutoFreeListAllocator(s_manualAlloc);
		//s_manualAlloc = new DebugAllocator(s_manualAlloc);
		s_manualAlloc = new LockAllocator(s_manualAlloc);
	}
	return s_manualAlloc;
}
48 
/**
	Allocates (and optionally constructs) a single object of type T from the
	given allocator.

	With MANAGED = true the object is constructed in place via
	internalEmplace and, if T contains pointers, the memory is registered
	with the GC so references stored inside stay alive. With MANAGED = false
	the raw, uninitialized memory is simply cast to the target type.

	Returns: the class reference for class types, a T* otherwise.
*/
auto allocObject(T, bool MANAGED = true, ARGS...)(Allocator allocator, ARGS args)
{
	auto mem = allocator.alloc(AllocSize!T);
	static if( MANAGED ){
		static if( hasIndirections!T )
			GC.addRange(mem.ptr, mem.length); // let the GC scan embedded references
		return internalEmplace!T(mem, args);
	}
	else static if( is(T == class) ) return cast(T)mem.ptr;
	else return cast(T*)mem.ptr;
}
60 
/**
	Allocates an array of n elements of type T from the given allocator.

	With MANAGED = true each element is default-constructed in place and,
	if T contains pointers, the block is registered with the GC. With
	MANAGED = false the element memory is left uninitialized.
*/
T[] allocArray(T, bool MANAGED = true)(Allocator allocator, size_t n)
{
	auto mem = allocator.alloc(T.sizeof * n);
	auto ret = cast(T[])mem;
	static if( MANAGED ){
		static if( hasIndirections!T )
			GC.addRange(mem.ptr, mem.length); // paired with removeRange in freeArray
		// TODO: use memset for class, pointers and scalars
		foreach (ref el; ret) {
			// default-construct each element in its slot
			internalEmplace!T(cast(void[])((&el)[0 .. 1]));
		}
	}
	return ret;
}
75 
/**
	Destroys (optionally) and deallocates an array previously returned by
	allocArray with the same MANAGED flag.

	Destructors are run in reverse element order. The array reference is
	reset to null afterwards to avoid dangling use.
*/
void freeArray(T, bool MANAGED = true)(Allocator allocator, ref T[] array, bool call_destructors = true)
{
	static if (MANAGED) {
		static if (hasIndirections!T)
			GC.removeRange(array.ptr); // must match the addRange in allocArray
		static if (hasElaborateDestructor!T)
			if (call_destructors)
				foreach_reverse (ref el; array)
					destroy(el);
	}
	allocator.free(cast(void[])array);
	array = null;
}
89 
90 
/**
	Common interface of all allocators in this module.

	All methods are nothrow; allocation failure is signaled by throwing an
	Error (e.g. OutOfMemoryError), not an Exception. Every returned block
	is aligned to `alignment` bytes and has exactly the requested length,
	as enforced by the in/out contracts below.
*/
interface Allocator {
nothrow:
	// Alignment guaranteed for every block handed out by any implementation.
	enum size_t alignment = 0x10;
	// Mask for testing/extracting the misalignment of a pointer.
	enum size_t alignmentMask = alignment-1;

	/// Allocates sz bytes of aligned memory.
	void[] alloc(size_t sz)
		out { assert((cast(size_t)__result.ptr & alignmentMask) == 0, "alloc() returned misaligned data."); }

	/// Resizes a previously allocated block; the block may move.
	void[] realloc(void[] mem, size_t new_sz)
		in {
			assert(mem.ptr !is null, "realloc() called with null array.");
			assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to realloc().");
		}
		out { assert((cast(size_t)__result.ptr & alignmentMask) == 0, "realloc() returned misaligned data."); }

	/// Releases a previously allocated block; mem must cover the full block.
	void free(void[] mem)
		in {
			assert(mem.ptr !is null, "free() called with null array.");
			assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to free().");
		}
}
112 
113 
/**
	Simple proxy allocator protecting its base allocator with a mutex.

	Every call is forwarded to the wrapped allocator from inside a
	synchronized(this) statement, making a non-thread-safe allocator
	usable from multiple threads.
*/
class LockAllocator : Allocator {
	private {
		Allocator m_base; // the wrapped (non-thread-safe) allocator
	}
	this(Allocator base) nothrow { m_base = base; }
	/// Locked forward of alloc() to the base allocator.
	void[] alloc(size_t sz) {
		// Since 2068, synchronized statements are annotated nothrow.
		// DMD#4115, Druntime#1013, Druntime#1021, Phobos#2704
		// However, they were "logically" nothrow before.
		static if (__VERSION__ <= 2067)
			scope (failure) assert(0, "Internal error: function should be nothrow");

		synchronized (this)
			return m_base.alloc(sz);
	}
	/// Locked forward of realloc() to the base allocator.
	void[] realloc(void[] mem, size_t new_sz)
		in {
			assert(mem.ptr !is null, "realloc() called with null array.");
			assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to realloc().");
		}
		body {
			// Since 2068, synchronized statements are annotated nothrow.
			// DMD#4115, Druntime#1013, Druntime#1021, Phobos#2704
			// However, they were "logically" nothrow before.
			static if (__VERSION__ <= 2067)
				scope (failure) assert(0, "Internal error: function should be nothrow");

			synchronized(this)
				return m_base.realloc(mem, new_sz);
		}
	/// Locked forward of free() to the base allocator.
	void free(void[] mem)
		in {
			assert(mem.ptr !is null, "free() called with null array.");
			assert((cast(size_t)mem.ptr & alignmentMask) == 0, "misaligned pointer passed to free().");
		}
		body {
			// Since 2068, synchronized statements are annotated nothrow.
			// DMD#4115, Druntime#1013, Druntime#1021, Phobos#2704
			// However, they were "logically" nothrow before.
			static if (__VERSION__ <= 2067)
				scope (failure) assert(0, "Internal error: function should be nothrow");
			synchronized(this)
				m_base.free(mem);
		}
}
162 
/**
	Proxy allocator that tracks all outstanding allocations of its base
	allocator and asserts on invalid pointers/sizes passed to
	realloc()/free().

	Exposes block count and current/peak byte usage as properties. The
	bookkeeping map is backed by the manual allocator so that it does not
	disturb the allocator under test.
*/
final class DebugAllocator : Allocator {
	import vutil.hashmap : HashMap;
	private {
		Allocator m_baseAlloc;            // allocator being instrumented
		HashMap!(void*, size_t) m_blocks; // block pointer -> allocated size
		size_t m_bytes;                   // bytes currently allocated
		size_t m_maxBytes;                // high-water mark of m_bytes
	}

	this(Allocator base_allocator) nothrow
	{
		m_baseAlloc = base_allocator;
		m_blocks = HashMap!(void*, size_t)(manualAllocator());
	}

	/// Number of currently outstanding allocations.
	@property size_t allocatedBlockCount() const { return m_blocks.length; }
	/// Number of currently allocated bytes.
	@property size_t bytesAllocated() const { return m_bytes; }
	/// Peak number of simultaneously allocated bytes.
	@property size_t maxBytesAllocated() const { return m_maxBytes; }

	void[] alloc(size_t sz)
	{
		auto ret = m_baseAlloc.alloc(sz);
		assert(ret.length == sz, "base.alloc() returned block with wrong size.");
		// size_t.max serves as the "not found" sentinel in the block map
		assert(m_blocks.getNothrow(ret.ptr, size_t.max) == size_t.max, "base.alloc() returned block that is already allocated.");
		m_blocks[ret.ptr] = sz;
		m_bytes += sz;
		if( m_bytes > m_maxBytes ){
			m_maxBytes = m_bytes;
		}
		return ret;
	}

	void[] realloc(void[] mem, size_t new_size)
	{
		auto sz = m_blocks.getNothrow(mem.ptr, size_t.max);
		assert(sz != size_t.max, "realloc() called with non-allocated pointer.");
		assert(sz == mem.length, "realloc() called with block of wrong size.");
		auto ret = m_baseAlloc.realloc(mem, new_size);
		assert(ret.length == new_size, "base.realloc() returned block with wrong size.");
		// ret may alias mem if the base allocator resized in place
		assert(ret.ptr is mem.ptr || m_blocks.getNothrow(ret.ptr, size_t.max) == size_t.max, "base.realloc() returned block that is already allocated.");
		m_bytes -= sz;
		m_blocks.remove(mem.ptr);
		m_blocks[ret.ptr] = new_size;
		m_bytes += new_size;
		return ret;
	}
	void free(void[] mem)
	{
		auto sz = m_blocks.getNothrow(mem.ptr, size_t.max);
		assert(sz != size_t.max, "free() called with non-allocated object.");
		assert(sz == mem.length, "free() called with block of wrong size.");
		m_baseAlloc.free(mem);
		m_bytes -= sz;
		m_blocks.remove(mem.ptr);
	}
}
219 
/**
	Allocator based on the C runtime's malloc/realloc/free.

	Each block is over-allocated by Allocator.alignment bytes and the
	returned pointer is shifted to the next alignment boundary; the shift
	amount is recorded in the byte directly preceding the returned pointer
	(see adjustPointerAlignment/extractUnalignedPointer), so the original
	malloc pointer can be recovered in realloc()/free().
*/
final class MallocAllocator : Allocator {
	void[] alloc(size_t sz)
	{
		// pre-constructed error instance - allocating one on OOM would itself fail
		static err = new immutable OutOfMemoryError;
		auto ptr = .malloc(sz + Allocator.alignment);
		if (ptr is null) throw err;
		return adjustPointerAlignment(ptr)[0 .. sz];
	}

	void[] realloc(void[] mem, size_t new_size)
	{
		static err = new immutable OutOfMemoryError;
		size_t csz = min(mem.length, new_size);
		auto p = extractUnalignedPointer(mem.ptr);
		size_t oldmisalign = mem.ptr - p;

		auto pn = cast(ubyte*).realloc(p, new_size+Allocator.alignment);
		// report OOM like alloc() does instead of writing through a null-derived
		// pointer in adjustPointerAlignment below (the original block stays valid)
		if (pn is null) throw err;
		if (p == pn) return pn[oldmisalign .. new_size+oldmisalign];

		auto pna = cast(ubyte*)adjustPointerAlignment(pn);
		auto newmisalign = pna - pn;

		// account for changed alignment after realloc (move memory back to aligned position)
		if (oldmisalign != newmisalign) {
			if (newmisalign > oldmisalign) {
				foreach_reverse (i; 0 .. csz)
					pn[i + newmisalign] = pn[i + oldmisalign];
			} else {
				foreach (i; 0 .. csz)
					pn[i + newmisalign] = pn[i + oldmisalign];
			}
		}

		return pna[0 .. new_size];
	}

	void free(void[] mem)
	{
		.free(extractUnalignedPointer(mem.ptr));
	}
}
260 
/**
	Allocator based on the D garbage collector.

	Blocks are over-allocated by Allocator.alignment bytes and shifted to
	an aligned boundary (the shift is stored in the preceding byte, see
	adjustPointerAlignment). free() is deliberately a no-op - the GC
	reclaims the memory once it becomes unreachable.
*/
final class GCAllocator : Allocator {
	void[] alloc(size_t sz)
	{
		auto mem = GC.malloc(sz+Allocator.alignment);
		auto alignedmem = adjustPointerAlignment(mem);
		assert(alignedmem - mem <= Allocator.alignment);
		auto ret = alignedmem[0 .. sz];
		ensureValidMemory(ret);
		return ret;
	}
	void[] realloc(void[] mem, size_t new_size)
	{
		size_t csz = min(mem.length, new_size);

		auto p = extractUnalignedPointer(mem.ptr);
		size_t misalign = mem.ptr - p;
		assert(misalign <= Allocator.alignment);

		void[] ret;
		if (new_size <= mem.length) {
			// Shrinking (or keeping the size) can always be done in place.
			// This also avoids the size_t underflow of `new_size - mem.length`
			// that the grow path below would pass to GC.extend.
			ret = mem.ptr[0 .. new_size];
		} else {
			// Try to grow the existing block in place first.
			auto extended = GC.extend(p, new_size - mem.length, new_size - mem.length);
			if (extended) {
				assert(extended >= new_size+Allocator.alignment);
				ret = p[misalign .. new_size+misalign];
			} else {
				// Could not extend - allocate a fresh block and copy the contents.
				ret = alloc(new_size);
				ret[0 .. csz] = mem[0 .. csz];
			}
		}
		ensureValidMemory(ret);
		return ret;
	}
	void free(void[] mem)
	{
		// For safety reasons, the GCAllocator should never explicitly free memory.
		//GC.free(extractUnalignedPointer(mem.ptr));
	}
}
297 
/**
	Allocator maintaining free lists for power-of-two block sizes on top of
	a base allocator.

	The lists cover element sizes 1 << minExponent (32 bytes) up to
	1 << (minExponent + freeListCount - 1) (256 KiB). Requests larger than
	the biggest list go straight to the base allocator; smaller requests
	are rounded up to the next covered power of two.
*/
final class AutoFreeListAllocator : Allocator {
	import std.typetuple;

	private {
		enum minExponent = 5;    // smallest list: 1 << 5 = 32 byte blocks
		enum freeListCount = 14; // largest list: 1 << 18 = 256 KiB blocks
		FreeListAlloc[freeListCount] m_freeLists;
		Allocator m_baseAlloc;
	}

	this(Allocator base_allocator) nothrow
	{
		m_baseAlloc = base_allocator;
		// one fixed-size free list per covered power of two
		foreach (i; iotaTuple!freeListCount)
			m_freeLists[i] = new FreeListAlloc(nthFreeListSize!(i), m_baseAlloc);
	}

	void[] alloc(size_t sz)
	{
		// large requests bypass the free lists entirely
		if (sz > nthFreeListSize!(freeListCount-1)) return m_baseAlloc.alloc(sz);
		// pick the smallest list whose element size fits sz
		foreach (i; iotaTuple!freeListCount)
			if (sz <= nthFreeListSize!(i))
				return m_freeLists[i].alloc().ptr[0 .. sz];
		//logTrace("AFL alloc %08X(%d)", ret.ptr, sz);
		assert(false);
	}

	void[] realloc(void[] data, size_t sz)
	{
		foreach (fl; m_freeLists) {
			if (data.length <= fl.elementSize) {
				// just grow the slice if it still fits into the free list slot
				if (sz <= fl.elementSize)
					return data.ptr[0 .. sz];

				// otherwise re-allocate
				auto newd = alloc(sz);
				assert(newd.ptr+sz <= data.ptr || newd.ptr >= data.ptr+data.length, "New block overlaps old one!?");
				auto len = min(data.length, sz);
				newd[0 .. len] = data[0 .. len];
				free(data);
				return newd;
			}
		}
		// forward large blocks to the base allocator
		return m_baseAlloc.realloc(data, sz);
	}

	void free(void[] data)
	{
		//logTrace("AFL free %08X(%s)", data.ptr, data.length);
		// blocks larger than the largest list came from the base allocator
		if (data.length > nthFreeListSize!(freeListCount-1)) {
			m_baseAlloc.free(data);
			return;
		}
		// return the block to the same list alloc() picked for this length
		foreach(i; iotaTuple!freeListCount) {
			if (data.length <= nthFreeListSize!i) {
				m_freeLists[i].free(data.ptr[0 .. nthFreeListSize!i]);
				return;
			}
		}
		assert(false);
	}

	/// Element size of the i-th free list: 1 << (i + minExponent) bytes.
	private static pure size_t nthFreeListSize(size_t i)() { return 1 << (i + minExponent); }
	/// Compile-time tuple (0, 1, ..., i-1) used to unroll the list loops.
	private template iotaTuple(size_t i) {
		static if (i > 1) alias iotaTuple = TypeTuple!(iotaTuple!(i-1), i-1);
		else alias iotaTuple = TypeTuple!(0);
	}
}
368 
/**
	Allocator that carves small allocations out of larger pools obtained
	from a base allocator.

	free() on individual blocks is a no-op; memory is reclaimed in bulk via
	freeAll() (pools kept for reuse) or reset() (pools returned to the base
	allocator). Pools with remaining capacity live in m_freePools,
	exhausted ones in m_fullPools.
*/
final class PoolAllocator : Allocator {
	// One pool: backing memory plus the yet-unused tail of it.
	static struct Pool { Pool* next; void[] data; void[] remaining; }
	// Node for objects whose destructor must run on freeAll().
	// NOTE(review): nothing in this chunk appends to m_destructors -
	// presumably done by external callers; verify before relying on it.
	static struct Destructor { Destructor* next; void function(void*) destructor; void* object; }
	private {
		Allocator m_baseAllocator;
		Pool* m_freePools; // pools that still have remaining space
		Pool* m_fullPools; // pools that are exhausted
		Destructor* m_destructors;
		size_t m_poolSize; // default size for newly created pools
	}

	this(size_t pool_size, Allocator base) nothrow
	{
		m_poolSize = pool_size;
		m_baseAllocator = base;
	}

	/// Total number of bytes held in all pools, used or not.
	@property size_t totalSize()
	{
		size_t amt = 0;
		for (auto p = m_fullPools; p; p = p.next)
			amt += p.data.length;
		for (auto p = m_freePools; p; p = p.next)
			amt += p.data.length;
		return amt;
	}

	/// Number of bytes currently carved out of the pools.
	@property size_t allocatedSize()
	{
		size_t amt = 0;
		for (auto p = m_fullPools; p; p = p.next)
			amt += p.data.length;
		for (auto p = m_freePools; p; p = p.next)
			amt += p.data.length - p.remaining.length;
		return amt;
	}

	void[] alloc(size_t sz)
	{
		auto aligned_sz = alignedSize(sz);

		// find the first free pool with enough remaining space
		Pool* pprev = null;
		Pool* p = cast(Pool*)m_freePools;
		while( p && p.remaining.length < aligned_sz ){
			pprev = p;
			p = p.next;
		}

		if( !p ){
			// no suitable pool - create a new one large enough for this request
			auto pmem = m_baseAllocator.alloc(AllocSize!Pool);

			p = emplace!Pool(cast(Pool*)pmem.ptr);
			p.data = m_baseAllocator.alloc(max(aligned_sz, m_poolSize));
			p.remaining = p.data;
			p.next = cast(Pool*)m_freePools;
			m_freePools = p;
			pprev = null;
		}

		// take the block from the front of the pool's remaining space
		auto ret = p.remaining[0 .. aligned_sz];
		p.remaining = p.remaining[aligned_sz .. $];
		if( !p.remaining.length ){
			// pool exhausted - move it over to the full list
			if( pprev ){
				pprev.next = p.next;
			} else {
				m_freePools = p.next;
			}
			p.next = cast(Pool*)m_fullPools;
			m_fullPools = p;
		}

		return ret[0 .. sz];
	}

	void[] realloc(void[] arr, size_t newsize)
	{
		auto aligned_sz = alignedSize(arr.length);
		auto aligned_newsz = alignedSize(newsize);

		// shrinking always fits within the already reserved aligned block
		if( aligned_newsz <= aligned_sz ) return arr[0 .. newsize]; // TODO: back up remaining

		// grow in place if arr is the latest allocation of the head free pool
		auto pool = m_freePools;
		bool last_in_pool = pool && arr.ptr+aligned_sz == pool.remaining.ptr;
		if( last_in_pool && pool.remaining.length+aligned_sz >= aligned_newsz ){
			pool.remaining = pool.remaining[aligned_newsz-aligned_sz .. $];
			arr = arr.ptr[0 .. aligned_newsz];
			assert(arr.ptr+arr.length == pool.remaining.ptr, "Last block does not align with the remaining space!?");
			return arr[0 .. newsize];
		} else {
			// otherwise allocate a new block and copy the contents over
			auto ret = alloc(newsize);
			assert(ret.ptr >= arr.ptr+aligned_sz || ret.ptr+ret.length <= arr.ptr, "New block overlaps old one!?");
			ret[0 .. min(arr.length, newsize)] = arr[0 .. min(arr.length, newsize)];
			return ret;
		}
	}

	/// Individual blocks are never freed; see freeAll()/reset().
	void free(void[] mem)
	{
	}

	/// Runs all pending destructors and makes the pool memory available
	/// again without returning it to the base allocator.
	void freeAll()
	{
		version(VibeManualMemoryManagement){
			// destroy all initialized objects
			for (auto d = m_destructors; d; d = d.next)
				d.destructor(cast(void*)d.object);
			m_destructors = null;

			// put all full Pools into the free pools list
			for (Pool* p = cast(Pool*)m_fullPools, pnext; p; p = pnext) {
				pnext = p.next;
				p.next = cast(Pool*)m_freePools;
				m_freePools = cast(Pool*)p;
			}

			// mark the memory of every pool as unused again
			for (Pool* p = cast(Pool*)m_freePools; p; p = p.next)
				p.remaining = p.data;
		}
	}

	/// Like freeAll(), but additionally returns all pools to the base allocator.
	void reset()
	{
		version(VibeManualMemoryManagement){
			freeAll();
			Pool* pnext;
			for (auto p = cast(Pool*)m_freePools; p; p = pnext) {
				pnext = p.next;
				m_baseAllocator.free(p.data);
				m_baseAllocator.free((cast(void*)p)[0 .. AllocSize!Pool]);
			}
			m_freePools = null;
		}
	}

	/// Type-erased destructor thunk suitable for the Destructor list.
	private static destroy(T)(void* ptr)
	{
		static if( is(T == class) ) .destroy(cast(T)ptr);
		else .destroy(*cast(T*)ptr);
	}
}
510 
/**
	Fixed-element-size allocator keeping freed blocks in an intrusive
	singly-linked free list.

	Freed blocks store the list link inside their own memory, so the
	element size must be at least size_t.sizeof. New memory is requested
	from the base allocator only when the free list is empty.
*/
final class FreeListAlloc : Allocator
{
nothrow:
	private static struct FreeListSlot { FreeListSlot* next; }
	private {
		immutable size_t m_elemSize;       // fixed size of every block
		Allocator m_baseAlloc;             // source of fresh memory
		FreeListSlot* m_firstFree = null;  // head of the recycled-block list
		size_t m_nalloc = 0;               // blocks currently handed out
		size_t m_nfree = 0;                // blocks waiting in the free list
	}

	this(size_t elem_size, Allocator base_allocator)
	{
		assert(elem_size >= size_t.sizeof);
		m_elemSize = elem_size;
		m_baseAlloc = base_allocator;
	}

	/// The fixed block size served by this allocator.
	@property size_t elementSize() const { return m_elemSize; }

	/// Allocator interface entry point; only the fixed size is accepted.
	void[] alloc(size_t sz)
	{
		assert(sz == m_elemSize, "Invalid allocation size.");
		return alloc();
	}

	/// Returns one block of the fixed element size.
	void[] alloc()
	{
		if (m_firstFree !is null) {
			// recycle the head of the free list
			auto slot = m_firstFree;
			m_firstFree = slot.next;
			slot.next = null;
			m_nfree--;
			m_nalloc++;
			//logInfo("Alloc %d bytes: alloc: %d, free: %d", SZ, s_nalloc, s_nfree);
			return (cast(void*)slot)[0 .. m_elemSize];
		}
		// free list empty - get fresh memory from the base allocator
		auto block = m_baseAlloc.alloc(m_elemSize);
		m_nalloc++;
		//logInfo("Alloc %d bytes: alloc: %d, free: %d", SZ, s_nalloc, s_nfree);
		return block;
	}

	/// Blocks never change size here; realloc just validates the request.
	void[] realloc(void[] mem, size_t sz)
	{
		assert(mem.length == m_elemSize);
		assert(sz == m_elemSize);
		return mem;
	}

	/// Pushes the block onto the free list for later reuse.
	void free(void[] mem)
	{
		assert(mem.length == m_elemSize, "Memory block passed to free has wrong size.");
		auto slot = cast(FreeListSlot*)mem.ptr;
		slot.next = m_firstFree;
		m_firstFree = slot;
		m_nfree++;
		m_nalloc--;
	}
}
573 
/**
	Utility template for allocating and freeing single objects of type T
	via the shared manual allocator.

	With INIT = true the object is constructed in alloc() and destructed in
	free(); otherwise the raw memory is handed out/returned as-is.
	NOTE(review): USE_GC is not referenced by the implementation visible
	here - confirm whether it is still needed.
*/
template FreeListObjectAlloc(T, bool USE_GC = true, bool INIT = true)
{
	enum ElemSize = AllocSize!T;

	// Classes are reference types already; everything else is held by pointer.
	static if( is(T == class) ){
		alias TR = T;
	} else {
		alias TR = T*;
	}

	/// Allocates (and, if INIT, constructs) a T with the given arguments.
	TR alloc(ARGS...)(ARGS args)
	{
		//logInfo("alloc %s/%d", T.stringof, ElemSize);
		auto mem = manualAllocator().alloc(ElemSize);
		static if( hasIndirections!T ) GC.addRange(mem.ptr, ElemSize);
		static if( INIT ) return internalEmplace!T(mem, args);
		else return cast(TR)mem.ptr;
	}

	/// Destructs (if INIT) and frees an object previously returned by alloc().
	void free(TR obj)
	{
		static if( INIT ){
			scope(failure) assert(0, "You shouldn't throw in destructors");
			auto objc = obj;
			static if (is(TR == T*)) .destroy(*objc);//typeid(T).destroy(cast(void*)obj);
			else .destroy(objc);
		}
		static if( hasIndirections!T ) GC.removeRange(cast(void*)obj);
		manualAllocator().free((cast(void*)obj)[0 .. ElemSize]);
	}
}
605 
606 
/**
	Number of bytes needed to store one instance of T: the class instance
	size for classes, T.sizeof for everything else.
*/
template AllocSize(T)
{
	static if (is(T == class)) {
		// workaround for a strange bug where AllocSize!SSLStream == 0: TODO: dustmite!
		enum dummy = T.stringof ~ __traits(classInstanceSize, T).stringof;
		enum AllocSize = __traits(classInstanceSize, T);
	} else {
		enum AllocSize = T.sizeof;
	}
}
617 
/**
	Reference-counted smart pointer that allocates T from the shared manual
	allocator.

	The reference count is an int stored directly behind the object (all
	allocations are ElemSize + int.sizeof bytes); when the last reference
	goes away the object is destroyed (if INIT) and its memory released.
*/
struct FreeListRef(T, bool INIT = true)
{
	enum ElemSize = AllocSize!T;

	// Classes are reference types already; everything else is held by pointer.
	static if( is(T == class) ){
		alias TR = T;
	} else {
		alias TR = T*;
	}

	private TR m_object;
	private size_t m_magic = 0x1EE75817; // workaround for compiler bug

	/// Allocates and constructs a new T, returning the first reference to it.
	static FreeListRef opCall(ARGS...)(ARGS args)
	{
		//logInfo("refalloc %s/%d", T.stringof, ElemSize);
		FreeListRef ret;
		// extra int behind the object stores the reference count
		auto mem = manualAllocator().alloc(ElemSize + int.sizeof);
		static if( hasIndirections!T ) GC.addRange(mem.ptr, ElemSize);
		static if( INIT ) ret.m_object = cast(TR)internalEmplace!(Unqual!T)(mem, args);
		else ret.m_object = cast(TR)mem.ptr;
		ret.refCount = 1;
		return ret;
	}

	~this()
	{
		//if( m_object ) logInfo("~this!%s(): %d", T.stringof, this.refCount);
		//if( m_object ) logInfo("ref %s destructor %d", T.stringof, refCount);
		//else logInfo("ref %s destructor %d", T.stringof, 0);
		clear();
		m_magic = 0;
		m_object = null;
	}

	// Postblit: a copy shares the object and bumps the reference count.
	this(this)
	{
		checkInvariants();
		if( m_object ){
			//if( m_object ) logInfo("this!%s(this): %d", T.stringof, this.refCount);
			this.refCount++;
		}
	}

	// Assignment drops the old reference, then shares the new object.
	void opAssign(FreeListRef other)
	{
		clear();
		m_object = other.m_object;
		if( m_object ){
			//logInfo("opAssign!%s(): %d", T.stringof, this.refCount);
			refCount++;
		}
	}

	/// Drops this reference; destroys and frees the object if it was the last one.
	void clear()
	{
		checkInvariants();
		if( m_object ){
			if( --this.refCount == 0 ){
				static if( INIT ){
					//logInfo("ref %s destroy", T.stringof);
					//typeid(T).destroy(cast(void*)m_object);
					auto objc = m_object;
					static if (is(TR == T)) .destroy(objc);
					else .destroy(*objc);
					//logInfo("ref %s destroyed", T.stringof);
				}
				static if( hasIndirections!T ) GC.removeRange(cast(void*)m_object);
				manualAllocator().free((cast(void*)m_object)[0 .. ElemSize+int.sizeof]);
			}
		}

		m_object = null;
		m_magic = 0x1EE75817;
	}

	/// Access to the managed object (null if empty).
	@property const(TR) get() const { checkInvariants(); return m_object; }
	/// ditto
	@property TR get() { checkInvariants(); return m_object; }
	alias get this;

	// The reference count lives in the int placed directly after the object.
	private @property ref int refCount()
	const {
		auto ptr = cast(ubyte*)cast(void*)m_object;
		ptr += ElemSize;
		return *cast(int*)ptr;
	}

	private void checkInvariants()
	const {
		assert(m_magic == 0x1EE75817);
		assert(!m_object || refCount > 0);
	}
}
711 
/// Recovers the original (unaligned) allocation pointer from an aligned one
/// by reading back the adjustment byte stored just before it by
/// adjustPointerAlignment().
private void* extractUnalignedPointer(void* base) nothrow
{
	auto offset = *(cast(ubyte*)base - 1);
	assert(offset <= Allocator.alignment);
	return base - offset;
}
718 
/// Shifts base forward to the next alignment boundary (a full
/// Allocator.alignment step if already aligned) and records the shift in
/// the byte directly preceding the returned pointer, for
/// extractUnalignedPointer() to read back.
private void* adjustPointerAlignment(void* base) nothrow
{
	auto skew = cast(size_t)base & Allocator.alignmentMask;
	ubyte offset = cast(ubyte)(Allocator.alignment - skew);
	auto aligned = base + offset;
	*(cast(ubyte*)aligned - 1) = offset;
	return aligned;
}
726 
// Round-trip test for the alignment helpers: for every possible starting
// misalignment, the adjusted pointer must be aligned, the stored adjustment
// byte must match, and extractUnalignedPointer must recover the original.
unittest {
	void test_align(void* p, size_t adjustment) {
		void* pa = adjustPointerAlignment(p);
		assert((cast(size_t)pa & Allocator.alignmentMask) == 0, "Non-aligned pointer.");
		assert(*(cast(ubyte*)pa-1) == adjustment, "Invalid adjustment "~to!string(p)~": "~to!string(*(cast(ubyte*)pa-1)));
		void* pr = extractUnalignedPointer(pa);
		assert(pr == p, "Recovered base != original");
	}
	void* ptr = .malloc(0x40);
	// start from an aligned pointer, then walk through all 16 offsets
	ptr += Allocator.alignment - (cast(size_t)ptr & Allocator.alignmentMask);
	test_align(ptr++, 0x10);
	test_align(ptr++, 0x0F);
	test_align(ptr++, 0x0E);
	test_align(ptr++, 0x0D);
	test_align(ptr++, 0x0C);
	test_align(ptr++, 0x0B);
	test_align(ptr++, 0x0A);
	test_align(ptr++, 0x09);
	test_align(ptr++, 0x08);
	test_align(ptr++, 0x07);
	test_align(ptr++, 0x06);
	test_align(ptr++, 0x05);
	test_align(ptr++, 0x04);
	test_align(ptr++, 0x03);
	test_align(ptr++, 0x02);
	test_align(ptr++, 0x01);
	test_align(ptr++, 0x10);
}
755 
/// Rounds sz up to the next multiple of Allocator.alignment. Since the
/// alignment is a power of two, this can be done with a mask instead of a
/// divide/multiply pair.
private size_t alignedSize(size_t sz) nothrow
{
	return (sz + Allocator.alignmentMask) & ~Allocator.alignmentMask;
}
760 
// alignedSize() must round up to the next alignment multiple without ever
// skipping past it.
unittest {
	foreach( i; 0 .. 20 ){
		auto ia = alignedSize(i);
		assert(ia >= i);
		assert((ia & Allocator.alignmentMask) == 0);
		assert(ia < i+Allocator.alignment);
	}
}
769 
/// Touches the first and last byte of mem to provoke an early crash if the
/// slice refers to invalid/unmapped memory; the double swap leaves the
/// contents unchanged. Empty slices have nothing to probe and previously
/// caused an out-of-bounds access on `bytes[$-1]`.
private void ensureValidMemory(void[] mem) nothrow
{
	if (!mem.length) return;
	auto bytes = cast(ubyte[])mem;
	swap(bytes[0], bytes[$-1]);
	swap(bytes[0], bytes[$-1]);
}
776 
/// Vendored replacement for std.conv.emplace for class types; kept local
/// because of compiler issue #14194.
private T internalEmplace(T, Args...)(void[] chunk, auto ref Args args)
	if (is(T == class))
in {
	import std.string, std.format;
	assert(chunk.length >= T.sizeof,
	       format("emplace: Chunk size too small: %s < %s size = %s",
		      chunk.length, T.stringof, T.sizeof));
	assert((cast(size_t) chunk.ptr) % T.alignof == 0,
	       format("emplace: Misaligned memory block (0x%X): it must be %s-byte aligned for type %s", chunk.ptr, T.alignof, T.stringof));

} body {
    enum classSize = __traits(classInstanceSize, T);
    auto result = cast(T) chunk.ptr;

    // Initialize the object in its pre-ctor state
    (cast(byte[]) chunk)[0 .. classSize] = typeid(T).init[];

    // Call the ctor if any
    static if (is(typeof(result.__ctor(args))))
    {
        // T defines a genuine constructor accepting args
        // Go the classic route: write .init first, then call ctor
        result.__ctor(args);
    }
    else
    {
        static assert(args.length == 0 && !is(typeof(&T.__ctor)),
                "Don't know how to initialize an object of type "
                ~ T.stringof ~ " with arguments " ~ Args.stringof);
    }
    return result;
}
810 
/// Ditto
private auto internalEmplace(T, Args...)(void[] chunk, auto ref Args args)
	if (!is(T == class))
in {
	import std.string, std.format;
	assert(chunk.length >= T.sizeof,
	       format("emplace: Chunk size too small: %s < %s size = %s",
		      chunk.length, T.stringof, T.sizeof));
	assert((cast(size_t) chunk.ptr) % T.alignof == 0,
	       format("emplace: Misaligned memory block (0x%X): it must be %s-byte aligned for type %s", chunk.ptr, T.alignof, T.stringof));

} body {
	// Non-class types can safely use the standard library implementation.
	return emplace(cast(T*)chunk.ptr, args);
}