comparison tango/lib/gc/basic/gcx.d @ 132:1700239cab2e trunk

[svn r136] MAJOR UNSTABLE UPDATE!!! Initial commit after moving to Tango instead of Phobos. Lots of bugfixes... This build is not suitable for most things.
author lindquist
date Fri, 11 Jan 2008 17:57:40 +0100
parents
children e881c9b1c738
comparing 131:5825d48b27d1 with 132:1700239cab2e
1 /**
2 * This module contains the garbage collector implementation.
3 *
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com.
5 * All rights reserved.
6 * License:
7 * This software is provided 'as-is', without any express or implied
8 * warranty. In no event will the authors be held liable for any damages
9 * arising from the use of this software.
10 *
11 * Permission is granted to anyone to use this software for any purpose,
12 * including commercial applications, and to alter it and redistribute it
13 * freely, in both source and binary form, subject to the following
14 * restrictions:
15 *
16 * o The origin of this software must not be misrepresented; you must not
17 * claim that you wrote the original software. If you use this software
18 * in a product, an acknowledgment in the product documentation would be
19 * appreciated but is not required.
20 * o Altered source versions must be plainly marked as such, and must not
21 * be misrepresented as being the original software.
22 * o This notice may not be removed or altered from any source
23 * distribution.
24 * Authors: Walter Bright, David Friedman, Sean Kelly
25 */
26
27 // D Programming Language Garbage Collector implementation
28
29 /************** Debugging ***************************/
30
31 //debug = PRINTF; // turn on printf's
32 //debug = COLLECT_PRINTF; // turn on printf's
33 //debug = THREADINVARIANT; // check thread integrity
34 //debug = LOGGING; // log allocations / frees
35 //debug = MEMSTOMP; // stomp on memory
36 //debug = SENTINEL; // add underrun/overrun protection
37 //debug = PTRCHECK; // more pointer checking
38 //debug = PTRCHECK2; // thorough but slow pointer checking
39
40 /*************** Configuration *********************/
41
42 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer
43 // (use for Intel X86 CPUs)
44 // else growing the stack means adding to the stack pointer
45 version = MULTI_THREADED; // produce multithreaded version
46
47 /***************************************************/
48
49 private import gcbits;
50 private import gcstats;
51 private import gcalloc;
52
53 private import cstdlib = tango.stdc.stdlib : calloc, free, malloc, realloc;
54 private import cstring = tango.stdc.string : memcpy, memmove, memset;
55
56 debug private import tango.stdc.stdio;
57
58 version (GNU)
59 {
60 // BUG: The following import will likely not work, since the gcc
61 // subdirectory is elsewhere. Instead, perhaps the functions
62 // could be declared directly or some other resolution could
63 // be found.
64 private import gcc.builtins; // for __builtin_unwind_init
65 }
66
67
68 private
69 {
70 enum BlkAttr : uint
71 {
72 FINALIZE = 0b0000_0001,
73 NO_SCAN = 0b0000_0010,
74 NO_MOVE = 0b0000_0100,
75 ALL_BITS = 0b1111_1111
76 }
77
78 struct BlkInfo
79 {
80 void* base;
81 size_t size;
82 uint attr;
83 }
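/* Illustrative sketch (not part of the original source): the BlkAttr values
 * above are bit flags meant to be OR'd together when tagging a block, and
 * BlkInfo is the record returned by GC.query() for a block. For example:
 *
 *     uint attrs = BlkAttr.FINALIZE | BlkAttr.NO_SCAN; // run finalizer, but don't scan for pointers
 *     // BlkInfo info = gc.query(p);   // assuming a GC instance named 'gc'
 *     // info.base -> start of the block containing p
 *     // info.size -> allocated size of that block
 *     // info.attr -> the BlkAttr bits currently set on it
 */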
84
85 extern (C) void* rt_stackBottom();
86 extern (C) void* rt_stackTop();
87
88 extern (C) void rt_finalize( void* p, bool det = true );
89
90 alias void delegate( void*, void* ) scanFn;
91
92 extern (C) void rt_scanStaticData( scanFn scan );
93
94 version (MULTI_THREADED)
95 {
96 extern (C) bool thread_needLock();
97 extern (C) void thread_suspendAll();
98 extern (C) void thread_resumeAll();
99
100 extern (C) void thread_scanAll( scanFn fn, void* curStackTop = null );
101 }
102
103 extern (C) void onOutOfMemoryError();
104 }
105
106
107 alias GC gc_t;
108
109
110 /* ======================= Leak Detector =========================== */
111
112
113 debug (LOGGING)
114 {
115 struct Log
116 {
117 void* p;
118 size_t size;
119 uint line;
120 char* file;
121 void* parent;
122
123 void print()
124 {
125 printf(" p = %x, size = %d, parent = %x ", p, size, parent);
126 if (file)
127 {
128 printf("%s(%u)", file, line);
129 }
130 printf("\n");
131 }
132 }
133
134
135 struct LogArray
136 {
137 size_t dim;
138 size_t allocdim;
139 Log *data;
140
141 void Dtor()
142 {
143 if (data)
144 cstdlib.free(data);
145 data = null;
146 }
147
148 void reserve(size_t nentries)
149 {
150 assert(dim <= allocdim);
151 if (allocdim - dim < nentries)
152 {
153 allocdim = (dim + nentries) * 2;
154 assert(dim + nentries <= allocdim);
155 if (!data)
156 {
157 data = cast(Log *)cstdlib.malloc(allocdim * Log.sizeof);
158 if (!data && allocdim)
159 onOutOfMemoryError();
160 }
161 else
162 { Log *newdata;
163
164 newdata = cast(Log *)cstdlib.malloc(allocdim * Log.sizeof);
165 if (!newdata && allocdim)
166 onOutOfMemoryError();
167 cstring.memcpy(newdata, data, dim * Log.sizeof);
168 cstdlib.free(data);
169 data = newdata;
170 }
171 }
172 }
173
174
175 void push(Log log)
176 {
177 reserve(1);
178 data[dim++] = log;
179 }
180
181 void remove(size_t i)
182 {
183 cstring.memmove(data + i, data + i + 1, (dim - i - 1) * Log.sizeof);
184 dim--;
185 }
186
187
188 size_t find(void *p)
189 {
190 for (size_t i = 0; i < dim; i++)
191 {
192 if (data[i].p == p)
193 return i;
194 }
195 return ~0u; // not found
196 }
197
198
199 void copy(LogArray *from)
200 {
201 reserve(from.dim - dim);
202 assert(from.dim <= allocdim);
203 cstring.memcpy(data, from.data, from.dim * Log.sizeof);
204 dim = from.dim;
205 }
206 }
207 }
208
209
210 /* ============================ GC =============================== */
211
212
213 class GCLock { } // just a dummy so we can get a global lock
214
215
216 const uint GCVERSION = 1; // increment every time we change interface
217 // to GC.
218
219 class GC
220 {
221 // For passing to debug code
222 static size_t line;
223 static char* file;
224
225 uint gcversion = GCVERSION;
226
227 Gcx *gcx; // implementation
228 static ClassInfo gcLock; // global lock
229
230
231 void initialize()
232 {
233 gcLock = GCLock.classinfo;
234 gcx = cast(Gcx *)cstdlib.calloc(1, Gcx.sizeof);
235 if (!gcx)
236 onOutOfMemoryError();
237 gcx.initialize();
238 setStackBottom(rt_stackBottom());
239 }
240
241
242 void Dtor()
243 {
244 version (linux)
245 {
246 //debug(PRINTF) printf("Thread %x ", pthread_self());
247 //debug(PRINTF) printf("GC.Dtor()\n");
248 }
249
250 if (gcx)
251 {
252 gcx.Dtor();
253 cstdlib.free(gcx);
254 gcx = null;
255 }
256 }
257
258
259 invariant
260 {
261 if (gcx)
262 {
263 gcx.thread_Invariant();
264 }
265 }
266
267
268 /**
269 * Re-enable automatic collections; each call undoes one prior disable().
270 */
271 void enable()
272 {
273 if (!thread_needLock())
274 {
275 assert(gcx.disabled > 0);
276 gcx.disabled--;
277 }
278 else synchronized (gcLock)
279 {
280 assert(gcx.disabled > 0);
281 gcx.disabled--;
282 }
283 }
284
285
286 /**
287 * Disable automatic collections. Calls nest and must be matched by enable().
288 */
289 void disable()
290 {
291 if (!thread_needLock())
292 {
293 gcx.disabled++;
294 }
295 else synchronized (gcLock)
296 {
297 gcx.disabled++;
298 }
299 }
300
301
302 /**
303 * Get the attribute bits (BlkAttr) of the block containing p; 0 if p is null or not a GC pointer.
304 */
305 uint getAttr(void* p)
306 {
307 if (!p)
308 {
309 return 0;
310 }
311
312 uint go()
313 {
314 Pool* pool = gcx.findPool(p);
315 uint oldb = 0;
316
317 if (pool)
318 {
319 uint biti = (p - pool.baseAddr) / 16;
320
321 oldb = gcx.getBits(pool, biti);
322 }
323 return oldb;
324 }
325
326 if (!thread_needLock())
327 {
328 return go();
329 }
330 else synchronized (gcLock)
331 {
332 return go();
333 }
334 }
335
336
337 /**
338 * Set the attribute bits in mask on the block containing p; returns the previous bits.
339 */
340 uint setAttr(void* p, uint mask)
341 {
342 if (!p)
343 {
344 return 0;
345 }
346
347 uint go()
348 {
349 Pool* pool = gcx.findPool(p);
350 uint oldb = 0;
351
352 if (pool)
353 {
354 uint biti = (p - pool.baseAddr) / 16;
355
356 oldb = gcx.getBits(pool, biti);
357 gcx.setBits(pool, biti, mask);
358 }
359 return oldb;
360 }
361
362 if (!thread_needLock())
363 {
364 return go();
365 }
366 else synchronized (gcLock)
367 {
368 return go();
369 }
370 }
371
372
373 /**
374 * Clear the attribute bits in mask on the block containing p; returns the previous bits.
375 */
376 uint clrAttr(void* p, uint mask)
377 {
378 if (!p)
379 {
380 return 0;
381 }
382
383 uint go()
384 {
385 Pool* pool = gcx.findPool(p);
386 uint oldb = 0;
387
388 if (pool)
389 {
390 uint biti = (p - pool.baseAddr) / 16;
391
392 oldb = gcx.getBits(pool, biti);
393 gcx.clrBits(pool, biti, mask);
394 }
395 return oldb;
396 }
397
398 if (!thread_needLock())
399 {
400 return go();
401 }
402 else synchronized (gcLock)
403 {
404 return go();
405 }
406 }
407
408
409 /**
410 * Allocate size bytes from the GC heap, optionally tagged with BlkAttr bits; returns null if size is 0.
411 */
412 void *malloc(size_t size, uint bits = 0)
413 {
414 if (!size)
415 {
416 return null;
417 }
418
419 if (!thread_needLock())
420 {
421 return mallocNoSync(size, bits);
422 }
423 else synchronized (gcLock)
424 {
425 return mallocNoSync(size, bits);
426 }
427 }
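/* Usage sketch (illustrative only, assuming a GC instance named 'gc'):
 *
 *     // raw buffer that will never hold pointers: skip scanning it
 *     void* buf = gc.malloc(4096, BlkAttr.NO_SCAN);
 *     // object-like block whose finalizer must run when it is collected
 *     void* obj = gc.malloc(64, BlkAttr.FINALIZE);
 *
 * A size of 0 yields null, and the attribute bits are recorded via
 * gcx.setBits() once the block has been carved out of a bin or big page.
 */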
428
429
430 //
431 //
432 //
433 private void *mallocNoSync(size_t size, uint bits = 0)
434 {
435 assert(size != 0);
436
437 void *p = null;
438 Bins bin;
439
440 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx);
441 assert(gcx);
442 //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self());
443
444 size += SENTINEL_EXTRA;
445
446 // Compute size bin
447 // Cache previous binsize lookup - Dave Fladebo.
448 static size_t lastsize = -1;
449 static Bins lastbin;
450 if (size == lastsize)
451 bin = lastbin;
452 else
453 {
454 bin = gcx.findBin(size);
455 lastsize = size;
456 lastbin = bin;
457 }
458
459 if (bin < B_PAGE)
460 {
461 p = gcx.bucket[bin];
462 if (p == null)
463 {
464 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page
465 {
466 if (!thread_needLock())
467 {
468 /* Then we haven't locked it yet. Be sure
469 * and lock for a collection, since a finalizer
470 * may start a new thread.
471 */
472 synchronized (gcLock)
473 {
474 gcx.fullcollectshell();
475 }
476 }
477 else if (!gcx.fullcollectshell()) // collect to find a new page
478 {
479 //gcx.newPool(1);
480 }
481 }
482 if (!gcx.bucket[bin] && !gcx.allocPage(bin))
483 { int result;
484
485 gcx.newPool(1); // allocate new pool to find a new page
486 result = gcx.allocPage(bin);
487 if (!result)
488 onOutOfMemoryError();
489 }
490 p = gcx.bucket[bin];
491 }
492
493 // Return next item from free list
494 gcx.bucket[bin] = (cast(List *)p).next;
495 if( !(bits & BlkAttr.NO_SCAN) )
496 cstring.memset(p + size, 0, binsize[bin] - size);
497 //debug(PRINTF) printf("\tmalloc => %x\n", p);
498 debug (MEMSTOMP) cstring.memset(p, 0xF0, size);
499 }
500 else
501 {
502 p = gcx.bigAlloc(size);
503 if (!p)
504 onOutOfMemoryError();
505 }
506 size -= SENTINEL_EXTRA;
507 p = sentinel_add(p);
508 sentinel_init(p, size);
509 gcx.log_malloc(p, size);
510
511 if (bits)
512 {
513 Pool *pool = gcx.findPool(p);
514 assert(pool);
515
516 gcx.setBits(pool, (p - pool.baseAddr) / 16, bits);
517 }
518 return p;
519 }
520
521
522 /**
523 * Allocate size bytes as malloc() does, then zero the memory.
524 */
525 void *calloc(size_t size, uint bits = 0)
526 {
527 if (!size)
528 {
529 return null;
530 }
531
532 if (!thread_needLock())
533 {
534 return callocNoSync(size, bits);
535 }
536 else synchronized (gcLock)
537 {
538 return callocNoSync(size, bits);
539 }
540 }
541
542
543 //
544 //
545 //
546 private void *callocNoSync(size_t size, uint bits = 0)
547 {
548 assert(size != 0);
549
550 //debug(PRINTF) printf("calloc: %x len %d\n", p, len);
551 void *p = mallocNoSync(size, bits);
552 cstring.memset(p, 0, size);
553 return p;
554 }
555
556
557 /**
558 * Resize the block at p to size bytes; may allocate, free, or move the block.
559 */
560 void *realloc(void *p, size_t size, uint bits = 0)
561 {
562 if (!thread_needLock())
563 {
564 return reallocNoSync(p, size, bits);
565 }
566 else synchronized (gcLock)
567 {
568 return reallocNoSync(p, size, bits);
569 }
570 }
571
572
573 //
574 //
575 //
576 private void *reallocNoSync(void *p, size_t size, uint bits = 0)
577 {
578 if (!size)
579 { if (p)
580 { freeNoSync(p);
581 p = null;
582 }
583 }
584 else if (!p)
585 {
586 p = mallocNoSync(size, bits);
587 }
588 else
589 { void *p2;
590 size_t psize;
591
592 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size);
593 version (SENTINEL)
594 {
595 sentinel_Invariant(p);
596 psize = *sentinel_size(p);
597 if (psize != size)
598 {
599 if (psize)
600 {
601 Pool *pool = gcx.findPool(p);
602
603 if (pool)
604 {
605 uint biti = cast(uint)(p - pool.baseAddr) / 16;
606
607 if (bits)
608 {
609 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
610 gcx.setBits(pool, biti, bits);
611 }
612 else
613 {
614 bits = gcx.getBits(pool, biti);
615 }
616 }
617 }
618 p2 = mallocNoSync(size, bits);
619 if (psize < size)
620 size = psize;
621 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
622 cstring.memcpy(p2, p, size);
623 p = p2;
624 }
625 }
626 else
627 {
628 psize = gcx.findSize(p); // find allocated size
629 if (psize >= PAGESIZE && size >= PAGESIZE)
630 {
631 auto psz = psize / PAGESIZE;
632 auto newsz = (size + PAGESIZE - 1) / PAGESIZE;
633 if (newsz == psz)
634 return p;
635
636 auto pool = gcx.findPool(p);
637 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
638
639 if (newsz < psz)
640 { // Shrink in place
641 synchronized (gcLock)
642 {
643 debug (MEMSTOMP) cstring.memset(p + size, 0xF2, psize - size);
644 pool.freePages(pagenum + newsz, psz - newsz);
645 }
646 return p;
647 }
648 else if (pagenum + newsz <= pool.npages)
649 {
650 // Attempt to expand in place
651 synchronized (gcLock)
652 {
653 for (size_t i = pagenum + psz; 1;)
654 {
655 if (i == pagenum + newsz)
656 {
657 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, size - psize);
658 cstring.memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz);
659 return p;
660 }
661 if (i == pool.ncommitted)
662 {
663 auto u = pool.extendPages(pagenum + newsz - pool.ncommitted);
664 if (u == ~0u)
665 break;
666 i = pagenum + newsz;
667 continue;
668 }
669 if (pool.pagetable[i] != B_FREE)
670 break;
671 i++;
672 }
673 }
674 }
675 }
676 if (psize < size || // if new size is bigger
677 psize > size * 2) // or less than half
678 {
679 if (psize)
680 {
681 Pool *pool = gcx.findPool(p);
682
683 if (pool)
684 {
685 uint biti = cast(uint)(p - pool.baseAddr) / 16;
686
687 if (bits)
688 {
689 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
690 gcx.setBits(pool, biti, bits);
691 }
692 else
693 {
694 bits = gcx.getBits(pool, biti);
695 }
696 }
697 }
698 p2 = mallocNoSync(size, bits);
699 if (psize < size)
700 size = psize;
701 //debug(PRINTF) printf("\tcopying %d bytes\n",size);
702 cstring.memcpy(p2, p, size);
703 p = p2;
704 }
705 }
706 }
707 return p;
708 }
709
710
711 /**
712 * Attempt to enlarge, in place, the memory block pointed to by p by at least
713 * minsize bytes beyond its current capacity, up to a maximum of maxsize bytes.
714 * Unlike realloc(), this never moves the memory block.
715 *
716 * Returns:
717 * 0 if could not extend p,
718 * total size of entire memory block if successful.
719 */
720 size_t extend(void* p, size_t minsize, size_t maxsize)
721 {
722 if (!thread_needLock())
723 {
724 return extendNoSync(p, minsize, maxsize);
725 }
726 else synchronized (gcLock)
727 {
728 return extendNoSync(p, minsize, maxsize);
729 }
730 }
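/* Usage sketch (illustrative, names assumed): grow an existing large block in
 * place without copying. extend() returns 0 when it cannot grow the block
 * (e.g. it is a small bin allocation or the following pages are in use),
 * otherwise the new total size of the block.
 *
 *     void* p = gc.malloc(3 * PAGESIZE);
 *     // try to grow p by at least one more page, by up to four more pages
 *     size_t total = gc.extend(p, PAGESIZE, 4 * PAGESIZE);
 *     if (total == 0)
 *     {   // could not grow in place; allocate a bigger block and copy instead
 *     }
 */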
731
732
733 //
734 //
735 //
736 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize)
737 in
738 {
739 assert( minsize <= maxsize );
740 }
741 body
742 {
743 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize);
744 version (SENTINEL)
745 {
746 return 0;
747 }
748 auto psize = gcx.findSize(p); // find allocated size
749 if (psize < PAGESIZE)
750 return 0; // cannot extend buckets
751
752 auto psz = psize / PAGESIZE;
753 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE;
754 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE;
755
756 auto pool = gcx.findPool(p);
757 auto pagenum = (p - pool.baseAddr) / PAGESIZE;
758
759 size_t sz;
760 for (sz = 0; sz < maxsz; sz++)
761 {
762 auto i = pagenum + psz + sz;
763 if (i == pool.ncommitted)
764 break;
765 if (pool.pagetable[i] != B_FREE)
766 { if (sz < minsz)
767 return 0;
768 break;
769 }
770 }
771 if (sz >= minsz)
772 {
773 }
774 else if (pagenum + psz + sz == pool.ncommitted)
775 {
776 auto u = pool.extendPages(minsz - sz);
777 if (u == ~0u)
778 return 0;
779 sz = minsz;
780 }
781 else
782 return 0;
783 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize);
784 cstring.memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz);
785 gcx.p_cache = null;
786 gcx.size_cache = 0;
787 return (psz + sz) * PAGESIZE;
788 }
789
790
791 /**
792 * Release the block containing p back to the free lists; pointers outside the GC heap are ignored.
793 */
794 void free(void *p)
795 {
796 if (!p)
797 {
798 return;
799 }
800
801 if (!thread_needLock())
802 {
803 return freeNoSync(p);
804 }
805 else synchronized (gcLock)
806 {
807 return freeNoSync(p);
808 }
809 }
810
811
812 //
813 //
814 //
815 private void freeNoSync(void *p)
816 {
817 assert (p);
818
819 Pool *pool;
820 uint pagenum;
821 Bins bin;
822 uint biti;
823
824 // Find which page it is in
825 pool = gcx.findPool(p);
826 if (!pool) // if not one of ours
827 return; // ignore
828 sentinel_Invariant(p);
829 p = sentinel_sub(p);
830 pagenum = (p - pool.baseAddr) / PAGESIZE;
831 biti = cast(uint)(p - pool.baseAddr) / 16;
832 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
833
834 bin = cast(Bins)pool.pagetable[pagenum];
835 if (bin == B_PAGE) // if large alloc
836 { int npages;
837 uint n;
838
839 // Free pages
840 npages = 1;
841 n = pagenum;
842 while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS)
843 npages++;
844 debug (MEMSTOMP) cstring.memset(p, 0xF2, npages * PAGESIZE);
845 pool.freePages(pagenum, npages);
846 }
847 else
848 { // Add to free list
849 List *list = cast(List *)p;
850
851 debug (MEMSTOMP) cstring.memset(p, 0xF2, binsize[bin]);
852
853 list.next = gcx.bucket[bin];
854 gcx.bucket[bin] = list;
855 }
856 gcx.log_free(sentinel_add(p));
857 }
858
859
860 /**
861 * Determine the base address of the block containing p. If p is not a gc
862 * allocated pointer, return null.
863 */
864 void* addrOf(void *p)
865 {
866 if (!p)
867 {
868 return null;
869 }
870
871 if (!thread_needLock())
872 {
873 return addrOfNoSync(p);
874 }
875 else synchronized (gcLock)
876 {
877 return addrOfNoSync(p);
878 }
879 }
880
881
882 //
883 //
884 //
885 void* addrOfNoSync(void *p)
886 {
887 if (!p)
888 {
889 return null;
890 }
891
892 return gcx.findBase(p);
893 }
894
895
896 /**
897 * Determine the allocated size of pointer p. If p is an interior pointer
898 * or not a gc allocated pointer, return 0.
899 */
900 size_t sizeOf(void *p)
901 {
902 if (!p)
903 {
904 return 0;
905 }
906
907 if (!thread_needLock())
908 {
909 return sizeOfNoSync(p);
910 }
911 else synchronized (gcLock)
912 {
913 return sizeOfNoSync(p);
914 }
915 }
916
917
918 //
919 //
920 //
921 private size_t sizeOfNoSync(void *p)
922 {
923 assert (p);
924
925 version (SENTINEL)
926 {
927 p = sentinel_sub(p);
928 size_t size = gcx.findSize(p);
929
930 // Check for interior pointer
931 // This depends on:
932 // 1) size is a power of 2 for less than PAGESIZE values
933 // 2) base of memory pool is aligned on PAGESIZE boundary
934 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
935 size = 0;
936 return size ? size - SENTINEL_EXTRA : 0;
937 }
938 else
939 {
940 if (p == gcx.p_cache)
941 return gcx.size_cache;
942
943 size_t size = gcx.findSize(p);
944
945 // Check for interior pointer
946 // This depends on:
947 // 1) size is a power of 2 for less than PAGESIZE values
948 // 2) base of memory pool is aligned on PAGESIZE boundary
949 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1))
950 size = 0;
951 else
952 {
953 gcx.p_cache = p;
954 gcx.size_cache = size;
955 }
956
957 return size;
958 }
959 }
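/* Worked example of the interior-pointer test above (illustrative numbers):
 * a B_64 block starts on a 64-byte boundary inside a page-aligned pool, so
 * for a pointer p into that block
 *
 *     cast(size_t)p & (size - 1) & (PAGESIZE - 1)
 *
 * is 0 only when p sits exactly on the block's start. E.g. with size = 64 and
 * p = base + 8 the expression yields 8, so sizeOf() reports 0 for the interior
 * pointer. For B_PAGE blocks size is a multiple of PAGESIZE, so the same mask
 * reduces to a page-alignment check on p.
 */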
960
961
962 /**
963 * Retrieve the base address, size, and attribute bits (BlkInfo) of the block
964 * containing p. If p is not a gc allocated pointer, return a zeroed BlkInfo.
965 */
966 BlkInfo query(void *p)
967 {
968 if (!p)
969 {
970 BlkInfo i;
971 return i;
972 }
973
974 if (!thread_needLock())
975 {
976 return queryNoSync(p);
977 }
978 else synchronized (gcLock)
979 {
980 return queryNoSync(p);
981 }
982 }
983
984
985 //
986 //
987 //
988 BlkInfo queryNoSync(void *p)
989 {
990 assert(p);
991
992 return gcx.getInfo(p);
993 }
994
995
996 /**
997 * Verify that pointer p:
998 * 1) belongs to this memory pool
999 * 2) points to the start of an allocated piece of memory
1000 * 3) is not on a free list
1001 */
1002 void check(void *p)
1003 {
1004 if (!p)
1005 {
1006 return;
1007 }
1008
1009 if (!thread_needLock())
1010 {
1011 checkNoSync(p);
1012 }
1013 else synchronized (gcLock)
1014 {
1015 checkNoSync(p);
1016 }
1017 }
1018
1019
1020 //
1021 //
1022 //
1023 private void checkNoSync(void *p)
1024 {
1025 assert(p);
1026
1027 sentinel_Invariant(p);
1028 debug (PTRCHECK)
1029 {
1030 Pool* pool;
1031 uint pagenum;
1032 Bins bin;
1033 size_t size;
1034
1035 p = sentinel_sub(p);
1036 pool = gcx.findPool(p);
1037 assert(pool);
1038 pagenum = (p - pool.baseAddr) / PAGESIZE;
1039 bin = cast(Bins)pool.pagetable[pagenum];
1040 assert(bin <= B_PAGE);
1041 size = binsize[bin];
1042 assert((cast(size_t)p & (size - 1)) == 0);
1043
1044 debug (PTRCHECK2)
1045 {
1046 if (bin < B_PAGE)
1047 {
1048 // Check that p is not on a free list
1049 List *list;
1050
1051 for (list = gcx.bucket[bin]; list; list = list.next)
1052 {
1053 assert(cast(void *)list != p);
1054 }
1055 }
1056 }
1057 }
1058 }
1059
1060
1061 //
1062 //
1063 //
1064 private void setStackBottom(void *p)
1065 {
1066 version (STACKGROWSDOWN)
1067 {
1068 //p = (void *)((uint *)p + 4);
1069 if (p > gcx.stackBottom)
1070 {
1071 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1072 gcx.stackBottom = p;
1073 }
1074 }
1075 else
1076 {
1077 //p = (void *)((uint *)p - 4);
1078 if (p < gcx.stackBottom)
1079 {
1080 //debug(PRINTF) printf("setStackBottom(%x)\n", p);
1081 gcx.stackBottom = cast(char *)p;
1082 }
1083 }
1084 }
1085
1086
1087 /**
1088 * add p to list of roots
1089 */
1090 void addRoot(void *p)
1091 {
1092 if (!p)
1093 {
1094 return;
1095 }
1096
1097 if (!thread_needLock())
1098 {
1099 gcx.addRoot(p);
1100 }
1101 else synchronized (gcLock)
1102 {
1103 gcx.addRoot(p);
1104 }
1105 }
1106
1107
1108 /**
1109 * remove p from list of roots
1110 */
1111 void removeRoot(void *p)
1112 {
1113 if (!p)
1114 {
1115 return;
1116 }
1117
1118 if (!thread_needLock())
1119 {
1120 gcx.removeRoot(p);
1121 }
1122 else synchronized (gcLock)
1123 {
1124 gcx.removeRoot(p);
1125 }
1126 }
1127
1128
1129 /**
1130 * add range to scan for roots
1131 */
1132 void addRange(void *p, size_t sz)
1133 {
1134 if (!p || !sz)
1135 {
1136 return;
1137 }
1138
1139 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop);
1140 if (!thread_needLock())
1141 {
1142 gcx.addRange(p, p + sz);
1143 }
1144 else synchronized (gcLock)
1145 {
1146 gcx.addRange(p, p + sz);
1147 }
1148 //debug(PRINTF) printf("-GC.addRange()\n");
1149 }
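/* Usage sketch (illustrative): addRange() is how non-GC memory that holds
 * pointers into the GC heap is kept alive. A typical pattern for a buffer
 * obtained from the C heap:
 *
 *     void** table = cast(void**) cstdlib.malloc(64 * (void*).sizeof);
 *     gc.addRange(table, 64 * (void*).sizeof);   // scan it conservatively
 *     // ... use table ...
 *     gc.removeRange(table);                     // unregister before freeing
 *     cstdlib.free(table);
 */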
1150
1151
1152 /**
1153 * remove range
1154 */
1155 void removeRange(void *p)
1156 {
1157 if (!p)
1158 {
1159 return;
1160 }
1161
1162 if (!thread_needLock())
1163 {
1164 gcx.removeRange(p);
1165 }
1166 else synchronized (gcLock)
1167 {
1168 gcx.removeRange(p);
1169 }
1170 }
1171
1172
1173 /**
1174 * do full garbage collection
1175 */
1176 void fullCollect()
1177 {
1178 debug(PRINTF) printf("GC.fullCollect()\n");
1179
1180 if (!thread_needLock())
1181 {
1182 gcx.fullcollectshell();
1183 }
1184 else synchronized (gcLock)
1185 {
1186 gcx.fullcollectshell();
1187 }
1188
1189 version (none)
1190 {
1191 GCStats stats;
1192
1193 getStats(stats);
1194 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n",
1195 stats.poolsize, stats.usedsize, stats.freelistsize);
1196 }
1197
1198 gcx.log_collect();
1199 }
1200
1201
1202 /**
1203 * do full garbage collection without scanning thread stacks
1204 */
1205 void fullCollectNoStack()
1206 {
1207 if (!thread_needLock())
1208 {
1209 gcx.noStack++;
1210 gcx.fullcollectshell();
1211 gcx.noStack--;
1212 }
1213 else synchronized (gcLock)
1214 {
1215 gcx.noStack++;
1216 gcx.fullcollectshell();
1217 gcx.noStack--;
1218 }
1219 }
1220
1221
1222 /**
1223 * Retrieve statistics about garbage collection.
1224 * Useful for debugging and tuning.
1225 */
1226 void getStats(out GCStats stats)
1227 {
1228 if (!thread_needLock())
1229 {
1230 getStatsNoSync(stats);
1231 }
1232 else synchronized (gcLock)
1233 {
1234 getStatsNoSync(stats);
1235 }
1236 }
1237
1238
1239 //
1240 //
1241 //
1242 private void getStatsNoSync(out GCStats stats)
1243 {
1244 size_t psize = 0;
1245 size_t usize = 0;
1246 size_t flsize = 0;
1247
1248 size_t n;
1249 size_t bsize = 0;
1250
1251 //debug(PRINTF) printf("getStats()\n");
1252 cstring.memset(&stats, 0, GCStats.sizeof);
1253
1254 for (n = 0; n < gcx.npools; n++)
1255 { Pool *pool = gcx.pooltable[n];
1256
1257 psize += pool.ncommitted * PAGESIZE;
1258 for (uint j = 0; j < pool.ncommitted; j++)
1259 {
1260 Bins bin = cast(Bins)pool.pagetable[j];
1261 if (bin == B_FREE)
1262 stats.freeblocks++;
1263 else if (bin == B_PAGE)
1264 stats.pageblocks++;
1265 else if (bin < B_PAGE)
1266 bsize += PAGESIZE;
1267 }
1268 }
1269
1270 for (n = 0; n < B_PAGE; n++)
1271 {
1272 //debug(PRINTF) printf("bin %d\n", n);
1273 for (List *list = gcx.bucket[n]; list; list = list.next)
1274 {
1275 //debug(PRINTF) printf("\tlist %x\n", list);
1276 flsize += binsize[n];
1277 }
1278 }
1279
1280 usize = bsize - flsize;
1281
1282 stats.poolsize = psize;
1283 stats.usedsize = bsize - flsize;
1284 stats.freelistsize = flsize;
1285 }
1286 }
1287
1288
1289 /* ============================ Gcx =============================== */
1290
1291 enum
1292 { PAGESIZE = 4096,
1293 COMMITSIZE = (4096*16),
1294 POOLSIZE = (4096*256),
1295 }
1296
1297
1298 enum
1299 {
1300 B_16,
1301 B_32,
1302 B_64,
1303 B_128,
1304 B_256,
1305 B_512,
1306 B_1024,
1307 B_2048,
1308 B_PAGE, // start of large alloc
1309 B_PAGEPLUS, // continuation of large alloc
1310 B_FREE, // free page
1311 B_UNCOMMITTED, // memory not committed for this page
1312 B_MAX
1313 }
1314
1315
1316 alias ubyte Bins;
1317
1318
1319 struct List
1320 {
1321 List *next;
1322 }
1323
1324
1325 struct Range
1326 {
1327 void *pbot;
1328 void *ptop;
1329 }
1330
1331
1332 const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
1333 const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
1334 ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
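/* Worked example (illustrative): a request of 100 bytes falls in bin B_128
 * (see findBin() below), so it is carved from a page of 128-byte chunks and
 * binsize[B_128] == 128. notbinsize[] holds the complement masks used to snap
 * an interior offset back to the start of its chunk:
 *
 *     offset & notbinsize[B_128]      // == offset & ~127u, rounds down to a
 *                                     //    128-byte boundary within the pool
 *
 * For B_PAGE the mask is ~(4096u-1), i.e. the containing page boundary.
 */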
1335
1336 /* ============================ Gcx =============================== */
1337
1338
1339 struct Gcx
1340 {
1341 debug (THREADINVARIANT)
1342 {
1343 pthread_t self;
1344 void thread_Invariant()
1345 {
1346 if (self != pthread_self())
1347 printf("thread_Invariant(): gcx = %x, self = %x, pthread_self() = %x\n", this, self, pthread_self());
1348 assert(self == pthread_self());
1349 }
1350 }
1351 else
1352 {
1353 void thread_Invariant() { }
1354 }
1355
1356 void *p_cache;
1357 size_t size_cache;
1358
1359 size_t nroots;
1360 size_t rootdim;
1361 void **roots;
1362
1363 size_t nranges;
1364 size_t rangedim;
1365 Range *ranges;
1366
1367 uint noStack; // !=0 means don't scan stack
1368 uint log; // turn on logging
1369 uint anychanges;
1370 void *stackBottom;
1371 uint inited;
1372 int disabled; // turn off collections if >0
1373
1374 byte *minAddr; // min(baseAddr)
1375 byte *maxAddr; // max(topAddr)
1376
1377 uint npools;
1378 Pool **pooltable;
1379
1380 List *bucket[B_MAX]; // free list for each size
1381
1382
1383 void initialize()
1384 { int dummy;
1385
1386 (cast(byte *)this)[0 .. Gcx.sizeof] = 0;
1387 stackBottom = cast(char *)&dummy;
1388 log_init();
1389 debug (THREADINVARIANT)
1390 self = pthread_self();
1391 //printf("gcx = %p, self = %x\n", this, self);
1392 inited = 1;
1393 }
1394
1395
1396 void Dtor()
1397 {
1398 inited = 0;
1399
1400 for (uint i = 0; i < npools; i++)
1401 { Pool *pool = pooltable[i];
1402
1403 pool.Dtor();
1404 cstdlib.free(pool);
1405 }
1406 if (pooltable)
1407 cstdlib.free(pooltable);
1408
1409 if (roots)
1410 cstdlib.free(roots);
1411
1412 if (ranges)
1413 cstdlib.free(ranges);
1414 }
1415
1416
1417 void Invariant() { }
1418
1419
1420 invariant
1421 {
1422 if (inited)
1423 {
1424 //printf("Gcx.invariant(): this = %p\n", this);
1425 uint i;
1426
1427 // Assure we're called on the right thread
1428 debug (THREADINVARIANT) assert(self == pthread_self());
1429
1430 for (i = 0; i < npools; i++)
1431 { Pool *pool = pooltable[i];
1432
1433 pool.Invariant();
1434 if (i == 0)
1435 {
1436 assert(minAddr == pool.baseAddr);
1437 }
1438 if (i + 1 < npools)
1439 {
1440 assert(pool.opCmp(pooltable[i + 1]) < 0);
1441 }
1442 else if (i + 1 == npools)
1443 {
1444 assert(maxAddr == pool.topAddr);
1445 }
1446 }
1447
1448 if (roots)
1449 {
1450 assert(rootdim != 0);
1451 assert(nroots <= rootdim);
1452 }
1453
1454 if (ranges)
1455 {
1456 assert(rangedim != 0);
1457 assert(nranges <= rangedim);
1458
1459 for (i = 0; i < nranges; i++)
1460 {
1461 assert(ranges[i].pbot);
1462 assert(ranges[i].ptop);
1463 assert(ranges[i].pbot <= ranges[i].ptop);
1464 }
1465 }
1466
1467 for (i = 0; i < B_PAGE; i++)
1468 {
1469 for (List *list = bucket[i]; list; list = list.next)
1470 {
1471 }
1472 }
1473 }
1474 }
1475
1476
1477 /**
1478 * Append p to the roots[] array, growing it as needed.
1479 */
1480 void addRoot(void *p)
1481 {
1482 if (nroots == rootdim)
1483 {
1484 size_t newdim = rootdim * 2 + 16;
1485 void** newroots;
1486
1487 newroots = cast(void **)cstdlib.malloc(newdim * newroots[0].sizeof);
1488 if (!newroots)
1489 onOutOfMemoryError();
1490 if (roots)
1491 { cstring.memcpy(newroots, roots, nroots * newroots[0].sizeof);
1492 cstdlib.free(roots);
1493 }
1494 roots = newroots;
1495 rootdim = newdim;
1496 }
1497 roots[nroots] = p;
1498 nroots++;
1499 }
1500
1501
1502 /**
1503 * Remove p from roots[]; asserts if p was never added.
1504 */
1505 void removeRoot(void *p)
1506 {
1507 for (size_t i = nroots; i--;)
1508 {
1509 if (roots[i] == p)
1510 {
1511 nroots--;
1512 cstring.memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof);
1513 return;
1514 }
1515 }
1516 assert(0);
1517 }
1518
1519
1520 /**
1521 * Record the range [pbot, ptop) so it is scanned conservatively during collections.
1522 */
1523 void addRange(void *pbot, void *ptop)
1524 {
1525 debug(PRINTF) printf("Thread %x ", pthread_self());
1526 debug(PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this, pbot, ptop, nranges);
1527 if (nranges == rangedim)
1528 {
1529 size_t newdim = rangedim * 2 + 16;
1530 Range *newranges;
1531
1532 newranges = cast(Range *)cstdlib.malloc(newdim * newranges[0].sizeof);
1533 if (!newranges)
1534 onOutOfMemoryError();
1535 if (ranges)
1536 { cstring.memcpy(newranges, ranges, nranges * newranges[0].sizeof);
1537 cstdlib.free(ranges);
1538 }
1539 ranges = newranges;
1540 rangedim = newdim;
1541 }
1542 ranges[nranges].pbot = pbot;
1543 ranges[nranges].ptop = ptop;
1544 nranges++;
1545 }
1546
1547
1548 /**
1549 * Remove the range starting at pbot; an unknown pbot is silently ignored (see below).
1550 */
1551 void removeRange(void *pbot)
1552 {
1553 debug(PRINTF) printf("Thread %x ", pthread_self());
1554 debug(PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this, pbot, nranges);
1555 for (size_t i = nranges; i--;)
1556 {
1557 if (ranges[i].pbot == pbot)
1558 {
1559 nranges--;
1560 cstring.memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof);
1561 return;
1562 }
1563 }
1564 debug(PRINTF) printf("Wrong thread\n");
1565
1566 // This is a fatal error, but ignore it.
1567 // The problem is that we can get a Close() call on a thread
1568 // other than the one the range was allocated on.
1569 //assert(zero);
1570 }
1571
1572
1573 /**
1574 * Find Pool that pointer is in.
1575 * Return null if not in a Pool.
1576 * Assume pooltable[] is sorted.
1577 */
1578 Pool *findPool(void *p)
1579 {
1580 if (p >= minAddr && p < maxAddr)
1581 {
1582 if (npools == 1)
1583 {
1584 return pooltable[0];
1585 }
1586
1587 for (uint i = 0; i < npools; i++)
1588 { Pool *pool;
1589
1590 pool = pooltable[i];
1591 if (p < pool.topAddr)
1592 { if (pool.baseAddr <= p)
1593 return pool;
1594 break;
1595 }
1596 }
1597 }
1598 return null;
1599 }
1600
1601
1602 /**
1603 * Find base address of block containing pointer p.
1604 * Returns null if not a gc'd pointer
1605 */
1606 void* findBase(void *p)
1607 {
1608 Pool *pool;
1609
1610 pool = findPool(p);
1611 if (pool)
1612 {
1613 size_t offset = cast(size_t)(p - pool.baseAddr);
1614 uint pn = offset / PAGESIZE;
1615 Bins bin = cast(Bins)pool.pagetable[pn];
1616
1617 // Adjust bit to be at start of allocated memory block
1618 if (bin <= B_PAGE)
1619 {
1620 return pool.baseAddr + (offset & notbinsize[bin]);
1621 }
1622 else if (bin == B_PAGEPLUS)
1623 {
1624 do
1625 { --pn, offset -= PAGESIZE;
1626 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1627
1628 return pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1629 }
1630 else
1631 {
1632 // we are in a B_FREE or B_UNCOMMITTED page
1633 return null;
1634 }
1635 }
1636 return null;
1637 }
1638
1639
1640 /**
1641 * Find size of pointer p.
1642 * Returns 0 if not a gc'd pointer
1643 */
1644 size_t findSize(void *p)
1645 {
1646 Pool *pool;
1647 size_t size = 0;
1648
1649 pool = findPool(p);
1650 if (pool)
1651 {
1652 uint pagenum;
1653 Bins bin;
1654
1655 pagenum = (cast(uint)(p - pool.baseAddr)) / PAGESIZE;
1656 bin = cast(Bins)pool.pagetable[pagenum];
1657 size = binsize[bin];
1658 if (bin == B_PAGE)
1659 { uint npages = pool.ncommitted;
1660 ubyte* pt;
1661 uint i;
1662
1663 pt = &pool.pagetable[0];
1664 for (i = pagenum + 1; i < npages; i++)
1665 {
1666 if (pt[i] != B_PAGEPLUS)
1667 break;
1668 }
1669 size = (i - pagenum) * PAGESIZE;
1670 }
1671 }
1672 return size;
1673 }
1674
1675
1676 /**
1677 * Return the base address, size, and attribute bits of the block containing p.
1678 */
1679 BlkInfo getInfo(void* p)
1680 {
1681 Pool *pool;
1682 BlkInfo info;
1683
1684 pool = findPool(p);
1685 if (pool)
1686 {
1687 size_t offset = cast(size_t)(p - pool.baseAddr);
1688 uint pn = offset / PAGESIZE;
1689 Bins bin = cast(Bins)pool.pagetable[pn];
1690
1691 ////////////////////////////////////////////////////////////////////
1692 // findAddr
1693 ////////////////////////////////////////////////////////////////////
1694
1695 if (bin <= B_PAGE)
1696 {
1697 info.base = pool.baseAddr + (offset & notbinsize[bin]);
1698 }
1699 else if (bin == B_PAGEPLUS)
1700 {
1701 do
1702 { --pn, offset -= PAGESIZE;
1703 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
1704
1705 info.base = pool.baseAddr + (offset & (offset.max ^ (PAGESIZE-1)));
1706
1707 // fix bin for use by size calc below
1708 bin = cast(Bins)pool.pagetable[pn];
1709 }
1710
1711 ////////////////////////////////////////////////////////////////////
1712 // findSize
1713 ////////////////////////////////////////////////////////////////////
1714
1715 info.size = binsize[bin];
1716 if (bin == B_PAGE)
1717 { uint npages = pool.ncommitted;
1718 ubyte* pt;
1719 uint i;
1720
1721 pt = &pool.pagetable[0];
1722 for (i = pn + 1; i < npages; i++)
1723 {
1724 if (pt[i] != B_PAGEPLUS)
1725 break;
1726 }
1727 info.size = (i - pn) * PAGESIZE;
1728 }
1729
1730 ////////////////////////////////////////////////////////////////////
1731 // getBits
1732 ////////////////////////////////////////////////////////////////////
1733
1734 info.attr = getBits(pool, offset / 16);
1735 }
1736 return info;
1737 }
1738
1739
1740 /**
1741 * Compute bin for size.
1742 */
1743 static Bins findBin(size_t size)
1744 { Bins bin;
1745
1746 if (size <= 256)
1747 {
1748 if (size <= 64)
1749 {
1750 if (size <= 16)
1751 bin = B_16;
1752 else if (size <= 32)
1753 bin = B_32;
1754 else
1755 bin = B_64;
1756 }
1757 else
1758 {
1759 if (size <= 128)
1760 bin = B_128;
1761 else
1762 bin = B_256;
1763 }
1764 }
1765 else
1766 {
1767 if (size <= 1024)
1768 {
1769 if (size <= 512)
1770 bin = B_512;
1771 else
1772 bin = B_1024;
1773 }
1774 else
1775 {
1776 if (size <= 2048)
1777 bin = B_2048;
1778 else
1779 bin = B_PAGE;
1780 }
1781 }
1782 return bin;
1783 }
1784
1785
1786 /**
1787 * Allocate a chunk of memory that is larger than a page.
1788 * Return null if out of memory.
1789 */
1790 void *bigAlloc(size_t size)
1791 {
1792 Pool *pool;
1793 uint npages;
1794 uint n;
1795 uint pn;
1796 uint freedpages;
1797 void *p;
1798 int state;
1799
1800 npages = (size + PAGESIZE - 1) / PAGESIZE;
1801
1802 for (state = 0; ; )
1803 {
1804 // This code could use some refinement when repeatedly
1805 // allocating very large arrays.
1806
1807 for (n = 0; n < npools; n++)
1808 {
1809 pool = pooltable[n];
1810 pn = pool.allocPages(npages);
1811 if (pn != ~0u)
1812 goto L1;
1813 }
1814
1815 // Failed
1816 switch (state)
1817 {
1818 case 0:
1819 if (disabled)
1820 { state = 1;
1821 continue;
1822 }
1823 // Try collecting
1824 freedpages = fullcollectshell();
1825 if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
1826 { state = 1;
1827 continue;
1828 }
1829 // Allocate new pool
1830 pool = newPool(npages);
1831 if (!pool)
1832 { state = 2;
1833 continue;
1834 }
1835 pn = pool.allocPages(npages);
1836 assert(pn != ~0u);
1837 goto L1;
1838 case 1:
1839 // Allocate new pool
1840 pool = newPool(npages);
1841 if (!pool)
1842 goto Lnomemory;
1843 pn = pool.allocPages(npages);
1844 assert(pn != ~0u);
1845 goto L1;
1846 case 2:
1847 goto Lnomemory;
1848 default:
1849 assert(false);
1850 }
1851 }
1852
1853 L1:
1854 pool.pagetable[pn] = B_PAGE;
1855 if (npages > 1)
1856 cstring.memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
1857 p = pool.baseAddr + pn * PAGESIZE;
1858 cstring.memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
1859 debug (MEMSTOMP) cstring.memset(p, 0xF1, size);
1860 //debug(PRINTF) printf("\tp = %x\n", p);
1861 return p;
1862
1863 Lnomemory:
1864 return null; // let mallocNoSync handle the error
1865 }
1866
1867
1868 /**
1869 * Allocate a new pool with at least npages in it.
1870 * Sort it into pooltable[].
1871 * Return null if failed.
1872 */
1873 Pool *newPool(uint npages)
1874 {
1875 Pool *pool;
1876 Pool **newpooltable;
1877 uint newnpools;
1878 uint i;
1879
1880 //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);
1881
1882 // Round up to COMMITSIZE pages
1883 npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
1884
1885 // Minimum of POOLSIZE
1886 if (npages < POOLSIZE/PAGESIZE)
1887 npages = POOLSIZE/PAGESIZE;
1888 else if (npages > POOLSIZE/PAGESIZE)
1889 { // Give us 150% of requested size, so there's room to extend
1890 auto n = npages + (npages >> 1);
1891 if (n < size_t.max/PAGESIZE)
1892 npages = n;
1893 }
1894
1895 // Allocate successively larger pools up to 8 megs
1896 if (npools)
1897 { uint n;
1898
1899 n = npools;
1900 if (n > 8)
1901 n = 8; // cap pool size at 8 megs
1902 n *= (POOLSIZE / PAGESIZE);
1903 if (npages < n)
1904 npages = n;
1905 }
1906
1907 pool = cast(Pool *)cstdlib.calloc(1, Pool.sizeof);
1908 if (pool)
1909 {
1910 pool.initialize(npages);
1911 if (!pool.baseAddr)
1912 goto Lerr;
1913
1914 newnpools = npools + 1;
1915 newpooltable = cast(Pool **)cstdlib.realloc(pooltable, newnpools * (Pool *).sizeof);
1916 if (!newpooltable)
1917 goto Lerr;
1918
1919 // Sort pool into newpooltable[]
1920 for (i = 0; i < npools; i++)
1921 {
1922 if (pool.opCmp(newpooltable[i]) < 0)
1923 break;
1924 }
1925 cstring.memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
1926 newpooltable[i] = pool;
1927
1928 pooltable = newpooltable;
1929 npools = newnpools;
1930
1931 minAddr = pooltable[0].baseAddr;
1932 maxAddr = pooltable[npools - 1].topAddr;
1933 }
1934 return pool;
1935
1936 Lerr:
1937 pool.Dtor();
1938 cstdlib.free(pool);
1939 return null;
1940 }
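/* Worked example of the sizing above (illustrative numbers): a request for
 * 3 pages is rounded up to a multiple of COMMITSIZE/PAGESIZE = 16 pages and
 * then raised to the POOLSIZE minimum of 256 pages (1 MB). A request larger
 * than POOLSIZE instead gets 150% of its size as room to extend. Once three
 * pools exist, the next pool is at least 3 * 256 pages = 3 MB, with that
 * successively-larger minimum capped at 8 * POOLSIZE = 8 MB.
 */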
1941
1942
1943 /**
1944 * Allocate a page of bin's.
1945 * Returns:
1946 * 0 failed
1947 */
1948 int allocPage(Bins bin)
1949 {
1950 Pool *pool;
1951 uint n;
1952 uint pn;
1953 byte *p;
1954 byte *ptop;
1955
1956 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin);
1957 for (n = 0; n < npools; n++)
1958 {
1959 pool = pooltable[n];
1960 pn = pool.allocPages(1);
1961 if (pn != ~0u)
1962 goto L1;
1963 }
1964 return 0; // failed
1965
1966 L1:
1967 pool.pagetable[pn] = cast(ubyte)bin;
1968
1969 // Convert page to free list
1970 size_t size = binsize[bin];
1971 List **b = &bucket[bin];
1972
1973 p = pool.baseAddr + pn * PAGESIZE;
1974 ptop = p + PAGESIZE;
1975 for (; p < ptop; p += size)
1976 {
1977 (cast(List *)p).next = *b;
1978 *b = cast(List *)p;
1979 }
1980 return 1;
1981 }
1982
1983
1984 /**
1985 * Search a range of memory values and mark any pointers into the GC pool.
1986 */
1987 void mark(void *pbot, void *ptop)
1988 {
1989 void **p1 = cast(void **)pbot;
1990 void **p2 = cast(void **)ptop;
1991 uint changes = 0;
1992
1993 //printf("marking range: %p -> %p\n", pbot, ptop);
1994 for (; p1 < p2; p1++)
1995 {
1996 Pool *pool;
1997 byte *p = cast(byte *)(*p1);
1998
1999 //if (log) debug(PRINTF) printf("\tmark %x\n", p);
2000 if (p >= minAddr)
2001 {
2002 pool = findPool(p);
2003 if (pool)
2004 {
2005 size_t offset = cast(size_t)(p - pool.baseAddr);
2006 uint biti;
2007 uint pn = offset / PAGESIZE;
2008 Bins bin = cast(Bins)pool.pagetable[pn];
2009
2010 //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);
2011
2012 // Adjust bit to be at start of allocated memory block
2013 if (bin <= B_PAGE)
2014 {
2015 biti = (offset & notbinsize[bin]) >> 4;
2016 //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
2017 }
2018 else if (bin == B_PAGEPLUS)
2019 {
2020 do
2021 { --pn;
2022 } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
2023 biti = pn * (PAGESIZE / 16);
2024 }
2025 else
2026 {
2027 // Don't mark bits in B_FREE or B_UNCOMMITTED pages
2028 continue;
2029 }
2030
2031 //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
2032 if (!pool.mark.test(biti))
2033 {
2034 //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
2035 pool.mark.set(biti);
2036 if (!pool.noscan.test(biti))
2037 {
2038 pool.scan.set(biti);
2039 changes = 1;
2040 }
2041 log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
2042 }
2043 }
2044 }
2045 }
2046 anychanges |= changes;
2047 }
2048
2049
2050 /**
2051 * Return number of full pages free'd.
2052 */
2053 size_t fullcollectshell()
2054 {
2055 // The purpose of the 'shell' is to ensure all the registers
2056 // get put on the stack so they'll be scanned
2057 void *sp;
2058 size_t result;
2059 version (GNU)
2060 {
2061 __builtin_unwind_init();
2062 sp = & sp;
2063 }
2064 else
2065 {
2066 asm
2067 {
2068 pushad ;
2069 mov sp[EBP],ESP ;
2070 }
2071 }
2072 result = fullcollect(sp);
2073 version (GNU)
2074 {
2075 // nothing to do
2076 }
2077 else
2078 {
2079 asm
2080 {
2081 popad ;
2082 }
2083 }
2084 return result;
2085 }
2086
2087
2088 /**
2089 * Mark everything reachable from static data, stacks, roots and ranges, then sweep what is left; returns the number of pages made free.
2090 */
2091 size_t fullcollect(void *stackTop)
2092 {
2093 uint n;
2094 Pool *pool;
2095
2096 debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");
2097
2098 thread_suspendAll();
2099
2100 p_cache = null;
2101 size_cache = 0;
2102
2103 anychanges = 0;
2104 for (n = 0; n < npools; n++)
2105 {
2106 pool = pooltable[n];
2107 pool.mark.zero();
2108 pool.scan.zero();
2109 pool.freebits.zero();
2110 }
2111
2112 // Mark each free entry, so it doesn't get scanned
2113 for (n = 0; n < B_PAGE; n++)
2114 {
2115 for (List *list = bucket[n]; list; list = list.next)
2116 {
2117 pool = findPool(list);
2118 assert(pool);
2119 pool.freebits.set(cast(uint)(cast(byte *)list - pool.baseAddr) / 16);
2120 }
2121 }
2122
2123 for (n = 0; n < npools; n++)
2124 {
2125 pool = pooltable[n];
2126 pool.mark.copy(&pool.freebits);
2127 }
2128
2129 rt_scanStaticData( &mark );
2130
2131 version (MULTI_THREADED)
2132 {
2133 if (!noStack)
2134 {
2135 // Scan stacks and registers for each paused thread
2136 thread_scanAll( &mark, stackTop );
2137 }
2138 }
2139 else
2140 {
2141 if (!noStack)
2142 {
2143 // Scan stack for main thread
2144 debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
2145 version (STACKGROWSDOWN)
2146 mark(stackTop, stackBottom);
2147 else
2148 mark(stackBottom, stackTop);
2149 }
2150 }
2151
2152 // Scan roots[]
2153 debug(COLLECT_PRINTF) printf("scan roots[]\n");
2154 mark(roots, roots + nroots);
2155
2156 // Scan ranges[]
2157 debug(COLLECT_PRINTF) printf("scan ranges[]\n");
2158 //log++;
2159 for (n = 0; n < nranges; n++)
2160 {
2161 debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
2162 mark(ranges[n].pbot, ranges[n].ptop);
2163 }
2164 //log--;
2165
2166 debug(COLLECT_PRINTF) printf("\tscan heap\n");
2167 while (anychanges)
2168 {
2169 anychanges = 0;
2170 for (n = 0; n < npools; n++)
2171 {
2172 uint *bbase;
2173 uint *b;
2174 uint *btop;
2175
2176 pool = pooltable[n];
2177
2178 bbase = pool.scan.base();
2179 btop = bbase + pool.scan.nwords;
2180 for (b = bbase; b < btop;)
2181 { Bins bin;
2182 uint pn;
2183 uint u;
2184 uint bitm;
2185 byte *o;
2186
2187 bitm = *b;
2188 if (!bitm)
2189 { b++;
2190 continue;
2191 }
2192 *b = 0;
2193
2194 o = pool.baseAddr + (b - bbase) * 32 * 16;
2195 if (!(bitm & 0xFFFF))
2196 {
2197 bitm >>= 16;
2198 o += 16 * 16;
2199 }
2200 for (; bitm; o += 16, bitm >>= 1)
2201 {
2202 if (!(bitm & 1))
2203 continue;
2204
2205 pn = (o - pool.baseAddr) / PAGESIZE;
2206 bin = cast(Bins)pool.pagetable[pn];
2207 if (bin < B_PAGE)
2208 {
2209 mark(o, o + binsize[bin]);
2210 }
2211 else if (bin == B_PAGE || bin == B_PAGEPLUS)
2212 {
2213 if (bin == B_PAGEPLUS)
2214 {
2215 while (pool.pagetable[pn - 1] != B_PAGE)
2216 pn--;
2217 }
2218 u = 1;
2219 while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
2220 u++;
2221 mark(o, o + u * PAGESIZE);
2222 }
2223 }
2224 }
2225 }
2226 }
2227
2228 thread_resumeAll();
2229
2230 // Free up everything not marked
2231 debug(COLLECT_PRINTF) printf("\tfree'ing\n");
2232 size_t freedpages = 0;
2233 size_t freed = 0;
2234 for (n = 0; n < npools; n++)
2235 { uint pn;
2236 uint ncommitted;
2237 uint *bbase;
2238
2239 pool = pooltable[n];
2240 bbase = pool.mark.base();
2241 ncommitted = pool.ncommitted;
2242 for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
2243 {
2244 Bins bin = cast(Bins)pool.pagetable[pn];
2245
2246 if (bin < B_PAGE)
2247 { byte *p;
2248 byte *ptop;
2249 uint biti;
2250 uint bitstride;
2251 uint size = binsize[bin];
2252
2253 p = pool.baseAddr + pn * PAGESIZE;
2254 ptop = p + PAGESIZE;
2255 biti = pn * (PAGESIZE/16);
2256 bitstride = size / 16;
2257
2258 version(none) // BUG: doesn't work because freebits() must also be cleared
2259 {
2260 // If free'd entire page
2261 if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
2262 bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
2263 {
2264 for (; p < ptop; p += size, biti += bitstride)
2265 {
2266 if (pool.finals.nbits && pool.finals.testClear(biti))
2267 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2268 gcx.clrBits(pool, biti, BlkAttr.ALL_BITS);
2269
2270 List *list = cast(List *)p;
2271 //debug(PRINTF) printf("\tcollecting %x\n", list);
2272 log_free(sentinel_add(list));
2273
2274 debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
2275 }
2276 pool.pagetable[pn] = B_FREE;
2277 freed += PAGESIZE;
2278 //debug(PRINTF) printf("freeing entire page %d\n", pn);
2279 continue;
2280 }
2281 }
2282 for (; p < ptop; p += size, biti += bitstride)
2283 {
2284 if (!pool.mark.test(biti))
2285 {
2286 sentinel_Invariant(sentinel_add(p));
2287
2288 pool.freebits.set(biti);
2289 if (pool.finals.nbits && pool.finals.testClear(biti))
2290 rt_finalize(cast(List *)sentinel_add(p), false/*noStack > 0*/);
2291 clrBits(pool, biti, BlkAttr.ALL_BITS);
2292
2293 List *list = cast(List *)p;
2294 debug(PRINTF) printf("\tcollecting %x\n", list);
2295 log_free(sentinel_add(list));
2296
2297 debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
2298
2299 freed += size;
2300 }
2301 }
2302 }
2303 else if (bin == B_PAGE)
2304 { uint biti = pn * (PAGESIZE / 16);
2305
2306 if (!pool.mark.test(biti))
2307 { byte *p = pool.baseAddr + pn * PAGESIZE;
2308
2309 sentinel_Invariant(sentinel_add(p));
2310 if (pool.finals.nbits && pool.finals.testClear(biti))
2311 rt_finalize(sentinel_add(p), false/*noStack > 0*/);
2312 clrBits(pool, biti, BlkAttr.ALL_BITS);
2313
2314 debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
2315 log_free(sentinel_add(p));
2316 pool.pagetable[pn] = B_FREE;
2317 freedpages++;
2318 debug (MEMSTOMP) cstring.memset(p, 0xF3, PAGESIZE);
2319 while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
2320 {
2321 pn++;
2322 pool.pagetable[pn] = B_FREE;
2323 freedpages++;
2324
2325 debug (MEMSTOMP)
2326 { p += PAGESIZE;
2327 cstring.memset(p, 0xF3, PAGESIZE);
2328 }
2329 }
2330 }
2331 }
2332 }
2333 }
2334
2335 // Zero buckets
2336 bucket[] = null;
2337
2338 // Free complete pages, rebuild free list
2339 debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
2340 size_t recoveredpages = 0;
2341 for (n = 0; n < npools; n++)
2342 { uint pn;
2343 uint ncommitted;
2344
2345 pool = pooltable[n];
2346 ncommitted = pool.ncommitted;
2347 for (pn = 0; pn < ncommitted; pn++)
2348 {
2349 Bins bin = cast(Bins)pool.pagetable[pn];
2350 uint biti;
2351 uint u;
2352
2353 if (bin < B_PAGE)
2354 {
2355 uint size = binsize[bin];
2356 uint bitstride = size / 16;
2357 uint bitbase = pn * (PAGESIZE / 16);
2358 uint bittop = bitbase + (PAGESIZE / 16);
2359 byte *p;
2360
2361 biti = bitbase;
2362 for (biti = bitbase; biti < bittop; biti += bitstride)
2363 { if (!pool.freebits.test(biti))
2364 goto Lnotfree;
2365 }
2366 pool.pagetable[pn] = B_FREE;
2367 recoveredpages++;
2368 continue;
2369
2370 Lnotfree:
2371 p = pool.baseAddr + pn * PAGESIZE;
2372 for (u = 0; u < PAGESIZE; u += size)
2373 { biti = bitbase + u / 16;
2374 if (pool.freebits.test(biti))
2375 { List *list;
2376
2377 list = cast(List *)(p + u);
2378 if (list.next != bucket[bin]) // avoid unnecessary writes
2379 list.next = bucket[bin];
2380 bucket[bin] = list;
2381 }
2382 }
2383 }
2384 }
2385 }
2386
2387 debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
2388 debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);
2389
2390 return freedpages + recoveredpages;
2391 }
2392
2393
2394 /**
2395 * Read the BlkAttr bits recorded for the block at bit index biti.
2396 */
2397 uint getBits(Pool* pool, uint biti)
2398 in
2399 {
2400 assert( pool );
2401 }
2402 body
2403 {
2404 uint bits;
2405
2406 if (pool.finals.nbits &&
2407 pool.finals.test(biti))
2408 bits |= BlkAttr.FINALIZE;
2409 if (pool.noscan.test(biti))
2410 bits |= BlkAttr.NO_SCAN;
2411 // if (pool.nomove.nbits &&
2412 // pool.nomove.test(biti))
2413 // bits |= BlkAttr.NO_MOVE;
2414 return bits;
2415 }
2416
2417
2418 /**
2419 * Set the BlkAttr bits given in mask for the block at bit index biti.
2420 */
2421 void setBits(Pool* pool, uint biti, uint mask)
2422 in
2423 {
2424 assert( pool );
2425 }
2426 body
2427 {
2428 if (mask & BlkAttr.FINALIZE)
2429 {
2430 if (!pool.finals.nbits)
2431 pool.finals.alloc(pool.mark.nbits);
2432 pool.finals.set(biti);
2433 }
2434 if (mask & BlkAttr.NO_SCAN)
2435 {
2436 pool.noscan.set(biti);
2437 }
2438 // if (mask & BlkAttr.NO_MOVE)
2439 // {
2440 // if (!pool.nomove.nbits)
2441 // pool.nomove.alloc(pool.mark.nbits);
2442 // pool.nomove.set(biti);
2443 // }
2444 }
2445
2446
2447 /**
2448 * Clear the BlkAttr bits given in mask for the block at bit index biti.
2449 */
2450 void clrBits(Pool* pool, uint biti, uint mask)
2451 in
2452 {
2453 assert( pool );
2454 }
2455 body
2456 {
2457 if (mask & BlkAttr.FINALIZE && pool.finals.nbits)
2458 pool.finals.clear(biti);
2459 if (mask & BlkAttr.NO_SCAN)
2460 pool.noscan.clear(biti);
2461 // if (mask & BlkAttr.NO_MOVE && pool.nomove.nbits)
2462 // pool.nomove.clear(biti);
2463 }
2464
2465
2466 /***** Leak Detector ******/
2467
2468
2469 debug (LOGGING)
2470 {
2471 LogArray current;
2472 LogArray prev;
2473
2474
2475 void log_init()
2476 {
2477 //debug(PRINTF) printf("+log_init()\n");
2478 current.reserve(1000);
2479 prev.reserve(1000);
2480 //debug(PRINTF) printf("-log_init()\n");
2481 }
2482
2483
2484 void log_malloc(void *p, size_t size)
2485 {
2486 //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
2487 Log log;
2488
2489 log.p = p;
2490 log.size = size;
2491 log.line = GC.line;
2492 log.file = GC.file;
2493 log.parent = null;
2494
2495 GC.line = 0;
2496 GC.file = null;
2497
2498 current.push(log);
2499 //debug(PRINTF) printf("-log_malloc()\n");
2500 }
2501
2502
2503 void log_free(void *p)
2504 {
2505 //debug(PRINTF) printf("+log_free(%x)\n", p);
2506 size_t i;
2507
2508 i = current.find(p);
2509 if (i == ~0u)
2510 {
2511 debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
2512 }
2513 else
2514 current.remove(i);
2515 //debug(PRINTF) printf("-log_free()\n");
2516 }
2517
2518
2519 void log_collect()
2520 {
2521 //debug(PRINTF) printf("+log_collect()\n");
2522 // Print everything in current that is not in prev
2523
2524 debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
2525 size_t used = 0;
2526 for (size_t i = 0; i < current.dim; i++)
2527 {
2528 size_t j;
2529
2530 j = prev.find(current.data[i].p);
2531 if (j == ~0u)
2532 current.data[i].print();
2533 else
2534 used++;
2535 }
2536
2537 debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
2538 for (size_t i = 0; i < current.dim; i++)
2539 {
2540 void *p;
2541 size_t j;
2542
2543 p = current.data[i].p;
2544 if (!findPool(current.data[i].parent))
2545 {
2546 j = prev.find(current.data[i].p);
2547 if (j == ~0u)
2548 debug(PRINTF) printf("N");
2549 else
2550 debug(PRINTF) printf(" ");;
2551 current.data[i].print();
2552 }
2553 }
2554
2555 debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
2556 prev.copy(&current);
2557
2558 debug(PRINTF) printf("-log_collect()\n");
2559 }
2560
2561
2562 void log_parent(void *p, void *parent)
2563 {
2564 //debug(PRINTF) printf("+log_parent()\n");
2565 size_t i;
2566
2567 i = current.find(p);
2568 if (i == ~0u)
2569 {
2570 debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
2571 Pool *pool;
2572 pool = findPool(p);
2573 assert(pool);
2574 size_t offset = cast(size_t)(p - pool.baseAddr);
2575 uint biti;
2576 uint pn = offset / PAGESIZE;
2577 Bins bin = cast(Bins)pool.pagetable[pn];
2578 biti = (offset & notbinsize[bin]);
2579 debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
2580 }
2581 else
2582 {
2583 current.data[i].parent = parent;
2584 }
2585 //debug(PRINTF) printf("-log_parent()\n");
2586 }
2587
2588 }
2589 else
2590 {
2591 void log_init() { }
2592 void log_malloc(void *p, size_t size) { }
2593 void log_free(void *p) { }
2594 void log_collect() { }
2595 void log_parent(void *p, void *parent) { }
2596 }
2597 }
2598
2599
2600 /* ============================ Pool =============================== */
2601
2602
2603 struct Pool
2604 {
2605 byte* baseAddr;
2606 byte* topAddr;
2607 GCBits mark; // entries already scanned, or should not be scanned
2608 GCBits scan; // entries that need to be scanned
2609 GCBits freebits; // entries that are on the free list
2610 GCBits finals; // entries that need finalizer run on them
2611 GCBits noscan; // entries that should not be scanned
2612
2613 uint npages;
2614 uint ncommitted; // ncommitted <= npages
2615 ubyte* pagetable;
2616
2617
2618 void initialize(uint npages)
2619 {
2620 size_t poolsize;
2621
2622 //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
2623 poolsize = npages * PAGESIZE;
2624 assert(poolsize >= POOLSIZE);
2625 baseAddr = cast(byte *)os_mem_map(poolsize);
2626
2627 // Some of the code depends on page alignment of memory pools
2628 assert((cast(uint)baseAddr & (PAGESIZE - 1)) == 0);
2629
2630 if (!baseAddr)
2631 {
2632 //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
2633 //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);
2634
2635 npages = 0;
2636 poolsize = 0;
2637 }
2638 //assert(baseAddr);
2639 topAddr = baseAddr + poolsize;
2640
2641 mark.alloc(poolsize / 16);
2642 scan.alloc(poolsize / 16);
2643 freebits.alloc(poolsize / 16);
2644 noscan.alloc(poolsize / 16);
2645
2646 pagetable = cast(ubyte*)cstdlib.malloc(npages);
2647 if (!pagetable)
2648 onOutOfMemoryError();
2649 cstring.memset(pagetable, B_UNCOMMITTED, npages);
2650
2651 this.npages = npages;
2652 ncommitted = 0;
2653 }
2654
2655
2656 void Dtor()
2657 {
2658 if (baseAddr)
2659 {
2660 int result;
2661
2662 if (ncommitted)
2663 {
2664 result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
2665 assert(result == 0);
2666 ncommitted = 0;
2667 }
2668
2669 if (npages)
2670 {
2671 result = os_mem_unmap(baseAddr, npages * PAGESIZE);
2672 assert(result == 0);
2673 npages = 0;
2674 }
2675
2676 baseAddr = null;
2677 topAddr = null;
2678 }
2679 if (pagetable)
2680 cstdlib.free(pagetable);
2681
2682 mark.Dtor();
2683 scan.Dtor();
2684 freebits.Dtor();
2685 finals.Dtor();
2686 noscan.Dtor();
2687 }
2688
2689
2690 void Invariant() { }
2691
2692
2693 invariant
2694 {
2695 //mark.Invariant();
2696 //scan.Invariant();
2697 //freebits.Invariant();
2698 //finals.Invariant();
2699 //noscan.Invariant();
2700
2701 if (baseAddr)
2702 {
2703 //if (baseAddr + npages * PAGESIZE != topAddr)
2704 //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
2705 assert(baseAddr + npages * PAGESIZE == topAddr);
2706 assert(ncommitted <= npages);
2707 }
2708
2709 for (uint i = 0; i < npages; i++)
2710 { Bins bin = cast(Bins)pagetable[i];
2711
2712 assert(bin < B_MAX);
2713 }
2714 }
2715
2716
2717 /**
2718 * Allocate n pages from Pool.
2719 * Returns ~0u on failure.
2720 */
2721 uint allocPages(uint n)
2722 {
2723 uint i;
2724 uint n2;
2725
2726 //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
2727 n2 = n;
2728 for (i = 0; i < ncommitted; i++)
2729 {
2730 if (pagetable[i] == B_FREE)
2731 {
2732 if (--n2 == 0)
2733 { //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
2734 return i - n + 1;
2735 }
2736 }
2737 else
2738 n2 = n;
2739 }
2740 return extendPages(n);
2741 }
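/* Illustrative example: allocPages(3) walks pagetable[] over the committed
 * pages looking for 3 consecutive B_FREE entries and returns the index of the
 * first one; a pagetable of [PAGE, FREE, FREE, FREE, PAGE, ...] yields 1.
 * If no such run exists it falls through to extendPages(), and ~0u signals
 * failure to the caller (see bigAlloc()).
 */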
2742
2743 /**
2744 * Extend Pool by n pages.
2745 * Returns ~0u on failure.
2746 */
2747 uint extendPages(uint n)
2748 {
2749 //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
2750 if (ncommitted + n <= npages)
2751 {
2752 uint tocommit;
2753
2754 tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
2755 if (ncommitted + tocommit > npages)
2756 tocommit = npages - ncommitted;
2757 //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
2758 //fflush(stdout);
2759 if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
2760 {
2761 cstring.memset(pagetable + ncommitted, B_FREE, tocommit);
2762 auto i = ncommitted;
2763 ncommitted += tocommit;
2764
2765 while (i && pagetable[i - 1] == B_FREE)
2766 i--;
2767
2768 return i;
2769 }
2770 //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
2771 }
2772
2773 return ~0u;
2774 }
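/* Illustrative example: extendPages(3) rounds the commit request up to
 * COMMITSIZE granularity, i.e. (3 + 16 - 1) & ~(16 - 1) = 16 pages, commits
 * them with os_mem_commit(), marks them B_FREE, and returns the index of the
 * first usable free page, reaching back over any already-committed free pages
 * that immediately precede the new region.
 */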
2775
2776
2777 /**
2778 * Free npages pages starting with pagenum.
2779 */
2780 void freePages(uint pagenum, uint npages)
2781 {
2782 cstring.memset(&pagetable[pagenum], B_FREE, npages);
2783 }
2784
2785
2786 /**
2787 * Used for sorting pooltable[]
2788 */
2789 int opCmp(Pool *p2)
2790 {
2791 if (baseAddr < p2.baseAddr)
2792 return -1;
2793 else
2794 return cast(int)(baseAddr > p2.baseAddr);
2795 }
2796 }
2797
2798
2799 /* ============================ SENTINEL =============================== */
2800
2801
2802 version (SENTINEL)
2803 {
2804 const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
2805 const ubyte SENTINEL_POST = 0xF5; // 8 bits
2806 const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;
2807
2808
2809 size_t* sentinel_size(void *p) { return &(cast(size_t *)p)[-2]; }
2810 size_t* sentinel_pre(void *p) { return &(cast(size_t *)p)[-1]; }
2811 ubyte* sentinel_post(void *p) { return &(cast(ubyte *)p)[*sentinel_size(p)]; }
2812
2813
2814 void sentinel_init(void *p, size_t size)
2815 {
2816 *sentinel_size(p) = size;
2817 *sentinel_pre(p) = SENTINEL_PRE;
2818 *sentinel_post(p) = SENTINEL_POST;
2819 }
2820
2821
2822 void sentinel_Invariant(void *p)
2823 {
2824 assert(*sentinel_pre(p) == SENTINEL_PRE);
2825 assert(*sentinel_post(p) == SENTINEL_POST);
2826 }
2827
2828
2829 void *sentinel_add(void *p)
2830 {
2831 return p + 2 * size_t.sizeof;
2832 }
2833
2834
2835 void *sentinel_sub(void *p)
2836 {
2837 return p - 2 * size_t.sizeof;
2838 }
2839 }
2840 else
2841 {
2842 const uint SENTINEL_EXTRA = 0;
2843
2844
2845 void sentinel_init(void *p, size_t size)
2846 {
2847 }
2848
2849
2850 void sentinel_Invariant(void *p)
2851 {
2852 }
2853
2854
2855 void *sentinel_add(void *p)
2856 {
2857 return p;
2858 }
2859
2860
2861 void *sentinel_sub(void *p)
2862 {
2863 return p;
2864 }
2865 }
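/* Illustrative layout of a SENTINEL-guarded allocation (from the helpers
 * above): the raw block handed out by the allocator is
 *
 *     [ size_t size | size_t SENTINEL_PRE | user data (size bytes) | ubyte SENTINEL_POST ]
 *
 * sentinel_add()/sentinel_sub() convert between the raw pointer and the user
 * pointer (an offset of 2 * size_t.sizeof), and sentinel_Invariant() asserts
 * that neither guard value has been overwritten, catching buffer underruns and
 * overruns. With SENTINEL disabled the helpers collapse to no-ops and
 * SENTINEL_EXTRA is 0.
 */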
2866