Mercurial > projects > ldc
comparison lphobos/gc/gcx.d @ 473:373489eeaf90
Applied downs' lphobos update
author | Tomas Lindquist Olsen <tomas.l.olsen@gmail.com> |
---|---|
date | Mon, 04 Aug 2008 19:28:49 +0200 |
parents | 5825d48b27d1 |
children |
comparison
equal
deleted
inserted
replaced
472:15c804b6ce77 | 473:373489eeaf90 |
---|---|
1 /* | 1 /** |
2 * Copyright (C) 2004 by Digital Mars, www.digitalmars.com | 2 * This module contains the garbage collector implementation. |
3 * Written by Walter Bright | |
4 * | 3 * |
4 * Copyright: Copyright (C) 2001-2007 Digital Mars, www.digitalmars.com. | |
5 * All rights reserved. | |
6 * License: | |
5 * This software is provided 'as-is', without any express or implied | 7 * This software is provided 'as-is', without any express or implied |
6 * warranty. In no event will the authors be held liable for any damages | 8 * warranty. In no event will the authors be held liable for any damages |
7 * arising from the use of this software. | 9 * arising from the use of this software. |
8 * | 10 * |
9 * Permission is granted to anyone to use this software for any purpose, | 11 * Permission is granted to anyone to use this software for any purpose, |
10 * including commercial applications, and to alter it and redistribute it | 12 * including commercial applications, and to alter it and redistribute it |
11 * freely, subject to the following restrictions: | 13 * freely, in both source and binary form, subject to the following |
14 * restrictions: | |
12 * | 15 * |
13 * o The origin of this software must not be misrepresented; you must not | 16 * o The origin of this software must not be misrepresented; you must not |
14 * claim that you wrote the original software. If you use this software | 17 * claim that you wrote the original software. If you use this software |
15 * in a product, an acknowledgment in the product documentation would be | 18 * in a product, an acknowledgment in the product documentation would be |
16 * appreciated but is not required. | 19 * appreciated but is not required. |
17 * o Altered source versions must be plainly marked as such, and must not | 20 * o Altered source versions must be plainly marked as such, and must not |
18 * be misrepresented as being the original software. | 21 * be misrepresented as being the original software. |
19 * o This notice may not be removed or altered from any source | 22 * o This notice may not be removed or altered from any source |
20 * distribution. | 23 * distribution. |
24 * Authors: Walter Bright, David Friedman, Sean Kelly | |
21 */ | 25 */ |
22 | 26 |
23 // D Garbage Collector stub to prevent linking in gc | 27 /* NOTE: This file has been patched from the original DMD distribution to |
24 | 28 work with the GDC compiler. |
25 /* | 29 |
26 * Modified for use as the preliminary GC for LLVMDC (LLVM D Compiler) | 30 Modified by David Friedman, July 2006 |
27 * by Tomas Lindquist Olsen, Dec 2007 | 31 */ |
28 */ | |
29 | 32 |
30 module gcx; | 33 module gcx; |
31 | 34 |
32 debug=PRINTF; | 35 // D Garbage Collector implementation |
36 | |
37 /************** Debugging ***************************/ | |
38 | |
39 //debug = PRINTF; // turn on printf's | |
40 //debug = COLLECT_PRINTF; // turn on printf's | |
41 //debug = THREADINVARIANT; // check thread integrity | |
42 //debug = LOGGING; // log allocations / frees | |
43 //debug = MEMSTOMP; // stomp on memory | |
44 //debug = SENTINEL; // add underrun/overrrun protection | |
45 //debug = PTRCHECK; // more pointer checking | |
46 //debug = PTRCHECK2; // thorough but slow pointer checking | |
47 | |
48 /*************** Configuration *********************/ | |
49 | |
50 version = STACKGROWSDOWN; // growing the stack means subtracting from the stack pointer | |
51 // (use for Intel X86 CPUs) | |
52 // else growing the stack means adding to the stack pointer | |
53 version = MULTI_THREADED; // produce multithreaded version | |
33 | 54 |
34 /***************************************************/ | 55 /***************************************************/ |
35 | 56 |
57 private import gcbits; | |
58 private import gcstats; | |
59 | |
60 private import cstdlib = std.c.stdlib : calloc, free, malloc, realloc; | |
61 private import cstring = std.c.string : memcpy, memmove, memset; | |
62 | |
63 debug private import std.c.stdio; | |
64 | |
65 import std.outofmemory; | |
66 import std.gc; | |
67 | |
68 version (GNU) | |
69 { | |
70 private import gcc.builtins; | |
71 } | |
36 | 72 |
37 version (Win32) | 73 version (Win32) |
38 { | 74 { |
39 import win32; | 75 import win32; |
76 import std.c.windows.windows; | |
40 } | 77 } |
41 | 78 else version (GNU) |
42 version (linux) | 79 { |
80 private import gcgcc; | |
81 } | |
82 else version (linux) | |
43 { | 83 { |
44 import gclinux; | 84 import gclinux; |
45 } | 85 } |
46 | 86 |
47 import gcstats; | 87 /*version (BigEndian) |
48 import stdc = std.c.stdlib; | 88 private import std.intrinsic;*/ |
49 | 89 |
50 | 90 |
51 //alias GC* gc_t; | 91 |
92 version (MULTI_THREADED) | |
93 { | |
94 import std.thread; | |
95 } | |
96 | |
97 | |
98 private void onOutOfMemoryError() | |
99 { | |
100 _d_OutOfMemory(); | |
101 } | |
102 | |
103 private void* rt_stackBottom() | |
104 { | |
105 version (Win32) | |
106 { | |
107 return win32.os_query_stackBottom(); | |
108 } | |
109 else version (GNU) | |
110 { | |
111 return gcgcc.os_query_stackBottom(); | |
112 } | |
113 else version (linux) | |
114 { | |
115 return gclinux.os_query_stackBottom(); | |
116 } | |
117 } | |
118 | |
119 private bool thread_needLock() | |
120 { | |
121 return std.thread.Thread.nthreads != 1; | |
122 } | |
123 | |
124 | |
52 alias GC gc_t; | 125 alias GC gc_t; |
53 | 126 |
54 //struct GCStats { } | 127 version (X86) version (D_InlineAsm) { version = Asm86; } |
128 | |
129 | |
130 /* ======================= Leak Detector =========================== */ | |
131 | |
132 | |
133 debug (LOGGING) | |
134 { | |
135 struct Log | |
136 { | |
137 void* p; | |
138 size_t size; | |
139 uint line; | |
140 char* file; | |
141 void* parent; | |
142 | |
143 void print() | |
144 { | |
145 printf(" p = %x, size = %d, parent = %x ", p, size, parent); | |
146 if (file) | |
147 { | |
148 printf("%s(%u)", file, line); | |
149 } | |
150 printf("\n"); | |
151 } | |
152 } | |
153 | |
154 | |
155 struct LogArray | |
156 { | |
157 size_t dim; | |
158 size_t allocdim; | |
159 Log *data; | |
160 | |
161 void Dtor() | |
162 { | |
163 if (data) | |
164 cstdlib.free(data); | |
165 data = null; | |
166 } | |
167 | |
168 void reserve(size_t nentries) | |
169 { | |
170 assert(dim <= allocdim); | |
171 if (allocdim - dim < nentries) | |
172 { | |
173 allocdim = (dim + nentries) * 2; | |
174 assert(dim + nentries <= allocdim); | |
175 if (!data) | |
176 { | |
177 data = cast(Log *)cstdlib.malloc(allocdim * Log.sizeof); | |
178 if (!data && allocdim) | |
179 onOutOfMemoryError(); | |
180 } | |
181 else | |
182 { Log *newdata; | |
183 | |
184 newdata = cast(Log *)cstdlib.malloc(allocdim * Log.sizeof); | |
185 if (!newdata && allocdim) | |
186 onOutOfMemoryError(); | |
187 cstring.memcpy(newdata, data, dim * Log.sizeof); | |
188 cstdlib.free(data); | |
189 data = newdata; | |
190 } | |
191 } | |
192 } | |
193 | |
194 | |
195 void push(Log log) | |
196 { | |
197 reserve(1); | |
198 data[dim++] = log; | |
199 } | |
200 | |
201 void remove(size_t i) | |
202 { | |
203 cstring.memmove(data + i, data + i + 1, (dim - i) * Log.sizeof); | |
204 dim--; | |
205 } | |
206 | |
207 | |
208 size_t find(void *p) | |
209 { | |
210 for (size_t i = 0; i < dim; i++) | |
211 { | |
212 if (data[i].p == p) | |
213 return i; | |
214 } | |
215 return ~0u; // not found | |
216 } | |
217 | |
218 | |
219 void copy(LogArray *from) | |
220 { | |
221 reserve(from.dim - dim); | |
222 assert(from.dim <= allocdim); | |
223 cstring.memcpy(data, from.data, from.dim * Log.sizeof); | |
224 dim = from.dim; | |
225 } | |
226 } | |
227 } | |
228 | |
55 | 229 |
56 /* ============================ GC =============================== */ | 230 /* ============================ GC =============================== */ |
57 | 231 |
58 | 232 |
59 //alias int size_t; | |
60 alias void (*GC_FINALIZER)(void *p, bool dummy); | 233 alias void (*GC_FINALIZER)(void *p, bool dummy); |
61 | 234 |
62 const uint GCVERSION = 1; // increment every time we change interface | 235 class GCLock { } // just a dummy so we can get a global lock |
63 // to GC. | 236 |
237 | |
238 const uint GCVERSION = 1; // increment every time we change interface | |
239 // to GC. | |
64 | 240 |
65 class GC | 241 class GC |
66 { | 242 { |
243 // For passing to debug code | |
244 static size_t line; | |
245 static char* file; | |
246 | |
67 uint gcversion = GCVERSION; | 247 uint gcversion = GCVERSION; |
68 | 248 |
69 void *gcx; // implementation | 249 Gcx *gcx; // implementation |
250 static ClassInfo gcLock; // global lock | |
251 | |
70 | 252 |
71 void initialize() | 253 void initialize() |
72 { | 254 { |
73 debug(PRINTF) printf("GC initialize()\n"); | 255 gcLock = GCLock.classinfo; |
256 gcx = cast(Gcx *)cstdlib.calloc(1, Gcx.sizeof); | |
257 if (!gcx) | |
258 onOutOfMemoryError(); | |
259 printf("GCX set to %p\n", gcx); | |
260 gcx.initialize(); | |
261 setStackBottom(rt_stackBottom()); | |
74 } | 262 } |
75 | 263 |
76 | 264 |
77 void Dtor() | 265 void Dtor() |
78 { | 266 { |
79 debug(PRINTF) printf("GC Dtor()\n"); | 267 version (linux) |
80 } | 268 { |
81 | 269 //debug(PRINTF) printf("Thread %x ", pthread_self()); |
82 invariant | 270 //debug(PRINTF) printf("GC.Dtor()\n"); |
83 { | 271 } |
84 debug(PRINTF) printf("GC invariant()\n"); | 272 |
85 } | 273 if (gcx) |
86 | 274 { |
275 gcx.Dtor(); | |
276 cstdlib.free(gcx); | |
277 gcx = null; | |
278 } | |
279 } | |
280 | |
281 | |
282 invariant() | |
283 { | |
284 if (gcx) | |
285 { | |
286 gcx.thread_Invariant(); | |
287 } | |
288 } | |
289 | |
290 | |
291 /** | |
292 * | |
293 */ | |
294 void enable() | |
295 { | |
296 if (!thread_needLock()) | |
297 { | |
298 assert(gcx.disabled > 0); | |
299 gcx.disabled--; | |
300 } | |
301 else synchronized (gcLock) | |
302 { | |
303 assert(gcx.disabled > 0); | |
304 gcx.disabled--; | |
305 } | |
306 } | |
307 | |
308 | |
309 /** | |
310 * | |
311 */ | |
312 void disable() | |
313 { | |
314 if (!thread_needLock()) | |
315 { | |
316 gcx.disabled++; | |
317 } | |
318 else synchronized (gcLock) | |
319 { | |
320 gcx.disabled++; | |
321 } | |
322 } | |
323 | |
324 | |
325 /** | |
326 * | |
327 */ | |
87 void *malloc(size_t size) | 328 void *malloc(size_t size) |
88 { | 329 { |
89 debug(PRINTF) printf("GC malloc()\n"); | 330 if (!size) |
90 return malloc(size); | 331 { |
91 } | 332 return null; |
92 | 333 } |
93 void *mallocNoSync(size_t size) | 334 |
94 { | 335 if (!thread_needLock()) |
95 debug(PRINTF) printf("GC mallocNoSync()\n"); | 336 { |
96 return malloc(size); | 337 return mallocNoSync(size); |
97 } | 338 } |
98 | 339 else synchronized (gcLock) |
99 | 340 { |
341 return mallocNoSync(size); | |
342 } | |
343 } | |
344 | |
345 | |
346 // | |
347 // | |
348 // | |
349 private void *mallocNoSync(size_t size) | |
350 { | |
351 assert(size != 0); | |
352 | |
353 void *p = null; | |
354 Bins bin; | |
355 | |
356 //debug(PRINTF) printf("GC::malloc(size = %d, gcx = %p)\n", size, gcx); | |
357 assert(gcx); | |
358 //debug(PRINTF) printf("gcx.self = %x, pthread_self() = %x\n", gcx.self, pthread_self()); | |
359 size += SENTINEL_EXTRA; | |
360 | |
361 // Compute size bin | |
362 // Cache previous binsize lookup - Dave Fladebo. | |
363 static size_t lastsize = -1; | |
364 static Bins lastbin; | |
365 if (size == lastsize) | |
366 bin = lastbin; | |
367 else | |
368 { | |
369 bin = gcx.findBin(size); | |
370 lastsize = size; | |
371 lastbin = bin; | |
372 } | |
373 | |
374 if (bin < B_PAGE) | |
375 { | |
376 p = gcx.bucket[bin]; | |
377 if (p == null) | |
378 { | |
379 if (!gcx.allocPage(bin) && !gcx.disabled) // try to find a new page | |
380 { | |
381 if (!thread_needLock()) | |
382 { | |
383 /* Then we haven't locked it yet. Be sure | |
384 * and lock for a collection, since a finalizer | |
385 * may start a new thread. | |
386 */ | |
387 synchronized (gcLock) | |
388 { | |
389 gcx.fullcollectshell(); | |
390 } | |
391 } | |
392 else if (!gcx.fullcollectshell()) // collect to find a new page | |
393 { | |
394 //gcx.newPool(1); | |
395 } | |
396 } | |
397 if (!gcx.bucket[bin] && !gcx.allocPage(bin)) | |
398 { int result; | |
399 | |
400 gcx.newPool(1); // allocate new pool to find a new page | |
401 result = gcx.allocPage(bin); | |
402 if (!result) | |
403 return null; | |
404 } | |
405 p = gcx.bucket[bin]; | |
406 } | |
407 | |
408 // Return next item from free list | |
409 gcx.bucket[bin] = (cast(List *)p).next; | |
410 cstring.memset(p + size, 0, binsize[bin] - size); | |
411 //debug(PRINTF) printf("\tmalloc => %x\n", p); | |
412 debug (MEMSTOMP) cstring.memset(p, 0xF0, size); | |
413 } | |
414 else | |
415 { | |
416 p = gcx.bigAlloc(size); | |
417 if (!p) | |
418 return null; | |
419 } | |
420 size -= SENTINEL_EXTRA; | |
421 p = sentinel_add(p); | |
422 sentinel_init(p, size); | |
423 gcx.log_malloc(p, size); | |
424 return p; | |
425 } | |
426 | |
427 | |
428 /** | |
429 * | |
430 */ | |
100 void *calloc(size_t size, size_t n) | 431 void *calloc(size_t size, size_t n) |
101 { | 432 { |
102 debug(PRINTF) printf("GC calloc()\n"); | 433 size_t len = size * n; |
103 return calloc(n, size); | 434 |
104 } | 435 if (!len) |
105 | 436 { |
106 | 437 return null; |
438 } | |
439 | |
440 if (!thread_needLock()) | |
441 { | |
442 return callocNoSync(len); | |
443 } | |
444 else synchronized (gcLock) | |
445 { | |
446 return callocNoSync(len); | |
447 } | |
448 } | |
449 | |
450 | |
451 // | |
452 // | |
453 // | |
454 private void *callocNoSync(size_t size) | |
455 { | |
456 assert(size != 0); | |
457 | |
458 void *p = mallocNoSync(size); | |
459 if (p) | |
460 { //debug(PRINTF) printf("calloc: %x len %d\n", p, len); | |
461 cstring.memset(p, 0, size); | |
462 } | |
463 return p; | |
464 } | |
465 | |
466 | |
467 /** | |
468 * | |
469 */ | |
107 void *realloc(void *p, size_t size) | 470 void *realloc(void *p, size_t size) |
108 { | 471 { |
109 debug(PRINTF) printf("GC realloc()\n"); | 472 if (!thread_needLock()) |
110 return realloc(p, size); | 473 { |
111 } | 474 return reallocNoSync(p, size); |
112 | 475 } |
113 | 476 else synchronized (gcLock) |
477 { | |
478 return reallocNoSync(p, size); | |
479 } | |
480 } | |
481 | |
482 | |
483 // | |
484 // | |
485 // | |
486 private void *reallocNoSync(void *p, size_t size) | |
487 { | |
488 gcx.p_cache = null; | |
489 if (!size) | |
490 { if (p) | |
491 { freeNoSync(p); | |
492 p = null; | |
493 } | |
494 } | |
495 else if (!p) | |
496 { | |
497 p = mallocNoSync(size); | |
498 } | |
499 else | |
500 { void *p2; | |
501 size_t psize; | |
502 | |
503 //debug(PRINTF) printf("GC::realloc(p = %x, size = %u)\n", p, size); | |
504 version (SENTINEL) | |
505 { | |
506 sentinel_Invariant(p); | |
507 psize = *sentinel_size(p); | |
508 if (psize != size) | |
509 { | |
510 p2 = mallocNoSync(size); | |
511 if (psize < size) | |
512 size = psize; | |
513 //debug(PRINTF) printf("\tcopying %d bytes\n",size); | |
514 cstring.memcpy(p2, p, size); | |
515 p = p2; | |
516 } | |
517 } | |
518 else | |
519 { | |
520 psize = gcx.findSize(p); // find allocated size | |
521 if (psize >= PAGESIZE && size >= PAGESIZE) | |
522 { | |
523 auto psz = psize / PAGESIZE; | |
524 auto newsz = (size + PAGESIZE - 1) / PAGESIZE; | |
525 if (newsz == psz) | |
526 return p; | |
527 | |
528 auto pool = gcx.findPool(p); | |
529 auto pagenum = (p - pool.baseAddr) / PAGESIZE; | |
530 | |
531 if (newsz < psz) | |
532 { // Shrink in place | |
533 synchronized (gcLock) | |
534 { | |
535 debug (MEMSTOMP) cstring.memset(p + size, 0xF2, psize - size); | |
536 pool.freePages(pagenum + newsz, psz - newsz); | |
537 } | |
538 return p; | |
539 } | |
540 else if (pagenum + newsz <= pool.npages) | |
541 { | |
542 // Attempt to expand in place | |
543 synchronized (gcLock) | |
544 { | |
545 for (size_t i = pagenum + psz; 1;) | |
546 { | |
547 if (i == pagenum + newsz) | |
548 { | |
549 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, size - psize); | |
550 cstring.memset(&pool.pagetable[pagenum + psz], B_PAGEPLUS, newsz - psz); | |
551 return p; | |
552 } | |
553 if (i == pool.ncommitted) | |
554 { | |
555 auto u = pool.extendPages(pagenum + newsz - pool.ncommitted); | |
556 if (u == ~0u) | |
557 break; | |
558 i = pagenum + newsz; | |
559 continue; | |
560 } | |
561 if (pool.pagetable[i] != B_FREE) | |
562 break; | |
563 i++; | |
564 } | |
565 } | |
566 } | |
567 } | |
568 if (psize < size || // if new size is bigger | |
569 psize > size * 2) // or less than half | |
570 { | |
571 p2 = mallocNoSync(size); | |
572 if (psize < size) | |
573 size = psize; | |
574 //debug(PRINTF) printf("\tcopying %d bytes\n",size); | |
575 cstring.memcpy(p2, p, size); | |
576 p = p2; | |
577 } | |
578 } | |
579 } | |
580 return p; | |
581 } | |
582 | |
583 | |
584 /** | |
585 * Attempt to in-place enlarge the memory block pointed to by p by at least | |
586 * minbytes beyond its current capacity, up to a maximum of maxsize. This | |
587 * does not attempt to move the memory block (like realloc() does). | |
588 * | |
589 * Returns: | |
590 * 0 if could not extend p, | |
591 * total size of entire memory block if successful. | |
592 */ | |
593 size_t extend(void* p, size_t minsize, size_t maxsize) | |
594 { | |
595 if (!thread_needLock()) | |
596 { | |
597 return extendNoSync(p, minsize, maxsize); | |
598 } | |
599 else synchronized (gcLock) | |
600 { | |
601 return extendNoSync(p, minsize, maxsize); | |
602 } | |
603 } | |
604 | |
605 | |
606 // | |
607 // | |
608 // | |
609 private size_t extendNoSync(void* p, size_t minsize, size_t maxsize) | |
610 in | |
611 { | |
612 assert( minsize <= maxsize ); | |
613 } | |
614 body | |
615 { | |
616 //debug(PRINTF) printf("GC::extend(p = %x, minsize = %u, maxsize = %u)\n", p, minsize, maxsize); | |
617 version (SENTINEL) | |
618 { | |
619 return 0; | |
620 } | |
621 auto psize = gcx.findSize(p); // find allocated size | |
622 if (psize < PAGESIZE) | |
623 return 0; // cannot extend buckets | |
624 | |
625 auto psz = psize / PAGESIZE; | |
626 auto minsz = (minsize + PAGESIZE - 1) / PAGESIZE; | |
627 auto maxsz = (maxsize + PAGESIZE - 1) / PAGESIZE; | |
628 | |
629 auto pool = gcx.findPool(p); | |
630 auto pagenum = (p - pool.baseAddr) / PAGESIZE; | |
631 | |
632 size_t sz; | |
633 for (sz = 0; sz < maxsz; sz++) | |
634 { | |
635 auto i = pagenum + psz + sz; | |
636 if (i == pool.ncommitted) | |
637 break; | |
638 if (pool.pagetable[i] != B_FREE) | |
639 { if (sz < minsz) | |
640 return 0; | |
641 break; | |
642 } | |
643 } | |
644 if (sz >= minsz) | |
645 { | |
646 } | |
647 else if (pagenum + psz + sz == pool.ncommitted) | |
648 { | |
649 auto u = pool.extendPages(minsz - sz); | |
650 if (u == ~0u) | |
651 return 0; | |
652 sz = minsz; | |
653 } | |
654 else | |
655 return 0; | |
656 debug (MEMSTOMP) cstring.memset(p + psize, 0xF0, (psz + sz) * PAGESIZE - psize); | |
657 cstring.memset(pool.pagetable + pagenum + psz, B_PAGEPLUS, sz); | |
658 gcx.p_cache = null; | |
659 gcx.size_cache = 0; | |
660 return (psz + sz) * PAGESIZE; | |
661 } | |
662 | |
663 | |
664 /** | |
665 * | |
666 */ | |
114 void free(void *p) | 667 void free(void *p) |
115 { | 668 { |
116 debug(PRINTF) printf("GC free()\n"); | 669 if (!p) |
117 stdc.free(p); | 670 { |
118 } | 671 return; |
119 | 672 } |
673 | |
674 if (!thread_needLock()) | |
675 { | |
676 return freeNoSync(p); | |
677 } | |
678 else synchronized (gcLock) | |
679 { | |
680 return freeNoSync(p); | |
681 } | |
682 } | |
683 | |
684 | |
685 // | |
686 // | |
687 // | |
688 private void freeNoSync(void *p) | |
689 { | |
690 assert (p); | |
691 | |
692 Pool *pool; | |
693 uint pagenum; | |
694 Bins bin; | |
695 uint biti; | |
696 | |
697 // Find which page it is in | |
698 pool = gcx.findPool(p); | |
699 if (!pool) // if not one of ours | |
700 return; // ignore | |
701 sentinel_Invariant(p); | |
702 p = sentinel_sub(p); | |
703 pagenum = (p - pool.baseAddr) / PAGESIZE; | |
704 biti = cast(uint)(p - pool.baseAddr) / 16; | |
705 pool.noscan.clear(biti); | |
706 if (pool.finals.nbits && pool.finals.testClear(biti)) | |
707 gcx.rt_finalize(sentinel_add(p), false); | |
708 | |
709 gcx.p_cache = null; | |
710 bin = cast(Bins)pool.pagetable[pagenum]; | |
711 if (bin == B_PAGE) // if large alloc | |
712 { int npages; | |
713 uint n; | |
714 | |
715 // Free pages | |
716 npages = 1; | |
717 n = pagenum; | |
718 while (++n < pool.ncommitted && pool.pagetable[n] == B_PAGEPLUS) | |
719 npages++; | |
720 debug (MEMSTOMP) cstring.memset(p, 0xF2, npages * PAGESIZE); | |
721 pool.freePages(pagenum, npages); | |
722 } | |
723 else | |
724 { // Add to free list | |
725 List *list = cast(List *)p; | |
726 | |
727 debug (MEMSTOMP) cstring.memset(p, 0xF2, binsize[bin]); | |
728 | |
729 list.next = gcx.bucket[bin]; | |
730 gcx.bucket[bin] = list; | |
731 } | |
732 gcx.log_free(sentinel_add(p)); | |
733 } | |
734 | |
735 | |
736 /** | |
737 * Determine the allocated size of pointer p. If p is an interior pointer | |
738 * or not a gc allocated pointer, return 0. | |
739 */ | |
120 size_t capacity(void *p) | 740 size_t capacity(void *p) |
121 { | 741 { |
122 debug(PRINTF) printf("GC capacity()\n"); | 742 if (!p) |
123 return 0; | 743 { |
124 } | 744 return 0; |
125 | 745 } |
746 | |
747 if (!thread_needLock()) | |
748 { | |
749 return sizeOfNoSync(p); | |
750 } | |
751 else synchronized (gcLock) | |
752 { | |
753 return sizeOfNoSync(p); | |
754 } | |
755 } | |
756 | |
757 | |
758 // | |
759 // | |
760 // | |
761 private size_t sizeOfNoSync(void *p) | |
762 { | |
763 assert (p); | |
764 | |
765 version (SENTINEL) | |
766 { | |
767 p = sentinel_sub(p); | |
768 size_t size = gcx.findSize(p); | |
769 | |
770 // Check for interior pointer | |
771 // This depends on: | |
772 // 1) size is a power of 2 for less than PAGESIZE values | |
773 // 2) base of memory pool is aligned on PAGESIZE boundary | |
774 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1)) | |
775 size = 0; | |
776 return size ? size - SENTINAL_EXTRA : 0; | |
777 } | |
778 else | |
779 { | |
780 if (p !is null && p == gcx.p_cache) | |
781 return gcx.size_cache; | |
782 | |
783 size_t size = gcx.findSize(p); | |
784 | |
785 // Check for interior pointer | |
786 // This depends on: | |
787 // 1) size is a power of 2 for less than PAGESIZE values | |
788 // 2) base of memory pool is aligned on PAGESIZE boundary | |
789 if (cast(size_t)p & (size - 1) & (PAGESIZE - 1)) | |
790 size = 0; | |
791 else | |
792 { | |
793 gcx.p_cache = p; | |
794 gcx.size_cache = size; | |
795 } | |
796 | |
797 return size; | |
798 } | |
799 } | |
800 | |
801 | |
802 /** | |
803 * Verify that pointer p: | |
804 * 1) belongs to this memory pool | |
805 * 2) points to the start of an allocated piece of memory | |
806 * 3) is not on a free list | |
807 */ | |
126 void check(void *p) | 808 void check(void *p) |
127 { | 809 { |
128 debug(PRINTF) printf("GC check()\n"); | 810 if (!p) |
129 } | 811 { |
130 | 812 return; |
131 | 813 } |
132 void setStackBottom(void *p) | 814 |
133 { | 815 if (!thread_needLock()) |
134 debug(PRINTF) printf("GC setStackBottom()\n"); | 816 { |
135 } | 817 checkNoSync(p); |
818 } | |
819 else synchronized (gcLock) | |
820 { | |
821 checkNoSync(p); | |
822 } | |
823 } | |
824 | |
825 // | |
826 // | |
827 // | |
828 private void checkNoSync(void *p) | |
829 { | |
830 assert(p); | |
831 | |
832 sentinel_Invariant(p); | |
833 debug (PTRCHECK) | |
834 { | |
835 Pool* pool; | |
836 uint pagenum; | |
837 Bins bin; | |
838 size_t size; | |
839 | |
840 p = sentinel_sub(p); | |
841 pool = gcx.findPool(p); | |
842 assert(pool); | |
843 pagenum = (p - pool.baseAddr) / PAGESIZE; | |
844 bin = cast(Bins)pool.pagetable[pagenum]; | |
845 assert(bin <= B_PAGE); | |
846 size = binsize[bin]; | |
847 assert((cast(size_t)p & (size - 1)) == 0); | |
848 | |
849 debug (PTRCHECK2) | |
850 { | |
851 if (bin < B_PAGE) | |
852 { | |
853 // Check that p is not on a free list | |
854 List *list; | |
855 | |
856 for (list = gcx.bucket[bin]; list; list = list.next) | |
857 { | |
858 assert(cast(void *)list != p); | |
859 } | |
860 } | |
861 } | |
862 } | |
863 } | |
864 | |
865 | |
866 // | |
867 // | |
868 // | |
869 private void setStackBottom(void *p) | |
870 { | |
871 version (STACKGROWSDOWN) | |
872 { | |
873 //p = (void *)((uint *)p + 4); | |
874 if (p > gcx.stackBottom) | |
875 { | |
876 //debug(PRINTF) printf("setStackBottom(%x)\n", p); | |
877 gcx.stackBottom = p; | |
878 } | |
879 } | |
880 else | |
881 { | |
882 //p = (void *)((uint *)p - 4); | |
883 if (p < gcx.stackBottom) | |
884 { | |
885 //debug(PRINTF) printf("setStackBottom(%x)\n", p); | |
886 gcx.stackBottom = cast(char *)p; | |
887 } | |
888 } | |
889 } | |
890 | |
136 | 891 |
137 static void scanStaticData(gc_t g) | 892 static void scanStaticData(gc_t g) |
138 { | 893 { |
894 void *pbot; | |
895 void *ptop; | |
896 size_t nbytes; | |
897 | |
898 //debug(PRINTF) printf("+GC.scanStaticData()\n"); | |
899 os_query_staticdataseg(&pbot, &nbytes); | |
900 ptop = pbot + nbytes; | |
901 version (GNU) { | |
902 if (pbot) { | |
903 g.addRange(pbot, ptop); | |
904 } | |
905 } else { | |
906 g.addRange(pbot, ptop); | |
907 } | |
908 //debug(PRINTF) printf("-GC.scanStaticData()\n"); | |
909 } | |
910 | |
911 static void unscanStaticData(gc_t g) | |
912 { | |
913 void *pbot; | |
914 size_t nbytes; | |
915 | |
916 os_query_staticdataseg(&pbot, &nbytes); | |
917 version (GNU) { | |
918 if (pbot) { | |
919 g.removeRange(pbot); | |
920 } | |
921 } else { | |
922 g.removeRange(pbot); | |
923 } | |
924 } | |
925 | |
926 | |
927 /** | |
928 * add p to list of roots | |
929 */ | |
930 void addRoot(void *p) | |
931 { | |
932 if (!p) | |
933 { | |
934 return; | |
935 } | |
936 | |
937 if (!thread_needLock()) | |
938 { | |
939 gcx.addRoot(p); | |
940 } | |
941 else synchronized (gcLock) | |
942 { | |
943 gcx.addRoot(p); | |
944 } | |
945 } | |
946 | |
947 | |
948 /** | |
949 * remove p from list of roots | |
950 */ | |
951 void removeRoot(void *p) | |
952 { | |
953 if (!p) | |
954 { | |
955 return; | |
956 } | |
957 | |
958 if (!thread_needLock()) | |
959 { | |
960 gcx.removeRoot(p); | |
961 } | |
962 else synchronized (gcLock) | |
963 { | |
964 gcx.removeRoot(p); | |
965 } | |
966 } | |
967 | |
968 | |
969 /** | |
970 * add range to scan for roots | |
971 */ | |
972 void addRange(void *pbot, void *ptop) | |
973 { | |
974 if (!pbot || !ptop) | |
975 { | |
976 return; | |
977 } | |
978 | |
979 //debug(PRINTF) printf("+GC.addRange(pbot = x%x, ptop = x%x)\n", pbot, ptop); | |
980 if (!thread_needLock()) | |
981 { | |
982 gcx.addRange(pbot, ptop); | |
983 } | |
984 else synchronized (gcLock) | |
985 { | |
986 gcx.addRange(pbot, ptop); | |
987 } | |
988 //debug(PRINTF) printf("-GC.addRange()\n"); | |
989 } | |
990 | |
991 | |
992 /** | |
993 * remove range | |
994 */ | |
995 void removeRange(void *p) | |
996 { | |
997 if (!p) | |
998 { | |
999 return; | |
1000 } | |
1001 | |
1002 if (!thread_needLock()) | |
1003 { | |
1004 gcx.removeRange(p); | |
1005 } | |
1006 else synchronized (gcLock) | |
1007 { | |
1008 gcx.removeRange(p); | |
1009 } | |
1010 } | |
1011 | |
1012 | |
1013 /** | |
1014 * do full garbage collection | |
1015 */ | |
1016 void fullCollect() | |
1017 { | |
1018 debug(PRINTF) printf("GC.fullCollect()\n"); | |
1019 | |
1020 if (!thread_needLock()) | |
1021 { | |
1022 gcx.fullcollectshell(); | |
1023 } | |
1024 else synchronized (gcLock) | |
1025 { | |
1026 gcx.fullcollectshell(); | |
1027 } | |
1028 | |
1029 version (none) | |
1030 { | |
1031 GCStats stats; | |
1032 | |
1033 getStats(stats); | |
1034 debug(PRINTF) printf("poolsize = %x, usedsize = %x, freelistsize = %x\n", | |
1035 stats.poolsize, stats.usedsize, stats.freelistsize); | |
1036 } | |
1037 | |
1038 gcx.log_collect(); | |
1039 } | |
1040 | |
1041 | |
1042 /** | |
1043 * do full garbage collection ignoring roots | |
1044 */ | |
1045 void fullCollectNoStack() | |
1046 { | |
1047 if (!thread_needLock()) | |
1048 { | |
1049 gcx.noStack++; | |
1050 gcx.fullcollectshell(); | |
1051 gcx.noStack--; | |
1052 } | |
1053 else synchronized (gcLock) | |
1054 { | |
1055 gcx.noStack++; | |
1056 gcx.fullcollectshell(); | |
1057 gcx.noStack--; | |
1058 } | |
1059 } | |
1060 | |
1061 | |
1062 /** | |
1063 * do generational garbage collection | |
1064 */ | |
1065 void genCollect() | |
1066 { | |
1067 synchronized (gcLock) | |
1068 { | |
1069 gcx.fullcollectshell(); | |
1070 } | |
1071 } | |
1072 | |
1073 void minimize() // minimize physical memory usage | |
1074 { | |
1075 // Not implemented, ignore | |
1076 } | |
1077 | |
1078 void setFinalizer(void *p, GC_FINALIZER pFn) | |
1079 { | |
1080 synchronized (gcLock) | |
1081 { | |
1082 gcx.finalizer = pFn; | |
1083 gcx.doFinalize(p); | |
1084 } | |
1085 } | |
1086 | |
1087 | |
1088 void hasPointers(void *p) | |
1089 { | |
1090 synchronized (gcLock) | |
1091 { | |
1092 gcx.HasPointers(p); | |
1093 } | |
1094 } | |
1095 | |
1096 | |
1097 void hasNoPointers(void *p) | |
1098 { | |
1099 if (!gcx.conservative) | |
1100 { synchronized (gcLock) | |
1101 { | |
1102 gcx.HasNoPointers(p); | |
1103 } | |
1104 } | |
1105 } | |
1106 | |
1107 | |
1108 void setV1_0() | |
1109 { | |
1110 gcx.conservative = 1; | |
1111 } | |
1112 | |
1113 /** | |
1114 * Retrieve statistics about garbage collection. | |
1115 * Useful for debugging and tuning. | |
1116 */ | |
1117 void getStats(out GCStats stats) | |
1118 { | |
1119 if (!thread_needLock()) | |
1120 { | |
1121 getStatsNoSync(stats); | |
1122 } | |
1123 else synchronized (gcLock) | |
1124 { | |
1125 getStatsNoSync(stats); | |
1126 } | |
1127 } | |
1128 | |
1129 // | |
1130 // | |
1131 // | |
1132 private void getStatsNoSync(out GCStats stats) | |
1133 { | |
1134 size_t psize = 0; | |
1135 size_t usize = 0; | |
1136 size_t flsize = 0; | |
1137 | |
1138 size_t n; | |
1139 size_t bsize = 0; | |
1140 | |
1141 //debug(PRINTF) printf("getStats()\n"); | |
1142 cstring.memset(&stats, 0, GCStats.sizeof); | |
1143 | |
1144 for (n = 0; n < gcx.npools; n++) | |
1145 { Pool *pool = gcx.pooltable[n]; | |
1146 | |
1147 psize += pool.ncommitted * PAGESIZE; | |
1148 for (uint j = 0; j < pool.ncommitted; j++) | |
1149 { | |
1150 Bins bin = cast(Bins)pool.pagetable[j]; | |
1151 if (bin == B_FREE) | |
1152 stats.freeblocks++; | |
1153 else if (bin == B_PAGE) | |
1154 stats.pageblocks++; | |
1155 else if (bin < B_PAGE) | |
1156 bsize += PAGESIZE; | |
1157 } | |
1158 } | |
1159 | |
1160 for (n = 0; n < B_PAGE; n++) | |
1161 { | |
1162 //debug(PRINTF) printf("bin %d\n", n); | |
1163 for (List *list = gcx.bucket[n]; list; list = list.next) | |
1164 { | |
1165 //debug(PRINTF) printf("\tlist %x\n", list); | |
1166 flsize += binsize[n]; | |
1167 } | |
1168 } | |
1169 | |
1170 usize = bsize - flsize; | |
1171 | |
1172 stats.poolsize = psize; | |
1173 stats.usedsize = bsize - flsize; | |
1174 stats.freelistsize = flsize; | |
1175 } | |
1176 } | |
1177 | |
1178 | |
1179 /* ============================ Gcx =============================== */ | |
1180 | |
1181 enum | |
1182 { PAGESIZE = 4096, | |
1183 COMMITSIZE = (4096*16), | |
1184 POOLSIZE = (4096*256), | |
1185 } | |
1186 | |
1187 | |
1188 enum | |
1189 { | |
1190 B_16, | |
1191 B_32, | |
1192 B_64, | |
1193 B_128, | |
1194 B_256, | |
1195 B_512, | |
1196 B_1024, | |
1197 B_2048, | |
1198 B_PAGE, // start of large alloc | |
1199 B_PAGEPLUS, // continuation of large alloc | |
1200 B_FREE, // free page | |
1201 B_UNCOMMITTED, // memory not committed for this page | |
1202 B_MAX | |
1203 } | |
1204 | |
1205 | |
1206 alias ubyte Bins; | |
1207 | |
1208 | |
1209 struct List | |
1210 { | |
1211 List *next; | |
1212 } | |
1213 | |
1214 | |
/**
 * A registered root range [pbot, ptop) of memory that is scanned
 * conservatively for pointers by Gcx.mark().
 */
struct Range
{
    void *pbot;     // inclusive lower bound
    void *ptop;     // exclusive upper bound
}
1220 | |
1221 | |
// Slot size for each bin, and masks that round a pool offset down to the
// start of its allocation (bin index B_PAGE maps to the page size).
// NOTE(review): both arrays are declared [B_MAX] but list only 9 entries;
// indexing with B_PAGEPLUS/B_FREE relies on the trailing defaults -- confirm.
const uint binsize[B_MAX] = [ 16,32,64,128,256,512,1024,2048,4096 ];
const uint notbinsize[B_MAX] = [ ~(16u-1),~(32u-1),~(64u-1),~(128u-1),~(256u-1),
                                ~(512u-1),~(1024u-1),~(2048u-1),~(4096u-1) ];
1225 | |
1226 /* ============================ Gcx =============================== */ | |
1227 | |
1228 | |
1229 struct Gcx | |
1230 { | |
    // When built with -debug=THREADINVARIANT, remember the thread that
    // created this Gcx and assert every use happens on that same thread.
    debug (THREADINVARIANT)
    {
        pthread_t self;     // owning thread, captured in initialize()
        void thread_Invariant()
        {
            if (self != pthread_self())
                printf("thread_Invariant(): gcx = %x, self = %x, pthread_self() = %x\n", this, self, pthread_self());
            assert(self == pthread_self());
        }
    }
    else
    {
        void thread_Invariant() { }     // release builds: no-op
    }
1245 | |
    void *p_cache;              // last pointer whose size was looked up
    size_t size_cache;          // its cached size (both reset by fullcollect)

    size_t nroots;              // entries used in roots[]
    size_t rootdim;             // allocated capacity of roots[]
    void **roots;               // table of single root pointers

    size_t nranges;             // entries used in ranges[]
    size_t rangedim;            // allocated capacity of ranges[]
    Range *ranges;              // table of root memory ranges

    uint conservative;  // !=0 means conservative behavior
    uint noStack;       // !=0 means don't scan stack
    uint log;           // turn on logging
    uint anychanges;            // set by mark() when new scannable objects appear
    void *stackBottom;          // captured in initialize(); end of main-stack scan
    uint inited;                // !=0 once initialize() ran; gates the invariant
    int disabled;       // turn off collections if >0

    byte *minAddr;      // min(baseAddr)
    byte *maxAddr;      // max(topAddr)

    uint npools;                // entries in pooltable[]
    Pool **pooltable;           // pools sorted ascending by address

    List *bucket[B_MAX];        // free list for each size

    GC_FINALIZER finalizer;     // finalizer function (one per GC)
1274 | |
1275 private void rt_finalize( void* p, bool dummy ) | |
1276 { | |
1277 if (finalizer) | |
1278 (*finalizer)(p, dummy); | |
1279 } | |
1280 | |
1281 | |
    /**
     * One-time setup: zero the whole struct, record the main-stack
     * bottom and (in debug builds) the owning thread, then mark the
     * collector usable.
     */
    void initialize()
    {   int dummy;

        // Zero every field; all tables/counts start out empty.
        (cast(byte *)this)[0 .. Gcx.sizeof] = 0;
        // The address of a local approximates the bottom of the main stack.
        stackBottom = cast(char *)&dummy;
        log_init();
        debug (THREADINVARIANT)
            self = pthread_self();
        //printf("gcx = %p, self = %x\n", this, self);
        inited = 1;     // from here on the struct invariant is enforced
    }
1293 | |
1294 | |
1295 void Dtor() | |
1296 { | |
1297 inited = 0; | |
1298 | |
1299 for (uint i = 0; i < npools; i++) | |
1300 { Pool *pool = pooltable[i]; | |
1301 | |
1302 pool.Dtor(); | |
1303 cstdlib.free(pool); | |
1304 } | |
1305 if (pooltable) | |
1306 cstdlib.free(pooltable); | |
1307 | |
1308 if (roots) | |
1309 cstdlib.free(roots); | |
1310 | |
1311 if (ranges) | |
1312 cstdlib.free(ranges); | |
1313 } | |
1314 | |
1315 | |
    void Invariant() { }    // explicit no-op; the real checks live in invariant() below
1317 | |
1318 | |
    /**
     * Struct invariant, enforced only after initialize() has run.
     * Checks that the pool table is address-sorted with consistent
     * min/max bounds, that the root/range tables are well-formed,
     * and that every free list is walkable.
     */
    invariant()
    {
        if (inited)
        {
            //printf("Gcx.invariant(): this = %p\n", this);
            uint i;

            // Assure we're called on the right thread
            debug (THREADINVARIANT) assert(self == pthread_self());

            for (i = 0; i < npools; i++)
            {   Pool *pool = pooltable[i];

                pool.Invariant();
                if (i == 0)
                {
                    assert(minAddr == pool.baseAddr);
                }
                if (i + 1 < npools)
                {
                    // table must stay sorted ascending by address
                    assert(pool.opCmp(pooltable[i + 1]) < 0);
                }
                else if (i + 1 == npools)
                {
                    assert(maxAddr == pool.topAddr);
                }
            }

            if (roots)
            {
                assert(rootdim != 0);
                assert(nroots <= rootdim);
            }

            if (ranges)
            {
                assert(rangedim != 0);
                assert(nranges <= rangedim);

                for (i = 0; i < nranges; i++)
                {
                    assert(ranges[i].pbot);
                    assert(ranges[i].ptop);
                    assert(ranges[i].pbot <= ranges[i].ptop);
                }
            }

            // Walk each free list; a crash here indicates list corruption.
            for (i = 0; i < B_PAGE; i++)
            {
                for (List *list = bucket[i]; list; list = list.next)
                {
                }
            }
        }
    }
1374 | |
1375 | |
1376 /** | |
1377 * | |
1378 */ | |
1379 | |
1380 void addRoot(void *p) | |
1381 { | |
1382 if (nroots == rootdim) | |
1383 { | |
1384 size_t newdim = rootdim * 2 + 16; | |
1385 void** newroots; | |
1386 | |
1387 newroots = cast(void **)cstdlib.malloc(newdim * newroots[0].sizeof); | |
1388 if (!newroots) | |
1389 onOutOfMemoryError(); | |
1390 if (roots) | |
1391 { cstring.memcpy(newroots, roots, nroots * newroots[0].sizeof); | |
1392 cstdlib.free(roots); | |
1393 } | |
1394 roots = newroots; | |
1395 rootdim = newdim; | |
1396 } | |
1397 roots[nroots] = p; | |
1398 nroots++; | |
1399 } | |
1400 | |
1401 | |
1402 /** | |
1403 * | |
1404 */ | |
1405 void removeRoot(void *p) | |
1406 { | |
1407 for (size_t i = nroots; i--;) | |
1408 { | |
1409 if (roots[i] == p) | |
1410 { | |
1411 nroots--; | |
1412 cstring.memmove(roots + i, roots + i + 1, (nroots - i) * roots[0].sizeof); | |
1413 return; | |
1414 } | |
1415 } | |
1416 assert(0); | |
1417 } | |
1418 | |
1419 | |
1420 /** | |
1421 * | |
1422 */ | |
1423 void addRange(void *pbot, void *ptop) | |
1424 { | |
1425 debug(THREADINVARIANT) { debug(PRINTF) printf("Thread %x ", pthread_self()); } | |
1426 debug(PRINTF) printf("%x.Gcx::addRange(%x, %x), nranges = %d\n", this, pbot, ptop, nranges); | |
1427 if (nranges == rangedim) | |
1428 { | |
1429 size_t newdim = rangedim * 2 + 16; | |
1430 Range *newranges; | |
1431 | |
1432 newranges = cast(Range *)cstdlib.malloc(newdim * newranges[0].sizeof); | |
1433 if (!newranges) | |
1434 onOutOfMemoryError(); | |
1435 if (ranges) | |
1436 { cstring.memcpy(newranges, ranges, nranges * newranges[0].sizeof); | |
1437 cstdlib.free(ranges); | |
1438 } | |
1439 ranges = newranges; | |
1440 rangedim = newdim; | |
1441 } | |
1442 ranges[nranges].pbot = pbot; | |
1443 ranges[nranges].ptop = ptop; | |
1444 nranges++; | |
1445 } | |
1446 | |
1447 | |
1448 /** | |
1449 * | |
1450 */ | |
1451 void removeRange(void *pbot) | |
1452 { | |
1453 debug(THREADINVARIANT) { debug(PRINTF) printf("Thread %x ", pthread_self()); } | |
1454 debug(PRINTF) printf("%x.Gcx.removeRange(%x), nranges = %d\n", this, pbot, nranges); | |
1455 for (size_t i = nranges; i--;) | |
1456 { | |
1457 if (ranges[i].pbot == pbot) | |
1458 { | |
1459 nranges--; | |
1460 cstring.memmove(ranges + i, ranges + i + 1, (nranges - i) * ranges[0].sizeof); | |
1461 return; | |
1462 } | |
1463 } | |
1464 debug(PRINTF) printf("Wrong thread\n"); | |
1465 | |
1466 // This is a fatal error, but ignore it. | |
1467 // The problem is that we can get a Close() call on a thread | |
1468 // other than the one the range was allocated on. | |
1469 //assert(zero); | |
1470 } | |
1471 | |
1472 | |
1473 /** | |
1474 * Find Pool that pointer is in. | |
1475 * Return null if not in a Pool. | |
1476 * Assume pooltable[] is sorted. | |
1477 */ | |
1478 Pool *findPool(void *p) | |
1479 { | |
1480 if (p >= minAddr && p < maxAddr) | |
1481 { | |
1482 if (npools == 1) | |
1483 { | |
1484 return pooltable[0]; | |
1485 } | |
1486 | |
1487 for (uint i = 0; i < npools; i++) | |
1488 { Pool *pool; | |
1489 | |
1490 pool = pooltable[i]; | |
1491 if (p < pool.topAddr) | |
1492 { if (pool.baseAddr <= p) | |
1493 return pool; | |
1494 break; | |
1495 } | |
1496 } | |
1497 } | |
1498 return null; | |
1499 } | |
1500 | |
1501 | |
    /**
     * Find size of pointer p.
     * Returns 0 if not a gc'd pointer
     */
    size_t findSize(void *p)
    {
        Pool *pool;
        size_t size = 0;

        pool = findPool(p);
        if (pool)
        {
            uint pagenum;
            Bins bin;

            // NOTE(review): cast(uint) truncates the offset on 64-bit targets;
            // assumes a pool never spans more than 4 GB -- confirm.
            pagenum = (cast(uint)(p - pool.baseAddr)) / PAGESIZE;
            bin = cast(Bins)pool.pagetable[pagenum];
            // For non-data bins (B_PAGEPLUS/B_FREE/B_UNCOMMITTED) this relies
            // on the unlisted binsize[] entries defaulting to 0.
            size = binsize[bin];
            if (bin == B_PAGE)
            {   uint npages = pool.ncommitted;
                ubyte* pt;
                uint i;

                // A large block spans its B_PAGE page plus all following
                // B_PAGEPLUS continuation pages.
                pt = &pool.pagetable[0];
                for (i = pagenum + 1; i < npages; i++)
                {
                    if (pt[i] != B_PAGEPLUS)
                        break;
                }
                size = (i - pagenum) * PAGESIZE;
            }
        }
        return size;
    }
1536 | |
1537 | |
1538 /** | |
1539 * Compute bin for size. | |
1540 */ | |
1541 static Bins findBin(size_t size) | |
1542 { Bins bin; | |
1543 | |
1544 if (size <= 256) | |
1545 { | |
1546 if (size <= 64) | |
1547 { | |
1548 if (size <= 16) | |
1549 bin = B_16; | |
1550 else if (size <= 32) | |
1551 bin = B_32; | |
1552 else | |
1553 bin = B_64; | |
1554 } | |
1555 else | |
1556 { | |
1557 if (size <= 128) | |
1558 bin = B_128; | |
1559 else | |
1560 bin = B_256; | |
1561 } | |
1562 } | |
1563 else | |
1564 { | |
1565 if (size <= 1024) | |
1566 { | |
1567 if (size <= 512) | |
1568 bin = B_512; | |
1569 else | |
1570 bin = B_1024; | |
1571 } | |
1572 else | |
1573 { | |
1574 if (size <= 2048) | |
1575 bin = B_2048; | |
1576 else | |
1577 bin = B_PAGE; | |
1578 } | |
1579 } | |
1580 return bin; | |
1581 } | |
1582 | |
1583 | |
    /**
     * Allocate a chunk of memory that is larger than a page.
     * Return null if out of memory.
     *
     * Retry protocol: state 0 = first attempt (may collect), state 1 =
     * after a collection or while disabled (grow instead), state 2 = give up.
     */
    void *bigAlloc(size_t size)
    {
        Pool *pool;
        uint npages;
        uint n;
        uint pn;
        uint freedpages;
        void *p;
        int state;      // retry state, see above

        npages = (size + PAGESIZE - 1) / PAGESIZE;      // round up to whole pages

        for (state = 0; ; )
        {
            // This code could use some refinement when repeatedly
            // allocating very large arrays.

            // First try to take npages from an existing pool.
            for (n = 0; n < npools; n++)
            {
                pool = pooltable[n];
                pn = pool.allocPages(npages);
                if (pn != ~0u)
                    goto L1;
            }

            // Failed
            switch (state)
            {
            case 0:
                if (disabled)
                {   state = 1;      // collections are off; go straight to growing
                    continue;
                }
                // Try collecting
                freedpages = fullcollectshell();
                if (freedpages >= npools * ((POOLSIZE / PAGESIZE) / 4))
                {   state = 1;      // freed at least a quarter of capacity; retry pools
                    continue;
                }
                // Allocate new pool
                pool = newPool(npages);
                if (!pool)
                {   state = 2;
                    continue;
                }
                pn = pool.allocPages(npages);
                assert(pn != ~0u);
                goto L1;

            case 1:
                // Allocate new pool
                pool = newPool(npages);
                if (!pool)
                    goto Lnomemory;
                pn = pool.allocPages(npages);
                assert(pn != ~0u);
                goto L1;

            case 2:
                goto Lnomemory;
            }
        }

    L1:
        // Mark the first page B_PAGE and all continuation pages B_PAGEPLUS.
        pool.pagetable[pn] = B_PAGE;
        if (npages > 1)
            cstring.memset(&pool.pagetable[pn + 1], B_PAGEPLUS, npages - 1);
        p = pool.baseAddr + pn * PAGESIZE;
        // Zero the slack between the requested size and the page boundary.
        cstring.memset(cast(char *)p + size, 0, npages * PAGESIZE - size);
        debug (MEMSTOMP) cstring.memset(p, 0xF1, size);
        //debug(PRINTF) printf("\tp = %x\n", p);
        return p;

    Lnomemory:
        onOutOfMemoryError();
        return null;
    }
1665 | |
1666 | |
    /**
     * Allocate a new pool with at least npages in it.
     * Sort it into pooltable[].
     * Return null if failed.
     */
    Pool *newPool(uint npages)
    {
        Pool *pool;
        Pool **newpooltable;
        uint newnpools;
        uint i;

        //debug(PRINTF) printf("************Gcx::newPool(npages = %d)****************\n", npages);

        // Round up to COMMITSIZE pages
        npages = (npages + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);

        // Minimum of POOLSIZE
        if (npages < POOLSIZE/PAGESIZE)
            npages = POOLSIZE/PAGESIZE;
        else if (npages > POOLSIZE/PAGESIZE)
        {   // Give us 150% of requested size, so there's room to extend
            auto n = npages + (npages >> 1);
            if (n < size_t.max/PAGESIZE)    // guard against overflow of n*PAGESIZE
                npages = n;
        }

        // Allocate successively larger pools up to 8 megs
        if (npools)
        {   uint n;

            n = npools;
            if (n > 8)
                n = 8;                  // cap pool size at 8 megs
            n *= (POOLSIZE / PAGESIZE);
            if (npages < n)
                npages = n;
        }

        pool = cast(Pool *)cstdlib.calloc(1, Pool.sizeof);
        if (pool)
        {
            pool.initialize(npages);
            if (!pool.baseAddr)
                goto Lerr;

            newnpools = npools + 1;
            newpooltable = cast(Pool **)cstdlib.realloc(pooltable, newnpools * (Pool *).sizeof);
            if (!newpooltable)
                goto Lerr;

            // Sort pool into newpooltable[] (kept ascending by address)
            for (i = 0; i < npools; i++)
            {
                if (pool.opCmp(newpooltable[i]) < 0)
                    break;
            }
            cstring.memmove(newpooltable + i + 1, newpooltable + i, (npools - i) * (Pool *).sizeof);
            newpooltable[i] = pool;

            pooltable = newpooltable;
            npools = newnpools;

            // Refresh the cached address bounds used by findPool()/mark().
            minAddr = pooltable[0].baseAddr;
            maxAddr = pooltable[npools - 1].topAddr;
        }
        return pool;

    Lerr:
        pool.Dtor();
        cstdlib.free(pool);
        return null;
    }
1740 | |
1741 | |
1742 /** | |
1743 * Allocate a page of bin's. | |
1744 * Returns: | |
1745 * 0 failed | |
1746 */ | |
1747 int allocPage(Bins bin) | |
1748 { | |
1749 Pool *pool; | |
1750 uint n; | |
1751 uint pn; | |
1752 byte *p; | |
1753 byte *ptop; | |
1754 | |
1755 //debug(PRINTF) printf("Gcx::allocPage(bin = %d)\n", bin); | |
1756 for (n = 0; n < npools; n++) | |
1757 { | |
1758 pool = pooltable[n]; | |
1759 pn = pool.allocPages(1); | |
1760 if (pn != ~0u) | |
1761 goto L1; | |
1762 } | |
1763 return 0; // failed | |
1764 | |
1765 L1: | |
1766 pool.pagetable[pn] = cast(ubyte)bin; | |
1767 | |
1768 // Convert page to free list | |
1769 size_t size = binsize[bin]; | |
1770 List **b = &bucket[bin]; | |
1771 | |
1772 p = pool.baseAddr + pn * PAGESIZE; | |
1773 ptop = p + PAGESIZE; | |
1774 for (; p < ptop; p += size) | |
1775 { | |
1776 (cast(List *)p).next = *b; | |
1777 *b = cast(List *)p; | |
1778 } | |
1779 return 1; | |
1780 } | |
1781 | |
1782 | |
    /**
     * Search a range of memory values and mark any pointers into the GC pool.
     * Conservative: every word in [pbot, ptop) is treated as a potential
     * pointer.  Objects newly marked as scannable set anychanges so the
     * caller (fullcollect) keeps iterating to a fixed point.
     */
    void mark(void *pbot, void *ptop)
    {
        void **p1 = cast(void **)pbot;
        void **p2 = cast(void **)ptop;
        uint changes = 0;

        //printf("marking range: %p -> %p\n", pbot, ptop);
        for (; p1 < p2; p1++)
        {
            Pool *pool;
            byte *p = cast(byte *)(*p1);

            //if (log) debug(PRINTF) printf("\tmark %x\n", p);
            if (p >= minAddr)   // cheap pre-filter before the pool lookup
            {
                pool = findPool(p);
                if (pool)
                {
                    size_t offset = cast(size_t)(p - pool.baseAddr);
                    uint biti;      // bit index: one bit per 16-byte granule
                    uint pn = offset / PAGESIZE;
                    Bins bin = cast(Bins)pool.pagetable[pn];

                    //debug(PRINTF) printf("\t\tfound pool %x, base=%x, pn = %d, bin = %d, biti = x%x\n", pool, pool.baseAddr, pn, bin, biti);

                    // Adjust bit to be at start of allocated memory block
                    if (bin <= B_PAGE)
                    {
                        biti = (offset & notbinsize[bin]) >> 4;
                        //debug(PRINTF) printf("\t\tbiti = x%x\n", biti);
                    }
                    else if (bin == B_PAGEPLUS)
                    {
                        // Walk back to the B_PAGE page heading this large block.
                        do
                        {   --pn;
                        } while (cast(Bins)pool.pagetable[pn] == B_PAGEPLUS);
                        biti = pn * (PAGESIZE / 16);
                    }
                    else
                    {
                        // Don't mark bits in B_FREE or B_UNCOMMITTED pages
                        continue;
                    }

                    //debug(PRINTF) printf("\t\tmark(x%x) = %d\n", biti, pool.mark.test(biti));
                    if (!pool.mark.test(biti))
                    {
                        //if (log) debug(PRINTF) printf("\t\tmarking %x\n", p);
                        pool.mark.set(biti);
                        if (!pool.noscan.test(biti))
                        {
                            // queue the object itself for scanning
                            pool.scan.set(biti);
                            changes = 1;
                        }
                        log_parent(sentinel_add(pool.baseAddr + biti * 16), sentinel_add(pbot));
                    }
                }
            }
        }
        anychanges |= changes;
    }
1847 | |
1848 | |
    /**
     * Return number of full pages free'd.
     */
    size_t fullcollectshell()
    {
        // The purpose of the 'shell' is to ensure all the registers
        // get put on the stack so they'll be scanned
        void *sp;
        size_t result;
        version (GNU)
        {
            // Forces callee-saved registers out to the stack frame.
            __builtin_unwind_init();
            sp = & sp;
        }
        else
        {
            // NOTE(review): with the asm commented out, sp is passed to
            // fullcollect() uninitialized on non-GNU builds -- confirm intended.
            /*asm
            {
                pushad              ;
                mov sp[EBP],ESP     ;
            }*/
        }
        result = fullcollect(sp);
        version (GNU)
        {
            // nothing to do
        }
        else
        {
            /*asm
            {
                popad               ;
            }*/
        }
        return result;
    }
1885 | |
1886 | |
    /**
     * Stop-the-world mark/sweep collection.  Pauses all threads, marks
     * conservatively from thread stacks/registers, roots[] and ranges[],
     * then transitively through the heap; sweeps unmarked objects
     * (running finalizers), and rebuilds the per-bin free lists.
     * Returns the number of pages made available
     * (freed large pages + fully recovered bin pages).
     */
    size_t fullcollect(void *stackTop)
    {
        uint n;
        Pool *pool;

        debug(COLLECT_PRINTF) printf("Gcx.fullcollect()\n");

        Thread.pauseAll();

        // Any cached size lookup may go stale once memory is freed.
        p_cache = null;
        size_cache = 0;

        anychanges = 0;
        for (n = 0; n < npools; n++)
        {
            pool = pooltable[n];
            pool.mark.zero();
            pool.scan.zero();
            pool.freebits.zero();
        }

        // Mark each free entry, so it doesn't get scanned
        for (n = 0; n < B_PAGE; n++)
        {
            for (List *list = bucket[n]; list; list = list.next)
            {
                pool = findPool(list);
                assert(pool);
                pool.freebits.set(cast(uint)(cast(byte *)list - pool.baseAddr) / 16);
            }
        }

        // Seed mark bits with the free bits so free slots never look live.
        for (n = 0; n < npools; n++)
        {
            pool = pooltable[n];
            pool.mark.copy(&pool.freebits);
        }

        version (MULTI_THREADED)
        {
            // Scan stacks and registers for each paused thread
            Thread[] threads = Thread.getAll();
            //thread_id id = cast(thread_id) GetCurrentThread();
            for (n = 0; n < threads.length; n++)
            {   Thread t = threads[n];

                if (t && t.getState() == Thread.TS.RUNNING)
                {
                    if (noStack && threads.length == 1)
                        break;

                    version (Win32)
                    {
                        CONTEXT context;

                        context.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
                        if (!GetThreadContext(t.hdl, &context))
                        {
                            assert(0);
                        }
                        debug (PRINTF) printf("mt scan stack bot = %x, top = %x\n", context.Esp, t.stackBottom);
                        mark(cast(void *)context.Esp, t.stackBottom);
                        // also scan the captured register block itself
                        mark(&context.Edi, &context.Eip);
                    }
                    else version (GNU)
                    {
                        if (t.isSelf())
                            t.stackTop = Thread.getESP();

                        //%%fry printf("top=%08x bot=%08x ext=%08x\n", t.stackTop, t.stackBottom, Thread.getESP());//%%try
                        version (STACKGROWSDOWN)
                            mark(t.stackTop, t.stackBottom);
                        else
                            mark(t.stackBottom, t.stackTop);
                    }
                    else version (linux)
                    {
                        // The registers are already stored in the stack
                        //printf("Thread: ESP = x%x, stackBottom = x%x, isSelf = %d\n", Thread.getESP(), t.stackBottom, t.isSelf());
                        if (t.isSelf())
                            t.stackTop = Thread.getESP();

                        version (STACKGROWSDOWN)
                            mark(t.stackTop, t.stackBottom);
                        else
                            mark(t.stackBottom, t.stackTop);
                    }
                }
            }
        }
        else
        {
            if (!noStack)
            {
                // Scan stack for main thread
                debug(PRINTF) printf(" scan stack bot = %x, top = %x\n", stackTop, stackBottom);
                version (STACKGROWSDOWN)
                    mark(stackTop, stackBottom);
                else
                    mark(stackBottom, stackTop);
            }
        }

        // Scan roots[]
        debug(COLLECT_PRINTF) printf("scan roots[]\n");
        mark(roots, roots + nroots);

        // Scan ranges[]
        debug(COLLECT_PRINTF) printf("scan ranges[]\n");
        //log++;
        for (n = 0; n < nranges; n++)
        {
            debug(COLLECT_PRINTF) printf("\t%x .. %x\n", ranges[n].pbot, ranges[n].ptop);
            mark(ranges[n].pbot, ranges[n].ptop);
        }
        //log--;

        // Transitively mark: keep scanning queued objects until mark()
        // reports no new additions.
        debug(COLLECT_PRINTF) printf("\tscan heap\n");
        while (anychanges)
        {
            anychanges = 0;
            for (n = 0; n < npools; n++)
            {
                uint *bbase;
                uint *b;
                uint *btop;

                pool = pooltable[n];

                bbase = pool.scan.base();
                btop = bbase + pool.scan.nwords;
                for (b = bbase; b < btop;)
                {   Bins bin;
                    uint pn;
                    uint u;
                    uint bitm;
                    byte *o;

                    bitm = *b;
                    if (!bitm)
                    {   b++;
                        continue;
                    }
                    *b = 0;     // consume this scan word

                    // Each scan word covers 32 granules of 16 bytes.
                    o = pool.baseAddr + (b - bbase) * 32 * 16;
                    /* version (BigEndian)
                        bitm = bswap(bitm);
                    */
                    if (!(bitm & 0xFFFF))
                    {
                        // skip the empty low half-word in one step
                        bitm >>= 16;
                        o += 16 * 16;
                    }
                    for (; bitm; o += 16, bitm >>= 1)
                    {
                        if (!(bitm & 1))
                            continue;

                        pn = (o - pool.baseAddr) / PAGESIZE;
                        bin = cast(Bins)pool.pagetable[pn];
                        if (bin < B_PAGE)
                        {
                            mark(o, o + binsize[bin]);
                        }
                        else if (bin == B_PAGE || bin == B_PAGEPLUS)
                        {
                            if (bin == B_PAGEPLUS)
                            {
                                while (pool.pagetable[pn - 1] != B_PAGE)
                                    pn--;
                            }
                            // extend over all continuation pages
                            u = 1;
                            while (pn + u < pool.ncommitted && pool.pagetable[pn + u] == B_PAGEPLUS)
                                u++;
                            mark(o, o + u * PAGESIZE);
                        }
                    }
                }
            }
        }

        // Free up everything not marked
        debug(COLLECT_PRINTF) printf("\tfree'ing\n");
        size_t freedpages = 0;
        size_t freed = 0;
        for (n = 0; n < npools; n++)
        {   uint pn;
            uint ncommitted;
            uint *bbase;

            pool = pooltable[n];
            bbase = pool.mark.base();
            ncommitted = pool.ncommitted;
            for (pn = 0; pn < ncommitted; pn++, bbase += PAGESIZE / (32 * 16))
            {
                Bins bin = cast(Bins)pool.pagetable[pn];

                if (bin < B_PAGE)
                {   byte *p;
                    byte *ptop;
                    uint biti;
                    uint bitstride;
                    uint size = binsize[bin];

                    p = pool.baseAddr + pn * PAGESIZE;
                    ptop = p + PAGESIZE;
                    biti = pn * (PAGESIZE/16);
                    bitstride = size / 16;

                    version(none) // BUG: doesn't work because freebits() must also be cleared
                    {
                        // If free'd entire page
                        if (bbase[0] == 0 && bbase[1] == 0 && bbase[2] == 0 && bbase[3] == 0 &&
                            bbase[4] == 0 && bbase[5] == 0 && bbase[6] == 0 && bbase[7] == 0)
                        {
                            for (; p < ptop; p += size, biti += bitstride)
                            {
                                if (pool.finals.nbits && pool.finals.testClear(biti))
                                    rt_finalize(cast(List *)sentinel_add(p), false);

                                List *list = cast(List *)p;
                                //debug(PRINTF) printf("\tcollecting %x\n", list);
                                log_free(sentinel_add(list));

                                debug (MEMSTOMP) cstring.memset(p, 0xF3, size);
                            }
                            pool.pagetable[pn] = B_FREE;
                            freed += PAGESIZE;
                            //debug(PRINTF) printf("freeing entire page %d\n", pn);
                            continue;
                        }
                    }
                    // Sweep each unmarked slot on this bin page.
                    for (; p < ptop; p += size, biti += bitstride)
                    {
                        if (!pool.mark.test(biti))
                        {
                            sentinel_Invariant(sentinel_add(p));

                            pool.freebits.set(biti);
                            pool.noscan.clear(biti);
                            if (pool.finals.nbits && pool.finals.testClear(biti))
                                rt_finalize(cast(List *)sentinel_add(p), false);

                            List *list = cast(List *)p;
                            debug(PRINTF) printf("\tcollecting %x\n", list);
                            log_free(sentinel_add(list));

                            debug (MEMSTOMP) cstring.memset(p, 0xF3, size);

                            freed += size;
                        }
                    }
                }
                else if (bin == B_PAGE)
                {   uint biti = pn * (PAGESIZE / 16);

                    if (!pool.mark.test(biti))
                    {   byte *p = pool.baseAddr + pn * PAGESIZE;

                        sentinel_Invariant(sentinel_add(p));
                        pool.noscan.clear(biti);
                        if (pool.finals.nbits && pool.finals.testClear(biti))
                            rt_finalize(sentinel_add(p), false);

                        debug(COLLECT_PRINTF) printf("\tcollecting big %x\n", p);
                        log_free(sentinel_add(p));
                        pool.pagetable[pn] = B_FREE;
                        freedpages++;
                        debug (MEMSTOMP) cstring.memset(p, 0xF3, PAGESIZE);
                        // Also release the B_PAGEPLUS continuation pages.
                        while (pn + 1 < ncommitted && pool.pagetable[pn + 1] == B_PAGEPLUS)
                        {
                            pn++;
                            pool.pagetable[pn] = B_FREE;
                            freedpages++;

                            debug (MEMSTOMP)
                            {   p += PAGESIZE;
                                cstring.memset(p, 0xF3, PAGESIZE);
                            }
                        }
                    }
                }
            }
        }

        // Zero buckets
        bucket[] = null;

        // Free complete pages, rebuild free list
        debug(COLLECT_PRINTF) printf("\tfree complete pages\n");
        size_t recoveredpages = 0;
        for (n = 0; n < npools; n++)
        {   uint pn;
            uint ncommitted;

            pool = pooltable[n];
            ncommitted = pool.ncommitted;
            for (pn = 0; pn < ncommitted; pn++)
            {
                Bins bin = cast(Bins)pool.pagetable[pn];
                uint biti;
                uint u;

                if (bin < B_PAGE)
                {
                    uint size = binsize[bin];
                    uint bitstride = size / 16;
                    uint bitbase = pn * (PAGESIZE / 16);
                    uint bittop = bitbase + (PAGESIZE / 16);
                    byte *p;

                    biti = bitbase;
                    // If every slot on the page is free, reclaim the page whole.
                    for (biti = bitbase; biti < bittop; biti += bitstride)
                    {   if (!pool.freebits.test(biti))
                            goto Lnotfree;
                    }
                    pool.pagetable[pn] = B_FREE;
                    recoveredpages++;
                    continue;

                 Lnotfree:
                    // Otherwise push each free slot back onto the bin's free list.
                    p = pool.baseAddr + pn * PAGESIZE;
                    for (u = 0; u < PAGESIZE; u += size)
                    {   biti = bitbase + u / 16;
                        if (pool.freebits.test(biti))
                        {   List *list;

                            list = cast(List *)(p + u);
                            if (list.next != bucket[bin])       // avoid unnecessary writes
                                list.next = bucket[bin];
                            bucket[bin] = list;
                        }
                    }
                }
            }
        }

        debug(COLLECT_PRINTF) printf("recovered pages = %d\n", recoveredpages);
        debug(COLLECT_PRINTF) printf("\tfree'd %u bytes, %u pages from %u pools\n", freed, freedpages, npools);

        Thread.resumeAll();

        return freedpages + recoveredpages;
    }
2235 | |
2236 | |
2237 /** | |
2238 * Run finalizer on p when it is free'd. | |
2239 */ | |
2240 void doFinalize(void *p) | |
2241 { | |
2242 Pool *pool = findPool(p); | |
2243 assert(pool); | |
2244 | |
2245 // Only allocate finals[] if we actually need it | |
2246 if (!pool.finals.nbits) | |
2247 pool.finals.alloc(pool.mark.nbits); | |
2248 | |
2249 pool.finals.set((p - pool.baseAddr) / 16); | |
2250 } | |
2251 | |
2252 | |
2253 /** | |
2254 * Indicate that block pointed to by p has possible pointers | |
2255 * to GC allocated memory in it. | |
2256 */ | |
2257 | |
2258 void HasPointers(void *p) | |
2259 { | |
2260 Pool *pool = findPool(p); | |
2261 assert(pool); | |
2262 | |
2263 pool.noscan.clear((p - pool.baseAddr) / 16); | |
2264 } | |
2265 | |
2266 | |
2267 /** | |
2268 * Indicate that block pointed to by p has no possible pointers | |
2269 * to GC allocated memory in it. | |
2270 */ | |
2271 void HasNoPointers(void *p) | |
2272 { | |
2273 //printf("HasNoPointers(%p)\n", p); | |
2274 Pool *pool = findPool(p); | |
2275 assert(pool); | |
2276 | |
2277 pool.noscan.set((p - pool.baseAddr) / 16); | |
2278 } | |
2279 | |
2280 | |
2281 /***** Leak Detector ******/ | |
2282 | |
2283 | |
2284 debug (LOGGING) | |
2285 { | |
2286 LogArray current; | |
2287 LogArray prev; | |
2288 | |
2289 | |
    /**
     * Reserve initial capacity for the leak-detector log arrays.
     */
    void log_init()
    {
        //debug(PRINTF) printf("+log_init()\n");
        current.reserve(1000);
        prev.reserve(1000);
        //debug(PRINTF) printf("-log_init()\n");
    }
2297 | |
2298 | |
    /**
     * Record an allocation of size bytes at p, tagged with the one-shot
     * file/line location stashed in GC.file/GC.line.
     */
    void log_malloc(void *p, size_t size)
    {
        //debug(PRINTF) printf("+log_malloc(p = %x, size = %d)\n", p, size);
        Log log;

        log.p = p;
        log.size = size;
        log.line = GC.line;
        log.file = GC.file;
        log.parent = null;

        // Consume the one-shot location info.
        GC.line = 0;
        GC.file = null;

        current.push(log);
        //debug(PRINTF) printf("-log_malloc()\n");
    }
2316 | |
2317 | |
    /**
     * Drop the log entry for p when it is freed; complains (in PRINTF
     * builds) if p was never recorded.
     */
    void log_free(void *p)
    {
        //debug(PRINTF) printf("+log_free(%x)\n", p);
        size_t i;

        i = current.find(p);
        // NOTE(review): i is size_t but compared against ~0u; on 64-bit a
        // not-found value of size_t.max would not equal ~0u -- confirm.
        if (i == ~0u)
        {
            debug(PRINTF) printf("free'ing unallocated memory %x\n", p);
        }
        else
            current.remove(i);
        //debug(PRINTF) printf("-log_free()\n");
    }
2332 | |
    /**
     * After a collection: report allocations that are new since the
     * previous cycle and entries whose parent is not inside any pool,
     * then save the current log as the baseline for the next cycle.
     */
    void log_collect()
    {
        //debug(PRINTF) printf("+log_collect()\n");
        // Print everything in current that is not in prev

        debug(PRINTF) printf("New pointers this cycle: --------------------------------\n");
        size_t used = 0;
        for (size_t i = 0; i < current.dim; i++)
        {
            size_t j;

            j = prev.find(current.data[i].p);
            if (j == ~0u)
                current.data[i].print();    // not seen last cycle: new
            else
                used++;
        }

        debug(PRINTF) printf("All roots this cycle: --------------------------------\n");
        for (size_t i = 0; i < current.dim; i++)
        {
            void *p;
            size_t j;

            p = current.data[i].p;
            if (!findPool(current.data[i].parent))
            {
                j = prev.find(current.data[i].p);
                if (j == ~0u)
                    debug(PRINTF) printf("N");      // new root this cycle
                else
                    debug(PRINTF) printf(" ");;
                current.data[i].print();
            }
        }

        debug(PRINTF) printf("Used = %d-------------------------------------------------\n", used);
        prev.copy(&current);

        debug(PRINTF) printf("-log_collect()\n");
    }
2374 | |
2375 | |
/**
 * Record 'parent' as the allocation that references 'p'.  When 'p'
 * was never logged, dump diagnostic information about the page and
 * bin it lives in instead.
 */
void log_parent(void *p, void *parent)
{
    //debug(PRINTF) printf("+log_parent()\n");
    auto i = current.find(p);
    if (i != ~0u)
    {
        current.data[i].parent = parent;
    }
    else
    {
        debug(PRINTF) printf("parent'ing unallocated memory %x, parent = %x\n", p, parent);
        Pool *pool = findPool(p);
        assert(pool);
        size_t offset = cast(uint)(p - pool.baseAddr);
        uint pn = offset / PAGESIZE;
        Bins bin = cast(Bins)pool.pagetable[pn];
        size_t biti = (offset & notbinsize[bin]);
        debug(PRINTF) printf("\tbin = %d, offset = x%x, biti = x%x\n", bin, offset, biti);
    }
    //debug(PRINTF) printf("-log_parent()\n");
}
2401 | |
2402 } | |
else
{
    // LOGGING disabled: leak-detector hooks compile away to empty stubs.
    void log_init() { }
    void log_malloc(void *p, size_t size) { }
    void log_free(void *p) { }
    void log_collect() { }
    void log_parent(void *p, void *parent) { }
}
2411 } | |
2412 | |
2413 | |
2414 /* ============================ Pool =============================== */ | |
2415 | |
2416 | |
struct Pool
{
    byte* baseAddr;             // lowest address of the pool's mapped memory
    byte* topAddr;              // baseAddr + npages * PAGESIZE
    GCBits mark;                // entries already scanned, or should not be scanned
    GCBits scan;                // entries that need to be scanned
    GCBits freebits;            // entries that are on the free list
    GCBits finals;              // entries that need finalizer run on them
    GCBits noscan;              // entries that should not be scanned

    uint npages;                // total pages reserved for this pool
    uint ncommitted;            // ncommitted <= npages
    ubyte* pagetable;           // one B_xxx tag per page, npages entries


    /**
     * Map and set up a pool of 'npages' pages.  On mapping failure the
     * pool degrades to an empty one (all sizes zero) rather than
     * reporting an error.  Bit sets are sized at one bit per 16 bytes,
     * the smallest bin granularity.
     */
    void initialize(uint npages)
    {
        size_t poolsize;

        //debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
        poolsize = npages * PAGESIZE;
        assert(poolsize >= POOLSIZE);
        baseAddr = cast(byte *)os_mem_map(poolsize);

        // Some of the code depends on page alignment of memory pools
        assert((cast(uint)baseAddr & (PAGESIZE - 1)) == 0);

        if (!baseAddr)
        {
            //debug(PRINTF) printf("GC fail: poolsize = x%x, errno = %d\n", poolsize, errno);
            //debug(PRINTF) printf("message = '%s'\n", sys_errlist[errno]);

            // These assign the parameter and local, so everything sized
            // from them below ends up zero.
            npages = 0;
            poolsize = 0;
        }
        //assert(baseAddr);
        topAddr = baseAddr + poolsize;

        mark.alloc(poolsize / 16);
        scan.alloc(poolsize / 16);
        freebits.alloc(poolsize / 16);
        noscan.alloc(poolsize / 16);
        // NOTE(review): 'finals' is not allocated here — presumably done
        // lazily when the first finalizer is registered; confirm at the
        // sites that set finals bits.

        pagetable = cast(ubyte*)cstdlib.malloc(npages);
        if (!pagetable)
            onOutOfMemoryError();
        // All pages start out uncommitted; extendPages() commits them.
        cstring.memset(pagetable, B_UNCOMMITTED, npages);

        this.npages = npages;
        ncommitted = 0;
    }


    /**
     * Release all OS memory and bookkeeping owned by this pool:
     * decommit committed pages, unmap the reservation, free the page
     * table, and destroy every bit set.
     */
    void Dtor()
    {
        if (baseAddr)
        {
            int result;

            if (ncommitted)
            {
                result = os_mem_decommit(baseAddr, 0, ncommitted * PAGESIZE);
                assert(result == 0);
                ncommitted = 0;
            }

            if (npages)
            {
                result = os_mem_unmap(baseAddr, npages * PAGESIZE);
                assert(result == 0);
                npages = 0;
            }

            baseAddr = null;
            topAddr = null;
        }
        if (pagetable)
            cstdlib.free(pagetable);

        mark.Dtor();
        scan.Dtor();
        freebits.Dtor();
        finals.Dtor();
        noscan.Dtor();
    }


    // Explicit invariant hook; intentionally a no-op.
    void Invariant() { }


    invariant()
    {
        //mark.Invariant();
        //scan.Invariant();
        //freebits.Invariant();
        //finals.Invariant();
        //noscan.Invariant();

        if (baseAddr)
        {
            //if (baseAddr + npages * PAGESIZE != topAddr)
            //printf("baseAddr = %p, npages = %d, topAddr = %p\n", baseAddr, npages, topAddr);
            assert(baseAddr + npages * PAGESIZE == topAddr);
            assert(ncommitted <= npages);
        }

        // Every page tag must be a valid bin value.
        for (uint i = 0; i < npages; i++)
        {   Bins bin = cast(Bins)pagetable[i];

            assert(bin < B_MAX);
        }
    }


    /**
     * Allocate n pages from Pool.
     * Returns ~0u on failure.
     */
    uint allocPages(uint n)
    {
        uint i;
        uint n2;

        //debug(PRINTF) printf("Pool::allocPages(n = %d)\n", n);
        // Linear scan of committed pages for a run of n consecutive
        // B_FREE pages; n2 counts down the current run length.
        n2 = n;
        for (i = 0; i < ncommitted; i++)
        {
            if (pagetable[i] == B_FREE)
            {
                if (--n2 == 0)
                {   //debug(PRINTF) printf("\texisting pn = %d\n", i - n + 1);
                    return i - n + 1;   // first page of the run
                }
            }
            else
                n2 = n;                 // run broken; restart the count
        }
        // No suitable run among committed pages; try committing more.
        return extendPages(n);
    }

    /**
     * Extend Pool by n pages.
     * Returns ~0u on failure.
     */
    uint extendPages(uint n)
    {
        //debug(PRINTF) printf("Pool::extendPages(n = %d)\n", n);
        if (ncommitted + n <= npages)
        {
            uint tocommit;

            // Round the request up to a multiple of COMMITSIZE worth of
            // pages, clamped to what remains uncommitted in the pool.
            tocommit = (n + (COMMITSIZE/PAGESIZE) - 1) & ~(COMMITSIZE/PAGESIZE - 1);
            if (ncommitted + tocommit > npages)
                tocommit = npages - ncommitted;
            //debug(PRINTF) printf("\tlooking to commit %d more pages\n", tocommit);
            //fflush(stdout);
            if (os_mem_commit(baseAddr, ncommitted * PAGESIZE, tocommit * PAGESIZE) == 0)
            {
                cstring.memset(pagetable + ncommitted, B_FREE, tocommit);
                auto i = ncommitted;
                ncommitted += tocommit;

                // Back up over trailing free pages that were already
                // free before the commit, so the returned start covers
                // the whole contiguous free run.
                while (i && pagetable[i - 1] == B_FREE)
                    i--;

                return i;
            }
            //debug(PRINTF) printf("\tfailed to commit %d pages\n", tocommit);
        }

        return ~0u;
    }


    /**
     * Free npages pages starting with pagenum.
     * (Only retags the pages; the memory stays committed.)
     */
    void freePages(uint pagenum, uint npages)
    {
        cstring.memset(&pagetable[pagenum], B_FREE, npages);
    }


    /**
     * Used for sorting pooltable[]
     * Orders pools by ascending base address.
     */
    int opCmp(Pool *p2)
    {
        if (baseAddr < p2.baseAddr)
            return -1;
        else
            return cast(int)(baseAddr > p2.baseAddr);
    }
}
2611 | |
2612 | |
2613 /* ============================ SENTINEL =============================== */ | |
2614 | |
2615 | |
version (SENTINEL)
{
    // Debugging aid: each allocation is bracketed by guard values that
    // are verified to detect buffer under-/overruns.
    // Layout: [size][pre-guard][user data ... ][post-guard byte]
    const size_t SENTINEL_PRE = cast(size_t) 0xF4F4F4F4F4F4F4F4UL; // 32 or 64 bits
    const ubyte SENTINEL_POST = 0xF5;           // 8 bits
    const uint SENTINEL_EXTRA = 2 * size_t.sizeof + 1;


    size_t* sentinel_size(void *p)  { return &(cast(size_t *)p)[-2]; }
    size_t* sentinel_pre(void *p)   { return &(cast(size_t *)p)[-1]; }
    // BUG FIX: the stored size must be dereferenced to index the post
    // guard; the old code indexed with the size_t* itself, which does
    // not compile (and would address the wrong byte).
    ubyte* sentinel_post(void *p)   { return &(cast(ubyte *)p)[*sentinel_size(p)]; }


    /// Stamp the guards around a fresh allocation of 'size' user bytes.
    void sentinel_init(void *p, size_t size)
    {
        *sentinel_size(p) = size;
        *sentinel_pre(p) = SENTINEL_PRE;
        *sentinel_post(p) = SENTINEL_POST;
    }


    /// Assert that neither guard value has been overwritten.
    void sentinel_Invariant(void *p)
    {
        assert(*sentinel_pre(p) == SENTINEL_PRE);
        assert(*sentinel_post(p) == SENTINEL_POST);
    }


    /// Convert a raw GC block pointer to the user-visible pointer.
    void *sentinel_add(void *p)
    {
        return p + 2 * size_t.sizeof;
    }


    /// Convert a user-visible pointer back to the raw GC block pointer.
    void *sentinel_sub(void *p)
    {
        return p - 2 * size_t.sizeof;
    }
}
else
{
    // Sentinel checking disabled: all hooks collapse to no-ops.
    const uint SENTINEL_EXTRA = 0;


    void sentinel_init(void *p, size_t size)
    {
    }


    void sentinel_Invariant(void *p)
    {
    }


    void *sentinel_add(void *p)
    {
        return p;
    }


    void *sentinel_sub(void *p)
    {
        return p;
    }
}
2680 | |
2681 |