Mercurial > projects > ldc
comparison druntime/src/common/core/thread.d @ 759:d3eb054172f9
Added copy of druntime from DMD 2.020 modified for LDC.
author | Tomas Lindquist Olsen <tomas.l.olsen@gmail.com> |
---|---|
date | Tue, 11 Nov 2008 01:52:37 +0100 |
parents | |
children |
comparison
equal
deleted
inserted
replaced
758:f04dde6e882c | 759:d3eb054172f9 |
---|---|
1 /** | |
2 * The thread module provides support for thread creation and management. | |
3 * | |
4 * Copyright: Copyright (c) 2005-2008, The D Runtime Project | |
5 * License: BSD Style, see LICENSE | |
6 * Authors: Sean Kelly | |
7 */ | |
8 module core.thread; | |
9 | |
10 | |
11 // this should be true for most architectures | |
12 version = StackGrowsDown; | |
13 | |
14 | |
15 /////////////////////////////////////////////////////////////////////////////// | |
16 // Thread and Fiber Exceptions | |
17 /////////////////////////////////////////////////////////////////////////////// | |
18 | |
19 | |
20 /** | |
21 * Base class for thread exceptions. | |
22 */ | |
23 class ThreadException : Exception | |
24 { | |
25 this( string msg ) | |
26 { | |
27 super( msg ); | |
28 } | |
29 } | |
30 | |
31 | |
32 /** | |
33 * Base class for fiber exceptions. | |
34 */ | |
35 class FiberException : Exception | |
36 { | |
37 this( string msg ) | |
38 { | |
39 super( msg ); | |
40 } | |
41 } | |
42 | |
43 | |
44 private | |
45 { | |
46 // | |
47 // exposed by compiler runtime | |
48 // | |
49 extern (C) void* rt_stackBottom(); | |
50 extern (C) void* rt_stackTop(); | |
51 | |
52 | |
53 void* getStackBottom() | |
54 { | |
55 return rt_stackBottom(); | |
56 } | |
57 | |
58 | |
59 void* getStackTop() | |
60 { | |
61 version( D_InlineAsm_X86 ) | |
62 { | |
63 asm | |
64 { | |
65 naked; | |
66 mov EAX, ESP; | |
67 ret; | |
68 } | |
69 } | |
70 else | |
71 { | |
72 return rt_stackTop(); | |
73 } | |
74 } | |
75 } | |
76 | |
77 | |
78 /////////////////////////////////////////////////////////////////////////////// | |
79 // Thread Entry Point and Signal Handlers | |
80 /////////////////////////////////////////////////////////////////////////////// | |
81 | |
82 | |
83 version( Windows ) | |
84 { | |
85 private | |
86 { | |
87 import stdc.stdint : uintptr_t; // for _beginthreadex decl below | |
88 import sys.windows.windows; | |
89 | |
90 const DWORD TLS_OUT_OF_INDEXES = 0xFFFFFFFF; | |
91 | |
92 extern (Windows) alias uint function(void*) btex_fptr; | |
93 extern (C) uintptr_t _beginthreadex(void*, uint, btex_fptr, void*, uint, uint*); | |
94 | |
95 | |
96 // | |
97 // entry point for Windows threads | |
98 // | |
99 extern (Windows) uint thread_entryPoint( void* arg ) | |
100 { | |
101 Thread obj = cast(Thread) arg; | |
102 assert( obj ); | |
103 scope( exit ) Thread.remove( obj ); | |
104 | |
105 assert( obj.m_curr is &obj.m_main ); | |
106 obj.m_main.bstack = getStackBottom(); | |
107 obj.m_main.tstack = obj.m_main.bstack; | |
108 Thread.add( &obj.m_main ); | |
109 Thread.setThis( obj ); | |
110 | |
111 // NOTE: No GC allocations may occur until the stack pointers have | |
112 // been set and Thread.getThis returns a valid reference to | |
113 // this thread object (this latter condition is not strictly | |
114 // necessary on Win32 but it should be followed for the sake | |
115 // of consistency). | |
116 | |
117 // TODO: Consider putting an auto exception object here (using | |
118 // alloca) forOutOfMemoryError plus something to track | |
119 // whether an exception is in-flight? | |
120 | |
121 try | |
122 { | |
123 obj.run(); | |
124 } | |
125 catch( Object o ) | |
126 { | |
127 obj.m_unhandled = o; | |
128 } | |
129 return 0; | |
130 } | |
131 | |
132 | |
133 // | |
134 // copy of the same-named function in phobos.std.thread--it uses the | |
135 // Windows naming convention to be consistent with GetCurrentThreadId | |
136 // | |
137 HANDLE GetCurrentThreadHandle() | |
138 { | |
139 const uint DUPLICATE_SAME_ACCESS = 0x00000002; | |
140 | |
141 HANDLE curr = GetCurrentThread(), | |
142 proc = GetCurrentProcess(), | |
143 hndl; | |
144 | |
145 DuplicateHandle( proc, curr, proc, &hndl, 0, TRUE, DUPLICATE_SAME_ACCESS ); | |
146 return hndl; | |
147 } | |
148 } | |
149 } | |
150 else version( Posix ) | |
151 { | |
152 private | |
153 { | |
154 import stdc.posix.semaphore; | |
155 import stdc.posix.pthread; | |
156 import stdc.posix.signal; | |
157 import stdc.posix.time; | |
158 import stdc.errno; | |
159 | |
160 extern (C) int getErrno(); | |
161 version( GNU ) | |
162 { | |
163 import gcc.builtins; | |
164 } | |
165 | |
166 | |
167 // | |
168 // entry point for POSIX threads | |
169 // | |
170 extern (C) void* thread_entryPoint( void* arg ) | |
171 { | |
172 Thread obj = cast(Thread) arg; | |
173 assert( obj ); | |
174 scope( exit ) | |
175 { | |
176 // NOTE: isRunning should be set to false after the thread is | |
177 // removed or a double-removal could occur between this | |
178 // function and thread_suspendAll. | |
179 Thread.remove( obj ); | |
180 obj.m_isRunning = false; | |
181 } | |
182 | |
183 static extern (C) void thread_cleanupHandler( void* arg ) | |
184 { | |
185 Thread obj = cast(Thread) arg; | |
186 assert( obj ); | |
187 | |
188 // NOTE: If the thread terminated abnormally, just set it as | |
189 // not running and let thread_suspendAll remove it from | |
190 // the thread list. This is safer and is consistent | |
191 // with the Windows thread code. | |
192 obj.m_isRunning = false; | |
193 } | |
194 | |
195 // NOTE: Using void to skip the initialization here relies on | |
196 // knowledge of how pthread_cleanup is implemented. It may | |
197 // not be appropriate for all platforms. However, it does | |
198 // avoid the need to link the pthread module. If any | |
199 // implementation actually requires default initialization | |
200 // then pthread_cleanup should be restructured to maintain | |
201 // the current lack of a link dependency. | |
202 pthread_cleanup cleanup = void; | |
203 cleanup.push( &thread_cleanupHandler, cast(void*) obj ); | |
204 | |
205 // NOTE: For some reason this does not always work for threads. | |
206 //obj.m_main.bstack = getStackBottom(); | |
207 version( D_InlineAsm_X86 ) | |
208 { | |
209 static void* getBasePtr() | |
210 { | |
211 asm | |
212 { | |
213 naked; | |
214 mov EAX, EBP; | |
215 ret; | |
216 } | |
217 } | |
218 | |
219 obj.m_main.bstack = getBasePtr(); | |
220 } | |
221 else version( StackGrowsDown ) | |
222 obj.m_main.bstack = &obj + 1; | |
223 else | |
224 obj.m_main.bstack = &obj; | |
225 obj.m_main.tstack = obj.m_main.bstack; | |
226 assert( obj.m_curr == &obj.m_main ); | |
227 Thread.add( &obj.m_main ); | |
228 Thread.setThis( obj ); | |
229 | |
230 // NOTE: No GC allocations may occur until the stack pointers have | |
231 // been set and Thread.getThis returns a valid reference to | |
232 // this thread object (this latter condition is not strictly | |
233 // necessary on Win32 but it should be followed for the sake | |
234 // of consistency). | |
235 | |
236 // TODO: Consider putting an auto exception object here (using | |
237 // alloca) forOutOfMemoryError plus something to track | |
238 // whether an exception is in-flight? | |
239 | |
240 try | |
241 { | |
242 obj.run(); | |
243 } | |
244 catch( Object o ) | |
245 { | |
246 obj.m_unhandled = o; | |
247 } | |
248 return null; | |
249 } | |
250 | |
251 | |
252 // | |
253 // used to track the number of suspended threads | |
254 // | |
255 sem_t suspendCount; | |
256 | |
257 | |
258 extern (C) void thread_suspendHandler( int sig ) | |
259 in | |
260 { | |
261 assert( sig == SIGUSR1 ); | |
262 } | |
263 body | |
264 { | |
265 version( LDC ) | |
266 { | |
267 version(X86) | |
268 { | |
269 uint eax,ecx,edx,ebx,ebp,esi,edi; | |
270 asm | |
271 { | |
272 mov eax[EBP], EAX ; | |
273 mov ecx[EBP], ECX ; | |
274 mov edx[EBP], EDX ; | |
275 mov ebx[EBP], EBX ; | |
276 mov ebp[EBP], EBP ; | |
277 mov esi[EBP], ESI ; | |
278 mov edi[EBP], EDI ; | |
279 } | |
280 } | |
281 else version (X86_64) | |
282 { | |
283 ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,rsp,r10,r11,r12,r13,r14,r15; | |
284 asm | |
285 { | |
286 movq rax[RBP], RAX ; | |
287 movq rbx[RBP], RBX ; | |
288 movq rcx[RBP], RCX ; | |
289 movq rdx[RBP], RDX ; | |
290 movq rbp[RBP], RBP ; | |
291 movq rsi[RBP], RSI ; | |
292 movq rdi[RBP], RDI ; | |
293 movq rsp[RBP], RSP ; | |
294 movq r10[RBP], R10 ; | |
295 movq r11[RBP], R11 ; | |
296 movq r12[RBP], R12 ; | |
297 movq r13[RBP], R13 ; | |
298 movq r14[RBP], R14 ; | |
299 movq r15[RBP], R15 ; | |
300 } | |
301 } | |
302 else | |
303 { | |
304 static assert( false, "Architecture not supported." ); | |
305 } | |
306 } | |
307 else version( D_InlineAsm_X86 ) | |
308 { | |
309 asm | |
310 { | |
311 pushad; | |
312 } | |
313 } | |
314 else version( GNU ) | |
315 { | |
316 __builtin_unwind_init(); | |
317 } | |
318 else | |
319 { | |
320 static assert( false, "Architecture not supported." ); | |
321 } | |
322 | |
323 // NOTE: Since registers are being pushed and popped from the | |
324 // stack, any other stack data used by this function should | |
325 // be gone before the stack cleanup code is called below. | |
326 { | |
327 Thread obj = Thread.getThis(); | |
328 | |
329 // NOTE: The thread reference returned by getThis is set within | |
330 // the thread startup code, so it is possible that this | |
331 // handler may be called before the reference is set. In | |
332 // this case it is safe to simply suspend and not worry | |
333 // about the stack pointers as the thread will not have | |
334 // any references to GC-managed data. | |
335 if( obj && !obj.m_lock ) | |
336 { | |
337 obj.m_curr.tstack = getStackTop(); | |
338 } | |
339 | |
340 sigset_t sigres = void; | |
341 int status; | |
342 | |
343 status = sigfillset( &sigres ); | |
344 assert( status == 0 ); | |
345 | |
346 status = sigdelset( &sigres, SIGUSR2 ); | |
347 assert( status == 0 ); | |
348 | |
349 status = sem_post( &suspendCount ); | |
350 assert( status == 0 ); | |
351 | |
352 sigsuspend( &sigres ); | |
353 | |
354 if( obj && !obj.m_lock ) | |
355 { | |
356 obj.m_curr.tstack = obj.m_curr.bstack; | |
357 } | |
358 } | |
359 | |
360 version( LDC ) | |
361 { | |
362 // nothing to pop | |
363 } | |
364 else version( D_InlineAsm_X86 ) | |
365 { | |
366 asm | |
367 { | |
368 popad; | |
369 } | |
370 } | |
371 else version( GNU ) | |
372 { | |
373 // registers will be popped automatically | |
374 } | |
375 else | |
376 { | |
377 static assert( false, "Architecture not supported." ); | |
378 } | |
379 } | |
380 | |
381 | |
382 extern (C) void thread_resumeHandler( int sig ) | |
383 in | |
384 { | |
385 assert( sig == SIGUSR2 ); | |
386 } | |
387 body | |
388 { | |
389 | |
390 } | |
391 } | |
392 } | |
393 else | |
394 { | |
395 // NOTE: This is the only place threading versions are checked. If a new | |
396 // version is added, the module code will need to be searched for | |
397 // places where version-specific code may be required. This can be | |
398 // easily accomlished by searching for 'Windows' or 'Posix'. | |
399 static assert( false, "Unknown threading implementation." ); | |
400 } | |
401 | |
402 | |
403 /////////////////////////////////////////////////////////////////////////////// | |
404 // Thread | |
405 /////////////////////////////////////////////////////////////////////////////// | |
406 | |
407 | |
408 /** | |
409 * This class encapsulates all threading functionality for the D | |
410 * programming language. As thread manipulation is a required facility | |
411 * for garbage collection, all user threads should derive from this | |
412 * class, and instances of this class should never be explicitly deleted. | |
413 * A new thread may be created using either derivation or composition, as | |
414 * in the following example. | |
415 * | |
416 * Example: | |
417 * ---------------------------------------------------------------------------- | |
418 * | |
419 * class DerivedThread : Thread | |
420 * { | |
421 * this() | |
422 * { | |
423 * super( &run ); | |
424 * } | |
425 * | |
426 * private : | |
427 * void run() | |
428 * { | |
429 * printf( "Derived thread running.\n" ); | |
430 * } | |
431 * } | |
432 * | |
433 * void threadFunc() | |
434 * { | |
435 * printf( "Composed thread running.\n" ); | |
436 * } | |
437 * | |
438 * // create instances of each type | |
439 * Thread derived = new DerivedThread(); | |
440 * Thread composed = new Thread( &threadFunc ); | |
441 * | |
442 * // start both threads | |
443 * derived.start(); | |
444 * composed.start(); | |
445 * | |
446 * ---------------------------------------------------------------------------- | |
447 */ | |
448 class Thread | |
449 { | |
450 /////////////////////////////////////////////////////////////////////////// | |
451 // Initialization | |
452 /////////////////////////////////////////////////////////////////////////// | |
453 | |
454 | |
455 /** | |
456 * Initializes a thread object which is associated with a static | |
457 * D function. | |
458 * | |
459 * Params: | |
460 * fn = The thread function. | |
461 * sz = The stack size for this thread. | |
462 * | |
463 * In: | |
464 * fn must not be null. | |
465 */ | |
466 this( void function() fn, size_t sz = 0 ) | |
467 in | |
468 { | |
469 assert( fn ); | |
470 } | |
471 body | |
472 { | |
473 m_fn = fn; | |
474 m_sz = sz; | |
475 m_call = Call.FN; | |
476 m_curr = &m_main; | |
477 } | |
478 | |
479 | |
480 /** | |
481 * Initializes a thread object which is associated with a dynamic | |
482 * D function. | |
483 * | |
484 * Params: | |
485 * dg = The thread function. | |
486 * sz = The stack size for this thread. | |
487 * | |
488 * In: | |
489 * dg must not be null. | |
490 */ | |
491 this( void delegate() dg, size_t sz = 0 ) | |
492 in | |
493 { | |
494 assert( dg ); | |
495 } | |
496 body | |
497 { | |
498 m_dg = dg; | |
499 m_sz = sz; | |
500 m_call = Call.DG; | |
501 m_curr = &m_main; | |
502 } | |
503 | |
504 | |
505 /** | |
506 * Cleans up any remaining resources used by this object. | |
507 */ | |
508 ~this() | |
509 { | |
510 if( m_addr == m_addr.init ) | |
511 { | |
512 return; | |
513 } | |
514 | |
515 version( Win32 ) | |
516 { | |
517 m_addr = m_addr.init; | |
518 CloseHandle( m_hndl ); | |
519 m_hndl = m_hndl.init; | |
520 } | |
521 else version( Posix ) | |
522 { | |
523 pthread_detach( m_addr ); | |
524 m_addr = m_addr.init; | |
525 } | |
526 } | |
527 | |
528 | |
529 /////////////////////////////////////////////////////////////////////////// | |
530 // General Actions | |
531 /////////////////////////////////////////////////////////////////////////// | |
532 | |
533 | |
534 /** | |
535 * Starts the thread and invokes the function or delegate passed upon | |
536 * construction. | |
537 * | |
538 * In: | |
539 * This routine may only be called once per thread instance. | |
540 * | |
541 * Throws: | |
542 * ThreadException if the thread fails to start. | |
543 */ | |
544 final void start() | |
545 in | |
546 { | |
547 assert( !next && !prev ); | |
548 } | |
549 body | |
550 { | |
551 version( Win32 ) {} else | |
552 version( Posix ) | |
553 { | |
554 pthread_attr_t attr; | |
555 | |
556 if( pthread_attr_init( &attr ) ) | |
557 throw new ThreadException( "Error initializing thread attributes" ); | |
558 if( m_sz && pthread_attr_setstacksize( &attr, m_sz ) ) | |
559 throw new ThreadException( "Error initializing thread stack size" ); | |
560 if( pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE ) ) | |
561 throw new ThreadException( "Error setting thread joinable" ); | |
562 } | |
563 | |
564 // NOTE: This operation needs to be synchronized to avoid a race | |
565 // condition with the GC. Without this lock, the thread | |
566 // could start and allocate memory before being added to | |
567 // the global thread list, preventing it from being scanned | |
568 // and causing memory to be collected that is still in use. | |
569 synchronized( slock ) | |
570 { | |
571 version( Win32 ) | |
572 { | |
573 m_hndl = cast(HANDLE) _beginthreadex( null, m_sz, &thread_entryPoint, cast(void*) this, 0, &m_addr ); | |
574 if( cast(size_t) m_hndl == 0 ) | |
575 throw new ThreadException( "Error creating thread" ); | |
576 } | |
577 else version( Posix ) | |
578 { | |
579 m_isRunning = true; | |
580 scope( failure ) m_isRunning = false; | |
581 | |
582 if( pthread_create( &m_addr, &attr, &thread_entryPoint, cast(void*) this ) != 0 ) | |
583 throw new ThreadException( "Error creating thread" ); | |
584 } | |
585 multiThreadedFlag = true; | |
586 add( this ); | |
587 } | |
588 } | |
589 | |
590 | |
591 /** | |
592 * Waits for this thread to complete. If the thread terminated as the | |
593 * result of an unhandled exception, this exception will be rethrown. | |
594 * | |
595 * Params: | |
596 * rethrow = Rethrow any unhandled exception which may have caused this | |
597 * thread to terminate. | |
598 * | |
599 * Throws: | |
600 * ThreadException if the operation fails. | |
601 * Any exception not handled by the joined thread. | |
602 * | |
603 * Returns: | |
604 * Any exception not handled by this thread if rethrow = false, null | |
605 * otherwise. | |
606 */ | |
607 final Object join( bool rethrow = true ) | |
608 { | |
609 version( Win32 ) | |
610 { | |
611 if( WaitForSingleObject( m_hndl, INFINITE ) != WAIT_OBJECT_0 ) | |
612 throw new ThreadException( "Unable to join thread" ); | |
613 // NOTE: m_addr must be cleared before m_hndl is closed to avoid | |
614 // a race condition with isRunning. The operation is labeled | |
615 // volatile to prevent compiler reordering. | |
616 volatile m_addr = m_addr.init; | |
617 CloseHandle( m_hndl ); | |
618 m_hndl = m_hndl.init; | |
619 } | |
620 else version( Posix ) | |
621 { | |
622 if( pthread_join( m_addr, null ) != 0 ) | |
623 throw new ThreadException( "Unable to join thread" ); | |
624 // NOTE: pthread_join acts as a substitute for pthread_detach, | |
625 // which is normally called by the dtor. Setting m_addr | |
626 // to zero ensures that pthread_detach will not be called | |
627 // on object destruction. | |
628 volatile m_addr = m_addr.init; | |
629 } | |
630 if( m_unhandled ) | |
631 { | |
632 if( rethrow ) | |
633 throw m_unhandled; | |
634 return m_unhandled; | |
635 } | |
636 return null; | |
637 } | |
638 | |
639 | |
640 /////////////////////////////////////////////////////////////////////////// | |
641 // General Properties | |
642 /////////////////////////////////////////////////////////////////////////// | |
643 | |
644 | |
645 /** | |
646 * Gets the user-readable label for this thread. | |
647 * | |
648 * Returns: | |
649 * The name of this thread. | |
650 */ | |
651 final char[] name() | |
652 { | |
653 synchronized( this ) | |
654 { | |
655 return m_name; | |
656 } | |
657 } | |
658 | |
659 | |
660 /** | |
661 * Sets the user-readable label for this thread. | |
662 * | |
663 * Params: | |
664 * val = The new name of this thread. | |
665 */ | |
666 final void name( char[] val ) | |
667 { | |
668 synchronized( this ) | |
669 { | |
670 m_name = val.dup; | |
671 } | |
672 } | |
673 | |
674 | |
675 /** | |
676 * Gets the daemon status for this thread. While the runtime will wait for | |
677 * all normal threads to complete before tearing down the process, daemon | |
678 * threads are effectively ignored and thus will not prevent the process | |
679 * from terminating. In effect, daemon threads will be terminated | |
680 * automatically by the OS when the process exits. | |
681 * | |
682 * Returns: | |
683 * true if this is a daemon thread. | |
684 */ | |
685 final bool isDaemon() | |
686 { | |
687 synchronized( this ) | |
688 { | |
689 return m_isDaemon; | |
690 } | |
691 } | |
692 | |
693 | |
694 /** | |
695 * Sets the daemon status for this thread. While the runtime will wait for | |
696 * all normal threads to complete before tearing down the process, daemon | |
697 * threads are effectively ignored and thus will not prevent the process | |
698 * from terminating. In effect, daemon threads will be terminated | |
699 * automatically by the OS when the process exits. | |
700 * | |
701 * Params: | |
702 * val = The new daemon status for this thread. | |
703 */ | |
704 final void isDaemon( bool val ) | |
705 { | |
706 synchronized( this ) | |
707 { | |
708 m_isDaemon = val; | |
709 } | |
710 } | |
711 | |
712 | |
713 /** | |
714 * Tests whether this thread is running. | |
715 * | |
716 * Returns: | |
717 * true if the thread is running, false if not. | |
718 */ | |
719 final bool isRunning() | |
720 { | |
721 if( m_addr == m_addr.init ) | |
722 { | |
723 return false; | |
724 } | |
725 | |
726 version( Win32 ) | |
727 { | |
728 uint ecode = 0; | |
729 GetExitCodeThread( m_hndl, &ecode ); | |
730 return ecode == STILL_ACTIVE; | |
731 } | |
732 else version( Posix ) | |
733 { | |
734 // NOTE: It should be safe to access this value without | |
735 // memory barriers because word-tearing and such | |
736 // really isn't an issue for boolean values. | |
737 return m_isRunning; | |
738 } | |
739 } | |
740 | |
741 | |
742 /////////////////////////////////////////////////////////////////////////// | |
743 // Thread Priority Actions | |
744 /////////////////////////////////////////////////////////////////////////// | |
745 | |
746 | |
747 /** | |
748 * The minimum scheduling priority that may be set for a thread. On | |
749 * systems where multiple scheduling policies are defined, this value | |
750 * represents the minimum valid priority for the scheduling policy of | |
751 * the process. | |
752 */ | |
753 static const int PRIORITY_MIN; | |
754 | |
755 | |
756 /** | |
757 * The maximum scheduling priority that may be set for a thread. On | |
758 * systems where multiple scheduling policies are defined, this value | |
759 * represents the minimum valid priority for the scheduling policy of | |
760 * the process. | |
761 */ | |
762 static const int PRIORITY_MAX; | |
763 | |
764 | |
765 /** | |
766 * Gets the scheduling priority for the associated thread. | |
767 * | |
768 * Returns: | |
769 * The scheduling priority of this thread. | |
770 */ | |
771 final int priority() | |
772 { | |
773 version( Win32 ) | |
774 { | |
775 return GetThreadPriority( m_hndl ); | |
776 } | |
777 else version( Posix ) | |
778 { | |
779 int policy; | |
780 sched_param param; | |
781 | |
782 if( pthread_getschedparam( m_addr, &policy, ¶m ) ) | |
783 throw new ThreadException( "Unable to get thread priority" ); | |
784 return param.sched_priority; | |
785 } | |
786 } | |
787 | |
788 | |
789 /** | |
790 * Sets the scheduling priority for the associated thread. | |
791 * | |
792 * Params: | |
793 * val = The new scheduling priority of this thread. | |
794 */ | |
795 final void priority( int val ) | |
796 { | |
797 version( Win32 ) | |
798 { | |
799 if( !SetThreadPriority( m_hndl, val ) ) | |
800 throw new ThreadException( "Unable to set thread priority" ); | |
801 } | |
802 else version( Posix ) | |
803 { | |
804 // NOTE: pthread_setschedprio is not implemented on linux, so use | |
805 // the more complicated get/set sequence below. | |
806 //if( pthread_setschedprio( m_addr, val ) ) | |
807 // throw new ThreadException( "Unable to set thread priority" ); | |
808 | |
809 int policy; | |
810 sched_param param; | |
811 | |
812 if( pthread_getschedparam( m_addr, &policy, ¶m ) ) | |
813 throw new ThreadException( "Unable to set thread priority" ); | |
814 param.sched_priority = val; | |
815 if( pthread_setschedparam( m_addr, policy, ¶m ) ) | |
816 throw new ThreadException( "Unable to set thread priority" ); | |
817 } | |
818 } | |
819 | |
820 | |
821 /////////////////////////////////////////////////////////////////////////// | |
822 // Actions on Calling Thread | |
823 /////////////////////////////////////////////////////////////////////////// | |
824 | |
825 | |
826 /** | |
827 * Suspends the calling thread for at least the supplied period. This may | |
828 * result in multiple OS calls if period is greater than the maximum sleep | |
829 * duration supported by the operating system. | |
830 * | |
831 * Params: | |
832 * period = The minimum duration the calling thread should be suspended, | |
833 * in 100 nanosecond intervals. | |
834 * | |
835 * In: | |
836 * period must be non-negative. | |
837 * | |
838 * Example: | |
839 * ------------------------------------------------------------------------ | |
840 * | |
841 * Thread.sleep( 500 ); // sleep for 50 milliseconds | |
842 * Thread.sleep( 50_000_000 ); // sleep for 5 seconds | |
843 * | |
844 * ------------------------------------------------------------------------ | |
845 */ | |
846 static void sleep( long period ) | |
847 in | |
848 { | |
849 assert( period >= 0 ); | |
850 } | |
851 body | |
852 { | |
853 version( Win32 ) | |
854 { | |
855 enum : uint | |
856 { | |
857 TICKS_PER_MILLI = 10_000, | |
858 MAX_SLEEP_MILLIS = uint.max - 1 | |
859 } | |
860 | |
861 period = period < TICKS_PER_MILLI ? | |
862 1 : | |
863 period / TICKS_PER_MILLI; | |
864 while( period > MAX_SLEEP_MILLIS ) | |
865 { | |
866 Sleep( MAX_SLEEP_MILLIS ); | |
867 period -= MAX_SLEEP_MILLIS; | |
868 } | |
869 Sleep( cast(uint) period ); | |
870 } | |
871 else version( Posix ) | |
872 { | |
873 timespec tin = void; | |
874 timespec tout = void; | |
875 | |
876 enum : uint | |
877 { | |
878 NANOS_PER_TICK = 100, | |
879 TICKS_PER_SECOND = 10_000_000, | |
880 } | |
881 enum : typeof(period) | |
882 { | |
883 MAX_SLEEP_TICKS = cast(typeof(period)) tin.tv_sec.max * TICKS_PER_SECOND | |
884 } | |
885 | |
886 do | |
887 { | |
888 if( period > MAX_SLEEP_TICKS ) | |
889 { | |
890 tin.tv_sec = tin.tv_sec.max; | |
891 tin.tv_nsec = 0; | |
892 } | |
893 else | |
894 { | |
895 tin.tv_sec = cast(typeof(tin.tv_sec)) (period / TICKS_PER_SECOND); | |
896 tin.tv_nsec = cast(typeof(tin.tv_nsec)) (period % TICKS_PER_SECOND) * NANOS_PER_TICK; | |
897 } | |
898 while( true ) | |
899 { | |
900 if( !nanosleep( &tin, &tout ) ) | |
901 return; | |
902 if( getErrno() != EINTR ) | |
903 throw new ThreadException( "Unable to sleep for the specified duration" ); | |
904 tin = tout; | |
905 } | |
906 period -= (cast(typeof(period)) tin.tv_sec) * TICKS_PER_SECOND; | |
907 period -= (cast(typeof(period)) tin.tv_nsec) / NANOS_PER_TICK; | |
908 } while( period > 0 ); | |
909 } | |
910 } | |
911 | |
912 | |
913 /** | |
914 * Forces a context switch to occur away from the calling thread. | |
915 */ | |
916 static void yield() | |
917 { | |
918 version( Win32 ) | |
919 { | |
920 // NOTE: Sleep(1) is necessary because Sleep(0) does not give | |
921 // lower priority threads any timeslice, so looping on | |
922 // Sleep(0) could be resource-intensive in some cases. | |
923 Sleep( 1 ); | |
924 } | |
925 else version( Posix ) | |
926 { | |
927 sched_yield(); | |
928 } | |
929 } | |
930 | |
931 | |
932 /////////////////////////////////////////////////////////////////////////// | |
933 // Thread Accessors | |
934 /////////////////////////////////////////////////////////////////////////// | |
935 | |
936 | |
937 /** | |
938 * Provides a reference to the calling thread. | |
939 * | |
940 * Returns: | |
941 * The thread object representing the calling thread. The result of | |
942 * deleting this object is undefined. | |
943 */ | |
944 static Thread getThis() | |
945 { | |
946 // NOTE: This function may not be called until thread_init has | |
947 // completed. See thread_suspendAll for more information | |
948 // on why this might occur. | |
949 version( Win32 ) | |
950 { | |
951 return cast(Thread) TlsGetValue( sm_this ); | |
952 } | |
953 else version( Posix ) | |
954 { | |
955 return cast(Thread) pthread_getspecific( sm_this ); | |
956 } | |
957 } | |
958 | |
959 | |
960 /** | |
961 * Provides a list of all threads currently being tracked by the system. | |
962 * | |
963 * Returns: | |
964 * An array containing references to all threads currently being | |
965 * tracked by the system. The result of deleting any contained | |
966 * objects is undefined. | |
967 */ | |
968 static Thread[] getAll() | |
969 { | |
970 synchronized( slock ) | |
971 { | |
972 size_t pos = 0; | |
973 Thread[] buf = new Thread[sm_tlen]; | |
974 | |
975 foreach( Thread t; Thread ) | |
976 { | |
977 buf[pos++] = t; | |
978 } | |
979 return buf; | |
980 } | |
981 } | |
982 | |
983 | |
984 /** | |
985 * Operates on all threads currently being tracked by the system. The | |
986 * result of deleting any Thread object is undefined. | |
987 * | |
988 * Params: | |
989 * dg = The supplied code as a delegate. | |
990 * | |
991 * Returns: | |
992 * Zero if all elemented are visited, nonzero if not. | |
993 */ | |
994 static int opApply( int delegate( inout Thread ) dg ) | |
995 { | |
996 synchronized( slock ) | |
997 { | |
998 int ret = 0; | |
999 | |
1000 for( Thread t = sm_tbeg; t; t = t.next ) | |
1001 { | |
1002 ret = dg( t ); | |
1003 if( ret ) | |
1004 break; | |
1005 } | |
1006 return ret; | |
1007 } | |
1008 } | |
1009 | |
1010 | |
1011 /////////////////////////////////////////////////////////////////////////// | |
1012 // Local Storage Actions | |
1013 /////////////////////////////////////////////////////////////////////////// | |
1014 | |
1015 | |
1016 /** | |
1017 * Indicates the number of local storage pointers available at program | |
1018 * startup. It is recommended that this number be at least 64. | |
1019 */ | |
1020 static const uint LOCAL_MAX = 64; | |
1021 | |
1022 | |
1023 /** | |
1024 * Reserves a local storage pointer for use and initializes this location | |
1025 * to null for all running threads. | |
1026 * | |
1027 * Returns: | |
1028 * A key representing the array offset of this memory location. | |
1029 */ | |
1030 static uint createLocal() | |
1031 { | |
1032 synchronized( slock ) | |
1033 { | |
1034 foreach( uint key, inout bool set; sm_local ) | |
1035 { | |
1036 if( !set ) | |
1037 { | |
1038 //foreach( Thread t; sm_tbeg ) Bug in GDC 0.24 SVN (r139) | |
1039 for( Thread t = sm_tbeg; t; t = t.next ) | |
1040 { | |
1041 t.m_local[key] = null; | |
1042 } | |
1043 set = true; | |
1044 return key; | |
1045 } | |
1046 } | |
1047 throw new ThreadException( "No more local storage slots available" ); | |
1048 } | |
1049 } | |
1050 | |
1051 | |
1052 /** | |
1053 * Marks the supplied key as available and sets the associated location | |
1054 * to null for all running threads. It is assumed that any key passed | |
1055 * to this function is valid. The result of calling this function for | |
1056 * a key which is still in use is undefined. | |
1057 * | |
1058 * Params: | |
1059 * key = The key to delete. | |
1060 */ | |
1061 static void deleteLocal( uint key ) | |
1062 { | |
1063 synchronized( slock ) | |
1064 { | |
1065 sm_local[key] = false; | |
1066 // foreach( Thread t; sm_tbeg ) Bug in GDC 0.24 SVN (r139) | |
1067 for( Thread t = sm_tbeg; t; t = t.next ) | |
1068 { | |
1069 t.m_local[key] = null; | |
1070 } | |
1071 } | |
1072 } | |
1073 | |
1074 | |
1075 /** | |
1076 * Loads the value stored at key within a thread-local static array. It is | |
1077 * assumed that any key passed to this function is valid. | |
1078 * | |
1079 * Params: | |
1080 * key = The location which holds the desired data. | |
1081 * | |
1082 * Returns: | |
1083 * The data associated with the supplied key. | |
1084 */ | |
1085 static void* getLocal( uint key ) | |
1086 { | |
1087 return getThis().m_local[key]; | |
1088 } | |
1089 | |
1090 | |
1091 /** | |
1092 * Stores the supplied value at key within a thread-local static array. It | |
1093 * is assumed that any key passed to this function is valid. | |
1094 * | |
1095 * Params: | |
1096 * key = The location to store the supplied data. | |
1097 * val = The data to store. | |
1098 * | |
1099 * Returns: | |
1100 * A copy of the data which has just been stored. | |
1101 */ | |
1102 static void* setLocal( uint key, void* val ) | |
1103 { | |
1104 return getThis().m_local[key] = val; | |
1105 } | |
1106 | |
1107 | |
1108 /////////////////////////////////////////////////////////////////////////// | |
1109 // Static Initalizer | |
1110 /////////////////////////////////////////////////////////////////////////// | |
1111 | |
1112 | |
    /**
     * This initializer is used to set thread constants.  All functional
     * initialization occurs within thread_init().
     */
    static this()
    {
        version( Win32 )
        {
            // Win32 thread priorities span a fixed [-15, 15] range
            PRIORITY_MIN = -15;
            PRIORITY_MAX = 15;
        }
        else version( Posix )
        {
            int policy;
            sched_param param;
            pthread_t self = pthread_self();

            // query this thread's scheduling policy so the priority range
            // below matches the policy actually in effect
            int status = pthread_getschedparam( self, &policy, &param );
            assert( status == 0 );

            PRIORITY_MIN = sched_get_priority_min( policy );
            assert( PRIORITY_MIN != -1 );

            PRIORITY_MAX = sched_get_priority_max( policy );
            assert( PRIORITY_MAX != -1 );
        }
    }
1140 | |
1141 | |
private:
    //
    // Initializes a thread object which has no associated executable
    // function.  This is used for the main thread initialized in
    // thread_init() (and for threads attached via thread_attachThis).
    //
    this()
    {
        m_call = Call.NO;    // nothing to invoke; run() becomes a no-op
        m_curr = &m_main;    // the main stack context is the active context
    }
1152 | |
1153 | |
1154 // | |
1155 // Thread entry point. Invokes the function or delegate passed on | |
1156 // construction (if any). | |
1157 // | |
1158 final void run() | |
1159 { | |
1160 switch( m_call ) | |
1161 { | |
1162 case Call.FN: | |
1163 m_fn(); | |
1164 break; | |
1165 case Call.DG: | |
1166 m_dg(); | |
1167 break; | |
1168 default: | |
1169 break; | |
1170 } | |
1171 } | |
1172 | |
1173 | |
private:
    //
    // The type of routine passed on thread construction; selects which
    // member of the union below is valid.
    //
    enum Call
    {
        NO,     // no callable (main/attached thread)
        FN,     // function pointer (m_fn)
        DG      // delegate (m_dg)
    }


    //
    // Platform-specific TLS key and native thread id types.
    //
    version( Win32 )
    {
        alias uint TLSKey;
        alias uint ThreadAddr;
    }
    else version( Posix )
    {
        alias pthread_key_t TLSKey;
        alias pthread_t ThreadAddr;
    }


    //
    // Local storage: sm_local marks which slots are allocated process-wide,
    // m_local holds this thread's slot values, and sm_this is the native
    // TLS key used by getThis/setThis to find the current Thread object.
    //
    static bool[LOCAL_MAX] sm_local;
    static TLSKey sm_this;

    void*[LOCAL_MAX] m_local;


    //
    // Standard thread data
    //
    version( Win32 )
    {
        HANDLE m_hndl;      // OS thread handle (Win32 only)
    }
    ThreadAddr m_addr;      // native thread id
    Call m_call;            // which member of the union below is valid
    char[] m_name;          // user-assigned thread name
    union
    {
        void function() m_fn;
        void delegate() m_dg;
    }
    size_t m_sz;            // requested stack size (0 presumably means default)
    version( Posix )
    {
        // Posix offers no API to query liveness, so it is tracked manually
        // (set in thread_attachThis; see also the NOTE in remove()).
        bool m_isRunning;
    }
    bool m_isDaemon;        // daemon threads do not block thread_joinAll
    Object m_unhandled;     // exception escaping the entry point, if any
1233 | |
private:
    ///////////////////////////////////////////////////////////////////////////
    // Storage of Active Thread
    ///////////////////////////////////////////////////////////////////////////


    //
    // Stores t in native TLS under sm_this so the Thread object for the
    // calling thread can later be recovered (by getThis).
    //
    static void setThis( Thread t )
    {
        version( Win32 )
        {
            TlsSetValue( sm_this, cast(void*) t );
        }
        else version( Posix )
        {
            pthread_setspecific( sm_this, cast(void*) t );
        }
    }
1254 | |
1255 | |
private:
    ///////////////////////////////////////////////////////////////////////////
    // Thread Context and GC Scanning Support
    ///////////////////////////////////////////////////////////////////////////


    //
    // Makes c the thread's current stack context, nesting it above the
    // previous one.  c must not already be nested anywhere.
    //
    final void pushContext( Context* c )
    in
    {
        assert( !c.within );
    }
    body
    {
        c.within = m_curr;
        m_curr = c;
    }


    //
    // Pops the current stack context, restoring the context it was nested
    // within and clearing the popped context's nesting link.
    //
    final void popContext()
    in
    {
        assert( m_curr && m_curr.within );
    }
    body
    {
        Context* c = m_curr;
        m_curr = c.within;
        c.within = null;
    }


    //
    // Returns the currently active stack context for this thread.
    //
    final Context* topContext()
    in
    {
        assert( m_curr );
    }
    body
    {
        return m_curr;
    }
1296 | |
1297 | |
    //
    // Describes one scannable stack range plus its links: 'within' forms the
    // per-thread nesting chain (see pushContext), while next/prev link the
    // global context list scanned by thread_scanAll.
    //
    static struct Context
    {
        void* bstack,       // stack bottom (origin)
              tstack;       // current stack top
        Context* within;    // context this one is nested inside, if any
        Context* next,      // global context list links
                 prev;
    }


    Context m_main;         // context for this thread's original stack
    Context* m_curr;        // innermost (active) context; starts as &m_main
    // while m_lock is set, suspend/resume/scan leave tstack untouched
    bool m_lock;

    version( Win32 )
    {
        uint[8] m_reg;      // registers saved at suspend: edi,esi,ebp,esp,ebx,edx,ecx,eax
    }
1316 | |
1317 | |
1318 private: | |
1319 /////////////////////////////////////////////////////////////////////////// | |
1320 // GC Scanning Support | |
1321 /////////////////////////////////////////////////////////////////////////// | |
1322 | |
1323 | |
1324 // NOTE: The GC scanning process works like so: | |
1325 // | |
1326 // 1. Suspend all threads. | |
1327 // 2. Scan the stacks of all suspended threads for roots. | |
1328 // 3. Resume all threads. | |
1329 // | |
1330 // Step 1 and 3 require a list of all threads in the system, while | |
1331 // step 2 requires a list of all thread stacks (each represented by | |
1332 // a Context struct). Traditionally, there was one stack per thread | |
1333 // and the Context structs were not necessary. However, Fibers have | |
1334 // changed things so that each thread has its own 'main' stack plus | |
1335 // an arbitrary number of nested stacks (normally referenced via | |
1336 // m_curr). Also, there may be 'free-floating' stacks in the system, | |
1337 // which are Fibers that are not currently executing on any specific | |
1338 // thread but are still being processed and still contain valid | |
1339 // roots. | |
1340 // | |
1341 // To support all of this, the Context struct has been created to | |
1342 // represent a stack range, and a global list of Context structs has | |
1343 // been added to enable scanning of these stack ranges. The lifetime | |
1344 // (and presence in the Context list) of a thread's 'main' stack will | |
// be equivalent to the thread's lifetime.  So the Context will be
1346 // added to the list on thread entry, and removed from the list on | |
1347 // thread exit (which is essentially the same as the presence of a | |
1348 // Thread object in its own global list). The lifetime of a Fiber's | |
1349 // context, however, will be tied to the lifetime of the Fiber object | |
1350 // itself, and Fibers are expected to add/remove their Context struct | |
1351 // on construction/deletion. | |
1352 | |
1353 | |
    //
    // All use of the global lists should synchronize on this lock.  The
    // Thread classinfo object serves as the monitor, so a lock object
    // exists without any allocation at runtime.
    //
    static Object slock()
    {
        return Thread.classinfo;
    }


    static Context* sm_cbeg;    // head of the global context list
    static size_t sm_clen;      // number of contexts in that list

    static Thread sm_tbeg;      // head of the global thread list
    static size_t sm_tlen;      // number of threads in that list

    //
    // Used for ordering threads in the global thread list.
    //
    Thread prev;
    Thread next;
1374 | |
1375 | |
1376 /////////////////////////////////////////////////////////////////////////// | |
1377 // Global Context List Operations | |
1378 /////////////////////////////////////////////////////////////////////////// | |
1379 | |
1380 | |
1381 // | |
1382 // Add a context to the global context list. | |
1383 // | |
1384 static void add( Context* c ) | |
1385 in | |
1386 { | |
1387 assert( c ); | |
1388 assert( !c.next && !c.prev ); | |
1389 } | |
1390 body | |
1391 { | |
1392 synchronized( slock ) | |
1393 { | |
1394 if( sm_cbeg ) | |
1395 { | |
1396 c.next = sm_cbeg; | |
1397 sm_cbeg.prev = c; | |
1398 } | |
1399 sm_cbeg = c; | |
1400 ++sm_clen; | |
1401 } | |
1402 } | |
1403 | |
1404 | |
1405 // | |
1406 // Remove a context from the global context list. | |
1407 // | |
1408 static void remove( Context* c ) | |
1409 in | |
1410 { | |
1411 assert( c ); | |
1412 assert( c.next || c.prev ); | |
1413 } | |
1414 body | |
1415 { | |
1416 synchronized( slock ) | |
1417 { | |
1418 if( c.prev ) | |
1419 c.prev.next = c.next; | |
1420 if( c.next ) | |
1421 c.next.prev = c.prev; | |
1422 if( sm_cbeg == c ) | |
1423 sm_cbeg = c.next; | |
1424 --sm_clen; | |
1425 } | |
1426 // NOTE: Don't null out c.next or c.prev because opApply currently | |
1427 // follows c.next after removing a node. This could be easily | |
1428 // addressed by simply returning the next node from this | |
1429 // function, however, a context should never be re-added to the | |
1430 // list anyway and having next and prev be non-null is a good way | |
1431 // to ensure that. | |
1432 } | |
1433 | |
1434 | |
1435 /////////////////////////////////////////////////////////////////////////// | |
1436 // Global Thread List Operations | |
1437 /////////////////////////////////////////////////////////////////////////// | |
1438 | |
1439 | |
1440 // | |
1441 // Add a thread to the global thread list. | |
1442 // | |
1443 static void add( Thread t ) | |
1444 in | |
1445 { | |
1446 assert( t ); | |
1447 assert( !t.next && !t.prev ); | |
1448 assert( t.isRunning ); | |
1449 } | |
1450 body | |
1451 { | |
1452 synchronized( slock ) | |
1453 { | |
1454 if( sm_tbeg ) | |
1455 { | |
1456 t.next = sm_tbeg; | |
1457 sm_tbeg.prev = t; | |
1458 } | |
1459 sm_tbeg = t; | |
1460 ++sm_tlen; | |
1461 } | |
1462 } | |
1463 | |
1464 | |
    //
    // Remove a thread from the global thread list, along with its main
    // stack context.
    //
    static void remove( Thread t )
    in
    {
        assert( t );
        assert( t.next || t.prev );
        version( Win32 )
        {
            // NOTE: This doesn't work for Posix as m_isRunning must be set to
            //       false after the thread is removed during normal execution.
            assert( !t.isRunning );
        }
    }
    body
    {
        synchronized( slock )
        {
            // NOTE: When a thread is removed from the global thread list its
            //       main context is invalid and should be removed as well.
            //       It is possible that t.m_curr could reference more
            //       than just the main context if the thread exited abnormally
            //       (if it was terminated), but we must assume that the user
            //       retains a reference to them and that they may be re-used
            //       elsewhere.  Therefore, it is the responsibility of any
            //       object that creates contexts to clean them up properly
            //       when it is done with them.
            remove( &t.m_main );

            if( t.prev )
                t.prev.next = t.next;
            if( t.next )
                t.next.prev = t.prev;
            if( sm_tbeg == t )
                sm_tbeg = t.next;
            --sm_tlen;
        }
        // NOTE: Don't null out t.next or t.prev because opApply currently
        //       follows t.next after removing a node.  This could be easily
        //       addressed by simply returning the next node from this
        //       function; however, a thread should never be re-added to the
        //       list anyway and having next and prev be non-null is a good
        //       way to ensure that.
    }
1510 } | |
1511 | |
1512 | |
1513 /////////////////////////////////////////////////////////////////////////////// | |
1514 // GC Support Routines | |
1515 /////////////////////////////////////////////////////////////////////////////// | |
1516 | |
1517 | |
/**
 * Initializes the thread module.  This function must be called by the
 * garbage collector on startup and before any other thread routines
 * are called.
 */
extern (C) void thread_init()
{
    // NOTE: If thread_init itself performs any allocations then the thread
    //       routines reserved for garbage collector use may be called while
    //       thread_init is being processed.  However, since no memory should
    //       exist to be scanned at this point, it is sufficient for these
    //       functions to detect the condition and return immediately.

    version( Win32 )
    {
        // allocate the TLS slot used by Thread.setThis/getThis
        Thread.sm_this = TlsAlloc();
        assert( Thread.sm_this != TLS_OUT_OF_INDEXES );
    }
    else version( Posix )
    {
        int status;
        sigaction_t sigusr1 = void;
        sigaction_t sigusr2 = void;

        // This is a quick way to zero-initialize the structs without using
        // memset or creating a link dependency on their static initializer.
        (cast(byte*) &sigusr1)[0 .. sigaction_t.sizeof] = 0;
        (cast(byte*) &sigusr2)[0 .. sigaction_t.sizeof] = 0;

        // NOTE: SA_RESTART indicates that system calls should restart if they
        //       are interrupted by a signal, but this is not available on all
        //       Posix systems, even those that support multithreading.
        static if( is( typeof( SA_RESTART ) ) )
            sigusr1.sa_flags = SA_RESTART;
        else
            sigusr1.sa_flags = 0;
        sigusr1.sa_handler = &thread_suspendHandler;
        // NOTE: We want to ignore all signals while in this handler, so fill
        //       sa_mask to indicate this.
        status = sigfillset( &sigusr1.sa_mask );
        assert( status == 0 );

        // NOTE: Since SIGUSR2 should only be issued for threads within the
        //       suspend handler, we don't want this signal to trigger a
        //       restart.
        sigusr2.sa_flags = 0;
        sigusr2.sa_handler = &thread_resumeHandler;
        // NOTE: We want to ignore all signals while in this handler, so fill
        //       sa_mask to indicate this.
        status = sigfillset( &sigusr2.sa_mask );
        assert( status == 0 );

        // install the GC suspend (SIGUSR1) and resume (SIGUSR2) handlers
        status = sigaction( SIGUSR1, &sigusr1, null );
        assert( status == 0 );

        status = sigaction( SIGUSR2, &sigusr2, null );
        assert( status == 0 );

        // semaphore the suspend handler posts to acknowledge it has stopped
        status = sem_init( &suspendCount, 0, 0 );
        assert( status == 0 );

        status = pthread_key_create( &Thread.sm_this, null );
        assert( status == 0 );
    }

    // register the calling (main) thread with the runtime
    thread_attachThis();
}
1585 | |
1586 | |
/**
 * Registers the calling thread for use with the D Runtime.  If this routine
 * is called for a thread which is already registered, the result is undefined.
 */
extern (C) void thread_attachThis()
{
    // NOTE: version blocks do not introduce a new scope in D, so thisThread
    //       and thisContext declared below remain visible at the end of the
    //       function where the thread is added to the global lists.
    version( Win32 )
    {
        Thread thisThread = new Thread();
        Thread.Context* thisContext = &thisThread.m_main;
        assert( thisContext == thisThread.m_curr );

        thisThread.m_addr = GetCurrentThreadId();
        thisThread.m_hndl = GetCurrentThreadHandle();
        thisContext.bstack = getStackBottom();
        thisContext.tstack = thisContext.bstack;  // empty stack range for now

        // attached threads are daemons: they don't block thread_joinAll
        thisThread.m_isDaemon = true;

        Thread.setThis( thisThread );
    }
    else version( Posix )
    {
        Thread thisThread = new Thread();
        Thread.Context* thisContext = thisThread.m_curr;
        assert( thisContext == &thisThread.m_main );

        thisThread.m_addr = pthread_self();
        thisContext.bstack = getStackBottom();
        thisContext.tstack = thisContext.bstack;  // empty stack range for now

        thisThread.m_isRunning = true;  // no Posix liveness API; tracked manually
        thisThread.m_isDaemon = true;

        Thread.setThis( thisThread );
    }

    Thread.add( thisThread );
    Thread.add( thisContext );
}
1627 | |
1628 | |
/**
 * Deregisters the calling thread from use with the runtime.  If this routine
 * is called for a thread which is not registered, the result is undefined.
 * (The original comment said "already registered", apparently copied from
 * thread_attachThis.)
 */
extern (C) void thread_detachThis()
{
    Thread.remove( Thread.getThis() );
}
1637 | |
1638 | |
/**
 * Joins all non-daemon threads that are currently running.  The thread
 * list is scanned repeatedly — joining one thread may allow new threads
 * to start or others to exit — until a pass finds only daemon threads.
 */
extern (C) void thread_joinAll()
{
    while( true )
    {
        Thread nonDaemon = null;

        foreach( t; Thread )
        {
            if( t.isDaemon )
                continue;
            nonDaemon = t;
            break;
        }
        if( nonDaemon is null )
            break;
        nonDaemon.join();
    }
}
1664 | |
1665 | |
/**
 * Performs intermediate shutdown of the thread module.
 */
static ~this()
{
    // NOTE: The functionality related to garbage collection must be minimally
    //       operable after this dtor completes.  Therefore, only minimal
    //       cleanup may occur.

    // Prune threads which are no longer running.  Thread.remove leaves
    // t.next intact (see the NOTE there), so advancing via t.next after a
    // removal is safe.
    for( Thread t = Thread.sm_tbeg; t; t = t.next )
    {
        if( !t.isRunning )
            Thread.remove( t );
    }
}
1681 | |
1682 | |
// Used for needLock below.  Presumably set by Thread.start() (not visible
// in this chunk) once a second thread has ever been started.
private bool multiThreadedFlag = false;
1685 | |
1686 | |
/**
 * This function is used to determine whether the process is
 * multi-threaded.  Optimizations may only be performed on this
 * value if the programmer can guarantee that no path from the
 * enclosed code will start a thread.
 *
 * Returns:
 *  True if Thread.start() has been called in this process.
 */
extern (C) bool thread_needLock()
{
    return multiThreadedFlag;
}
1700 | |
1701 | |
// Used for suspendAll/resumeAll below: counts nested thread_suspendAll
// calls; threads are only resumed when this returns to zero.
private uint suspendDepth = 0;
1704 | |
1705 | |
/**
 * Suspend all threads but the calling thread for "stop the world" garbage
 * collection runs.  This function may be called multiple times, and must
 * be followed by a matching number of calls to thread_resumeAll before
 * processing is resumed.
 *
 * Throws:
 *  ThreadException if the suspend operation fails for a running thread.
 */
extern (C) void thread_suspendAll()
{
    /**
     * Suspend the specified thread and load stack and register information for
     * use by thread_scanAll.  If the supplied thread is the calling thread,
     * stack and register information will be loaded but the thread will not
     * be suspended.  If the suspend operation fails and the thread is not
     * running then it will be removed from the global thread list, otherwise
     * an exception will be thrown.
     *
     * Params:
     *  t = The thread to suspend.
     *
     * Throws:
     *  ThreadException if the suspend operation fails for a running thread.
     */
    void suspend( Thread t )
    {
        version( Win32 )
        {
            if( t.m_addr != GetCurrentThreadId() && SuspendThread( t.m_hndl ) == 0xFFFFFFFF )
            {
                // a dead thread can't be suspended; just drop it from the list
                if( !t.isRunning )
                {
                    Thread.remove( t );
                    return;
                }
                throw new ThreadException( "Unable to suspend thread" );
            }

            CONTEXT context = void;
            context.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;

            if( !GetThreadContext( t.m_hndl, &context ) )
                throw new ThreadException( "Unable to load thread context" );
            // m_lock set means the thread manages tstack itself right now
            if( !t.m_lock )
                t.m_curr.tstack = cast(void*) context.Esp;
            // save integer registers for scanning: edi,esi,ebp,esp,ebx,edx,ecx,eax
            t.m_reg[0] = context.Edi;
            t.m_reg[1] = context.Esi;
            t.m_reg[2] = context.Ebp;
            t.m_reg[3] = context.Esp;
            t.m_reg[4] = context.Ebx;
            t.m_reg[5] = context.Edx;
            t.m_reg[6] = context.Ecx;
            t.m_reg[7] = context.Eax;
        }
        else version( Posix )
        {
            if( t.m_addr != pthread_self() )
            {
                // SIGUSR1 triggers thread_suspendHandler in the target thread
                if( pthread_kill( t.m_addr, SIGUSR1 ) != 0 )
                {
                    if( !t.isRunning )
                    {
                        Thread.remove( t );
                        return;
                    }
                    throw new ThreadException( "Unable to suspend thread" );
                }
                // NOTE: It's really not ideal to wait for each thread to
                //       signal individually -- rather, it would be better to
                //       suspend them all and wait once at the end.  However,
                //       semaphores don't really work this way, and the obvious
                //       alternative (looping on an atomic suspend count)
                //       requires either the atomic module (which only works on
                //       x86) or other specialized functionality.  It would
                //       also be possible to simply loop on sem_wait at the
                //       end, but I'm not convinced that this would be much
                //       faster than the current approach.
                sem_wait( &suspendCount );
            }
            else if( !t.m_lock )
            {
                // calling thread: just record its current stack top
                t.m_curr.tstack = getStackTop();
            }
        }
    }


    // NOTE: We've got an odd chicken & egg problem here, because while the GC
    //       is required to call thread_init before calling any other thread
    //       routines, thread_init may allocate memory which could in turn
    //       trigger a collection.  Thus, thread_suspendAll, thread_scanAll,
    //       and thread_resumeAll must be callable before thread_init
    //       completes, with the assumption that no other GC memory has yet
    //       been allocated by the system, and thus there is no risk of losing
    //       data if the global thread list is empty.  The check of
    //       Thread.sm_tbeg below is done to ensure thread_init has completed,
    //       and therefore that calling Thread.getThis will not result in an
    //       error.  For the short time when Thread.sm_tbeg is null, there is
    //       no reason not to simply call the multithreaded code below, with
    //       the expectation that the foreach loop will never be entered.
    if( !multiThreadedFlag && Thread.sm_tbeg )
    {
        // single-threaded fast path: only record the caller's stack
        if( ++suspendDepth == 1 )
            suspend( Thread.getThis() );
        return;
    }
    synchronized( Thread.slock )
    {
        // nested suspend: everything is already stopped
        if( ++suspendDepth > 1 )
            return;

        // NOTE: I'd really prefer not to check isRunning within this loop but
        //       not doing so could be problematic if threads are terminated
        //       abnormally and a new thread is created with the same thread
        //       address before the next GC run.  This situation might cause
        //       the same thread to be suspended twice, which would likely
        //       cause the second suspend to fail, the garbage collection to
        //       abort, and Bad Things to occur.
        for( Thread t = Thread.sm_tbeg; t; t = t.next )
        {
            if( t.isRunning )
                suspend( t );
            else
                Thread.remove( t );
        }

        version( Posix )
        {
            // wait on semaphore -- see note in suspend for
            // why this is currently not implemented
        }
    }
}
1841 | |
1842 | |
/**
 * Resume all threads but the calling thread for "stop the world" garbage
 * collection runs.  This function must be called once for each preceding
 * call to thread_suspendAll before the threads are actually resumed.
 *
 * In:
 *  This routine must be preceded by a call to thread_suspendAll.
 *
 * Throws:
 *  ThreadException if the resume operation fails for a running thread.
 */
extern (C) void thread_resumeAll()
in
{
    assert( suspendDepth > 0 );
}
body
{
    /**
     * Resume the specified thread and unload stack and register information.
     * If the supplied thread is the calling thread, stack and register
     * information will be unloaded but the thread will not be resumed.  If
     * the resume operation fails and the thread is not running then it will
     * be removed from the global thread list, otherwise an exception will be
     * thrown.
     *
     * Params:
     *  t = The thread to resume.
     *
     * Throws:
     *  ThreadException if the resume fails for a running thread.
     */
    void resume( Thread t )
    {
        version( Win32 )
        {
            if( t.m_addr != GetCurrentThreadId() && ResumeThread( t.m_hndl ) == 0xFFFFFFFF )
            {
                // a dead thread can't be resumed; just drop it from the list
                if( !t.isRunning )
                {
                    Thread.remove( t );
                    return;
                }
                throw new ThreadException( "Unable to resume thread" );
            }

            // collapse the recorded stack range and clear the saved registers
            if( !t.m_lock )
                t.m_curr.tstack = t.m_curr.bstack;
            t.m_reg[0 .. $] = 0;
        }
        else version( Posix )
        {
            if( t.m_addr != pthread_self() )
            {
                // SIGUSR2 releases the target from thread_suspendHandler
                if( pthread_kill( t.m_addr, SIGUSR2 ) != 0 )
                {
                    if( !t.isRunning )
                    {
                        Thread.remove( t );
                        return;
                    }
                    throw new ThreadException( "Unable to resume thread" );
                }
            }
            else if( !t.m_lock )
            {
                t.m_curr.tstack = t.m_curr.bstack;
            }
        }
    }


    // NOTE: See thread_suspendAll for the logic behind this.
    if( !multiThreadedFlag && Thread.sm_tbeg )
    {
        if( --suspendDepth == 0 )
            resume( Thread.getThis() );
        return;
    }
    synchronized( Thread.slock )
    {
        // only resume once the outermost suspend has been undone
        if( --suspendDepth > 0 )
            return;

        for( Thread t = Thread.sm_tbeg; t; t = t.next )
        {
            resume( t );
        }
    }
}
1933 | |
1934 | |
// Signature of the GC scanning callback accepted by thread_scanAll; it is
// handed (p1, p2) pairs and should scan from p1 through p2 - 1.
private alias void delegate( void*, void* ) scanAllThreadsFn;
1936 | |
1937 | |
/**
 * The main entry point for garbage collection.  The supplied delegate
 * will be passed ranges representing both stack and register values.
 *
 * Params:
 *  scan        = The scanner function.  It should scan from p1 through p2 - 1.
 *  curStackTop = An optional pointer to the top of the calling thread's stack.
 *
 * In:
 *  This routine must be preceded by a call to thread_suspendAll.
 */
extern (C) void thread_scanAll( scanAllThreadsFn scan, void* curStackTop = null )
in
{
    assert( suspendDepth > 0 );
}
body
{
    Thread thisThread = null;
    void* oldStackTop = null;

    // Temporarily record the caller's actual stack top so the loop below
    // scans the caller's live stack range as well.
    if( curStackTop && Thread.sm_tbeg )
    {
        thisThread = Thread.getThis();
        if( !thisThread.m_lock )
        {
            oldStackTop = thisThread.m_curr.tstack;
            thisThread.m_curr.tstack = curStackTop;
        }
    }

    // restore the saved stack top on every exit path, including throws
    scope( exit )
    {
        if( curStackTop && Thread.sm_tbeg )
        {
            if( !thisThread.m_lock )
            {
                thisThread.m_curr.tstack = oldStackTop;
            }
        }
    }

    // NOTE: Synchronizing on Thread.slock is not needed because this
    //       function may only be called after all other threads have
    //       been suspended from within the same lock.
    for( Thread.Context* c = Thread.sm_cbeg; c; c = c.next )
    {
        version( StackGrowsDown )
        {
            // NOTE: We can't index past the bottom of the stack
            //       so don't do the "+1" for StackGrowsDown.
            if( c.tstack && c.tstack < c.bstack )
                scan( c.tstack, c.bstack );
        }
        else
        {
            if( c.bstack && c.bstack < c.tstack )
                scan( c.bstack, c.tstack + 1 );
        }
    }
    version( Win32 )
    {
        // registers saved by thread_suspendAll may hold the only reference
        // to an object, so they must be scanned too
        for( Thread t = Thread.sm_tbeg; t; t = t.next )
        {
            scan( &t.m_reg[0], &t.m_reg[0] + t.m_reg.length );
        }
    }
}
2006 | |
2007 | |
2008 /////////////////////////////////////////////////////////////////////////////// | |
2009 // Thread Local | |
2010 /////////////////////////////////////////////////////////////////////////////// | |
2011 | |
2012 | |
/**
 * This class encapsulates the operations required to initialize, access, and
 * destroy thread local data.
 */
class ThreadLocal( T )
{
    ///////////////////////////////////////////////////////////////////////////
    // Initialization
    ///////////////////////////////////////////////////////////////////////////


    /**
     * Reserves a thread-local slot for the indicated value, which will read
     * as def in every thread until that thread explicitly sets it.
     *
     * Params:
     *  def = The default value to return if no value has been explicitly set.
     */
    this( T def = T.init )
    {
        m_def = def;
        m_key = Thread.createLocal();
    }


    ~this()
    {
        // release the slot so it may be reused by a later createLocal
        Thread.deleteLocal( m_key );
    }


    ///////////////////////////////////////////////////////////////////////////
    // Accessors
    ///////////////////////////////////////////////////////////////////////////


    /**
     * Gets the value last set by the calling thread, or def if no such value
     * has been set.
     *
     * Returns:
     *  The stored value or def if no value is stored.
     */
    T val()
    {
        auto wrap = cast(Wrap*) Thread.getLocal( m_key );

        if( wrap is null )
            return m_def;
        return wrap.val;
    }


    /**
     * Copies newval to a location specific to the calling thread, and returns
     * newval.
     *
     * Params:
     *  newval = The value to set.
     *
     * Returns:
     *  The value passed to this function.
     */
    T val( T newval )
    {
        auto wrap = cast(Wrap*) Thread.getLocal( m_key );

        if( wrap is null )
        {
            // first store by this thread: allocate its wrapper lazily
            wrap = new Wrap;
            Thread.setLocal( m_key, wrap );
        }
        wrap.val = newval;
        return newval;
    }


private:
    //
    // A wrapper for the stored data.  Its presence distinguishes "never set
    // by this thread" (return m_def) from any stored value, and it also
    // flattens the differences between data smaller and larger than
    // (void*).sizeof.  The tradeoff is one extra per-thread allocation per
    // ThreadLocal value versus using the Thread routines directly.
    //
    struct Wrap
    {
        T val;
    }


    T    m_def;     // value observed by threads which never called the setter
    uint m_key;     // slot obtained from Thread.createLocal
}
2106 | |
2107 | |
2108 /////////////////////////////////////////////////////////////////////////////// | |
2109 // Thread Group | |
2110 /////////////////////////////////////////////////////////////////////////////// | |
2111 | |
2112 | |
2113 /** | |
2114 * This class is intended to simplify certain common programming techniques. | |
2115 */ | |
/**
 * This class is intended to simplify certain common programming techniques.
 */
class ThreadGroup
{
    /**
     * Creates and starts a new Thread object that executes fn and adds it to
     * the list of tracked threads.
     *
     * Params:
     *  fn = The thread function.
     *
     * Returns:
     *  A reference to the newly created thread.
     */
    final Thread create( void function() fn )
    {
        auto newThread = new Thread( fn );

        newThread.start();
        synchronized( this )
        {
            m_all[newThread] = newThread;
        }
        return newThread;
    }


    /**
     * Creates and starts a new Thread object that executes dg and adds it to
     * the list of tracked threads.
     *
     * Params:
     *  dg = The thread function.
     *
     * Returns:
     *  A reference to the newly created thread.
     */
    final Thread create( void delegate() dg )
    {
        auto newThread = new Thread( dg );

        newThread.start();
        synchronized( this )
        {
            m_all[newThread] = newThread;
        }
        return newThread;
    }


    /**
     * Add t to the list of tracked threads if it is not already being tracked.
     *
     * Params:
     *  t = The thread to add.
     *
     * In:
     *  t must not be null.
     */
    final void add( Thread t )
    in
    {
        assert( t );
    }
    body
    {
        synchronized( this )
        {
            // keying the AA on the thread itself gives set semantics, so
            // adding an already-tracked thread is a harmless overwrite
            m_all[t] = t;
        }
    }


    /**
     * Removes t from the list of tracked threads.  No operation will be
     * performed if t is not currently being tracked by this object.
     *
     * Params:
     *  t = The thread to remove.
     *
     * In:
     *  t must not be null.
     */
    final void remove( Thread t )
    in
    {
        assert( t );
    }
    body
    {
        synchronized( this )
        {
            m_all.remove( t );
        }
    }


    /**
     * Operates on all threads currently tracked by this object.
     */
    final int opApply( int delegate( inout Thread ) dg )
    {
        synchronized( this )
        {
            // NOTE: m_all maps every tracked Thread onto itself, so walking
            //       the key set visits each thread exactly once.
            foreach( Thread t; m_all.keys )
            {
                if( int res = dg( t ) )
                    return res;
            }
            return 0;
        }
    }


    /**
     * Iteratively joins all tracked threads.  This function will block add,
     * remove, and opApply until it completes.
     *
     * Params:
     *  rethrow = Rethrow any unhandled exception which may have caused the
     *            current thread to terminate.
     *
     * Throws:
     *  Any exception not handled by the joined threads.
     */
    final void joinAll( bool rethrow = true )
    {
        synchronized( this )
        {
            // NOTE: m_all maps every tracked Thread onto itself, so walking
            //       the key set visits each thread exactly once.
            foreach( Thread t; m_all.keys )
                t.join( rethrow );
        }
    }


private:
    Thread[Thread] m_all; // tracked threads, keyed on themselves (set semantics)
}
2261 | |
2262 | |
2263 /////////////////////////////////////////////////////////////////////////////// | |
2264 // Fiber Platform Detection and Memory Allocation | |
2265 /////////////////////////////////////////////////////////////////////////////// | |
2266 | |
2267 | |
private
{
    // Map the compiler-provided inline-asm capability versions onto
    // platform-specific implementation identifiers used below:
    //   AsmX86_Win32 / AsmX86_Posix   - DMD-style x86 inline asm
    //   AsmPPC_Posix                  - externally supplied PPC routine
    //   LLVM_AsmX86_*                 - LDC's inline asm variants
    // Targets with none of these fall back to POSIX ucontext (if available).
    version( D_InlineAsm_X86 )
    {
        version( X86_64 )
        {
            // no x86-64 asm fiber support yet
        }
        else
        {
            version( Win32 )
                version = AsmX86_Win32;
            else version( Posix )
                version = AsmX86_Posix;
        }
    }
    else version( PPC )
    {
        version( Posix )
            version = AsmPPC_Posix;
    }

    version( LLVM_InlineAsm_X86 )
    {
        version( Win32 )
            version = LLVM_AsmX86_Win32;
        else version( Posix )
            version = LLVM_AsmX86_Posix;
    }
    else version( LLVM_InlineAsm_X86_64 )
    {
        version( Posix )
            version = LLVM_AsmX86_64_Posix;
    }

    version( Posix )
    {
        import stdc.posix.unistd;   // for sysconf
        import stdc.posix.sys.mman; // for mmap
        import stdc.posix.stdlib;   // for malloc, valloc, free

        version( AsmX86_Win32 ) {} else
        version( AsmX86_Posix ) {} else
        version( AsmPPC_Posix ) {} else
        version( LLVM_AsmX86_Win32 ) {} else
        version( LLVM_AsmX86_Posix ) {} else
        //TODO: Enable when x86-64 Posix supports fibers
        //version( LLVM_AsmX86_64_Posix ) {} else
        {
            // NOTE: The ucontext implementation requires architecture specific
            //       data definitions to operate so testing for it must be done
            //       by checking for the existence of ucontext_t rather than by
            //       a version identifier.  Please note that this is considered
            //       an obsolescent feature according to the POSIX spec, so a
            //       custom solution is still preferred.
            import stdc.posix.ucontext;
        }
    }

    // System page size in bytes; filled in once by the static ctor below.
    const size_t PAGESIZE;
}
2329 | |
2330 | |
static this()
{
    // Determine the system page size once at startup.  Preference order:
    // Win32 GetSystemInfo, then POSIX sysconf(_SC_PAGESIZE), and finally a
    // hard-coded per-architecture guess when neither API is declared.
    static if( is( typeof( GetSystemInfo ) ) )
    {
        SYSTEM_INFO info;
        GetSystemInfo( &info );

        PAGESIZE = info.dwPageSize;
        assert( PAGESIZE < int.max ); // sanity check: sizes below are done in int-sized math
    }
    else static if( is( typeof( sysconf ) ) &&
                    is( typeof( _SC_PAGESIZE ) ) )
    {
        PAGESIZE = cast(size_t) sysconf( _SC_PAGESIZE );
        assert( PAGESIZE < int.max );
    }
    else
    {
        // fallback guesses; PPC systems conventionally use larger pages
        version( PPC )
            PAGESIZE = 8192;
        else
            PAGESIZE = 4096;
    }
}
2355 | |
2356 | |
2357 /////////////////////////////////////////////////////////////////////////////// | |
2358 // Fiber Entry Point and Context Switch | |
2359 /////////////////////////////////////////////////////////////////////////////// | |
2360 | |
2361 | |
2362 private | |
2363 { | |
    //
    // First function executed on a brand-new fiber stack (its address is
    // planted on the stack by Fiber.initStack).  Runs the user routine and
    // then switches back out permanently with state TERM.
    //
    extern (C) void fiber_entryPoint()
    {
        Fiber   obj = Fiber.getThis();
        assert( obj );

        // We arrive here mid-switchIn: the owning thread's m_lock is still
        // set and m_curr already points at this fiber's context, so complete
        // the handshake before running user code.
        assert( Thread.getThis().m_curr is obj.m_ctxt );
        volatile Thread.getThis().m_lock = false;
        obj.m_ctxt.tstack = obj.m_ctxt.bstack;
        obj.m_state = Fiber.State.EXEC;

        try
        {
            obj.run();
        }
        catch( Object o )
        {
            // stash any escaping exception for Fiber.call to rethrow/return
            obj.m_unhandled = o;
        }

        static if( is( ucontext_t ) )
          obj.m_ucur = &obj.m_utxt;

        // never returns: switchOut resumes the caller of Fiber.call
        obj.m_state = Fiber.State.TERM;
        obj.switchOut();
    }
2389 | |
2390 | |
    // NOTE: If AsmPPC_Posix is defined then the context switch routine will
    //       be defined externally until GDC supports inline PPC ASM.
    version( AsmPPC_Posix )
        extern (C) void fiber_switchContext( void** oldp, void* newp );
    else
        extern (C) void fiber_switchContext( void** oldp, void* newp )
        {
            // NOTE: The data pushed and popped in this routine must match the
            //       default stack created by Fiber.initStack or the initial
            //       switch into a new context will fail.
            //
            // Saves the current register state on the old stack, writes the
            // resulting stack pointer through oldp, then installs newp as the
            // stack pointer and restores the state found there.

            version( AsmX86_Win32 )
            {
                asm
                {
                    naked;

                    // save current stack state
                    push EBP;
                    mov  EBP, ESP;
                    push EAX;
                    push dword ptr FS:[0]; // SEH frame chain
                    push dword ptr FS:[4]; // stack base
                    push dword ptr FS:[8]; // stack limit
                    push EBX;
                    push ESI;
                    push EDI;

                    // store oldp again with more accurate address
                    mov EAX, dword ptr 8[EBP];
                    mov [EAX], ESP;
                    // load newp to begin context switch
                    mov ESP, dword ptr 12[EBP];

                    // load saved state from new stack
                    pop EDI;
                    pop ESI;
                    pop EBX;
                    pop dword ptr FS:[8];
                    pop dword ptr FS:[4];
                    pop dword ptr FS:[0];
                    pop EAX;
                    pop EBP;

                    // 'return' to complete switch: pops the saved EIP
                    ret;
                }
            }
            else version( AsmX86_Posix )
            {
                asm
                {
                    naked;

                    // save current stack state
                    push EBP;
                    mov  EBP, ESP;
                    push EAX;
                    push EBX;
                    push ESI;
                    push EDI;

                    // store oldp again with more accurate address
                    mov EAX, dword ptr 8[EBP];
                    mov [EAX], ESP;
                    // load newp to begin context switch
                    mov ESP, dword ptr 12[EBP];

                    // load saved state from new stack
                    pop EDI;
                    pop ESI;
                    pop EBX;
                    pop EAX;
                    pop EBP;

                    // 'return' to complete switch: pops the saved EIP
                    ret;
                }
            }
            else version( LLVM_AsmX86_Posix )
            {
                // Not a naked function under LDC: the prologue/epilogue save
                // and restore EBP, and the epilogue's ret consumes the EIP
                // slot laid down by initStack.
                asm
                {
                    // clobber registers so the compiler saves them around the asm
                    inc EBX;
                    inc ESI;
                    inc EDI;

                    // store oldp with the switch-point stack pointer
                    mov EAX, oldp;
                    mov [EAX], ESP;
                    // load newp to begin the context switch
                    mov ESP, newp;
                }
            }
            // NOTE(review): LLVM_AsmX86_Win32 is declared above but has no
            //               branch here, so that target would fall through to
            //               the ucontext test — confirm fibers are unsupported
            //               or handled elsewhere for that configuration.
            /+
            version( LLVM_AsmX86_64_Posix )
            {
                //TODO: Fiber implementation here
            }
            +/
            else static if( is( ucontext_t ) )
            {
                // ucontext fallback: m_ucur of the current fiber (or the
                // static main-thread context) was set by the caller before
                // switching; swapcontext saves into it and resumes *newp.
                Fiber   cfib = Fiber.getThis();
                void*   ucur = cfib.m_ucur;

                *oldp = &ucur;
                swapcontext( **(cast(ucontext_t***) oldp),
                              *(cast(ucontext_t**) newp) );
            }
        }
2502 } | |
2503 | |
2504 | |
2505 /////////////////////////////////////////////////////////////////////////////// | |
2506 // Fiber | |
2507 /////////////////////////////////////////////////////////////////////////////// | |
2508 | |
2509 | |
2510 /** | |
2511 * This class provides a cooperative concurrency mechanism integrated with the | |
2512 * threading and garbage collection functionality. Calling a fiber may be | |
2513 * considered a blocking operation that returns when the fiber yields (via | |
2514 * Fiber.yield()). Execution occurs within the context of the calling thread | |
2515 * so synchronization is not necessary to guarantee memory visibility so long | |
2516 * as the same thread calls the fiber each time. Please note that there is no | |
2517 * requirement that a fiber be bound to one specific thread. Rather, fibers | |
2518 * may be freely passed between threads so long as they are not currently | |
2519 * executing. Like threads, a new fiber thread may be created using either | |
2520 * derivation or composition, as in the following example. | |
2521 * | |
2522 * Example: | |
2523 * ---------------------------------------------------------------------- | |
2524 * | |
2525 * class DerivedFiber : Fiber | |
2526 * { | |
2527 * this() | |
2528 * { | |
2529 * super( &run ); | |
2530 * } | |
2531 * | |
2532 * private : | |
2533 * void run() | |
2534 * { | |
2535 * printf( "Derived fiber running.\n" ); | |
2536 * } | |
2537 * } | |
2538 * | |
2539 * void fiberFunc() | |
2540 * { | |
2541 * printf( "Composed fiber running.\n" ); | |
2542 * Fiber.yield(); | |
2543 * printf( "Composed fiber running.\n" ); | |
2544 * } | |
2545 * | |
2546 * // create instances of each type | |
2547 * Fiber derived = new DerivedFiber(); | |
2548 * Fiber composed = new Fiber( &fiberFunc ); | |
2549 * | |
2550 * // call both fibers once | |
2551 * derived.call(); | |
2552 * composed.call(); | |
2553 * printf( "Execution returned to calling context.\n" ); | |
2554 * composed.call(); | |
2555 * | |
2556 * // since each fiber has run to completion, each should have state TERM | |
2557 * assert( derived.state == Fiber.State.TERM ); | |
2558 * assert( composed.state == Fiber.State.TERM ); | |
2559 * | |
2560 * ---------------------------------------------------------------------- | |
2561 * | |
2562 * Authors: Based on a design by Mikola Lysenko. | |
2563 */ | |
2564 class Fiber | |
2565 { | |
2566 /////////////////////////////////////////////////////////////////////////// | |
2567 // Initialization | |
2568 /////////////////////////////////////////////////////////////////////////// | |
2569 | |
2570 | |
2571 /** | |
2572 * Initializes a fiber object which is associated with a static | |
2573 * D function. | |
2574 * | |
2575 * Params: | |
2576 * fn = The thread function. | |
2577 * sz = The stack size for this fiber. | |
2578 * | |
2579 * In: | |
2580 * fn must not be null. | |
2581 */ | |
    this( void function() fn, size_t sz = PAGESIZE )
    in
    {
        assert( fn );
    }
    body
    {
        m_fn    = fn;
        m_call  = Call.FN;      // select the function member of the union
        m_state = State.HOLD;   // ready to be call()ed
        // allocate the stack first, then lay down the initial frame that the
        // first context switch will "return" into
        allocStack( sz );
        initStack();
    }
2595 | |
2596 | |
2597 /** | |
2598 * Initializes a fiber object which is associated with a dynamic | |
2599 * D function. | |
2600 * | |
2601 * Params: | |
2602 * dg = The thread function. | |
2603 * sz = The stack size for this fiber. | |
2604 * | |
2605 * In: | |
2606 * dg must not be null. | |
2607 */ | |
    this( void delegate() dg, size_t sz = PAGESIZE )
    in
    {
        assert( dg );
    }
    body
    {
        m_dg    = dg;
        m_call  = Call.DG;      // select the delegate member of the union
        m_state = State.HOLD;   // ready to be call()ed
        // allocate the stack first, then lay down the initial frame that the
        // first context switch will "return" into
        allocStack( sz );
        initStack();
    }
2621 | |
2622 | |
2623 /** | |
2624 * Cleans up any remaining resources used by this object. | |
2625 */ | |
    ~this()
    {
        // Releases the fiber's stack and unregisters its context.
        //
        // NOTE: A live reference to this object will exist on its associated
        //       stack from the first time its call() method has been called
        //       until its execution completes with State.TERM.  Thus, the only
        //       times this dtor should be called are either if the fiber has
        //       terminated (and therefore has no active stack) or if the user
        //       explicitly deletes this object.  The latter case is an error
        //       but is not easily tested for, since State.HOLD may imply that
        //       the fiber was just created but has never been run.  There is
        //       not a compelling case to create a State.INIT just to offer a
        //       means of ensuring the user isn't violating this object's
        //       contract, so for now this requirement will be enforced by
        //       documentation only.
        freeStack();
    }
2642 | |
2643 | |
2644 /////////////////////////////////////////////////////////////////////////// | |
2645 // General Actions | |
2646 /////////////////////////////////////////////////////////////////////////// | |
2647 | |
2648 | |
2649 /** | |
2650 * Transfers execution to this fiber object. The calling context will be | |
2651 * suspended until the fiber calls Fiber.yield() or until it terminates | |
2652 * via an unhandled exception. | |
2653 * | |
2654 * Params: | |
2655 * rethrow = Rethrow any unhandled exception which may have caused this | |
2656 * fiber to terminate. | |
2657 * | |
2658 * In: | |
2659 * This fiber must be in state HOLD. | |
2660 * | |
2661 * Throws: | |
2662 * Any exception not handled by the joined thread. | |
2663 * | |
2664 * Returns: | |
2665 * Any exception not handled by this fiber if rethrow = false, null | |
2666 * otherwise. | |
2667 */ | |
    final Object call( bool rethrow = true )
    in
    {
        assert( m_state == State.HOLD );
    }
    body
    {
        // may itself be a fiber: fibers can call other fibers
        Fiber   cur = getThis();

        // tell fiber_switchContext's ucontext path where to save the
        // caller's context (the static sm_utxt represents the main thread)
        static if( is( ucontext_t ) )
          m_ucur = cur ? &cur.m_utxt : &Fiber.sm_utxt;

        // make this fiber "current" for the duration of the switch, then
        // restore the previous fiber (or null) once control returns here
        setThis( this );
        this.switchIn();
        setThis( cur );

        static if( is( ucontext_t ) )
          m_ucur = null;

        // NOTE: If the fiber has terminated then the stack pointers must be
        //       reset.  This ensures that the stack for this fiber is not
        //       scanned if the fiber has terminated.  This is necessary to
        //       prevent any references lingering on the stack from delaying
        //       the collection of otherwise dead objects.  The most notable
        //       being the current object, which is referenced at the top of
        //       fiber_entryPoint.
        if( m_state == State.TERM )
        {
            m_ctxt.tstack = m_ctxt.bstack;
        }
        // surface any exception the fiber failed to handle, either by
        // rethrowing it here or by returning it for inspection
        if( m_unhandled )
        {
            Object  obj  = m_unhandled;
            m_unhandled = null;
            if( rethrow )
                throw obj;
            return obj;
        }
        return null;
    }
2708 | |
2709 | |
2710 /** | |
2711 * Resets this fiber so that it may be re-used. This routine may only be | |
2712 * called for fibers that have terminated, as doing otherwise could result | |
2713 * in scope-dependent functionality that is not executed. Stack-based | |
2714 * classes, for example, may not be cleaned up properly if a fiber is reset | |
2715 * before it has terminated. | |
2716 * | |
2717 * In: | |
2718 * This fiber must be in state TERM. | |
2719 */ | |
2720 final void reset() | |
2721 in | |
2722 { | |
2723 assert( m_state == State.TERM ); | |
2724 assert( m_ctxt.tstack == m_ctxt.bstack ); | |
2725 } | |
2726 body | |
2727 { | |
2728 m_state = State.HOLD; | |
2729 initStack(); | |
2730 m_unhandled = null; | |
2731 } | |
2732 | |
2733 | |
2734 /////////////////////////////////////////////////////////////////////////// | |
2735 // General Properties | |
2736 /////////////////////////////////////////////////////////////////////////// | |
2737 | |
2738 | |
2739 /** | |
2740 * A fiber may occupy one of three states: HOLD, EXEC, and TERM. The HOLD | |
2741 * state applies to any fiber that is suspended and ready to be called. | |
2742 * The EXEC state will be set for any fiber that is currently executing. | |
2743 * And the TERM state is set when a fiber terminates. Once a fiber | |
2744 * terminates, it must be reset before it may be called again. | |
2745 */ | |
    enum State
    {
        HOLD,   /// suspended and ready to be call()ed (initial state, and after yield/reset)
        EXEC,   /// currently executing
        TERM    /// has terminated; must be reset() before it may be called again
    }
2752 | |
2753 | |
2754 /** | |
2755 * Gets the current state of this fiber. | |
2756 * | |
2757 * Returns: | |
2758 * The state of this fiber as an enumerated value. | |
2759 */ | |
    final State state()
    {
        // simple read-only accessor; no synchronization is performed here
        return m_state;
    }
2764 | |
2765 | |
2766 /////////////////////////////////////////////////////////////////////////// | |
2767 // Actions on Calling Fiber | |
2768 /////////////////////////////////////////////////////////////////////////// | |
2769 | |
2770 | |
2771 /** | |
2772 * Forces a context switch to occur away from the calling fiber. | |
2773 */ | |
    static void yield()
    {
        Fiber   cur = getThis();
        assert( cur, "Fiber.yield() called with no active fiber" );
        assert( cur.m_state == State.EXEC );

        // let the ucontext path know where to save this fiber's context
        static if( is( ucontext_t ) )
          cur.m_ucur = &cur.m_utxt;

        // HOLD is observed by the caller of Fiber.call while we are away;
        // execution resumes after switchOut when the fiber is next called
        cur.m_state = State.HOLD;
        cur.switchOut();
        cur.m_state = State.EXEC;
    }
2787 | |
2788 | |
2789 /** | |
2790 * Forces a context switch to occur away from the calling fiber and then | |
2791 * throws obj in the calling fiber. | |
2792 * | |
2793 * Params: | |
2794 * obj = The object to throw. | |
2795 * | |
2796 * In: | |
2797 * obj must not be null. | |
2798 */ | |
    static void yieldAndThrow( Object obj )
    in
    {
        assert( obj );
    }
    body
    {
        Fiber   cur = getThis();
        assert( cur, "Fiber.yield() called with no active fiber" );
        assert( cur.m_state == State.EXEC );

        // let the ucontext path know where to save this fiber's context
        static if( is( ucontext_t ) )
          cur.m_ucur = &cur.m_utxt;

        // parking obj in m_unhandled makes the caller's Fiber.call treat it
        // exactly like an exception that escaped the fiber's routine
        cur.m_unhandled = obj;
        cur.m_state = State.HOLD;
        cur.switchOut();
        cur.m_state = State.EXEC;
    }
2818 | |
2819 | |
2820 /////////////////////////////////////////////////////////////////////////// | |
2821 // Fiber Accessors | |
2822 /////////////////////////////////////////////////////////////////////////// | |
2823 | |
2824 | |
2825 /** | |
2826 * Provides a reference to the calling fiber or null if no fiber is | |
2827 * currently active. | |
2828 * | |
2829 * Returns: | |
2830 * The fiber object representing the calling fiber or null if no fiber | |
2831 * is currently active. The result of deleting this object is undefined. | |
2832 */ | |
2833 static Fiber getThis() | |
2834 { | |
2835 version( Win32 ) | |
2836 { | |
2837 return cast(Fiber) TlsGetValue( sm_this ); | |
2838 } | |
2839 else version( Posix ) | |
2840 { | |
2841 return cast(Fiber) pthread_getspecific( sm_this ); | |
2842 } | |
2843 } | |
2844 | |
2845 | |
2846 /////////////////////////////////////////////////////////////////////////// | |
2847 // Static Initialization | |
2848 /////////////////////////////////////////////////////////////////////////// | |
2849 | |
2850 | |
    static this()
    {
        // Allocate the thread-local slot used by getThis/setThis, and on
        // ucontext-based targets capture a context for the main thread.
        version( Win32 )
        {
            sm_this = TlsAlloc();
            assert( sm_this != TLS_OUT_OF_INDEXES );
        }
        else version( Posix )
        {
            int status;

            status = pthread_key_create( &sm_this, null );
            assert( status == 0 );

            static if( is( ucontext_t ) )
            {
                // sm_utxt represents the main application thread's context
                status = getcontext( &sm_utxt );
                assert( status == 0 );
            }
        }
    }
2872 | |
2873 | |
2874 private: | |
2875 // | |
2876 // Initializes a fiber object which has no associated executable function. | |
2877 // | |
    //
    // Initializes a fiber object which has no associated executable function.
    // No stack is allocated; run() will be a no-op for Call.NO.
    //
    this()
    {
        m_call = Call.NO;
    }
2882 | |
2883 | |
2884 // | |
2885 // Fiber entry point. Invokes the function or delegate passed on | |
2886 // construction (if any). | |
2887 // | |
2888 final void run() | |
2889 { | |
2890 switch( m_call ) | |
2891 { | |
2892 case Call.FN: | |
2893 m_fn(); | |
2894 break; | |
2895 case Call.DG: | |
2896 m_dg(); | |
2897 break; | |
2898 default: | |
2899 break; | |
2900 } | |
2901 } | |
2902 | |
2903 | |
2904 private: | |
2905 // | |
2906 // The type of routine passed on fiber construction. | |
2907 // | |
    enum Call
    {
        NO, // no routine attached; run() does nothing
        FN, // the m_fn union member is valid
        DG  // the m_dg union member is valid
    }


    //
    // Standard fiber data
    //
    Call                m_call;     // which union member below is active
    union
    {
        void function() m_fn;
        void delegate() m_dg;
    }
    bool                m_isRunning;    // NOTE(review): never written in this chunk — confirm it is used elsewhere
    Object              m_unhandled;    // exception (or thrown Object) that escaped run(), if any
    State               m_state;        // current scheduling state (HOLD/EXEC/TERM)
2928 | |
2929 | |
2930 private: | |
2931 /////////////////////////////////////////////////////////////////////////// | |
2932 // Stack Management | |
2933 /////////////////////////////////////////////////////////////////////////// | |
2934 | |
2935 | |
2936 // | |
2937 // Allocate a new stack for this fiber. | |
2938 // | |
    //
    // Allocate a new stack for this fiber and register its context with
    // Thread so the GC scans it.  On Win32 a guard page is placed at the
    // far end of the reservation; the POSIX paths allocate the bare size.
    //
    final void allocStack( size_t sz )
    in
    {
        assert( !m_pmem && !m_ctxt );
    }
    body
    {
        // adjust alloc size to a multiple of PAGESIZE
        sz += PAGESIZE - 1;
        sz -= sz % PAGESIZE;

        // NOTE: This instance of Thread.Context is dynamic so Fiber objects
        //       can be collected by the GC so long as no user level references
        //       to the object exist.  If m_ctxt were not dynamic then its
        //       presence in the global context list would be enough to keep
        //       this object alive indefinitely.  An alternative to allocating
        //       room for this struct explicitly would be to mash it into the
        //       base of the stack being allocated below.  However, doing so
        //       requires too much special logic to be worthwhile.
        m_ctxt = new Thread.Context;

        static if( is( typeof( VirtualAlloc ) ) )
        {
            // reserve memory for stack plus one extra page for the guard
            m_pmem = VirtualAlloc( null,
                                   sz + PAGESIZE,
                                   MEM_RESERVE,
                                   PAGE_NOACCESS );
            if( !m_pmem )
            {
                throw new FiberException( "Unable to reserve memory for stack" );
            }

            // the guard page sits past the end of the stack in its growth
            // direction; pbase is the logical bottom the fiber starts from
            version( StackGrowsDown )
            {
                void* stack = m_pmem + PAGESIZE;
                void* guard = m_pmem;
                void* pbase = stack + sz;
            }
            else
            {
                void* stack = m_pmem;
                void* guard = m_pmem + sz;
                void* pbase = stack;
            }

            // allocate reserved stack segment
            stack = VirtualAlloc( stack,
                                  sz,
                                  MEM_COMMIT,
                                  PAGE_READWRITE );
            if( !stack )
            {
                throw new FiberException( "Unable to allocate memory for stack" );
            }

            // allocate reserved guard page (faults on stack overflow)
            guard = VirtualAlloc( guard,
                                  PAGESIZE,
                                  MEM_COMMIT,
                                  PAGE_READWRITE | PAGE_GUARD );
            if( !guard )
            {
                throw new FiberException( "Unable to create guard page for stack" );
            }

            m_ctxt.bstack = pbase;
            m_ctxt.tstack = pbase;
            m_size = sz;
        }
        else
        {   // POSIX fallbacks in decreasing order of preference:
            // mmap, then valloc (page-aligned), then plain malloc
            static if( is( typeof( mmap ) ) )
            {
                m_pmem = mmap( null,
                               sz,
                               PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANON,
                               -1,
                               0 );
                if( m_pmem == MAP_FAILED )
                    m_pmem = null;
            }
            else static if( is( typeof( valloc ) ) )
            {
                m_pmem = valloc( sz );
            }
            else static if( is( typeof( malloc ) ) )
            {
                m_pmem = malloc( sz );
            }
            else
            {
                m_pmem = null;
            }

            if( !m_pmem )
            {
                throw new FiberException( "Unable to allocate memory for stack" );
            }

            version( StackGrowsDown )
            {
                m_ctxt.bstack = m_pmem + sz;
                m_ctxt.tstack = m_pmem + sz;
            }
            else
            {
                m_ctxt.bstack = m_pmem;
                m_ctxt.tstack = m_pmem;
            }
            m_size = sz;
        }

        // place this stack on the global context list so the GC scans it
        Thread.add( m_ctxt );
    }
3054 | |
3055 | |
3056 // | |
3057 // Free this fiber's stack. | |
3058 // | |
    //
    // Free this fiber's stack, releasing it with whichever API matches the
    // one allocStack used, and unregister the context from Thread.
    //
    final void freeStack()
    in
    {
        assert( m_pmem && m_ctxt );
    }
    body
    {
        // NOTE: Since this routine is only ever expected to be called from
        //       the dtor, pointers to freed data are not set to null.

        // NOTE: m_ctxt is guaranteed to be alive because it is held in the
        //       global context list.
        Thread.remove( m_ctxt );

        static if( is( typeof( VirtualAlloc ) ) )
        {
            VirtualFree( m_pmem, 0, MEM_RELEASE );
        }
        else static if( is( typeof( mmap ) ) )
        {
            munmap( m_pmem, m_size );
        }
        else static if( is( typeof( valloc ) ) )
        {
            free( m_pmem );
        }
        else static if( is( typeof( malloc ) ) )
        {
            free( m_pmem );
        }
        delete m_ctxt;
    }
3091 | |
3092 | |
3093 // | |
3094 // Initialize the allocated stack. | |
3095 // | |
3096 final void initStack() | |
3097 in | |
3098 { | |
3099 assert( m_ctxt.tstack && m_ctxt.tstack == m_ctxt.bstack ); | |
3100 assert( cast(size_t) m_ctxt.bstack % (void*).sizeof == 0 ); | |
3101 } | |
3102 body | |
3103 { | |
3104 void* pstack = m_ctxt.tstack; | |
3105 scope( exit ) m_ctxt.tstack = pstack; | |
3106 | |
3107 void push( size_t val ) | |
3108 { | |
3109 version( StackGrowsDown ) | |
3110 { | |
3111 pstack -= size_t.sizeof; | |
3112 *(cast(size_t*) pstack) = val; | |
3113 } | |
3114 else | |
3115 { | |
3116 pstack += size_t.sizeof; | |
3117 *(cast(size_t*) pstack) = val; | |
3118 } | |
3119 } | |
3120 | |
3121 // NOTE: On OS X the stack must be 16-byte aligned according to the | |
3122 // IA-32 call spec. | |
3123 version( darwin ) | |
3124 { | |
3125 pstack = cast(void*)(cast(uint)(pstack) - (cast(uint)(pstack) & 0x0F)); | |
3126 } | |
3127 | |
3128 version( AsmX86_Win32 ) | |
3129 { | |
3130 push( cast(size_t) &fiber_entryPoint ); // EIP | |
3131 push( 0xFFFFFFFF ); // EBP | |
3132 push( 0x00000000 ); // EAX | |
3133 push( 0xFFFFFFFF ); // FS:[0] | |
3134 version( StackGrowsDown ) | |
3135 { | |
3136 push( cast(size_t) m_ctxt.bstack ); // FS:[4] | |
3137 push( cast(size_t) m_ctxt.bstack - m_size ); // FS:[8] | |
3138 } | |
3139 else | |
3140 { | |
3141 push( cast(size_t) m_ctxt.bstack ); // FS:[4] | |
3142 push( cast(size_t) m_ctxt.bstack + m_size ); // FS:[8] | |
3143 } | |
3144 push( 0x00000000 ); // EBX | |
3145 push( 0x00000000 ); // ESI | |
3146 push( 0x00000000 ); // EDI | |
3147 } | |
3148 else version( AsmX86_Posix ) | |
3149 { | |
3150 push( cast(size_t) &fiber_entryPoint ); // EIP | |
3151 push( 0x00000000 ); // EBP | |
3152 push( 0x00000000 ); // EAX | |
3153 push( 0x00000000 ); // EBX | |
3154 push( 0x00000000 ); // ESI | |
3155 push( 0x00000000 ); // EDI | |
3156 } | |
3157 else version( LLVM_AsmX86_Posix ) | |
3158 { | |
3159 push( cast(size_t) &fiber_entryPoint ); // EIP | |
3160 push( 0x00000000 ); // newp | |
3161 push( 0x00000000 ); // oldp | |
3162 push( 0x00000000 ); // EBP | |
3163 push( 0x00000000 ); // EBX | |
3164 push( 0x00000000 ); // ESI | |
3165 push( 0x00000000 ); // EDI | |
3166 } | |
3167 //TODO: Implement x86-64 fibers | |
3168 /+ | |
3169 else version( LLVM_AsmX86_Posix ) | |
3170 { | |
3171 } | |
3172 +/ | |
3173 else version( AsmPPC_Posix ) | |
3174 { | |
3175 version( StackGrowsDown ) | |
3176 { | |
3177 pstack -= int.sizeof * 5; | |
3178 } | |
3179 else | |
3180 { | |
3181 pstack += int.sizeof * 5; | |
3182 } | |
3183 | |
3184 push( cast(size_t) &fiber_entryPoint ); // link register | |
3185 push( 0x00000000 ); // control register | |
3186 push( 0x00000000 ); // old stack pointer | |
3187 | |
3188 // GPR values | |
3189 version( StackGrowsDown ) | |
3190 { | |
3191 pstack -= int.sizeof * 20; | |
3192 } | |
3193 else | |
3194 { | |
3195 pstack += int.sizeof * 20; | |
3196 } | |
3197 | |
3198 assert( cast(uint) pstack & 0x0f == 0 ); | |
3199 } | |
3200 else static if( is( ucontext_t ) ) | |
3201 { | |
3202 getcontext( &m_utxt ); | |
3203 m_utxt.uc_stack.ss_sp = m_ctxt.bstack; | |
3204 m_utxt.uc_stack.ss_size = m_size; | |
3205 makecontext( &m_utxt, &fiber_entryPoint, 0 ); | |
3206 // NOTE: If ucontext is being used then the top of the stack will | |
3207 // be a pointer to the ucontext_t struct for that fiber. | |
3208 push( cast(size_t) &m_utxt ); | |
3209 } | |
3210 } | |
3211 | |
3212 | |
    Thread.Context* m_ctxt;     // stack descriptor held on the global context list (see allocStack/freeStack)
    size_t          m_size;     // usable stack size in bytes (page-rounded)
    void*           m_pmem;     // raw allocation backing the stack

    static if( is( ucontext_t ) )
    {
        // NOTE: The static ucontext instance is used to represent the context
        //       of the main application thread.
        static ucontext_t sm_utxt = void;
        ucontext_t        m_utxt  = void;   // this fiber's saved context
        ucontext_t*       m_ucur  = null;   // where the NEXT switch should save; set before switching
    }
3225 | |
3226 | |
3227 private: | |
3228 /////////////////////////////////////////////////////////////////////////// | |
3229 // Storage of Active Fiber | |
3230 /////////////////////////////////////////////////////////////////////////// | |
3231 | |
3232 | |
3233 // | |
3234 // Sets a thread-local reference to the current fiber object. | |
3235 // | |
3236 static void setThis( Fiber f ) | |
3237 { | |
3238 version( Win32 ) | |
3239 { | |
3240 TlsSetValue( sm_this, cast(void*) f ); | |
3241 } | |
3242 else version( Posix ) | |
3243 { | |
3244 pthread_setspecific( sm_this, cast(void*) f ); | |
3245 } | |
3246 } | |
3247 | |
3248 | |
    static Thread.TLSKey    sm_this;    // TLS slot holding the currently executing Fiber (null if none)
3250 | |
3251 | |
3252 private: | |
3253 /////////////////////////////////////////////////////////////////////////// | |
3254 // Context Switching | |
3255 /////////////////////////////////////////////////////////////////////////// | |
3256 | |
3257 | |
3258 // | |
3259 // Switches into the stack held by this fiber. | |
3260 // | |
    final void switchIn()
    {
        Thread  tobj = Thread.getThis();
        void**  oldp = &tobj.m_curr.tstack; // where the caller's SP gets saved
        void*   newp = m_ctxt.tstack;       // this fiber's saved SP

        // NOTE: The order of operations here is very important.  The current
        //       stack top must be stored before m_lock is set, and pushContext
        //       must not be called until after m_lock is set.  This process
        //       is intended to prevent a race condition with the suspend
        //       mechanism used for garbage collection.  If it is not followed,
        //       a badly timed collection could cause the GC to scan from the
        //       bottom of one stack to the top of another, or to miss scanning
        //       a stack that still contains valid data.  The old stack pointer
        //       oldp will be set again before the context switch to guarantee
        //       that it points to exactly the correct stack location so the
        //       successive pop operations will succeed.
        *oldp = getStackTop();
        volatile tobj.m_lock = true;
        tobj.pushContext( m_ctxt );

        fiber_switchContext( oldp, newp );

        // Control returns here when the fiber yields or terminates.
        // NOTE: As above, these operations must be performed in a strict order
        //       to prevent Bad Things from happening.
        tobj.popContext();
        volatile tobj.m_lock = false;
        tobj.m_curr.tstack = tobj.m_curr.bstack;
    }
3290 | |
3291 | |
3292 // | |
3293 // Switches out of the current stack and into the enclosing stack. | |
3294 // | |
    final void switchOut()
    {
        // The thread this fiber is running on; control returns to the
        // context enclosing this fiber's context.
        Thread tobj = Thread.getThis();
        // Location where this fiber's stack top is published for the GC.
        void** oldp = &m_ctxt.tstack;
        // Saved stack pointer of the enclosing context to resume into.
        void* newp = tobj.m_curr.within.tstack;

        // NOTE: The order of operations here is very important.  The current
        //       stack top must be stored before m_lock is set, and pushContext
        //       must not be called until after m_lock is set.  This process
        //       is intended to prevent a race condition with the suspend
        //       mechanism used for garbage collection.  If it is not followed,
        //       a badly timed collection could cause the GC to scan from the
        //       bottom of one stack to the top of another, or to miss scanning
        //       a stack that still contains valid data.  The old stack pointer
        //       oldp will be set again before the context switch to guarantee
        //       that it points to exactly the correct stack location so the
        //       successive pop operations will succeed.
        *oldp = getStackTop();
        volatile tobj.m_lock = true;

        // Does not return until some later switchIn() resumes this fiber.
        // NOTE(review): unlike switchIn, no popContext/pushContext pair here —
        // presumably the enclosing context's stack entry is managed by the
        // matching switchIn; confirm against that method.
        fiber_switchContext( oldp, newp );

        // NOTE: As above, these operations must be performed in a strict order
        //       to prevent Bad Things from happening.
        volatile tobj.m_lock = false;
        // The resumed stack is live again, so reset its recorded top to the
        // stack bottom until the next suspend records a precise value.
        tobj.m_curr.tstack = tobj.m_curr.bstack;
    }
3322 } |