Mercurial > projects > ldc
annotate tango/lib/common/tango/core/Thread.d @ 237:a168a2c3ea48 trunk
[svn r253] Removed -inlineasm option. inline asm is now enabled by default unless the new -noasm option is passed.
Tried adding a stack trace print when compiler crashes, not sure it's working though.
Changed data layouts to match that of llvm-gcc.
Fixed casting function pointers.
Added support checks in AsmStatement.
author | lindquist |
---|---|
date | Sun, 08 Jun 2008 19:09:24 +0200 |
parents | 1700239cab2e |
children | 4d006f7b2ada |
rev | line source |
---|---|
132 | 1 /** |
2 * The thread module provides support for thread creation and management. | |
3 * | |
4 * Copyright: Copyright (C) 2005-2006 Sean Kelly. All rights reserved. | |
5 * License: BSD style: $(LICENSE) | |
6 * Authors: Sean Kelly | |
7 */ | |
8 module tango.core.Thread; | |
9 | |
10 | |
11 // this should be true for most architectures | |
12 version = StackGrowsDown; | |
13 | |
14 | |
15 public | |
16 { | |
17 // import tango.core.TimeSpan; | |
18 } | |
19 private | |
20 { | |
21 import tango.core.Exception; | |
22 | |
23 | |
24 // | |
25 // exposed by compiler runtime | |
26 // | |
27 extern (C) void* rt_stackBottom(); | |
28 extern (C) void* rt_stackTop(); | |
29 | |
30 | |
31 void* getStackBottom() | |
32 { | |
33 return rt_stackBottom(); | |
34 } | |
35 | |
36 | |
37 void* getStackTop() | |
38 { | |
39 version( D_InlineAsm_X86 ) | |
40 { | |
41 asm | |
42 { | |
43 naked; | |
44 mov EAX, ESP; | |
45 ret; | |
46 } | |
47 } | |
48 else | |
49 { | |
50 return rt_stackTop(); | |
51 } | |
52 } | |
53 } | |
54 | |
55 | |
56 //////////////////////////////////////////////////////////////////////////////// | |
57 // Thread Entry Point and Signal Handlers | |
58 //////////////////////////////////////////////////////////////////////////////// | |
59 | |
60 | |
61 version( Win32 ) | |
62 { | |
63 private | |
64 { | |
65 import tango.stdc.stdint : uintptr_t; // for _beginthreadex decl below | |
66 import tango.sys.win32.UserGdi; | |
67 | |
68 const DWORD TLS_OUT_OF_INDEXES = 0xFFFFFFFF; | |
69 | |
70 // | |
71 // avoid multiple imports via tango.sys.windows.process | |
72 // | |
73 extern (Windows) alias uint function(void*) btex_fptr; | |
74 extern (C) uintptr_t _beginthreadex(void*, uint, btex_fptr, void*, uint, uint*); | |
75 | |
76 | |
77 // | |
78 // entry point for Windows threads | |
79 // | |
80 extern (Windows) uint thread_entryPoint( void* arg ) | |
81 { | |
82 Thread obj = cast(Thread) arg; | |
83 assert( obj ); | |
84 scope( exit ) Thread.remove( obj ); | |
85 | |
86 assert( obj.m_curr is &obj.m_main ); | |
87 obj.m_main.bstack = getStackBottom(); | |
88 obj.m_main.tstack = obj.m_main.bstack; | |
89 Thread.add( &obj.m_main ); | |
90 Thread.setThis( obj ); | |
91 | |
92 // NOTE: No GC allocations may occur until the stack pointers have | |
93 // been set and Thread.getThis returns a valid reference to | |
94 // this thread object (this latter condition is not strictly | |
95 // necessary on Win32 but it should be followed for the sake | |
96 // of consistency). | |
97 | |
98 // TODO: Consider putting an auto exception object here (using | |
99 alloca) for OutOfMemoryError plus something to track | |
100 // whether an exception is in-flight? | |
101 | |
102 try | |
103 { | |
104 obj.run(); | |
105 } | |
106 catch( Object o ) | |
107 { | |
108 obj.m_unhandled = o; | |
109 } | |
110 return 0; | |
111 } | |
112 | |
113 | |
114 // | |
115 // copy of the same-named function in phobos.std.thread--it uses the | |
116 // Windows naming convention to be consistent with GetCurrentThreadId | |
117 // | |
118 HANDLE GetCurrentThreadHandle() | |
119 { | |
120 const uint DUPLICATE_SAME_ACCESS = 0x00000002; | |
121 | |
122 HANDLE curr = GetCurrentThread(), | |
123 proc = GetCurrentProcess(), | |
124 hndl; | |
125 | |
126 DuplicateHandle( proc, curr, proc, &hndl, 0, TRUE, DUPLICATE_SAME_ACCESS ); | |
127 return hndl; | |
128 } | |
129 } | |
130 } | |
131 else version( Posix ) | |
132 { | |
133 private | |
134 { | |
135 import tango.stdc.posix.semaphore; | |
136 import tango.stdc.posix.pthread; | |
137 import tango.stdc.posix.signal; | |
138 import tango.stdc.posix.time; | |
139 import tango.stdc.errno; | |
140 | |
141 extern (C) int getErrno(); | |
142 | |
143 version( GNU ) | |
144 { | |
145 import gcc.builtins; | |
146 } | |
147 | |
148 | |
149 // | |
150 // entry point for POSIX threads | |
151 // | |
152 extern (C) void* thread_entryPoint( void* arg ) | |
153 { | |
154 Thread obj = cast(Thread) arg; | |
155 assert( obj ); | |
156 scope( exit ) | |
157 { | |
158 // NOTE: isRunning should be set to false after the thread is | |
159 // removed or a double-removal could occur between this | |
160 // function and thread_suspendAll. | |
161 Thread.remove( obj ); | |
162 obj.m_isRunning = false; | |
163 } | |
164 | |
165 static extern (C) void thread_cleanupHandler( void* arg ) | |
166 { | |
167 Thread obj = cast(Thread) arg; | |
168 assert( obj ); | |
169 | |
170 // NOTE: If the thread terminated abnormally, just set it as | |
171 // not running and let thread_suspendAll remove it from | |
172 // the thread list. This is safer and is consistent | |
173 // with the Windows thread code. | |
174 obj.m_isRunning = false; | |
175 } | |
176 | |
177 // NOTE: Using void to skip the initialization here relies on | |
178 // knowledge of how pthread_cleanup is implemented. It may | |
179 // not be appropriate for all platforms. However, it does | |
180 // avoid the need to link the pthread module. If any | |
181 // implementation actually requires default initialization | |
182 // then pthread_cleanup should be restructured to maintain | |
183 // the current lack of a link dependency. | |
184 pthread_cleanup cleanup = void; | |
185 cleanup.push( &thread_cleanupHandler, cast(void*) obj ); | |
186 | |
187 // NOTE: For some reason this does not always work for threads. | |
188 //obj.m_main.bstack = getStackBottom(); | |
189 version( D_InlineAsm_X86 ) | |
190 { | |
191 static void* getBasePtr() | |
192 { | |
193 asm | |
194 { | |
195 naked; | |
196 mov EAX, EBP; | |
197 ret; | |
198 } | |
199 } | |
200 | |
201 obj.m_main.bstack = getBasePtr(); | |
202 } | |
203 else version( StackGrowsDown ) | |
204 obj.m_main.bstack = &obj + 1; | |
205 else | |
206 obj.m_main.bstack = &obj; | |
207 obj.m_main.tstack = obj.m_main.bstack; | |
208 assert( obj.m_curr == &obj.m_main ); | |
209 Thread.add( &obj.m_main ); | |
210 Thread.setThis( obj ); | |
211 | |
212 // NOTE: No GC allocations may occur until the stack pointers have | |
213 // been set and Thread.getThis returns a valid reference to | |
214 // this thread object (this latter condition is not strictly | |
215 // necessary on Win32 but it should be followed for the sake | |
216 // of consistency). | |
217 | |
218 // TODO: Consider putting an auto exception object here (using | |
219 alloca) for OutOfMemoryError plus something to track | |
220 // whether an exception is in-flight? | |
221 | |
222 try | |
223 { | |
224 obj.run(); | |
225 } | |
226 catch( Object o ) | |
227 { | |
228 obj.m_unhandled = o; | |
229 } | |
230 return null; | |
231 } | |
232 | |
233 | |
234 // | |
235 // used to track the number of suspended threads | |
236 // | |
237 sem_t suspendCount; | |
238 | |
239 | |
240 extern (C) void thread_suspendHandler( int sig ) | |
241 in | |
242 { | |
243 assert( sig == SIGUSR1 ); | |
244 } | |
245 body | |
246 { | |
247 version( D_InlineAsm_X86 ) | |
248 { | |
249 asm | |
250 { | |
251 pushad; | |
252 } | |
253 } | |
254 else version( GNU ) | |
255 { | |
256 __builtin_unwind_init(); | |
257 } | |
258 else version( LLVMDC ) | |
259 { | |
260 // TODO below as well | |
261 } | |
262 else | |
263 { | |
264 static assert( false, "Architecture not supported." ); | |
265 } | |
266 | |
267 // NOTE: Since registers are being pushed and popped from the stack, | |
268 // any other stack data used by this function should be gone | |
269 // before the stack cleanup code is called below. | |
270 { | |
271 Thread obj = Thread.getThis(); | |
272 | |
273 // NOTE: The thread reference returned by getThis is set within | |
274 // the thread startup code, so it is possible that this | |
275 // handler may be called before the reference is set. In | |
276 // this case it is safe to simply suspend and not worry | |
277 // about the stack pointers as the thread will not have | |
278 // any references to GC-managed data. | |
279 if( obj && !obj.m_lock ) | |
280 { | |
281 obj.m_curr.tstack = getStackTop(); | |
282 } | |
283 | |
284 sigset_t sigres = void; | |
285 int status; | |
286 | |
287 status = sigfillset( &sigres ); | |
288 assert( status == 0 ); | |
289 | |
290 status = sigdelset( &sigres, SIGUSR2 ); | |
291 assert( status == 0 ); | |
292 | |
293 status = sem_post( &suspendCount ); | |
294 assert( status == 0 ); | |
295 | |
296 sigsuspend( &sigres ); | |
297 | |
298 if( obj && !obj.m_lock ) | |
299 { | |
300 obj.m_curr.tstack = obj.m_curr.bstack; | |
301 } | |
302 } | |
303 | |
304 version( D_InlineAsm_X86 ) | |
305 { | |
306 asm | |
307 { | |
308 popad; | |
309 } | |
310 } | |
311 else version( GNU ) | |
312 { | |
313 // registers will be popped automatically | |
314 } | |
315 else version( LLVMDC ) | |
316 { | |
317 // TODO | |
318 } | |
319 else | |
320 { | |
321 static assert( false, "Architecture not supported." ); | |
322 } | |
323 } | |
324 | |
325 | |
326 extern (C) void thread_resumeHandler( int sig ) | |
327 in | |
328 { | |
329 assert( sig == SIGUSR2 ); | |
330 } | |
331 body | |
332 { | |
333 | |
334 } | |
335 } | |
336 } | |
337 else | |
338 { | |
339 // NOTE: This is the only place threading versions are checked. If a new | |
340 // version is added, the module code will need to be searched for | |
341 // places where version-specific code may be required. This can be | |
341 easily accomplished by searching for 'Windows' or 'Posix'. | |
343 static assert( false, "Unknown threading implementation." ); | |
344 } | |
345 | |
346 | |
347 //////////////////////////////////////////////////////////////////////////////// | |
348 // Thread | |
349 //////////////////////////////////////////////////////////////////////////////// | |
350 | |
351 | |
352 /** | |
353 * This class encapsulates all threading functionality for the D | |
354 * programming language. As thread manipulation is a required facility | |
355 * for garbage collection, all user threads should derive from this | |
356 * class, and instances of this class should never be explicitly deleted. | |
357 * A new thread may be created using either derivation or composition, as | |
358 * in the following example. | |
359 * | |
360 * Example: | |
361 * ----------------------------------------------------------------------------- | |
362 * | |
363 * class DerivedThread : Thread | |
364 * { | |
365 * this() | |
366 * { | |
367 * super( &run ); | |
368 * } | |
369 * | |
370 * private : | |
371 * void run() | |
372 * { | |
373 * printf( "Derived thread running.\n" ); | |
374 * } | |
375 * } | |
376 * | |
377 * void threadFunc() | |
378 * { | |
379 * printf( "Composed thread running.\n" ); | |
380 * } | |
381 * | |
382 * // create instances of each type | |
383 * Thread derived = new DerivedThread(); | |
384 * Thread composed = new Thread( &threadFunc ); | |
385 * | |
386 * // start both threads | |
387 * derived.start(); | |
388 * composed.start(); | |
389 * | |
390 * ----------------------------------------------------------------------------- | |
391 */ | |
392 class Thread | |
393 { | |
394 //////////////////////////////////////////////////////////////////////////// | |
395 // Initialization | |
396 //////////////////////////////////////////////////////////////////////////// | |
397 | |
398 | |
399 /** | |
400 * Initializes a thread object which is associated with a static | |
401 * D function. | |
402 * | |
403 * Params: | |
404 * fn = The thread function. | |
405 * sz = The stack size for this thread. | |
406 * | |
407 * In: | |
408 * fn must not be null. | |
409 */ | |
410 this( void function() fn, size_t sz = 0 ) | |
411 in | |
412 { | |
413 assert( fn ); | |
414 } | |
415 body | |
416 { | |
417 m_fn = fn; | |
418 m_sz = sz; | |
419 m_call = Call.FN; | |
420 m_curr = &m_main; | |
421 } | |
422 | |
423 | |
424 /** | |
425 * Initializes a thread object which is associated with a dynamic | |
426 * D function. | |
427 * | |
428 * Params: | |
429 * dg = The thread function. | |
430 * sz = The stack size for this thread. | |
431 * | |
432 * In: | |
433 * dg must not be null. | |
434 */ | |
435 this( void delegate() dg, size_t sz = 0 ) | |
436 in | |
437 { | |
438 assert( dg ); | |
439 } | |
440 body | |
441 { | |
442 m_dg = dg; | |
443 m_sz = sz; | |
444 m_call = Call.DG; | |
445 m_curr = &m_main; | |
446 } | |
447 | |
448 | |
449 /** | |
450 * Cleans up any remaining resources used by this object. | |
451 */ | |
452 ~this() | |
453 { | |
454 if( m_addr == m_addr.init ) | |
455 { | |
456 return; | |
457 } | |
458 | |
459 version( Win32 ) | |
460 { | |
461 m_addr = m_addr.init; | |
462 CloseHandle( m_hndl ); | |
463 m_hndl = m_hndl.init; | |
464 } | |
465 else version( Posix ) | |
466 { | |
467 pthread_detach( m_addr ); | |
468 m_addr = m_addr.init; | |
469 } | |
470 } | |
471 | |
472 | |
473 //////////////////////////////////////////////////////////////////////////// | |
474 // General Actions | |
475 //////////////////////////////////////////////////////////////////////////// | |
476 | |
477 | |
478 /** | |
479 * Starts the thread and invokes the function or delegate passed upon | |
480 * construction. | |
481 * | |
482 * In: | |
483 * This routine may only be called once per thread instance. | |
484 * | |
485 * Throws: | |
486 * ThreadException if the thread fails to start. | |
487 */ | |
488 final void start() | |
489 in | |
490 { | |
491 assert( !next && !prev ); | |
492 } | |
493 body | |
494 { | |
495 version( Win32 ) {} else | |
496 version( Posix ) | |
497 { | |
498 pthread_attr_t attr; | |
499 | |
500 if( pthread_attr_init( &attr ) ) | |
501 throw new ThreadException( "Error initializing thread attributes" ); | |
502 if( m_sz && pthread_attr_setstacksize( &attr, m_sz ) ) | |
503 throw new ThreadException( "Error initializing thread stack size" ); | |
504 } | |
505 | |
506 // NOTE: This operation needs to be synchronized to avoid a race | |
507 // condition with the GC. Without this lock, the thread | |
508 // could start and allocate memory before being added to | |
509 // the global thread list, preventing it from being scanned | |
510 // and causing memory to be collected that is still in use. | |
511 synchronized( slock ) | |
512 { | |
513 version( Win32 ) | |
514 { | |
515 m_hndl = cast(HANDLE) _beginthreadex( null, m_sz, &thread_entryPoint, cast(void*) this, 0, &m_addr ); | |
516 if( cast(size_t) m_hndl == 0 ) | |
517 throw new ThreadException( "Error creating thread" ); | |
518 } | |
519 else version( Posix ) | |
520 { | |
521 m_isRunning = true; | |
522 scope( failure ) m_isRunning = false; | |
523 | |
524 if( pthread_create( &m_addr, &attr, &thread_entryPoint, cast(void*) this ) != 0 ) | |
525 throw new ThreadException( "Error creating thread" ); | |
526 } | |
527 multiThreadedFlag = true; | |
528 add( this ); | |
529 } | |
530 } | |
531 | |
532 | |
533 /** | |
534 * Waits for this thread to complete. If the thread terminated as the | |
535 * result of an unhandled exception, this exception will be rethrown. | |
536 * | |
537 * Params: | |
538 * rethrow = Rethrow any unhandled exception which may have caused this | |
539 * thread to terminate. | |
540 * | |
541 * Throws: | |
542 * ThreadException if the operation fails. | |
543 * Any exception not handled by the joined thread. | |
544 */ | |
545 final void join( bool rethrow = true ) | |
546 { | |
547 version( Win32 ) | |
548 { | |
549 if( WaitForSingleObject( m_hndl, INFINITE ) != WAIT_OBJECT_0 ) | |
550 throw new ThreadException( "Unable to join thread" ); | |
551 // NOTE: m_addr must be cleared before m_hndl is closed to avoid | |
552 // a race condition with isRunning. The operation is labeled | |
553 // volatile to prevent compiler reordering. | |
554 volatile m_addr = m_addr.init; | |
555 CloseHandle( m_hndl ); | |
556 m_hndl = m_hndl.init; | |
557 } | |
558 else version( Posix ) | |
559 { | |
560 if( pthread_join( m_addr, null ) != 0 ) | |
561 throw new ThreadException( "Unable to join thread" ); | |
562 // NOTE: pthread_join acts as a substitute for pthread_detach, | |
563 // which is normally called by the dtor. Setting m_addr | |
564 // to zero ensures that pthread_detach will not be called | |
565 // on object destruction. | |
566 volatile m_addr = m_addr.init; | |
567 } | |
568 if( rethrow && m_unhandled ) | |
569 { | |
570 throw m_unhandled; | |
571 } | |
572 } | |
573 | |
574 | |
575 //////////////////////////////////////////////////////////////////////////// | |
576 // General Properties | |
577 //////////////////////////////////////////////////////////////////////////// | |
578 | |
579 | |
580 /** | |
581 * Gets the user-readable label for this thread. | |
582 * | |
583 * Returns: | |
584 * The name of this thread. | |
585 */ | |
586 final char[] name() | |
587 { | |
588 synchronized( this ) | |
589 { | |
590 return m_name; | |
591 } | |
592 } | |
593 | |
594 | |
595 /** | |
596 * Sets the user-readable label for this thread. | |
597 * | |
598 * Params: | |
599 * val = The new name of this thread. | |
600 */ | |
601 final void name( char[] val ) | |
602 { | |
603 synchronized( this ) | |
604 { | |
605 m_name = val.dup; | |
606 } | |
607 } | |
608 | |
609 | |
610 /** | |
611 * Gets the daemon status for this thread. | |
612 * | |
613 * Returns: | |
614 * true if this is a daemon thread. | |
615 */ | |
616 final bool isDaemon() | |
617 { | |
618 synchronized( this ) | |
619 { | |
620 return m_isDaemon; | |
621 } | |
622 } | |
623 | |
624 | |
625 /** | |
626 * Sets the daemon status for this thread. | |
627 * | |
628 * Params: | |
629 * val = The new daemon status for this thread. | |
630 */ | |
631 final void isDaemon( bool val ) | |
632 { | |
633 synchronized( this ) | |
634 { | |
635 m_isDaemon = val; | |
636 } | |
637 } | |
638 | |
639 | |
640 /** | |
641 * Tests whether this thread is running. | |
642 * | |
643 * Returns: | |
644 * true if the thread is running, false if not. | |
645 */ | |
646 final bool isRunning() | |
647 { | |
648 if( m_addr == m_addr.init ) | |
649 { | |
650 return false; | |
651 } | |
652 | |
653 version( Win32 ) | |
654 { | |
655 uint ecode = 0; | |
656 GetExitCodeThread( m_hndl, &ecode ); | |
657 return ecode == STILL_ACTIVE; | |
658 } | |
659 else version( Posix ) | |
660 { | |
661 // NOTE: It should be safe to access this value without | |
662 // memory barriers because word-tearing and such | |
663 // really isn't an issue for boolean values. | |
664 return m_isRunning; | |
665 } | |
666 } | |
667 | |
668 | |
669 //////////////////////////////////////////////////////////////////////////// | |
670 // Thread Priority Actions | |
671 //////////////////////////////////////////////////////////////////////////// | |
672 | |
673 | |
674 /** | |
675 * The minimum scheduling priority that may be set for a thread. On | |
676 * systems where multiple scheduling policies are defined, this value | |
677 * represents the minimum valid priority for the scheduling policy of | |
678 * the process. | |
679 */ | |
680 static const int PRIORITY_MIN; | |
681 | |
682 | |
683 /** | |
684 * The maximum scheduling priority that may be set for a thread. On | |
685 * systems where multiple scheduling policies are defined, this value | |
686 * represents the minimum valid priority for the scheduling policy of | |
687 * the process. | |
688 */ | |
689 static const int PRIORITY_MAX; | |
690 | |
691 | |
692 /** | |
693 * Gets the scheduling priority for the associated thread. | |
694 * | |
695 * Returns: | |
696 * The scheduling priority of this thread. | |
697 */ | |
698 final int priority() | |
699 { | |
700 version( Win32 ) | |
701 { | |
702 return GetThreadPriority( m_hndl ); | |
703 } | |
704 else version( Posix ) | |
705 { | |
706 int policy; | |
707 sched_param param; | |
708 | |
709 if( pthread_getschedparam( m_addr, &policy, ¶m ) ) | |
710 throw new ThreadException( "Unable to get thread priority" ); | |
711 return param.sched_priority; | |
712 } | |
713 } | |
714 | |
715 | |
716 /** | |
717 * Sets the scheduling priority for the associated thread. | |
718 * | |
719 * Params: | |
720 * val = The new scheduling priority of this thread. | |
721 */ | |
722 final void priority( int val ) | |
723 { | |
724 version( Win32 ) | |
725 { | |
726 if( !SetThreadPriority( m_hndl, val ) ) | |
727 throw new ThreadException( "Unable to set thread priority" ); | |
728 } | |
729 else version( Posix ) | |
730 { | |
731 // NOTE: pthread_setschedprio is not implemented on linux, so use | |
732 // the more complicated get/set sequence below. | |
733 //if( pthread_setschedprio( m_addr, val ) ) | |
734 // throw new ThreadException( "Unable to set thread priority" ); | |
735 | |
736 int policy; | |
737 sched_param param; | |
738 | |
739 if( pthread_getschedparam( m_addr, &policy, ¶m ) ) | |
740 throw new ThreadException( "Unable to set thread priority" ); | |
741 param.sched_priority = val; | |
742 if( pthread_setschedparam( m_addr, policy, ¶m ) ) | |
743 throw new ThreadException( "Unable to set thread priority" ); | |
744 } | |
745 } | |
746 | |
747 | |
748 //////////////////////////////////////////////////////////////////////////// | |
749 // Actions on Calling Thread | |
750 //////////////////////////////////////////////////////////////////////////// | |
751 | |
752 | |
753 /** | |
754 * Suspends the calling thread for at least the supplied time, up to a | |
755 * maximum of (uint.max - 1) milliseconds. | |
756 * | |
757 * Params: | |
758 * period = The minimum duration the calling thread should be suspended, | |
759 * in seconds. Sub-second durations are specified as fractional | |
760 * values. | |
761 * | |
762 * In: | |
763 * period must be less than (uint.max - 1) milliseconds. | |
764 * | |
765 * Example: | |
766 * ------------------------------------------------------------------------- | |
767 * | |
768 * Thread.sleep( 0.05 ); // sleep for 50 milliseconds | |
769 * Thread.sleep( 5 ); // sleep for 5 seconds | |
770 * | |
771 * ------------------------------------------------------------------------- | |
772 */ | |
773 static void sleep( double period ) | |
774 in | |
775 { | |
776 // NOTE: The fractional value added to period is to correct fp error. | |
777 assert( period * 1000 + 0.1 < uint.max - 1 ); | |
778 } | |
779 body | |
780 { | |
781 version( Win32 ) | |
782 { | |
783 Sleep( cast(uint)( period * 1000 + 0.1 ) ); | |
784 } | |
785 else version( Posix ) | |
786 { | |
787 timespec tin = void; | |
788 timespec tout = void; | |
789 | |
790 period += 0.000_000_000_1; | |
791 | |
792 if( tin.tv_sec.max < period ) | |
793 { | |
794 tin.tv_sec = tin.tv_sec.max; | |
795 tin.tv_nsec = 0; | |
796 } | |
797 else | |
798 { | |
799 tin.tv_sec = cast(typeof(tin.tv_sec)) period; | |
800 tin.tv_nsec = cast(typeof(tin.tv_nsec)) ((period % 1.0) * 1_000_000_000); | |
801 } | |
802 | |
803 while( true ) | |
804 { | |
805 if( !nanosleep( &tin, &tout ) ) | |
806 return; | |
807 if( getErrno() != EINTR ) | |
808 throw new ThreadException( "Unable to sleep for specified duration" ); | |
809 tin = tout; | |
810 } | |
811 } | |
812 } | |
813 | |
814 | |
815 /+ | |
816 /** | |
817 * Suspends the calling thread for at least the supplied time, up to a | |
818 * maximum of (uint.max - 1) milliseconds. | |
819 * | |
820 * Params: | |
821 * period = The minimum duration the calling thread should be suspended. | |
822 * | |
823 * In: | |
824 * period must be less than (uint.max - 1) milliseconds. | |
825 * | |
826 * Example: | |
827 * ------------------------------------------------------------------------- | |
828 * | |
829 * Thread.sleep( TimeSpan.milliseconds( 50 ) ); // sleep for 50 milliseconds | |
830 * Thread.sleep( TimeSpan.seconds( 5 ) ); // sleep for 5 seconds | |
831 * | |
832 * ------------------------------------------------------------------------- | |
833 */ | |
834 static void sleep( TimeSpan period ) | |
835 in | |
836 { | |
837 assert( period.milliseconds < uint.max - 1 ); | |
838 } | |
839 body | |
840 { | |
841 version( Win32 ) | |
842 { | |
843 Sleep( cast(uint)( period.milliseconds ) ); | |
844 } | |
845 else version( Posix ) | |
846 { | |
847 timespec tin = void; | |
848 timespec tout = void; | |
849 | |
850 if( tin.tv_sec.max < period.seconds ) | |
851 { | |
852 tin.tv_sec = tin.tv_sec.max; | |
853 tin.tv_nsec = 0; | |
854 } | |
855 else | |
856 { | |
857 tin.tv_sec = cast(typeof(tin.tv_sec)) period.seconds; | |
858 tin.tv_nsec = cast(typeof(tin.tv_nsec)) period.nanoseconds % 1_000_000_000; | |
859 } | |
860 | |
861 while( true ) | |
862 { | |
863 if( !nanosleep( &tin, &tout ) ) | |
864 return; | |
865 if( getErrno() != EINTR ) | |
866 throw new ThreadException( "Unable to sleep for specified duration" ); | |
867 tin = tout; | |
868 } | |
869 } | |
870 } | |
871 | |
872 | |
873 /** | |
874 * Suspends the calling thread for at least the supplied time, up to a | |
875 * maximum of (uint.max - 1) milliseconds. | |
876 * | |
877 * Params: | |
878 * period = The minimum duration the calling thread should be suspended, | |
879 * in seconds. Sub-second durations are specified as fractional | |
880 * values. Please note that because period is a floating-point | |
881 * number, some accuracy may be lost for certain intervals. For | |
882 * this reason, the TimeSpan overload is preferred in instances | |
883 * where an exact interval is required. | |
884 * | |
885 * In: | |
886 * period must be less than (uint.max - 1) milliseconds. | |
887 * | |
888 * Example: | |
889 * ------------------------------------------------------------------------- | |
890 * | |
891 * Thread.sleep( 0.05 ); // sleep for 50 milliseconds | |
892 * Thread.sleep( 5 ); // sleep for 5 seconds | |
893 * | |
894 * ------------------------------------------------------------------------- | |
895 */ | |
896 static void sleep( double period ) | |
897 { | |
898 sleep( TimeSpan.interval( period ) ); | |
899 } | |
900 +/ | |
901 | |
902 | |
903 /** | |
904 * Forces a context switch to occur away from the calling thread. | |
905 */ | |
906 static void yield() | |
907 { | |
908 version( Win32 ) | |
909 { | |
910 // NOTE: Sleep(1) is necessary because Sleep(0) does not give | |
911 // lower priority threads any timeslice, so looping on | |
912 // Sleep(0) could be resource-intensive in some cases. | |
913 Sleep( 1 ); | |
914 } | |
915 else version( Posix ) | |
916 { | |
917 sched_yield(); | |
918 } | |
919 } | |
920 | |
921 | |
922 //////////////////////////////////////////////////////////////////////////// | |
923 // Thread Accessors | |
924 //////////////////////////////////////////////////////////////////////////// | |
925 | |
926 | |
927 /** | |
928 * Provides a reference to the calling thread. | |
929 * | |
930 * Returns: | |
931 * The thread object representing the calling thread. The result of | |
932 * deleting this object is undefined. | |
933 */ | |
934 static Thread getThis() | |
935 { | |
936 // NOTE: This function may not be called until thread_init has | |
937 // completed. See thread_suspendAll for more information | |
938 // on why this might occur. | |
939 version( Win32 ) | |
940 { | |
941 return cast(Thread) TlsGetValue( sm_this ); | |
942 } | |
943 else version( Posix ) | |
944 { | |
945 return cast(Thread) pthread_getspecific( sm_this ); | |
946 } | |
947 } | |
948 | |
949 | |
950 /** | |
951 * Provides a list of all threads currently being tracked by the system. | |
952 * | |
953 * Returns: | |
954 * An array containing references to all threads currently being | |
955 * tracked by the system. The result of deleting any contained | |
956 * objects is undefined. | |
957 */ | |
958 static Thread[] getAll() | |
959 { | |
960 synchronized( slock ) | |
961 { | |
962 size_t pos = 0; | |
963 Thread[] buf = new Thread[sm_tlen]; | |
964 | |
965 foreach( Thread t; Thread ) | |
966 { | |
967 buf[pos++] = t; | |
968 } | |
969 return buf; | |
970 } | |
971 } | |
972 | |
973 | |
974 /** | |
975 * Operates on all threads currently being tracked by the system. The | |
976 * result of deleting any Thread object is undefined. | |
977 * | |
978 * Params: | |
979 * | |
980 * dg = The supplied code as a delegate. | |
981 * | |
982 * Returns: | |
983 Zero if all elements are visited, nonzero if not. | |
984 */ | |
985 static int opApply( int delegate( inout Thread ) dg ) | |
986 { | |
987 synchronized( slock ) | |
988 { | |
989 int ret = 0; | |
990 | |
991 for( Thread t = sm_tbeg; t; t = t.next ) | |
992 { | |
993 ret = dg( t ); | |
994 if( ret ) | |
995 break; | |
996 } | |
997 return ret; | |
998 } | |
999 } | |
1000 | |
1001 | |
1002 //////////////////////////////////////////////////////////////////////////// | |
1003 // Local Storage Actions | |
1004 //////////////////////////////////////////////////////////////////////////// | |
1005 | |
1006 | |
1007 /** | |
1008 * Indicates the number of local storage pointers available at program | |
1009 * startup. It is recommended that this number be at least 64. | |
1010 */ | |
1011 static const uint LOCAL_MAX = 64; | |
1012 | |
1013 | |
1014 /** | |
1015 * Reserves a local storage pointer for use and initializes this | |
1016 * location to null for all running threads. | |
1017 * | |
1018 * Returns: | |
1019 * A key representing the array offset of this memory location. | |
1020 */ | |
1021 static uint createLocal() | |
1022 { | |
1023 synchronized( slock ) | |
1024 { | |
1025 foreach( uint key, inout bool set; sm_local ) | |
1026 { | |
1027 if( !set ) | |
1028 { | |
1029 //foreach( Thread t; sm_tbeg ) Bug in GDC 0.24 SVN (r139) | |
1030 for( Thread t = sm_tbeg; t; t = t.next ) | |
1031 { | |
1032 t.m_local[key] = null; | |
1033 } | |
1034 set = true; | |
1035 return key; | |
1036 } | |
1037 } | |
1038 throw new ThreadException( "No more local storage slots available" ); | |
1039 } | |
1040 } | |
1041 | |
1042 | |
1043 /** | |
1044 * Marks the supplied key as available and sets the associated location | |
1045 * to null for all running threads. It is assumed that any key passed | |
1046 * to this function is valid. The result of calling this function for | |
1047 * a key which is still in use is undefined. | |
1048 * | |
1049 * Params: | |
1050 * key = The key to delete. | |
1051 */ | |
1052 static void deleteLocal( uint key ) | |
1053 { | |
1054 synchronized( slock ) | |
1055 { | |
1056 sm_local[key] = false; | |
1057 // foreach( Thread t; sm_tbeg ) Bug in GDC 0.24 SVN (r139) | |
1058 for( Thread t = sm_tbeg; t; t = t.next ) | |
1059 { | |
1060 t.m_local[key] = null; | |
1061 } | |
1062 } | |
1063 } | |
1064 | |
1065 | |
1066 /** | |
1067 * Gets the data associated with the supplied key value. It is assumed | |
1068 * that any key passed to this function is valid. | |
1069 * | |
1070 * Params: | |
1071 * key = The location which holds the desired data. | |
1072 * | |
1073 * Returns: | |
1074 * The data associated with the supplied key. | |
1075 */ | |
1076 static void* getLocal( uint key ) | |
1077 { | |
1078 return getThis().m_local[key]; | |
1079 } | |
1080 | |
1081 | |
1082 /** | |
1083 * Stores the supplied value in the specified location. It is assumed | |
1084 * that any key passed to this function is valid. | |
1085 * | |
1086 * Params: | |
1087 * key = The location to store the supplied data. | |
1088 * val = The data to store. | |
1089 * | |
1090 * Returns: | |
1091 * A copy of the data which has just been stored. | |
1092 */ | |
1093 static void* setLocal( uint key, void* val ) | |
1094 { | |
1095 return getThis().m_local[key] = val; | |
1096 } | |
1097 | |
1098 | |
1099 //////////////////////////////////////////////////////////////////////////// | |
1100 // Static Initializer | |
1101 //////////////////////////////////////////////////////////////////////////// | |
1102 | |
1103 | |
1104 /** | |
1105 * This initializer is used to set thread constants. All functional | |
1106 * initialization occurs within thread_init(). | |
1107 */ | |
1108 static this() | |
1109 { | |
1110 version( Win32 ) | |
1111 { | |
1112 PRIORITY_MIN = -15; | |
1113 PRIORITY_MAX = 15; | |
1114 } | |
1115 else version( Posix ) | |
1116 { | |
1117 int policy; | |
1118 sched_param param; | |
1119 pthread_t self = pthread_self(); | |
1120 | |
1121 int status = pthread_getschedparam( self, &policy, ¶m ); | |
1122 assert( status == 0 ); | |
1123 | |
1124 PRIORITY_MIN = sched_get_priority_min( policy ); | |
1125 assert( PRIORITY_MIN != -1 ); | |
1126 | |
1127 PRIORITY_MAX = sched_get_priority_max( policy ); | |
1128 assert( PRIORITY_MAX != -1 ); | |
1129 } | |
1130 } | |
1131 | |
1132 | |
1133 private: | |
1134 // | |
1135 // Initializes a thread object which has no associated executable function. | |
1136 // This is used for the main thread initialized in thread_init(). | |
1137 // | |
1138 this() | |
1139 { | |
1140 m_call = Call.NO; | |
1141 m_curr = &m_main; | |
1142 } | |
1143 | |
1144 | |
1145 // | |
1146 // Thread entry point. Invokes the function or delegate passed on | |
1147 // construction (if any). | |
1148 // | |
1149 final void run() | |
1150 { | |
1151 switch( m_call ) | |
1152 { | |
1153 case Call.FN: | |
1154 m_fn(); | |
1155 break; | |
1156 case Call.DG: | |
1157 m_dg(); | |
1158 break; | |
1159 default: | |
1160 break; | |
1161 } | |
1162 } | |
1163 | |
1164 | |
1165 private: | |
1166 // | |
1167 // The type of routine passed on thread construction. | |
1168 // | |
1169 enum Call | |
1170 { | |
1171 NO, | |
1172 FN, | |
1173 DG | |
1174 } | |
1175 | |
1176 | |
1177 // | |
1178 // Standard types | |
1179 // | |
1180 version( Win32 ) | |
1181 { | |
1182 alias uint TLSKey; | |
1183 alias uint ThreadAddr; | |
1184 } | |
1185 else version( Posix ) | |
1186 { | |
1187 alias pthread_key_t TLSKey; | |
1188 alias pthread_t ThreadAddr; | |
1189 } | |
1190 | |
1191 | |
1192 // | |
1193 // Local storage | |
1194 // | |
1195 static bool[LOCAL_MAX] sm_local; | |
1196 static TLSKey sm_this; | |
1197 | |
1198 void*[LOCAL_MAX] m_local; | |
1199 | |
1200 | |
1201 // | |
1202 // Standard thread data | |
1203 // | |
1204 version( Win32 ) | |
1205 { | |
1206 HANDLE m_hndl; | |
1207 } | |
1208 ThreadAddr m_addr; | |
1209 Call m_call; | |
1210 char[] m_name; | |
1211 union | |
1212 { | |
1213 void function() m_fn; | |
1214 void delegate() m_dg; | |
1215 } | |
1216 size_t m_sz; | |
1217 version( Posix ) | |
1218 { | |
1219 bool m_isRunning; | |
1220 } | |
1221 bool m_isDaemon; | |
1222 Object m_unhandled; | |
1223 | |
1224 | |
1225 private: | |
1226 //////////////////////////////////////////////////////////////////////////// | |
1227 // Storage of Active Thread | |
1228 //////////////////////////////////////////////////////////////////////////// | |
1229 | |
1230 | |
1231 // | |
1232 // Sets a thread-local reference to the current thread object. | |
1233 // | |
1234 static void setThis( Thread t ) | |
1235 { | |
1236 version( Win32 ) | |
1237 { | |
1238 TlsSetValue( sm_this, cast(void*) t ); | |
1239 } | |
1240 else version( Posix ) | |
1241 { | |
1242 pthread_setspecific( sm_this, cast(void*) t ); | |
1243 } | |
1244 } | |
1245 | |
1246 | |
1247 private: | |
1248 //////////////////////////////////////////////////////////////////////////// | |
1249 // Thread Context and GC Scanning Support | |
1250 //////////////////////////////////////////////////////////////////////////// | |
1251 | |
1252 | |
1253 final void pushContext( Context* c ) | |
1254 in | |
1255 { | |
1256 assert( !c.within ); | |
1257 } | |
1258 body | |
1259 { | |
1260 c.within = m_curr; | |
1261 m_curr = c; | |
1262 } | |
1263 | |
1264 | |
1265 final void popContext() | |
1266 in | |
1267 { | |
1268 assert( m_curr && m_curr.within ); | |
1269 } | |
1270 body | |
1271 { | |
1272 Context* c = m_curr; | |
1273 m_curr = c.within; | |
1274 c.within = null; | |
1275 } | |
1276 | |
1277 | |
1278 final Context* topContext() | |
1279 in | |
1280 { | |
1281 assert( m_curr ); | |
1282 } | |
1283 body | |
1284 { | |
1285 return m_curr; | |
1286 } | |
1287 | |
1288 | |
1289 static struct Context | |
1290 { | |
1291 void* bstack, | |
1292 tstack; | |
1293 Context* within; | |
1294 Context* next, | |
1295 prev; | |
1296 } | |
1297 | |
1298 | |
1299 Context m_main; | |
1300 Context* m_curr; | |
1301 bool m_lock; | |
1302 | |
1303 version( Win32 ) | |
1304 { | |
1305 uint[8] m_reg; // edi,esi,ebp,esp,ebx,edx,ecx,eax | |
1306 } | |
1307 | |
1308 | |
1309 private: | |
1310 //////////////////////////////////////////////////////////////////////////// | |
1311 // GC Scanning Support | |
1312 //////////////////////////////////////////////////////////////////////////// | |
1313 | |
1314 | |
1315 // NOTE: The GC scanning process works like so: | |
1316 // | |
1317 // 1. Suspend all threads. | |
1318 // 2. Scan the stacks of all suspended threads for roots. | |
1319 // 3. Resume all threads. | |
1320 // | |
1321 // Step 1 and 3 require a list of all threads in the system, while | |
1322 // step 2 requires a list of all thread stacks (each represented by | |
1323 // a Context struct). Traditionally, there was one stack per thread | |
1324 // and the Context structs were not necessary. However, Fibers have | |
1325 // changed things so that each thread has its own 'main' stack plus | |
1326 // an arbitrary number of nested stacks (normally referenced via | |
1327 // m_curr). Also, there may be 'free-floating' stacks in the system, | |
1328 // which are Fibers that are not currently executing on any specific | |
1329 // thread but are still being processed and still contain valid | |
1330 // roots. | |
1331 // | |
1332 // To support all of this, the Context struct has been created to | |
1333 // represent a stack range, and a global list of Context structs has | |
1334 // been added to enable scanning of these stack ranges. The lifetime | |
1335 // (and presence in the Context list) of a thread's 'main' stack will | |
1336 // be equivalent to the thread's lifetime. So the Ccontext will be | |
1337 // added to the list on thread entry, and removed from the list on | |
1338 // thread exit (which is essentially the same as the presence of a | |
1339 // Thread object in its own global list). The lifetime of a Fiber's | |
1340 // context, however, will be tied to the lifetime of the Fiber object | |
1341 // itself, and Fibers are expected to add/remove their Context struct | |
1342 // on construction/deletion. | |
1343 | |
1344 | |
1345 // | |
1346 // All use of the global lists should synchronize on this lock. | |
1347 // | |
1348 static Object slock() | |
1349 { | |
1350 return Thread.classinfo; | |
1351 } | |
1352 | |
1353 | |
1354 static Context* sm_cbeg; | |
1355 static size_t sm_clen; | |
1356 | |
1357 static Thread sm_tbeg; | |
1358 static size_t sm_tlen; | |
1359 | |
1360 // | |
1361 // Used for ordering threads in the global thread list. | |
1362 // | |
1363 Thread prev; | |
1364 Thread next; | |
1365 | |
1366 | |
1367 //////////////////////////////////////////////////////////////////////////// | |
1368 // Global Context List Operations | |
1369 //////////////////////////////////////////////////////////////////////////// | |
1370 | |
1371 | |
1372 // | |
1373 // Add a context to the global context list. | |
1374 // | |
1375 static void add( Context* c ) | |
1376 in | |
1377 { | |
1378 assert( c ); | |
1379 assert( !c.next && !c.prev ); | |
1380 } | |
1381 body | |
1382 { | |
1383 synchronized( slock ) | |
1384 { | |
1385 if( sm_cbeg ) | |
1386 { | |
1387 c.next = sm_cbeg; | |
1388 sm_cbeg.prev = c; | |
1389 } | |
1390 sm_cbeg = c; | |
1391 ++sm_clen; | |
1392 } | |
1393 } | |
1394 | |
1395 | |
1396 // | |
1397 // Remove a context from the global context list. | |
1398 // | |
1399 static void remove( Context* c ) | |
1400 in | |
1401 { | |
1402 assert( c ); | |
1403 assert( c.next || c.prev ); | |
1404 } | |
1405 body | |
1406 { | |
1407 synchronized( slock ) | |
1408 { | |
1409 if( c.prev ) | |
1410 c.prev.next = c.next; | |
1411 if( c.next ) | |
1412 c.next.prev = c.prev; | |
1413 if( sm_cbeg == c ) | |
1414 sm_cbeg = c.next; | |
1415 --sm_clen; | |
1416 } | |
1417 // NOTE: Don't null out c.next or c.prev because opApply currently | |
1418 // follows c.next after removing a node. This could be easily | |
1419 // addressed by simply returning the next node from this function, | |
1420 // however, a context should never be re-added to the list anyway | |
1421 // and having next and prev be non-null is a good way to | |
1422 // ensure that. | |
1423 } | |
1424 | |
1425 | |
1426 //////////////////////////////////////////////////////////////////////////// | |
1427 // Global Thread List Operations | |
1428 //////////////////////////////////////////////////////////////////////////// | |
1429 | |
1430 | |
1431 // | |
1432 // Add a thread to the global thread list. | |
1433 // | |
1434 static void add( Thread t ) | |
1435 in | |
1436 { | |
1437 assert( t ); | |
1438 assert( !t.next && !t.prev ); | |
1439 assert( t.isRunning ); | |
1440 } | |
1441 body | |
1442 { | |
1443 synchronized( slock ) | |
1444 { | |
1445 if( sm_tbeg ) | |
1446 { | |
1447 t.next = sm_tbeg; | |
1448 sm_tbeg.prev = t; | |
1449 } | |
1450 sm_tbeg = t; | |
1451 ++sm_tlen; | |
1452 } | |
1453 } | |
1454 | |
1455 | |
1456 // | |
1457 // Remove a thread from the global thread list. | |
1458 // | |
1459 static void remove( Thread t ) | |
1460 in | |
1461 { | |
1462 assert( t ); | |
1463 assert( t.next || t.prev ); | |
1464 version( Win32 ) | |
1465 { | |
1466 // NOTE: This doesn't work for Posix as m_isRunning must be set to | |
1467 // false after the thread is removed during normal execution. | |
1468 assert( !t.isRunning ); | |
1469 } | |
1470 } | |
1471 body | |
1472 { | |
1473 synchronized( slock ) | |
1474 { | |
1475 // NOTE: When a thread is removed from the global thread list its | |
1476 // main context is invalid and should be removed as well. | |
1477 // It is possible that t.m_curr could reference more | |
1478 // than just the main context if the thread exited abnormally | |
1479 // (if it was terminated), but we must assume that the user | |
1480 // retains a reference to them and that they may be re-used | |
1481 // elsewhere. Therefore, it is the responsibility of any | |
1482 // object that creates contexts to clean them up properly | |
1483 // when it is done with them. | |
1484 remove( &t.m_main ); | |
1485 | |
1486 if( t.prev ) | |
1487 t.prev.next = t.next; | |
1488 if( t.next ) | |
1489 t.next.prev = t.prev; | |
1490 if( sm_tbeg == t ) | |
1491 sm_tbeg = t.next; | |
1492 --sm_tlen; | |
1493 } | |
1494 // NOTE: Don't null out t.next or t.prev because opApply currently | |
1495 // follows t.next after removing a node. This could be easily | |
1496 // addressed by simply returning the next node from this function, | |
1497 // however, a thread should never be re-added to the list anyway | |
1498 // and having next and prev be non-null is a good way to | |
1499 // ensure that. | |
1500 } | |
1501 } | |
1502 | |
1503 | |
1504 //////////////////////////////////////////////////////////////////////////////// | |
1505 // GC Support Routines | |
1506 //////////////////////////////////////////////////////////////////////////////// | |
1507 | |
1508 | |
1509 /** | |
1510 * Initializes the thread module. This function must be called by the | |
1511 * garbage collector on startup and before any other thread routines | |
1512 * are called. | |
1513 */ | |
1514 extern (C) void thread_init() | |
1515 { | |
1516 // NOTE: If thread_init itself performs any allocations then the thread | |
1517 // routines reserved for garbage collector use may be called while | |
1518 // thread_init is being processed. However, since no memory should | |
1519 // exist to be scanned at this point, it is sufficient for these | |
1520 // functions to detect the condition and return immediately. | |
1521 | |
1522 version( Win32 ) | |
1523 { | |
1524 Thread.sm_this = TlsAlloc(); | |
1525 assert( Thread.sm_this != TLS_OUT_OF_INDEXES ); | |
1526 } | |
1527 else version( Posix ) | |
1528 { | |
1529 int status; | |
1530 sigaction_t sigusr1 = void; | |
1531 sigaction_t sigusr2 = void; | |
1532 | |
1533 // This is a quick way to zero-initialize the structs without using | |
1534 // memset or creating a link dependency on their static initializer. | |
1535 (cast(byte*) &sigusr1)[0 .. sigaction_t.sizeof] = 0; | |
1536 (cast(byte*) &sigusr2)[0 .. sigaction_t.sizeof] = 0; | |
1537 | |
1538 // NOTE: SA_RESTART indicates that system calls should restart if they | |
1539 // are interrupted by a signal, but this is not available on all | |
1540 // Posix systems, even those that support multithreading. | |
1541 static if( is( typeof( SA_RESTART ) ) ) | |
1542 sigusr1.sa_flags = SA_RESTART; | |
1543 else | |
1544 sigusr1.sa_flags = 0; | |
1545 sigusr1.sa_handler = &thread_suspendHandler; | |
1546 // NOTE: We want to ignore all signals while in this handler, so fill | |
1547 // sa_mask to indicate this. | |
1548 status = sigfillset( &sigusr1.sa_mask ); | |
1549 assert( status == 0 ); | |
1550 | |
1551 // NOTE: Since SIGUSR2 should only be issued for threads within the | |
1552 // suspend handler, we don't want this signal to trigger a | |
1553 // restart. | |
1554 sigusr2.sa_flags = 0; | |
1555 sigusr2.sa_handler = &thread_resumeHandler; | |
1556 // NOTE: We want to ignore all signals while in this handler, so fill | |
1557 // sa_mask to indicate this. | |
1558 status = sigfillset( &sigusr2.sa_mask ); | |
1559 assert( status == 0 ); | |
1560 | |
1561 status = sigaction( SIGUSR1, &sigusr1, null ); | |
1562 assert( status == 0 ); | |
1563 | |
1564 status = sigaction( SIGUSR2, &sigusr2, null ); | |
1565 assert( status == 0 ); | |
1566 | |
1567 status = sem_init( &suspendCount, 0, 0 ); | |
1568 assert( status == 0 ); | |
1569 | |
1570 status = pthread_key_create( &Thread.sm_this, null ); | |
1571 assert( status == 0 ); | |
1572 } | |
1573 | |
1574 thread_attachThis(); | |
1575 } | |
1576 | |
1577 | |
1578 /** | |
1579 * Registers the calling thread for use with Tango. If this routine is called | |
1580 * for a thread which is already registered, the result is undefined. | |
1581 */ | |
1582 extern (C) void thread_attachThis() | |
1583 { | |
1584 version( Win32 ) | |
1585 { | |
1586 Thread thisThread = new Thread(); | |
1587 Thread.Context* thisContext = &thisThread.m_main; | |
1588 assert( thisContext == thisThread.m_curr ); | |
1589 | |
1590 thisThread.m_addr = GetCurrentThreadId(); | |
1591 thisThread.m_hndl = GetCurrentThreadHandle(); | |
1592 thisContext.bstack = getStackBottom(); | |
1593 thisContext.tstack = thisContext.bstack; | |
1594 | |
1595 thisThread.m_isDaemon = true; | |
1596 | |
1597 Thread.setThis( thisThread ); | |
1598 } | |
1599 else version( Posix ) | |
1600 { | |
1601 Thread thisThread = new Thread(); | |
1602 Thread.Context* thisContext = thisThread.m_curr; | |
1603 assert( thisContext == &thisThread.m_main ); | |
1604 | |
1605 thisThread.m_addr = pthread_self(); | |
1606 thisContext.bstack = getStackBottom(); | |
1607 thisContext.tstack = thisContext.bstack; | |
1608 | |
1609 thisThread.m_isRunning = true; | |
1610 thisThread.m_isDaemon = true; | |
1611 | |
1612 Thread.setThis( thisThread ); | |
1613 } | |
1614 | |
1615 Thread.add( thisThread ); | |
1616 Thread.add( thisContext ); | |
1617 } | |
1618 | |
1619 | |
1620 /** | |
1621 * Deregisters the calling thread from use with Tango. If this routine is | |
1622 * called for a thread which is already registered, the result is undefined. | |
1623 */ | |
1624 extern (C) void thread_detachThis() | |
1625 { | |
1626 Thread.remove( Thread.getThis() ); | |
1627 } | |
1628 | |
1629 | |
1630 /** | |
1631 * Joins all non-daemon threads that are currently running. This is done by | |
1632 * performing successive scans through the thread list until a scan consists | |
1633 * of only daemon threads. | |
1634 */ | |
1635 extern (C) void thread_joinAll() | |
1636 { | |
1637 | |
1638 while( true ) | |
1639 { | |
1640 Thread nonDaemon = null; | |
1641 | |
1642 foreach( t; Thread ) | |
1643 { | |
1644 if( !t.isDaemon ) | |
1645 { | |
1646 nonDaemon = t; | |
1647 break; | |
1648 } | |
1649 } | |
1650 if( nonDaemon is null ) | |
1651 return; | |
1652 nonDaemon.join(); | |
1653 } | |
1654 } | |
1655 | |
1656 | |
1657 /** | |
1658 * Performs intermediate shutdown of the thread module. | |
1659 */ | |
1660 static ~this() | |
1661 { | |
1662 // NOTE: The functionality related to garbage collection must be minimally | |
1663 // operable after this dtor completes. Therefore, only minimal | |
1664 // cleanup may occur. | |
1665 | |
1666 for( Thread t = Thread.sm_tbeg; t; t = t.next ) | |
1667 { | |
1668 if( !t.isRunning ) | |
1669 Thread.remove( t ); | |
1670 } | |
1671 } | |
1672 | |
1673 | |
1674 // Used for needLock below | |
1675 private bool multiThreadedFlag = false; | |
1676 | |
1677 | |
1678 /** | |
1679 * This function is used to determine whether the the process is | |
1680 * multi-threaded. Optimizations may only be performed on this | |
1681 * value if the programmer can guarantee that no path from the | |
1682 * enclosed code will start a thread. | |
1683 * | |
1684 * Returns: | |
1685 * True if Thread.start() has been called in this process. | |
1686 */ | |
1687 extern (C) bool thread_needLock() | |
1688 { | |
1689 return multiThreadedFlag; | |
1690 } | |
1691 | |
1692 | |
1693 // Used for suspendAll/resumeAll below | |
1694 private uint suspendDepth = 0; | |
1695 | |
1696 | |
1697 /** | |
1698 * Suspend all threads but the calling thread for "stop the world" garbage | |
1699 * collection runs. This function may be called multiple times, and must | |
1700 * be followed by a matching number of calls to thread_resumeAll before | |
1701 * processing is resumed. | |
1702 * | |
1703 * Throws: | |
1704 * ThreadException if the suspend operation fails for a running thread. | |
1705 */ | |
1706 extern (C) void thread_suspendAll() | |
1707 { | |
1708 /** | |
1709 * Suspend the specified thread and load stack and register information for | |
1710 * use by thread_scanAll. If the supplied thread is the calling thread, | |
1711 * stack and register information will be loaded but the thread will not | |
1712 * be suspended. If the suspend operation fails and the thread is not | |
1713 * running then it will be removed from the global thread list, otherwise | |
1714 * an exception will be thrown. | |
1715 * | |
1716 * Params: | |
1717 * t = The thread to suspend. | |
1718 * | |
1719 * Throws: | |
1720 * ThreadException if the suspend operation fails for a running thread. | |
1721 */ | |
1722 void suspend( Thread t ) | |
1723 { | |
1724 version( Win32 ) | |
1725 { | |
1726 if( t.m_addr != GetCurrentThreadId() && SuspendThread( t.m_hndl ) == 0xFFFFFFFF ) | |
1727 { | |
1728 if( !t.isRunning ) | |
1729 { | |
1730 Thread.remove( t ); | |
1731 return; | |
1732 } | |
1733 throw new ThreadException( "Unable to suspend thread" ); | |
1734 } | |
1735 | |
1736 CONTEXT context = void; | |
1737 context.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL; | |
1738 | |
1739 if( !GetThreadContext( t.m_hndl, &context ) ) | |
1740 throw new ThreadException( "Unable to load thread context" ); | |
1741 if( !t.m_lock ) | |
1742 t.m_curr.tstack = cast(void*) context.Esp; | |
1743 // edi,esi,ebp,esp,ebx,edx,ecx,eax | |
1744 t.m_reg[0] = context.Edi; | |
1745 t.m_reg[1] = context.Esi; | |
1746 t.m_reg[2] = context.Ebp; | |
1747 t.m_reg[3] = context.Esp; | |
1748 t.m_reg[4] = context.Ebx; | |
1749 t.m_reg[5] = context.Edx; | |
1750 t.m_reg[6] = context.Ecx; | |
1751 t.m_reg[7] = context.Eax; | |
1752 } | |
1753 else version( Posix ) | |
1754 { | |
1755 if( t.m_addr != pthread_self() ) | |
1756 { | |
1757 if( pthread_kill( t.m_addr, SIGUSR1 ) != 0 ) | |
1758 { | |
1759 if( !t.isRunning ) | |
1760 { | |
1761 Thread.remove( t ); | |
1762 return; | |
1763 } | |
1764 throw new ThreadException( "Unable to suspend thread" ); | |
1765 } | |
1766 // NOTE: It's really not ideal to wait for each thread to signal | |
1767 // individually -- rather, it would be better to suspend | |
1768 // them all and wait once at the end. However, semaphores | |
1769 // don't really work this way, and the obvious alternative | |
1770 // (looping on an atomic suspend count) requires either | |
1771 // the atomic module (which only works on x86) or other | |
1772 // specialized functionality. It would also be possible | |
1773 // to simply loop on sem_wait at the end, but I'm not | |
1774 // convinced that this would be much faster than the | |
1775 // current approach. | |
1776 sem_wait( &suspendCount ); | |
1777 } | |
1778 else if( !t.m_lock ) | |
1779 { | |
1780 t.m_curr.tstack = getStackTop(); | |
1781 } | |
1782 } | |
1783 } | |
1784 | |
1785 | |
1786 // NOTE: We've got an odd chicken & egg problem here, because while the GC | |
1787 // is required to call thread_init before calling any other thread | |
1788 // routines, thread_init may allocate memory which could in turn | |
1789 // trigger a collection. Thus, thread_suspendAll, thread_scanAll, | |
1790 // and thread_resumeAll must be callable before thread_init completes, | |
1791 // with the assumption that no other GC memory has yet been allocated | |
1792 // by the system, and thus there is no risk of losing data if the | |
1793 // global thread list is empty. The check of Thread.sm_tbeg | |
1794 // below is done to ensure thread_init has completed, and therefore | |
1795 // that calling Thread.getThis will not result in an error. For the | |
1796 // short time when Thread.sm_tbeg is null, there is no reason | |
1797 // not to simply call the multithreaded code below, with the | |
1798 // expectation that the foreach loop will never be entered. | |
1799 if( !multiThreadedFlag && Thread.sm_tbeg ) | |
1800 { | |
1801 if( ++suspendDepth == 1 ) | |
1802 suspend( Thread.getThis() ); | |
1803 return; | |
1804 } | |
1805 synchronized( Thread.slock ) | |
1806 { | |
1807 if( ++suspendDepth > 1 ) | |
1808 return; | |
1809 | |
1810 // NOTE: I'd really prefer not to check isRunning within this loop but | |
1811 // not doing so could be problematic if threads are termianted | |
1812 // abnormally and a new thread is created with the same thread | |
1813 // address before the next GC run. This situation might cause | |
1814 // the same thread to be suspended twice, which would likely | |
1815 // cause the second suspend to fail, the garbage collection to | |
1816 // abort, and Bad Things to occur. | |
1817 for( Thread t = Thread.sm_tbeg; t; t = t.next ) | |
1818 { | |
1819 if( t.isRunning ) | |
1820 suspend( t ); | |
1821 else | |
1822 Thread.remove( t ); | |
1823 } | |
1824 | |
1825 version( Posix ) | |
1826 { | |
1827 // wait on semaphore -- see note in suspend for | |
1828 // why this is currently not implemented | |
1829 } | |
1830 } | |
1831 } | |
1832 | |
1833 | |
1834 /** | |
1835 * Resume all threads but the calling thread for "stop the world" garbage | |
1836 * collection runs. This function must be called once for each preceding | |
1837 * call to thread_suspendAll before the threads are actually resumed. | |
1838 * | |
1839 * In: | |
1840 * This routine must be preceded by a call to thread_suspendAll. | |
1841 * | |
1842 * Throws: | |
1843 * ThreadException if the resume operation fails for a running thread. | |
1844 */ | |
1845 extern (C) void thread_resumeAll() | |
1846 in | |
1847 { | |
1848 assert( suspendDepth > 0 ); | |
1849 } | |
1850 body | |
1851 { | |
1852 /** | |
1853 * Resume the specified thread and unload stack and register information. | |
1854 * If the supplied thread is the calling thread, stack and register | |
1855 * information will be unloaded but the thread will not be resumed. If | |
1856 * the resume operation fails and the thread is not running then it will | |
1857 * be removed from the global thread list, otherwise an exception will be | |
1858 * thrown. | |
1859 * | |
1860 * Params: | |
1861 * t = The thread to resume. | |
1862 * | |
1863 * Throws: | |
1864 * ThreadException if the resume fails for a running thread. | |
1865 */ | |
1866 void resume( Thread t ) | |
1867 { | |
1868 version( Win32 ) | |
1869 { | |
1870 if( t.m_addr != GetCurrentThreadId() && ResumeThread( t.m_hndl ) == 0xFFFFFFFF ) | |
1871 { | |
1872 if( !t.isRunning ) | |
1873 { | |
1874 Thread.remove( t ); | |
1875 return; | |
1876 } | |
1877 throw new ThreadException( "Unable to resume thread" ); | |
1878 } | |
1879 | |
1880 if( !t.m_lock ) | |
1881 t.m_curr.tstack = t.m_curr.bstack; | |
1882 t.m_reg[0 .. $] = 0; | |
1883 } | |
1884 else version( Posix ) | |
1885 { | |
1886 if( t.m_addr != pthread_self() ) | |
1887 { | |
1888 if( pthread_kill( t.m_addr, SIGUSR2 ) != 0 ) | |
1889 { | |
1890 if( !t.isRunning ) | |
1891 { | |
1892 Thread.remove( t ); | |
1893 return; | |
1894 } | |
1895 throw new ThreadException( "Unable to resume thread" ); | |
1896 } | |
1897 } | |
1898 else if( !t.m_lock ) | |
1899 { | |
1900 t.m_curr.tstack = t.m_curr.bstack; | |
1901 } | |
1902 } | |
1903 } | |
1904 | |
1905 | |
1906 // NOTE: See thread_suspendAll for the logic behind this. | |
1907 if( !multiThreadedFlag && Thread.sm_tbeg ) | |
1908 { | |
1909 if( --suspendDepth == 0 ) | |
1910 resume( Thread.getThis() ); | |
1911 return; | |
1912 } | |
1913 synchronized( Thread.slock ) | |
1914 { | |
1915 if( --suspendDepth > 0 ) | |
1916 return; | |
1917 | |
1918 for( Thread t = Thread.sm_tbeg; t; t = t.next ) | |
1919 { | |
1920 resume( t ); | |
1921 } | |
1922 } | |
1923 } | |
1924 | |
1925 | |
1926 private alias void delegate( void*, void* ) scanAllThreadsFn; | |
1927 | |
1928 | |
1929 /** | |
1930 * The main entry point for garbage collection. The supplied delegate | |
1931 * will be passed ranges representing both stack and register values. | |
1932 * | |
1933 * Params: | |
1934 * scan = The scanner function. It should scan from p1 through p2 - 1. | |
1935 * curStackTop = An optional pointer to the top of the calling thread's stack. | |
1936 * | |
1937 * In: | |
1938 * This routine must be preceded by a call to thread_suspendAll. | |
1939 */ | |
1940 extern (C) void thread_scanAll( scanAllThreadsFn scan, void* curStackTop = null ) | |
1941 in | |
1942 { | |
1943 assert( suspendDepth > 0 ); | |
1944 } | |
1945 body | |
1946 { | |
1947 Thread thisThread = null; | |
1948 void* oldStackTop = null; | |
1949 | |
1950 if( curStackTop && Thread.sm_tbeg ) | |
1951 { | |
1952 thisThread = Thread.getThis(); | |
1953 if( !thisThread.m_lock ) | |
1954 { | |
1955 oldStackTop = thisThread.m_curr.tstack; | |
1956 thisThread.m_curr.tstack = curStackTop; | |
1957 } | |
1958 } | |
1959 | |
1960 scope( exit ) | |
1961 { | |
1962 if( curStackTop && Thread.sm_tbeg ) | |
1963 { | |
1964 if( !thisThread.m_lock ) | |
1965 { | |
1966 thisThread.m_curr.tstack = oldStackTop; | |
1967 } | |
1968 } | |
1969 } | |
1970 | |
1971 // NOTE: Synchronizing on Thread.slock is not needed because this | |
1972 // function may only be called after all other threads have | |
1973 // been suspended from within the same lock. | |
1974 for( Thread.Context* c = Thread.sm_cbeg; c; c = c.next ) | |
1975 { | |
1976 version( StackGrowsDown ) | |
1977 { | |
1978 // NOTE: We can't index past the bottom of the stack | |
1979 // so don't do the "+1" for StackGrowsDown. | |
1980 if( c.tstack && c.tstack < c.bstack ) | |
1981 scan( c.tstack, c.bstack ); | |
1982 } | |
1983 else | |
1984 { | |
1985 if( c.bstack && c.bstack < c.tstack ) | |
1986 scan( c.bstack, c.tstack + 1 ); | |
1987 } | |
1988 } | |
1989 version( Win32 ) | |
1990 { | |
1991 for( Thread t = Thread.sm_tbeg; t; t = t.next ) | |
1992 { | |
1993 scan( &t.m_reg[0], &t.m_reg[0] + t.m_reg.length ); | |
1994 } | |
1995 } | |
1996 } | |
1997 | |
1998 | |
1999 //////////////////////////////////////////////////////////////////////////////// | |
2000 // Thread Local | |
2001 //////////////////////////////////////////////////////////////////////////////// | |
2002 | |
2003 | |
2004 /** | |
2005 * This class encapsulates the operations required to initialize, access, and | |
2006 * destroy thread local data. | |
2007 */ | |
2008 class ThreadLocal( T ) | |
2009 { | |
2010 //////////////////////////////////////////////////////////////////////////// | |
2011 // Initialization | |
2012 //////////////////////////////////////////////////////////////////////////// | |
2013 | |
2014 | |
2015 /** | |
2016 * Initializes thread local storage for the indicated value which will be | |
2017 * initialized to def for all threads. | |
2018 * | |
2019 * Params: | |
2020 * def = The default value to return if no value has been explicitly set. | |
2021 */ | |
2022 this( T def = T.init ) | |
2023 { | |
2024 m_def = def; | |
2025 m_key = Thread.createLocal(); | |
2026 } | |
2027 | |
2028 | |
2029 ~this() | |
2030 { | |
2031 Thread.deleteLocal( m_key ); | |
2032 } | |
2033 | |
2034 | |
2035 //////////////////////////////////////////////////////////////////////////// | |
2036 // Accessors | |
2037 //////////////////////////////////////////////////////////////////////////// | |
2038 | |
2039 | |
2040 /** | |
2041 * Gets the value last set by the calling thread, or def if no such value | |
2042 * has been set. | |
2043 * | |
2044 * Returns: | |
2045 * The stored value or def if no value is stored. | |
2046 */ | |
2047 T val() | |
2048 { | |
2049 Wrap* wrap = cast(Wrap*) Thread.getLocal( m_key ); | |
2050 | |
2051 return wrap ? wrap.val : m_def; | |
2052 } | |
2053 | |
2054 | |
2055 /** | |
2056 * Copies newval to a location specific to the calling thread, and returns | |
2057 * newval. | |
2058 * | |
2059 * Params: | |
2060 * newval = The value to set. | |
2061 * | |
2062 * Returns: | |
2063 * The value passed to this function. | |
2064 */ | |
2065 T val( T newval ) | |
2066 { | |
2067 Wrap* wrap = cast(Wrap*) Thread.getLocal( m_key ); | |
2068 | |
2069 if( wrap is null ) | |
2070 { | |
2071 wrap = new Wrap; | |
2072 Thread.setLocal( m_key, wrap ); | |
2073 } | |
2074 wrap.val = newval; | |
2075 return newval; | |
2076 } | |
2077 | |
2078 | |
2079 private: | |
2080 // | |
2081 // A wrapper for the stored data. This is needed for determining whether | |
2082 // set has ever been called for this thread (and therefore whether the | |
2083 // default value should be returned) and also to flatten the differences | |
2084 // between data that is smaller and larger than (void*).sizeof. The | |
2085 // obvious tradeoff here is an extra per-thread allocation for each | |
2086 // ThreadLocal value as compared to calling the Thread routines directly. | |
2087 // | |
2088 struct Wrap | |
2089 { | |
2090 T val; | |
2091 } | |
2092 | |
2093 | |
2094 T m_def; | |
2095 uint m_key; | |
2096 } | |
2097 | |
2098 | |
2099 //////////////////////////////////////////////////////////////////////////////// | |
2100 // Thread Group | |
2101 //////////////////////////////////////////////////////////////////////////////// | |
2102 | |
2103 | |
/**
 * This class is intended to simplify certain common programming techniques.
 */
class ThreadGroup
{
    /**
     * Creates and starts a new Thread object that executes fn and adds it to
     * the list of tracked threads.
     *
     * Params:
     *  fn = The thread function.
     *
     * Returns:
     *  A reference to the newly created thread.
     */
    final Thread create( void function() fn )
    {
        Thread newThread = new Thread( fn );

        newThread.start();
        synchronized
        {
            m_all[newThread] = newThread;
        }
        return newThread;
    }


    /**
     * Creates and starts a new Thread object that executes dg and adds it to
     * the list of tracked threads.
     *
     * Params:
     *  dg = The thread function.
     *
     * Returns:
     *  A reference to the newly created thread.
     */
    final Thread create( void delegate() dg )
    {
        Thread newThread = new Thread( dg );

        newThread.start();
        synchronized
        {
            m_all[newThread] = newThread;
        }
        return newThread;
    }


    /**
     * Add t to the list of tracked threads if it is not already being tracked.
     *
     * Params:
     *  t = The thread to add.
     *
     * In:
     *  t must not be null.
     */
    final void add( Thread t )
    in
    {
        assert( t );
    }
    body
    {
        synchronized
        {
            // inserting an existing key is a no-op, so no lookup is needed
            m_all[t] = t;
        }
    }


    /**
     * Removes t from the list of tracked threads.  No operation will be
     * performed if t is not currently being tracked by this object.
     *
     * Params:
     *  t = The thread to remove.
     *
     * In:
     *  t must not be null.
     */
    final void remove( Thread t )
    in
    {
        assert( t );
    }
    body
    {
        synchronized
        {
            m_all.remove( t );
        }
    }


    /**
     * Operates on all threads currently tracked by this object.
     */
    final int opApply( int delegate( inout Thread ) dg )
    {
        synchronized
        {
            int result = 0;

            // m_all maps each Thread onto itself, so walking the key set
            // visits every tracked thread exactly once.
            foreach( Thread entry; m_all.keys )
            {
                result = dg( entry );
                if( result != 0 )
                    break;
            }
            return result;
        }
    }


    /**
     * Iteratively joins all tracked threads.  This function will block add,
     * remove, and opApply until it completes.
     *
     * Params:
     *  rethrow = Rethrow any unhandled exception which may have caused the
     *            current thread to terminate.
     *
     * Throws:
     *  Any exception not handled by the joined threads.
     */
    final void joinAll( bool rethrow = true )
    {
        synchronized
        {
            // m_all maps each Thread onto itself, so the key set holds
            // every tracked thread.
            foreach( Thread entry; m_all.keys )
            {
                entry.join( rethrow );
            }
        }
    }


private:
    Thread[Thread] m_all; // set of tracked threads, keyed by identity
}
2252 | |
2253 | |
2254 //////////////////////////////////////////////////////////////////////////////// | |
2255 // Fiber Platform Detection and Memory Allocation | |
2256 //////////////////////////////////////////////////////////////////////////////// | |
2257 | |
2258 | |
// Selects the fiber context-switch implementation for the current platform
// and brings in the POSIX memory-management symbols used by Fiber.allocStack.
private
{
    version( D_InlineAsm_X86 )
    {
        // NOTE(review): under LLVMDC no AsmX86_* version is set, so the
        // ucontext/extern fallback below is used instead of inline asm.
        version( LLVMDC )
        {
        }
        else version( X86_64 )
        {

        }
        else
        {
            version( Win32 )
                version = AsmX86_Win32;
            else version( Posix )
                version = AsmX86_Posix;
        }
    }
    else version( PPC )
    {
        version( Posix )
            version = AsmPPC_Posix;
    }


    version( Posix )
    {
        import tango.stdc.posix.unistd;   // for sysconf
        import tango.stdc.posix.sys.mman; // for mmap
        import tango.stdc.posix.stdlib;   // for malloc, valloc, free

        version( AsmX86_Win32 ) {} else
        version( AsmX86_Posix ) {} else
        version( AsmPPC_Posix ) {} else
        {
            // NOTE: The ucontext implementation requires architecture specific
            //       data definitions to operate so testing for it must be done
            //       by checking for the existence of ucontext_t rather than by
            //       a version identifier.  Please note that this is considered
            //       an obsolescent feature according to the POSIX spec, so a
            //       custom solution is still preferred.
            import tango.stdc.posix.ucontext;
        }
    }

    // System page size; initialized once by the module constructor below and
    // used to round fiber stack sizes to page granularity.
    const size_t PAGESIZE;
}
2307 | |
2308 | |
// Module constructor: determines the system page size once at startup.
static this()
{
    // Prefer the Win32 API when it is visible (imported on Win32 builds).
    static if( is( typeof( GetSystemInfo ) ) )
    {
        SYSTEM_INFO info;
        GetSystemInfo( &info );

        PAGESIZE = info.dwPageSize;
        assert( PAGESIZE < int.max );
    }
    // Otherwise query POSIX sysconf when both the call and the constant exist.
    else static if( is( typeof( sysconf ) ) &&
                    is( typeof( _SC_PAGESIZE ) ) )
    {
        PAGESIZE = cast(typeof(PAGESIZE))sysconf( _SC_PAGESIZE );
        assert( PAGESIZE < int.max );
    }
    else
    {
        // Fall back to conventional page sizes when no query API is available.
        version( PPC )
            PAGESIZE = 8192;
        else
            PAGESIZE = 4096;
    }
}
2333 | |
2334 | |
2335 //////////////////////////////////////////////////////////////////////////////// | |
2336 // Fiber Entry Point and Context Switch | |
2337 //////////////////////////////////////////////////////////////////////////////// | |
2338 | |
2339 | |
private
{
    // First function executed on a fiber's own stack.  Runs the user routine,
    // captures any unhandled exception for re-throw in the caller, then
    // switches back out with state TERM.  Never returns normally.
    extern (C) void fiber_entryPoint()
    {
        Fiber obj = Fiber.getThis();
        assert( obj );

        assert( Thread.getThis().m_curr is obj.m_ctxt );
        // clear the GC-suspend lock taken by switchIn before running user code
        volatile Thread.getThis().m_lock = false;
        obj.m_ctxt.tstack = obj.m_ctxt.bstack;
        obj.m_state = Fiber.State.EXEC;

        try
        {
            obj.run();
        }
        catch( Object o )
        {
            // stash for Fiber.call to rethrow in the calling context
            obj.m_unhandled = o;
        }

        static if( is( typeof( ucontext_t ) ) )
          obj.m_ucur = &obj.m_utxt;

        obj.m_state = Fiber.State.TERM;
        obj.switchOut();
    }


    // NOTE: If AsmPPC_Posix is defined then the context switch routine will
    //       be defined externally until GDC supports inline PPC ASM.
    version( AsmPPC_Posix )
        extern (C) void fiber_switchContext( void** oldp, void* newp );
    else
        extern (C) void fiber_switchContext( void** oldp, void* newp )
        {
            // NOTE: The data pushed and popped in this routine must match the
            //       default stack created by Fiber.initStack or the initial
            //       switch into a new context will fail.

            version( AsmX86_Win32 )
            {
                asm
                {
                    naked;

                    // save current stack state
                    push EBP;
                    mov  EBP, ESP;
                    push EAX;
                    push dword ptr FS:[0];
                    push dword ptr FS:[4];
                    push dword ptr FS:[8];
                    push EBX;
                    push ESI;
                    push EDI;

                    // store oldp again with more accurate address
                    mov EAX, dword ptr 8[EBP];
                    mov [EAX], ESP;
                    // load newp to begin context switch
                    mov ESP, dword ptr 12[EBP];

                    // load saved state from new stack
                    pop EDI;
                    pop ESI;
                    pop EBX;
                    pop dword ptr FS:[8];
                    pop dword ptr FS:[4];
                    pop dword ptr FS:[0];
                    pop EAX;
                    pop EBP;

                    // 'return' to complete switch
                    ret;
                }
            }
            else version( AsmX86_Posix )
            {
                asm
                {
                    naked;

                    // save current stack state
                    push EBP;
                    mov  EBP, ESP;
                    push EAX;
                    push EBX;
                    push ESI;
                    push EDI;

                    // store oldp again with more accurate address
                    mov EAX, dword ptr 8[EBP];
                    mov [EAX], ESP;
                    // load newp to begin context switch
                    mov ESP, dword ptr 12[EBP];

                    // load saved state from new stack
                    pop EDI;
                    pop ESI;
                    pop EBX;
                    pop EAX;
                    pop EBP;

                    // 'return' to complete switch
                    ret;
                }
            }
            else static if( is( typeof( ucontext_t ) ) )
            {
                Fiber cfib = Fiber.getThis();
                void* ucur = cfib.m_ucur;

                *oldp = &ucur;
                swapcontext( **(cast(ucontext_t***) oldp),
                              *(cast(ucontext_t**) newp) );
            }
        }
}
2459 | |
2460 | |
2461 //////////////////////////////////////////////////////////////////////////////// | |
2462 // Fiber | |
2463 //////////////////////////////////////////////////////////////////////////////// | |
2464 | |
2465 | |
/**
 * This class provides a cooperative concurrency mechanism integrated with the
 * threading and garbage collection functionality.  Calling a fiber may be
 * considered a blocking operation that returns when the fiber yields (via
 * Fiber.yield()).  Execution occurs within the context of the calling thread
 * so synchronization is not necessary to guarantee memory visibility so long
 * as the same thread calls the fiber each time.  Please note that there is no
 * requirement that a fiber be bound to one specific thread.  Rather, fibers
 * may be freely passed between threads so long as they are not currently
 * executing.  Like threads, a new fiber thread may be created using either
 * derivation or composition, as in the following example.
 *
 * Example:
 * ----------------------------------------------------------------------
 *
 * class DerivedFiber : Fiber
 * {
 *     this()
 *     {
 *         super( &run );
 *     }
 *
 * private :
 *     void run()
 *     {
 *         printf( "Derived fiber running.\n" );
 *     }
 * }
 *
 * void fiberFunc()
 * {
 *     printf( "Composed fiber running.\n" );
 *     Fiber.yield();
 *     printf( "Composed fiber running.\n" );
 * }
 *
 * // create instances of each type
 * Fiber derived = new DerivedFiber();
 * Fiber composed = new Fiber( &fiberFunc );
 *
 * // call both fibers once
 * derived.call();
 * composed.call();
 * printf( "Execution returned to calling context.\n" );
 * composed.call();
 *
 * // since each fiber has run to completion, each should have state TERM
 * assert( derived.state == Fiber.State.TERM );
 * assert( composed.state == Fiber.State.TERM );
 *
 * ----------------------------------------------------------------------
 *
 * Authors: Based on a design by Mikola Lysenko.
 */
class Fiber
{
    ////////////////////////////////////////////////////////////////////////////
    // Initialization
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Initializes a fiber object which is associated with a static
     * D function.
     *
     * Params:
     *  fn = The thread function.
     *  sz = The stack size for this fiber.
     *
     * In:
     *  fn must not be null.
     */
    this( void function() fn, size_t sz = PAGESIZE )
    in
    {
        assert( fn );
    }
    body
    {
        m_fn    = fn;
        m_call  = Call.FN;
        m_state = State.HOLD;
        allocStack( sz );
        initStack();
    }


    /**
     * Initializes a fiber object which is associated with a dynamic
     * D function.
     *
     * Params:
     *  dg = The thread function.
     *  sz = The stack size for this fiber.
     *
     * In:
     *  dg must not be null.
     */
    this( void delegate() dg, size_t sz = PAGESIZE )
    in
    {
        assert( dg );
    }
    body
    {
        m_dg    = dg;
        m_call  = Call.DG;
        m_state = State.HOLD;
        allocStack( sz );
        initStack();
    }


    /**
     * Cleans up any remaining resources used by this object.
     */
    ~this()
    {
        // NOTE: A live reference to this object will exist on its associated
        //       stack from the first time its call() method has been called
        //       until its execution completes with State.TERM.  Thus, the only
        //       times this dtor should be called are either if the fiber has
        //       terminated (and therefore has no active stack) or if the user
        //       explicitly deletes this object.  The latter case is an error
        //       but is not easily tested for, since State.HOLD may imply that
        //       the fiber was just created but has never been run.  There is
        //       not a compelling case to create a State.INIT just to offer a
        //       means of ensuring the user isn't violating this object's
        //       contract, so for now this requirement will be enforced by
        //       documentation only.
        freeStack();
    }


    ////////////////////////////////////////////////////////////////////////////
    // General Actions
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Transfers execution to this fiber object.  The calling context will be
     * suspended until the fiber calls Fiber.yield() or until it terminates
     * via an unhandled exception.
     *
     * Params:
     *  rethrow = Rethrow any unhandled exception which may have caused this
     *            fiber to terminate.
     *
     * In:
     *  This fiber must be in state HOLD.
     *
     * Throws:
     *  Any exception not handled by the joined thread.
     */
    final void call( bool rethrow = true )
    in
    {
        assert( m_state == State.HOLD );
    }
    body
    {
        Fiber cur = getThis();

        static if( is( typeof( ucontext_t ) ) )
          m_ucur = cur ? &cur.m_utxt : &Fiber.sm_utxt;

        setThis( this );
        this.switchIn();
        setThis( cur );

        static if( is( typeof( ucontext_t ) ) )
          m_ucur = null;

        // NOTE: If the fiber has terminated then the stack pointers must be
        //       reset.  This ensures that the stack for this fiber is not
        //       scanned if the fiber has terminated.  This is necessary to
        //       prevent any references lingering on the stack from delaying
        //       the collection of otherwise dead objects.  The most notable
        //       being the current object, which is referenced at the top of
        //       fiber_entryPoint.
        if( m_state == State.TERM )
        {
            m_ctxt.tstack = m_ctxt.bstack;
        }
        if( m_unhandled )
        {
            Object obj  = m_unhandled;
            m_unhandled = null;
            if( rethrow )
            {
                throw obj;
            }
        }
    }


    /**
     * Resets this fiber so that it may be re-used.  This routine may only be
     * called for fibers that have terminated, as doing otherwise could result
     * in scope-dependent functionality that is not executed.  Stack-based
     * classes, for example, may not be cleaned up properly if a fiber is reset
     * before it has terminated.
     *
     * In:
     *  This fiber must be in state TERM.
     */
    final void reset()
    in
    {
        assert( m_state == State.TERM );
        assert( m_ctxt.tstack == m_ctxt.bstack );
    }
    body
    {
        m_state = State.HOLD;
        initStack();
        m_unhandled = null;
    }


    ////////////////////////////////////////////////////////////////////////////
    // General Properties
    ////////////////////////////////////////////////////////////////////////////


    /**
     * A fiber may occupy one of three states: HOLD, EXEC, and TERM.  The HOLD
     * state applies to any fiber that is suspended and ready to be called.
     * The EXEC state will be set for any fiber that is currently executing.
     * And the TERM state is set when a fiber terminates.  Once a fiber
     * terminates, it must be reset before it may be called again.
     */
    enum State
    {
        HOLD,   ///
        EXEC,   ///
        TERM    ///
    }


    /**
     * Gets the current state of this fiber.
     *
     * Returns:
     *  The state of this fiber as an enumerated value.
     */
    final State state()
    {
        return m_state;
    }


    ////////////////////////////////////////////////////////////////////////////
    // Actions on Calling Fiber
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Forces a context switch to occur away from the calling fiber.
     */
    static void yield()
    {
        Fiber cur = getThis();
        assert( cur, "Fiber.yield() called with no active fiber" );
        assert( cur.m_state == State.EXEC );

        static if( is( typeof( ucontext_t ) ) )
          cur.m_ucur = &cur.m_utxt;

        cur.m_state = State.HOLD;
        cur.switchOut();
        cur.m_state = State.EXEC;
    }


    /**
     * Forces a context switch to occur away from the calling fiber and then
     * throws obj in the calling fiber.
     *
     * Params:
     *  obj = The object to throw.
     *
     * In:
     *  obj must not be null.
     */
    static void yieldAndThrow( Object obj )
    in
    {
        assert( obj );
    }
    body
    {
        Fiber cur = getThis();
        assert( cur, "Fiber.yield() called with no active fiber" );
        assert( cur.m_state == State.EXEC );

        static if( is( typeof( ucontext_t ) ) )
          cur.m_ucur = &cur.m_utxt;

        cur.m_unhandled = obj;
        cur.m_state = State.HOLD;
        cur.switchOut();
        cur.m_state = State.EXEC;
    }


    ////////////////////////////////////////////////////////////////////////////
    // Fiber Accessors
    ////////////////////////////////////////////////////////////////////////////


    /**
     * Provides a reference to the calling fiber or null if no fiber is
     * currently active.
     *
     * Returns:
     *  The fiber object representing the calling fiber or null if no fiber
     *  is currently active.  The result of deleting this object is undefined.
     */
    static Fiber getThis()
    {
        version( Win32 )
        {
            return cast(Fiber) TlsGetValue( sm_this );
        }
        else version( Posix )
        {
            return cast(Fiber) pthread_getspecific( sm_this );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Static Initialization
    ////////////////////////////////////////////////////////////////////////////


    static this()
    {
        version( Win32 )
        {
            sm_this = TlsAlloc();
            assert( sm_this != TLS_OUT_OF_INDEXES );
        }
        else version( Posix )
        {
            int status;

            status = pthread_key_create( &sm_this, null );
            assert( status == 0 );

            static if( is( typeof( ucontext_t ) ) )
            {
                status = getcontext( &sm_utxt );
                assert( status == 0 );
            }
        }
    }


private:
    //
    // Initializes a fiber object which has no associated executable function.
    //
    this()
    {
        m_call = Call.NO;
    }


    //
    // Fiber entry point.  Invokes the function or delegate passed on
    // construction (if any).
    //
    final void run()
    {
        switch( m_call )
        {
        case Call.FN:
            m_fn();
            break;
        case Call.DG:
            m_dg();
            break;
        default:
            break;
        }
    }


private:
    //
    // The type of routine passed on fiber construction.
    //
    enum Call
    {
        NO,
        FN,
        DG
    }


    //
    // Standard fiber data
    //
    Call                m_call;
    union
    {
        void function() m_fn;
        void delegate() m_dg;
    }
    // NOTE(review): m_isRunning appears unused within this class; retained
    //               for binary/layout compatibility pending confirmation.
    bool                m_isRunning;
    Object              m_unhandled;
    State               m_state;


private:
    ////////////////////////////////////////////////////////////////////////////
    // Stack Management
    ////////////////////////////////////////////////////////////////////////////


    //
    // Allocate a new stack for this fiber.
    //
    final void allocStack( size_t sz )
    in
    {
        assert( !m_pmem && !m_ctxt );
    }
    body
    {
        // adjust alloc size to a multiple of PAGESIZE
        sz += PAGESIZE - 1;
        sz -= sz % PAGESIZE;

        // NOTE: This instance of Thread.Context is dynamic so Fiber objects
        //       can be collected by the GC so long as no user level references
        //       to the object exist.  If m_ctxt were not dynamic then its
        //       presence in the global context list would be enough to keep
        //       this object alive indefinitely.  An alternative to allocating
        //       room for this struct explicitly would be to mash it into the
        //       base of the stack being allocated below.  However, doing so
        //       requires too much special logic to be worthwhile.
        m_ctxt = new Thread.Context;

        static if( is( typeof( VirtualAlloc ) ) )
        {
            // reserve memory for stack
            m_pmem = VirtualAlloc( null,
                                   sz + PAGESIZE,
                                   MEM_RESERVE,
                                   PAGE_NOACCESS );
            if( !m_pmem )
            {
                throw new FiberException( "Unable to reserve memory for stack" );
            }

            version( StackGrowsDown )
            {
                void* stack = m_pmem + PAGESIZE;
                void* guard = m_pmem;
                void* pbase = stack + sz;
            }
            else
            {
                void* stack = m_pmem;
                void* guard = m_pmem + sz;
                void* pbase = stack;
            }

            // allocate reserved stack segment
            stack = VirtualAlloc( stack,
                                  sz,
                                  MEM_COMMIT,
                                  PAGE_READWRITE );
            if( !stack )
            {
                throw new FiberException( "Unable to allocate memory for stack" );
            }

            // allocate reserved guard page
            guard = VirtualAlloc( guard,
                                  PAGESIZE,
                                  MEM_COMMIT,
                                  PAGE_READWRITE | PAGE_GUARD );
            if( !guard )
            {
                throw new FiberException( "Unable to create guard page for stack" );
            }

            m_ctxt.bstack = pbase;
            m_ctxt.tstack = pbase;
            m_size = sz;
        }
        else
        {   static if( is( typeof( mmap ) ) )
            {
                m_pmem = mmap( null,
                               sz,
                               PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANON,
                               -1,
                               0 );
                if( m_pmem == MAP_FAILED )
                    m_pmem = null;
            }
            else static if( is( typeof( valloc ) ) )
            {
                m_pmem = valloc( sz );
            }
            else static if( is( typeof( malloc ) ) )
            {
                m_pmem = malloc( sz );
            }
            else
            {
                m_pmem = null;
            }

            if( !m_pmem )
            {
                throw new FiberException( "Unable to allocate memory for stack" );
            }

            version( StackGrowsDown )
            {
                m_ctxt.bstack = m_pmem + sz;
                m_ctxt.tstack = m_pmem + sz;
            }
            else
            {
                m_ctxt.bstack = m_pmem;
                m_ctxt.tstack = m_pmem;
            }
            m_size = sz;
        }

        Thread.add( m_ctxt );
    }


    //
    // Free this fiber's stack.
    //
    final void freeStack()
    in
    {
        assert( m_pmem && m_ctxt );
    }
    body
    {
        // NOTE: Since this routine is only ever expected to be called from
        //       the dtor, pointers to freed data are not set to null.

        // NOTE: m_ctxt is guaranteed to be alive because it is held in the
        //       global context list.
        Thread.remove( m_ctxt );

        static if( is( typeof( VirtualAlloc ) ) )
        {
            VirtualFree( m_pmem, 0, MEM_RELEASE );
        }
        else static if( is( typeof( mmap ) ) )
        {
            munmap( m_pmem, m_size );
        }
        else static if( is( typeof( valloc ) ) )
        {
            free( m_pmem );
        }
        else static if( is( typeof( malloc ) ) )
        {
            free( m_pmem );
        }
        delete m_ctxt;
    }


    //
    // Initialize the allocated stack.
    //
    final void initStack()
    in
    {
        assert( m_ctxt.tstack && m_ctxt.tstack == m_ctxt.bstack );
        assert( cast(size_t) m_ctxt.bstack % (void*).sizeof == 0 );
    }
    body
    {
        void* pstack = m_ctxt.tstack;
        scope( exit )  m_ctxt.tstack = pstack;

        void push( size_t val )
        {
            version( StackGrowsDown )
            {
                pstack -= size_t.sizeof;
                *(cast(size_t*) pstack) = val;
            }
            else
            {
                pstack += size_t.sizeof;
                *(cast(size_t*) pstack) = val;
            }
        }

        // NOTE: On OS X the stack must be 16-byte aligned according to the
        // IA-32 call spec.
        version( darwin )
        {
             pstack = cast(void*)(cast(uint)(pstack) - (cast(uint)(pstack) & 0x0F));
        }

        version( AsmX86_Win32 )
        {
            push( cast(size_t) &fiber_entryPoint );                 // EIP
            push( 0xFFFFFFFF );                                     // EBP
            push( 0x00000000 );                                     // EAX
            push( 0xFFFFFFFF );                                     // FS:[0]
            // BUG: Are the frame pointers the same for both versions?
            version( StackGrowsDown )
            {
                push( cast(size_t) m_ctxt.bstack );                 // FS:[4]
                push( cast(size_t) m_ctxt.bstack + m_size );        // FS:[8]
            }
            else
            {
                push( cast(size_t) m_ctxt.bstack );                 // FS:[4]
                push( cast(size_t) m_ctxt.bstack + m_size );        // FS:[8]
            }
            push( 0x00000000 );                                     // EBX
            push( 0x00000000 );                                     // ESI
            push( 0x00000000 );                                     // EDI
        }
        else version( AsmX86_Posix )
        {
            push( cast(size_t) &fiber_entryPoint );                 // EIP
            push( 0x00000000 );                                     // EBP
            push( 0x00000000 );                                     // EAX
            push( 0x00000000 );                                     // EBX
            push( 0x00000000 );                                     // ESI
            push( 0x00000000 );                                     // EDI
        }
        else version( AsmPPC_Posix )
        {
            version( StackGrowsDown )
            {
                pstack -= int.sizeof * 5;
            }
            else
            {
                pstack += int.sizeof * 5;
            }

            push( cast(size_t) &fiber_entryPoint );     // link register
            push( 0x00000000 );                         // control register
            push( 0x00000000 );                         // old stack pointer

            // GPR values
            version( StackGrowsDown )
            {
                pstack -= int.sizeof * 20;
            }
            else
            {
                pstack += int.sizeof * 20;
            }

            // FIX: '==' binds tighter than '&', so the original expression
            //      'pstack & 0x0f == 0' evaluated as 'pstack & (0x0f == 0)'
            //      which is always zero, making the assert always fail.
            //      The intent is to verify 16-byte stack alignment.
            assert( (cast(uint) pstack & 0x0f) == 0 );
        }
        else static if( is( typeof( ucontext_t ) ) )
        {
            getcontext( &m_utxt );
            m_utxt.uc_stack.ss_sp   = m_ctxt.bstack;
            m_utxt.uc_stack.ss_size = m_size;
            makecontext( &m_utxt, &fiber_entryPoint, 0 );
            // NOTE: If ucontext is being used then the top of the stack will
            //       be a pointer to the ucontext_t struct for that fiber.
            push( cast(size_t) &m_utxt );
        }
    }


    Thread.Context* m_ctxt;     // execution context registered with Thread
    size_t          m_size;     // committed stack size in bytes
    void*           m_pmem;     // raw allocation backing the stack

    static if( is( typeof( ucontext_t ) ) )
    {
        // NOTE: The static ucontext instance is used to represent the context
        //       of the main application thread.
        static ucontext_t       sm_utxt = void;
        ucontext_t              m_utxt  = void;
        ucontext_t*             m_ucur  = null;
    }


private:
    ////////////////////////////////////////////////////////////////////////////
    // Storage of Active Fiber
    ////////////////////////////////////////////////////////////////////////////


    //
    // Sets a thread-local reference to the current fiber object.
    //
    static void setThis( Fiber f )
    {
        version( Win32 )
        {
            TlsSetValue( sm_this, cast(void*) f );
        }
        else version( Posix )
        {
            pthread_setspecific( sm_this, cast(void*) f );
        }
    }


    static Thread.TLSKey    sm_this;


private:
    ////////////////////////////////////////////////////////////////////////////
    // Context Switching
    ////////////////////////////////////////////////////////////////////////////


    //
    // Switches into the stack held by this fiber.
    //
    final void switchIn()
    {
        Thread  tobj = Thread.getThis();
        void**  oldp = &tobj.m_curr.tstack;
        void*   newp = m_ctxt.tstack;

        // NOTE: The order of operations here is very important.  The current
        //       stack top must be stored before m_lock is set, and pushContext
        //       must not be called until after m_lock is set.  This process
        //       is intended to prevent a race condition with the suspend
        //       mechanism used for garbage collection.  If it is not followed,
        //       a badly timed collection could cause the GC to scan from the
        //       bottom of one stack to the top of another, or to miss scanning
        //       a stack that still contains valid data.  The old stack pointer
        //       oldp will be set again before the context switch to guarantee
        //       that it points to exactly the correct stack location so the
        //       successive pop operations will succeed.
        *oldp = getStackTop();
        volatile tobj.m_lock = true;
        tobj.pushContext( m_ctxt );

        fiber_switchContext( oldp, newp );

        // NOTE: As above, these operations must be performed in a strict order
        //       to prevent Bad Things from happening.
        tobj.popContext();
        volatile tobj.m_lock = false;
        tobj.m_curr.tstack = tobj.m_curr.bstack;
    }


    //
    // Switches out of the current stack and into the enclosing stack.
    //
    final void switchOut()
    {
        Thread  tobj = Thread.getThis();
        void**  oldp = &m_ctxt.tstack;
        void*   newp = tobj.m_curr.within.tstack;

        // NOTE: The order of operations here is very important.  The current
        //       stack top must be stored before m_lock is set, and pushContext
        //       must not be called until after m_lock is set.  This process
        //       is intended to prevent a race condition with the suspend
        //       mechanism used for garbage collection.  If it is not followed,
        //       a badly timed collection could cause the GC to scan from the
        //       bottom of one stack to the top of another, or to miss scanning
        //       a stack that still contains valid data.  The old stack pointer
        //       oldp will be set again before the context switch to guarantee
        //       that it points to exactly the correct stack location so the
        //       successive pop operations will succeed.
        *oldp = getStackTop();
        volatile tobj.m_lock = true;

        fiber_switchContext( oldp, newp );

        // NOTE: As above, these operations must be performed in a strict order
        //       to prevent Bad Things from happening.
        volatile tobj.m_lock = false;
        tobj.m_curr.tstack = tobj.m_curr.bstack;
    }
}