comparison druntime/src/common/core/thread.d @ 1458:e0b2d67cfe7c

Added druntime (this should be removed once it works).
author Robert Clipsham <robert@octarineparrot.com>
date Tue, 02 Jun 2009 17:43:06 +0100
parents
children
comparison
equal deleted inserted replaced
1456:7b218ec1044f 1458:e0b2d67cfe7c
1 /**
2 * The thread module provides support for thread creation and management.
3 *
4 * Copyright: Copyright Sean Kelly 2005 - 2009.
5 * License: <a href="http://www.boost.org/LICENSE_1_0.txt">Boost License 1.0</a>.
6 * Authors: Sean Kelly
7 *
8 * Copyright Sean Kelly 2005 - 2009.
9 * Distributed under the Boost Software License, Version 1.0.
10 * (See accompanying file LICENSE_1_0.txt or copy at
11 * http://www.boost.org/LICENSE_1_0.txt)
12 */
13 module core.thread;
14
15
16 // this should be true for most architectures
17 version = StackGrowsDown;
18
19
20 ///////////////////////////////////////////////////////////////////////////////
21 // Thread and Fiber Exceptions
22 ///////////////////////////////////////////////////////////////////////////////
23
24
25 /**
26 * Base class for thread exceptions.
27 */
/**
 * Base class for thread exceptions.
 */
class ThreadException : Exception
{
    /**
     * Constructs a new ThreadException.
     *
     * Params:
     *  msg = A message describing the error.
     */
    this( string msg )
    {
        super( msg );
    }
}
35
36
37 /**
38 * Base class for fiber exceptions.
39 */
/**
 * Base class for fiber exceptions.
 */
class FiberException : Exception
{
    /**
     * Constructs a new FiberException.
     *
     * Params:
     *  msg = A message describing the error.
     */
    this( string msg )
    {
        super( msg );
    }
}
47
48
49 private
50 {
51 //
52 // exposed by compiler runtime
53 //
54 extern (C) void* rt_stackBottom();
55 extern (C) void* rt_stackTop();
56
57
    //
    // Returns the origin (bottom) of the calling thread's stack, as
    // reported by the compiler runtime.
    //
    void* getStackBottom()
    {
        return rt_stackBottom();
    }
62
63
    //
    // Returns the current top of the calling thread's stack.
    //
    void* getStackTop()
    {
        version( D_InlineAsm_X86 )
        {
            // Naked function: return ESP directly.  Because the frame is
            // never set up, the value returned is the stack pointer at the
            // call site, which is close enough for stack-extent bookkeeping.
            asm
            {
                naked;
                mov EAX, ESP;
                ret;
            }
        }
        else
        {
            // No inline assembler for this target; defer to the compiler
            // runtime's implementation.
            return rt_stackTop();
        }
    }
80 }
81
82
83 ///////////////////////////////////////////////////////////////////////////////
84 // Thread Entry Point and Signal Handlers
85 ///////////////////////////////////////////////////////////////////////////////
86
87
88 version( Windows )
89 {
90 private
91 {
92 import core.stdc.stdint : uintptr_t; // for _beginthreadex decl below
93 import core.sys.windows.windows;
94
95 const DWORD TLS_OUT_OF_INDEXES = 0xFFFFFFFF;
96
97 extern (Windows) alias uint function(void*) btex_fptr;
98 extern (C) uintptr_t _beginthreadex(void*, uint, btex_fptr, void*, uint, uint*);
99
100 version( DigitalMars )
101 {
102 // NOTE: The memory between the addresses of _tlsstart and _tlsend
103 // is the storage for thread-local data in D 2.0. Both of
104 // these are defined in dm\src\win32\tlsseg.asm by DMC.
105 extern (C)
106 {
107 extern __thread int _tlsstart;
108 extern __thread int _tlsend;
109 }
110 }
111 else
112 {
113 __gshared int _tlsstart;
114 alias _tlsstart _tlsend;
115 }
116
117
118 //
119 // entry point for Windows threads
120 //
        //
        // Entry point for Windows threads.  arg is the Thread object passed
        // by Thread.start via _beginthreadex.
        //
        extern (Windows) uint thread_entryPoint( void* arg )
        {
            Thread  obj = cast(Thread) arg;
            assert( obj );
            // Ensure the thread is taken off the global list however run()
            // exits.
            scope( exit ) Thread.remove( obj );

            assert( obj.m_curr is &obj.m_main );
            // Record this thread's stack extents so its stack can be scanned.
            obj.m_main.bstack = getStackBottom();
            obj.m_main.tstack = obj.m_main.bstack;
            Thread.add( &obj.m_main );
            Thread.setThis( obj );

            // Record the span of this thread's static TLS area.
            void* pstart = cast(void*) &_tlsstart;
            void* pend   = cast(void*) &_tlsend;
            obj.m_tls = pstart[0 .. pend - pstart];

            // NOTE: No GC allocations may occur until the stack pointers have
            //       been set and Thread.getThis returns a valid reference to
            //       this thread object (this latter condition is not strictly
            //       necessary on Windows but it should be followed for the
            //       sake of consistency).

            // TODO: Consider putting an auto exception object here (using
            //       alloca) for OutOfMemoryError plus something to track
            //       whether an exception is in-flight?

            try
            {
                obj.run();
            }
            catch( Object o )
            {
                // Park any escaped exception so join() can rethrow it.
                obj.m_unhandled = o;
            }
            return 0;
        }
157
158
159 //
160 // copy of the same-named function in phobos.std.thread--it uses the
161 // Windows naming convention to be consistent with GetCurrentThreadId
162 //
        //
        // Duplicates the pseudo-handle returned by GetCurrentThread into a
        // real handle usable from other threads.
        //
        HANDLE GetCurrentThreadHandle()
        {
            const uint DUPLICATE_SAME_ACCESS = 0x00000002;

            HANDLE curr = GetCurrentThread(),
                   proc = GetCurrentProcess(),
                   hndl;

            // NOTE(review): the DuplicateHandle return value is not checked,
            // so hndl is left uninitialized if the call fails -- confirm that
            // callers can tolerate this before hardening.
            DuplicateHandle( proc, curr, proc, &hndl, 0, TRUE, DUPLICATE_SAME_ACCESS );
            return hndl;
        }
174 }
175 }
176 else version( Posix )
177 {
178 private
179 {
180 import core.sys.posix.semaphore;
181 import core.sys.posix.pthread;
182 import core.sys.posix.signal;
183 import core.sys.posix.time;
184 import core.stdc.errno;
185
186 extern (C) int getErrno();
187
188 version( OSX )
189 {
190 import core.sys.osx.mach.thread_act;
191 extern (C) mach_port_t pthread_mach_thread_np(pthread_t);
192 }
193
194 version( GNU )
195 {
196 import gcc.builtins;
197 }
198
199 version( DigitalMars )
200 {
201 version( linux )
202 {
203 extern (C)
204 {
205 extern __thread int _tlsstart;
206 extern __thread int _tlsend;
207 }
208 }
209 else
210 {
211 __gshared int _tlsstart;
212 alias _tlsstart _tlsend;
213 }
214 }
215 else
216 {
217 __gshared int _tlsstart;
218 alias _tlsstart _tlsend;
219 }
220
221
222 //
223 // entry point for POSIX threads
224 //
        //
        // Entry point for POSIX threads.  arg is the Thread object passed by
        // Thread.start via pthread_create.
        //
        extern (C) void* thread_entryPoint( void* arg )
        {
            Thread  obj = cast(Thread) arg;
            assert( obj );
            scope( exit )
            {
                // NOTE: isRunning should be set to false after the thread is
                //       removed or a double-removal could occur between this
                //       function and thread_suspendAll.
                Thread.remove( obj );
                obj.m_isRunning = false;
            }

            // Invoked if the thread is cancelled/terminated abnormally.
            static extern (C) void thread_cleanupHandler( void* arg )
            {
                Thread  obj = cast(Thread) arg;
                assert( obj );

                // NOTE: If the thread terminated abnormally, just set it as
                //       not running and let thread_suspendAll remove it from
                //       the thread list.  This is safer and is consistent
                //       with the Windows thread code.
                obj.m_isRunning = false;
            }

            // NOTE: Using void to skip the initialization here relies on
            //       knowledge of how pthread_cleanup is implemented.  It may
            //       not be appropriate for all platforms.  However, it does
            //       avoid the need to link the pthread module.  If any
            //       implementation actually requires default initialization
            //       then pthread_cleanup should be restructured to maintain
            //       the current lack of a link dependency.
            version( linux )
            {
                pthread_cleanup cleanup = void;
                cleanup.push( &thread_cleanupHandler, cast(void*) obj );
            }
            else version( OSX )
            {
                pthread_cleanup cleanup = void;
                cleanup.push( &thread_cleanupHandler, cast(void*) obj );
            }
            else
            {
                pthread_cleanup_push( &thread_cleanupHandler, cast(void*) obj );
            }

            // Establish an approximate stack base for this thread.
            // NOTE: For some reason this does not always work for threads.
            //obj.m_main.bstack = getStackBottom();
            version( D_InlineAsm_X86 )
            {
                // Use this frame's base pointer as the stack base.
                static void* getBasePtr()
                {
                    asm
                    {
                        naked;
                        mov EAX, EBP;
                        ret;
                    }
                }

                obj.m_main.bstack = getBasePtr();
            }
            else version( StackGrowsDown )
                // The address just past the local obj lies toward the base of
                // a downward-growing stack.
                obj.m_main.bstack = &obj + 1;
            else
                obj.m_main.bstack = &obj;
            obj.m_main.tstack = obj.m_main.bstack;
            assert( obj.m_curr == &obj.m_main );
            Thread.add( &obj.m_main );
            Thread.setThis( obj );

            // Record the span of this thread's static TLS area.
            void* pstart = cast(void*) &_tlsstart;
            void* pend   = cast(void*) &_tlsend;
            obj.m_tls = pstart[0 .. pend - pstart];

            // NOTE: No GC allocations may occur until the stack pointers have
            //       been set and Thread.getThis returns a valid reference to
            //       this thread object (this latter condition is not strictly
            //       necessary on Windows but it should be followed for the
            //       sake of consistency).

            // TODO: Consider putting an auto exception object here (using
            //       alloca) for OutOfMemoryError plus something to track
            //       whether an exception is in-flight?

            try
            {
                obj.run();
            }
            catch( Object o )
            {
                // Park any escaped exception so join() can rethrow it.
                obj.m_unhandled = o;
            }
            return null;
        }
321
322
323 //
324 // used to track the number of suspended threads
325 //
326 __gshared sem_t suspendCount;
327
328
        //
        // Signal handler for SIGUSR1: parks the receiving thread until
        // SIGUSR2 arrives, posting suspendCount so the suspending thread can
        // count parked threads.
        //
        extern (C) void thread_suspendHandler( int sig )
        in
        {
            assert( sig == SIGUSR1 );
        }
        body
        {
            // Spill registers onto the stack first so any values they hold
            // are visible in the stack region recorded below (see the NOTE
            // about stack data further down).
            version( LDC)
            {
                version(X86)
                {
                    uint eax,ecx,edx,ebx,ebp,esi,edi;
                    asm
                    {
                        mov eax[EBP], EAX ;
                        mov ecx[EBP], ECX ;
                        mov edx[EBP], EDX ;
                        mov ebx[EBP], EBX ;
                        mov ebp[EBP], EBP ;
                        mov esi[EBP], ESI ;
                        mov edi[EBP], EDI ;
                    }
                }
                else version (X86_64)
                {
                    ulong rax,rbx,rcx,rdx,rbp,rsi,rdi,rsp,r8,r9,r10,r11,r12,r13,r14,r15;
                    asm
                    {
                        movq rax[RBP], RAX ;
                        movq rbx[RBP], RBX ;
                        movq rcx[RBP], RCX ;
                        movq rdx[RBP], RDX ;
                        movq rbp[RBP], RBP ;
                        movq rsi[RBP], RSI ;
                        movq rdi[RBP], RDI ;
                        movq rsp[RBP], RSP ;
                        movq r8[RBP], R8 ;
                        movq r9[RBP], R9 ;
                        movq r10[RBP], R10 ;
                        movq r11[RBP], R11 ;
                        movq r12[RBP], R12 ;
                        movq r13[RBP], R13 ;
                        movq r14[RBP], R14 ;
                        movq r15[RBP], R15 ;
                    }
                }
                else
                {
                    static assert( false, "Architecture not supported." );
                }
            } else version( D_InlineAsm_X86 )
            {
                asm
                {
                    pushad;
                }
            }
            else version( GNU )
            {
                __builtin_unwind_init();
            }
            else
            {
                static assert( false, "Architecture not supported." );
            }

            // NOTE: Since registers are being pushed and popped from the
            //       stack, any other stack data used by this function should
            //       be gone before the stack cleanup code is called below.
            {
                Thread  obj = Thread.getThis();

                // NOTE: The thread reference returned by getThis is set within
                //       the thread startup code, so it is possible that this
                //       handler may be called before the reference is set.  In
                //       this case it is safe to simply suspend and not worry
                //       about the stack pointers as the thread will not have
                //       any references to GC-managed data.
                if( obj && !obj.m_lock )
                {
                    obj.m_curr.tstack = getStackTop();
                }

                sigset_t    sigres = void;
                int         status;

                // Build a wait mask that blocks everything except SIGUSR2,
                // the signal thread_resumeHandler is registered for.
                status = sigfillset( &sigres );
                assert( status == 0 );

                status = sigdelset( &sigres, SIGUSR2 );
                assert( status == 0 );

                // Announce that this thread is parked.
                status = sem_post( &suspendCount );
                assert( status == 0 );

                // Park until SIGUSR2 is delivered.
                sigsuspend( &sigres );

                // Resumed: reset the recorded stack top.
                if( obj && !obj.m_lock )
                {
                    obj.m_curr.tstack = obj.m_curr.bstack;
                }
            }
            version( LDC ){}
            else version( D_InlineAsm_X86 )
            {
                asm
                {
                    popad;
                }
            }
            else version( GNU )
            {
                // registers will be popped automatically
            }
            else
            {
                static assert( false, "Architecture not supported." );
            }
        }
448
449
        //
        // Signal handler for SIGUSR2.  The body is intentionally empty: the
        // mere delivery of SIGUSR2 interrupts the sigsuspend call in
        // thread_suspendHandler, which resumes the parked thread.
        //
        extern (C) void thread_resumeHandler( int sig )
        in
        {
            assert( sig == SIGUSR2 );
        }
        body
        {

        }
459 }
460 }
461 else
462 {
463 // NOTE: This is the only place threading versions are checked. If a new
464 // version is added, the module code will need to be searched for
465 // places where version-specific code may be required. This can be
466 //       easily accomplished by searching for 'Windows' or 'Posix'.
467 static assert( false, "Unknown threading implementation." );
468 }
469
470
471 ///////////////////////////////////////////////////////////////////////////////
472 // Thread
473 ///////////////////////////////////////////////////////////////////////////////
474
475
476 /**
477 * This class encapsulates all threading functionality for the D
478 * programming language. As thread manipulation is a required facility
479 * for garbage collection, all user threads should derive from this
480 * class, and instances of this class should never be explicitly deleted.
481 * A new thread may be created using either derivation or composition, as
482 * in the following example.
483 *
484 * Example:
485 * ----------------------------------------------------------------------------
486 *
487 * class DerivedThread : Thread
488 * {
489 * this()
490 * {
491 * super( &run );
492 * }
493 *
494 * private :
495 * void run()
496 * {
497 * printf( "Derived thread running.\n" );
498 * }
499 * }
500 *
501 * void threadFunc()
502 * {
503 * printf( "Composed thread running.\n" );
504 * }
505 *
506 * // create instances of each type
507 * Thread derived = new DerivedThread();
508 * Thread composed = new Thread( &threadFunc );
509 *
510 * // start both threads
511 * derived.start();
512 * composed.start();
513 *
514 * ----------------------------------------------------------------------------
515 */
516 class Thread
517 {
518 ///////////////////////////////////////////////////////////////////////////
519 // Initialization
520 ///////////////////////////////////////////////////////////////////////////
521
522
523 /**
524 * Initializes a thread object which is associated with a static
525 * D function.
526 *
527 * Params:
528 * fn = The thread function.
529 * sz = The stack size for this thread.
530 *
531 * In:
532 * fn must not be null.
533 */
534 this( void function() fn, size_t sz = 0 )
535 in
536 {
537 assert( fn );
538 }
539 body
540 {
541 m_fn = fn;
542 m_sz = sz;
543 m_call = Call.FN;
544 m_curr = &m_main;
545 }
546
547
548 /**
549 * Initializes a thread object which is associated with a dynamic
550 * D function.
551 *
552 * Params:
553 * dg = The thread function.
554 * sz = The stack size for this thread.
555 *
556 * In:
557 * dg must not be null.
558 */
559 this( void delegate() dg, size_t sz = 0 )
560 in
561 {
562 assert( dg );
563 }
564 body
565 {
566 m_dg = dg;
567 m_sz = sz;
568 m_call = Call.DG;
569 m_curr = &m_main;
570 }
571
572
573 /**
574 * Cleans up any remaining resources used by this object.
575 */
    ~this()
    {
        // A default m_addr means the thread never started or was already
        // cleaned up (join clears m_addr), so there is nothing to release.
        if( m_addr == m_addr.init )
        {
            return;
        }

        version( Windows )
        {
            m_addr = m_addr.init;
            CloseHandle( m_hndl );
            m_hndl = m_hndl.init;
        }
        else version( Posix )
        {
            // No one can join after this object is gone, so detach and let
            // the system reclaim the thread's resources when it exits.
            pthread_detach( m_addr );
            m_addr = m_addr.init;
        }
        version( OSX )
        {
            // Drop the cached mach thread port obtained in start().
            m_tmach = m_tmach.init;
        }
    }
599
600
601 ///////////////////////////////////////////////////////////////////////////
602 // General Actions
603 ///////////////////////////////////////////////////////////////////////////
604
605
606 /**
607 * Starts the thread and invokes the function or delegate passed upon
608 * construction.
609 *
610 * In:
611 * This routine may only be called once per thread instance.
612 *
613 * Throws:
614 * ThreadException if the thread fails to start.
615 */
    final void start()
    in
    {
        assert( !next && !prev );
    }
    body
    {
        // Flag the process as multithreaded, rolling back if startup fails
        // and we were the ones who set it.
        auto wasThreaded  = multiThreadedFlag;
        multiThreadedFlag = true;
        scope( failure )
        {
            if( !wasThreaded )
                multiThreadedFlag = false;
        }

        version( Windows ) {} else
        version( Posix )
        {
            // Prepare joinable attributes with the requested stack size (if
            // any) before taking the global lock.
            pthread_attr_t  attr;

            if( pthread_attr_init( &attr ) )
                throw new ThreadException( "Error initializing thread attributes" );
            if( m_sz && pthread_attr_setstacksize( &attr, m_sz ) )
                throw new ThreadException( "Error initializing thread stack size" );
            if( pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE ) )
                throw new ThreadException( "Error setting thread joinable" );
        }

        // NOTE: This operation needs to be synchronized to avoid a race
        //       condition with the GC.  Without this lock, the thread
        //       could start and allocate memory before being added to
        //       the global thread list, preventing it from being scanned
        //       and causing memory to be collected that is still in use.
        synchronized( slock )
        {
            version( Windows )
            {
                m_hndl = cast(HANDLE) _beginthreadex( null, m_sz, &thread_entryPoint, cast(void*) this, 0, &m_addr );
                if( cast(size_t) m_hndl == 0 )
                    throw new ThreadException( "Error creating thread" );
            }
            else version( Posix )
            {
                // Set before create so isRunning is accurate even if the new
                // thread has not reached its entry point yet.
                m_isRunning = true;
                scope( failure ) m_isRunning = false;

                if( pthread_create( &m_addr, &attr, &thread_entryPoint, cast(void*) this ) != 0 )
                    throw new ThreadException( "Error creating thread" );
            }
            version( OSX )
            {
                // Cache the mach port used to suspend/resume this thread.
                m_tmach = pthread_mach_thread_np( m_addr );
                if( m_tmach == m_tmach.init )
                    throw new ThreadException( "Error creating thread" );
            }
            add( this );
        }
    }
674
675
676 /**
677 * Waits for this thread to complete. If the thread terminated as the
678 * result of an unhandled exception, this exception will be rethrown.
679 *
680 * Params:
681 * rethrow = Rethrow any unhandled exception which may have caused this
682 * thread to terminate.
683 *
684 * Throws:
685 * ThreadException if the operation fails.
686 * Any exception not handled by the joined thread.
687 *
688 * Returns:
689 * Any exception not handled by this thread if rethrow = false, null
690 * otherwise.
691 */
    final Object join( bool rethrow = true )
    {
        version( Windows )
        {
            if( WaitForSingleObject( m_hndl, INFINITE ) != WAIT_OBJECT_0 )
                throw new ThreadException( "Unable to join thread" );
            // NOTE: m_addr must be cleared before m_hndl is closed to avoid
            //       a race condition with isRunning.  The operation is labeled
            //       volatile to prevent compiler reordering.
            volatile m_addr = m_addr.init;
            CloseHandle( m_hndl );
            m_hndl = m_hndl.init;
        }
        else version( Posix )
        {
            if( pthread_join( m_addr, null ) != 0 )
                throw new ThreadException( "Unable to join thread" );
            // NOTE: pthread_join acts as a substitute for pthread_detach,
            //       which is normally called by the dtor.  Setting m_addr
            //       to zero ensures that pthread_detach will not be called
            //       on object destruction.
            volatile m_addr = m_addr.init;
        }
        // Surface any exception that escaped the thread's entry point,
        // either by rethrowing or by returning it to the caller.
        if( m_unhandled )
        {
            if( rethrow )
                throw m_unhandled;
            return m_unhandled;
        }
        return null;
    }
723
724
725 ///////////////////////////////////////////////////////////////////////////
726 // General Properties
727 ///////////////////////////////////////////////////////////////////////////
728
729
730 /**
731 * Gets the user-readable label for this thread.
732 *
733 * Returns:
734 * The name of this thread.
735 */
736 final char[] name()
737 {
738 synchronized( this )
739 {
740 return m_name;
741 }
742 }
743
744
745 /**
746 * Sets the user-readable label for this thread.
747 *
748 * Params:
749 * val = The new name of this thread.
750 */
751 final void name( char[] val )
752 {
753 synchronized( this )
754 {
755 m_name = val.dup;
756 }
757 }
758
759
760 /**
761 * Gets the daemon status for this thread. While the runtime will wait for
762 * all normal threads to complete before tearing down the process, daemon
763 * threads are effectively ignored and thus will not prevent the process
764 * from terminating. In effect, daemon threads will be terminated
765 * automatically by the OS when the process exits.
766 *
767 * Returns:
768 * true if this is a daemon thread.
769 */
770 final bool isDaemon()
771 {
772 synchronized( this )
773 {
774 return m_isDaemon;
775 }
776 }
777
778
779 /**
780 * Sets the daemon status for this thread. While the runtime will wait for
781 * all normal threads to complete before tearing down the process, daemon
782 * threads are effectively ignored and thus will not prevent the process
783 * from terminating. In effect, daemon threads will be terminated
784 * automatically by the OS when the process exits.
785 *
786 * Params:
787 * val = The new daemon status for this thread.
788 */
789 final void isDaemon( bool val )
790 {
791 synchronized( this )
792 {
793 m_isDaemon = val;
794 }
795 }
796
797
798 /**
799 * Tests whether this thread is running.
800 *
801 * Returns:
802 * true if the thread is running, false if not.
803 */
    final bool isRunning()
    {
        // A default m_addr means the thread never started or has already
        // been joined/destroyed (both paths reset m_addr).
        if( m_addr == m_addr.init )
        {
            return false;
        }

        version( Windows )
        {
            uint ecode = 0;
            GetExitCodeThread( m_hndl, &ecode );
            return ecode == STILL_ACTIVE;
        }
        else version( Posix )
        {
            // NOTE: It should be safe to access this value without
            //       memory barriers because word-tearing and such
            //       really isn't an issue for boolean values.
            return m_isRunning;
        }
    }
825
826
827 ///////////////////////////////////////////////////////////////////////////
828 // Thread Priority Actions
829 ///////////////////////////////////////////////////////////////////////////
830
831
832 /**
833 * The minimum scheduling priority that may be set for a thread. On
834 * systems where multiple scheduling policies are defined, this value
835 * represents the minimum valid priority for the scheduling policy of
836 * the process.
837 */
838 static const int PRIORITY_MIN;
839
840
841 /**
842 * The maximum scheduling priority that may be set for a thread. On
843 * systems where multiple scheduling policies are defined, this value
844 * represents the minimum valid priority for the scheduling policy of
845 * the process.
846 */
847 static const int PRIORITY_MAX;
848
849
850 /**
851 * Gets the scheduling priority for the associated thread.
852 *
853 * Returns:
854 * The scheduling priority of this thread.
855 */
    final int priority()
    {
        version( Windows )
        {
            return GetThreadPriority( m_hndl );
        }
        else version( Posix )
        {
            // Read the priority out of the thread's scheduling parameters;
            // the policy itself is queried but not otherwise used here.
            int         policy;
            sched_param param;

            if( pthread_getschedparam( m_addr, &policy, &param ) )
                throw new ThreadException( "Unable to get thread priority" );
            return param.sched_priority;
        }
    }
872
873
874 /**
875 * Sets the scheduling priority for the associated thread.
876 *
877 * Params:
878 * val = The new scheduling priority of this thread.
879 */
    final void priority( int val )
    {
        version( Windows )
        {
            if( !SetThreadPriority( m_hndl, val ) )
                throw new ThreadException( "Unable to set thread priority" );
        }
        else version( Posix )
        {
            // NOTE: pthread_setschedprio is not implemented on linux, so use
            //       the more complicated get/set sequence below.
            //if( pthread_setschedprio( m_addr, val ) )
            //    throw new ThreadException( "Unable to set thread priority" );

            // Fetch the current policy/params, patch only the priority, and
            // write the parameters back.
            int         policy;
            sched_param param;

            if( pthread_getschedparam( m_addr, &policy, &param ) )
                throw new ThreadException( "Unable to set thread priority" );
            param.sched_priority = val;
            if( pthread_setschedparam( m_addr, policy, &param ) )
                throw new ThreadException( "Unable to set thread priority" );
        }
    }
904
905
906 ///////////////////////////////////////////////////////////////////////////
907 // Actions on Calling Thread
908 ///////////////////////////////////////////////////////////////////////////
909
910
911 /**
912 * Suspends the calling thread for at least the supplied period. This may
913 * result in multiple OS calls if period is greater than the maximum sleep
914 * duration supported by the operating system.
915 *
916 * Params:
917 * period = The minimum duration the calling thread should be suspended,
918 * in 100 nanosecond intervals.
919 *
920 * In:
921 * period must be non-negative.
922 *
923 * Example:
924 * ------------------------------------------------------------------------
925 *
926 * Thread.sleep( 500_000 ); // sleep for 50 milliseconds
927 * Thread.sleep( 50_000_000 ); // sleep for 5 seconds
928 *
929 * ------------------------------------------------------------------------
930 */
931 static void sleep( long period )
932 in
933 {
934 assert( period >= 0 );
935 }
936 body
937 {
938 version( Windows )
939 {
940 enum : uint
941 {
942 TICKS_PER_MILLI = 10_000,
943 MAX_SLEEP_MILLIS = uint.max - 1
944 }
945
946 // NOTE: In instances where all other threads in the process have a
947 // lower priority than the current thread, the current thread
948 // will not yield with a sleep time of zero. However, unlike
949 // yield(), the user is not asking for a yield to occur but
950 // only for execution to suspend for the requested interval.
951 // Therefore, expected performance may not be met if a yield
952 // is forced upon the user.
953 period /= TICKS_PER_MILLI;
954 while( period > MAX_SLEEP_MILLIS )
955 {
956 Sleep( MAX_SLEEP_MILLIS );
957 period -= MAX_SLEEP_MILLIS;
958 }
959 Sleep( cast(uint) period );
960 }
961 else version( Posix )
962 {
963 timespec tin = void;
964 timespec tout = void;
965
966 enum : uint
967 {
968 NANOS_PER_TICK = 100,
969 TICKS_PER_SECOND = 10_000_000,
970 }
971 enum : typeof(period)
972 {
973 MAX_SLEEP_TICKS = cast(typeof(period)) tin.tv_sec.max * TICKS_PER_SECOND
974 }
975
976 do
977 {
978 if( period > MAX_SLEEP_TICKS )
979 {
980 tin.tv_sec = tin.tv_sec.max;
981 tin.tv_nsec = 0;
982 }
983 else
984 {
985 tin.tv_sec = cast(typeof(tin.tv_sec)) (period / TICKS_PER_SECOND);
986 tin.tv_nsec = cast(typeof(tin.tv_nsec)) (period % TICKS_PER_SECOND) * NANOS_PER_TICK;
987 }
988 while( true )
989 {
990 if( !nanosleep( &tin, &tout ) )
991 return;
992 if( getErrno() != EINTR )
993 throw new ThreadException( "Unable to sleep for the specified duration" );
994 tin = tout;
995 }
996 period -= (cast(typeof(period)) tin.tv_sec) * TICKS_PER_SECOND;
997 period -= (cast(typeof(period)) tin.tv_nsec) / NANOS_PER_TICK;
998 } while( period > 0 );
999 }
1000 }
1001
1002
1003 /**
1004 * Forces a context switch to occur away from the calling thread.
1005 */
    static void yield()
    {
        version( Windows )
        {
            // NOTE: Sleep(1) is necessary because Sleep(0) does not give
            //       lower priority threads any timeslice, so looping on
            //       Sleep(0) could be resource-intensive in some cases.
            Sleep( 1 );
        }
        else version( Posix )
        {
            // Ask the scheduler to run another thread.
            sched_yield();
        }
    }
1020
1021
1022 ///////////////////////////////////////////////////////////////////////////
1023 // Thread Accessors
1024 ///////////////////////////////////////////////////////////////////////////
1025
1026
1027 /**
1028 * Provides a reference to the calling thread.
1029 *
1030 * Returns:
1031 * The thread object representing the calling thread. The result of
1032 * deleting this object is undefined.
1033 */
    static Thread getThis()
    {
        // NOTE: This function may not be called until thread_init has
        //       completed.  See thread_suspendAll for more information
        //       on why this might occur.
        version( Windows )
        {
            // sm_this holds the per-thread Thread reference stored by
            // setThis; null if setThis has not run on this thread yet.
            return cast(Thread) TlsGetValue( sm_this );
        }
        else version( Posix )
        {
            return cast(Thread) pthread_getspecific( sm_this );
        }
    }
1048
1049
1050 /**
1051 * Provides a list of all threads currently being tracked by the system.
1052 *
1053 * Returns:
1054 * An array containing references to all threads currently being
1055 * tracked by the system. The result of deleting any contained
1056 * objects is undefined.
1057 */
1058 static Thread[] getAll()
1059 {
1060 synchronized( slock )
1061 {
1062 size_t pos = 0;
1063 Thread[] buf = new Thread[sm_tlen];
1064
1065 foreach( Thread t; Thread )
1066 {
1067 buf[pos++] = t;
1068 }
1069 return buf;
1070 }
1071 }
1072
1073
1074 /**
1075 * Operates on all threads currently being tracked by the system. The
1076 * result of deleting any Thread object is undefined.
1077 *
1078 * Params:
1079 * dg = The supplied code as a delegate.
1080 *
1081 * Returns:
1082 * Zero if all elements are visited, nonzero if not.
1083 */
    static int opApply( int delegate( inout Thread ) dg )
    {
        synchronized( slock )
        {
            int ret = 0;

            // Walk the global thread list, stopping early if the delegate
            // signals completion with a nonzero result (standard opApply
            // protocol).
            for( Thread t = sm_tbeg; t; t = t.next )
            {
                ret = dg( t );
                if( ret )
                    break;
            }
            return ret;
        }
    }
1099
1100
1101 ///////////////////////////////////////////////////////////////////////////
1102 // Local Storage Actions
1103 ///////////////////////////////////////////////////////////////////////////
1104
1105
1106 /**
1107 * Indicates the number of local storage pointers available at program
1108 * startup. It is recommended that this number be at least 64.
1109 */
1110 static const uint LOCAL_MAX = 64;
1111
1112
1113 /**
1114 * Reserves a local storage pointer for use and initializes this location
1115 * to null for all running threads.
1116 *
1117 * Returns:
1118 * A key representing the array offset of this memory location.
1119 */
    static uint createLocal()
    {
        synchronized( slock )
        {
            // Linear scan for the first unclaimed slot.
            foreach( uint key, inout bool set; sm_local )
            {
                if( !set )
                {
                    // Null this slot in every tracked thread so data left by
                    // a previous user of the key is not visible.
                    //foreach( Thread t; sm_tbeg ) Bug in GDC 0.24 SVN (r139)
                    for( Thread t = sm_tbeg; t; t = t.next )
                    {
                        t.m_local[key] = null;
                    }
                    set = true;
                    return key;
                }
            }
            throw new ThreadException( "No more local storage slots available" );
        }
    }
1140
1141
1142 /**
1143 * Marks the supplied key as available and sets the associated location
1144 * to null for all running threads. It is assumed that any key passed
1145 * to this function is valid. The result of calling this function for
1146 * a key which is still in use is undefined.
1147 *
1148 * Params:
1149 * key = The key to delete.
1150 */
    static void deleteLocal( uint key )
    {
        synchronized( slock )
        {
            // Release the slot, then null it in every tracked thread so a
            // future createLocal starts from a clean state.
            sm_local[key] = false;
            // foreach( Thread t; sm_tbeg ) Bug in GDC 0.24 SVN (r139)
            for( Thread t = sm_tbeg; t; t = t.next )
            {
                t.m_local[key] = null;
            }
        }
    }
1163
1164
1165 /**
1166 * Loads the value stored at key within a thread-local static array. It is
1167 * assumed that any key passed to this function is valid.
1168 *
1169 * Params:
1170 * key = The location which holds the desired data.
1171 *
1172 * Returns:
1173 * The data associated with the supplied key.
1174 */
    static void* getLocal( uint key )
    {
        // Unsynchronized read of the calling thread's own slot.
        // NOTE(review): assumes no concurrent createLocal/deleteLocal is
        // racing on this key -- confirm against callers.
        return getThis().m_local[key];
    }
1179
1180
1181 /**
1182 * Stores the supplied value at key within a thread-local static array. It
1183 * is assumed that any key passed to this function is valid.
1184 *
1185 * Params:
1186 * key = The location to store the supplied data.
1187 * val = The data to store.
1188 *
1189 * Returns:
1190 * A copy of the data which has just been stored.
1191 */
    static void* setLocal( uint key, void* val )
    {
        // Store into the calling thread's own slot and echo the value back,
        // per the documented contract.
        return getThis().m_local[key] = val;
    }
1196
1197
1198 ///////////////////////////////////////////////////////////////////////////
1199 // Static Initalizer
1200 ///////////////////////////////////////////////////////////////////////////
1201
1202
1203 /**
1204 * This initializer is used to set thread constants. All functional
1205 * initialization occurs within thread_init().
1206 */
    static this()
    {
        version( Windows )
        {
            // Fixed priority bounds on Windows.
            // NOTE(review): presumably these mirror the extremes accepted by
            // SetThreadPriority -- confirm against the Win32 documentation.
            PRIORITY_MIN = -15;
            PRIORITY_MAX =  15;
        }
        else version( Posix )
        {
            // Derive the bounds from the scheduling policy the main thread
            // is currently running under.
            int         policy;
            sched_param param;
            pthread_t   self = pthread_self();

            int status = pthread_getschedparam( self, &policy, &param );
            assert( status == 0 );

            PRIORITY_MIN = sched_get_priority_min( policy );
            assert( PRIORITY_MIN != -1 );

            PRIORITY_MAX = sched_get_priority_max( policy );
            assert( PRIORITY_MAX != -1 );
        }
    }
1230
1231
1232 private:
1233 //
1234 // Initializes a thread object which has no associated executable function.
1235 // This is used for the main thread initialized in thread_init().
1236 //
    this()
    {
        m_call = Call.NO;
        m_curr = &m_main;

        // Record the bounds of the static TLS segment for this (the main)
        // thread; m_tls spans the implicit thread-local storage area (see
        // the m_tls declaration).
        void* pstart = cast(void*) &_tlsstart;
        void* pend   = cast(void*) &_tlsend;
        m_tls = pstart[0 .. pend - pstart];
    }
1246
1247
1248 //
1249 // Thread entry point. Invokes the function or delegate passed on
1250 // construction (if any).
1251 //
1252 final void run()
1253 {
1254 switch( m_call )
1255 {
1256 case Call.FN:
1257 m_fn();
1258 break;
1259 case Call.DG:
1260 m_dg();
1261 break;
1262 default:
1263 break;
1264 }
1265 }
1266
1267
1268 private:
1269 //
1270 // The type of routine passed on thread construction.
1271 //
    enum Call
    {
        NO, // no routine -- used for the main thread
        FN, // the m_fn member of the union below is valid
        DG  // the m_dg member of the union below is valid
    }


    //
    // Standard types
    //
    version( Windows )
    {
        alias uint TLSKey;     // slot index as returned by TlsAlloc
        alias uint ThreadAddr; // Win32 thread id
    }
    else version( Posix )
    {
        alias pthread_key_t TLSKey;
        alias pthread_t ThreadAddr;
    }


    //
    // Local storage
    //
    // NOTE(review): these appear to back the createLocal/getLocal/setLocal
    // API used by ThreadLocal below; the allocation logic is outside this
    // chunk -- confirm against the full file.
    __gshared bool[LOCAL_MAX] sm_local; // which local-storage slots are in use
    __gshared TLSKey sm_this;           // TLS key holding the current Thread (see setThis)

    void*[LOCAL_MAX] m_local; // per-thread values for the local-storage slots


    //
    // Standard thread data
    //
    version( Windows )
    {
        HANDLE m_hndl; // OS handle; used by Suspend/Resume/GetThreadContext
    }
    else version( OSX )
    {
        mach_port_t m_tmach; // Mach port; used by thread_suspend/thread_resume
    }
    ThreadAddr m_addr; // OS-level thread id
    Call m_call;       // which member of the union below is valid
    char[] m_name;     // thread name (accessors are outside this chunk)
    union
    {
        void function() m_fn;
        void delegate() m_dg;
    }
    size_t m_sz; // stack size -- presumably the requested size; set outside this chunk
    version( Posix )
    {
        bool m_isRunning; // tracked manually; no portable liveness query on Posix
    }
    bool m_isDaemon;    // daemon threads do not keep thread_joinAll waiting
    Object m_unhandled; // presumably an exception that escaped the routine -- set outside this chunk
1330
1331
1332 private:
1333 ///////////////////////////////////////////////////////////////////////////
1334 // Storage of Active Thread
1335 ///////////////////////////////////////////////////////////////////////////
1336
1337
1338 //
1339 // Sets a thread-local reference to the current thread object.
1340 //
    static void setThis( Thread t )
    {
        version( Windows )
        {
            // Store the reference in the TLS slot allocated in thread_init().
            TlsSetValue( sm_this, cast(void*) t );
        }
        else version( Posix )
        {
            // Store the reference under the key created in thread_init().
            pthread_setspecific( sm_this, cast(void*) t );
        }
    }
1352
1353
1354 private:
1355 ///////////////////////////////////////////////////////////////////////////
1356 // Thread Context and GC Scanning Support
1357 ///////////////////////////////////////////////////////////////////////////
1358
1359
    final void pushContext( Context* c )
    in
    {
        assert( !c.within );
    }
    body
    {
        // Link the new context to the one it interrupts, then make it the
        // current (innermost) context for this thread.
        c.within = m_curr;
        m_curr = c;
    }
1370
1371
    final void popContext()
    in
    {
        assert( m_curr && m_curr.within );
    }
    body
    {
        // Restore the interrupted context and unlink the popped one.
        Context* c = m_curr;
        m_curr = c.within;
        c.within = null;
    }
1383
1384
    final Context* topContext()
    in
    {
        assert( m_curr );
    }
    body
    {
        // The innermost active stack context for this thread.
        return m_curr;
    }
1394
1395
    //
    // Represents one scannable stack range: a thread's main stack or a
    // Fiber stack (see the GC Scanning Support notes below).
    //
    static struct Context
    {
        void* bstack, // bottom (origin) of the stack
        tstack;       // current top of the stack
        Context* within; // context this one interrupted (see pushContext)
        Context* next,   // links for the global context list
        prev;
    }


    Context m_main;  // context for the thread's own (main) stack
    Context* m_curr; // innermost active context; &m_main unless fibers pushed more
    bool m_lock;     // when set, suspend/resume leave m_curr.tstack untouched
    void[] m_tls; // spans implicit thread local storage
1410
1411 version( Windows )
1412 {
1413 version( X86 )
1414 {
1415 uint[8] m_reg; // edi,esi,ebp,esp,ebx,edx,ecx,eax
1416 }
1417 else version( X86_64 )
1418 {
1419 ulong[16] m_reg; // rdi,rsi,rbp,rsp,rbx,rdx,rcx,rax
1420 // r8,r9,r10,r11,r12,r13,r14,r15
1421 }
1422 else
1423 {
1424 static assert( "Architecture not supported." );
1425 }
1426 }
1427 else version( OSX )
1428 {
1429 version( X86 )
1430 {
1431 uint[8] m_reg; // edi,esi,ebp,esp,ebx,edx,ecx,eax
1432 }
1433 else version( X86_64 )
1434 {
1435 ulong[16] m_reg; // rdi,rsi,rbp,rsp,rbx,rdx,rcx,rax
1436 // r8,r9,r10,r11,r12,r13,r14,r15
1437 }
1438 else
1439 {
1440 static assert( "Architecture not supported." );
1441 }
1442 }
1443
1444
1445 private:
1446 ///////////////////////////////////////////////////////////////////////////
1447 // GC Scanning Support
1448 ///////////////////////////////////////////////////////////////////////////
1449
1450
1451 // NOTE: The GC scanning process works like so:
1452 //
1453 // 1. Suspend all threads.
1454 // 2. Scan the stacks of all suspended threads for roots.
1455 // 3. Resume all threads.
1456 //
1457 // Step 1 and 3 require a list of all threads in the system, while
1458 // step 2 requires a list of all thread stacks (each represented by
1459 // a Context struct). Traditionally, there was one stack per thread
1460 // and the Context structs were not necessary. However, Fibers have
1461 // changed things so that each thread has its own 'main' stack plus
1462 // an arbitrary number of nested stacks (normally referenced via
1463 // m_curr). Also, there may be 'free-floating' stacks in the system,
1464 // which are Fibers that are not currently executing on any specific
1465 // thread but are still being processed and still contain valid
1466 // roots.
1467 //
1468 // To support all of this, the Context struct has been created to
1469 // represent a stack range, and a global list of Context structs has
1470 // been added to enable scanning of these stack ranges. The lifetime
1471 // (and presence in the Context list) of a thread's 'main' stack will
    // be equivalent to the thread's lifetime. So the Context will be
1473 // added to the list on thread entry, and removed from the list on
1474 // thread exit (which is essentially the same as the presence of a
1475 // Thread object in its own global list). The lifetime of a Fiber's
1476 // context, however, will be tied to the lifetime of the Fiber object
1477 // itself, and Fibers are expected to add/remove their Context struct
1478 // on construction/deletion.
1479
1480
1481 //
1482 // All use of the global lists should synchronize on this lock.
1483 //
    static Object slock()
    {
        // The ClassInfo for Thread is a single global object, so its monitor
        // serves as a process-wide lock without needing a dedicated object.
        return Thread.classinfo;
    }
1488
1489
    __gshared
    {
        Context* sm_cbeg; // head of the global context list
        size_t sm_clen;   // number of contexts in the list

        Thread sm_tbeg; // head of the global thread list
        size_t sm_tlen; // number of threads in the list
    }

    //
    // Used for ordering threads in the global thread list.
    //
    Thread prev;
    Thread next;
1504
1505
1506 ///////////////////////////////////////////////////////////////////////////
1507 // Global Context List Operations
1508 ///////////////////////////////////////////////////////////////////////////
1509
1510
1511 //
1512 // Add a context to the global context list.
1513 //
1514 static void add( Context* c )
1515 in
1516 {
1517 assert( c );
1518 assert( !c.next && !c.prev );
1519 }
1520 body
1521 {
1522 synchronized( slock )
1523 {
1524 if( sm_cbeg )
1525 {
1526 c.next = sm_cbeg;
1527 sm_cbeg.prev = c;
1528 }
1529 sm_cbeg = c;
1530 ++sm_clen;
1531 }
1532 }
1533
1534
1535 //
1536 // Remove a context from the global context list.
1537 //
    static void remove( Context* c )
    in
    {
        assert( c );
        assert( c.next || c.prev );
    }
    body
    {
        synchronized( slock )
        {
            // Standard doubly-linked list unlink; adjust the head if needed.
            if( c.prev )
                c.prev.next = c.next;
            if( c.next )
                c.next.prev = c.prev;
            if( sm_cbeg == c )
                sm_cbeg = c.next;
            --sm_clen;
        }
        // NOTE: Don't null out c.next or c.prev because opApply currently
        //       follows c.next after removing a node.  This could be easily
        //       addressed by simply returning the next node from this
        //       function, however, a context should never be re-added to the
        //       list anyway and having next and prev be non-null is a good way
        //       to ensure that.
    }
1563
1564
1565 ///////////////////////////////////////////////////////////////////////////
1566 // Global Thread List Operations
1567 ///////////////////////////////////////////////////////////////////////////
1568
1569
1570 //
1571 // Add a thread to the global thread list.
1572 //
1573 static void add( Thread t )
1574 in
1575 {
1576 assert( t );
1577 assert( !t.next && !t.prev );
1578 assert( t.isRunning );
1579 }
1580 body
1581 {
1582 synchronized( slock )
1583 {
1584 if( sm_tbeg )
1585 {
1586 t.next = sm_tbeg;
1587 sm_tbeg.prev = t;
1588 }
1589 sm_tbeg = t;
1590 ++sm_tlen;
1591 }
1592 }
1593
1594
1595 //
1596 // Remove a thread from the global thread list.
1597 //
    static void remove( Thread t )
    in
    {
        assert( t );
        assert( t.next || t.prev );
        version( Windows )
        {
            // NOTE: This doesn't work for Posix as m_isRunning must be set to
            //       false after the thread is removed during normal execution.
            assert( !t.isRunning );
        }
    }
    body
    {
        synchronized( slock )
        {
            // NOTE: When a thread is removed from the global thread list its
            //       main context is invalid and should be removed as well.
            //       It is possible that t.m_curr could reference more
            //       than just the main context if the thread exited abnormally
            //       (if it was terminated), but we must assume that the user
            //       retains a reference to them and that they may be re-used
            //       elsewhere.  Therefore, it is the responsibility of any
            //       object that creates contexts to clean them up properly
            //       when it is done with them.
            remove( &t.m_main );

            // Standard doubly-linked list unlink; adjust the head if needed.
            if( t.prev )
                t.prev.next = t.next;
            if( t.next )
                t.next.prev = t.prev;
            if( sm_tbeg == t )
                sm_tbeg = t.next;
            --sm_tlen;
        }
        // NOTE: Don't null out t.next or t.prev because opApply currently
        //       follows t.next after removing a node.  This could be easily
        //       addressed by simply returning the next node from this
        //       function, however, a thread should never be re-added to the
        //       list anyway and having next and prev be non-null is a good way
        //       to ensure that.
    }
1641
1642
1643 ///////////////////////////////////////////////////////////////////////////////
1644 // GC Support Routines
1645 ///////////////////////////////////////////////////////////////////////////////
1646
1647
1648 /**
1649 * Initializes the thread module. This function must be called by the
1650 * garbage collector on startup and before any other thread routines
1651 * are called.
1652 */
extern (C) void thread_init()
{
    // NOTE: If thread_init itself performs any allocations then the thread
    //       routines reserved for garbage collector use may be called while
    //       thread_init is being processed.  However, since no memory should
    //       exist to be scanned at this point, it is sufficient for these
    //       functions to detect the condition and return immediately.

    version( Windows )
    {
        // Allocate the TLS slot used by Thread.getThis/setThis.
        Thread.sm_this = TlsAlloc();
        assert( Thread.sm_this != TLS_OUT_OF_INDEXES );
    }
    else version( Posix )
    {
        int status;
        sigaction_t sigusr1 = void;
        sigaction_t sigusr2 = void;

        // This is a quick way to zero-initialize the structs without using
        // memset or creating a link dependency on their static initializer.
        (cast(byte*) &sigusr1)[0 .. sigaction_t.sizeof] = 0;
        (cast(byte*) &sigusr2)[0 .. sigaction_t.sizeof] = 0;

        // NOTE: SA_RESTART indicates that system calls should restart if they
        //       are interrupted by a signal, but this is not available on all
        //       Posix systems, even those that support multithreading.
        static if( is( typeof( SA_RESTART ) ) )
            sigusr1.sa_flags = SA_RESTART;
        else
            sigusr1.sa_flags = 0;
        sigusr1.sa_handler = &thread_suspendHandler;
        // NOTE: We want to ignore all signals while in this handler, so fill
        //       sa_mask to indicate this.
        status = sigfillset( &sigusr1.sa_mask );
        assert( status == 0 );

        // NOTE: Since SIGUSR2 should only be issued for threads within the
        //       suspend handler, we don't want this signal to trigger a
        //       restart.
        sigusr2.sa_flags = 0;
        sigusr2.sa_handler = &thread_resumeHandler;
        // NOTE: We want to ignore all signals while in this handler, so fill
        //       sa_mask to indicate this.
        status = sigfillset( &sigusr2.sa_mask );
        assert( status == 0 );

        // SIGUSR1 suspends threads for collection; SIGUSR2 resumes them.
        status = sigaction( SIGUSR1, &sigusr1, null );
        assert( status == 0 );

        status = sigaction( SIGUSR2, &sigusr2, null );
        assert( status == 0 );

        // Counts threads that have acknowledged a suspend request
        // (posted by the suspend handler, waited on in thread_suspendAll).
        status = sem_init( &suspendCount, 0, 0 );
        assert( status == 0 );

        status = pthread_key_create( &Thread.sm_this, null );
        assert( status == 0 );
    }

    // Register the calling (main) thread with the runtime.
    thread_attachThis();
}
1715
1716
1717 /**
1718 * Registers the calling thread for use with the D Runtime. If this routine
1719 * is called for a thread which is already registered, the result is undefined.
1720 */
extern (C) void thread_attachThis()
{
    // NOTE: version blocks do not introduce a new scope in D, so thisThread
    //       and thisContext declared inside the branches below remain
    //       visible to the OSX block and the Thread.add calls at the end.
    version( Windows )
    {
        Thread thisThread = new Thread();
        Thread.Context* thisContext = &thisThread.m_main;
        assert( thisContext == thisThread.m_curr );

        thisThread.m_addr = GetCurrentThreadId();
        thisThread.m_hndl = GetCurrentThreadHandle();
        thisContext.bstack = getStackBottom();
        thisContext.tstack = thisContext.bstack; // stack is empty as far as the GC is concerned

        // Attached threads are daemons so they don't block thread_joinAll.
        thisThread.m_isDaemon = true;

        Thread.setThis( thisThread );
    }
    else version( Posix )
    {
        Thread thisThread = new Thread();
        Thread.Context* thisContext = thisThread.m_curr;
        assert( thisContext == &thisThread.m_main );

        thisThread.m_addr = pthread_self();
        thisContext.bstack = getStackBottom();
        thisContext.tstack = thisContext.bstack; // stack is empty as far as the GC is concerned

        thisThread.m_isRunning = true;
        // Attached threads are daemons so they don't block thread_joinAll.
        thisThread.m_isDaemon = true;

        Thread.setThis( thisThread );
    }
    version( OSX )
    {
        // On OSX the Posix branch above also runs; additionally obtain the
        // Mach port needed by the suspend/resume machinery.
        thisThread.m_tmach = pthread_mach_thread_np( thisThread.m_addr );
        assert( thisThread.m_tmach != thisThread.m_tmach.init );
    }

    // Make the thread and its main stack context visible to the GC.
    Thread.add( thisThread );
    Thread.add( thisContext );
}
1762
1763
1764 /**
1765 * Deregisters the calling thread from use with the runtime. If this routine
1766 * is called for a thread which is already registered, the result is undefined.
1767 */
extern (C) void thread_detachThis()
{
    // Unregister the calling thread (and its main context) from the
    // global lists used for GC scanning.
    auto self = Thread.getThis();
    Thread.remove( self );
}
1772
1773
1774 /**
1775 * Joins all non-daemon threads that are currently running. This is done by
1776 * performing successive scans through the thread list until a scan consists
1777 * of only daemon threads.
1778 */
extern (C) void thread_joinAll()
{
    // Repeatedly scan for a non-daemon thread and join it; once a scan
    // finds none, every remaining thread is a daemon and we are done.
    for( ; ; )
    {
        Thread waitee = null;

        foreach( t; Thread )
        {
            if( t.isDaemon )
                continue;
            waitee = t;
            break;
        }
        if( waitee is null )
            return;
        waitee.join();
    }
}
1799
1800
1801 /**
1802 * Performs intermediate shutdown of the thread module.
1803 */
static ~this()
{
    // NOTE: The functionality related to garbage collection must be minimally
    //       operable after this dtor completes.  Therefore, only minimal
    //       cleanup may occur.

    // Drop threads that are no longer running from the global list.
    // Reading t.next after Thread.remove( t ) is safe because remove
    // deliberately leaves next/prev intact (see the NOTE in remove).
    for( Thread t = Thread.sm_tbeg; t; t = t.next )
    {
        if( !t.isRunning )
            Thread.remove( t );
    }
}
1816
1817
// Used for needLock below.  Set once Thread.start() has been called in this
// process (per the thread_needLock documentation) and never reset.
private __gshared bool multiThreadedFlag = false;
1820
1821
1822 /**
 * This function is used to determine whether the process is
1824 * multi-threaded. Optimizations may only be performed on this
1825 * value if the programmer can guarantee that no path from the
1826 * enclosed code will start a thread.
1827 *
1828 * Returns:
1829 * True if Thread.start() has been called in this process.
1830 */
extern (C) bool thread_needLock()
{
    // Set elsewhere when a second thread is started; never cleared.
    return multiThreadedFlag;
}
1835
1836
// Used for suspendAll/resumeAll below.  Tracks the nesting depth of matched
// suspend/resume calls; only the outermost call does any real work.
private __gshared uint suspendDepth = 0;
1839
1840
1841 /**
1842 * Suspend all threads but the calling thread for "stop the world" garbage
1843 * collection runs. This function may be called multiple times, and must
1844 * be followed by a matching number of calls to thread_resumeAll before
1845 * processing is resumed.
1846 *
1847 * Throws:
1848 * ThreadException if the suspend operation fails for a running thread.
1849 */
extern (C) void thread_suspendAll()
{
    /**
     * Suspend the specified thread and load stack and register information for
     * use by thread_scanAll.  If the supplied thread is the calling thread,
     * stack and register information will be loaded but the thread will not
     * be suspended.  If the suspend operation fails and the thread is not
     * running then it will be removed from the global thread list, otherwise
     * an exception will be thrown.
     *
     * Params:
     *  t = The thread to suspend.
     *
     * Throws:
     *  ThreadException if the suspend operation fails for a running thread.
     */
    void suspend( Thread t )
    {
        version( Windows )
        {
            if( t.m_addr != GetCurrentThreadId() && SuspendThread( t.m_hndl ) == 0xFFFFFFFF )
            {
                if( !t.isRunning )
                {
                    Thread.remove( t );
                    return;
                }
                throw new ThreadException( "Unable to suspend thread" );
            }

            CONTEXT context = void;
            context.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;

            if( !GetThreadContext( t.m_hndl, &context ) )
                throw new ThreadException( "Unable to load thread context" );

            version( X86 )
            {
                if( !t.m_lock )
                    t.m_curr.tstack = cast(void*) context.Esp;
                // eax,ebx,ecx,edx,edi,esi,ebp,esp
                t.m_reg[0] = context.Eax;
                t.m_reg[1] = context.Ebx;
                t.m_reg[2] = context.Ecx;
                t.m_reg[3] = context.Edx;
                t.m_reg[4] = context.Edi;
                t.m_reg[5] = context.Esi;
                t.m_reg[6] = context.Ebp;
                t.m_reg[7] = context.Esp;
            }
            else
            {
                // FIX: was static assert( "..." ), which is always true in D
                //      and therefore never fired.
                static assert( false, "Architecture not supported." );
            }
        }
        else version( OSX )
        {
            if( t.m_addr != pthread_self() && thread_suspend( t.m_tmach ) != KERN_SUCCESS )
            {
                if( !t.isRunning )
                {
                    Thread.remove( t );
                    return;
                }
                throw new ThreadException( "Unable to suspend thread" );
            }

            version( X86 )
            {
                x86_thread_state32_t state = void;
                mach_msg_type_number_t count = x86_THREAD_STATE32_COUNT;

                if( thread_get_state( t.m_tmach, x86_THREAD_STATE32, &state, &count ) != KERN_SUCCESS )
                    throw new ThreadException( "Unable to load thread state" );
                if( !t.m_lock )
                    t.m_curr.tstack = cast(void*) state.esp;
                // eax,ebx,ecx,edx,edi,esi,ebp,esp
                t.m_reg[0] = state.eax;
                t.m_reg[1] = state.ebx;
                t.m_reg[2] = state.ecx;
                t.m_reg[3] = state.edx;
                t.m_reg[4] = state.edi;
                t.m_reg[5] = state.esi;
                t.m_reg[6] = state.ebp;
                t.m_reg[7] = state.esp;
            }
            else version( X86_64 )
            {
                x86_thread_state64_t state = void;
                mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;

                if( thread_get_state( t.m_tmach, x86_THREAD_STATE64, &state, &count ) != KERN_SUCCESS )
                    throw new ThreadException( "Unable to load thread state" );
                if( !t.m_lock )
                    t.m_curr.tstack = cast(void*) state.rsp;
                // rax,rbx,rcx,rdx,rdi,rsi,rbp,rsp
                t.m_reg[0] = state.rax;
                t.m_reg[1] = state.rbx;
                t.m_reg[2] = state.rcx;
                t.m_reg[3] = state.rdx;
                t.m_reg[4] = state.rdi;
                t.m_reg[5] = state.rsi;
                t.m_reg[6] = state.rbp;
                t.m_reg[7] = state.rsp;
                // r8,r9,r10,r11,r12,r13,r14,r15
                t.m_reg[8] = state.r8;
                t.m_reg[9] = state.r9;
                t.m_reg[10] = state.r10;
                t.m_reg[11] = state.r11;
                t.m_reg[12] = state.r12;
                t.m_reg[13] = state.r13;
                t.m_reg[14] = state.r14;
                t.m_reg[15] = state.r15;
            }
            else
            {
                // FIX: was static assert( "..." ), which is always true in D
                //      and therefore never fired.
                static assert( false, "Architecture not supported." );
            }
        }
        else version( Posix )
        {
            if( t.m_addr != pthread_self() )
            {
                if( pthread_kill( t.m_addr, SIGUSR1 ) != 0 )
                {
                    if( !t.isRunning )
                    {
                        Thread.remove( t );
                        return;
                    }
                    throw new ThreadException( "Unable to suspend thread" );
                }
                // NOTE: It's really not ideal to wait for each thread to
                //       signal individually -- rather, it would be better to
                //       suspend them all and wait once at the end.  However,
                //       semaphores don't really work this way, and the obvious
                //       alternative (looping on an atomic suspend count)
                //       requires either the atomic module (which only works on
                //       x86) or other specialized functionality.  It would
                //       also be possible to simply loop on sem_wait at the
                //       end, but I'm not convinced that this would be much
                //       faster than the current approach.
                sem_wait( &suspendCount );
            }
            else if( !t.m_lock )
            {
                t.m_curr.tstack = getStackTop();
            }
        }
    }


    // NOTE: We've got an odd chicken & egg problem here, because while the GC
    //       is required to call thread_init before calling any other thread
    //       routines, thread_init may allocate memory which could in turn
    //       trigger a collection.  Thus, thread_suspendAll, thread_scanAll,
    //       and thread_resumeAll must be callable before thread_init
    //       completes, with the assumption that no other GC memory has yet
    //       been allocated by the system, and thus there is no risk of losing
    //       data if the global thread list is empty.  The check of
    //       Thread.sm_tbeg below is done to ensure thread_init has completed,
    //       and therefore that calling Thread.getThis will not result in an
    //       error.  For the short time when Thread.sm_tbeg is null, there is
    //       no reason not to simply call the multithreaded code below, with
    //       the expectation that the foreach loop will never be entered.
    if( !multiThreadedFlag && Thread.sm_tbeg )
    {
        if( ++suspendDepth == 1 )
            suspend( Thread.getThis() );
        return;
    }
    synchronized( Thread.slock )
    {
        if( ++suspendDepth > 1 )
            return;

        // NOTE: I'd really prefer not to check isRunning within this loop but
        //       not doing so could be problematic if threads are terminated
        //       abnormally and a new thread is created with the same thread
        //       address before the next GC run.  This situation might cause
        //       the same thread to be suspended twice, which would likely
        //       cause the second suspend to fail, the garbage collection to
        //       abort, and Bad Things to occur.
        for( Thread t = Thread.sm_tbeg; t; t = t.next )
        {
            if( t.isRunning )
                suspend( t );
            else
                Thread.remove( t );
        }

        version( Posix )
        {
            // wait on semaphore -- see note in suspend for
            // why this is currently not implemented
        }
    }
}
2048
2049
2050 /**
2051 * Resume all threads but the calling thread for "stop the world" garbage
2052 * collection runs. This function must be called once for each preceding
2053 * call to thread_suspendAll before the threads are actually resumed.
2054 *
2055 * In:
2056 * This routine must be preceded by a call to thread_suspendAll.
2057 *
2058 * Throws:
2059 * ThreadException if the resume operation fails for a running thread.
2060 */
extern (C) void thread_resumeAll()
in
{
    assert( suspendDepth > 0 );
}
body
{
    /**
     * Resume the specified thread and unload stack and register information.
     * If the supplied thread is the calling thread, stack and register
     * information will be unloaded but the thread will not be resumed.  If
     * the resume operation fails and the thread is not running then it will
     * be removed from the global thread list, otherwise an exception will be
     * thrown.
     *
     * Params:
     *  t = The thread to resume.
     *
     * Throws:
     *  ThreadException if the resume fails for a running thread.
     */
    void resume( Thread t )
    {
        version( Windows )
        {
            if( t.m_addr != GetCurrentThreadId() && ResumeThread( t.m_hndl ) == 0xFFFFFFFF )
            {
                if( !t.isRunning )
                {
                    Thread.remove( t );
                    return;
                }
                throw new ThreadException( "Unable to resume thread" );
            }

            // Reset the saved stack pointer and clear the captured registers
            // now that the thread is live again.
            if( !t.m_lock )
                t.m_curr.tstack = t.m_curr.bstack;
            t.m_reg[0 .. $] = 0;
        }
        else version( OSX )
        {
            if( t.m_addr != pthread_self() && thread_resume( t.m_tmach ) != KERN_SUCCESS )
            {
                if( !t.isRunning )
                {
                    Thread.remove( t );
                    return;
                }
                throw new ThreadException( "Unable to resume thread" );
            }

            // Reset the saved stack pointer and clear the captured registers
            // now that the thread is live again.
            if( !t.m_lock )
                t.m_curr.tstack = t.m_curr.bstack;
            t.m_reg[0 .. $] = 0;
        }
        else version( Posix )
        {
            if( t.m_addr != pthread_self() )
            {
                if( pthread_kill( t.m_addr, SIGUSR2 ) != 0 )
                {
                    if( !t.isRunning )
                    {
                        Thread.remove( t );
                        return;
                    }
                    throw new ThreadException( "Unable to resume thread" );
                }
            }
            else if( !t.m_lock )
            {
                t.m_curr.tstack = t.m_curr.bstack;
            }
        }
    }


    // NOTE: See thread_suspendAll for the logic behind this.
    if( !multiThreadedFlag && Thread.sm_tbeg )
    {
        if( --suspendDepth == 0 )
            resume( Thread.getThis() );
        return;
    }
    synchronized( Thread.slock )
    {
        if( --suspendDepth > 0 )
            return;

        for( Thread t = Thread.sm_tbeg; t; t = t.next )
        {
            resume( t );
        }
    }
}
2156
2157
// Signature of the GC scan callback: scans the address range [p1, p2).
private alias void delegate( void*, void* ) scanAllThreadsFn;
2159
2160
2161 /**
2162 * The main entry point for garbage collection. The supplied delegate
2163 * will be passed ranges representing both stack and register values.
2164 *
2165 * Params:
2166 * scan = The scanner function. It should scan from p1 through p2 - 1.
2167 * curStackTop = An optional pointer to the top of the calling thread's stack.
2168 *
2169 * In:
2170 * This routine must be preceded by a call to thread_suspendAll.
2171 */
extern (C) void thread_scanAll( scanAllThreadsFn scan, void* curStackTop = null )
in
{
    assert( suspendDepth > 0 );
}
body
{
    Thread thisThread = null;
    void* oldStackTop = null;

    // Temporarily record the caller's stack top so its own stack is scanned
    // down to the supplied pointer rather than its last saved value.
    if( curStackTop && Thread.sm_tbeg )
    {
        thisThread = Thread.getThis();
        if( !thisThread.m_lock )
        {
            oldStackTop = thisThread.m_curr.tstack;
            thisThread.m_curr.tstack = curStackTop;
        }
    }

    scope( exit )
    {
        // Restore the caller's saved stack top on all exit paths.
        if( curStackTop && Thread.sm_tbeg )
        {
            if( !thisThread.m_lock )
            {
                thisThread.m_curr.tstack = oldStackTop;
            }
        }
    }

    // NOTE: Synchronizing on Thread.slock is not needed because this
    //       function may only be called after all other threads have
    //       been suspended from within the same lock.
    for( Thread.Context* c = Thread.sm_cbeg; c; c = c.next )
    {
        version( StackGrowsDown )
        {
            // NOTE: We can't index past the bottom of the stack
            //       so don't do the "+1" for StackGrowsDown.
            if( c.tstack && c.tstack < c.bstack )
                scan( c.tstack, c.bstack );
        }
        else
        {
            if( c.bstack && c.bstack < c.tstack )
                scan( c.bstack, c.tstack + 1 );
        }
    }

    for( Thread t = Thread.sm_tbeg; t; t = t.next )
    {
        // Scan each thread's implicit thread-local storage block.
        scan( &t.m_tls[0], &t.m_tls[0] + t.m_tls.length );

        version( Windows )
        {
            scan( &t.m_reg[0], &t.m_reg[0] + t.m_reg.length );
        }
        else version( OSX )
        {
            // FIX: suspend() captures registers into m_reg on OSX as well
            //      (via thread_get_state), so they must be scanned here too
            //      or objects referenced only from a suspended thread's
            //      registers could be collected prematurely.
            scan( &t.m_reg[0], &t.m_reg[0] + t.m_reg.length );
        }
    }
}
2232
2233
2234 ///////////////////////////////////////////////////////////////////////////////
2235 // Thread Local
2236 ///////////////////////////////////////////////////////////////////////////////
2237
2238
2239 /**
2240 * This class encapsulates the operations required to initialize, access, and
2241 * destroy thread local data.
2242 */
class ThreadLocal( T )
{
    ///////////////////////////////////////////////////////////////////////////
    // Initialization
    ///////////////////////////////////////////////////////////////////////////


    /**
     * Initializes thread local storage for the indicated value which will be
     * initialized to def for all threads.
     *
     * Params:
     *  def = The default value to return if no value has been explicitly set.
     */
    this( T def = T.init )
    {
        m_def = def;
        m_key = Thread.createLocal();
    }


    ~this()
    {
        Thread.deleteLocal( m_key );
    }


    ///////////////////////////////////////////////////////////////////////////
    // Accessors
    ///////////////////////////////////////////////////////////////////////////


    /**
     * Gets the value last set by the calling thread, or def if no such value
     * has been set.
     *
     * Returns:
     *  The stored value or def if no value is stored.
     */
    T val()
    {
        auto stored = cast(Wrap*) Thread.getLocal( m_key );

        if( stored is null )
            return m_def;
        return stored.val;
    }


    /**
     * Copies newval to a location specific to the calling thread, and returns
     * newval.
     *
     * Params:
     *  newval = The value to set.
     *
     * Returns:
     *  The value passed to this function.
     */
    T val( T newval )
    {
        auto stored = cast(Wrap*) Thread.getLocal( m_key );

        // First set by this thread: allocate the wrapper lazily.
        if( stored is null )
        {
            stored = new Wrap;
            Thread.setLocal( m_key, stored );
        }
        stored.val = newval;
        return newval;
    }


private:
    //
    // A wrapper for the stored data.  Its presence distinguishes "never set
    // by this thread" (return m_def) from an explicitly stored value, and it
    // also flattens the differences between data smaller and larger than
    // (void*).sizeof.  The tradeoff is one extra per-thread allocation per
    // ThreadLocal value versus calling the Thread routines directly.
    //
    struct Wrap
    {
        T val;
    }


    T m_def;   // default returned before any explicit set on a thread
    uint m_key; // slot obtained from Thread.createLocal
}
2332
2333
2334 ///////////////////////////////////////////////////////////////////////////////
2335 // Thread Group
2336 ///////////////////////////////////////////////////////////////////////////////
2337
2338
2339 /**
2340 * This class is intended to simplify certain common programming techniques.
2341 */
class ThreadGroup
{
    /**
     * Creates and starts a new Thread object that executes fn and adds it to
     * the list of tracked threads.
     *
     * Params:
     *  fn = The thread function.
     *
     * Returns:
     *  A reference to the newly created thread.
     */
    final Thread create( void function() fn )
    {
        auto newThread = new Thread( fn );

        newThread.start();
        synchronized( this )
        {
            m_all[newThread] = newThread;
        }
        return newThread;
    }


    /**
     * Creates and starts a new Thread object that executes dg and adds it to
     * the list of tracked threads.
     *
     * Params:
     *  dg = The thread function.
     *
     * Returns:
     *  A reference to the newly created thread.
     */
    final Thread create( void delegate() dg )
    {
        auto newThread = new Thread( dg );

        newThread.start();
        synchronized( this )
        {
            m_all[newThread] = newThread;
        }
        return newThread;
    }


    /**
     * Add t to the list of tracked threads if it is not already being tracked.
     *
     * Params:
     *  t = The thread to add.
     *
     * In:
     *  t must not be null.
     */
    final void add( Thread t )
    in
    {
        assert( t );
    }
    body
    {
        synchronized( this )
        {
            m_all[t] = t;
        }
    }


    /**
     * Removes t from the list of tracked threads.  No operation will be
     * performed if t is not currently being tracked by this object.
     *
     * Params:
     *  t = The thread to remove.
     *
     * In:
     *  t must not be null.
     */
    final void remove( Thread t )
    in
    {
        assert( t );
    }
    body
    {
        synchronized( this )
        {
            m_all.remove( t );
        }
    }


    /**
     * Operates on all threads currently tracked by this object.
     */
    final int opApply( int delegate( inout Thread ) dg )
    {
        synchronized( this )
        {
            // NOTE: m_all maps each Thread to itself, so walking the key
            //       array visits every tracked thread exactly once.
            foreach( Thread t; m_all.keys )
            {
                if( auto ret = dg( t ) )
                    return ret;
            }
            return 0;
        }
    }


    /**
     * Iteratively joins all tracked threads.  This function will block add,
     * remove, and opApply until it completes.
     *
     * Params:
     *  rethrow = Rethrow any unhandled exception which may have caused the
     *            current thread to terminate.
     *
     * Throws:
     *  Any exception not handled by the joined threads.
     */
    final void joinAll( bool rethrow = true )
    {
        synchronized( this )
        {
            // NOTE: m_all maps each Thread to itself, so walking the key
            //       array visits every tracked thread exactly once.
            foreach( Thread t; m_all.keys )
                t.join( rethrow );
        }
    }


private:
    Thread[Thread] m_all; // tracked threads, keyed by themselves
}
2487
2488
2489 ///////////////////////////////////////////////////////////////////////////////
2490 // Fiber Platform Detection and Memory Allocation
2491 ///////////////////////////////////////////////////////////////////////////////
2492
2493
private
{
    // Select a hand-written assembly context-switch implementation where one
    // exists for the target; otherwise fall through to ucontext below.
    version( D_InlineAsm_X86 )
    {
        version( X86_64 )
        {

        }
        else
        {
            version( Windows )
                version = AsmX86_Win32;
            else version( Posix )
                version = AsmX86_Posix;
        }
    }
    else version( PPC )
    {
        version( Posix )
            version = AsmPPC_Posix;
    }


    version( Posix )
    {
        import core.sys.posix.unistd; // for sysconf
        import core.sys.posix.sys.mman; // for mmap
        import core.sys.posix.stdlib; // for malloc, valloc, free

        version( AsmX86_Win32 ) {} else
        version( AsmX86_Posix ) {} else
        version( AsmPPC_Posix ) {} else
        {
            // NOTE: The ucontext implementation requires architecture specific
            //       data definitions to operate so testing for it must be done
            //       by checking for the existence of ucontext_t rather than by
            //       a version identifier.  Please note that this is considered
            //       an obsolescent feature according to the POSIX spec, so a
            //       custom solution is still preferred.
            import core.sys.posix.ucontext;
        }
    }

    // Size of a memory page on this system; set in the static ctor below.
    const size_t PAGESIZE;
}
2539
2540
// Initialize PAGESIZE at module load, preferring an OS query and falling
// back to a conservative per-architecture constant.
static this()
{
    static if( is( typeof( GetSystemInfo ) ) )
    {
        // Windows: query the system information block directly.
        SYSTEM_INFO info;
        GetSystemInfo( &info );

        PAGESIZE = info.dwPageSize;
        assert( PAGESIZE < int.max );
    }
    else static if( is( typeof( sysconf ) ) &&
                    is( typeof( _SC_PAGESIZE ) ) )
    {
        // POSIX: query at runtime via sysconf.
        PAGESIZE = cast(size_t) sysconf( _SC_PAGESIZE );
        assert( PAGESIZE < int.max );
    }
    else
    {
        // No query mechanism available; fall back to a typical default.
        version( PPC )
            PAGESIZE = 8192;
        else
            PAGESIZE = 4096;
    }
}
2565
2566
2567 ///////////////////////////////////////////////////////////////////////////////
2568 // Fiber Entry Point and Context Switch
2569 ///////////////////////////////////////////////////////////////////////////////
2570
2571
2572 private
2573 {
    //
    // Common entry point for every fiber: runs the user-supplied callable,
    // records any escaping Object for Fiber.call to rethrow or return, and
    // finally switches out permanently with state TERM.
    //
    extern (C) void fiber_entryPoint()
    {
        Fiber obj = Fiber.getThis();
        assert( obj );

        // Complete the handshake started by switchIn: the first switch into
        // a new fiber arrives here with the owning thread's m_lock still set.
        assert( Thread.getThis().m_curr is obj.m_ctxt );
        volatile Thread.getThis().m_lock = false;
        obj.m_ctxt.tstack = obj.m_ctxt.bstack;
        obj.m_state = Fiber.State.EXEC;

        try
        {
            obj.run();
        }
        catch( Object o )
        {
            // Store the escaping object; Fiber.call inspects m_unhandled.
            obj.m_unhandled = o;
        }

        static if( is( ucontext_t ) )
            obj.m_ucur = &obj.m_utxt;

        // A fiber's entry function must never return normally from here;
        // mark it terminated and switch out for good.
        obj.m_state = Fiber.State.TERM;
        obj.switchOut();
    }
2599
2600
    // NOTE: If AsmPPC_Posix is defined then the context switch routine will
    //       be defined externally until GDC supports inline PPC ASM.
    version( AsmPPC_Posix )
        extern (C) void fiber_switchContext( void** oldp, void* newp );
    else
    extern (C) void fiber_switchContext( void** oldp, void* newp )
    {
        // NOTE: The data pushed and popped in this routine must match the
        //       default stack created by Fiber.initStack or the initial
        //       switch into a new context will fail.

        version( AsmX86_Win32 )
        {
            asm
            {
                naked;

                // save current stack state
                push EBP;
                mov EBP, ESP;
                push EAX;
                // Win32 keeps the SEH chain and stack bounds in the TIB at
                // FS:[0..8]; they must travel with the fiber's context.
                push dword ptr FS:[0];
                push dword ptr FS:[4];
                push dword ptr FS:[8];
                push EBX;
                push ESI;
                push EDI;

                // store oldp again with more accurate address
                mov EAX, dword ptr 8[EBP];
                mov [EAX], ESP;
                // load newp to begin context switch
                mov ESP, dword ptr 12[EBP];

                // load saved state from new stack
                pop EDI;
                pop ESI;
                pop EBX;
                pop dword ptr FS:[8];
                pop dword ptr FS:[4];
                pop dword ptr FS:[0];
                pop EAX;
                pop EBP;

                // 'return' to complete switch
                ret;
            }
        }
        else version( AsmX86_Posix )
        {
            asm
            {
                naked;

                // save current stack state
                push EBP;
                mov EBP, ESP;
                push EAX;
                push EBX;
                push ESI;
                push EDI;

                // store oldp again with more accurate address
                mov EAX, dword ptr 8[EBP];
                mov [EAX], ESP;
                // load newp to begin context switch
                mov ESP, dword ptr 12[EBP];

                // load saved state from new stack
                pop EDI;
                pop ESI;
                pop EBX;
                pop EAX;
                pop EBP;

                // 'return' to complete switch
                ret;
            }
        }
        else static if( is( ucontext_t ) )
        {
            Fiber cfib = Fiber.getThis();
            void* ucur = cfib.m_ucur;

            // NOTE(review): oldp is repointed at the local 'ucur' and is
            //               consumed immediately by the swapcontext call
            //               below; confirm nothing dereferences it after
            //               this frame is gone.
            *oldp = &ucur;
            swapcontext( **(cast(ucontext_t***) oldp),
                          *(cast(ucontext_t**) newp) );
        }
    }
2690 }
2691
2692
2693 ///////////////////////////////////////////////////////////////////////////////
2694 // Fiber
2695 ///////////////////////////////////////////////////////////////////////////////
2696
2697
2698 /**
2699 * This class provides a cooperative concurrency mechanism integrated with the
2700 * threading and garbage collection functionality. Calling a fiber may be
2701 * considered a blocking operation that returns when the fiber yields (via
2702 * Fiber.yield()). Execution occurs within the context of the calling thread
2703 * so synchronization is not necessary to guarantee memory visibility so long
2704 * as the same thread calls the fiber each time. Please note that there is no
2705 * requirement that a fiber be bound to one specific thread. Rather, fibers
2706 * may be freely passed between threads so long as they are not currently
2707 * executing. Like threads, a new fiber thread may be created using either
2708 * derivation or composition, as in the following example.
2709 *
2710 * Example:
2711 * ----------------------------------------------------------------------
2712 *
2713 * class DerivedFiber : Fiber
2714 * {
2715 * this()
2716 * {
2717 * super( &run );
2718 * }
2719 *
2720 * private :
2721 * void run()
2722 * {
2723 * printf( "Derived fiber running.\n" );
2724 * }
2725 * }
2726 *
2727 * void fiberFunc()
2728 * {
2729 * printf( "Composed fiber running.\n" );
2730 * Fiber.yield();
2731 * printf( "Composed fiber running.\n" );
2732 * }
2733 *
2734 * // create instances of each type
2735 * Fiber derived = new DerivedFiber();
2736 * Fiber composed = new Fiber( &fiberFunc );
2737 *
2738 * // call both fibers once
2739 * derived.call();
2740 * composed.call();
2741 * printf( "Execution returned to calling context.\n" );
2742 * composed.call();
2743 *
2744 * // since each fiber has run to completion, each should have state TERM
2745 * assert( derived.state == Fiber.State.TERM );
2746 * assert( composed.state == Fiber.State.TERM );
2747 *
2748 * ----------------------------------------------------------------------
2749 *
2750 * Authors: Based on a design by Mikola Lysenko.
2751 */
2752 class Fiber
2753 {
2754 ///////////////////////////////////////////////////////////////////////////
2755 // Initialization
2756 ///////////////////////////////////////////////////////////////////////////
2757
2758
2759 /**
2760 * Initializes a fiber object which is associated with a static
2761 * D function.
2762 *
2763 * Params:
2764 * fn = The thread function.
2765 * sz = The stack size for this fiber.
2766 *
2767 * In:
2768 * fn must not be null.
2769 */
2770 this( void function() fn, size_t sz = PAGESIZE )
2771 in
2772 {
2773 assert( fn );
2774 }
2775 body
2776 {
2777 m_fn = fn;
2778 m_call = Call.FN;
2779 m_state = State.HOLD;
2780 allocStack( sz );
2781 initStack();
2782 }
2783
2784
2785 /**
2786 * Initializes a fiber object which is associated with a dynamic
2787 * D function.
2788 *
2789 * Params:
2790 * dg = The thread function.
2791 * sz = The stack size for this fiber.
2792 *
2793 * In:
2794 * dg must not be null.
2795 */
2796 this( void delegate() dg, size_t sz = PAGESIZE )
2797 in
2798 {
2799 assert( dg );
2800 }
2801 body
2802 {
2803 m_dg = dg;
2804 m_call = Call.DG;
2805 m_state = State.HOLD;
2806 allocStack( sz );
2807 initStack();
2808 }
2809
2810
2811 /**
2812 * Cleans up any remaining resources used by this object.
2813 */
2814 ~this()
2815 {
2816 // NOTE: A live reference to this object will exist on its associated
2817 // stack from the first time its call() method has been called
2818 // until its execution completes with State.TERM. Thus, the only
2819 // times this dtor should be called are either if the fiber has
2820 // terminated (and therefore has no active stack) or if the user
2821 // explicitly deletes this object. The latter case is an error
2822 // but is not easily tested for, since State.HOLD may imply that
2823 // the fiber was just created but has never been run. There is
2824 // not a compelling case to create a State.INIT just to offer a
2825 // means of ensuring the user isn't violating this object's
2826 // contract, so for now this requirement will be enforced by
2827 // documentation only.
2828 freeStack();
2829 }
2830
2831
2832 ///////////////////////////////////////////////////////////////////////////
2833 // General Actions
2834 ///////////////////////////////////////////////////////////////////////////
2835
2836
2837 /**
2838 * Transfers execution to this fiber object. The calling context will be
2839 * suspended until the fiber calls Fiber.yield() or until it terminates
2840 * via an unhandled exception.
2841 *
2842 * Params:
2843 * rethrow = Rethrow any unhandled exception which may have caused this
2844 * fiber to terminate.
2845 *
2846 * In:
2847 * This fiber must be in state HOLD.
2848 *
2849 * Throws:
2850 * Any exception not handled by the joined thread.
2851 *
2852 * Returns:
2853 * Any exception not handled by this fiber if rethrow = false, null
2854 * otherwise.
2855 */
2856 final Object call( bool rethrow = true )
2857 in
2858 {
2859 assert( m_state == State.HOLD );
2860 }
2861 body
2862 {
2863 Fiber cur = getThis();
2864
2865 static if( is( ucontext_t ) )
2866 m_ucur = cur ? &cur.m_utxt : &Fiber.sm_utxt;
2867
2868 setThis( this );
2869 this.switchIn();
2870 setThis( cur );
2871
2872 static if( is( ucontext_t ) )
2873 m_ucur = null;
2874
2875 // NOTE: If the fiber has terminated then the stack pointers must be
2876 // reset. This ensures that the stack for this fiber is not
2877 // scanned if the fiber has terminated. This is necessary to
2878 // prevent any references lingering on the stack from delaying
2879 // the collection of otherwise dead objects. The most notable
2880 // being the current object, which is referenced at the top of
2881 // fiber_entryPoint.
2882 if( m_state == State.TERM )
2883 {
2884 m_ctxt.tstack = m_ctxt.bstack;
2885 }
2886 if( m_unhandled )
2887 {
2888 Object obj = m_unhandled;
2889 m_unhandled = null;
2890 if( rethrow )
2891 throw obj;
2892 return obj;
2893 }
2894 return null;
2895 }
2896
2897
2898 /**
2899 * Resets this fiber so that it may be re-used. This routine may only be
2900 * called for fibers that have terminated, as doing otherwise could result
2901 * in scope-dependent functionality that is not executed. Stack-based
2902 * classes, for example, may not be cleaned up properly if a fiber is reset
2903 * before it has terminated.
2904 *
2905 * In:
2906 * This fiber must be in state TERM.
2907 */
2908 final void reset()
2909 in
2910 {
2911 assert( m_state == State.TERM );
2912 assert( m_ctxt.tstack == m_ctxt.bstack );
2913 }
2914 body
2915 {
2916 m_state = State.HOLD;
2917 initStack();
2918 m_unhandled = null;
2919 }
2920
2921
2922 ///////////////////////////////////////////////////////////////////////////
2923 // General Properties
2924 ///////////////////////////////////////////////////////////////////////////
2925
2926
2927 /**
2928 * A fiber may occupy one of three states: HOLD, EXEC, and TERM. The HOLD
2929 * state applies to any fiber that is suspended and ready to be called.
2930 * The EXEC state will be set for any fiber that is currently executing.
2931 * And the TERM state is set when a fiber terminates. Once a fiber
2932 * terminates, it must be reset before it may be called again.
2933 */
2934 enum State
2935 {
2936 HOLD, ///
2937 EXEC, ///
2938 TERM ///
2939 }
2940
2941
2942 /**
2943 * Gets the current state of this fiber.
2944 *
2945 * Returns:
2946 * The state of this fiber as an enumerated value.
2947 */
2948 final State state()
2949 {
2950 return m_state;
2951 }
2952
2953
2954 ///////////////////////////////////////////////////////////////////////////
2955 // Actions on Calling Fiber
2956 ///////////////////////////////////////////////////////////////////////////
2957
2958
2959 /**
2960 * Forces a context switch to occur away from the calling fiber.
2961 */
2962 static void yield()
2963 {
2964 Fiber cur = getThis();
2965 assert( cur, "Fiber.yield() called with no active fiber" );
2966 assert( cur.m_state == State.EXEC );
2967
2968 static if( is( ucontext_t ) )
2969 cur.m_ucur = &cur.m_utxt;
2970
2971 cur.m_state = State.HOLD;
2972 cur.switchOut();
2973 cur.m_state = State.EXEC;
2974 }
2975
2976
2977 /**
2978 * Forces a context switch to occur away from the calling fiber and then
2979 * throws obj in the calling fiber.
2980 *
2981 * Params:
2982 * obj = The object to throw.
2983 *
2984 * In:
2985 * obj must not be null.
2986 */
2987 static void yieldAndThrow( Object obj )
2988 in
2989 {
2990 assert( obj );
2991 }
2992 body
2993 {
2994 Fiber cur = getThis();
2995 assert( cur, "Fiber.yield() called with no active fiber" );
2996 assert( cur.m_state == State.EXEC );
2997
2998 static if( is( ucontext_t ) )
2999 cur.m_ucur = &cur.m_utxt;
3000
3001 cur.m_unhandled = obj;
3002 cur.m_state = State.HOLD;
3003 cur.switchOut();
3004 cur.m_state = State.EXEC;
3005 }
3006
3007
3008 ///////////////////////////////////////////////////////////////////////////
3009 // Fiber Accessors
3010 ///////////////////////////////////////////////////////////////////////////
3011
3012
3013 /**
3014 * Provides a reference to the calling fiber or null if no fiber is
3015 * currently active.
3016 *
3017 * Returns:
3018 * The fiber object representing the calling fiber or null if no fiber
3019 * is currently active. The result of deleting this object is undefined.
3020 */
3021 static Fiber getThis()
3022 {
3023 version( Windows )
3024 {
3025 return cast(Fiber) TlsGetValue( sm_this );
3026 }
3027 else version( Posix )
3028 {
3029 return cast(Fiber) pthread_getspecific( sm_this );
3030 }
3031 }
3032
3033
3034 ///////////////////////////////////////////////////////////////////////////
3035 // Static Initialization
3036 ///////////////////////////////////////////////////////////////////////////
3037
3038
    // Allocate the TLS slot used to track the active fiber per thread and,
    // when the ucontext fallback is in use, capture a template context for
    // the main thread.
    static this()
    {
        version( Windows )
        {
            sm_this = TlsAlloc();
            assert( sm_this != TLS_OUT_OF_INDEXES );
        }
        else version( Posix )
        {
            int status;

            status = pthread_key_create( &sm_this, null );
            assert( status == 0 );

            static if( is( ucontext_t ) )
            {
                status = getcontext( &sm_utxt );
                assert( status == 0 );
            }
        }
    }
3060
3061
private:
    //
    // Initializes a fiber object which has no associated executable function.
    // NOTE(review): no caller is visible within this module chunk -- confirm
    // where such a bare fiber is constructed.
    //
    this()
    {
        m_call = Call.NO;
    }
3070
3071
3072 //
3073 // Fiber entry point. Invokes the function or delegate passed on
3074 // construction (if any).
3075 //
3076 final void run()
3077 {
3078 switch( m_call )
3079 {
3080 case Call.FN:
3081 m_fn();
3082 break;
3083 case Call.DG:
3084 m_dg();
3085 break;
3086 default:
3087 break;
3088 }
3089 }
3090
3091
private:
    //
    // The type of routine passed on fiber construction.
    //
    enum Call
    {
        NO, // no callable supplied (private default construction)
        FN, // m_fn holds a function pointer
        DG  // m_dg holds a delegate
    }


    //
    // Standard fiber data
    //
    Call m_call;                // selects which union member below is valid
    union
    {
        void function() m_fn;
        void delegate() m_dg;
    }
    // NOTE(review): m_isRunning is never referenced within this module
    // chunk -- confirm whether it is still used.
    bool m_isRunning;
    Object m_unhandled;         // exception that escaped the fiber, if any
    State m_state;              // current lifecycle state (see State)
3116
3117
3118 private:
3119 ///////////////////////////////////////////////////////////////////////////
3120 // Stack Management
3121 ///////////////////////////////////////////////////////////////////////////
3122
3123
3124 //
3125 // Allocate a new stack for this fiber.
3126 //
3127 final void allocStack( size_t sz )
3128 in
3129 {
3130 assert( !m_pmem && !m_ctxt );
3131 }
3132 body
3133 {
3134 // adjust alloc size to a multiple of PAGESIZE
3135 sz += PAGESIZE - 1;
3136 sz -= sz % PAGESIZE;
3137
3138 // NOTE: This instance of Thread.Context is dynamic so Fiber objects
3139 // can be collected by the GC so long as no user level references
3140 // to the object exist. If m_ctxt were not dynamic then its
3141 // presence in the global context list would be enough to keep
3142 // this object alive indefinitely. An alternative to allocating
3143 // room for this struct explicitly would be to mash it into the
3144 // base of the stack being allocated below. However, doing so
3145 // requires too much special logic to be worthwhile.
3146 m_ctxt = new Thread.Context;
3147
3148 static if( is( typeof( VirtualAlloc ) ) )
3149 {
3150 // reserve memory for stack
3151 m_pmem = VirtualAlloc( null,
3152 sz + PAGESIZE,
3153 MEM_RESERVE,
3154 PAGE_NOACCESS );
3155 if( !m_pmem )
3156 {
3157 throw new FiberException( "Unable to reserve memory for stack" );
3158 }
3159
3160 version( StackGrowsDown )
3161 {
3162 void* stack = m_pmem + PAGESIZE;
3163 void* guard = m_pmem;
3164 void* pbase = stack + sz;
3165 }
3166 else
3167 {
3168 void* stack = m_pmem;
3169 void* guard = m_pmem + sz;
3170 void* pbase = stack;
3171 }
3172
3173 // allocate reserved stack segment
3174 stack = VirtualAlloc( stack,
3175 sz,
3176 MEM_COMMIT,
3177 PAGE_READWRITE );
3178 if( !stack )
3179 {
3180 throw new FiberException( "Unable to allocate memory for stack" );
3181 }
3182
3183 // allocate reserved guard page
3184 guard = VirtualAlloc( guard,
3185 PAGESIZE,
3186 MEM_COMMIT,
3187 PAGE_READWRITE | PAGE_GUARD );
3188 if( !guard )
3189 {
3190 throw new FiberException( "Unable to create guard page for stack" );
3191 }
3192
3193 m_ctxt.bstack = pbase;
3194 m_ctxt.tstack = pbase;
3195 m_size = sz;
3196 }
3197 else
3198 { static if( is( typeof( mmap ) ) )
3199 {
3200 m_pmem = mmap( null,
3201 sz,
3202 PROT_READ | PROT_WRITE,
3203 MAP_PRIVATE | MAP_ANON,
3204 -1,
3205 0 );
3206 if( m_pmem == MAP_FAILED )
3207 m_pmem = null;
3208 }
3209 else static if( is( typeof( valloc ) ) )
3210 {
3211 m_pmem = valloc( sz );
3212 }
3213 else static if( is( typeof( malloc ) ) )
3214 {
3215 m_pmem = malloc( sz );
3216 }
3217 else
3218 {
3219 m_pmem = null;
3220 }
3221
3222 if( !m_pmem )
3223 {
3224 throw new FiberException( "Unable to allocate memory for stack" );
3225 }
3226
3227 version( StackGrowsDown )
3228 {
3229 m_ctxt.bstack = m_pmem + sz;
3230 m_ctxt.tstack = m_pmem + sz;
3231 }
3232 else
3233 {
3234 m_ctxt.bstack = m_pmem;
3235 m_ctxt.tstack = m_pmem;
3236 }
3237 m_size = sz;
3238 }
3239
3240 Thread.add( m_ctxt );
3241 }
3242
3243
3244 //
3245 // Free this fiber's stack.
3246 //
3247 final void freeStack()
3248 in
3249 {
3250 assert( m_pmem && m_ctxt );
3251 }
3252 body
3253 {
3254 // NOTE: Since this routine is only ever expected to be called from
3255 // the dtor, pointers to freed data are not set to null.
3256
3257 // NOTE: m_ctxt is guaranteed to be alive because it is held in the
3258 // global context list.
3259 Thread.remove( m_ctxt );
3260
3261 static if( is( typeof( VirtualAlloc ) ) )
3262 {
3263 VirtualFree( m_pmem, 0, MEM_RELEASE );
3264 }
3265 else static if( is( typeof( mmap ) ) )
3266 {
3267 munmap( m_pmem, m_size );
3268 }
3269 else static if( is( typeof( valloc ) ) )
3270 {
3271 free( m_pmem );
3272 }
3273 else static if( is( typeof( malloc ) ) )
3274 {
3275 free( m_pmem );
3276 }
3277 delete m_ctxt;
3278 }
3279
3280
3281 //
3282 // Initialize the allocated stack.
3283 //
3284 final void initStack()
3285 in
3286 {
3287 assert( m_ctxt.tstack && m_ctxt.tstack == m_ctxt.bstack );
3288 assert( cast(size_t) m_ctxt.bstack % (void*).sizeof == 0 );
3289 }
3290 body
3291 {
3292 void* pstack = m_ctxt.tstack;
3293 scope( exit ) m_ctxt.tstack = pstack;
3294
3295 void push( size_t val )
3296 {
3297 version( StackGrowsDown )
3298 {
3299 pstack -= size_t.sizeof;
3300 *(cast(size_t*) pstack) = val;
3301 }
3302 else
3303 {
3304 pstack += size_t.sizeof;
3305 *(cast(size_t*) pstack) = val;
3306 }
3307 }
3308
3309 // NOTE: On OS X the stack must be 16-byte aligned according to the
3310 // IA-32 call spec.
3311 version( OSX )
3312 {
3313 version( StackGrowsDown )
3314 {
3315 pstack = cast(void*)(cast(uint)(pstack) - (cast(uint)(pstack) & 0x0F));
3316 }
3317 else
3318 {
3319 pstack = cast(void*)(cast(uint)(pstack) + (cast(uint)(pstack) & 0x0F));
3320 }
3321 }
3322
3323 version( AsmX86_Win32 )
3324 {
3325 push( cast(size_t) &fiber_entryPoint ); // EIP
3326 push( 0xFFFFFFFF ); // EBP
3327 push( 0x00000000 ); // EAX
3328 push( 0xFFFFFFFF ); // FS:[0]
3329 version( StackGrowsDown )
3330 {
3331 push( cast(size_t) m_ctxt.bstack ); // FS:[4]
3332 push( cast(size_t) m_ctxt.bstack - m_size ); // FS:[8]
3333 }
3334 else
3335 {
3336 push( cast(size_t) m_ctxt.bstack ); // FS:[4]
3337 push( cast(size_t) m_ctxt.bstack + m_size ); // FS:[8]
3338 }
3339 push( 0x00000000 ); // EBX
3340 push( 0x00000000 ); // ESI
3341 push( 0x00000000 ); // EDI
3342 }
3343 else version( AsmX86_Posix )
3344 {
3345 push( 0x00000000 ); // Pad stack for OSX
3346 push( cast(size_t) &fiber_entryPoint ); // EIP
3347 push( 0x00000000 ); // EBP
3348 push( 0x00000000 ); // EAX
3349 push( 0x00000000 ); // EBX
3350 push( 0x00000000 ); // ESI
3351 push( 0x00000000 ); // EDI
3352 }
3353 else version( AsmPPC_Posix )
3354 {
3355 version( StackGrowsDown )
3356 {
3357 pstack -= int.sizeof * 5;
3358 }
3359 else
3360 {
3361 pstack += int.sizeof * 5;
3362 }
3363
3364 push( cast(size_t) &fiber_entryPoint ); // link register
3365 push( 0x00000000 ); // control register
3366 push( 0x00000000 ); // old stack pointer
3367
3368 // GPR values
3369 version( StackGrowsDown )
3370 {
3371 pstack -= int.sizeof * 20;
3372 }
3373 else
3374 {
3375 pstack += int.sizeof * 20;
3376 }
3377
3378 assert( cast(uint) pstack & 0x0f == 0 );
3379 }
3380 else static if( is( ucontext_t ) )
3381 {
3382 getcontext( &m_utxt );
3383 m_utxt.uc_stack.ss_sp = m_ctxt.bstack;
3384 m_utxt.uc_stack.ss_size = m_size;
3385 makecontext( &m_utxt, &fiber_entryPoint, 0 );
3386 // NOTE: If ucontext is being used then the top of the stack will
3387 // be a pointer to the ucontext_t struct for that fiber.
3388 push( cast(size_t) &m_utxt );
3389 }
3390 }
3391
3392
    Thread.Context* m_ctxt;     // stack-range record registered via Thread.add
    size_t m_size;              // usable stack size in bytes
    void* m_pmem;               // raw allocation backing the stack

    static if( is( ucontext_t ) )
    {
        // NOTE: The static ucontext instance is used to represent the context
        //       of the main application thread.
        static ucontext_t sm_utxt = void;
        ucontext_t m_utxt = void;
        ucontext_t* m_ucur = null;
    }
3405
3406
private:
    ///////////////////////////////////////////////////////////////////////////
    // Storage of Active Fiber
    ///////////////////////////////////////////////////////////////////////////


    //
    // Sets a thread-local reference to the current fiber object.  Passing
    // null clears the slot, indicating no fiber is active on this thread.
    //
    static void setThis( Fiber f )
    {
        version( Windows )
        {
            TlsSetValue( sm_this, cast(void*) f );
        }
        else version( Posix )
        {
            pthread_setspecific( sm_this, cast(void*) f );
        }
    }


    // TLS key shared by all threads; allocated once in the static ctor above.
    __gshared Thread.TLSKey sm_this;
3430
3431
3432 private:
3433 ///////////////////////////////////////////////////////////////////////////
3434 // Context Switching
3435 ///////////////////////////////////////////////////////////////////////////
3436
3437
3438 //
3439 // Switches into the stack held by this fiber.
3440 //
3441 final void switchIn()
3442 {
3443 Thread tobj = Thread.getThis();
3444 void** oldp = &tobj.m_curr.tstack;
3445 void* newp = m_ctxt.tstack;
3446
3447 // NOTE: The order of operations here is very important. The current
3448 // stack top must be stored before m_lock is set, and pushContext
3449 // must not be called until after m_lock is set. This process
3450 // is intended to prevent a race condition with the suspend
3451 // mechanism used for garbage collection. If it is not followed,
3452 // a badly timed collection could cause the GC to scan from the
3453 // bottom of one stack to the top of another, or to miss scanning
3454 // a stack that still contains valid data. The old stack pointer
3455 // oldp will be set again before the context switch to guarantee
3456 // that it points to exactly the correct stack location so the
3457 // successive pop operations will succeed.
3458 *oldp = getStackTop();
3459 volatile tobj.m_lock = true;
3460 tobj.pushContext( m_ctxt );
3461
3462 fiber_switchContext( oldp, newp );
3463
3464 // NOTE: As above, these operations must be performed in a strict order
3465 // to prevent Bad Things from happening.
3466 tobj.popContext();
3467 volatile tobj.m_lock = false;
3468 tobj.m_curr.tstack = tobj.m_curr.bstack;
3469 }
3470
3471
3472 //
3473 // Switches out of the current stack and into the enclosing stack.
3474 //
3475 final void switchOut()
3476 {
3477 Thread tobj = Thread.getThis();
3478 void** oldp = &m_ctxt.tstack;
3479 void* newp = tobj.m_curr.within.tstack;
3480
3481 // NOTE: The order of operations here is very important. The current
3482 // stack top must be stored before m_lock is set, and pushContext
3483 // must not be called until after m_lock is set. This process
3484 // is intended to prevent a race condition with the suspend
3485 // mechanism used for garbage collection. If it is not followed,
3486 // a badly timed collection could cause the GC to scan from the
3487 // bottom of one stack to the top of another, or to miss scanning
3488 // a stack that still contains valid data. The old stack pointer
3489 // oldp will be set again before the context switch to guarantee
3490 // that it points to exactly the correct stack location so the
3491 // successive pop operations will succeed.
3492 *oldp = getStackTop();
3493 volatile tobj.m_lock = true;
3494
3495 fiber_switchContext( oldp, newp );
3496
3497 // NOTE: As above, these operations must be performed in a strict order
3498 // to prevent Bad Things from happening.
3499 volatile tobj.m_lock = false;
3500 tobj.m_curr.tstack = tobj.m_curr.bstack;
3501 }
3502 }
3503
version (OSX)
{
    /* The Mach-O object file format does not allow for thread local storage
     * declarations. So, instead we roll our own by putting tls into
     * the sections __tlsdata and __tlscoal_nt.
     */

    extern (D)
    void* ___tls_get_addr(void* p)
    {
        // The address passed in is returned unchanged; no further
        // translation is performed here under the custom section scheme.
        return p;
    }
}