comparison tango/lib/common/tango/core/Thread.d @ 132:1700239cab2e trunk

[svn r136] MAJOR UNSTABLE UPDATE!!! Initial commit after moving to Tango instead of Phobos. Lots of bugfixes... This build is not suitable for most things.
author lindquist
date Fri, 11 Jan 2008 17:57:40 +0100
parents
children a168a2c3ea48
1 /**
2 * The thread module provides support for thread creation and management.
3 *
4 * Copyright: Copyright (C) 2005-2006 Sean Kelly. All rights reserved.
5 * License: BSD style: $(LICENSE)
6 * Authors: Sean Kelly
7 */
8 module tango.core.Thread;
9
10
11 // this should be true for most architectures
12 version = StackGrowsDown;
13
14
15 public
16 {
17 // import tango.core.TimeSpan;
18 }
19 private
20 {
21 import tango.core.Exception;
22
23
24 //
25 // exposed by compiler runtime
26 //
27 extern (C) void* rt_stackBottom();
28 extern (C) void* rt_stackTop();
29
30
31 void* getStackBottom()
32 {
33 return rt_stackBottom();
34 }
35
36
37 void* getStackTop()
38 {
39 version( D_InlineAsm_X86 )
40 {
41 asm
42 {
43 naked;
44 mov EAX, ESP;
45 ret;
46 }
47 }
48 else
49 {
50 return rt_stackTop();
51 }
52 }
53 }
54
55
56 ////////////////////////////////////////////////////////////////////////////////
57 // Thread Entry Point and Signal Handlers
58 ////////////////////////////////////////////////////////////////////////////////
59
60
61 version( Win32 )
62 {
63 private
64 {
65 import tango.stdc.stdint : uintptr_t; // for _beginthreadex decl below
66 import tango.sys.win32.UserGdi;
67
68 const DWORD TLS_OUT_OF_INDEXES = 0xFFFFFFFF;
69
70 //
71 // avoid multiple imports via tango.sys.windows.process
72 //
73 extern (Windows) alias uint function(void*) btex_fptr;
74 extern (C) uintptr_t _beginthreadex(void*, uint, btex_fptr, void*, uint, uint*);
75
76
77 //
78 // entry point for Windows threads
79 //
80 extern (Windows) uint thread_entryPoint( void* arg )
81 {
82 Thread obj = cast(Thread) arg;
83 assert( obj );
84 scope( exit ) Thread.remove( obj );
85
86 assert( obj.m_curr is &obj.m_main );
87 obj.m_main.bstack = getStackBottom();
88 obj.m_main.tstack = obj.m_main.bstack;
89 Thread.add( &obj.m_main );
90 Thread.setThis( obj );
91
92 // NOTE: No GC allocations may occur until the stack pointers have
93 // been set and Thread.getThis returns a valid reference to
94 // this thread object (this latter condition is not strictly
95 // necessary on Win32 but it should be followed for the sake
96 // of consistency).
97
98 // TODO: Consider putting an auto exception object here (using
99 // alloca) for OutOfMemoryError plus something to track
100 // whether an exception is in-flight?
101
102 try
103 {
104 obj.run();
105 }
106 catch( Object o )
107 {
108 obj.m_unhandled = o;
109 }
110 return 0;
111 }
112
113
114 //
115 // copy of the same-named function in phobos.std.thread--it uses the
116 // Windows naming convention to be consistent with GetCurrentThreadId
117 //
118 HANDLE GetCurrentThreadHandle()
119 {
120 const uint DUPLICATE_SAME_ACCESS = 0x00000002;
121
122 HANDLE curr = GetCurrentThread(),
123 proc = GetCurrentProcess(),
124 hndl;
125
126 DuplicateHandle( proc, curr, proc, &hndl, 0, TRUE, DUPLICATE_SAME_ACCESS );
127 return hndl;
128 }
129 }
130 }
131 else version( Posix )
132 {
133 private
134 {
135 import tango.stdc.posix.semaphore;
136 import tango.stdc.posix.pthread;
137 import tango.stdc.posix.signal;
138 import tango.stdc.posix.time;
139 import tango.stdc.errno;
140
141 extern (C) int getErrno();
142
143 version( GNU )
144 {
145 import gcc.builtins;
146 }
147
148
149 //
150 // entry point for POSIX threads
151 //
152 extern (C) void* thread_entryPoint( void* arg )
153 {
154 Thread obj = cast(Thread) arg;
155 assert( obj );
156 scope( exit )
157 {
158 // NOTE: isRunning should be set to false after the thread is
159 // removed or a double-removal could occur between this
160 // function and thread_suspendAll.
161 Thread.remove( obj );
162 obj.m_isRunning = false;
163 }
164
165 static extern (C) void thread_cleanupHandler( void* arg )
166 {
167 Thread obj = cast(Thread) arg;
168 assert( obj );
169
170 // NOTE: If the thread terminated abnormally, just set it as
171 // not running and let thread_suspendAll remove it from
172 // the thread list. This is safer and is consistent
173 // with the Windows thread code.
174 obj.m_isRunning = false;
175 }
176
177 // NOTE: Using void to skip the initialization here relies on
178 // knowledge of how pthread_cleanup is implemented. It may
179 // not be appropriate for all platforms. However, it does
180 // avoid the need to link the pthread module. If any
181 // implementation actually requires default initialization
182 // then pthread_cleanup should be restructured to maintain
183 // the current lack of a link dependency.
184 pthread_cleanup cleanup = void;
185 cleanup.push( &thread_cleanupHandler, cast(void*) obj );
186
187 // NOTE: For some reason this does not always work for threads.
188 //obj.m_main.bstack = getStackBottom();
189 version( D_InlineAsm_X86 )
190 {
191 static void* getBasePtr()
192 {
193 asm
194 {
195 naked;
196 mov EAX, EBP;
197 ret;
198 }
199 }
200
201 obj.m_main.bstack = getBasePtr();
202 }
203 else version( StackGrowsDown )
204 obj.m_main.bstack = &obj + 1;
205 else
206 obj.m_main.bstack = &obj;
207 obj.m_main.tstack = obj.m_main.bstack;
208 assert( obj.m_curr == &obj.m_main );
209 Thread.add( &obj.m_main );
210 Thread.setThis( obj );
211
212 // NOTE: No GC allocations may occur until the stack pointers have
213 // been set and Thread.getThis returns a valid reference to
214 // this thread object (this latter condition is not strictly
215 // necessary on Win32 but it should be followed for the sake
216 // of consistency).
217
218 // TODO: Consider putting an auto exception object here (using
219 // alloca) for OutOfMemoryError plus something to track
220 // whether an exception is in-flight?
221
222 try
223 {
224 obj.run();
225 }
226 catch( Object o )
227 {
228 obj.m_unhandled = o;
229 }
230 return null;
231 }
232
233
234 //
235 // used to track the number of suspended threads
236 //
237 sem_t suspendCount;
238
239
240 extern (C) void thread_suspendHandler( int sig )
241 in
242 {
243 assert( sig == SIGUSR1 );
244 }
245 body
246 {
247 version( D_InlineAsm_X86 )
248 {
249 asm
250 {
251 pushad;
252 }
253 }
254 else version( GNU )
255 {
256 __builtin_unwind_init();
257 }
258 else version( LLVMDC )
259 {
260 // TODO below as well
261 }
262 else
263 {
264 static assert( false, "Architecture not supported." );
265 }
266
267 // NOTE: Since registers are being pushed and popped from the stack,
268 // any other stack data used by this function should be gone
269 // before the stack cleanup code is called below.
270 {
271 Thread obj = Thread.getThis();
272
273 // NOTE: The thread reference returned by getThis is set within
274 // the thread startup code, so it is possible that this
275 // handler may be called before the reference is set. In
276 // this case it is safe to simply suspend and not worry
277 // about the stack pointers as the thread will not have
278 // any references to GC-managed data.
279 if( obj && !obj.m_lock )
280 {
281 obj.m_curr.tstack = getStackTop();
282 }
283
284 sigset_t sigres = void;
285 int status;
286
287 status = sigfillset( &sigres );
288 assert( status == 0 );
289
290 status = sigdelset( &sigres, SIGUSR2 );
291 assert( status == 0 );
292
293 status = sem_post( &suspendCount );
294 assert( status == 0 );
295
296 sigsuspend( &sigres );
297
298 if( obj && !obj.m_lock )
299 {
300 obj.m_curr.tstack = obj.m_curr.bstack;
301 }
302 }
303
304 version( D_InlineAsm_X86 )
305 {
306 asm
307 {
308 popad;
309 }
310 }
311 else version( GNU )
312 {
313 // registers will be popped automatically
314 }
315 else version( LLVMDC )
316 {
317 // TODO
318 }
319 else
320 {
321 static assert( false, "Architecture not supported." );
322 }
323 }
324
325
326 extern (C) void thread_resumeHandler( int sig )
327 in
328 {
329 assert( sig == SIGUSR2 );
330 }
331 body
332 {
333
334 }
335 }
336 }
337 else
338 {
339 // NOTE: This is the only place threading versions are checked. If a new
340 // version is added, the module code will need to be searched for
341 // places where version-specific code may be required. This can be
342 // easily accomplished by searching for 'Windows' or 'Posix'.
343 static assert( false, "Unknown threading implementation." );
344 }
345
346
347 ////////////////////////////////////////////////////////////////////////////////
348 // Thread
349 ////////////////////////////////////////////////////////////////////////////////
350
351
352 /**
353 * This class encapsulates all threading functionality for the D
354 * programming language. As thread manipulation is a required facility
355 * for garbage collection, all user threads should derive from this
356 * class, and instances of this class should never be explicitly deleted.
357 * A new thread may be created using either derivation or composition, as
358 * in the following example.
359 *
360 * Example:
361 * -----------------------------------------------------------------------------
362 *
363 * class DerivedThread : Thread
364 * {
365 * this()
366 * {
367 * super( &run );
368 * }
369 *
370 * private:
371 * void run()
372 * {
373 * printf( "Derived thread running.\n" );
374 * }
375 * }
376 *
377 * void threadFunc()
378 * {
379 * printf( "Composed thread running.\n" );
380 * }
381 *
382 * // create instances of each type
383 * Thread derived = new DerivedThread();
384 * Thread composed = new Thread( &threadFunc );
385 *
386 * // start both threads
387 * derived.start();
388 * composed.start();
389 *
390 * -----------------------------------------------------------------------------
391 */
392 class Thread
393 {
394 ////////////////////////////////////////////////////////////////////////////
395 // Initialization
396 ////////////////////////////////////////////////////////////////////////////
397
398
399 /**
400 * Initializes a thread object which is associated with a static
401 * D function.
402 *
403 * Params:
404 * fn = The thread function.
405 * sz = The stack size for this thread.
406 *
407 * In:
408 * fn must not be null.
409 */
410 this( void function() fn, size_t sz = 0 )
411 in
412 {
413 assert( fn );
414 }
415 body
416 {
417 m_fn = fn;
418 m_sz = sz;
419 m_call = Call.FN;
420 m_curr = &m_main;
421 }
422
423
424 /**
425 * Initializes a thread object which is associated with a dynamic
426 * D function.
427 *
428 * Params:
429 * dg = The thread function.
430 * sz = The stack size for this thread.
431 *
432 * In:
433 * dg must not be null.
434 */
435 this( void delegate() dg, size_t sz = 0 )
436 in
437 {
438 assert( dg );
439 }
440 body
441 {
442 m_dg = dg;
443 m_sz = sz;
444 m_call = Call.DG;
445 m_curr = &m_main;
446 }
447
448
449 /**
450 * Cleans up any remaining resources used by this object.
451 */
452 ~this()
453 {
454 if( m_addr == m_addr.init )
455 {
456 return;
457 }
458
459 version( Win32 )
460 {
461 m_addr = m_addr.init;
462 CloseHandle( m_hndl );
463 m_hndl = m_hndl.init;
464 }
465 else version( Posix )
466 {
467 pthread_detach( m_addr );
468 m_addr = m_addr.init;
469 }
470 }
471
472
473 ////////////////////////////////////////////////////////////////////////////
474 // General Actions
475 ////////////////////////////////////////////////////////////////////////////
476
477
478 /**
479 * Starts the thread and invokes the function or delegate passed upon
480 * construction.
481 *
482 * In:
483 * This routine may only be called once per thread instance.
484 *
485 * Throws:
486 * ThreadException if the thread fails to start.
487 */
488 final void start()
489 in
490 {
491 assert( !next && !prev );
492 }
493 body
494 {
495 version( Win32 ) {} else
496 version( Posix )
497 {
498 pthread_attr_t attr;
499
500 if( pthread_attr_init( &attr ) )
501 throw new ThreadException( "Error initializing thread attributes" );
502 if( m_sz && pthread_attr_setstacksize( &attr, m_sz ) )
503 throw new ThreadException( "Error initializing thread stack size" );
504 }
505
506 // NOTE: This operation needs to be synchronized to avoid a race
507 // condition with the GC. Without this lock, the thread
508 // could start and allocate memory before being added to
509 // the global thread list, preventing it from being scanned
510 // and causing memory to be collected that is still in use.
511 synchronized( slock )
512 {
513 version( Win32 )
514 {
515 m_hndl = cast(HANDLE) _beginthreadex( null, m_sz, &thread_entryPoint, cast(void*) this, 0, &m_addr );
516 if( cast(size_t) m_hndl == 0 )
517 throw new ThreadException( "Error creating thread" );
518 }
519 else version( Posix )
520 {
521 m_isRunning = true;
522 scope( failure ) m_isRunning = false;
523
524 if( pthread_create( &m_addr, &attr, &thread_entryPoint, cast(void*) this ) != 0 )
525 throw new ThreadException( "Error creating thread" );
526 }
527 multiThreadedFlag = true;
528 add( this );
529 }
530 }
531
532
533 /**
534 * Waits for this thread to complete. If the thread terminated as the
535 * result of an unhandled exception, this exception will be rethrown.
536 *
537 * Params:
538 * rethrow = Rethrow any unhandled exception which may have caused this
539 * thread to terminate.
540 *
541 * Throws:
542 * ThreadException if the operation fails.
543 * Any exception not handled by the joined thread.
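 *
 * Example:
 * A sketch of waiting for a worker thread and handling any exception it
 * may have raised (workFunc is a hypothetical thread function):
 * -------------------------------------------------------------------------
 *
 * Thread worker = new Thread( &workFunc );
 *
 * worker.start();
 * try
 * {
 *     worker.join();
 * }
 * catch( Object o )
 * {
 *     // o is the exception that terminated workFunc
 * }
 *
 * -------------------------------------------------------------------------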
544 */
545 final void join( bool rethrow = true )
546 {
547 version( Win32 )
548 {
549 if( WaitForSingleObject( m_hndl, INFINITE ) != WAIT_OBJECT_0 )
550 throw new ThreadException( "Unable to join thread" );
551 // NOTE: m_addr must be cleared before m_hndl is closed to avoid
552 // a race condition with isRunning. The operation is labeled
553 // volatile to prevent compiler reordering.
554 volatile m_addr = m_addr.init;
555 CloseHandle( m_hndl );
556 m_hndl = m_hndl.init;
557 }
558 else version( Posix )
559 {
560 if( pthread_join( m_addr, null ) != 0 )
561 throw new ThreadException( "Unable to join thread" );
562 // NOTE: pthread_join acts as a substitute for pthread_detach,
563 // which is normally called by the dtor. Setting m_addr
564 // to zero ensures that pthread_detach will not be called
565 // on object destruction.
566 volatile m_addr = m_addr.init;
567 }
568 if( rethrow && m_unhandled )
569 {
570 throw m_unhandled;
571 }
572 }
573
574
575 ////////////////////////////////////////////////////////////////////////////
576 // General Properties
577 ////////////////////////////////////////////////////////////////////////////
578
579
580 /**
581 * Gets the user-readable label for this thread.
582 *
583 * Returns:
584 * The name of this thread.
585 */
586 final char[] name()
587 {
588 synchronized( this )
589 {
590 return m_name;
591 }
592 }
593
594
595 /**
596 * Sets the user-readable label for this thread.
597 *
598 * Params:
599 * val = The new name of this thread.
600 */
601 final void name( char[] val )
602 {
603 synchronized( this )
604 {
605 m_name = val.dup;
606 }
607 }
608
609
610 /**
611 * Gets the daemon status for this thread.
612 *
613 * Returns:
614 * true if this is a daemon thread.
615 */
616 final bool isDaemon()
617 {
618 synchronized( this )
619 {
620 return m_isDaemon;
621 }
622 }
623
624
625 /**
626 * Sets the daemon status for this thread.
627 *
628 * Params:
629 * val = The new daemon status for this thread.
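 *
 * Example:
 * A sketch of marking a background worker as a daemon thread so that
 * thread_joinAll will not wait for it (pollFunc is hypothetical):
 * -------------------------------------------------------------------------
 *
 * Thread worker = new Thread( &pollFunc );
 *
 * worker.isDaemon = true;
 * worker.start();
 *
 * -------------------------------------------------------------------------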
630 */
631 final void isDaemon( bool val )
632 {
633 synchronized( this )
634 {
635 m_isDaemon = val;
636 }
637 }
638
639
640 /**
641 * Tests whether this thread is running.
642 *
643 * Returns:
644 * true if the thread is running, false if not.
645 */
646 final bool isRunning()
647 {
648 if( m_addr == m_addr.init )
649 {
650 return false;
651 }
652
653 version( Win32 )
654 {
655 uint ecode = 0;
656 GetExitCodeThread( m_hndl, &ecode );
657 return ecode == STILL_ACTIVE;
658 }
659 else version( Posix )
660 {
661 // NOTE: It should be safe to access this value without
662 // memory barriers because word-tearing and such
663 // really isn't an issue for boolean values.
664 return m_isRunning;
665 }
666 }
667
668
669 ////////////////////////////////////////////////////////////////////////////
670 // Thread Priority Actions
671 ////////////////////////////////////////////////////////////////////////////
672
673
674 /**
675 * The minimum scheduling priority that may be set for a thread. On
676 * systems where multiple scheduling policies are defined, this value
677 * represents the minimum valid priority for the scheduling policy of
678 * the process.
679 */
680 static const int PRIORITY_MIN;
681
682
683 /**
684 * The maximum scheduling priority that may be set for a thread. On
685 * systems where multiple scheduling policies are defined, this value
686 * represents the maximum valid priority for the scheduling policy of
687 * the process.
688 */
689 static const int PRIORITY_MAX;
690
691
692 /**
693 * Gets the scheduling priority for the associated thread.
694 *
695 * Returns:
696 * The scheduling priority of this thread.
697 */
698 final int priority()
699 {
700 version( Win32 )
701 {
702 return GetThreadPriority( m_hndl );
703 }
704 else version( Posix )
705 {
706 int policy;
707 sched_param param;
708
709 if( pthread_getschedparam( m_addr, &policy, &param ) )
710 throw new ThreadException( "Unable to get thread priority" );
711 return param.sched_priority;
712 }
713 }
714
715
716 /**
717 * Sets the scheduling priority for the associated thread.
718 *
719 * Params:
720 * val = The new scheduling priority of this thread.
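 *
 * Example:
 * A sketch that raises a running thread's priority to the maximum value
 * supported by the current scheduling policy (workFunc is hypothetical):
 * -------------------------------------------------------------------------
 *
 * Thread worker = new Thread( &workFunc );
 *
 * worker.start();
 * worker.priority = Thread.PRIORITY_MAX;
 *
 * -------------------------------------------------------------------------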
721 */
722 final void priority( int val )
723 {
724 version( Win32 )
725 {
726 if( !SetThreadPriority( m_hndl, val ) )
727 throw new ThreadException( "Unable to set thread priority" );
728 }
729 else version( Posix )
730 {
731 // NOTE: pthread_setschedprio is not implemented on linux, so use
732 // the more complicated get/set sequence below.
733 //if( pthread_setschedprio( m_addr, val ) )
734 // throw new ThreadException( "Unable to set thread priority" );
735
736 int policy;
737 sched_param param;
738
739 if( pthread_getschedparam( m_addr, &policy, &param ) )
740 throw new ThreadException( "Unable to set thread priority" );
741 param.sched_priority = val;
742 if( pthread_setschedparam( m_addr, policy, &param ) )
743 throw new ThreadException( "Unable to set thread priority" );
744 }
745 }
746
747
748 ////////////////////////////////////////////////////////////////////////////
749 // Actions on Calling Thread
750 ////////////////////////////////////////////////////////////////////////////
751
752
753 /**
754 * Suspends the calling thread for at least the supplied time, up to a
755 * maximum of (uint.max - 1) milliseconds.
756 *
757 * Params:
758 * period = The minimum duration the calling thread should be suspended,
759 * in seconds. Sub-second durations are specified as fractional
760 * values.
761 *
762 * In:
763 * period must be less than (uint.max - 1) milliseconds.
764 *
765 * Example:
766 * -------------------------------------------------------------------------
767 *
768 * Thread.sleep( 0.05 ); // sleep for 50 milliseconds
769 * Thread.sleep( 5 ); // sleep for 5 seconds
770 *
771 * -------------------------------------------------------------------------
772 */
773 static void sleep( double period )
774 in
775 {
776 // NOTE: The fractional value added to period is to correct fp error.
777 assert( period * 1000 + 0.1 < uint.max - 1 );
778 }
779 body
780 {
781 version( Win32 )
782 {
783 Sleep( cast(uint)( period * 1000 + 0.1 ) );
784 }
785 else version( Posix )
786 {
787 timespec tin = void;
788 timespec tout = void;
789
790 period += 0.000_000_000_1;
791
792 if( tin.tv_sec.max < period )
793 {
794 tin.tv_sec = tin.tv_sec.max;
795 tin.tv_nsec = 0;
796 }
797 else
798 {
799 tin.tv_sec = cast(typeof(tin.tv_sec)) period;
800 tin.tv_nsec = cast(typeof(tin.tv_nsec)) ((period % 1.0) * 1_000_000_000);
801 }
802
803 while( true )
804 {
805 if( !nanosleep( &tin, &tout ) )
806 return;
807 if( getErrno() != EINTR )
808 throw new ThreadException( "Unable to sleep for specified duration" );
809 tin = tout;
810 }
811 }
812 }
813
814
815 /+
816 /**
817 * Suspends the calling thread for at least the supplied time, up to a
818 * maximum of (uint.max - 1) milliseconds.
819 *
820 * Params:
821 * period = The minimum duration the calling thread should be suspended.
822 *
823 * In:
824 * period must be less than (uint.max - 1) milliseconds.
825 *
826 * Example:
827 * -------------------------------------------------------------------------
828 *
829 * Thread.sleep( TimeSpan.milliseconds( 50 ) ); // sleep for 50 milliseconds
830 * Thread.sleep( TimeSpan.seconds( 5 ) ); // sleep for 5 seconds
831 *
832 * -------------------------------------------------------------------------
833 */
834 static void sleep( TimeSpan period )
835 in
836 {
837 assert( period.milliseconds < uint.max - 1 );
838 }
839 body
840 {
841 version( Win32 )
842 {
843 Sleep( cast(uint)( period.milliseconds ) );
844 }
845 else version( Posix )
846 {
847 timespec tin = void;
848 timespec tout = void;
849
850 if( tin.tv_sec.max < period.seconds )
851 {
852 tin.tv_sec = tin.tv_sec.max;
853 tin.tv_nsec = 0;
854 }
855 else
856 {
857 tin.tv_sec = cast(typeof(tin.tv_sec)) period.seconds;
858 tin.tv_nsec = cast(typeof(tin.tv_nsec)) period.nanoseconds % 1_000_000_000;
859 }
860
861 while( true )
862 {
863 if( !nanosleep( &tin, &tout ) )
864 return;
865 if( getErrno() != EINTR )
866 throw new ThreadException( "Unable to sleep for specified duration" );
867 tin = tout;
868 }
869 }
870 }
871
872
873 /**
874 * Suspends the calling thread for at least the supplied time, up to a
875 * maximum of (uint.max - 1) milliseconds.
876 *
877 * Params:
878 * period = The minimum duration the calling thread should be suspended,
879 * in seconds. Sub-second durations are specified as fractional
880 * values. Please note that because period is a floating-point
881 * number, some accuracy may be lost for certain intervals. For
882 * this reason, the TimeSpan overload is preferred in instances
883 * where an exact interval is required.
884 *
885 * In:
886 * period must be less than (uint.max - 1) milliseconds.
887 *
888 * Example:
889 * -------------------------------------------------------------------------
890 *
891 * Thread.sleep( 0.05 ); // sleep for 50 milliseconds
892 * Thread.sleep( 5 ); // sleep for 5 seconds
893 *
894 * -------------------------------------------------------------------------
895 */
896 static void sleep( double period )
897 {
898 sleep( TimeSpan.interval( period ) );
899 }
900 +/
901
902
903 /**
904 * Forces a context switch to occur away from the calling thread.
905 */
906 static void yield()
907 {
908 version( Win32 )
909 {
910 // NOTE: Sleep(1) is necessary because Sleep(0) does not give
911 // lower priority threads any timeslice, so looping on
912 // Sleep(0) could be resource-intensive in some cases.
913 Sleep( 1 );
914 }
915 else version( Posix )
916 {
917 sched_yield();
918 }
919 }
920
921
922 ////////////////////////////////////////////////////////////////////////////
923 // Thread Accessors
924 ////////////////////////////////////////////////////////////////////////////
925
926
927 /**
928 * Provides a reference to the calling thread.
929 *
930 * Returns:
931 * The thread object representing the calling thread. The result of
932 * deleting this object is undefined.
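 *
 * Example:
 * A sketch that labels the calling thread for debugging purposes:
 * -------------------------------------------------------------------------
 *
 * Thread.getThis().name = "worker";
 *
 * -------------------------------------------------------------------------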
933 */
934 static Thread getThis()
935 {
936 // NOTE: This function may not be called until thread_init has
937 // completed. See thread_suspendAll for more information
938 // on why this might occur.
939 version( Win32 )
940 {
941 return cast(Thread) TlsGetValue( sm_this );
942 }
943 else version( Posix )
944 {
945 return cast(Thread) pthread_getspecific( sm_this );
946 }
947 }
948
949
950 /**
951 * Provides a list of all threads currently being tracked by the system.
952 *
953 * Returns:
954 * An array containing references to all threads currently being
955 * tracked by the system. The result of deleting any contained
956 * objects is undefined.
957 */
958 static Thread[] getAll()
959 {
960 synchronized( slock )
961 {
962 size_t pos = 0;
963 Thread[] buf = new Thread[sm_tlen];
964
965 foreach( Thread t; Thread )
966 {
967 buf[pos++] = t;
968 }
969 return buf;
970 }
971 }
972
973
974 /**
975 * Operates on all threads currently being tracked by the system. The
976 * result of deleting any Thread object is undefined.
977 *
978 * Params:
979 *
980 * dg = The supplied code as a delegate.
981 *
982 * Returns:
983 * Zero if all threads are visited, nonzero if not.
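 *
 * Example:
 * A sketch that counts the tracked threads via the implicit foreach
 * support this method provides:
 * -------------------------------------------------------------------------
 *
 * int count = 0;
 *
 * foreach( Thread t; Thread )
 * {
 *     ++count;
 * }
 * printf( "%d threads are currently tracked.\n", count );
 *
 * -------------------------------------------------------------------------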
984 */
985 static int opApply( int delegate( inout Thread ) dg )
986 {
987 synchronized( slock )
988 {
989 int ret = 0;
990
991 for( Thread t = sm_tbeg; t; t = t.next )
992 {
993 ret = dg( t );
994 if( ret )
995 break;
996 }
997 return ret;
998 }
999 }
1000
1001
1002 ////////////////////////////////////////////////////////////////////////////
1003 // Local Storage Actions
1004 ////////////////////////////////////////////////////////////////////////////
1005
1006
1007 /**
1008 * Indicates the number of local storage pointers available at program
1009 * startup. It is recommended that this number be at least 64.
1010 */
1011 static const uint LOCAL_MAX = 64;
1012
1013
1014 /**
1015 * Reserves a local storage pointer for use and initializes this
1016 * location to null for all running threads.
1017 *
1018 * Returns:
1019 * A key representing the array offset of this memory location.
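 *
 * Example:
 * A minimal sketch of the raw local storage interface; the ThreadLocal
 * class defined later in this module is generally more convenient:
 * -------------------------------------------------------------------------
 *
 * uint key = Thread.createLocal();
 *
 * Thread.setLocal( key, cast(void*) 42 );
 * assert( cast(int) Thread.getLocal( key ) == 42 );
 * Thread.deleteLocal( key );
 *
 * -------------------------------------------------------------------------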
1020 */
1021 static uint createLocal()
1022 {
1023 synchronized( slock )
1024 {
1025 foreach( uint key, inout bool set; sm_local )
1026 {
1027 if( !set )
1028 {
1029 //foreach( Thread t; sm_tbeg ) Bug in GDC 0.24 SVN (r139)
1030 for( Thread t = sm_tbeg; t; t = t.next )
1031 {
1032 t.m_local[key] = null;
1033 }
1034 set = true;
1035 return key;
1036 }
1037 }
1038 throw new ThreadException( "No more local storage slots available" );
1039 }
1040 }
1041
1042
1043 /**
1044 * Marks the supplied key as available and sets the associated location
1045 * to null for all running threads. It is assumed that any key passed
1046 * to this function is valid. The result of calling this function for
1047 * a key which is still in use is undefined.
1048 *
1049 * Params:
1050 * key = The key to delete.
1051 */
1052 static void deleteLocal( uint key )
1053 {
1054 synchronized( slock )
1055 {
1056 sm_local[key] = false;
1057 // foreach( Thread t; sm_tbeg ) Bug in GDC 0.24 SVN (r139)
1058 for( Thread t = sm_tbeg; t; t = t.next )
1059 {
1060 t.m_local[key] = null;
1061 }
1062 }
1063 }
1064
1065
1066 /**
1067 * Gets the data associated with the supplied key value. It is assumed
1068 * that any key passed to this function is valid.
1069 *
1070 * Params:
1071 * key = The location which holds the desired data.
1072 *
1073 * Returns:
1074 * The data associated with the supplied key.
1075 */
1076 static void* getLocal( uint key )
1077 {
1078 return getThis().m_local[key];
1079 }
1080
1081
1082 /**
1083 * Stores the supplied value in the specified location. It is assumed
1084 * that any key passed to this function is valid.
1085 *
1086 * Params:
1087 * key = The location to store the supplied data.
1088 * val = The data to store.
1089 *
1090 * Returns:
1091 * A copy of the data which has just been stored.
1092 */
1093 static void* setLocal( uint key, void* val )
1094 {
1095 return getThis().m_local[key] = val;
1096 }
1097
1098
1099 ////////////////////////////////////////////////////////////////////////////
1100 // Static Initializer
1101 ////////////////////////////////////////////////////////////////////////////
1102
1103
1104 /**
1105 * This initializer is used to set thread constants. All functional
1106 * initialization occurs within thread_init().
1107 */
1108 static this()
1109 {
1110 version( Win32 )
1111 {
1112 PRIORITY_MIN = -15;
1113 PRIORITY_MAX = 15;
1114 }
1115 else version( Posix )
1116 {
1117 int policy;
1118 sched_param param;
1119 pthread_t self = pthread_self();
1120
1121 int status = pthread_getschedparam( self, &policy, &param );
1122 assert( status == 0 );
1123
1124 PRIORITY_MIN = sched_get_priority_min( policy );
1125 assert( PRIORITY_MIN != -1 );
1126
1127 PRIORITY_MAX = sched_get_priority_max( policy );
1128 assert( PRIORITY_MAX != -1 );
1129 }
1130 }
1131
1132
1133 private:
1134 //
1135 // Initializes a thread object which has no associated executable function.
1136 // This is used for the main thread initialized in thread_init().
1137 //
1138 this()
1139 {
1140 m_call = Call.NO;
1141 m_curr = &m_main;
1142 }
1143
1144
1145 //
1146 // Thread entry point. Invokes the function or delegate passed on
1147 // construction (if any).
1148 //
1149 final void run()
1150 {
1151 switch( m_call )
1152 {
1153 case Call.FN:
1154 m_fn();
1155 break;
1156 case Call.DG:
1157 m_dg();
1158 break;
1159 default:
1160 break;
1161 }
1162 }
1163
1164
1165 private:
1166 //
1167 // The type of routine passed on thread construction.
1168 //
1169 enum Call
1170 {
1171 NO,
1172 FN,
1173 DG
1174 }
1175
1176
1177 //
1178 // Standard types
1179 //
1180 version( Win32 )
1181 {
1182 alias uint TLSKey;
1183 alias uint ThreadAddr;
1184 }
1185 else version( Posix )
1186 {
1187 alias pthread_key_t TLSKey;
1188 alias pthread_t ThreadAddr;
1189 }
1190
1191
1192 //
1193 // Local storage
1194 //
1195 static bool[LOCAL_MAX] sm_local;
1196 static TLSKey sm_this;
1197
1198 void*[LOCAL_MAX] m_local;
1199
1200
1201 //
1202 // Standard thread data
1203 //
1204 version( Win32 )
1205 {
1206 HANDLE m_hndl;
1207 }
1208 ThreadAddr m_addr;
1209 Call m_call;
1210 char[] m_name;
1211 union
1212 {
1213 void function() m_fn;
1214 void delegate() m_dg;
1215 }
1216 size_t m_sz;
1217 version( Posix )
1218 {
1219 bool m_isRunning;
1220 }
1221 bool m_isDaemon;
1222 Object m_unhandled;
1223
1224
1225 private:
1226 ////////////////////////////////////////////////////////////////////////////
1227 // Storage of Active Thread
1228 ////////////////////////////////////////////////////////////////////////////
1229
1230
1231 //
1232 // Sets a thread-local reference to the current thread object.
1233 //
1234 static void setThis( Thread t )
1235 {
1236 version( Win32 )
1237 {
1238 TlsSetValue( sm_this, cast(void*) t );
1239 }
1240 else version( Posix )
1241 {
1242 pthread_setspecific( sm_this, cast(void*) t );
1243 }
1244 }
1245
1246
1247 private:
1248 ////////////////////////////////////////////////////////////////////////////
1249 // Thread Context and GC Scanning Support
1250 ////////////////////////////////////////////////////////////////////////////
1251
1252
1253 final void pushContext( Context* c )
1254 in
1255 {
1256 assert( !c.within );
1257 }
1258 body
1259 {
1260 c.within = m_curr;
1261 m_curr = c;
1262 }
1263
1264
1265 final void popContext()
1266 in
1267 {
1268 assert( m_curr && m_curr.within );
1269 }
1270 body
1271 {
1272 Context* c = m_curr;
1273 m_curr = c.within;
1274 c.within = null;
1275 }
1276
1277
1278 final Context* topContext()
1279 in
1280 {
1281 assert( m_curr );
1282 }
1283 body
1284 {
1285 return m_curr;
1286 }
1287
1288
1289 static struct Context
1290 {
1291 void* bstack,
1292 tstack;
1293 Context* within;
1294 Context* next,
1295 prev;
1296 }
1297
1298
1299 Context m_main;
1300 Context* m_curr;
1301 bool m_lock;
1302
1303 version( Win32 )
1304 {
1305 uint[8] m_reg; // edi,esi,ebp,esp,ebx,edx,ecx,eax
1306 }
1307
1308
1309 private:
1310 ////////////////////////////////////////////////////////////////////////////
1311 // GC Scanning Support
1312 ////////////////////////////////////////////////////////////////////////////
1313
1314
1315 // NOTE: The GC scanning process works like so:
1316 //
1317 // 1. Suspend all threads.
1318 // 2. Scan the stacks of all suspended threads for roots.
1319 // 3. Resume all threads.
1320 //
1321 // Step 1 and 3 require a list of all threads in the system, while
1322 // step 2 requires a list of all thread stacks (each represented by
1323 // a Context struct). Traditionally, there was one stack per thread
1324 // and the Context structs were not necessary. However, Fibers have
1325 // changed things so that each thread has its own 'main' stack plus
1326 // an arbitrary number of nested stacks (normally referenced via
1327 // m_curr). Also, there may be 'free-floating' stacks in the system,
1328 // which are Fibers that are not currently executing on any specific
1329 // thread but are still being processed and still contain valid
1330 // roots.
1331 //
1332 // To support all of this, the Context struct has been created to
1333 // represent a stack range, and a global list of Context structs has
1334 // been added to enable scanning of these stack ranges. The lifetime
1335 // (and presence in the Context list) of a thread's 'main' stack will
1336 // be equivalent to the thread's lifetime. So the Context will be
1337 // added to the list on thread entry, and removed from the list on
1338 // thread exit (which is essentially the same as the presence of a
1339 // Thread object in its own global list). The lifetime of a Fiber's
1340 // context, however, will be tied to the lifetime of the Fiber object
1341 // itself, and Fibers are expected to add/remove their Context struct
1342 // on construction/deletion.
1343
1344
1345 //
1346 // All use of the global lists should synchronize on this lock.
1347 //
1348 static Object slock()
1349 {
1350 return Thread.classinfo;
1351 }
1352
1353
1354 static Context* sm_cbeg;
1355 static size_t sm_clen;
1356
1357 static Thread sm_tbeg;
1358 static size_t sm_tlen;
1359
1360 //
1361 // Used for ordering threads in the global thread list.
1362 //
1363 Thread prev;
1364 Thread next;
1365
1366
1367 ////////////////////////////////////////////////////////////////////////////
1368 // Global Context List Operations
1369 ////////////////////////////////////////////////////////////////////////////
1370
1371
1372 //
1373 // Add a context to the global context list.
1374 //
1375 static void add( Context* c )
1376 in
1377 {
1378 assert( c );
1379 assert( !c.next && !c.prev );
1380 }
1381 body
1382 {
1383 synchronized( slock )
1384 {
1385 if( sm_cbeg )
1386 {
1387 c.next = sm_cbeg;
1388 sm_cbeg.prev = c;
1389 }
1390 sm_cbeg = c;
1391 ++sm_clen;
1392 }
1393 }
1394
1395
1396 //
1397 // Remove a context from the global context list.
1398 //
1399 static void remove( Context* c )
1400 in
1401 {
1402 assert( c );
1403 assert( c.next || c.prev );
1404 }
1405 body
1406 {
1407 synchronized( slock )
1408 {
1409 if( c.prev )
1410 c.prev.next = c.next;
1411 if( c.next )
1412 c.next.prev = c.prev;
1413 if( sm_cbeg == c )
1414 sm_cbeg = c.next;
1415 --sm_clen;
1416 }
1417 // NOTE: Don't null out c.next or c.prev because opApply currently
1418 // follows c.next after removing a node. This could be easily
1419 // addressed by simply returning the next node from this function,
1420 // however, a context should never be re-added to the list anyway
1421 // and having next and prev be non-null is a good way to
1422 // ensure that.
1423 }
1424
1425
1426 ////////////////////////////////////////////////////////////////////////////
1427 // Global Thread List Operations
1428 ////////////////////////////////////////////////////////////////////////////
1429
1430
1431 //
1432 // Add a thread to the global thread list.
1433 //
1434 static void add( Thread t )
1435 in
1436 {
1437 assert( t );
1438 assert( !t.next && !t.prev );
1439 assert( t.isRunning );
1440 }
1441 body
1442 {
1443 synchronized( slock )
1444 {
1445 if( sm_tbeg )
1446 {
1447 t.next = sm_tbeg;
1448 sm_tbeg.prev = t;
1449 }
1450 sm_tbeg = t;
1451 ++sm_tlen;
1452 }
1453 }
1454
1455
1456 //
1457 // Remove a thread from the global thread list.
1458 //
1459 static void remove( Thread t )
1460 in
1461 {
1462 assert( t );
1463 assert( t.next || t.prev );
1464 version( Win32 )
1465 {
1466 // NOTE: This doesn't work for Posix as m_isRunning must be set to
1467 // false after the thread is removed during normal execution.
1468 assert( !t.isRunning );
1469 }
1470 }
1471 body
1472 {
1473 synchronized( slock )
1474 {
1475 // NOTE: When a thread is removed from the global thread list its
1476 // main context is invalid and should be removed as well.
1477 // It is possible that t.m_curr could reference more
1478 // than just the main context if the thread exited abnormally
1479 // (if it was terminated), but we must assume that the user
1480 // retains a reference to them and that they may be re-used
1481 // elsewhere. Therefore, it is the responsibility of any
1482 // object that creates contexts to clean them up properly
1483 // when it is done with them.
1484 remove( &t.m_main );
1485
1486 if( t.prev )
1487 t.prev.next = t.next;
1488 if( t.next )
1489 t.next.prev = t.prev;
1490 if( sm_tbeg == t )
1491 sm_tbeg = t.next;
1492 --sm_tlen;
1493 }
1494 // NOTE: Don't null out t.next or t.prev because opApply currently
1495 // follows t.next after removing a node. This could be easily
1496 // addressed by simply returning the next node from this function,
1497 // however, a thread should never be re-added to the list anyway
1498 // and having next and prev be non-null is a good way to
1499 // ensure that.
1500 }
1501 }
1502
1503
1504 ////////////////////////////////////////////////////////////////////////////////
1505 // GC Support Routines
1506 ////////////////////////////////////////////////////////////////////////////////
1507
1508
1509 /**
1510 * Initializes the thread module. This function must be called by the
1511 * garbage collector on startup and before any other thread routines
1512 * are called.
1513 */
1514 extern (C) void thread_init()
1515 {
1516 // NOTE: If thread_init itself performs any allocations then the thread
1517 // routines reserved for garbage collector use may be called while
1518 // thread_init is being processed. However, since no memory should
1519 // exist to be scanned at this point, it is sufficient for these
1520 // functions to detect the condition and return immediately.
1521
1522 version( Win32 )
1523 {
1524 Thread.sm_this = TlsAlloc();
1525 assert( Thread.sm_this != TLS_OUT_OF_INDEXES );
1526 }
1527 else version( Posix )
1528 {
1529 int status;
1530 sigaction_t sigusr1 = void;
1531 sigaction_t sigusr2 = void;
1532
1533 // This is a quick way to zero-initialize the structs without using
1534 // memset or creating a link dependency on their static initializer.
1535 (cast(byte*) &sigusr1)[0 .. sigaction_t.sizeof] = 0;
1536 (cast(byte*) &sigusr2)[0 .. sigaction_t.sizeof] = 0;
1537
1538 // NOTE: SA_RESTART indicates that system calls should restart if they
1539 // are interrupted by a signal, but this is not available on all
1540 // Posix systems, even those that support multithreading.
1541 static if( is( typeof( SA_RESTART ) ) )
1542 sigusr1.sa_flags = SA_RESTART;
1543 else
1544 sigusr1.sa_flags = 0;
1545 sigusr1.sa_handler = &thread_suspendHandler;
1546 // NOTE: We want to ignore all signals while in this handler, so fill
1547 // sa_mask to indicate this.
1548 status = sigfillset( &sigusr1.sa_mask );
1549 assert( status == 0 );
1550
1551 // NOTE: Since SIGUSR2 should only be issued for threads within the
1552 // suspend handler, we don't want this signal to trigger a
1553 // restart.
1554 sigusr2.sa_flags = 0;
1555 sigusr2.sa_handler = &thread_resumeHandler;
1556 // NOTE: We want to ignore all signals while in this handler, so fill
1557 // sa_mask to indicate this.
1558 status = sigfillset( &sigusr2.sa_mask );
1559 assert( status == 0 );
1560
1561 status = sigaction( SIGUSR1, &sigusr1, null );
1562 assert( status == 0 );
1563
1564 status = sigaction( SIGUSR2, &sigusr2, null );
1565 assert( status == 0 );
1566
1567 status = sem_init( &suspendCount, 0, 0 );
1568 assert( status == 0 );
1569
1570 status = pthread_key_create( &Thread.sm_this, null );
1571 assert( status == 0 );
1572 }
1573
1574 thread_attachThis();
1575 }
1576
1577
1578 /**
1579 * Registers the calling thread for use with Tango. If this routine is called
1580 * for a thread which is already registered, the result is undefined.
1581 */
1582 extern (C) void thread_attachThis()
1583 {
1584 version( Win32 )
1585 {
1586 Thread thisThread = new Thread();
1587 Thread.Context* thisContext = &thisThread.m_main;
1588 assert( thisContext == thisThread.m_curr );
1589
1590 thisThread.m_addr = GetCurrentThreadId();
1591 thisThread.m_hndl = GetCurrentThreadHandle();
1592 thisContext.bstack = getStackBottom();
1593 thisContext.tstack = thisContext.bstack;
1594
1595 thisThread.m_isDaemon = true;
1596
1597 Thread.setThis( thisThread );
1598 }
1599 else version( Posix )
1600 {
1601 Thread thisThread = new Thread();
1602 Thread.Context* thisContext = thisThread.m_curr;
1603 assert( thisContext == &thisThread.m_main );
1604
1605 thisThread.m_addr = pthread_self();
1606 thisContext.bstack = getStackBottom();
1607 thisContext.tstack = thisContext.bstack;
1608
1609 thisThread.m_isRunning = true;
1610 thisThread.m_isDaemon = true;
1611
1612 Thread.setThis( thisThread );
1613 }
1614
1615 Thread.add( thisThread );
1616 Thread.add( thisContext );
1617 }
1618
1619
1620 /**
1621 * Deregisters the calling thread from use with Tango. If this routine is
1622 * called for a thread which is not registered, the result is undefined.
1623 */
1624 extern (C) void thread_detachThis()
1625 {
1626 Thread.remove( Thread.getThis() );
1627 }
1628
1629
1630 /**
1631 * Joins all non-daemon threads that are currently running. This is done by
1632 * performing successive scans through the thread list until a scan consists
1633 * of only daemon threads.
1634 */
1635 extern (C) void thread_joinAll()
1636 {
1637
1638 while( true )
1639 {
1640 Thread nonDaemon = null;
1641
1642 foreach( t; Thread )
1643 {
1644 if( !t.isDaemon )
1645 {
1646 nonDaemon = t;
1647 break;
1648 }
1649 }
1650 if( nonDaemon is null )
1651 return;
1652 nonDaemon.join();
1653 }
1654 }
1655
1656
1657 /**
1658 * Performs intermediate shutdown of the thread module.
1659 */
1660 static ~this()
1661 {
1662 // NOTE: The functionality related to garbage collection must be minimally
1663 // operable after this dtor completes. Therefore, only minimal
1664 // cleanup may occur.
1665
1666 for( Thread t = Thread.sm_tbeg; t; t = t.next )
1667 {
1668 if( !t.isRunning )
1669 Thread.remove( t );
1670 }
1671 }
1672
1673
1674 // Used for needLock below
1675 private bool multiThreadedFlag = false;
1676
1677
1678 /**
1679 * This function is used to determine whether the process is
1680 * multi-threaded. Optimizations may only be performed on this
1681 * value if the programmer can guarantee that no path from the
1682 * enclosed code will start a thread.
1683 *
1684 * Returns:
1685 * True if Thread.start() has been called in this process.
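 *
 * Example:
 * A sketch of the intended optimization pattern; lock and counter are
 * hypothetical names:
 * -----------------------------------------------------------------------------
 *
 * if( thread_needLock() )
 * {
 *     synchronized( lock )
 *         ++counter;
 * }
 * else
 * {
 *     ++counter;
 * }
 *
 * -----------------------------------------------------------------------------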
1686 */
1687 extern (C) bool thread_needLock()
1688 {
1689 return multiThreadedFlag;
1690 }
1691
1692
1693 // Used for suspendAll/resumeAll below
1694 private uint suspendDepth = 0;
1695
1696
1697 /**
1698 * Suspend all threads but the calling thread for "stop the world" garbage
1699 * collection runs. This function may be called multiple times, and must
1700 * be followed by a matching number of calls to thread_resumeAll before
1701 * processing is resumed.
1702 *
1703 * Throws:
1704 * ThreadException if the suspend operation fails for a running thread.
1705 */
1706 extern (C) void thread_suspendAll()
1707 {
1708 /**
1709 * Suspend the specified thread and load stack and register information for
1710 * use by thread_scanAll. If the supplied thread is the calling thread,
1711 * stack and register information will be loaded but the thread will not
1712 * be suspended. If the suspend operation fails and the thread is not
1713 * running then it will be removed from the global thread list, otherwise
1714 * an exception will be thrown.
1715 *
1716 * Params:
1717 * t = The thread to suspend.
1718 *
1719 * Throws:
1720 * ThreadException if the suspend operation fails for a running thread.
1721 */
1722 void suspend( Thread t )
1723 {
1724 version( Win32 )
1725 {
1726 if( t.m_addr != GetCurrentThreadId() && SuspendThread( t.m_hndl ) == 0xFFFFFFFF )
1727 {
1728 if( !t.isRunning )
1729 {
1730 Thread.remove( t );
1731 return;
1732 }
1733 throw new ThreadException( "Unable to suspend thread" );
1734 }
1735
1736 CONTEXT context = void;
1737 context.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
1738
1739 if( !GetThreadContext( t.m_hndl, &context ) )
1740 throw new ThreadException( "Unable to load thread context" );
1741 if( !t.m_lock )
1742 t.m_curr.tstack = cast(void*) context.Esp;
1743 // edi,esi,ebp,esp,ebx,edx,ecx,eax
1744 t.m_reg[0] = context.Edi;
1745 t.m_reg[1] = context.Esi;
1746 t.m_reg[2] = context.Ebp;
1747 t.m_reg[3] = context.Esp;
1748 t.m_reg[4] = context.Ebx;
1749 t.m_reg[5] = context.Edx;
1750 t.m_reg[6] = context.Ecx;
1751 t.m_reg[7] = context.Eax;
1752 }
1753 else version( Posix )
1754 {
1755 if( t.m_addr != pthread_self() )
1756 {
1757 if( pthread_kill( t.m_addr, SIGUSR1 ) != 0 )
1758 {
1759 if( !t.isRunning )
1760 {
1761 Thread.remove( t );
1762 return;
1763 }
1764 throw new ThreadException( "Unable to suspend thread" );
1765 }
1766 // NOTE: It's really not ideal to wait for each thread to signal
1767 // individually -- rather, it would be better to suspend
1768 // them all and wait once at the end. However, semaphores
1769 // don't really work this way, and the obvious alternative
1770 // (looping on an atomic suspend count) requires either
1771 // the atomic module (which only works on x86) or other
1772 // specialized functionality. It would also be possible
1773 // to simply loop on sem_wait at the end, but I'm not
1774 // convinced that this would be much faster than the
1775 // current approach.
1776 sem_wait( &suspendCount );
1777 }
1778 else if( !t.m_lock )
1779 {
1780 t.m_curr.tstack = getStackTop();
1781 }
1782 }
1783 }
1784
1785
1786 // NOTE: We've got an odd chicken & egg problem here, because while the GC
1787 // is required to call thread_init before calling any other thread
1788 // routines, thread_init may allocate memory which could in turn
1789 // trigger a collection. Thus, thread_suspendAll, thread_scanAll,
1790 // and thread_resumeAll must be callable before thread_init completes,
1791 // with the assumption that no other GC memory has yet been allocated
1792 // by the system, and thus there is no risk of losing data if the
1793 // global thread list is empty. The check of Thread.sm_tbeg
1794 // below is done to ensure thread_init has completed, and therefore
1795 // that calling Thread.getThis will not result in an error. For the
1796 // short time when Thread.sm_tbeg is null, there is no reason
1797 // not to simply call the multithreaded code below, with the
1798 // expectation that the foreach loop will never be entered.
1799 if( !multiThreadedFlag && Thread.sm_tbeg )
1800 {
1801 if( ++suspendDepth == 1 )
1802 suspend( Thread.getThis() );
1803 return;
1804 }
1805 synchronized( Thread.slock )
1806 {
1807 if( ++suspendDepth > 1 )
1808 return;
1809
1810 // NOTE: I'd really prefer not to check isRunning within this loop but
1811 // not doing so could be problematic if threads are terminated
1812 // abnormally and a new thread is created with the same thread
1813 // address before the next GC run. This situation might cause
1814 // the same thread to be suspended twice, which would likely
1815 // cause the second suspend to fail, the garbage collection to
1816 // abort, and Bad Things to occur.
1817 for( Thread t = Thread.sm_tbeg; t; t = t.next )
1818 {
1819 if( t.isRunning )
1820 suspend( t );
1821 else
1822 Thread.remove( t );
1823 }
1824
1825 version( Posix )
1826 {
1827 // wait on semaphore -- see note in suspend for
1828 // why this is currently not implemented
1829 }
1830 }
1831 }
1832
1833
1834 /**
1835 * Resume all threads but the calling thread for "stop the world" garbage
1836 * collection runs. This function must be called once for each preceding
1837 * call to thread_suspendAll before the threads are actually resumed.
1838 *
1839 * In:
1840 * This routine must be preceded by a call to thread_suspendAll.
1841 *
1842 * Throws:
1843 * ThreadException if the resume operation fails for a running thread.
1844 */
1845 extern (C) void thread_resumeAll()
1846 in
1847 {
1848 assert( suspendDepth > 0 );
1849 }
1850 body
1851 {
1852 /**
1853 * Resume the specified thread and unload stack and register information.
1854 * If the supplied thread is the calling thread, stack and register
1855 * information will be unloaded but the thread will not be resumed. If
1856 * the resume operation fails and the thread is not running then it will
1857 * be removed from the global thread list, otherwise an exception will be
1858 * thrown.
1859 *
1860 * Params:
1861 * t = The thread to resume.
1862 *
1863 * Throws:
1864 * ThreadException if the resume fails for a running thread.
1865 */
1866 void resume( Thread t )
1867 {
1868 version( Win32 )
1869 {
1870 if( t.m_addr != GetCurrentThreadId() && ResumeThread( t.m_hndl ) == 0xFFFFFFFF )
1871 {
1872 if( !t.isRunning )
1873 {
1874 Thread.remove( t );
1875 return;
1876 }
1877 throw new ThreadException( "Unable to resume thread" );
1878 }
1879
1880 if( !t.m_lock )
1881 t.m_curr.tstack = t.m_curr.bstack;
1882 t.m_reg[0 .. $] = 0;
1883 }
1884 else version( Posix )
1885 {
1886 if( t.m_addr != pthread_self() )
1887 {
1888 if( pthread_kill( t.m_addr, SIGUSR2 ) != 0 )
1889 {
1890 if( !t.isRunning )
1891 {
1892 Thread.remove( t );
1893 return;
1894 }
1895 throw new ThreadException( "Unable to resume thread" );
1896 }
1897 }
1898 else if( !t.m_lock )
1899 {
1900 t.m_curr.tstack = t.m_curr.bstack;
1901 }
1902 }
1903 }
1904
1905
1906 // NOTE: See thread_suspendAll for the logic behind this.
1907 if( !multiThreadedFlag && Thread.sm_tbeg )
1908 {
1909 if( --suspendDepth == 0 )
1910 resume( Thread.getThis() );
1911 return;
1912 }
1913 synchronized( Thread.slock )
1914 {
1915 if( --suspendDepth > 0 )
1916 return;
1917
1918 for( Thread t = Thread.sm_tbeg; t; t = t.next )
1919 {
1920 resume( t );
1921 }
1922 }
1923 }
1924
1925
1926 private alias void delegate( void*, void* ) scanAllThreadsFn;
1927
1928
1929 /**
1930 * The main entry point for garbage collection. The supplied delegate
1931 * will be passed ranges representing both stack and register values.
1932 *
1933 * Params:
1934 * scan = The scanner function. It should scan from p1 through p2 - 1.
1935 * curStackTop = An optional pointer to the top of the calling thread's stack.
1936 *
1937 * In:
1938 * This routine must be preceded by a call to thread_suspendAll.
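 *
 * Example:
 * An illustrative (non-GC) scan that simply measures the total number of
 * bytes covered by the reported ranges:
 * -----------------------------------------------------------------------------
 *
 * size_t bytes = 0;
 *
 * thread_suspendAll();
 * thread_scanAll( delegate void( void* p1, void* p2 )
 *                 {
 *                     bytes += cast(byte*) p2 - cast(byte*) p1;
 *                 } );
 * thread_resumeAll();
 *
 * -----------------------------------------------------------------------------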
1939 */
1940 extern (C) void thread_scanAll( scanAllThreadsFn scan, void* curStackTop = null )
1941 in
1942 {
1943 assert( suspendDepth > 0 );
1944 }
1945 body
1946 {
1947 Thread thisThread = null;
1948 void* oldStackTop = null;
1949
1950 if( curStackTop && Thread.sm_tbeg )
1951 {
1952 thisThread = Thread.getThis();
1953 if( !thisThread.m_lock )
1954 {
1955 oldStackTop = thisThread.m_curr.tstack;
1956 thisThread.m_curr.tstack = curStackTop;
1957 }
1958 }
1959
1960 scope( exit )
1961 {
1962 if( curStackTop && Thread.sm_tbeg )
1963 {
1964 if( !thisThread.m_lock )
1965 {
1966 thisThread.m_curr.tstack = oldStackTop;
1967 }
1968 }
1969 }
1970
1971 // NOTE: Synchronizing on Thread.slock is not needed because this
1972 // function may only be called after all other threads have
1973 // been suspended from within the same lock.
1974 for( Thread.Context* c = Thread.sm_cbeg; c; c = c.next )
1975 {
1976 version( StackGrowsDown )
1977 {
1978 // NOTE: We can't index past the bottom of the stack
1979 // so don't do the "+1" for StackGrowsDown.
1980 if( c.tstack && c.tstack < c.bstack )
1981 scan( c.tstack, c.bstack );
1982 }
1983 else
1984 {
1985 if( c.bstack && c.bstack < c.tstack )
1986 scan( c.bstack, c.tstack + 1 );
1987 }
1988 }
1989 version( Win32 )
1990 {
1991 for( Thread t = Thread.sm_tbeg; t; t = t.next )
1992 {
1993 scan( &t.m_reg[0], &t.m_reg[0] + t.m_reg.length );
1994 }
1995 }
1996 }
1997
1998
1999 ////////////////////////////////////////////////////////////////////////////////
2000 // Thread Local
2001 ////////////////////////////////////////////////////////////////////////////////
2002
2003
2004 /**
2005 * This class encapsulates the operations required to initialize, access, and
2006 * destroy thread local data.
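 *
 * Example:
 * A sketch of a per-thread counter; each thread sees its own copy of the
 * value, initialized to the supplied default:
 * -----------------------------------------------------------------------------
 *
 * ThreadLocal!(int) counter = new ThreadLocal!(int)( 0 );
 *
 * counter.val = counter.val + 1;
 * assert( counter.val == 1 );
 *
 * -----------------------------------------------------------------------------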
2007 */
2008 class ThreadLocal( T )
2009 {
2010 ////////////////////////////////////////////////////////////////////////////
2011 // Initialization
2012 ////////////////////////////////////////////////////////////////////////////
2013
2014
2015 /**
2016 * Initializes thread local storage for the indicated value which will be
2017 * initialized to def for all threads.
2018 *
2019 * Params:
2020 * def = The default value to return if no value has been explicitly set.
2021 */
2022 this( T def = T.init )
2023 {
2024 m_def = def;
2025 m_key = Thread.createLocal();
2026 }
2027
2028
2029 ~this()
2030 {
2031 Thread.deleteLocal( m_key );
2032 }
2033
2034
2035 ////////////////////////////////////////////////////////////////////////////
2036 // Accessors
2037 ////////////////////////////////////////////////////////////////////////////
2038
2039
2040 /**
2041 * Gets the value last set by the calling thread, or def if no such value
2042 * has been set.
2043 *
2044 * Returns:
2045 * The stored value or def if no value is stored.
2046 */
2047 T val()
2048 {
2049 Wrap* wrap = cast(Wrap*) Thread.getLocal( m_key );
2050
2051 return wrap ? wrap.val : m_def;
2052 }
2053
2054
2055 /**
2056 * Copies newval to a location specific to the calling thread, and returns
2057 * newval.
2058 *
2059 * Params:
2060 * newval = The value to set.
2061 *
2062 * Returns:
2063 * The value passed to this function.
2064 */
2065 T val( T newval )
2066 {
2067 Wrap* wrap = cast(Wrap*) Thread.getLocal( m_key );
2068
2069 if( wrap is null )
2070 {
2071 wrap = new Wrap;
2072 Thread.setLocal( m_key, wrap );
2073 }
2074 wrap.val = newval;
2075 return newval;
2076 }
2077
2078
2079 private:
2080 //
2081 // A wrapper for the stored data. This is needed for determining whether
2082 // set has ever been called for this thread (and therefore whether the
2083 // default value should be returned) and also to flatten the differences
2084 // between data that is smaller and larger than (void*).sizeof. The
2085 // obvious tradeoff here is an extra per-thread allocation for each
2086 // ThreadLocal value as compared to calling the Thread routines directly.
2087 //
2088 struct Wrap
2089 {
2090 T val;
2091 }
2092
2093
2094 T m_def;
2095 uint m_key;
2096 }
2097
2098
2099 ////////////////////////////////////////////////////////////////////////////////
2100 // Thread Group
2101 ////////////////////////////////////////////////////////////////////////////////
2102
2103
2104 /**
2105 * This class simplifies the management of a group of threads which are meant to be created, tracked, and joined as a unit.
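 *
 * Example:
 * A sketch that runs two workers and waits for both to finish (workFunc
 * is a hypothetical thread function):
 * -----------------------------------------------------------------------------
 *
 * ThreadGroup group = new ThreadGroup();
 *
 * group.create( &workFunc );
 * group.create( &workFunc );
 * group.joinAll();
 *
 * -----------------------------------------------------------------------------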
2106 */
2107 class ThreadGroup
2108 {
2109 /**
2110 * Creates and starts a new Thread object that executes fn and adds it to
2111 * the list of tracked threads.
2112 *
2113 * Params:
2114 * fn = The thread function.
2115 *
2116 * Returns:
2117 * A reference to the newly created thread.
2118 */
2119 final Thread create( void function() fn )
2120 {
2121 Thread t = new Thread( fn );
2122
2123 t.start();
2124 synchronized
2125 {
2126 m_all[t] = t;
2127 }
2128 return t;
2129 }
2130
2131
2132 /**
2133 * Creates and starts a new Thread object that executes dg and adds it to
2134 * the list of tracked threads.
2135 *
2136 * Params:
2137 * dg = The thread function.
2138 *
2139 * Returns:
2140 * A reference to the newly created thread.
2141 */
2142 final Thread create( void delegate() dg )
2143 {
2144 Thread t = new Thread( dg );
2145
2146 t.start();
2147 synchronized
2148 {
2149 m_all[t] = t;
2150 }
2151 return t;
2152 }
2153
2154
2155 /**
2156 * Add t to the list of tracked threads if it is not already being tracked.
2157 *
2158 * Params:
2159 * t = The thread to add.
2160 *
2161 * In:
2162 * t must not be null.
2163 */
2164 final void add( Thread t )
2165 in
2166 {
2167 assert( t );
2168 }
2169 body
2170 {
2171 synchronized
2172 {
2173 m_all[t] = t;
2174 }
2175 }
2176
2177
2178 /**
2179 * Removes t from the list of tracked threads. No operation will be
2180 * performed if t is not currently being tracked by this object.
2181 *
2182 * Params:
2183 * t = The thread to remove.
2184 *
2185 * In:
2186 * t must not be null.
2187 */
2188 final void remove( Thread t )
2189 in
2190 {
2191 assert( t );
2192 }
2193 body
2194 {
2195 synchronized
2196 {
2197 m_all.remove( t );
2198 }
2199 }
2200
2201
2202 /**
2203 * Operates on all threads currently tracked by this object.
2204 */
2205 final int opApply( int delegate( inout Thread ) dg )
2206 {
2207 synchronized
2208 {
2209 int ret = 0;
2210
2211 // NOTE: This loop relies on the knowledge that m_all uses the
2212 // Thread object for both the key and the mapped value.
2213 foreach( Thread t; m_all.keys )
2214 {
2215 ret = dg( t );
2216 if( ret )
2217 break;
2218 }
2219 return ret;
2220 }
2221 }
2222
2223
2224 /**
2225 * Iteratively joins all tracked threads. This function will block add,
2226 * remove, and opApply until it completes.
2227 *
2228 * Params:
2229 * rethrow = Rethrow any unhandled exception which may have caused a
2230 * joined thread to terminate.
2231 *
2232 * Throws:
2233 * Any exception not handled by the joined threads.
2234 */
2235 final void joinAll( bool rethrow = true )
2236 {
2237 synchronized
2238 {
2239 // NOTE: This loop relies on the knowledge that m_all uses the
2240 // Thread object for both the key and the mapped value.
2241 foreach( Thread t; m_all.keys )
2242 {
2243 t.join( rethrow );
2244 }
2245 }
2246 }
2247
2248
2249 private:
2250 Thread[Thread] m_all;
2251 }
2252
2253
2254 ////////////////////////////////////////////////////////////////////////////////
2255 // Fiber Platform Detection and Memory Allocation
2256 ////////////////////////////////////////////////////////////////////////////////
2257
2258
2259 private
2260 {
2261 version( D_InlineAsm_X86 )
2262 {
2263 version( X86_64 )
2264 {
2265
2266 }
2267 else
2268 {
2269 version( Win32 )
2270 version = AsmX86_Win32;
2271 else version( Posix )
2272 version = AsmX86_Posix;
2273 }
2274 }
2275 else version( PPC )
2276 {
2277 version( Posix )
2278 version = AsmPPC_Posix;
2279 }
2280
2281
2282 version( Posix )
2283 {
2284 import tango.stdc.posix.unistd; // for sysconf
2285 import tango.stdc.posix.sys.mman; // for mmap
2286 import tango.stdc.posix.stdlib; // for malloc, valloc, free
2287
2288 version( AsmX86_Win32 ) {} else
2289 version( AsmX86_Posix ) {} else
2290 version( AsmPPC_Posix ) {} else
2291 {
2292 // NOTE: The ucontext implementation requires architecture specific
2293 // data definitions to operate so testing for it must be done
2294 // by checking for the existence of ucontext_t rather than by
2295 // a version identifier. Please note that this is considered
2296 // an obsolescent feature according to the POSIX spec, so a
2297 // custom solution is still preferred.
2298 import tango.stdc.posix.ucontext;
2299 }
2300 }
2301
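// size of a hardware memory page in bytes, set by the module constructor below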
2302 const size_t PAGESIZE;
2303 }
2304
2305
2306 static this()
2307 {
2308 static if( is( typeof( GetSystemInfo ) ) )
2309 {
2310 SYSTEM_INFO info;
2311 GetSystemInfo( &info );
2312
2313 PAGESIZE = info.dwPageSize;
2314 assert( PAGESIZE < int.max );
2315 }
2316 else static if( is( typeof( sysconf ) ) &&
2317 is( typeof( _SC_PAGESIZE ) ) )
2318 {
2319 PAGESIZE = cast(typeof(PAGESIZE))sysconf( _SC_PAGESIZE );
2320 assert( PAGESIZE < int.max );
2321 }
2322 else
2323 {
2324 version( PPC )
2325 PAGESIZE = 8192;
2326 else
2327 PAGESIZE = 4096;
2328 }
2329 }
2330
2331
2332 ////////////////////////////////////////////////////////////////////////////////
2333 // Fiber Entry Point and Context Switch
2334 ////////////////////////////////////////////////////////////////////////////////
2335
2336
2337 private
2338 {
2339 extern (C) void fiber_entryPoint()
2340 {
2341 Fiber obj = Fiber.getThis();
2342 assert( obj );
2343
2344 assert( Thread.getThis().m_curr is obj.m_ctxt );
2345 volatile Thread.getThis().m_lock = false;
2346 obj.m_ctxt.tstack = obj.m_ctxt.bstack;
2347 obj.m_state = Fiber.State.EXEC;
2348
2349 try
2350 {
2351 obj.run();
2352 }
2353 catch( Object o )
2354 {
2355 obj.m_unhandled = o;
2356 }
2357
2358 static if( is( typeof( ucontext_t ) ) )
2359 obj.m_ucur = &obj.m_utxt;
2360
2361 obj.m_state = Fiber.State.TERM;
2362 obj.switchOut();
2363 }
2364
2365
2366 // NOTE: If AsmPPC_Posix is defined then the context switch routine will
2367 // be defined externally until GDC supports inline PPC ASM.
2368 version( AsmPPC_Posix )
2369 extern (C) void fiber_switchContext( void** oldp, void* newp );
2370 else
2371 extern (C) void fiber_switchContext( void** oldp, void* newp )
2372 {
2373 // NOTE: The data pushed and popped in this routine must match the
2374 // default stack created by Fiber.initStack or the initial
2375 // switch into a new context will fail.
2376
2377 version( AsmX86_Win32 )
2378 {
2379 asm
2380 {
2381 naked;
2382
2383 // save current stack state
2384 push EBP;
2385 mov EBP, ESP;
2386 push EAX;
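// FS:[0], FS:[4], and FS:[8] are the Win32 TIB fields holding the SEH
// exception list, the stack base, and the stack limit respectively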
2387 push dword ptr FS:[0];
2388 push dword ptr FS:[4];
2389 push dword ptr FS:[8];
2390 push EBX;
2391 push ESI;
2392 push EDI;
2393
2394 // store the stack pointer through oldp again, now with an accurate value
2395 mov EAX, dword ptr 8[EBP];
2396 mov [EAX], ESP;
2397 // load newp to begin context switch
2398 mov ESP, dword ptr 12[EBP];
2399
2400 // load saved state from new stack
2401 pop EDI;
2402 pop ESI;
2403 pop EBX;
2404 pop dword ptr FS:[8];
2405 pop dword ptr FS:[4];
2406 pop dword ptr FS:[0];
2407 pop EAX;
2408 pop EBP;
2409
2410 // 'return' to complete switch
2411 ret;
2412 }
2413 }
2414 else version( AsmX86_Posix )
2415 {
2416 asm
2417 {
2418 naked;
2419
2420 // save current stack state
2421 push EBP;
2422 mov EBP, ESP;
2423 push EAX;
2424 push EBX;
2425 push ESI;
2426 push EDI;
2427
2428 // store the stack pointer through oldp again, now with an accurate value
2429 mov EAX, dword ptr 8[EBP];
2430 mov [EAX], ESP;
2431 // load newp to begin context switch
2432 mov ESP, dword ptr 12[EBP];
2433
2434 // load saved state from new stack
2435 pop EDI;
2436 pop ESI;
2437 pop EBX;
2438 pop EAX;
2439 pop EBP;
2440
2441 // 'return' to complete switch
2442 ret;
2443 }
2444 }
2445 else static if( is( typeof( ucontext_t ) ) )
2446 {
2447 Fiber cfib = Fiber.getThis();
2448 void* ucur = cfib.m_ucur;
2449
2450 *oldp = &ucur;
2451 swapcontext( **(cast(ucontext_t***) oldp),
2452 *(cast(ucontext_t**) newp) );
2453 }
2454 }
2455 }
2456
2457
2458 ////////////////////////////////////////////////////////////////////////////////
2459 // Fiber
2460 ////////////////////////////////////////////////////////////////////////////////
2461
2462
2463 /**
2464 * This class provides a cooperative concurrency mechanism integrated with the
2465 * threading and garbage collection functionality. Calling a fiber may be
2466 * considered a blocking operation that returns when the fiber yields (via
2467 * Fiber.yield()). Execution occurs within the context of the calling thread,
2468 * so synchronization is not necessary to guarantee memory visibility provided
2469 * the same thread calls the fiber each time. Please note that there is no
2470 * requirement that a fiber be bound to one specific thread. Rather, fibers
2471 * may be freely passed between threads so long as they are not currently
2472 * executing. Like threads, a new fiber may be created using either
2473 * derivation or composition, as in the following example.
2474 *
2475 * Example:
2476 * ----------------------------------------------------------------------
2477 *
2478 * class DerivedFiber : Fiber
2479 * {
2480 * this()
2481 * {
2482 * super( &run );
2483 * }
2484 *
2485 * private:
2486 * void run()
2487 * {
2488 * printf( "Derived fiber running.\n" );
2489 * }
2490 * }
2491 *
2492 * void fiberFunc()
2493 * {
2494 * printf( "Composed fiber running.\n" );
2495 * Fiber.yield();
2496 * printf( "Composed fiber running.\n" );
2497 * }
2498 *
2499 * // create instances of each type
2500 * Fiber derived = new DerivedFiber();
2501 * Fiber composed = new Fiber( &fiberFunc );
2502 *
2503 * // call both fibers once
2504 * derived.call();
2505 * composed.call();
2506 * printf( "Execution returned to calling context.\n" );
2507 * composed.call();
2508 *
2509 * // since each fiber has run to completion, each should have state TERM
2510 * assert( derived.state == Fiber.State.TERM );
2511 * assert( composed.state == Fiber.State.TERM );
2512 *
2513 * ----------------------------------------------------------------------
2514 *
2515 * Authors: Based on a design by Mikola Lysenko.
2516 */
2517 class Fiber
2518 {
2519 ////////////////////////////////////////////////////////////////////////////
2520 // Initialization
2521 ////////////////////////////////////////////////////////////////////////////
2522
2523
2524 /**
2525 * Initializes a fiber object which is associated with a static
2526 * D function.
2527 *
2528 * Params:
2529 * fn = The fiber function.
2530 * sz = The stack size for this fiber.
2531 *
2532 * In:
2533 * fn must not be null.
2534 */
2535 this( void function() fn, size_t sz = PAGESIZE )
2536 in
2537 {
2538 assert( fn );
2539 }
2540 body
2541 {
2542 m_fn = fn;
2543 m_call = Call.FN;
2544 m_state = State.HOLD;
2545 allocStack( sz );
2546 initStack();
2547 }
2548
2549
2550 /**
2551 * Initializes a fiber object which is associated with a dynamic
2552 * D function.
2553 *
2554 * Params:
2555 * dg = The fiber function.
2556 * sz = The stack size for this fiber.
2557 *
2558 * In:
2559 * dg must not be null.
2560 */
2561 this( void delegate() dg, size_t sz = PAGESIZE )
2562 in
2563 {
2564 assert( dg );
2565 }
2566 body
2567 {
2568 m_dg = dg;
2569 m_call = Call.DG;
2570 m_state = State.HOLD;
2571 allocStack( sz );
2572 initStack();
2573 }
2574
2575
2576 /**
2577 * Cleans up any remaining resources used by this object.
2578 */
2579 ~this()
2580 {
2581 // NOTE: A live reference to this object will exist on its associated
2582 // stack from the first time its call() method has been called
2583 // until its execution completes with State.TERM. Thus, the only
2584 // times this dtor should be called are either if the fiber has
2585 // terminated (and therefore has no active stack) or if the user
2586 // explicitly deletes this object. The latter case is an error
2587 // but is not easily tested for, since State.HOLD may imply that
2588 // the fiber was just created but has never been run. There is
2589 // not a compelling case to create a State.INIT just to offer a
2590 // means of ensuring the user isn't violating this object's
2591 // contract, so for now this requirement will be enforced by
2592 // documentation only.
2593 freeStack();
2594 }
2595
2596
2597 ////////////////////////////////////////////////////////////////////////////
2598 // General Actions
2599 ////////////////////////////////////////////////////////////////////////////
2600
2601
2602 /**
2603 * Transfers execution to this fiber object. The calling context will be
2604 * suspended until the fiber calls Fiber.yield(), returns from its function,
2605 * or terminates via an unhandled exception.
2606 *
2607 * Params:
2608 * rethrow = Rethrow any unhandled exception which may have caused this
2609 * fiber to terminate.
2610 *
2611 * In:
2612 * This fiber must be in state HOLD.
2613 *
2614 * Throws:
2615 * Any exception not handled by this fiber.
2616 */
2617 final void call( bool rethrow = true )
2618 in
2619 {
2620 assert( m_state == State.HOLD );
2621 }
2622 body
2623 {
2624 Fiber cur = getThis();
2625
2626 static if( is( typeof( ucontext_t ) ) )
2627 m_ucur = cur ? &cur.m_utxt : &Fiber.sm_utxt;
2628
2629 setThis( this );
2630 this.switchIn();
2631 setThis( cur );
2632
2633 static if( is( typeof( ucontext_t ) ) )
2634 m_ucur = null;
2635
2636 // NOTE: If the fiber has terminated then the stack pointers must be
2637 // reset. This ensures that the stack for this fiber is not
2638 // scanned if the fiber has terminated. This is necessary to
2639 // prevent any references lingering on the stack from delaying
2640 // the collection of otherwise dead objects. The most notable
2641 // being the current object, which is referenced at the top of
2642 // fiber_entryPoint.
2643 if( m_state == State.TERM )
2644 {
2645 m_ctxt.tstack = m_ctxt.bstack;
2646 }
2647 if( m_unhandled )
2648 {
2649 Object obj = m_unhandled;
2650 m_unhandled = null;
2651 if( rethrow )
2652 {
2653 throw obj;
2654 }
2655 }
2656 }
2657
2658
2659 /**
2660 * Resets this fiber so that it may be re-used. This routine may only be
2661 * called for fibers that have terminated, as doing otherwise could result
2662 * in scope-dependent functionality that is not executed. Stack-based
2663 * classes, for example, may not be cleaned up properly if a fiber is reset
2664 * before it has terminated.
2665 *
2666 * In:
2667 * This fiber must be in state TERM.
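*
* Example:
* A minimal re-use sketch; fiberFunc refers to the function from the
* class-level example above:
* ----------------------------------------------------------------------
*
* Fiber f = new Fiber( &fiberFunc );
*
* while( f.state != Fiber.State.TERM )
*     f.call();
*
* f.reset(); // f may now be called again from the beginning
* assert( f.state == Fiber.State.HOLD );
*
* ----------------------------------------------------------------------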
2668 */
2669 final void reset()
2670 in
2671 {
2672 assert( m_state == State.TERM );
2673 assert( m_ctxt.tstack == m_ctxt.bstack );
2674 }
2675 body
2676 {
2677 m_state = State.HOLD;
2678 initStack();
2679 m_unhandled = null;
2680 }
2681
2682
2683 ////////////////////////////////////////////////////////////////////////////
2684 // General Properties
2685 ////////////////////////////////////////////////////////////////////////////
2686
2687
2688 /**
2689 * A fiber may occupy one of three states: HOLD, EXEC, and TERM. The HOLD
2690 * state applies to any fiber that is suspended and ready to be called.
2691 * The EXEC state will be set for any fiber that is currently executing.
2692 * And the TERM state is set when a fiber terminates. Once a fiber
2693 * terminates, it must be reset before it may be called again.
2694 */
2695 enum State
2696 {
2697 HOLD, ///
2698 EXEC, ///
2699 TERM ///
2700 }
2701
2702
2703 /**
2704 * Gets the current state of this fiber.
2705 *
2706 * Returns:
2707 * The state of this fiber as an enumerated value.
2708 */
2709 final State state()
2710 {
2711 return m_state;
2712 }
2713
2714
2715 ////////////////////////////////////////////////////////////////////////////
2716 // Actions on Calling Fiber
2717 ////////////////////////////////////////////////////////////////////////////
2718
2719
2720 /**
2721 * Forces a context switch to occur away from the calling fiber.
2722 */
2723 static void yield()
2724 {
2725 Fiber cur = getThis();
2726 assert( cur, "Fiber.yield() called with no active fiber" );
2727 assert( cur.m_state == State.EXEC );
2728
2729 static if( is( typeof( ucontext_t ) ) )
2730 cur.m_ucur = &cur.m_utxt;
2731
2732 cur.m_state = State.HOLD;
2733 cur.switchOut();
2734 cur.m_state = State.EXEC;
2735 }
2736
2737
2738 /**
2739 * Forces a context switch to occur away from the calling fiber and then
2740 * throws obj in the calling fiber.
2741 *
2742 * Params:
2743 * obj = The object to throw.
2744 *
2745 * In:
2746 * obj must not be null.
2747 */
2748 static void yieldAndThrow( Object obj )
2749 in
2750 {
2751 assert( obj );
2752 }
2753 body
2754 {
2755 Fiber cur = getThis();
2756 assert( cur, "Fiber.yieldAndThrow() called with no active fiber" );
2757 assert( cur.m_state == State.EXEC );
2758
2759 static if( is( typeof( ucontext_t ) ) )
2760 cur.m_ucur = &cur.m_utxt;
2761
2762 cur.m_unhandled = obj;
2763 cur.m_state = State.HOLD;
2764 cur.switchOut();
2765 cur.m_state = State.EXEC;
2766 }
2767
2768
2769 ////////////////////////////////////////////////////////////////////////////
2770 // Fiber Accessors
2771 ////////////////////////////////////////////////////////////////////////////
2772
2773
2774 /**
2775 * Provides a reference to the calling fiber or null if no fiber is
2776 * currently active.
2777 *
2778 * Returns:
2779 * The fiber object representing the calling fiber or null if no fiber
2780 * is currently active. The result of deleting this object is undefined.
2781 */
2782 static Fiber getThis()
2783 {
2784 version( Win32 )
2785 {
2786 return cast(Fiber) TlsGetValue( sm_this );
2787 }
2788 else version( Posix )
2789 {
2790 return cast(Fiber) pthread_getspecific( sm_this );
2791 }
2792 }
2793
2794
2795 ////////////////////////////////////////////////////////////////////////////
2796 // Static Initialization
2797 ////////////////////////////////////////////////////////////////////////////
2798
2799
2800 static this()
2801 {
2802 version( Win32 )
2803 {
2804 sm_this = TlsAlloc();
2805 assert( sm_this != TLS_OUT_OF_INDEXES );
2806 }
2807 else version( Posix )
2808 {
2809 int status;
2810
2811 status = pthread_key_create( &sm_this, null );
2812 assert( status == 0 );
2813
2814 static if( is( typeof( ucontext_t ) ) )
2815 {
2816 status = getcontext( &sm_utxt );
2817 assert( status == 0 );
2818 }
2819 }
2820 }
2821
2822
2823 private:
2824 //
2825 // Initializes a fiber object which has no associated executable function.
2826 //
2827 this()
2828 {
2829 m_call = Call.NO;
2830 }
2831
2832
2833 //
2834 // Fiber entry point. Invokes the function or delegate passed on
2835 // construction (if any).
2836 //
2837 final void run()
2838 {
2839 switch( m_call )
2840 {
2841 case Call.FN:
2842 m_fn();
2843 break;
2844 case Call.DG:
2845 m_dg();
2846 break;
2847 default:
2848 break;
2849 }
2850 }
2851
2852
2853 private:
2854 //
2855 // The type of routine passed on fiber construction.
2856 //
2857 enum Call
2858 {
2859 NO,
2860 FN,
2861 DG
2862 }
2863
2864
2865 //
2866 // Standard fiber data
2867 //
2868 Call m_call;
2869 union
2870 {
2871 void function() m_fn;
2872 void delegate() m_dg;
2873 }
2874 bool m_isRunning;
2875 Object m_unhandled;
2876 State m_state;
2877
2878
2879 private:
2880 ////////////////////////////////////////////////////////////////////////////
2881 // Stack Management
2882 ////////////////////////////////////////////////////////////////////////////
2883
2884
2885 //
2886 // Allocate a new stack for this fiber.
2887 //
2888 final void allocStack( size_t sz )
2889 in
2890 {
2891 assert( !m_pmem && !m_ctxt );
2892 }
2893 body
2894 {
2895 // adjust alloc size to a multiple of PAGESIZE
2896 sz += PAGESIZE - 1;
2897 sz -= sz % PAGESIZE;
2898
2899 // NOTE: This instance of Thread.Context is dynamic so Fiber objects
2900 // can be collected by the GC so long as no user level references
2901 // to the object exist. If m_ctxt were not dynamic then its
2902 // presence in the global context list would be enough to keep
2903 // this object alive indefinitely. An alternative to allocating
2904 // room for this struct explicitly would be to mash it into the
2905 // base of the stack being allocated below. However, doing so
2906 // requires too much special logic to be worthwhile.
2907 m_ctxt = new Thread.Context;
2908
2909 static if( is( typeof( VirtualAlloc ) ) )
2910 {
2911 // reserve memory for stack
2912 m_pmem = VirtualAlloc( null,
2913 sz + PAGESIZE,
2914 MEM_RESERVE,
2915 PAGE_NOACCESS );
2916 if( !m_pmem )
2917 {
2918 throw new FiberException( "Unable to reserve memory for stack" );
2919 }
2920
2921 version( StackGrowsDown )
2922 {
2923 void* stack = m_pmem + PAGESIZE;
2924 void* guard = m_pmem;
2925 void* pbase = stack + sz;
2926 }
2927 else
2928 {
2929 void* stack = m_pmem;
2930 void* guard = m_pmem + sz;
2931 void* pbase = stack;
2932 }
2933
2934 // allocate reserved stack segment
2935 stack = VirtualAlloc( stack,
2936 sz,
2937 MEM_COMMIT,
2938 PAGE_READWRITE );
2939 if( !stack )
2940 {
2941 throw new FiberException( "Unable to allocate memory for stack" );
2942 }
2943
2944 // allocate reserved guard page
2945 guard = VirtualAlloc( guard,
2946 PAGESIZE,
2947 MEM_COMMIT,
2948 PAGE_READWRITE | PAGE_GUARD );
2949 if( !guard )
2950 {
2951 throw new FiberException( "Unable to create guard page for stack" );
2952 }
2953
2954 m_ctxt.bstack = pbase;
2955 m_ctxt.tstack = pbase;
2956 m_size = sz;
2957 }
2958 else
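// fall back to mmap where available, then valloc, then plain malloc;
// note that no guard page protects the stack on this path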
2959 { static if( is( typeof( mmap ) ) )
2960 {
2961 m_pmem = mmap( null,
2962 sz,
2963 PROT_READ | PROT_WRITE,
2964 MAP_PRIVATE | MAP_ANON,
2965 -1,
2966 0 );
2967 if( m_pmem == MAP_FAILED )
2968 m_pmem = null;
2969 }
2970 else static if( is( typeof( valloc ) ) )
2971 {
2972 m_pmem = valloc( sz );
2973 }
2974 else static if( is( typeof( malloc ) ) )
2975 {
2976 m_pmem = malloc( sz );
2977 }
2978 else
2979 {
2980 m_pmem = null;
2981 }
2982
2983 if( !m_pmem )
2984 {
2985 throw new FiberException( "Unable to allocate memory for stack" );
2986 }
2987
2988 version( StackGrowsDown )
2989 {
2990 m_ctxt.bstack = m_pmem + sz;
2991 m_ctxt.tstack = m_pmem + sz;
2992 }
2993 else
2994 {
2995 m_ctxt.bstack = m_pmem;
2996 m_ctxt.tstack = m_pmem;
2997 }
2998 m_size = sz;
2999 }
3000
3001 Thread.add( m_ctxt );
3002 }
3003
3004
3005 //
3006 // Free this fiber's stack.
3007 //
3008 final void freeStack()
3009 in
3010 {
3011 assert( m_pmem && m_ctxt );
3012 }
3013 body
3014 {
3015 // NOTE: Since this routine is only ever expected to be called from
3016 // the dtor, pointers to freed data are not set to null.
3017
3018 // NOTE: m_ctxt is guaranteed to be alive because it is held in the
3019 // global context list.
3020 Thread.remove( m_ctxt );
3021
3022 static if( is( typeof( VirtualAlloc ) ) )
3023 {
3024 VirtualFree( m_pmem, 0, MEM_RELEASE );
3025 }
3026 else static if( is( typeof( mmap ) ) )
3027 {
3028 munmap( m_pmem, m_size );
3029 }
3030 else static if( is( typeof( valloc ) ) )
3031 {
3032 free( m_pmem );
3033 }
3034 else static if( is( typeof( malloc ) ) )
3035 {
3036 free( m_pmem );
3037 }
3038 delete m_ctxt;
3039 }
3040
3041
3042 //
3043 // Initialize the allocated stack.
3044 //
3045 final void initStack()
3046 in
3047 {
3048 assert( m_ctxt.tstack && m_ctxt.tstack == m_ctxt.bstack );
3049 assert( cast(size_t) m_ctxt.bstack % (void*).sizeof == 0 );
3050 }
3051 body
3052 {
3053 void* pstack = m_ctxt.tstack;
3054 scope( exit ) m_ctxt.tstack = pstack;
3055
3056 void push( size_t val )
3057 {
3058 version( StackGrowsDown )
3059 {
3060 pstack -= size_t.sizeof;
3061 *(cast(size_t*) pstack) = val;
3062 }
3063 else
3064 {
3065 pstack += size_t.sizeof;
3066 *(cast(size_t*) pstack) = val;
3067 }
3068 }
3069
3070 // NOTE: On OS X the stack must be 16-byte aligned according to the
3071 // IA-32 call spec.
3072 version( darwin )
3073 {
3074 pstack = cast(void*)(cast(uint)(pstack) - (cast(uint)(pstack) & 0x0F));
3075 }
3076
3077 version( AsmX86_Win32 )
3078 {
3079 push( cast(size_t) &fiber_entryPoint ); // EIP
3080 push( 0xFFFFFFFF ); // EBP
3081 push( 0x00000000 ); // EAX
3082 push( 0xFFFFFFFF ); // FS:[0]
3083 // BUG: Are the frame pointers the same for both versions?
3084 version( StackGrowsDown )
3085 {
3086 push( cast(size_t) m_ctxt.bstack ); // FS:[4]
3087 push( cast(size_t) m_ctxt.bstack + m_size ); // FS:[8]
3088 }
3089 else
3090 {
3091 push( cast(size_t) m_ctxt.bstack ); // FS:[4]
3092 push( cast(size_t) m_ctxt.bstack + m_size ); // FS:[8]
3093 }
3094 push( 0x00000000 ); // EBX
3095 push( 0x00000000 ); // ESI
3096 push( 0x00000000 ); // EDI
3097 }
3098 else version( AsmX86_Posix )
3099 {
3100 push( cast(size_t) &fiber_entryPoint ); // EIP
3101 push( 0x00000000 ); // EBP
3102 push( 0x00000000 ); // EAX
3103 push( 0x00000000 ); // EBX
3104 push( 0x00000000 ); // ESI
3105 push( 0x00000000 ); // EDI
3106 }
3107 else version( AsmPPC_Posix )
3108 {
3109 version( StackGrowsDown )
3110 {
3111 pstack -= int.sizeof * 5;
3112 }
3113 else
3114 {
3115 pstack += int.sizeof * 5;
3116 }
3117
3118 push( cast(size_t) &fiber_entryPoint ); // link register
3119 push( 0x00000000 ); // control register
3120 push( 0x00000000 ); // old stack pointer
3121
3122 // GPR values
3123 version( StackGrowsDown )
3124 {
3125 pstack -= int.sizeof * 20;
3126 }
3127 else
3128 {
3129 pstack += int.sizeof * 20;
3130 }
3131
3132 assert( (cast(uint) pstack & 0x0f) == 0 );
3133 }
3134 else static if( is( typeof( ucontext_t ) ) )
3135 {
3136 getcontext( &m_utxt );
3137 m_utxt.uc_stack.ss_sp = m_ctxt.bstack;
3138 m_utxt.uc_stack.ss_size = m_size;
3139 makecontext( &m_utxt, &fiber_entryPoint, 0 );
3140 // NOTE: If ucontext is being used then the top of the stack will
3141 // be a pointer to the ucontext_t struct for that fiber.
3142 push( cast(size_t) &m_utxt );
3143 }
3144 }
3145
3146
3147 Thread.Context* m_ctxt;
3148 size_t m_size;
3149 void* m_pmem;
3150
3151 static if( is( typeof( ucontext_t ) ) )
3152 {
3153 // NOTE: The static ucontext instance is used to represent the context
3154 // of the main application thread.
3155 static ucontext_t sm_utxt = void;
3156 ucontext_t m_utxt = void;
3157 ucontext_t* m_ucur = null;
3158 }
3159
3160
3161 private:
3162 ////////////////////////////////////////////////////////////////////////////
3163 // Storage of Active Fiber
3164 ////////////////////////////////////////////////////////////////////////////
3165
3166
3167 //
3168 // Sets a thread-local reference to the current fiber object.
3169 //
3170 static void setThis( Fiber f )
3171 {
3172 version( Win32 )
3173 {
3174 TlsSetValue( sm_this, cast(void*) f );
3175 }
3176 else version( Posix )
3177 {
3178 pthread_setspecific( sm_this, cast(void*) f );
3179 }
3180 }
3181
3182
3183 static Thread.TLSKey sm_this;
3184
3185
3186 private:
3187 ////////////////////////////////////////////////////////////////////////////
3188 // Context Switching
3189 ////////////////////////////////////////////////////////////////////////////
3190
3191
3192 //
3193 // Switches into the stack held by this fiber.
3194 //
3195 final void switchIn()
3196 {
3197 Thread tobj = Thread.getThis();
3198 void** oldp = &tobj.m_curr.tstack;
3199 void* newp = m_ctxt.tstack;
3200
3201 // NOTE: The order of operations here is very important. The current
3202 // stack top must be stored before m_lock is set, and pushContext
3203 // must not be called until after m_lock is set. This process
3204 // is intended to prevent a race condition with the suspend
3205 // mechanism used for garbage collection. If it is not followed,
3206 // a badly timed collection could cause the GC to scan from the
3207 // bottom of one stack to the top of another, or to miss scanning
3208 // a stack that still contains valid data. The old stack pointer
3209 // oldp will be set again before the context switch to guarantee
3210 // that it points to exactly the correct stack location so the
3211 // successive pop operations will succeed.
3212 *oldp = getStackTop();
3213 volatile tobj.m_lock = true;
3214 tobj.pushContext( m_ctxt );
3215
3216 fiber_switchContext( oldp, newp );
3217
3218 // NOTE: As above, these operations must be performed in a strict order
3219 // to prevent Bad Things from happening.
3220 tobj.popContext();
3221 volatile tobj.m_lock = false;
3222 tobj.m_curr.tstack = tobj.m_curr.bstack;
3223 }
3224
3225
3226 //
3227 // Switches out of the current stack and into the enclosing stack.
3228 //
3229 final void switchOut()
3230 {
3231 Thread tobj = Thread.getThis();
3232 void** oldp = &m_ctxt.tstack;
3233 void* newp = tobj.m_curr.within.tstack;
3234
3235 // NOTE: The order of operations here is very important. The current
3236 // stack top must be stored before m_lock is set, and pushContext
3237 // must not be called until after m_lock is set. This process
3238 // is intended to prevent a race condition with the suspend
3239 // mechanism used for garbage collection. If it is not followed,
3240 // a badly timed collection could cause the GC to scan from the
3241 // bottom of one stack to the top of another, or to miss scanning
3242 // a stack that still contains valid data. The old stack pointer
3243 // oldp will be set again before the context switch to guarantee
3244 // that it points to exactly the correct stack location so the
3245 // successive pop operations will succeed.
3246 *oldp = getStackTop();
3247 volatile tobj.m_lock = true;
3248
3249 fiber_switchContext( oldp, newp );
3250
3251 // NOTE: As above, these operations must be performed in a strict order
3252 // to prevent Bad Things from happening.
3253 volatile tobj.m_lock = false;
3254 tobj.m_curr.tstack = tobj.m_curr.bstack;
3255 }
3256 }