comparison d1/qtd/Atomic.d @ 344:96a75b1e5b26

project structure changes
author Max Samukha <maxter@spambox.com>
date Fri, 14 May 2010 12:14:37 +0300
parents qt/qtd/Atomic.d@b460cd08041f
children
comparison
equal deleted inserted replaced
343:552647ec0f82 344:96a75b1e5b26
1 /**
2 * The atomic module is intended to provide some basic support for lock-free
3 * concurrent programming. Some common operations are defined, each of which
4 * may be performed using the specified memory barrier or a less granular
5 * barrier if the hardware does not support the version requested. This
6 * model is based on a design by Alexander Terekhov as outlined in
7 * <a href=http://groups.google.com/groups?threadm=3E4820EE.6F408B25%40web.de>
8 * this thread</a>. Another useful reference for memory ordering on modern
9 * architectures is <a href=http://www.linuxjournal.com/article/8211>this
10 * article by Paul McKenney</a>.
11 *
12 * Copyright: Copyright (C) 2005-2006 Sean Kelly. All rights reserved.
13 * License: BSD style: $(LICENSE)
14 * Authors: Sean Kelly
15 */
16 module qt.qtd.Atomic;
17 //deprecated:
18 ////////////////////////////////////////////////////////////////////////////////
19 // Synchronization Options
20 ////////////////////////////////////////////////////////////////////////////////
21
22
23 /**
24 * Memory synchronization flag. If the supplied option is not available on the
25 * current platform then a stronger method will be used instead.
26 */
// NOTE: the implementation templates below (needsLoadBarrier, isHoistOp, etc.)
// test individual members by equality, so the member set must stay in sync
// with those templates if it is ever extended.
27 enum msync
28 {
29 raw, /// not sequenced
30 hlb, /// hoist-load barrier
31 hsb, /// hoist-store barrier
32 slb, /// sink-load barrier
33 ssb, /// sink-store barrier
34 acq, /// hoist-load + hoist-store barrier
35 rel, /// sink-load + sink-store barrier
36 seq, /// fully sequenced (acq + rel)
37 }
38
39
40 ////////////////////////////////////////////////////////////////////////////////
41 // Internal Type Checking
42 ////////////////////////////////////////////////////////////////////////////////
43
44
45 private
46 {
47 version( D_Ddoc ) {} else
48 {
49 import std.traits;
50
51
// True when T's size matches one of the four supported atomic operand
// widths (1, 2, 4, or 8 bytes); the platform implementations static-if
// on these same sizes.
52 template isValidAtomicType( T )
53 {
54 const bool isValidAtomicType = T.sizeof == byte.sizeof ||
55 T.sizeof == short.sizeof ||
56 T.sizeof == int.sizeof ||
57 T.sizeof == long.sizeof;
58 }
59
60
// True for integral and pointer types — the only types for which
// atomicIncrement/atomicDecrement are legal (enforced by static assert
// in those templates).
61 template isValidNumericType( T )
62 {
63 const bool isValidNumericType = isIntegral!( T ) ||
64 isPointer!( T );
65 }
66
67
// True when ms implies a hoist (acquire-side) barrier.
68 template isHoistOp( msync ms )
69 {
70 const bool isHoistOp = ms == msync.hlb ||
71 ms == msync.hsb ||
72 ms == msync.acq ||
73 ms == msync.seq;
74 }
75
76
// True when ms implies a sink (release-side) barrier.
77 template isSinkOp( msync ms )
78 {
79 const bool isSinkOp = ms == msync.slb ||
80 ms == msync.ssb ||
81 ms == msync.rel ||
82 ms == msync.seq;
83 }
84 }
85 }
86
87
88 ////////////////////////////////////////////////////////////////////////////////
89 // DDoc Documentation for Atomic Functions
90 ////////////////////////////////////////////////////////////////////////////////
91
92
93 version( D_Ddoc )
94 {
95 ////////////////////////////////////////////////////////////////////////////
96 // Atomic Load
97 ////////////////////////////////////////////////////////////////////////////
98
99
100 /**
101 * Supported msync values:
102 * msync.raw
103 * msync.hlb
104 * msync.acq
105 * msync.seq
106 */
107 template atomicLoad( msync ms, T )
108 {
109 /**
110 * Refreshes the contents of 'val' from main memory. This operation is
111 * both lock-free and atomic.
112 *
113 * Params:
114 * val = The value to load. This value must be properly aligned.
115 *
116 * Returns:
117 * The loaded value.
118 */
119 T atomicLoad( ref T val )
120 {
121 return val;
122 }
123 }
124
125
126 ////////////////////////////////////////////////////////////////////////////
127 // Atomic Store
128 ////////////////////////////////////////////////////////////////////////////
129
130
131 /**
132 * Supported msync values:
133 * msync.raw
134 * msync.ssb
135 * msync.acq
136 * msync.rel
137 * msync.seq
138 */
139 template atomicStore( msync ms, T )
140 {
141 /**
142 * Stores 'newval' to the memory referenced by 'val'. This operation
143 * is both lock-free and atomic.
144 *
145 * Params:
146 * val = The destination variable.
147 * newval = The value to store.
148 */
149 void atomicStore( ref T val, T newval )
150 {
151
152 }
153 }
154
155
156 ////////////////////////////////////////////////////////////////////////////
157 // Atomic StoreIf
158 ////////////////////////////////////////////////////////////////////////////
159
160
161 /**
162 * Supported msync values:
163 * msync.raw
164 * msync.ssb
165 * msync.acq
166 * msync.rel
167 * msync.seq
168 */
169 template atomicStoreIf( msync ms, T )
170 {
171 /**
172 * Stores 'newval' to the memory referenced by 'val' if val is equal to
173 * 'equalTo'. This operation is both lock-free and atomic.
174 *
175 * Params:
176 * val = The destination variable.
177 * newval = The value to store.
178 * equalTo = The comparison value.
179 *
180 * Returns:
181 * true if the store occurred, false if not.
182 */
183 bool atomicStoreIf( ref T val, T newval, T equalTo )
184 {
185 return false;
186 }
187 }
188
189
190 ////////////////////////////////////////////////////////////////////////////
191 // Atomic Increment
192 ////////////////////////////////////////////////////////////////////////////
193
194
195 /**
196 * Supported msync values:
197 * msync.raw
198 * msync.ssb
199 * msync.acq
200 * msync.rel
201 * msync.seq
202 */
203 template atomicIncrement( msync ms, T )
204 {
205 /**
206 * This operation is only legal for built-in value and pointer types,
207 * and is equivalent to an atomic "val = val + 1" operation. This
208 * function exists to facilitate use of the optimized increment
209 * instructions provided by some architectures. If no such instruction
210 * exists on the target platform then the behavior will perform the
211 * operation using more traditional means. This operation is both
212 * lock-free and atomic.
213 *
214 * Params:
215 * val = The value to increment.
216 *
217 * Returns:
218 * The result of an atomicLoad of val immediately following the
219 * increment operation. This value is not required to be equal to the
220 * newly stored value. Thus, competing writes are allowed to occur
221 * between the increment and successive load operation.
222 */
223 T atomicIncrement( ref T val )
224 {
225 return val;
226 }
227 }
228
229
230 ////////////////////////////////////////////////////////////////////////////
231 // Atomic Decrement
232 ////////////////////////////////////////////////////////////////////////////
233
234
235 /**
236 * Supported msync values:
237 * msync.raw
238 * msync.ssb
239 * msync.acq
240 * msync.rel
241 * msync.seq
242 */
243 template atomicDecrement( msync ms, T )
244 {
245 /**
246 * This operation is only legal for built-in value and pointer types,
247 * and is equivalent to an atomic "val = val - 1" operation. This
248 * function exists to facilitate use of the optimized decrement
249 * instructions provided by some architectures. If no such instruction
250 * exists on the target platform then the behavior will perform the
251 * operation using more traditional means. This operation is both
252 * lock-free and atomic.
253 *
254 * Params:
255 * val = The value to decrement.
256 *
257 * Returns:
258 * The result of an atomicLoad of val immediately following the
259 * decrement operation. This value is not required to be equal to the
260 * newly stored value. Thus, competing writes are allowed to occur
261 * between the decrement and successive load operation.
262 */
263 T atomicDecrement( ref T val )
264 {
265 return val;
266 }
267 }
268 }
269
270
271 ////////////////////////////////////////////////////////////////////////////////
272 // LDC Atomics Implementation
273 ////////////////////////////////////////////////////////////////////////////////
274
275
276 else version( LDC )
277 {
278 import ldc.intrinsics;
279
280
281 ////////////////////////////////////////////////////////////////////////////
282 // Atomic Load
283 ////////////////////////////////////////////////////////////////////////////
284
285
// Atomically loads val with the ordering requested by ms. The load itself is
// implemented as an atomic add of zero, whose return value is the previous
// (i.e. current) contents of the location.
// NOTE(review): `isPointerType` is not declared in std.traits (which provides
// `isPointer`); presumably it comes from the Tango/LDC runtime — verify that
// it resolves in this build configuration.
286 template atomicLoad( msync ms = msync.seq, T )
287 {
288 T atomicLoad(ref T val)
289 {
// Barrier flags: (load-load, load-store, store-load, store-store, device).
290 llvm_memory_barrier(
291 ms == msync.hlb || ms == msync.acq || ms == msync.seq,
292 ms == msync.hsb || ms == msync.acq || ms == msync.seq,
293 ms == msync.slb || ms == msync.rel || ms == msync.seq,
294 ms == msync.ssb || ms == msync.rel || ms == msync.seq,
295 false);
296 static if (isPointerType!(T))
297 {
298 return cast(T)llvm_atomic_load_add!(size_t)(cast(size_t*)&val, 0);
299 }
300 else static if (is(T == bool))
301 {
// bool is stored as one byte; normalize any nonzero byte to true.
302 return llvm_atomic_load_add!(ubyte)(cast(ubyte*)&val, cast(ubyte)0) ? 1 : 0;
303 }
304 else
305 {
306 return llvm_atomic_load_add!(T)(&val, cast(T)0);
307 }
308 }
309 }
310
311
312 ////////////////////////////////////////////////////////////////////////////
313 // Atomic Store
314 ////////////////////////////////////////////////////////////////////////////
315
316
// Atomically stores newval into val with the ordering requested by ms,
// using an atomic swap (the swapped-out old value is discarded).
317 template atomicStore( msync ms = msync.seq, T )
318 {
319 void atomicStore( ref T val, T newval )
320 {
// Barrier flags: (load-load, load-store, store-load, store-store, device).
321 llvm_memory_barrier(
322 ms == msync.hlb || ms == msync.acq || ms == msync.seq,
323 ms == msync.hsb || ms == msync.acq || ms == msync.seq,
324 ms == msync.slb || ms == msync.rel || ms == msync.seq,
325 ms == msync.ssb || ms == msync.rel || ms == msync.seq,
326 false);
327 static if (isPointerType!(T))
328 {
329 llvm_atomic_swap!(size_t)(cast(size_t*)&val, cast(size_t)newval);
330 }
331 else static if (is(T == bool))
332 {
// bool is one byte; store canonical 0/1.
333 llvm_atomic_swap!(ubyte)(cast(ubyte*)&val, newval?1:0);
334 }
335 else
336 {
337 llvm_atomic_swap!(T)(&val, newval);
338 }
339 }
340 }
341
342
343 ////////////////////////////////////////////////////////////////////////////
344 // Atomic Store If
345 ////////////////////////////////////////////////////////////////////////////
346
347
// Atomically stores newval into val iff val currently equals equalTo
// (compare-and-swap). Returns true iff the store occurred.
348 template atomicStoreIf( msync ms = msync.seq, T )
349 {
350 bool atomicStoreIf( ref T val, T newval, T equalTo )
351 {
// Barrier flags: (load-load, load-store, store-load, store-store, device).
352 llvm_memory_barrier(
353 ms == msync.hlb || ms == msync.acq || ms == msync.seq,
354 ms == msync.hsb || ms == msync.acq || ms == msync.seq,
355 ms == msync.slb || ms == msync.rel || ms == msync.seq,
356 ms == msync.ssb || ms == msync.rel || ms == msync.seq,
357 false);
// llvm_atomic_cmp_swap returns the value previously held in memory;
// the CAS succeeded exactly when that previous value equals equalTo.
358 T oldval = void;
359 static if (isPointerType!(T))
360 {
361 oldval = cast(T)llvm_atomic_cmp_swap!(size_t)(cast(size_t*)&val, cast(size_t)equalTo, cast(size_t)newval);
362 }
363 else static if (is(T == bool))
364 {
// BUG FIX: was `?0:1`, which inverted the previous value and made
// the function report success exactly when the CAS failed (e.g.
// val==true, equalTo==true: CAS succeeds, old byte is nonzero, but
// the inverted oldval==false compared unequal to equalTo). A nonzero
// old byte must map to true:
oldval = llvm_atomic_cmp_swap!(ubyte)(cast(ubyte*)&val, equalTo?1:0, newval?1:0)?1:0;
366 }
367 else
368 {
369 oldval = llvm_atomic_cmp_swap!(T)(&val, equalTo, newval);
370 }
371 return oldval == equalTo;
372 }
373 }
374
375
376 ////////////////////////////////////////////////////////////////////////////
377 // Atomic Increment
378 ////////////////////////////////////////////////////////////////////////////
379
380
// Atomically increments val by one. The returned value comes from a separate
// plain read of val after the atomic add, which is permitted by the documented
// contract (competing writes may occur between the increment and the load).
381 template atomicIncrement( msync ms = msync.seq, T )
382 {
383 //
384 // NOTE: This operation is only valid for integer or pointer types
385 //
386 static assert( isValidNumericType!(T) );
387
388
389 T atomicIncrement( ref T val )
390 {
391 static if (isPointerType!(T))
392 {
// Pointer arithmetic is done in raw address units here (adds 1 byte,
// not T.sizeof) — consistent with the decrement implementation below.
393 llvm_atomic_load_add!(size_t)(cast(size_t*)&val, 1);
394 }
395 else
396 {
397 llvm_atomic_load_add!(T)(&val, cast(T)1);
398 }
399 return val;
400 }
401 }
402
403
404 ////////////////////////////////////////////////////////////////////////////
405 // Atomic Decrement
406 ////////////////////////////////////////////////////////////////////////////
407
408
// Atomically decrements val by one. The returned value comes from a separate
// plain read of val after the atomic subtract, which is permitted by the
// documented contract (competing writes may occur between the decrement and
// the load).
409 template atomicDecrement( msync ms = msync.seq, T )
410 {
411 //
412 // NOTE: This operation is only valid for integer or pointer types
413 //
414 static assert( isValidNumericType!(T) );
415
416
417 T atomicDecrement( ref T val )
418 {
419 static if (isPointerType!(T))
420 {
// Pointer arithmetic is done in raw address units here (subtracts
// 1 byte, not T.sizeof) — consistent with atomicIncrement above.
421 llvm_atomic_load_sub!(size_t)(cast(size_t*)&val, 1);
422 }
423 else
424 {
425 llvm_atomic_load_sub!(T)(&val, cast(T)1);
426 }
427 return val;
428 }
429 }
430 }
431
432 ////////////////////////////////////////////////////////////////////////////////
433 // x86 Atomic Function Implementation
434 ////////////////////////////////////////////////////////////////////////////////
435
436
437 else version( D_InlineAsm_X86 )
438 {
439 version( X86 )
440 {
441 version( BuildInfo )
442 {
443 pragma( msg, "tango.core.Atomic: using IA-32 inline asm" );
444 }
445
446 version(darwin){
447 extern(C) bool OSAtomicCompareAndSwap64(long oldValue, long newValue, long *theValue);
448 extern(C) bool OSAtomicCompareAndSwap64Barrier(long oldValue, long newValue, long *theValue);
449 }
450 version = Has64BitCAS;
451 version = Has32BitOps;
452 }
453 version( X86_64 )
454 {
455 version( BuildInfo )
456 {
457 pragma( msg, "tango.core.Atomic: using AMD64 inline asm" );
458 }
459
460 version = Has64BitOps;
461 }
462
463 private
464 {
465 ////////////////////////////////////////////////////////////////////////
466 // x86 Value Requirements
467 ////////////////////////////////////////////////////////////////////////
468
469
470 // NOTE: Strictly speaking, the x86 supports atomic operations on
471 // unaligned values. However, this is far slower than the
472 // common case, so such behavior should be prohibited.
// True when addr is a multiple of T.sizeof (natural alignment).
473 template atomicValueIsProperlyAligned( T )
474 {
475 bool atomicValueIsProperlyAligned( size_t addr )
476 {
477 return addr % T.sizeof == 0;
478 }
479 }
480
481
482 ////////////////////////////////////////////////////////////////////////
483 // x86 Synchronization Requirements
484 ////////////////////////////////////////////////////////////////////////
485
486
487 // NOTE: While x86 loads have acquire semantics for stores, it appears
488 // that independent loads may be reordered by some processors
489 // (notably the AMD64). This implies that the hoist-load barrier
490 // op requires an ordering instruction, which also extends this
491 // requirement to acquire ops (though hoist-store should not need
492 // one if support is added for this later). However, since no
493 // modern architectures will reorder dependent loads to occur
494 // before the load they depend on (except the Alpha), raw loads
495 // are actually a possible means of ordering specific sequences
496 // of loads in some instances. The original atomic<>
497 // implementation provides a 'ddhlb' ordering specifier for
498 // data-dependent loads to handle this situation, but as there
499 // are no plans to support the Alpha there is no reason to add
500 // that option here.
501 //
502 // For reference, the old behavior (acquire semantics for loads)
503 // required a memory barrier if: ms == msync.seq || isSinkOp!(ms)
// Every ordering stronger than raw pays for an ordered load.
504 template needsLoadBarrier( msync ms )
505 {
506 const bool needsLoadBarrier = ms != msync.raw;
507 }
508
509
510 // NOTE: x86 stores implicitly have release semantics so a membar is only
511 // necessary on acquires.
512 template needsStoreBarrier( msync ms )
513 {
514 const bool needsStoreBarrier = ms == msync.seq || isHoistOp!(ms);
515 }
516 }
517
518
519 ////////////////////////////////////////////////////////////////////////////
520 // Atomic Load
521 ////////////////////////////////////////////////////////////////////////////
522
523
// Atomically loads val. When an ordering barrier is required, the ordered load
// is performed with `lock cmpxchg` against the constant 42: with the accumulator
// and source both 42, memory is either left unchanged or rewritten with its own
// value, and the previous contents end up in AL/AX/EAX/RAX — which is also the
// function's return register (D inline-asm functions return in (E/R)AX, so no
// explicit return statement is needed on those paths).
524 template atomicLoad( msync ms = msync.seq, T )
525 {
526 T atomicLoad( ref T val )
527 in
528 {
529 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
530 }
531 body
532 {
533 static if( T.sizeof == byte.sizeof )
534 {
535 ////////////////////////////////////////////////////////////////
536 // 1 Byte Load
537 ////////////////////////////////////////////////////////////////
538
539
540 static if( needsLoadBarrier!(ms) )
541 {
542 asm
543 {
544 mov DL, 42;
545 mov AL, 42;
546 mov ECX, val;
547 lock;
548 cmpxchg [ECX], DL;
549 }
550 }
551 else
552 {
553 synchronized
554 {
555 return val;
556 }
557 }
558 }
559 else static if( T.sizeof == short.sizeof )
560 {
561 ////////////////////////////////////////////////////////////////
562 // 2 Byte Load
563 ////////////////////////////////////////////////////////////////
564
565 static if( needsLoadBarrier!(ms) )
566 {
567 asm
568 {
569 mov DX, 42;
570 mov AX, 42;
571 mov ECX, val;
572 lock;
573 cmpxchg [ECX], DX;
574 }
575 }
576 else
577 {
578 synchronized
579 {
580 return val;
581 }
582 }
583 }
584 else static if( T.sizeof == int.sizeof )
585 {
586 ////////////////////////////////////////////////////////////////
587 // 4 Byte Load
588 ////////////////////////////////////////////////////////////////
589
590
591 static if( needsLoadBarrier!(ms) )
592 {
593 asm
594 {
595 mov EDX, 42;
596 mov EAX, 42;
597 mov ECX, val;
598 lock;
599 cmpxchg [ECX], EDX;
600 }
601 }
602 else
603 {
604 synchronized
605 {
606 return val;
607 }
608 }
609 }
610 else static if( T.sizeof == long.sizeof )
611 {
612 ////////////////////////////////////////////////////////////////
613 // 8 Byte Load
614 ////////////////////////////////////////////////////////////////
615
616
617 version( Has64BitOps )
618 {
619 ////////////////////////////////////////////////////////////
620 // 8 Byte Load on 64-Bit Processor
621 ////////////////////////////////////////////////////////////
622
623
624 static if( needsLoadBarrier!(ms) )
625 {
// BUG FIX: this path previously used `lock; mov RAX,[RAX];`.
// The LOCK prefix is only valid with read-modify-write
// instructions; LOCK MOV raises #UD. Use the same cmpxchg
// trick as the 1/2/4-byte paths above instead.
asm
{
mov RDX, 42;
mov RAX, 42;
mov RCX, val;
lock;
cmpxchg [RCX], RDX;
}
632 }
633 else
634 {
635 synchronized
636 {
637 return val;
638 }
639 }
640 }
641 else
642 {
643 ////////////////////////////////////////////////////////////
644 // 8 Byte Load on 32-Bit Processor
645 ////////////////////////////////////////////////////////////
646
647
648 pragma( msg, "This operation is only available on 64-bit platforms." );
649 static assert( false );
650 }
651 }
652 else
653 {
654 ////////////////////////////////////////////////////////////////
655 // Not a 1, 2, 4, or 8 Byte Type
656 ////////////////////////////////////////////////////////////////
657
658
659 pragma( msg, "Invalid template type specified." );
660 static assert( false );
661 }
662 }
663 }
664
665
666 ////////////////////////////////////////////////////////////////////////////
667 // Atomic Store
668 ////////////////////////////////////////////////////////////////////////////
669
670
// Atomically stores newval into val. When a store barrier is required the
// store is performed with xchg (implicitly locked on x86); otherwise a plain
// mov suffices because x86 stores already have release semantics (see the
// needsStoreBarrier note above).
671 template atomicStore( msync ms = msync.seq, T )
672 {
673 void atomicStore( ref T val, T newval )
674 in
675 {
676 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
677 }
678 body
679 {
680 static if( T.sizeof == byte.sizeof )
681 {
682 ////////////////////////////////////////////////////////////////
683 // 1 Byte Store
684 ////////////////////////////////////////////////////////////////
685
686
687 static if( needsStoreBarrier!(ms) )
688 {
688 asm
690 {
691 mov EAX, val;
692 mov DL, newval;
// NOTE: xchg with a memory operand asserts LOCK implicitly;
// the explicit prefix is redundant but harmless.
693 lock;
694 xchg [EAX], DL;
695 }
696 }
697 else
698 {
699 asm
700 {
701 mov EAX, val;
702 mov DL, newval;
703 mov [EAX], DL;
704 }
705 }
706 }
707 else static if( T.sizeof == short.sizeof )
708 {
709 ////////////////////////////////////////////////////////////////
710 // 2 Byte Store
711 ////////////////////////////////////////////////////////////////
712
713
714 static if( needsStoreBarrier!(ms) )
715 {
716 asm
717 {
718 mov EAX, val;
719 mov DX, newval;
720 lock;
721 xchg [EAX], DX;
722 }
723 }
724 else
725 {
726 asm
727 {
728 mov EAX, val;
729 mov DX, newval;
730 mov [EAX], DX;
731 }
732 }
733 }
734 else static if( T.sizeof == int.sizeof )
735 {
736 ////////////////////////////////////////////////////////////////
737 // 4 Byte Store
738 ////////////////////////////////////////////////////////////////
739
740
741 static if( needsStoreBarrier!(ms) )
742 {
743 asm
744 {
745 mov EAX, val;
746 mov EDX, newval;
747 lock;
748 xchg [EAX], EDX;
749 }
750 }
751 else
752 {
753 asm
754 {
755 mov EAX, val;
756 mov EDX, newval;
757 mov [EAX], EDX;
758 }
759 }
760 }
761 else static if( T.sizeof == long.sizeof )
762 {
763 ////////////////////////////////////////////////////////////////
764 // 8 Byte Store
765 ////////////////////////////////////////////////////////////////
766
767
768 version( Has64BitOps )
769 {
770 ////////////////////////////////////////////////////////////
771 // 8 Byte Store on 64-Bit Processor
772 ////////////////////////////////////////////////////////////
773
774
775 static if( needsStoreBarrier!(ms) )
776 {
777 asm
778 {
779 mov RAX, val;
780 mov RDX, newval;
781 lock;
782 xchg [RAX], RDX;
783 }
784 }
785 else
786 {
787 asm
788 {
789 mov RAX, val;
790 mov RDX, newval;
791 mov [RAX], RDX;
792 }
793 }
794 }
795 else
796 {
797 ////////////////////////////////////////////////////////////
798 // 8 Byte Store on 32-Bit Processor
799 ////////////////////////////////////////////////////////////
800
801
802 pragma( msg, "This operation is only available on 64-bit platforms." );
803 static assert( false );
804 }
805 }
806 else
807 {
808 ////////////////////////////////////////////////////////////////
809 // Not a 1, 2, 4, or 8 Byte Type
810 ////////////////////////////////////////////////////////////////
811
812
813 pragma( msg, "Invalid template type specified." );
814 static assert( false );
815 }
816 }
817 }
818
819
820 ////////////////////////////////////////////////////////////////////////////
821 // Atomic Store If
822 ////////////////////////////////////////////////////////////////////////////
823
824
// Compare-and-swap: stores newval into val iff val currently equals equalTo.
// Returns true iff the store occurred. The boolean result is produced by
// `setz AL` after cmpxchg (ZF is set on success), and AL is the bool return
// register for D inline-asm functions.
825 template atomicStoreIf( msync ms = msync.seq, T )
826 {
827 bool atomicStoreIf( ref T val, T newval, T equalTo )
828 in
829 {
830 // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
831 // 4 byte alignment, so use size_t as the align type here.
832 static if( T.sizeof > size_t.sizeof )
833 assert( atomicValueIsProperlyAligned!(size_t)( cast(size_t) &val ) );
834 else
835 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
836 }
837 body
838 {
839 static if( T.sizeof == byte.sizeof )
840 {
841 ////////////////////////////////////////////////////////////////
842 // 1 Byte StoreIf
843 ////////////////////////////////////////////////////////////////
844
845
846 asm
847 {
848 mov DL, newval;
849 mov AL, equalTo;
850 mov ECX, val;
851 lock; // lock always needed to make this op atomic
852 cmpxchg [ECX], DL;
853 setz AL;
854 }
855 }
856 else static if( T.sizeof == short.sizeof )
857 {
858 ////////////////////////////////////////////////////////////////
859 // 2 Byte StoreIf
860 ////////////////////////////////////////////////////////////////
861
862
863 asm
864 {
865 mov DX, newval;
866 mov AX, equalTo;
867 mov ECX, val;
868 lock; // lock always needed to make this op atomic
869 cmpxchg [ECX], DX;
870 setz AL;
871 }
872 }
873 else static if( T.sizeof == int.sizeof )
874 {
875 ////////////////////////////////////////////////////////////////
876 // 4 Byte StoreIf
877 ////////////////////////////////////////////////////////////////
878
879
880 asm
881 {
882 mov EDX, newval;
883 mov EAX, equalTo;
884 mov ECX, val;
885 lock; // lock always needed to make this op atomic
886 cmpxchg [ECX], EDX;
887 setz AL;
888 }
889 }
890 else static if( T.sizeof == long.sizeof )
891 {
892 ////////////////////////////////////////////////////////////////
893 // 8 Byte StoreIf
894 ////////////////////////////////////////////////////////////////
895
896
897 version( Has64BitOps )
898 {
899 ////////////////////////////////////////////////////////////
900 // 8 Byte StoreIf on 64-Bit Processor
901 ////////////////////////////////////////////////////////////
902
903
904 asm
905 {
906 mov RDX, newval;
907 mov RAX, equalTo;
908 mov RCX, val;
909 lock; // lock always needed to make this op atomic
910 cmpxchg [RCX], RDX;
911 setz AL;
912 }
913 }
914 else version( Has64BitCAS )
915 {
916 ////////////////////////////////////////////////////////////
917 // 8 Byte StoreIf on 32-Bit Processor
918 ////////////////////////////////////////////////////////////
// On darwin, defer to the OS-provided 64-bit CAS; the ms==raw
// case takes the barrier-free variant.
919 version(darwin){
920 static if(ms==msync.raw){
921 return OSAtomicCompareAndSwap64(cast(long)equalTo, cast(long)newval, cast(long*)&val);
922 } else {
923 return OSAtomicCompareAndSwap64Barrier(cast(long)equalTo, cast(long)newval, cast(long*)&val);
924 }
925 } else {
// cmpxch8b compares EDX:EAX with the 8 bytes at [EDI] and, on
// match, stores ECX:EBX. EBX/EDI are saved and restored because
// the compiler reserves them.
926 asm
927 {
928 push EDI;
929 push EBX;
930 lea EDI, newval;
931 mov EBX, [EDI];
932 mov ECX, 4[EDI];
933 lea EDI, equalTo;
934 mov EAX, [EDI];
935 mov EDX, 4[EDI];
936 mov EDI, val;
937 lock; // lock always needed to make this op atomic
938 cmpxch8b [EDI];
939 setz AL;
940 pop EBX;
941 pop EDI;
942 }
943 }
944 }
945 }
946 else
947 {
948 ////////////////////////////////////////////////////////////////
949 // Not a 1, 2, 4, or 8 Byte Type
950 ////////////////////////////////////////////////////////////////
951
952
953 pragma( msg, "Invalid template type specified." );
954 static assert( false );
955 }
956 }
957 }
958
959
960 ////////////////////////////////////////////////////////////////////////////
961 // Atomic Increment
962 ////////////////////////////////////////////////////////////////////////////
963
964
// Atomically increments val. The trailing `mov (E/R)AX,[...]` reload is a
// separate, unlocked read, which matches the documented contract: the returned
// value need not equal the newly stored value when writers compete.
965 template atomicIncrement( msync ms = msync.seq, T )
966 {
967 //
968 // NOTE: This operation is only valid for integer or pointer types
969 //
970 static assert( isValidNumericType!(T) );
971
972
973 T atomicIncrement( ref T val )
974 in
975 {
976 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
977 }
978 body
979 {
980 static if( T.sizeof == byte.sizeof )
981 {
982 ////////////////////////////////////////////////////////////////
983 // 1 Byte Increment
984 ////////////////////////////////////////////////////////////////
985
986
987 asm
988 {
989 mov EAX, val;
990 lock; // lock always needed to make this op atomic
// NOTE(review): unlike the 2/4/8-byte paths this inc carries no
// size qualifier (`byte ptr`); presumably the assembler infers
// a byte operand here — verify against the target toolchain.
991 inc [EAX];
992 mov AL, [EAX];
993 }
994 }
995 else static if( T.sizeof == short.sizeof )
996 {
997 ////////////////////////////////////////////////////////////////
998 // 2 Byte Increment
999 ////////////////////////////////////////////////////////////////
1000
1001
1002 asm
1003 {
1004 mov EAX, val;
1005 lock; // lock always needed to make this op atomic
1006 inc short ptr [EAX];
1007 mov AX, [EAX];
1008 }
1009 }
1010 else static if( T.sizeof == int.sizeof )
1011 {
1012 ////////////////////////////////////////////////////////////////
1013 // 4 Byte Increment
1014 ////////////////////////////////////////////////////////////////
1015
1016
1017 asm
1018 {
1019 mov EAX, val;
1020 lock; // lock always needed to make this op atomic
1021 inc int ptr [EAX];
1022 mov EAX, [EAX];
1023 }
1024 }
1025 else static if( T.sizeof == long.sizeof )
1026 {
1027 ////////////////////////////////////////////////////////////////
1028 // 8 Byte Increment
1029 ////////////////////////////////////////////////////////////////
1030
1031
1032 version( Has64BitOps )
1033 {
1034 ////////////////////////////////////////////////////////////
1035 // 8 Byte Increment on 64-Bit Processor
1036 ////////////////////////////////////////////////////////////
1037
1038
1039 asm
1040 {
1041 mov RAX, val;
1042 lock; // lock always needed to make this op atomic
1043 inc qword ptr [RAX];
1044 mov RAX, [RAX];
1045 }
1046 }
1047 else
1048 {
1049 ////////////////////////////////////////////////////////////
1050 // 8 Byte Increment on 32-Bit Processor
1051 ////////////////////////////////////////////////////////////
1052
1053
1054 pragma( msg, "This operation is only available on 64-bit platforms." );
1055 static assert( false );
1056 }
1057 }
1058 else
1059 {
1060 ////////////////////////////////////////////////////////////////
1061 // Not a 1, 2, 4, or 8 Byte Type
1062 ////////////////////////////////////////////////////////////////
1063
1064
1065 pragma( msg, "Invalid template type specified." );
1066 static assert( false );
1067 }
1068 }
1069 }
1070
1071
1072 ////////////////////////////////////////////////////////////////////////////
1073 // Atomic Decrement
1074 ////////////////////////////////////////////////////////////////////////////
1075
1076
// Atomically decrements val. Mirrors atomicIncrement above: the trailing
// reload is a separate, unlocked read, which the documented contract permits.
1077 template atomicDecrement( msync ms = msync.seq, T )
1078 {
1079 //
1080 // NOTE: This operation is only valid for integer or pointer types
1081 //
1082 static assert( isValidNumericType!(T) );
1083
1084
1085 T atomicDecrement( ref T val )
1086 in
1087 {
1088 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
1089 }
1090 body
1091 {
1092 static if( T.sizeof == byte.sizeof )
1093 {
1094 ////////////////////////////////////////////////////////////////
1095 // 1 Byte Decrement
1096 ////////////////////////////////////////////////////////////////
1097
1098
1099 asm
1100 {
1101 mov EAX, val;
1102 lock; // lock always needed to make this op atomic
// NOTE(review): no size qualifier here, unlike the 2/4/8-byte
// paths — presumably a byte operand is inferred; verify against
// the target toolchain.
1103 dec [EAX];
1104 mov AL, [EAX];
1105 }
1106 }
1107 else static if( T.sizeof == short.sizeof )
1108 {
1109 ////////////////////////////////////////////////////////////////
1110 // 2 Byte Decrement
1111 ////////////////////////////////////////////////////////////////
1112
1113
1114 asm
1115 {
1116 mov EAX, val;
1117 lock; // lock always needed to make this op atomic
1118 dec short ptr [EAX];
1119 mov AX, [EAX];
1120 }
1121 }
1122 else static if( T.sizeof == int.sizeof )
1123 {
1124 ////////////////////////////////////////////////////////////////
1125 // 4 Byte Decrement
1126 ////////////////////////////////////////////////////////////////
1127
1128
1129 asm
1130 {
1131 mov EAX, val;
1132 lock; // lock always needed to make this op atomic
1133 dec int ptr [EAX];
1134 mov EAX, [EAX];
1135 }
1136 }
1137 else static if( T.sizeof == long.sizeof )
1138 {
1139 ////////////////////////////////////////////////////////////////
1140 // 8 Byte Decrement
1141 ////////////////////////////////////////////////////////////////
1142
1143
1144 version( Has64BitOps )
1145 {
1146 ////////////////////////////////////////////////////////////
1147 // 8 Byte Decrement on 64-Bit Processor
1148 ////////////////////////////////////////////////////////////
1149
1150
1151 asm
1152 {
1153 mov RAX, val;
1154 lock; // lock always needed to make this op atomic
1155 dec qword ptr [RAX];
1156 mov RAX, [RAX];
1157 }
1158 }
1159 else
1160 {
1161 ////////////////////////////////////////////////////////////
1162 // 8 Byte Decrement on 32-Bit Processor
1163 ////////////////////////////////////////////////////////
1164
1165
1166 pragma( msg, "This operation is only available on 64-bit platforms." );
1167 static assert( false );
1168 }
1169 }
1170 else
1171 {
1172 ////////////////////////////////////////////////////////////////
1173 // Not a 1, 2, 4, or 8 Byte Type
1174 ////////////////////////////////////////////////////////////////
1175
1176
1177 pragma( msg, "Invalid template type specified." );
1178 static assert( false );
1179 }
1180 }
1181 }
1182 }
1183 else
1184 {
1185 version( BuildInfo )
1186 {
1187 pragma( msg, "tango.core.Atomic: using synchronized ops" );
1188 }
1189
1190 private
1191 {
1192 ////////////////////////////////////////////////////////////////////////
1193 // Default Value Requirements
1194 ////////////////////////////////////////////////////////////////////////
1195
1196
// True when addr is a multiple of T.sizeof (natural alignment); same
// check as the x86 implementation.
1197 template atomicValueIsProperlyAligned( T )
1198 {
1199 bool atomicValueIsProperlyAligned( size_t addr )
1200 {
1201 return addr % T.sizeof == 0;
1202 }
1203 }
1204
1205
1206 ////////////////////////////////////////////////////////////////////////
1207 // Default Synchronization Requirements
1208 ////////////////////////////////////////////////////////////////////////
1209
1210
// With no platform-specific ordering knowledge, every non-raw load
// pays for a barrier.
1211 template needsLoadBarrier( msync ms )
1212 {
1213 const bool needsLoadBarrier = ms != msync.raw;
1214 }
1215
1216
// Likewise for stores: anything stronger than raw needs a barrier.
1217 template needsStoreBarrier( msync ms )
1218 {
1219 const bool needsStoreBarrier = ms != msync.raw;
1220 }
1221 }
1222
1223
1224 ////////////////////////////////////////////////////////////////////////////
1225 // Atomic Load
1226 ////////////////////////////////////////////////////////////////////////////
1227
1228
// Fallback load for platforms without inline-asm/intrinsic support: the read
// is serialized with a synchronized block.
1229 template atomicLoad( msync ms = msync.seq, T )
1230 {
1231 T atomicLoad( ref T val )
1232 in
1233 {
1234 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
1235 }
1236 body
1237 {
1238 static if( T.sizeof <= (void*).sizeof )
1239 {
1240 ////////////////////////////////////////////////////////////////
1241 // <= (void*).sizeof Byte Load
1242 ////////////////////////////////////////////////////////////////
1243
1244
// NOTE(review): both static branches are identical — synchronized
// already provides the strongest ordering, so needsLoadBarrier is
// effectively ignored here; presumably intentional.
1245 static if( needsLoadBarrier!(ms) )
1246 {
1247 synchronized
1248 {
1249 return val;
1250 }
1251 }
1252 else
1253 {
1254 synchronized
1255 {
1256 return val;
1257 }
1258 }
1259 }
1260 else
1261 {
1262 ////////////////////////////////////////////////////////////////
1263 // > (void*).sizeof Byte Type
1264 ////////////////////////////////////////////////////////////////
1265
1266
1267 pragma( msg, "Invalid template type specified." );
1268 static assert( false );
1269 }
1270 }
1271 }
1272
1273
1274 ////////////////////////////////////////////////////////////////////////////
1275 // Atomic Store
1276 ////////////////////////////////////////////////////////////////////////////
1277
1278
1279 template atomicStore( msync ms = msync.seq, T )
1280 {
1281 void atomicStore( ref T val, T newval )
1282 in
1283 {
1284 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
1285 }
1286 body
1287 {
1288 static if( T.sizeof <= (void*).sizeof )
1289 {
1290 ////////////////////////////////////////////////////////////////
1291 // <= (void*).sizeof Byte Store
1292 ////////////////////////////////////////////////////////////////
1293
1294
1295 static if( needsStoreBarrier!(ms) )
1296 {
1297 synchronized
1298 {
1299 val = newval;
1300 }
1301 }
1302 else
1303 {
1304 synchronized
1305 {
1306 val = newval;
1307 }
1308 }
1309 }
1310 else
1311 {
1312 ////////////////////////////////////////////////////////////////
1313 // > (void*).sizeof Byte Type
1314 ////////////////////////////////////////////////////////////////
1315
1316
1317 pragma( msg, "Invalid template type specified." );
1318 static assert( false );
1319 }
1320 }
1321 }
1322
1323
1324 ////////////////////////////////////////////////////////////////////////////
1325 // Atomic Store If
1326 ////////////////////////////////////////////////////////////////////////////
1327
1328
1329 template atomicStoreIf( msync ms = msync.seq, T )
1330 {
1331 bool atomicStoreIf( ref T val, T newval, T equalTo )
1332 in
1333 {
1334 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
1335 }
1336 body
1337 {
1338 static if( T.sizeof <= (void*).sizeof )
1339 {
1340 ////////////////////////////////////////////////////////////////
1341 // <= (void*).sizeof Byte StoreIf
1342 ////////////////////////////////////////////////////////////////
1343
1344
1345 synchronized
1346 {
1347 if( val == equalTo )
1348 {
1349 val = newval;
1350 return true;
1351 }
1352 return false;
1353 }
1354 }
1355 else
1356 {
1357 ////////////////////////////////////////////////////////////////
1358 // > (void*).sizeof Byte Type
1359 ////////////////////////////////////////////////////////////////
1360
1361
1362 pragma( msg, "Invalid template type specified." );
1363 static assert( false );
1364 }
1365 }
1366 }
1367
1368
1369 /////////////////////////////////////////////////////////////////////////////
1370 // Atomic Increment
1371 ////////////////////////////////////////////////////////////////////////////
1372
1373
1374 template atomicIncrement( msync ms = msync.seq, T )
1375 {
1376 //
1377 // NOTE: This operation is only valid for integer or pointer types
1378 //
1379 static assert( isValidNumericType!(T) );
1380
1381
1382 T atomicIncrement( ref T val )
1383 in
1384 {
1385 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
1386 }
1387 body
1388 {
1389 static if( T.sizeof <= (void*).sizeof )
1390 {
1391 ////////////////////////////////////////////////////////////////
1392 // <= (void*).sizeof Byte Increment
1393 ////////////////////////////////////////////////////////////////
1394
1395
1396 synchronized
1397 {
1398 return ++val;
1399 }
1400 }
1401 else
1402 {
1403 ////////////////////////////////////////////////////////////////
1404 // > (void*).sizeof Byte Type
1405 ////////////////////////////////////////////////////////////////
1406
1407
1408 pragma( msg, "Invalid template type specified." );
1409 static assert( false );
1410 }
1411 }
1412 }
1413
1414
1415 ////////////////////////////////////////////////////////////////////////////
1416 // Atomic Decrement
1417 ////////////////////////////////////////////////////////////////////////////
1418
1419
1420 template atomicDecrement( msync ms = msync.seq, T )
1421 {
1422 //
1423 // NOTE: This operation is only valid for integer or pointer types
1424 //
1425 static assert( isValidNumericType!(T) );
1426
1427
1428 T atomicDecrement( ref T val )
1429 in
1430 {
1431 assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
1432 }
1433 body
1434 {
1435 static if( T.sizeof <= (void*).sizeof )
1436 {
1437 ////////////////////////////////////////////////////////////////
1438 // <= (void*).sizeof Byte Decrement
1439 ////////////////////////////////////////////////////////////////
1440
1441
1442 synchronized
1443 {
1444 return --val;
1445 }
1446 }
1447 else
1448 {
1449 ////////////////////////////////////////////////////////////////
1450 // > (void*).sizeof Byte Type
1451 ////////////////////////////////////////////////////////////////
1452
1453
1454 pragma( msg, "Invalid template type specified." );
1455 static assert( false );
1456 }
1457 }
1458 }
1459 }
1460
1461
1462 ////////////////////////////////////////////////////////////////////////////////
1463 // Atomic
1464 ////////////////////////////////////////////////////////////////////////////////
1465
1466
1467 /**
1468 * This struct represents a value which will be subject to competing access.
1469 * All accesses to this value will be synchronized with main memory, and
1470 * various memory barriers may be employed for instruction ordering. Any
1471 * primitive type of size equal to or smaller than the memory bus size is
1472 * allowed, so 32-bit machines may use values with size <= int.sizeof and
1473 * 64-bit machines may use values with size <= long.sizeof. The one exception
1474 * to this rule is that architectures that support DCAS will allow double-wide
1475 * storeIf operations. The 32-bit x86 architecture, for example, supports
1476 * 64-bit storeIf operations.
1477 */
struct Atomic( T )
{
    ////////////////////////////////////////////////////////////////////////////
    // Atomic Load
    ////////////////////////////////////////////////////////////////////////////


    template load( msync ms = msync.seq )
    {
        static assert( ms == msync.raw || ms == msync.hlb ||
                       ms == msync.acq || ms == msync.seq,
                       "ms must be one of: msync.raw, msync.hlb, msync.acq, msync.seq" );

        /**
         * Refreshes the contents of this value from main memory.  This
         * operation is both lock-free and atomic.
         *
         * Returns:
         *  The loaded value.
         */
        T load()
        {
            return atomicLoad!(ms,T)( m_val );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic Store
    ////////////////////////////////////////////////////////////////////////////


    template store( msync ms = msync.seq )
    {
        static assert( ms == msync.raw || ms == msync.ssb ||
                       ms == msync.acq || ms == msync.rel ||
                       ms == msync.seq,
                       "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );

        /**
         * Stores 'newval' to the memory referenced by this value.  This
         * operation is both lock-free and atomic.
         *
         * Params:
         *  newval = The value to store.
         */
        void store( T newval )
        {
            atomicStore!(ms,T)( m_val, newval );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Atomic StoreIf
    ////////////////////////////////////////////////////////////////////////////


    template storeIf( msync ms = msync.seq )
    {
        static assert( ms == msync.raw || ms == msync.ssb ||
                       ms == msync.acq || ms == msync.rel ||
                       ms == msync.seq,
                       "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );

        /**
         * Stores 'newval' to the memory referenced by this value if val is
         * equal to 'equalTo'.  This operation is both lock-free and atomic.
         *
         * Params:
         *  newval  = The value to store.
         *  equalTo = The comparison value.
         *
         * Returns:
         *  true if the store occurred, false if not.
         */
        bool storeIf( T newval, T equalTo )
        {
            return atomicStoreIf!(ms,T)( m_val, newval, equalTo );
        }
    }


    ////////////////////////////////////////////////////////////////////////////
    // Numeric Functions
    ////////////////////////////////////////////////////////////////////////////


    /**
     * The following additional functions are available for integer types.
     */
    static if( isValidNumericType!(T) )
    {
        ////////////////////////////////////////////////////////////////////////
        // Atomic Increment
        ////////////////////////////////////////////////////////////////////////


        template increment( msync ms = msync.seq )
        {
            static assert( ms == msync.raw || ms == msync.ssb ||
                           ms == msync.acq || ms == msync.rel ||
                           ms == msync.seq,
                           "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );

            /**
             * This operation is only legal for built-in value and pointer
             * types, and is equivalent to an atomic "val = val + 1" operation.
             * This function exists to facilitate use of the optimized
             * increment instructions provided by some architecures.  If no
             * such instruction exists on the target platform then the
             * behavior will perform the operation using more traditional
             * means.  This operation is both lock-free and atomic.
             *
             * Returns:
             *  The result of an atomicLoad of val immediately following the
             *  increment operation.  This value is not required to be equal to
             *  the newly stored value.  Thus, competing writes are allowed to
             *  occur between the increment and successive load operation.
             */
            T increment()
            {
                return atomicIncrement!(ms,T)( m_val );
            }
        }


        ////////////////////////////////////////////////////////////////////////
        // Atomic Decrement
        ////////////////////////////////////////////////////////////////////////


        template decrement( msync ms = msync.seq )
        {
            static assert( ms == msync.raw || ms == msync.ssb ||
                           ms == msync.acq || ms == msync.rel ||
                           ms == msync.seq,
                           "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );

            /**
             * This operation is only legal for built-in value and pointer
             * types, and is equivalent to an atomic "val = val - 1" operation.
             * This function exists to facilitate use of the optimized
             * decrement instructions provided by some architecures.  If no
             * such instruction exists on the target platform then the behavior
             * will perform the operation using more traditional means.  This
             * operation is both lock-free and atomic.
             *
             * Returns:
             *  The result of an atomicLoad of val immediately following the
             *  decrement operation.  This value is not required to be equal to
             *  the newly stored value.  Thus, competing writes are allowed to
             *  occur between the decrement and successive load operation.
             */
            T decrement()
            {
                return atomicDecrement!(ms,T)( m_val );
            }
        }
    }

private:
    // The wrapped value.  Accessed directly (same-module access) by the
    // test-support templates in this file.
    T m_val;
}
1642
1643
1644 ////////////////////////////////////////////////////////////////////////////////
1645 // Support Code for Unit Tests
1646 ////////////////////////////////////////////////////////////////////////////////
1647
1648
1649 private
1650 {
1651 version( D_Ddoc ) {} else
1652 {
1653 template testLoad( msync ms, T )
1654 {
1655 void testLoad( T val = T.init + 1 )
1656 {
1657 T base;
1658 Atomic!(T) atom;
1659
1660 assert( atom.load!(ms)() == base );
1661 base = val;
1662 atom.m_val = val;
1663 assert( atom.load!(ms)() == base );
1664 }
1665 }
1666
1667
1668 template testStore( msync ms, T )
1669 {
1670 void testStore( T val = T.init + 1 )
1671 {
1672 T base;
1673 Atomic!(T) atom;
1674
1675 assert( atom.m_val == base );
1676 base = val;
1677 atom.store!(ms)( base );
1678 assert( atom.m_val == base );
1679 }
1680 }
1681
1682
1683 template testStoreIf( msync ms, T )
1684 {
1685 void testStoreIf( T val = T.init + 1 )
1686 {
1687 T base;
1688 Atomic!(T) atom;
1689
1690 assert( atom.m_val == base );
1691 base = val;
1692 atom.storeIf!(ms)( base, val );
1693 assert( atom.m_val != base );
1694 atom.storeIf!(ms)( base, T.init );
1695 assert( atom.m_val == base );
1696 }
1697 }
1698
1699
1700 template testIncrement( msync ms, T )
1701 {
1702 void testIncrement( T val = T.init + 1 )
1703 {
1704 T base = val;
1705 T incr = val;
1706 Atomic!(T) atom;
1707
1708 atom.m_val = val;
1709 assert( atom.m_val == base && incr == base );
1710 base = cast(T)( base + 1 );
1711 incr = atom.increment!(ms)();
1712 assert( atom.m_val == base && incr == base );
1713 }
1714 }
1715
1716
1717 template testDecrement( msync ms, T )
1718 {
1719 void testDecrement( T val = T.init + 1 )
1720 {
1721 T base = val;
1722 T decr = val;
1723 Atomic!(T) atom;
1724
1725 atom.m_val = val;
1726 assert( atom.m_val == base && decr == base );
1727 base = cast(T)( base - 1 );
1728 decr = atom.decrement!(ms)();
1729 assert( atom.m_val == base && decr == base );
1730 }
1731 }
1732
1733
        // Runs every applicable Atomic!(T) primitive against 'val', once for
        // each msync option that primitive's static assert permits.  The
        // increment/decrement tests run only for types accepted by
        // isValidNumericType.
        template testType( T )
        {
            void testType( T val = T.init +1 )
            {
                testLoad!(msync.raw, T)( val );
                testLoad!(msync.hlb, T)( val );
                testLoad!(msync.acq, T)( val );
                testLoad!(msync.seq, T)( val );

                testStore!(msync.raw, T)( val );
                testStore!(msync.ssb, T)( val );
                testStore!(msync.acq, T)( val );
                testStore!(msync.rel, T)( val );
                testStore!(msync.seq, T)( val );

                testStoreIf!(msync.raw, T)( val );
                testStoreIf!(msync.ssb, T)( val );
                testStoreIf!(msync.acq, T)( val );
                testStoreIf!(msync.rel, T)( val );
                testStoreIf!(msync.seq, T)( val );

                static if( isValidNumericType!(T) )
                {
                    testIncrement!(msync.raw, T)( val );
                    testIncrement!(msync.ssb, T)( val );
                    testIncrement!(msync.acq, T)( val );
                    testIncrement!(msync.rel, T)( val );
                    testIncrement!(msync.seq, T)( val );

                    testDecrement!(msync.raw, T)( val );
                    testDecrement!(msync.ssb, T)( val );
                    testDecrement!(msync.acq, T)( val );
                    testDecrement!(msync.rel, T)( val );
                    testDecrement!(msync.seq, T)( val );
                }
            }
        }
1771 }
1772 }
1773
1774
1775 ////////////////////////////////////////////////////////////////////////////////
1776 // Unit Tests
1777 ////////////////////////////////////////////////////////////////////////////////
1778
1779
debug( UnitTest )
{
    unittest
    {
        // 1-byte types.
        testType!(bool)();

        testType!(byte)();
        testType!(ubyte)();

        // 2-byte types.
        testType!(short)();
        testType!(ushort)();

        // 4-byte types.
        testType!(int)();
        testType!(uint)();

        // Pointer-sized type; a real address is supplied so the initial
        // value is nonzero.
        int x;
        testType!(void*)( &x );

        // 8-byte types can be fully exercised only where native 64-bit
        // atomic ops exist; with only 64-bit CAS available, just the
        // storeIf path is testable.
        version( Has64BitOps )
        {
            testType!(long)();
            testType!(ulong)();
        }
        else version( Has64BitCAS )
        {
            testStoreIf!(msync.raw, long)();
            testStoreIf!(msync.ssb, long)();
            testStoreIf!(msync.acq, long)();
            testStoreIf!(msync.rel, long)();
            testStoreIf!(msync.seq, long)();

            testStoreIf!(msync.raw, ulong)();
            testStoreIf!(msync.ssb, ulong)();
            testStoreIf!(msync.acq, ulong)();
            testStoreIf!(msync.rel, ulong)();
            testStoreIf!(msync.seq, ulong)();
        }
    }
}