changeset 291:0d2094800bdb signals

QList native implementation
author eldar
date Mon, 09 Nov 2009 20:49:26 +0000
parents dcc2b22d1f55
children 19498f420252
files qt/QGlobal.d qt/core/QList.d qt/core/QTypeInfo.d qt/qtd/Atomic.d
diffstat 4 files changed, 2349 insertions(+), 0 deletions(-)
--- a/qt/QGlobal.d	Mon Nov 09 06:57:48 2009 +0000
+++ b/qt/QGlobal.d	Mon Nov 09 20:49:26 2009 +0000
@@ -225,6 +225,11 @@
 //class QString;
 //char[] qPrintable(QString string) { string.toLocal8Bit().constData(); }
 //TODO(katrina) These should probably actually call into the c++ functions
+void qFatal(string str) /* fatal error; throws rather than aborting like C++'s qFatal */
+{
+    throw new Exception(str);
+}
+
 void qDebug( char[] str ) /* print debug message */
 { writeln(str); }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/qt/core/QList.d	Mon Nov 09 20:49:26 2009 +0000
@@ -0,0 +1,402 @@
+module qt.core.QList;
+
+import qt.QGlobal;
+import qt.qtd.Atomic;
+
+import core.stdc.stdlib : qRealloc = realloc, qFree = free, qMalloc = malloc;
+import core.stdc.string : memcpy, memmove;
+
+enum INT_MAX = int.max;
+
+int qAllocMore(int alloc, int extra)
+{
+    if (alloc == 0 && extra == 0)
+        return 0;
+    const int page = 1 << 12;
+    int nalloc;
+    alloc += extra;
+    if (alloc < 1<<6) {
+        nalloc = (1<<3) + ((alloc >>3) << 3);
+    } else  {
+        // don't do anything if the loop will overflow signed int.
+        if (alloc >= INT_MAX/2)
+            return INT_MAX;
+        nalloc = (alloc < page) ? 1 << 3 : page;
+        while (nalloc < alloc) {
+            if (nalloc <= 0)
+                return INT_MAX;
+            nalloc *= 2;
+        }
+    }
+    return nalloc - extra;
+}
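+
+// Quick sanity sketch of the growth policy above (expected values follow from
+// the code): small requests round up in 8-element steps, larger ones double.
+unittest
+{
+    assert(qAllocMore(0, 0) == 0);
+    assert(qAllocMore(3, 0) == 8);
+    assert(qAllocMore(8, 0) == 16);
+    assert(qAllocMore(65, 0) == 128);
+}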
+
+private int grow(int size)
+{
+    // dear compiler: don't optimize me out.
+    synchronized {
+        // (void*).sizeof is size_t, which does not implicitly narrow to the
+        // int-based bookkeeping QListData uses, hence the casts
+        int x = cast(int)(qAllocMore(cast(int)(size * (void*).sizeof),
+                                     QListData.DataHeaderSize) / (void*).sizeof);
+        return x;
+    }
+}
+
+struct QListData {
+    struct Data {
+        Atomic!int ref_;
+        int alloc, begin, end;
+        uint sharable;
+        void*[1] array;
+    }
+    
+    enum { DataHeaderSize = Data.sizeof - (void*).sizeof }
+    
+    static Data shared_null;
+    Data *d;
+    
+    static this()
+    {
+        shared_null = Data(Atomic!int(1), 0, 0, 0, true, [null]);
+    }
+    
+
+//    Data *detach(); // remove in 5.0
+
+    Data* detach2()
+    {
+        Data* x = d;
+        d = cast(Data*)(qMalloc(DataHeaderSize + x.alloc * (void*).sizeof));
+        if (!d)
+            qFatal("QList: Out of memory");
+
+        memcpy(d, x, DataHeaderSize + x.alloc * (void*).sizeof);
+        d.alloc = x.alloc;
+        d.ref_.store(1);
+        d.sharable = true;
+        if (!d.alloc)
+            d.begin = d.end = 0;
+
+        return x;
+    }
+    
+    void realloc(int alloc)
+    {
+//        assert(d.ref_ == 1);
+        Data* x = cast(Data*)(qRealloc(d, DataHeaderSize + alloc * (void*).sizeof));
+        if (!x)
+            qFatal("QList: Out of memory");
+
+        d = x;
+        d.alloc = alloc;
+        if (!alloc)
+            d.begin = d.end = 0;
+    }
+    
+    void** append()
+    {
+// #TODO        Q_ASSERT(d.ref_ == 1);
+        if (d.end == d.alloc) {
+            int n = d.end - d.begin;
+            if (d.begin > 2 * d.alloc / 3) {
+                memcpy(d.array.ptr + n, d.array.ptr + d.begin, n * (void*).sizeof);
+                d.begin = n;
+                d.end = n * 2;
+            } else {
+                realloc(grow(d.alloc + 1));
+            }
+        }
+        return d.array.ptr + d.end++;
+    }
+
+    void **append(const ref QListData l)
+    {
+//        Q_ASSERT(d.ref_ == 1);
+        int e = d.end;
+        int n = l.d.end - l.d.begin;
+        if (n) {
+            if (e + n > d.alloc)
+                realloc(grow(e + l.d.end - l.d.begin));
+            memcpy(d.array.ptr + d.end, l.d.array.ptr + l.d.begin, n * (void*).sizeof);
+            d.end += n;
+        }
+        return d.array.ptr + e;
+    }
+
+    void **prepend()
+    {
+//        Q_ASSERT(d.ref_ == 1);
+        if (d.begin == 0) {
+            if (d.end >= d.alloc / 3)
+                realloc(grow(d.alloc + 1));
+
+            if (d.end < d.alloc / 3)
+                d.begin = d.alloc - 2 * d.end;
+            else
+                d.begin = d.alloc - d.end;
+
+            memmove(d.array.ptr + d.begin, d.array.ptr, d.end * (void*).sizeof);
+            d.end += d.begin;
+        }
+        return d.array.ptr + --d.begin;
+    }
+
+    void **insert(int i)
+    {
+//        Q_ASSERT(d.ref_ == 1);
+        if (i <= 0)
+            return prepend();
+        if (i >= d.end - d.begin)
+            return append();
+
+        bool leftward = false;
+        int size = d.end - d.begin;
+
+        if (d.begin == 0) {
+            if (d.end == d.alloc) {
+                // If the array is full, we expand it and move some items rightward
+                realloc(grow(d.alloc + 1));
+            } else {
+                // If there is free space at the end of the array, we move some items rightward
+            }
+        } else {
+            if (d.end == d.alloc) {
+                // If there is free space at the beginning of the array, we move some items leftward
+                leftward = true;
+            } else {
+                // If there is free space at both ends, we move as few items as possible
+                leftward = (i < size - i);
+            }
+        }
+
+        if (leftward) {
+            --d.begin;
+            memmove(d.array.ptr + d.begin, d.array.ptr + d.begin + 1, i * (void*).sizeof);
+        } else {
+            memmove(d.array.ptr + d.begin + i + 1, d.array.ptr + d.begin + i,
+                    (size - i) * (void*).sizeof);
+            ++d.end;
+        }
+        return d.array.ptr + d.begin + i;
+    }
+
+    void remove(int i)
+    {
+//        Q_ASSERT(d.ref_ == 1);
+        i += d.begin;
+        if (i - d.begin < d.end - i) {
+            if (int offset = i - d.begin)
+                memmove(d.array.ptr + d.begin + 1, d.array.ptr + d.begin, offset * (void*).sizeof);
+            d.begin++;
+        } else {
+            if (int offset = d.end - i - 1)
+                memmove(d.array.ptr + i, d.array.ptr + i + 1, offset * (void*).sizeof);
+            d.end--;
+        }
+    }
+
+    void remove(int i, int n)
+    {
+//        Q_ASSERT(d.ref_ == 1);
+        i += d.begin;
+        int middle = i + n/2;
+        if (middle - d.begin < d.end - middle) {
+            memmove(d.array.ptr + d.begin + n, d.array.ptr + d.begin,
+                    (i - d.begin) * (void*).sizeof);
+            d.begin += n;
+        } else {
+            memmove(d.array.ptr + i, d.array.ptr + i + n,
+                    (d.end - i - n) * (void*).sizeof);
+            d.end -= n;
+        }
+    }
+
+    void move(int from, int to)
+    {
+//        Q_ASSERT(d.ref_ == 1);
+        if (from == to)
+            return;
+
+        from += d.begin;
+        to += d.begin;
+        void *t = d.array.ptr[from];
+
+        if (from < to) {
+            if (d.end == d.alloc || 3 * (to - from) < 2 * (d.end - d.begin)) {
+                memmove(d.array.ptr + from, d.array.ptr + from + 1, (to - from) * (void*).sizeof);
+            } else {
+                // optimization
+                if (int offset = from - d.begin)
+                    memmove(d.array.ptr + d.begin + 1, d.array.ptr + d.begin, offset * (void*).sizeof);
+                if (int offset = d.end - (to + 1))
+                    memmove(d.array.ptr + to + 2, d.array.ptr + to + 1, offset * (void*).sizeof);
+                ++d.begin;
+                ++d.end;
+                ++to;
+            }
+        } else {
+            if (d.begin == 0 || 3 * (from - to) < 2 * (d.end - d.begin)) {
+                memmove(d.array.ptr + to + 1, d.array.ptr + to, (from - to) * (void*).sizeof);
+            } else {
+                // optimization
+                if (int offset = to - d.begin)
+                    memmove(d.array.ptr + d.begin - 1, d.array.ptr + d.begin, offset * (void*).sizeof);
+                if (int offset = d.end - (from + 1))
+                    memmove(d.array.ptr + from, d.array.ptr + from + 1, offset * (void*).sizeof);
+                --d.begin;
+                --d.end;
+                --to;
+            }
+        }
+        d.array.ptr[to] = t;
+    }
+
+    void **erase(void **xi)
+    {
+//        Q_ASSERT(d.ref_ == 1);
+        int i = xi - (d.array.ptr + d.begin);
+        remove(i);
+        return d.array.ptr + d.begin + i;
+    }
+
+    int size() const { return d.end - d.begin; }
+    bool isEmpty() const { return d.end == d.begin; }
+    const (void*)* at(int i) const { return d.array.ptr + d.begin + i; }
+    const (void*)* begin() const { return d.array.ptr + d.begin; }
+    const (void*)* end() const { return d.array.ptr + d.end; }
+}
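+
+// A rough usage sketch of the raw pointer-array API (QList below is the real
+// client; the heap copy made by detach2 is freed manually with qFree here,
+// since the typed free path is still TODO).
+unittest
+{
+    QListData p;
+    p.d = &QListData.shared_null;
+    p.detach2();                      // p.d now points at a private heap copy
+    void** slot = p.append();         // grows the array and returns slot 0
+    *slot = cast(void*)7;
+    assert(p.size() == 1 && !p.isEmpty());
+    assert(*p.at(0) == cast(void*)7);
+    p.remove(0);
+    assert(p.isEmpty());
+    qFree(p.d);
+}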
+
+import std.stdio;
+
+struct QList(T)
+{
+    struct Node
+    {
+        void *v;
+    
+        ref T t()
+        { return *cast(T*)(&this); }
+//        { return *cast(T*)(QTypeInfo!T.isLarge || QTypeInfo!T.isStatic
+//                                       ? v : &this); }
+    }
+    
+    union {
+        QListData p;
+        QListData.Data* d;
+    }
+
+public:
+    void output()
+    {
+        writeln("QList atomic ", d.ref_.load());
+    }
+    
+    static QList!T opCall()
+    {
+        QList!T res;
+        writeln("QList opCall");
+        
+        res.d = &QListData.shared_null;
+        res.d.ref_.increment();
+        
+        return res;
+    }
+
+    this(this)
+    {
+        writeln("QList postblit");
+        d.ref_.increment();
+        if (!d.sharable)
+            detach_helper();
+    }
+
+    ~this()
+    {
+        writeln("QList ~this");
+        if (d && !d.ref_.decrement())
+            free(d);
+    }
+
+    ref QList!T opAssign(const ref QList!T l)
+    {
+        writeln("QList opAssign");
+        if (d != l.d) {
+            l.d.ref_.increment();
+            if (!d.ref_.decrement())
+                free(d);
+            d = cast(QListData.Data*)l.d;
+            if (!d.sharable)
+                detach_helper();
+        }
+        return this;
+    }
+    
+    void detach() { if (d.ref_.load() != 1) detach_helper(); }
+    
+    private void detach_helper()
+    {
+        Node *n = cast(Node*)(p.begin());
+        QListData.Data* x = p.detach2();
+        node_copy(cast(Node*)(p.begin()), cast(Node*)(p.end()), n);
+        if (!x.ref_.decrement())
+            free(x);
+    }
+    
+    void append(const T t) // fix to const ref for complex types TODO
+    {
+        detach();
+/*        static if (QTypeInfo!T.isLarge || QTypeInfo!T.isStatic)
+        {
+            node_construct(cast(Node*)(p.append()), t);
+        }
+        else*/
+        {
+            const T cpy = t;
+            node_construct(cast(Node*)(p.append()), cpy);
+        }
+    }
+    
+    ref const (T) at(int i) const
+    {
+        assert(i >= 0 && i < p.size(), "QList!T.at(): index out of range");
+        return (cast(Node*)(p.at(i))).t();
+    }
+
+    void node_construct(Node *n, const ref T t)
+    {
+/* TODO       static if (QTypeInfo!T.isLarge || QTypeInfo!T.isStatic)
+            n.v = new T(t);
+        else static if (QTypeInfo!T.isComplex)
+            new (n) T(t);
+        else*/
+            *cast(T*)(n) = t;
+    }
+    
+    void node_copy(Node *from, Node *to, Node *src)
+    {
+/* TODO       if (QTypeInfo<T>::isLarge || QTypeInfo<T>::isStatic)
+            while(from != to)
+                (from++)->v = new T(*reinterpret_cast<T*>((src++)->v));
+        else if (QTypeInfo<T>::isComplex)
+            while(from != to)
+                new (from++) T(*reinterpret_cast<T*>(src++));
+            */
+    }
+
+    void free(QListData.Data* data)
+    {
+        node_destruct(cast(Node*)(data.array.ptr + data.begin),
+                      cast(Node*)(data.array.ptr + data.end));
+        if (data.ref_.load() == 0)
+            {} // qFree(data); TODO
+    }
+    
+    void node_destruct(Node *from, Node *to)
+    {/* TODO
+        if (QTypeInfo!T.isLarge || QTypeInfo!T.isStatic)
+            while (from != to) --to, delete cast(T*)(to->v);
+        else if (QTypeInfo!T.isComplex)
+            while (from != to) --to, cast(T*)(to).~T();
+            */
+    }
+}
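+
+// An end-to-end sketch of the value API implemented so far. Plain value types
+// such as int are the safe case while the QTypeInfo-dependent paths above are
+// still TODO.
+unittest
+{
+    auto list = QList!(int)();
+    list.append(5);
+    list.append(7);
+    assert(list.at(0) == 5 && list.at(1) == 7);
+
+    auto copy = list;   // postblit shares the data and bumps ref_
+    assert(copy.at(1) == 7);
+}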
+
+extern(C) void qtd_create_QList(void *nativeId);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/qt/core/QTypeInfo.d	Mon Nov 09 20:49:26 2009 +0000
@@ -0,0 +1,124 @@
+module qt.core.QTypeInfo;
+
+//import qt.QGlobal;
+//import qt.qtd.Atomic;
+
+/*
+  The catch-all template.
+*/
+
+bool qIsDetached(T)(ref T) { return true; }
+
+struct QTypeInfo(T)
+{
+public:
+    enum {
+        isPointer = false,
+        isComplex = true,
+        isStatic = true,
+        isLarge = (T.sizeof > (void*).sizeof),
+        isDummy = false
+    }
+}
+
+struct QTypeInfo(T : T*)
+{
+public:
+    enum {
+        isPointer = true,
+        isComplex = false,
+        isStatic = false,
+        isLarge = false,
+        isDummy = false
+    }
+}
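+
+// Sanity sketch of the two templates above.
+unittest
+{
+    static assert( QTypeInfo!(int).isComplex );   // catch-all: assumed complex
+    static assert( !QTypeInfo!(int).isLarge );    // fits in a pointer slot
+    static assert( QTypeInfo!(int*).isPointer );  // pointer specialization
+    static assert( !QTypeInfo!(int*).isComplex );
+}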
+
+/+ TODO: port to D. The rest of the original C++ header is kept below for
+   reference and fenced off with D's nesting /+ +/ comments. This branch is
+   the fallback for compilers without partial template specialization, which
+   D does not need:
+
+#else
+
+template <typename T> char QTypeInfoHelper(T*(*)());
+void* QTypeInfoHelper(...);
+
+template <typename T> inline bool qIsDetached(T &) { return true; }
+
+template <typename T>
+class QTypeInfo
+{
+public:
+    enum {
+        isPointer = (1 == sizeof(QTypeInfoHelper((T(*)())0))),
+        isComplex = !isPointer,
+        isStatic = !isPointer,
+        isLarge = (sizeof(T)>sizeof(void*)),
+        isDummy = false
+    };
+};
+
+#endif /* QT_NO_PARTIAL_TEMPLATE_SPECIALIZATION */
++/
+
+/*
+   Specialize a specific type with:
+
+     Q_DECLARE_TYPEINFO(type, flags);
+
+   where 'type' is the name of the type to specialize and 'flags' is
+   logically-OR'ed combination of the flags below.
+*/
+enum { /* TYPEINFO flags */
+    Q_COMPLEX_TYPE = 0,
+    Q_PRIMITIVE_TYPE = 0x1,
+    Q_STATIC_TYPE = 0,
+    Q_MOVABLE_TYPE = 0x2,
+    Q_DUMMY_TYPE = 0x4
+}
+
+/+ TODO: port to D (a possible shape is sketched right below). Original macro:
+
+#define Q_DECLARE_TYPEINFO(TYPE, FLAGS) \
+template <> \
+class QTypeInfo<TYPE> \
+{ \
+public: \
+    enum { \
+        isComplex = (((FLAGS) & Q_PRIMITIVE_TYPE) == 0), \
+        isStatic = (((FLAGS) & (Q_MOVABLE_TYPE | Q_PRIMITIVE_TYPE)) == 0), \
+        isLarge = (sizeof(TYPE)>sizeof(void*)), \
+        isPointer = false, \
+        isDummy = (((FLAGS) & Q_DUMMY_TYPE) != 0) \
+    }; \
+    static inline const char *name() { return #TYPE; } \
+}
++/
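+
+// A possible D counterpart to the macro above (a sketch only): the flags
+// become a template value parameter and each type's specialization mixes the
+// result in, e.g.
+//
+//     struct QTypeInfo(T : QPoint)   // QPoint: hypothetical example type
+//     { mixin TypeInfoFlags!(QPoint, Q_MOVABLE_TYPE); }
+//
+template TypeInfoFlags(TYPE, int FLAGS)
+{
+    enum {
+        isComplex = (FLAGS & Q_PRIMITIVE_TYPE) == 0,
+        isStatic = (FLAGS & (Q_MOVABLE_TYPE | Q_PRIMITIVE_TYPE)) == 0,
+        isLarge = TYPE.sizeof > (void*).sizeof,
+        isPointer = false,
+        isDummy = (FLAGS & Q_DUMMY_TYPE) != 0
+    }
+    static string name() { return TYPE.stringof; }
+}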
+
+/*
+   Specialize a shared type with:
+
+     Q_DECLARE_SHARED(type);
+
+   where 'type' is the name of the type to specialize.  NOTE: shared
+   types must declare a 'bool isDetached(void) const;' member for this
+   to work.
+*/
+/+ TODO: port to D. Original C++ (qSwap specialization machinery):
+
+#if defined Q_CC_MSVC && _MSC_VER < 1300
+template <typename T>
+inline void qSwap_helper(T &value1, T &value2, T*)
+{
+    T t = value1;
+    value1 = value2;
+    value2 = t;
+}
+#define Q_DECLARE_SHARED(TYPE)                                          \
+template <> inline bool qIsDetached<TYPE>(TYPE &t) { return t.isDetached(); } \
+template <> inline void qSwap_helper<TYPE>(TYPE &value1, TYPE &value2, TYPE*) \
+{ \
+    const TYPE::DataPtr t = value1.data_ptr(); \
+    value1.data_ptr() = value2.data_ptr(); \
+    value2.data_ptr() = t; \
+}
+#else
+#define Q_DECLARE_SHARED(TYPE)                                          \
+template <> inline bool qIsDetached<TYPE>(TYPE &t) { return t.isDetached(); } \
+template <typename T> inline void qSwap(T &, T &); \
+template <> inline void qSwap<TYPE>(TYPE &value1, TYPE &value2) \
+{ \
+    const TYPE::DataPtr t = value1.data_ptr(); \
+    value1.data_ptr() = value2.data_ptr(); \
+    value2.data_ptr() = t; \
+}
+#endif
++/
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/qt/qtd/Atomic.d	Mon Nov 09 20:49:26 2009 +0000
@@ -0,0 +1,1818 @@
+/**
+ * The atomic module is intended to provide some basic support for lock-free
+ * concurrent programming.  Some common operations are defined, each of which
+ * may be performed using the specified memory barrier or a less granular
+ * barrier if the hardware does not support the version requested.  This
+ * model is based on a design by Alexander Terekhov as outlined in
+ * <a href=http://groups.google.com/groups?threadm=3E4820EE.6F408B25%40web.de>
+ * this thread</a>.  Another useful reference for memory ordering on modern
+ * architectures is <a href=http://www.linuxjournal.com/article/8211>this
+ * article by Paul McKenney</a>.
+ *
+ * Copyright: Copyright (C) 2005-2006 Sean Kelly.  All rights reserved.
+ * License:   BSD style: $(LICENSE)
+ * Authors:   Sean Kelly
+ */
+module qt.qtd.Atomic;
+//deprecated:
+////////////////////////////////////////////////////////////////////////////////
+// Synchronization Options
+////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * Memory synchronization flag.  If the supplied option is not available on the
+ * current platform then a stronger method will be used instead.
+ */
+enum msync
+{
+    raw,    /// not sequenced
+    hlb,    /// hoist-load barrier
+    hsb,    /// hoist-store barrier
+    slb,    /// sink-load barrier
+    ssb,    /// sink-store barrier
+    acq,    /// hoist-load + hoist-store barrier
+    rel,    /// sink-load + sink-store barrier
+    seq,    /// fully sequenced (acq + rel)
+}
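+
+// A short usage sketch (exercises whichever implementation below gets
+// selected; msync.seq requests the strongest ordering a backend provides).
+unittest
+{
+    int counter = 0;
+    atomicIncrement!(msync.seq)(counter);
+    assert( atomicLoad!(msync.acq)(counter) == 1 );
+    atomicStore!(msync.rel)(counter, 10);
+    assert( atomicStoreIf!(msync.seq)(counter, 11, 10) );  // CAS: 10 -> 11
+    assert( counter == 11 );
+}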
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Internal Type Checking
+////////////////////////////////////////////////////////////////////////////////
+
+
+private
+{
+    version( D_Ddoc ) {} else
+    {
+        import std.traits;
+
+        // the implementations below use the Tango-style name for the
+        // pointer check, so alias std.traits' isPointer to it
+        alias isPointer isPointerType;
+
+
+        template isValidAtomicType( T )
+        {
+            const bool isValidAtomicType = T.sizeof == byte.sizeof  ||
+                                           T.sizeof == short.sizeof ||
+                                           T.sizeof == int.sizeof   ||
+                                           T.sizeof == long.sizeof;
+        }
+
+
+        template isValidNumericType( T )
+        {
+            const bool isValidNumericType = isIntegral!( T ) ||
+                                            isPointer!( T );
+        }
+
+
+        template isHoistOp( msync ms )
+        {
+            const bool isHoistOp = ms == msync.hlb ||
+                                   ms == msync.hsb ||
+                                   ms == msync.acq ||
+                                   ms == msync.seq;
+        }
+
+
+        template isSinkOp( msync ms )
+        {
+            const bool isSinkOp = ms == msync.slb ||
+                                  ms == msync.ssb ||
+                                  ms == msync.rel ||
+                                  ms == msync.seq;
+        }
+    }
+}
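+
+// Sanity sketch for the checks above (guarded like them, since the helpers
+// do not exist in DDoc builds).
+version( D_Ddoc ) {} else
+unittest
+{
+    static assert( isValidAtomicType!(int) );
+    static assert( isValidAtomicType!(void*) );     // pointer-sized
+    static assert( !isValidAtomicType!(char[3]) );  // odd sizes unsupported
+    static assert( isValidNumericType!(int*) );
+    static assert( !isValidNumericType!(double) );
+}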
+
+
+////////////////////////////////////////////////////////////////////////////////
+// DDoc Documentation for Atomic Functions
+////////////////////////////////////////////////////////////////////////////////
+
+
+version( D_Ddoc )
+{
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Load
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    /**
+     * Supported msync values:
+     *  msync.raw
+     *  msync.hlb
+     *  msync.acq
+     *  msync.seq
+     */
+    template atomicLoad( msync ms, T )
+    {
+        /**
+         * Refreshes the contents of 'val' from main memory.  This operation is
+         * both lock-free and atomic.
+         *
+         * Params:
+         *  val = The value to load.  This value must be properly aligned.
+         *
+         * Returns:
+         *  The loaded value.
+         */
+        T atomicLoad( inout T val )
+        {
+            return val;
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Store
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    /**
+     * Supported msync values:
+     *  msync.raw
+     *  msync.ssb
+     *  msync.acq
+     *  msync.rel
+     *  msync.seq
+     */
+    template atomicStore( msync ms, T )
+    {
+        /**
+         * Stores 'newval' to the memory referenced by 'val'.  This operation
+         * is both lock-free and atomic.
+         *
+         * Params:
+         *  val     = The destination variable.
+         *  newval  = The value to store.
+         */
+        void atomicStore( inout T val, T newval )
+        {
+
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic StoreIf
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    /**
+     * Supported msync values:
+     *  msync.raw
+     *  msync.ssb
+     *  msync.acq
+     *  msync.rel
+     *  msync.seq
+     */
+    template atomicStoreIf( msync ms, T )
+    {
+        /**
+         * Stores 'newval' to the memory referenced by 'val' if val is equal to
+         * 'equalTo'.  This operation is both lock-free and atomic.
+         *
+         * Params:
+         *  val     = The destination variable.
+         *  newval  = The value to store.
+         *  equalTo = The comparison value.
+         *
+         * Returns:
+         *  true if the store occurred, false if not.
+         */
+        bool atomicStoreIf( inout T val, T newval, T equalTo )
+        {
+            return false;
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Increment
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    /**
+     * Supported msync values:
+     *  msync.raw
+     *  msync.ssb
+     *  msync.acq
+     *  msync.rel
+     *  msync.seq
+     */
+    template atomicIncrement( msync ms, T )
+    {
+        /**
+         * This operation is only legal for built-in value and pointer types,
+         * and is equivalent to an atomic "val = val + 1" operation.  This
+         * function exists to facilitate use of the optimized increment
+         * instructions provided by some architectures.  If no such instruction
+         * exists on the target platform then the operation will be performed
+         * using more traditional means.  This operation is both
+         * lock-free and atomic.
+         *
+         * Params:
+         *  val = The value to increment.
+         *
+         * Returns:
+         *  The result of an atomicLoad of val immediately following the
+         *  increment operation.  This value is not required to be equal to the
+         *  newly stored value.  Thus, competing writes are allowed to occur
+         *  between the increment and successive load operation.
+         */
+        T atomicIncrement( inout T val )
+        {
+            return val;
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Decrement
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    /**
+     * Supported msync values:
+     *  msync.raw
+     *  msync.ssb
+     *  msync.acq
+     *  msync.rel
+     *  msync.seq
+     */
+    template atomicDecrement( msync ms, T )
+    {
+        /**
+         * This operation is only legal for built-in value and pointer types,
+         * and is equivalent to an atomic "val = val - 1" operation.  This
+         * function exists to facilitate use of the optimized decrement
+         * instructions provided by some architectures.  If no such instruction
+         * exists on the target platform then the operation will be performed
+         * using more traditional means.  This operation is both
+         * lock-free and atomic.
+         *
+         * Params:
+         *  val = The value to decrement.
+         *
+         * Returns:
+         *  The result of an atomicLoad of val immediately following the
+         *  decrement operation.  This value is not required to be equal to the
+         *  newly stored value.  Thus, competing writes are allowed to occur
+         *  between the decrement and successive load operation.
+         */
+        T atomicDecrement( inout T val )
+        {
+            return val;
+        }
+    }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// LDC Atomics Implementation
+////////////////////////////////////////////////////////////////////////////////
+
+
+else version( LDC )
+{
+    import ldc.intrinsics;
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Load
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicLoad( msync ms = msync.seq, T )
+    {
+        T atomicLoad(ref T val)
+        {
+            llvm_memory_barrier(
+                ms == msync.hlb || ms == msync.acq || ms == msync.seq,
+                ms == msync.hsb || ms == msync.acq || ms == msync.seq,
+                ms == msync.slb || ms == msync.rel || ms == msync.seq,
+                ms == msync.ssb || ms == msync.rel || ms == msync.seq,
+                false);
+            static if (isPointerType!(T))
+            {
+                return cast(T)llvm_atomic_load_add!(size_t)(cast(size_t*)&val, 0);
+            }
+            else static if (is(T == bool))
+            {
+                return llvm_atomic_load_add!(ubyte)(cast(ubyte*)&val, cast(ubyte)0) ? 1 : 0;
+            }
+            else
+            {
+                return llvm_atomic_load_add!(T)(&val, cast(T)0);
+            }
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Store
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicStore( msync ms = msync.seq, T )
+    {
+        void atomicStore( ref T val, T newval )
+        {
+            llvm_memory_barrier(
+                ms == msync.hlb || ms == msync.acq || ms == msync.seq,
+                ms == msync.hsb || ms == msync.acq || ms == msync.seq,
+                ms == msync.slb || ms == msync.rel || ms == msync.seq,
+                ms == msync.ssb || ms == msync.rel || ms == msync.seq,
+                false);
+            static if (isPointerType!(T))
+            {
+                llvm_atomic_swap!(size_t)(cast(size_t*)&val, cast(size_t)newval);
+            }
+            else static if (is(T == bool))
+            {
+                llvm_atomic_swap!(ubyte)(cast(ubyte*)&val, newval?1:0);
+            }
+            else
+            {
+                llvm_atomic_swap!(T)(&val, newval);
+            }
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Store If
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicStoreIf( msync ms = msync.seq, T )
+    {
+        bool atomicStoreIf( ref T val, T newval, T equalTo )
+        {
+            llvm_memory_barrier(
+                ms == msync.hlb || ms == msync.acq || ms == msync.seq,
+                ms == msync.hsb || ms == msync.acq || ms == msync.seq,
+                ms == msync.slb || ms == msync.rel || ms == msync.seq,
+                ms == msync.ssb || ms == msync.rel || ms == msync.seq,
+                false);
+            T oldval = void;
+            static if (isPointerType!(T))
+            {
+                oldval = cast(T)llvm_atomic_cmp_swap!(size_t)(cast(size_t*)&val, cast(size_t)equalTo, cast(size_t)newval);
+            }
+            else static if (is(T == bool))
+            {
+                // cmp_swap returns the previous byte value; convert to bool
+                oldval = llvm_atomic_cmp_swap!(ubyte)(cast(ubyte*)&val, equalTo?1:0, newval?1:0) != 0;
+            }
+            else
+            {
+                oldval = llvm_atomic_cmp_swap!(T)(&val, equalTo, newval);
+            }
+            return oldval == equalTo;
+        }
+    }
+    
+    
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Increment
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicIncrement( msync ms = msync.seq, T )
+    {
+        //
+        // NOTE: This operation is only valid for integer or pointer types
+        //
+        static assert( isValidNumericType!(T) );
+
+
+        T atomicIncrement( ref T val )
+        {
+            static if (isPointerType!(T))
+            {
+                llvm_atomic_load_add!(size_t)(cast(size_t*)&val, 1);
+            }
+            else
+            {
+                llvm_atomic_load_add!(T)(&val, cast(T)1);
+            }
+            return val;
+        }
+    }
+    
+    
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Decrement
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicDecrement( msync ms = msync.seq, T )
+    {
+        //
+        // NOTE: This operation is only valid for integer or pointer types
+        //
+        static assert( isValidNumericType!(T) );
+
+
+        T atomicDecrement( ref T val )
+        {
+            static if (isPointerType!(T))
+            {
+                llvm_atomic_load_sub!(size_t)(cast(size_t*)&val, 1);
+            }
+            else
+            {
+                llvm_atomic_load_sub!(T)(&val, cast(T)1);
+            }
+            return val;
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// x86 Atomic Function Implementation
+////////////////////////////////////////////////////////////////////////////////
+
+
+else version( D_InlineAsm_X86 )
+{
+    version( X86 )
+    {
+        version( BuildInfo )
+        {
+            pragma( msg, "qt.qtd.Atomic: using IA-32 inline asm" );
+        }
+
+        version(darwin){
+            extern(C) bool OSAtomicCompareAndSwap64(long oldValue, long newValue, long *theValue);
+            extern(C) bool OSAtomicCompareAndSwap64Barrier(long oldValue, long newValue, long *theValue);
+        }
+        version = Has64BitCAS;
+        version = Has32BitOps;
+    }
+    version( X86_64 )
+    {
+        version( BuildInfo )
+        {
+            pragma( msg, "qt.qtd.Atomic: using AMD64 inline asm" );
+        }
+
+        version = Has64BitOps;
+    }
+
+    private
+    {
+        ////////////////////////////////////////////////////////////////////////
+        // x86 Value Requirements
+        ////////////////////////////////////////////////////////////////////////
+
+
+        // NOTE: Strictly speaking, the x86 supports atomic operations on
+        //       unaligned values.  However, this is far slower than the
+        //       common case, so such behavior should be prohibited.
+        template atomicValueIsProperlyAligned( T )
+        {
+            bool atomicValueIsProperlyAligned( size_t addr )
+            {
+                return addr % T.sizeof == 0;
+            }
+        }
+
+
+        ////////////////////////////////////////////////////////////////////////
+        // x86 Synchronization Requirements
+        ////////////////////////////////////////////////////////////////////////
+
+
+        // NOTE: While x86 loads have acquire semantics for stores, it appears
+        //       that independent loads may be reordered by some processors
+        //       (notably the AMD64).  This implies that the hoist-load barrier
+        //       op requires an ordering instruction, which also extends this
+        //       requirement to acquire ops (though hoist-store should not need
+        //       one if support is added for this later).  However, since no
+        //       modern architectures will reorder dependent loads to occur
+        //       before the load they depend on (except the Alpha), raw loads
+        //       are actually a possible means of ordering specific sequences
+        //       of loads in some instances.  The original atomic<>
+        //       implementation provides a 'ddhlb' ordering specifier for
+        //       data-dependent loads to handle this situation, but as there
+        //       are no plans to support the Alpha there is no reason to add
+        //       that option here.
+        //
+        //       For reference, the old behavior (acquire semantics for loads)
+        //       required a memory barrier if: ms == msync.seq || isSinkOp!(ms)
+        template needsLoadBarrier( msync ms )
+        {
+            const bool needsLoadBarrier = ms != msync.raw;
+        }
+
+
+        // NOTE: x86 stores implicitly have release semantics so a membar is only
+        //       necessary on acquires.
+        template needsStoreBarrier( msync ms )
+        {
+            const bool needsStoreBarrier = ms == msync.seq || isHoistOp!(ms);
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Load
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicLoad( msync ms = msync.seq, T )
+    {
+        T atomicLoad( inout T val )
+        in
+        {
+            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof == byte.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 1 Byte Load
+                ////////////////////////////////////////////////////////////////
+
+
+                static if( needsLoadBarrier!(ms) )
+                {
+                    asm
+                    {
+                        mov DL, 42;
+                        mov AL, 42;
+                        mov ECX, val;
+                        lock;
+                        cmpxchg [ECX], DL;
+                    }
+                }
+                else
+                {
+                    synchronized
+                    {
+                        return val;
+                    }
+                }
+            }
+            else static if( T.sizeof == short.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 2 Byte Load
+                ////////////////////////////////////////////////////////////////
+
+                static if( needsLoadBarrier!(ms) )
+                {
+                    asm
+                    {
+                        mov DX, 42;
+                        mov AX, 42;
+                        mov ECX, val;
+                        lock;
+                        cmpxchg [ECX], DX;
+                    }
+                }
+                else
+                {
+                    synchronized
+                    {
+                        return val;
+                    }
+                }
+            }
+            else static if( T.sizeof == int.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 4 Byte Load
+                ////////////////////////////////////////////////////////////////
+
+
+                static if( needsLoadBarrier!(ms) )
+                {
+                    asm
+                    {
+                        mov EDX, 42;
+                        mov EAX, 42;
+                        mov ECX, val;
+                        lock;
+                        cmpxchg [ECX], EDX;
+                    }
+                }
+                else
+                {
+                    synchronized
+                    {
+                        return val;
+                    }
+                }
+            }
+            else static if( T.sizeof == long.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 8 Byte Load
+                ////////////////////////////////////////////////////////////////
+
+
+                version( Has64BitOps )
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte Load on 64-Bit Processor
+                    ////////////////////////////////////////////////////////////
+
+
+                    static if( needsLoadBarrier!(ms) )
+                    {
+                        asm
+                        {
+                            // the LOCK prefix is not valid on MOV, so use the
+                            // same cmpxchg-against-self trick as the smaller sizes
+                            mov RDX, 42;
+                            mov RAX, 42;
+                            mov RCX, val;
+                            lock;
+                            cmpxchg [RCX], RDX;
+                        }
+                    }
+                    else
+                    {
+                        synchronized
+                        {
+                            return val;
+                        }
+                    }
+                }
+                else
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte Load on 32-Bit Processor
+                    ////////////////////////////////////////////////////////////
+
+
+                    pragma( msg, "This operation is only available on 64-bit platforms." );
+                    static assert( false );
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // Not a 1, 2, 4, or 8 Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Store
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicStore( msync ms = msync.seq, T )
+    {
+        void atomicStore( inout T val, T newval )
+        in
+        {
+            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof == byte.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 1 Byte Store
+                ////////////////////////////////////////////////////////////////
+
+
+                static if( needsStoreBarrier!(ms) )
+                {
+                    asm
+                    {
+                        mov EAX, val;
+                        mov DL, newval;
+                        lock;
+                        xchg [EAX], DL;
+                    }
+                }
+                else
+                {
+                    asm
+                    {
+                        mov EAX, val;
+                        mov DL, newval;
+                        mov [EAX], DL;
+                    }
+                }
+            }
+            else static if( T.sizeof == short.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 2 Byte Store
+                ////////////////////////////////////////////////////////////////
+
+
+                static if( needsStoreBarrier!(ms) )
+                {
+                    asm
+                    {
+                        mov EAX, val;
+                        mov DX, newval;
+                        lock;
+                        xchg [EAX], DX;
+                    }
+                }
+                else
+                {
+                    asm
+                    {
+                        mov EAX, val;
+                        mov DX, newval;
+                        mov [EAX], DX;
+                    }
+                }
+            }
+            else static if( T.sizeof == int.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 4 Byte Store
+                ////////////////////////////////////////////////////////////////
+
+
+                static if( needsStoreBarrier!(ms) )
+                {
+                    asm
+                    {
+                        mov EAX, val;
+                        mov EDX, newval;
+                        lock;
+                        xchg [EAX], EDX;
+                    }
+                }
+                else
+                {
+                    asm
+                    {
+                        mov EAX, val;
+                        mov EDX, newval;
+                        mov [EAX], EDX;
+                    }
+                }
+            }
+            else static if( T.sizeof == long.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 8 Byte Store
+                ////////////////////////////////////////////////////////////////
+
+
+                version( Has64BitOps )
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte Store on 64-Bit Processor
+                    ////////////////////////////////////////////////////////////
+
+
+                    static if( needsStoreBarrier!(ms) )
+                    {
+                        asm
+                        {
+                            mov RAX, val;
+                            mov RDX, newval;
+                            lock;
+                            xchg [RAX], RDX;
+                        }
+                    }
+                    else
+                    {
+                        asm
+                        {
+                            mov RAX, val;
+                            mov RDX, newval;
+                            mov [RAX], RDX;
+                        }
+                    }
+                }
+                else
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte Store on 32-Bit Processor
+                    ////////////////////////////////////////////////////////////
+
+
+                    pragma( msg, "This operation is only available on 64-bit platforms." );
+                    static assert( false );
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // Not a 1, 2, 4, or 8 Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Store If
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicStoreIf( msync ms = msync.seq, T )
+    {
+        bool atomicStoreIf( inout T val, T newval, T equalTo )
+        in
+        {
+            // NOTE: 32 bit x86 systems support 8 byte CAS, which only requires
+            //       4 byte alignment, so use size_t as the align type here.
+            static if( T.sizeof > size_t.sizeof )
+                assert( atomicValueIsProperlyAligned!(size_t)( cast(size_t) &val ) );
+            else
+                assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof == byte.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 1 Byte StoreIf
+                ////////////////////////////////////////////////////////////////
+
+
+                asm
+                {
+                    mov DL, newval;
+                    mov AL, equalTo;
+                    mov ECX, val;
+                    lock; // lock always needed to make this op atomic
+                    cmpxchg [ECX], DL;
+                    setz AL;
+                }
+            }
+            else static if( T.sizeof == short.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 2 Byte StoreIf
+                ////////////////////////////////////////////////////////////////
+
+
+                asm
+                {
+                    mov DX, newval;
+                    mov AX, equalTo;
+                    mov ECX, val;
+                    lock; // lock always needed to make this op atomic
+                    cmpxchg [ECX], DX;
+                    setz AL;
+                }
+            }
+            else static if( T.sizeof == int.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 4 Byte StoreIf
+                ////////////////////////////////////////////////////////////////
+
+
+                asm
+                {
+                    mov EDX, newval;
+                    mov EAX, equalTo;
+                    mov ECX, val;
+                    lock; // lock always needed to make this op atomic
+                    cmpxchg [ECX], EDX;
+                    setz AL;
+                }
+            }
+            else static if( T.sizeof == long.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 8 Byte StoreIf
+                ////////////////////////////////////////////////////////////////
+
+
+                version( Has64BitOps )
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte StoreIf on 64-Bit Processor
+                    ////////////////////////////////////////////////////////////
+
+
+                    asm
+                    {
+                        mov RDX, newval;
+                        mov RAX, equalTo;
+                        mov RCX, val;
+                        lock; // lock always needed to make this op atomic
+                        cmpxchg [RCX], RDX;
+                        setz AL;
+                    }
+                }
+                else version( Has64BitCAS )
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte StoreIf on 32-Bit Processor
+                    ////////////////////////////////////////////////////////////
+                    version(darwin){
+                        static if(ms==msync.raw){
+                            return OSAtomicCompareAndSwap64(cast(long)equalTo, cast(long)newval,  cast(long*)&val);
+                        } else {
+                            return OSAtomicCompareAndSwap64Barrier(cast(long)equalTo, cast(long)newval,  cast(long*)&val);
+                        }
+                    } else {
+                        asm
+                        {
+                            push EDI;
+                            push EBX;
+                            lea EDI, newval;
+                            mov EBX, [EDI];
+                            mov ECX, 4[EDI];
+                            lea EDI, equalTo;
+                            mov EAX, [EDI];
+                            mov EDX, 4[EDI];
+                            mov EDI, val;
+                            lock; // lock always needed to make this op atomic
+                            cmpxch8b [EDI];
+                            setz AL;
+                            pop EBX;
+                            pop EDI;
+                        }
+                    }
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // Not a 1, 2, 4, or 8 Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Increment
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicIncrement( msync ms = msync.seq, T )
+    {
+        //
+        // NOTE: This operation is only valid for integer or pointer types
+        //
+        static assert( isValidNumericType!(T) );
+
+
+        T atomicIncrement( inout T val )
+        in
+        {
+            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof == byte.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 1 Byte Increment
+                ////////////////////////////////////////////////////////////////
+
+
+                asm
+                {
+                    mov EAX, val;
+                    lock; // lock always needed to make this op atomic
+                    inc byte ptr [EAX];
+                    mov AL, [EAX];
+                }
+            }
+            else static if( T.sizeof == short.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 2 Byte Increment
+                ////////////////////////////////////////////////////////////////
+
+
+                asm
+                {
+                    mov EAX, val;
+                    lock; // lock always needed to make this op atomic
+                    inc short ptr [EAX];
+                    mov AX, [EAX];
+                }
+            }
+            else static if( T.sizeof == int.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 4 Byte Increment
+                ////////////////////////////////////////////////////////////////
+
+
+                asm
+                {
+                    mov EAX, val;
+                    lock; // lock always needed to make this op atomic
+                    inc int ptr [EAX];
+                    mov EAX, [EAX];
+                }
+            }
+            else static if( T.sizeof == long.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 8 Byte Increment
+                ////////////////////////////////////////////////////////////////
+
+
+                version( Has64BitOps )
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte Increment on 64-Bit Processor
+                    ////////////////////////////////////////////////////////////
+
+
+                    asm
+                    {
+                        mov RAX, val;
+                        lock; // lock always needed to make this op atomic
+                        inc qword ptr [RAX];
+                        mov RAX, [RAX];
+                    }
+                }
+                else
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte Increment on 32-Bit Processor
+                    ////////////////////////////////////////////////////////////
+
+
+                    pragma( msg, "This operation is only available on 64-bit platforms." );
+                    static assert( false );
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // Not a 1, 2, 4, or 8 Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Decrement
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicDecrement( msync ms = msync.seq, T )
+    {
+        //
+        // NOTE: This operation is only valid for integer or pointer types
+        //
+        static assert( isValidNumericType!(T) );
+
+
+        T atomicDecrement( inout T val )
+        in
+        {
+            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof == byte.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 1 Byte Decrement
+                ////////////////////////////////////////////////////////////////
+
+
+                asm
+                {
+                    mov EAX, val;
+                    lock; // lock always needed to make this op atomic
+                    dec byte ptr [EAX];
+                    mov AL, [EAX];
+                }
+            }
+            else static if( T.sizeof == short.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 2 Byte Decrement
+                ////////////////////////////////////////////////////////////////
+
+
+                asm
+                {
+                    mov EAX, val;
+                    lock; // lock always needed to make this op atomic
+                    dec short ptr [EAX];
+                    mov AX, [EAX];
+                }
+            }
+            else static if( T.sizeof == int.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 4 Byte Decrement
+                ////////////////////////////////////////////////////////////////
+
+
+                asm
+                {
+                    mov EAX, val;
+                    lock; // lock always needed to make this op atomic
+                    dec int ptr [EAX];
+                    mov EAX, [EAX];
+                }
+            }
+            else static if( T.sizeof == long.sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // 8 Byte Decrement
+                ////////////////////////////////////////////////////////////////
+
+
+                version( Has64BitOps )
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte Decrement on 64-Bit Processor
+                    ////////////////////////////////////////////////////////////
+
+
+                    asm
+                    {
+                        mov RAX, val;
+                        lock; // lock always needed to make this op atomic
+                        dec qword ptr [RAX];
+                        mov RAX, [RAX];
+                    }
+                }
+                else
+                {
+                    ////////////////////////////////////////////////////////////
+                    // 8 Byte Decrement on 32-Bit Processor
+                    ////////////////////////////////////////////////////////////
+
+
+                    pragma( msg, "This operation is only available on 64-bit platforms." );
+                    static assert( false );
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // Not a 1, 2, 4, or 8 Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
+}
+else
+{
+    version( BuildInfo )
+    {
+        pragma( msg, "qt.qtd.Atomic: using synchronized ops" );
+    }
+
+    private
+    {
+        ////////////////////////////////////////////////////////////////////////
+        // Default Value Requirements
+        ////////////////////////////////////////////////////////////////////////
+
+
+        template atomicValueIsProperlyAligned( T )
+        {
+            bool atomicValueIsProperlyAligned( size_t addr )
+            {
+                return addr % T.sizeof == 0;
+            }
+        }
+
+
+        ////////////////////////////////////////////////////////////////////////
+        // Default Synchronization Requirements
+        ////////////////////////////////////////////////////////////////////////
+
+
+        template needsLoadBarrier( msync ms )
+        {
+            const bool needsLoadBarrier = ms != msync.raw;
+        }
+
+
+        template needsStoreBarrier( msync ms )
+        {
+            const bool needsStoreBarrier = ms != msync.raw;
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Load
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicLoad( msync ms = msync.seq, T )
+    {
+        T atomicLoad( inout T val )
+        in
+        {
+            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof <= (void*).sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // <= (void*).sizeof Byte Load
+                ////////////////////////////////////////////////////////////////
+
+
+                static if( needsLoadBarrier!(ms) )
+                {
+                    synchronized
+                    {
+                        return val;
+                    }
+                }
+                else
+                {
+                    // a raw load requires no barrier, so an aligned,
+                    // word-sized value may simply be read from memory
+                    return val;
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // > (void*).sizeof Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Store
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicStore( msync ms = msync.seq, T )
+    {
+        void atomicStore( inout T val, T newval )
+        in
+        {
+            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof <= (void*).sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // <= (void*).sizeof Byte Store
+                ////////////////////////////////////////////////////////////////
+
+
+                static if( needsStoreBarrier!(ms) )
+                {
+                    synchronized
+                    {
+                        val = newval;
+                    }
+                }
+                else
+                {
+                    // a raw store requires no barrier, so an aligned,
+                    // word-sized value may simply be written to memory
+                    val = newval;
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // > (void*).sizeof Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Store If
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicStoreIf( msync ms = msync.seq, T )
+    {
+        bool atomicStoreIf( inout T val, T newval, T equalTo )
+        in
+        {
+            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof <= (void*).sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // <= (void*).sizeof Byte StoreIf
+                ////////////////////////////////////////////////////////////////
+
+
+                synchronized
+                {
+                    if( val == equalTo )
+                    {
+                        val = newval;
+                        return true;
+                    }
+                    return false;
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // > (void*).sizeof Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
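+
+
+    /*
+     * Editor's illustrative sketch (not part of the original module):
+     * atomicStoreIf is a compare-and-swap primitive, so other atomic
+     * read-modify-write operations can be built on top of it with a
+     * retry loop.  The hypothetical function below implements an
+     * atomic add for int values using only atomicLoad and
+     * atomicStoreIf.
+     *
+     *  int atomicAdd( inout int val, int amount )
+     *  {
+     *      int old;
+     *      do
+     *      {
+     *          old = atomicLoad!(msync.raw, int)( val );
+     *      } while( !atomicStoreIf!(msync.seq, int)( val, old + amount, old ) );
+     *      return old + amount;
+     *  }
+     */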
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Increment
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicIncrement( msync ms = msync.seq, T )
+    {
+        //
+        // NOTE: This operation is only valid for integer or pointer types
+        //
+        static assert( isValidNumericType!(T) );
+
+
+        T atomicIncrement( inout T val )
+        in
+        {
+            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof <= (void*).sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // <= (void*).sizeof Byte Increment
+                ////////////////////////////////////////////////////////////////
+
+
+                synchronized
+                {
+                    return ++val;
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // > (void*).sizeof Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Decrement
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template atomicDecrement( msync ms = msync.seq, T )
+    {
+        //
+        // NOTE: This operation is only valid for integer or pointer types
+        //
+        static assert( isValidNumericType!(T) );
+
+
+        T atomicDecrement( inout T val )
+        in
+        {
+            assert( atomicValueIsProperlyAligned!(T)( cast(size_t) &val ) );
+        }
+        body
+        {
+            static if( T.sizeof <= (void*).sizeof )
+            {
+                ////////////////////////////////////////////////////////////////
+                // <= (void*).sizeof Byte Decrement
+                ////////////////////////////////////////////////////////////////
+
+
+                synchronized
+                {
+                    return --val;
+                }
+            }
+            else
+            {
+                ////////////////////////////////////////////////////////////////
+                // > (void*).sizeof Byte Type
+                ////////////////////////////////////////////////////////////////
+
+
+                pragma( msg, "Invalid template type specified." );
+                static assert( false );
+            }
+        }
+    }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Atomic
+////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * This struct represents a value which will be subject to competing access.
+ * All accesses to this value will be synchronized with main memory, and
+ * various memory barriers may be employed for instruction ordering.  Any
+ * primitive type of size equal to or smaller than the memory bus size is
+ * allowed, so 32-bit machines may use values with size <= int.sizeof and
+ * 64-bit machines may use values with size <= long.sizeof.  The one exception
+ * to this rule is that architectures that support DCAS will allow double-wide
+ * storeIf operations.  The 32-bit x86 architecture, for example, supports
+ * 64-bit storeIf operations.
+ */
+struct Atomic( T )
+{
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Load
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template load( msync ms = msync.seq )
+    {
+        static assert( ms == msync.raw || ms == msync.hlb ||
+                       ms == msync.acq || ms == msync.seq,
+                       "ms must be one of: msync.raw, msync.hlb, msync.acq, msync.seq" );
+
+        /**
+         * Refreshes the contents of this value from main memory.  This
+         * operation is both lock-free and atomic.
+         *
+         * Returns:
+         *  The loaded value.
+         */
+        T load()
+        {
+            return atomicLoad!(ms,T)( m_val );
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic Store
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template store( msync ms = msync.seq )
+    {
+        static assert( ms == msync.raw || ms == msync.ssb ||
+                       ms == msync.acq || ms == msync.rel ||
+                       ms == msync.seq,
+                       "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );
+
+        /**
+         * Stores 'newval' to the memory referenced by this value.  This
+         * operation is both lock-free and atomic.
+         *
+         * Params:
+         *  newval  = The value to store.
+         */
+        void store( T newval )
+        {
+            atomicStore!(ms,T)( m_val, newval );
+        }
+    }
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Atomic StoreIf
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    template storeIf( msync ms = msync.seq )
+    {
+        static assert( ms == msync.raw || ms == msync.ssb ||
+                       ms == msync.acq || ms == msync.rel ||
+                       ms == msync.seq,
+                       "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );
+
+        /**
+         * Stores 'newval' to the memory referenced by this value if the
+         * current value is equal to 'equalTo'.  This operation is both
+         * lock-free and atomic.
+         *
+         * Params:
+         *  newval  = The value to store.
+         *  equalTo = The comparison value.
+         *
+         * Returns:
+         *  true if the store occurred, false if not.
+         */
+        bool storeIf( T newval, T equalTo )
+        {
+            return atomicStoreIf!(ms,T)( m_val, newval, equalTo );
+        }
+    }
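+
+
+    /*
+     * Editor's illustrative sketch (not part of the original struct):
+     * storeIf can serve as a test-and-set primitive.  The hypothetical
+     * spin lock below busy-waits until it succeeds in flipping an
+     * Atomic!(bool) from false to true.
+     *
+     *  Atomic!(bool) lock;
+     *
+     *  void spinAcquire()
+     *  {
+     *      while( !lock.storeIf!(msync.acq)( true, false ) ) {}
+     *  }
+     *
+     *  void spinRelease()
+     *  {
+     *      lock.store!(msync.rel)( false );
+     *  }
+     */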
+
+
+    ////////////////////////////////////////////////////////////////////////////
+    // Numeric Functions
+    ////////////////////////////////////////////////////////////////////////////
+
+
+    /**
+     * The following additional functions are available for integer and
+     * pointer types.
+     */
+    static if( isValidNumericType!(T) )
+    {
+        ////////////////////////////////////////////////////////////////////////
+        // Atomic Increment
+        ////////////////////////////////////////////////////////////////////////
+
+
+        template increment( msync ms = msync.seq )
+        {
+            static assert( ms == msync.raw || ms == msync.ssb ||
+                           ms == msync.acq || ms == msync.rel ||
+                           ms == msync.seq,
+                           "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );
+
+            /**
+             * This operation is only legal for built-in value and pointer
+             * types, and is equivalent to an atomic "val = val + 1" operation.
+             * This function exists to facilitate use of the optimized
+             * increment instructions provided by some architectures.  If no
+             * such instruction exists on the target platform then the
+             * operation will be performed using more traditional means.
+             * This operation is both lock-free and atomic.
+             *
+             * Returns:
+             *  The result of an atomicLoad of val immediately following the
+             *  increment operation.  This value is not required to be equal
+             *  to the newly stored value.  Thus, competing writes are allowed
+             *  to occur between the increment and the subsequent load.
+             */
+            T increment()
+            {
+                return atomicIncrement!(ms,T)( m_val );
+            }
+        }
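+
+
+        /*
+         * Editor's note illustrating the Returns caveat above: the
+         * trailing load is a separate operation from the increment
+         * itself, so if two threads increment a counter that starts at
+         * zero, both calls may observe the other thread's store and
+         * both may legitimately return 2.
+         */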
+
+
+        ////////////////////////////////////////////////////////////////////////
+        // Atomic Decrement
+        ////////////////////////////////////////////////////////////////////////
+
+
+        template decrement( msync ms = msync.seq )
+        {
+            static assert( ms == msync.raw || ms == msync.ssb ||
+                           ms == msync.acq || ms == msync.rel ||
+                           ms == msync.seq,
+                           "ms must be one of: msync.raw, msync.ssb, msync.acq, msync.rel, msync.seq" );
+
+            /**
+             * This operation is only legal for built-in value and pointer
+             * types, and is equivalent to an atomic "val = val - 1" operation.
+             * This function exists to facilitate use of the optimized
+             * decrement instructions provided by some architectures.  If no
+             * such instruction exists on the target platform then the
+             * operation will be performed using more traditional means.
+             * This operation is both lock-free and atomic.
+             *
+             * Returns:
+             *  The result of an atomicLoad of val immediately following the
+             *  decrement operation.  This value is not required to be equal
+             *  to the newly stored value.  Thus, competing writes are allowed
+             *  to occur between the decrement and the subsequent load.
+             */
+            T decrement()
+            {
+                return atomicDecrement!(ms,T)( m_val );
+            }
+        }
+    }
+
+private:
+    T   m_val;
+}
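+
+
+/*
+ * Editor's illustrative sketch (not part of the original module): a
+ * typical use of Atomic as a shared hit counter.  The names below are
+ * hypothetical.
+ *
+ *  Atomic!(int) hits;
+ *
+ *  void record()
+ *  {
+ *      hits.increment!(msync.seq)();
+ *  }
+ *
+ *  int snapshot()
+ *  {
+ *      return hits.load!(msync.acq)();
+ *  }
+ */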
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Support Code for Unit Tests
+////////////////////////////////////////////////////////////////////////////////
+
+
+private
+{
+    version( D_Ddoc ) {} else
+    {
+        template testLoad( msync ms, T )
+        {
+            void testLoad( T val = T.init + 1 )
+            {
+                T          base;
+                Atomic!(T) atom;
+
+                assert( atom.load!(ms)() == base );
+                base        = val;
+                atom.m_val  = val;
+                assert( atom.load!(ms)() == base );
+            }
+        }
+
+
+        template testStore( msync ms, T )
+        {
+            void testStore( T val = T.init + 1 )
+            {
+                T          base;
+                Atomic!(T) atom;
+
+                assert( atom.m_val == base );
+                base = val;
+                atom.store!(ms)( base );
+                assert( atom.m_val == base );
+            }
+        }
+
+
+        template testStoreIf( msync ms, T )
+        {
+            void testStoreIf( T val = T.init + 1 )
+            {
+                T          base;
+                Atomic!(T) atom;
+
+                assert( atom.m_val == base );
+                base = val;
+                atom.storeIf!(ms)( base, val );
+                assert( atom.m_val != base );
+                atom.storeIf!(ms)( base, T.init );
+                assert( atom.m_val == base );
+            }
+        }
+
+
+        template testIncrement( msync ms, T )
+        {
+            void testIncrement( T val = T.init + 1 )
+            {
+                T          base = val;
+                T          incr = val;
+                Atomic!(T) atom;
+
+                atom.m_val = val;
+                assert( atom.m_val == base && incr == base );
+                base = cast(T)( base + 1 );
+                incr = atom.increment!(ms)();
+                assert( atom.m_val == base && incr == base );
+            }
+        }
+
+
+        template testDecrement( msync ms, T )
+        {
+            void testDecrement( T val = T.init + 1 )
+            {
+                T          base = val;
+                T          decr = val;
+                Atomic!(T) atom;
+
+                atom.m_val = val;
+                assert( atom.m_val == base && decr == base );
+                base = cast(T)( base - 1 );
+                decr = atom.decrement!(ms)();
+                assert( atom.m_val == base && decr == base );
+            }
+        }
+
+
+        template testType( T )
+        {
+            void testType( T val = T.init + 1 )
+            {
+                testLoad!(msync.raw, T)( val );
+                testLoad!(msync.hlb, T)( val );
+                testLoad!(msync.acq, T)( val );
+                testLoad!(msync.seq, T)( val );
+
+                testStore!(msync.raw, T)( val );
+                testStore!(msync.ssb, T)( val );
+                testStore!(msync.acq, T)( val );
+                testStore!(msync.rel, T)( val );
+                testStore!(msync.seq, T)( val );
+
+                testStoreIf!(msync.raw, T)( val );
+                testStoreIf!(msync.ssb, T)( val );
+                testStoreIf!(msync.acq, T)( val );
+                testStoreIf!(msync.rel, T)( val );
+                testStoreIf!(msync.seq, T)( val );
+
+                static if( isValidNumericType!(T) )
+                {
+                    testIncrement!(msync.raw, T)( val );
+                    testIncrement!(msync.ssb, T)( val );
+                    testIncrement!(msync.acq, T)( val );
+                    testIncrement!(msync.rel, T)( val );
+                    testIncrement!(msync.seq, T)( val );
+
+                    testDecrement!(msync.raw, T)( val );
+                    testDecrement!(msync.ssb, T)( val );
+                    testDecrement!(msync.acq, T)( val );
+                    testDecrement!(msync.rel, T)( val );
+                    testDecrement!(msync.seq, T)( val );
+                }
+            }
+        }
+    }
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Unit Tests
+////////////////////////////////////////////////////////////////////////////////
+
+
+debug( UnitTest )
+{
+    unittest
+    {
+        testType!(bool)();
+
+        testType!(byte)();
+        testType!(ubyte)();
+
+        testType!(short)();
+        testType!(ushort)();
+
+        testType!(int)();
+        testType!(uint)();
+
+        int x;
+        testType!(void*)( &x );
+
+        version( Has64BitOps )
+        {
+            testType!(long)();
+            testType!(ulong)();
+        }
+        else version( Has64BitCAS )
+        {
+            testStoreIf!(msync.raw, long)();
+            testStoreIf!(msync.ssb, long)();
+            testStoreIf!(msync.acq, long)();
+            testStoreIf!(msync.rel, long)();
+            testStoreIf!(msync.seq, long)();
+
+            testStoreIf!(msync.raw, ulong)();
+            testStoreIf!(msync.ssb, ulong)();
+            testStoreIf!(msync.acq, ulong)();
+            testStoreIf!(msync.rel, ulong)();
+            testStoreIf!(msync.seq, ulong)();
+        }
+    }
+}