-/* -----------------------------------------------------------------------------
- * Locking/unlocking closures
- *
- * This is used primarily in the implementation of MVars.
- * -------------------------------------------------------------------------- */
-
-#define SPIN_COUNT 4000
-
-INLINE_HEADER StgInfoTable *
-lockClosure(StgClosure *p)
-{
- StgWord info;
- do {
- nat i = 0;
- do {
- info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
- if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
- } while (++i < SPIN_COUNT);
- yieldThread();
- } while (1);
-}
-
-INLINE_HEADER void
-unlockClosure(StgClosure *p, StgInfoTable *info)
-{
- // This is a strictly ordered write, so we need a write_barrier():
- write_barrier();
- p->header.info = info;
-}
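For orientation, a minimal sketch of how these two functions are meant to pair up, along the lines of the MVar code mentioned in the header comment. setPayloadLocked() is purely illustrative (not part of this header) and assumes a closure with at least one payload word:

INLINE_HEADER void
setPayloadLocked(StgClosure *p, StgClosure *v)
{
    // lockClosure() swaps in stg_WHITEHOLE_info and returns the original
    // info pointer; any other thread trying to lock p spins (and
    // eventually yields) until a real info pointer is restored.
    StgInfoTable *info = lockClosure(p);
    p->payload[0] = v;
    // unlockClosure() issues the write_barrier() and then stores the saved
    // info pointer, so the payload write is published before the unlock.
    unlockClosure(p, info);
}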
-
-/* -----------------------------------------------------------------------------
- * Spin locks
- *
- * These are simple spin-only locks, as opposed to Mutexes, which
- * typically spin for a while before blocking in the kernel. We use
- * these when we are sure that all our threads are actively running on
- * a CPU, e.g. in the GC.
- *
- * TODO: measure whether we really need these, or whether Mutexes
- * would do (and be a bit safer if a CPU becomes loaded).
- * -------------------------------------------------------------------------- */
-
-#if defined(DEBUG)
-typedef struct StgSync_
-{
- StgWord32 lock;
- StgWord64 spin; // DEBUG version counts how much it spins
-} StgSync;
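To make the intent of the comment above concrete, here is a hedged sketch of the acquire/release protocol such a spin-only lock implies, written against a bare StgWord lock word (the presumed non-DEBUG representation) and reusing the xchg()/yieldThread() pattern from lockClosure() above. The names spin_lock_acquire()/spin_lock_release() are illustrative, not the macros this header actually defines:

INLINE_HEADER void
spin_lock_acquire(StgWord *lock)
{
    do {
        nat i;
        for (i = 0; i < SPIN_COUNT; i++) {
            // xchg() returns the previous value; 0 means the lock was
            // free and is now ours.
            if (xchg((P_)lock, 1) == 0) return;
        }
        yieldThread();   // back off briefly in case the owner is descheduled
    } while (1);
}

INLINE_HEADER void
spin_lock_release(StgWord *lock)
{
    write_barrier();     // publish protected writes before dropping the lock
    *lock = 0;
}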
+// A store/load memory barrier: ensures that all stores performed before
+// the barrier are visible before any loads performed after it.
+EXTERN_INLINE void
+store_load_barrier(void) {
+#if i386_HOST_ARCH
+ __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory");
+#elif x86_64_HOST_ARCH
+ __asm__ __volatile__ ("lock; addq $0,0(%%rsp)" : : : "memory");
+#elif powerpc_HOST_ARCH
+ __asm__ __volatile__ ("sync" : : : "memory");
+#elif sparc_HOST_ARCH
+ __asm__ __volatile__ ("membar #StoreLoad" : : : "memory");
+#elif !defined(WITHSMP)
+    // Non-SMP build: nothing to order against, so no barrier is needed.
+    return;