/* ----------------------------------------------------------------------------
*
- * (c) The GHC Team, 2005
+ * (c) The GHC Team, 2005-2008
*
- * Macros for THREADED_RTS support
+ * Macros for multi-CPU support
*
* -------------------------------------------------------------------------- */
#if defined(THREADED_RTS)

/* ----------------------------------------------------------------------------
Atomic operations
------------------------------------------------------------------------- */
+#if !IN_STG_CODE
+// We only want the barriers, e.g. write_barrier(), declared in .hc
+// files. Defining the other inline functions here causes type
+// mismatch errors from gcc, because the generated C code is assuming
+// that there are no prototypes in scope.
+
/*
* The atomic exchange operation: xchg(p,w) exchanges the value
* pointed to by p with the value w, returning the old value.
* Used for locking closures during updates (see lockClosure() below)
* and the MVar primops.
*/
-INLINE_HEADER StgWord xchg(StgPtr p, StgWord w);
+EXTERN_INLINE StgWord xchg(StgPtr p, StgWord w);
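+// Illustrative use (the pattern closure locking relies on): grab an
+// info pointer atomically, replacing it with a WHITEHOLE:
+//   info = xchg((P_)&p->header.info, (W_)&stg_WHITEHOLE_info);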
/*
* Compare-and-swap. Atomically does this:
*
* cas(p,o,n) {
*    r = *p;
*    if (r == o) { *p = n; }
*    return r;
* }
*/
-INLINE_HEADER StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
+EXTERN_INLINE StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
+
+#endif // !IN_STG_CODE
/*
- * Prevents write operations from moving across this call in either
- * direction.
+ * Various kinds of memory barrier.
+ * write_barrier: prevents future stores occurring before preceding stores.
+ * store_load_barrier: prevents future loads occurring before preceding stores.
+ * load_load_barrier: prevents future loads occurring before earlier loads.
+ *
+ * Reference for these: "The JSR-133 Cookbook for Compiler Writers"
+ * http://gee.cs.oswego.edu/dl/jmm/cookbook.html
+ *
+ * To check whether you got these right, try the test in
+ * testsuite/tests/ghc-regress/rts/testwsdeque.c
+ * This tests the work-stealing deque implementation, which relies on
+ * properly working store_load and load_load memory barriers.
*/
-INLINE_HEADER void write_barrier(void);
+EXTERN_INLINE void write_barrier(void);
+EXTERN_INLINE void store_load_barrier(void);
+EXTERN_INLINE void load_load_barrier(void);
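+
+/* Illustrative sketch (not part of this interface): a Dekker-style
+ * handshake needs a store/load barrier between publishing our flag
+ * and reading the peer's, e.g.
+ *
+ *   flag[i] = 1;
+ *   store_load_barrier();
+ *   if (flag[j] == 0) { ... only one side can get here ... }
+ */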
/* ----------------------------------------------------------------------------
Implementations
------------------------------------------------------------------------- */
+
+#if !IN_STG_CODE
+
/*
* NB: the xchg instruction is implicitly locked, so we do not need
* a lock prefix here.
*/
-INLINE_HEADER StgWord
+EXTERN_INLINE StgWord
xchg(StgPtr p, StgWord w)
{
StgWord result;
"1: lwarx %0, 0, %2\n"
" stwcx. %1, 0, %2\n"
" bne- 1b"
- :"=r" (result)
+ :"=&r" (result)
:"r" (w), "r" (p)
);
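+    // The "&" (earlyclobber) on %0 above is essential: lwarx writes
+    // result before the inputs w and p are last read, so gcc must not
+    // give it the same register as either input.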
#elif sparc_HOST_ARCH
* CMPXCHG - the single-word atomic compare-and-exchange instruction. Used
* in the STM implementation.
*/
-INLINE_HEADER StgWord
+EXTERN_INLINE StgWord
cas(StgVolatilePtr p, StgWord o, StgWord n)
{
#if i386_HOST_ARCH || x86_64_HOST_ARCH
__asm__ __volatile__ (
- "lock/cmpxchg %3,%1"
+ "lock\ncmpxchg %3,%1"
:"=a"(o), "=m" (*(volatile unsigned int *)p)
:"0" (o), "r" (n));
return o;
#endif
}
+#endif // !IN_STG_CODE
+
/*
- * Write barrier - ensure that all preceding writes have happened
- * before all following writes.
- *
- * We need to tell both the compiler AND the CPU about the barrier.
- * This is a brute force solution; better results might be obtained by
- * using volatile type declarations to get fine-grained ordering
- * control in C, and optionally a memory barrier instruction on CPUs
- * that require it (not x86 or x86_64).
+ * We need to tell both the compiler AND the CPU about the barriers.
+ * It's no good preventing the CPU from reordering the operations if
+ * the compiler has already done so - hence the "memory" restriction
+ * on each of the barriers below.
*/
-INLINE_HEADER void
+EXTERN_INLINE void
write_barrier(void) {
#if i386_HOST_ARCH || x86_64_HOST_ARCH
__asm__ __volatile__ ("" : : : "memory");
#elif powerpc_HOST_ARCH
__asm__ __volatile__ ("lwsync" : : : "memory");
#elif sparc_HOST_ARCH
- /* Sparc in TSO mode does not require write/write barriers. */
+ /* Sparc in TSO mode does not require store/store barriers. */
__asm__ __volatile__ ("" : : : "memory");
#elif !defined(WITHSMP)
return;
#endif
}
-/* -----------------------------------------------------------------------------
- * Locking/unlocking closures
- *
- * This is used primarily in the implementation of MVars.
- * -------------------------------------------------------------------------- */
-
-#define SPIN_COUNT 4000
-
-INLINE_HEADER StgInfoTable *
-lockClosure(StgClosure *p)
-{
- StgWord info;
- do {
- nat i = 0;
- do {
- info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
- if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
- } while (++i < SPIN_COUNT);
- yieldThread();
- } while (1);
-}
-
-INLINE_HEADER void
-unlockClosure(StgClosure *p, StgInfoTable *info)
-{
- // This is a strictly ordered write, so we need a write_barrier():
- write_barrier();
- p->header.info = info;
-}
-
-/* -----------------------------------------------------------------------------
- * Spin locks
- *
- * These are simple spin-only locks as opposed to Mutexes which
- * probably spin for a while before blocking in the kernel. We use
- * these when we are sure that all our threads are actively running on
- * a CPU, eg. in the GC.
- *
- * TODO: measure whether we really need these, or whether Mutexes
- * would do (and be a bit safer if a CPU becomes loaded).
- * -------------------------------------------------------------------------- */
-
-#if defined(DEBUG)
-typedef struct StgSync_
-{
- StgWord32 lock;
- StgWord64 spin; // DEBUG version counts how much it spins
-} StgSync;
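+// On x86 a locked read-modify-write of the top-of-stack word acts as
+// a full barrier and is typically cheaper than mfence (see the
+// JSR-133 cookbook cited above).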
+EXTERN_INLINE void
+store_load_barrier(void) {
+#if i386_HOST_ARCH
+ __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory");
+#elif x86_64_HOST_ARCH
+ __asm__ __volatile__ ("lock; addq $0,0(%%rsp)" : : : "memory");
+#elif powerpc_HOST_ARCH
+ __asm__ __volatile__ ("sync" : : : "memory");
+#elif sparc_HOST_ARCH
+ __asm__ __volatile__ ("membar #StoreLoad" : : : "memory");
+#elif !defined(WITHSMP)
+ return;
#else
-typedef StgWord StgSync;
+#error memory barriers unimplemented on this architecture
#endif
-
-typedef lnat StgSyncCount;
-
-
-#if defined(DEBUG)
-
-// Debug versions of spin locks maintain a spin count
-
-// How to use:
-// To use the debug veriosn of the spin locks, a debug version of the program
-// can be run under a deugger with a break point on stat_exit. At exit time
-// of the program one can examine the state the spin count counts of various
-// spin locks to check for contention.
-
-// acquire spin lock
-INLINE_HEADER void ACQUIRE_SPIN_LOCK(StgSync * p)
-{
- StgWord32 r = 0;
- do {
- p->spin++;
- r = cas((StgVolatilePtr)&(p->lock), 1, 0);
- } while(r == 0);
- p->spin--;
-}
-
-// release spin lock
-INLINE_HEADER void RELEASE_SPIN_LOCK(StgSync * p)
-{
- write_barrier();
- p->lock = 1;
-}
-
-// initialise spin lock
-INLINE_HEADER void initSpinLock(StgSync * p)
-{
- write_barrier();
- p->lock = 1;
- p->spin = 0;
}
+EXTERN_INLINE void
+load_load_barrier(void) {
+#if i386_HOST_ARCH
+ __asm__ __volatile__ ("" : : : "memory");
+#elif x86_64_HOST_ARCH
+ __asm__ __volatile__ ("" : : : "memory");
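+    // x86 and x86-64 do not reorder loads with other loads, so a
+    // compiler-only barrier is enough.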
+#elif powerpc_HOST_ARCH
+ __asm__ __volatile__ ("lwsync" : : : "memory");
+#elif sparc_HOST_ARCH
+ /* Sparc in TSO mode does not require load/load barriers. */
+ __asm__ __volatile__ ("" : : : "memory");
+#elif !defined(WITHSMP)
+ return;
#else
-
-// acquire spin lock
-INLINE_HEADER void ACQUIRE_SPIN_LOCK(StgSync * p)
-{
- StgWord32 r = 0;
- do {
- r = cas((StgVolatilePtr)p, 1, 0);
- } while(r == 0);
-}
-
-// release spin lock
-INLINE_HEADER void RELEASE_SPIN_LOCK(StgSync * p)
-{
- write_barrier();
- (*p) = 1;
-}
-
-// init spin lock
-INLINE_HEADER void initSpinLock(StgSync * p)
-{
- write_barrier();
- (*p) = 1;
+#error memory barriers unimplemented on this architecture
+#endif
}
-#endif /* DEBUG */
-
/* ---------------------------------------------------------------------- */
#else /* !THREADED_RTS */
-#define write_barrier() /* nothing */
+#define write_barrier() /* nothing */
+#define store_load_barrier() /* nothing */
+#define load_load_barrier() /* nothing */
INLINE_HEADER StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord old = *p;
    *p = w;
    return old;
}
-INLINE_HEADER StgInfoTable *
-lockClosure(StgClosure *p)
-{ return (StgInfoTable *)p->header.info; }
-
-INLINE_HEADER void
-unlockClosure(StgClosure *p STG_UNUSED, StgInfoTable *info STG_UNUSED)
-{ /* nothing */ }
-
-// Using macros here means we don't have to ensure the argument is in scope
-#define ACQUIRE_SPIN_LOCK(p) /* nothing */
-#define RELEASE_SPIN_LOCK(p) /* nothing */
-
-INLINE_HEADER void initSpinLock(void * p STG_UNUSED)
-{ /* nothing */ }
+STATIC_INLINE StgWord
+cas(StgVolatilePtr p, StgWord o, StgWord n)
+{
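+    // No atomicity needed: without THREADED_RTS only one OS thread
+    // runs the RTS, so a plain read-modify-write is safe.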
+ StgWord result;
+ result = *p;
+ if (result == o) {
+ *p = n;
+ }
+ return result;
+}
#endif /* !THREADED_RTS */
-// Handy specialised versions of lockClosure()/unlockClosure()
-INLINE_HEADER void lockTSO(StgTSO *tso)
-{ lockClosure((StgClosure *)tso); }
-
-INLINE_HEADER void unlockTSO(StgTSO *tso)
-{ unlockClosure((StgClosure*)tso, (StgInfoTable*)&stg_TSO_info); }
-
#endif /* SMP_H */