X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=includes%2FSMP.h;h=ac98feb7a3c2c7e55fd09e1567545ea9b3de2ee8;hb=88605454e4bf30d7fca2b0eea4643c5637afd5cc;hp=a91e5d56197783205fbbf2c98e61f870547bd344;hpb=ba206f7e1bafea81ebcde17a3fff88a55040e599;p=ghc-hetmet.git

diff --git a/includes/SMP.h b/includes/SMP.h
index a91e5d5..ac98feb 100644
--- a/includes/SMP.h
+++ b/includes/SMP.h
@@ -1,8 +1,8 @@
 /* ----------------------------------------------------------------------------
  *
- * (c) The GHC Team, 2005
+ * (c) The GHC Team, 2005-2008
  *
- * Macros for THREADED_RTS support
+ * Macros for multi-CPU support
  *
  * -------------------------------------------------------------------------- */
 
@@ -16,14 +16,6 @@
  * Unregisterised builds are ok, but only 1 CPU supported.
  */
 
-#ifdef CMINUSMINUS
-
-#define unlockClosure(ptr,info)                 \
-    prim %write_barrier() [];                   \
-    StgHeader_info(ptr) = info;
-
-#else
-
 #if defined(THREADED_RTS)
 
 #if defined(TICKY_TICKY)
@@ -34,6 +26,12 @@
    Atomic operations
    ------------------------------------------------------------------------- */
 
+#if !IN_STG_CODE
+// We only want write_barrier() declared in .hc files.  Defining the
+// other inline functions here causes type mismatch errors from gcc,
+// because the generated C code is assuming that there are no
+// prototypes in scope.
+
 /*
  * The atomic exchange operation: xchg(p,w) exchanges the value
  * pointed to by p with the value w, returning the old value.
@@ -41,7 +39,7 @@
  * Used for locking closures during updates (see lockClosure() below)
  * and the MVar primops.
  */
-INLINE_HEADER StgWord xchg(StgPtr p, StgWord w);
+EXTERN_INLINE StgWord xchg(StgPtr p, StgWord w);
 
 /*
  * Compare-and-swap.  Atomically does this:
@@ -52,22 +50,39 @@ INLINE_HEADER StgWord xchg(StgPtr p, StgWord w);
  *    return r;
  * }
  */
-INLINE_HEADER StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
+EXTERN_INLINE StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
+
+#endif // !IN_STG_CODE
 
 /*
- * Prevents write operations from moving across this call in either
- * direction.
+ * Various kinds of memory barrier.
+ * write_barrier: prevents future stores occurring before preceding stores.
+ * store_load_barrier: prevents future loads occurring before preceding stores.
+ * load_load_barrier: prevents future loads occurring before earlier loads.
+ *
+ * Reference for these: "The JSR-133 Cookbook for Compiler Writers"
+ * http://gee.cs.oswego.edu/dl/jmm/cookbook.html
+ *
+ * To check whether you got these right, try the test in
+ *   testsuite/tests/ghc-regress/rts/testwsdeque.c
+ * This tests the work-stealing deque implementation, which relies on
+ * properly working store_load and load_load memory barriers.
  */
-INLINE_HEADER void write_barrier(void);
+EXTERN_INLINE void write_barrier(void);
+EXTERN_INLINE void store_load_barrier(void);
+EXTERN_INLINE void load_load_barrier(void);
 
 /* ----------------------------------------------------------------------------
    Implementations
    ------------------------------------------------------------------------- */
+
+#if !IN_STG_CODE
+
 /*
  * NB: the xchg instruction is implicitly locked, so we do not need
  * a lock prefix here.
  */
-INLINE_HEADER StgWord
+EXTERN_INLINE StgWord
 xchg(StgPtr p, StgWord w)
 {
     StgWord result;
@@ -106,7 +121,7 @@ xchg(StgPtr p, StgWord w)
  * CMPXCHG - the single-word atomic compare-and-exchange instruction.  Used
  * in the STM implementation.
  */
-INLINE_HEADER StgWord
+EXTERN_INLINE StgWord
 cas(StgVolatilePtr p, StgWord o, StgWord n)
 {
 #if i386_HOST_ARCH || x86_64_HOST_ARCH
@@ -149,24 +164,22 @@ cas(StgVolatilePtr p, StgWord o, StgWord n)
 #endif
 }
 
+#endif // !IN_STG_CODE
+
 /*
- * Write barrier - ensure that all preceding writes have happened
- * before all following writes.
- *
- * We need to tell both the compiler AND the CPU about the barrier.
- * This is a brute force solution; better results might be obtained by
- * using volatile type declarations to get fine-grained ordering
- * control in C, and optionally a memory barrier instruction on CPUs
- * that require it (not x86 or x86_64).
+ * We need to tell both the compiler AND the CPU about the barriers.
+ * It's no good preventing the CPU from reordering the operations if
+ * the compiler has already done so - hence the "memory" restriction
+ * on each of the barriers below.
  */
-INLINE_HEADER void
+EXTERN_INLINE void
 write_barrier(void) {
 #if i386_HOST_ARCH || x86_64_HOST_ARCH
     __asm__ __volatile__ ("" : : : "memory");
 #elif powerpc_HOST_ARCH
     __asm__ __volatile__ ("lwsync" : : : "memory");
 #elif sparc_HOST_ARCH
-    /* Sparc in TSO mode does not require write/write barriers. */
+    /* Sparc in TSO mode does not require store/store barriers. */
     __asm__ __volatile__ ("" : : : "memory");
 #elif !defined(WITHSMP)
     return;
@@ -175,136 +188,47 @@ write_barrier(void) {
 #endif
 }
 
-/* -----------------------------------------------------------------------------
- * Locking/unlocking closures
- *
- * This is used primarily in the implementation of MVars.
- * -------------------------------------------------------------------------- */
-
-#define SPIN_COUNT 4000
-
-#ifdef KEEP_LOCKCLOSURE
-// We want a callable copy of lockClosure() so that we can refer to it
-// from .cmm files compiled using the native codegen.
-extern StgInfoTable *lockClosure(StgClosure *p);
-INLINE_ME
+EXTERN_INLINE void
+store_load_barrier(void) {
+#if i386_HOST_ARCH
+    __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory");
+#elif x86_64_HOST_ARCH
+    __asm__ __volatile__ ("lock; addq $0,0(%%rsp)" : : : "memory");
+#elif powerpc_HOST_ARCH
+    __asm__ __volatile__ ("sync" : : : "memory");
+#elif sparc_HOST_ARCH
+    __asm__ __volatile__ ("membar #StoreLoad" : : : "memory");
+#elif !defined(WITHSMP)
+    return;
 #else
-INLINE_HEADER
+#error memory barriers unimplemented on this architecture
 #endif
-StgInfoTable *
-lockClosure(StgClosure *p)
-{
-    StgWord info;
-    do {
-        nat i = 0;
-        do {
-            info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
-            if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
-        } while (++i < SPIN_COUNT);
-        yieldThread();
-    } while (1);
-}
-
-INLINE_HEADER void
-unlockClosure(StgClosure *p, const StgInfoTable *info)
-{
-    // This is a strictly ordered write, so we need a write_barrier():
-    write_barrier();
-    p->header.info = info;
 }
 
-/* -----------------------------------------------------------------------------
- * Spin locks
- *
- * These are simple spin-only locks as opposed to Mutexes which
- * probably spin for a while before blocking in the kernel.  We use
- * these when we are sure that all our threads are actively running on
- * a CPU, eg. in the GC.
- *
- * TODO: measure whether we really need these, or whether Mutexes
- * would do (and be a bit safer if a CPU becomes loaded).
- * -------------------------------------------------------------------------- */ - -#if defined(DEBUG) -typedef struct StgSync_ -{ - StgWord32 lock; - StgWord64 spin; // DEBUG version counts how much it spins -} StgSync; +EXTERN_INLINE void +load_load_barrier(void) { +#if i386_HOST_ARCH + __asm__ __volatile__ ("" : : : "memory"); +#elif x86_64_HOST_ARCH + __asm__ __volatile__ ("" : : : "memory"); +#elif powerpc_HOST_ARCH + __asm__ __volatile__ ("lwsync" : : : "memory"); +#elif sparc_HOST_ARCH + /* Sparc in TSO mode does not require load/load barriers. */ + __asm__ __volatile__ ("" : : : "memory"); +#elif !defined(WITHSMP) + return; #else -typedef StgWord StgSync; +#error memory barriers unimplemented on this architecture #endif - -typedef lnat StgSyncCount; - - -#if defined(DEBUG) - -// Debug versions of spin locks maintain a spin count - -// How to use: -// To use the debug veriosn of the spin locks, a debug version of the program -// can be run under a deugger with a break point on stat_exit. At exit time -// of the program one can examine the state the spin count counts of various -// spin locks to check for contention. - -// acquire spin lock -INLINE_HEADER void ACQUIRE_SPIN_LOCK(StgSync * p) -{ - StgWord32 r = 0; - do { - p->spin++; - r = cas((StgVolatilePtr)&(p->lock), 1, 0); - } while(r == 0); - p->spin--; -} - -// release spin lock -INLINE_HEADER void RELEASE_SPIN_LOCK(StgSync * p) -{ - write_barrier(); - p->lock = 1; -} - -// initialise spin lock -INLINE_HEADER void initSpinLock(StgSync * p) -{ - write_barrier(); - p->lock = 1; - p->spin = 0; -} - -#else - -// acquire spin lock -INLINE_HEADER void ACQUIRE_SPIN_LOCK(StgSync * p) -{ - StgWord32 r = 0; - do { - r = cas((StgVolatilePtr)p, 1, 0); - } while(r == 0); -} - -// release spin lock -INLINE_HEADER void RELEASE_SPIN_LOCK(StgSync * p) -{ - write_barrier(); - (*p) = 1; } -// init spin lock -INLINE_HEADER void initSpinLock(StgSync * p) -{ - write_barrier(); - (*p) = 1; -} - -#endif /* DEBUG */ - /* ---------------------------------------------------------------------- */ #else /* !THREADED_RTS */ -#define write_barrier() /* nothing */ +#define write_barrier() /* nothing */ +#define store_load_barrier() /* nothing */ +#define load_load_barrier() /* nothing */ INLINE_HEADER StgWord xchg(StgPtr p, StgWord w) @@ -314,30 +238,17 @@ xchg(StgPtr p, StgWord w) return old; } -INLINE_HEADER StgInfoTable * -lockClosure(StgClosure *p) -{ return (StgInfoTable *)p->header.info; } - -INLINE_HEADER void -unlockClosure(StgClosure *p STG_UNUSED, const StgInfoTable *info STG_UNUSED) -{ /* nothing */ } - -// Using macros here means we don't have to ensure the argument is in scope -#define ACQUIRE_SPIN_LOCK(p) /* nothing */ -#define RELEASE_SPIN_LOCK(p) /* nothing */ - -INLINE_HEADER void initSpinLock(void * p STG_UNUSED) -{ /* nothing */ } +STATIC_INLINE StgWord +cas(StgVolatilePtr p, StgWord o, StgWord n) +{ + StgWord result; + result = *p; + if (result == o) { + *p = n; + } + return result; +} #endif /* !THREADED_RTS */ -// Handy specialised versions of lockClosure()/unlockClosure() -INLINE_HEADER void lockTSO(StgTSO *tso) -{ lockClosure((StgClosure *)tso); } - -INLINE_HEADER void unlockTSO(StgTSO *tso) -{ unlockClosure((StgClosure*)tso, (const StgInfoTable *)&stg_TSO_info); } - #endif /* SMP_H */ - -#endif /* CMINUSMINUS */
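Two short sketches (not part of the patch above) may help when reading the primitives it declares. Both are illustrative only: the names flag1, flag2, publish_then_read_1, publish_then_read_2 and atomic_inc are made up for this note, and the include is assumed to pull in the Stg types and the SMP.h declarations in a THREADED_RTS build.

/* Sketch 1: why store_load_barrier() exists.  Two threads each publish a
 * flag and then read the other thread's flag (the classic store-buffering
 * pattern).  Without a StoreLoad barrier, x86/x86_64 store buffering allows
 * BOTH calls to return 0; with the barrier between the store and the load,
 * at least one of them must observe 1.  This is the kind of ordering the
 * work-stealing deque test mentioned in the patch depends on. */

#include "Rts.h"   /* assumed to expose StgWord and the barriers from SMP.h */

static volatile StgWord flag1 = 0, flag2 = 0;

static StgWord publish_then_read_1(void)   /* run by thread 1 */
{
    flag1 = 1;
    store_load_barrier();   /* keep the load below after the store above */
    return flag2;
}

static StgWord publish_then_read_2(void)   /* run by thread 2 */
{
    flag2 = 1;
    store_load_barrier();
    return flag1;
}

/* Sketch 2: the usual retry loop around cas(), here an atomic increment.
 * cas() returns the old value, so the swap succeeded exactly when the
 * returned value equals the value we expected to find. */

static void atomic_inc(StgVolatilePtr counter)
{
    StgWord old;
    do {
        old = *counter;
    } while (cas(counter, old, old + 1) != old);
}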