diff --git a/includes/SMP.h b/includes/SMP.h
index 5974c96..873bbbb 100644
--- a/includes/SMP.h
+++ b/includes/SMP.h
@@ -1,8 +1,8 @@
 /* ----------------------------------------------------------------------------
  *
- * (c) The GHC Team, 2005
+ * (c) The GHC Team, 2005-2008
  *
- * Macros for THREADED_RTS support
+ * Macros for multi-CPU support
  *
  * -------------------------------------------------------------------------- */
 
@@ -18,18 +18,71 @@
 
 #if defined(THREADED_RTS)
 
-#if defined(TICKY_TICKY) 
+#if defined(TICKY_TICKY)
 #error Build options incompatible with THREADED_RTS.
 #endif
 
+/* ----------------------------------------------------------------------------
+   Atomic operations
+   ------------------------------------------------------------------------- */
+
+#if !IN_STG_CODE
+// We only want write_barrier() declared in .hc files.  Defining the
+// other inline functions here causes type mismatch errors from gcc,
+// because the generated C code is assuming that there are no
+// prototypes in scope.
+
 /*
- * XCHG - the atomic exchange instruction.  Used for locking closures
- * during updates (see lockClosure() below) and the MVar primops.
+ * The atomic exchange operation: xchg(p,w) exchanges the value
+ * pointed to by p with the value w, returning the old value.
  *
+ * Used for locking closures during updates (see lockClosure() below)
+ * and the MVar primops.
+ */
+EXTERN_INLINE StgWord xchg(StgPtr p, StgWord w);
+
+/*
+ * Compare-and-swap.  Atomically does this:
+ *
+ * cas(p,o,n) {
+ *     r = *p;
+ *     if (r == o) { *p = n };
+ *     return r;
+ * }
+ */
+EXTERN_INLINE StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
+
+#endif // !IN_STG_CODE
+
+/*
+ * Various kinds of memory barrier.
+ *  write_barrier: prevents future stores occurring before preceding stores.
+ *  store_load_barrier: prevents future loads occurring before preceding stores.
+ *  load_load_barrier: prevents future loads occurring before earlier loads.
+ *
+ * Reference for these: "The JSR-133 Cookbook for Compiler Writers"
+ * http://gee.cs.oswego.edu/dl/jmm/cookbook.html
+ *
+ * To check whether you got these right, try the test in
+ *   testsuite/tests/ghc-regress/rts/testwsdeque.c
+ * This tests the work-stealing deque implementation, which relies on
+ * properly working store_load and load_load memory barriers.
+ */
+EXTERN_INLINE void write_barrier(void);
+EXTERN_INLINE void store_load_barrier(void);
+EXTERN_INLINE void load_load_barrier(void);
+
+/* ----------------------------------------------------------------------------
+   Implementations
+   ------------------------------------------------------------------------- */
+
+#if !IN_STG_CODE
+
+/*
  * NB: the xchg instruction is implicitly locked, so we do not need
  * a lock prefix here.
  */
-INLINE_HEADER StgWord
+EXTERN_INLINE StgWord
 xchg(StgPtr p, StgWord w)
 {
     StgWord result;
@@ -45,9 +98,19 @@ xchg(StgPtr p, StgWord w)
         "1: lwarx     %0, 0, %2\n"
        "   stwcx.    %1, 0, %2\n"
         "   bne-      1b"
-        :"=r" (result)
+        :"=&r" (result)
         :"r" (w), "r" (p)
     );
+#elif sparc_HOST_ARCH
+    result = w;
+    __asm__ __volatile__ (
+        "swap %1,%0"
+        : "+r" (result), "+m" (*p)
+        : /* no input-only operands */
+    );
+#elif !defined(WITHSMP)
+    result = *p;
+    *p = w;
 #else
 #error xchg() unimplemented on this architecture
 #endif
@@ -58,12 +121,12 @@ xchg(StgPtr p, StgWord w)
  * CMPXCHG - the single-word atomic compare-and-exchange instruction.  Used
  * in the STM implementation.
  */
-INLINE_HEADER StgWord
+EXTERN_INLINE StgWord
 cas(StgVolatilePtr p, StgWord o, StgWord n)
 {
 #if i386_HOST_ARCH || x86_64_HOST_ARCH
     __asm__ __volatile__ (
-        "lock/cmpxchg %3,%1"
+        "lock\ncmpxchg %3,%1"
         :"=a"(o), "=m" (*(volatile unsigned int *)p)
         :"0" (o), "r" (n));
     return o;
@@ -76,76 +139,96 @@ cas(StgVolatilePtr p, StgWord o, StgWord n)
         "   stwcx.  %2, 0, %3\n"
         "   bne-    1b\n"
         "2:"
-        :"=r" (result)
+        :"=&r" (result)
         :"r" (o), "r" (n), "r" (p)
+        :"cc", "memory"
     );
     return result;
+#elif sparc_HOST_ARCH
+    __asm__ __volatile__ (
+        "cas [%1], %2, %0"
+        : "+r" (n)
+        : "r" (p), "r" (o)
+        : "memory"
+    );
+    return n;
+#elif !defined(WITHSMP)
+    StgWord result;
+    result = *p;
+    if (result == o) {
+        *p = n;
+    }
+    return result;
 #else
 #error cas() unimplemented on this architecture
 #endif
 }
 
+#endif // !IN_STG_CODE
+
 /*
- * Write barrier - ensure that all preceding writes have happened
- * before all following writes.
- *
- * We need to tell both the compiler AND the CPU about the barrier.
- * This is a brute force solution; better results might be obtained by
- * using volatile type declarations to get fine-grained ordering
- * control in C, and optionally a memory barrier instruction on CPUs
- * that require it (not x86 or x86_64).
+ * We need to tell both the compiler AND the CPU about the barriers.
+ * It's no good preventing the CPU from reordering the operations if
+ * the compiler has already done so - hence the "memory" restriction
+ * on each of the barriers below.
  */
-INLINE_HEADER void
-wb(void) {
+EXTERN_INLINE void
+write_barrier(void) {
 #if i386_HOST_ARCH || x86_64_HOST_ARCH
     __asm__ __volatile__ ("" : : : "memory");
 #elif powerpc_HOST_ARCH
     __asm__ __volatile__ ("lwsync" : : : "memory");
+#elif sparc_HOST_ARCH
+    /* Sparc in TSO mode does not require store/store barriers. */
+    __asm__ __volatile__ ("" : : : "memory");
+#elif !defined(WITHSMP)
+    return;
 #else
 #error memory barriers unimplemented on this architecture
 #endif
 }
 
-/*
- * Locking/unlocking closures
- *
- * This is used primarily in the implementation of MVars.
- */
-#define SPIN_COUNT 4000
-
-INLINE_HEADER StgInfoTable *
-lockClosure(StgClosure *p)
-{
-#if i386_HOST_ARCH || x86_64_HOST_ARCH || powerpc_HOST_ARCH
-    StgWord info;
-    do {
-        nat i = 0;
-        do {
-            info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
-            if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
-        } while (++i < SPIN_COUNT);
-        yieldThread();
-    } while (1);
+EXTERN_INLINE void
+store_load_barrier(void) {
+#if i386_HOST_ARCH
+    __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory");
+#elif x86_64_HOST_ARCH
+    __asm__ __volatile__ ("lock; addq $0,0(%%rsp)" : : : "memory");
+#elif powerpc_HOST_ARCH
+    __asm__ __volatile__ ("sync" : : : "memory");
+#elif sparc_HOST_ARCH
+    __asm__ __volatile__ ("membar #StoreLoad" : : : "memory");
+#elif !defined(WITHSMP)
+    return;
 #else
-    ACQUIRE_SM_LOCK
+#error memory barriers unimplemented on this architecture
 #endif
 }
 
-INLINE_HEADER void
-unlockClosure(StgClosure *p, StgInfoTable *info)
-{
-#if i386_HOST_ARCH || x86_64_HOST_ARCH || powerpc_HOST_ARCH
-    // This is a strictly ordered write, so we need a wb():
-    wb();
-    p->header.info = info;
+EXTERN_INLINE void
+load_load_barrier(void) {
+#if i386_HOST_ARCH
+    __asm__ __volatile__ ("" : : : "memory");
+#elif x86_64_HOST_ARCH
+    __asm__ __volatile__ ("" : : : "memory");
+#elif powerpc_HOST_ARCH
+    __asm__ __volatile__ ("lwsync" : : : "memory");
+#elif sparc_HOST_ARCH
+    /* Sparc in TSO mode does not require load/load barriers. */
+    __asm__ __volatile__ ("" : : : "memory");
+#elif !defined(WITHSMP)
+    return;
 #else
-    RELEASE_SM_LOCK;
+#error memory barriers unimplemented on this architecture
 #endif
 }
 
+/* ---------------------------------------------------------------------- */
 #else /* !THREADED_RTS */
 
-#define wb() /* nothing */
+#define write_barrier()      /* nothing */
+#define store_load_barrier() /* nothing */
+#define load_load_barrier()  /* nothing */
 
 INLINE_HEADER StgWord
 xchg(StgPtr p, StgWord w)
@@ -155,6 +238,17 @@ xchg(StgPtr p, StgWord w)
     return old;
 }
 
+STATIC_INLINE StgWord
+cas(StgVolatilePtr p, StgWord o, StgWord n)
+{
+    StgWord result;
+    result = *p;
+    if (result == o) {
+        *p = n;
+    }
+    return result;
+}
+
 #endif /* !THREADED_RTS */
 
 #endif /* SMP_H */
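
The locking protocol that the deleted lockClosure()/unlockClosure() implemented, and that the new xchg() comment still references, is: atomically exchange the closure's info pointer with a WHITEHOLE marker; if the old value was not already the marker, the caller now owns the closure; otherwise spin and eventually yield. The sketch below is a standalone model of that protocol, not RTS code: GCC's __atomic builtins stand in for the hand-written assembly, uintptr_t for StgWord, a hypothetical LOCKED constant for (W_)&stg_WHITEHOLE_info, sched_yield() for yieldThread(), and the model_* names are invented. Only SPIN_COUNT is taken from the removed code.

/* Standalone model of the closure-locking protocol; hypothetical names. */
#include <stdint.h>
#include <sched.h>              /* sched_yield(), standing in for yieldThread() */

#define SPIN_COUNT 4000         /* same constant the removed lockClosure() used */
#define LOCKED ((uintptr_t)1)   /* stand-in for (W_)&stg_WHITEHOLE_info */

/* Spin, atomically swapping in the LOCKED marker, until we observe a
 * value that is not already LOCKED: that value is the real info pointer
 * and we now own the closure. */
static uintptr_t model_lock(uintptr_t *header)
{
    for (;;) {
        for (int i = 0; i < SPIN_COUNT; i++) {
            uintptr_t old = __atomic_exchange_n(header, LOCKED, __ATOMIC_ACQUIRE);
            if (old != LOCKED) return old;
        }
        sched_yield();          /* give the lock holder a chance to run */
    }
}

/* Release: make the stores performed under the lock visible before the
 * header is rewritten -- the job write_barrier() did in unlockClosure().
 * A release store is a portable equivalent. */
static void model_unlock(uintptr_t *header, uintptr_t info)
{
    __atomic_store_n(header, info, __ATOMIC_RELEASE);
}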
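The three barriers are intended to be used in matched pairs across threads: a write_barrier() on the publishing side pairs with a load_load_barrier() on the reading side. A minimal sketch of that handoff idiom, again with portable __atomic fences standing in for the per-architecture assembly; the produce/try_consume names and the one-slot layout are invented for illustration:

#include <stdint.h>

static uintptr_t payload;            /* data being handed off */
static volatile uintptr_t ready = 0; /* publication flag */

/* Producer: write the data, then the flag.  The fence keeps the two
 * stores in order, as write_barrier() would. */
void produce(uintptr_t v)
{
    payload = v;
    __atomic_thread_fence(__ATOMIC_RELEASE);  /* ~ write_barrier(): store/store */
    ready = 1;
}

/* Consumer: read the flag, then the data.  The fence keeps the two
 * loads in order, as load_load_barrier() would. */
int try_consume(uintptr_t *out)
{
    if (!ready)
        return 0;
    __atomic_thread_fence(__ATOMIC_ACQUIRE); /* ~ load_load_barrier(): load/load */
    *out = payload;
    return 1;
}

On x86 and SPARC-in-TSO both of these fences cost nothing at run time beyond a compiler barrier, exactly as in the patch; only store_load_barrier() needs a real instruction there (lock; addl / membar #StoreLoad), which is why the work-stealing deque test cited in the header is the one that exercises it.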