X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=includes%2FSMP.h;h=873bbbbfc2702c7ac868aeefbbe855ee75e5d69c;hb=a4005d2d0c18ffa72ba7bd0fa052666e70e8c16e;hp=eaac770107b4ffe529cb34a71bd8e4a3f8326515;hpb=24ad9cf0325bb5fedc9f0ca8bd70f78096d8d326;p=ghc-hetmet.git

diff --git a/includes/SMP.h b/includes/SMP.h
index eaac770..873bbbb 100644
--- a/includes/SMP.h
+++ b/includes/SMP.h
@@ -16,17 +16,9 @@
  * Unregisterised builds are ok, but only 1 CPU supported.
  */
 
-#ifdef CMINUSMINUS
-
-#define unlockClosure(ptr,info) \
-    prim %write_barrier() []; \
-    StgHeader_info(ptr) = info;
-
-#else
-
 #if defined(THREADED_RTS)
 
-#if defined(TICKY_TICKY)
+#if defined(TICKY_TICKY)
 #error Build options incompatible with THREADED_RTS.
 #endif
 
@@ -34,6 +26,12 @@
    Atomic operations
    ------------------------------------------------------------------------- */
 
+#if !IN_STG_CODE
+// We only want write_barrier() declared in .hc files.  Defining the
+// other inline functions here causes type mismatch errors from gcc,
+// because the generated C code is assuming that there are no
+// prototypes in scope.
+
 /*
  * The atomic exchange operation: xchg(p,w) exchanges the value
  * pointed to by p with the value w, returning the old value.
@@ -54,15 +52,32 @@ EXTERN_INLINE StgWord xchg(StgPtr p, StgWord w);
  */
 EXTERN_INLINE StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
 
+#endif // !IN_STG_CODE
+
 /*
- * Prevents write operations from moving across this call in either
- * direction.
+ * Various kinds of memory barrier.
+ *  write_barrier: prevents future stores occurring before preceding stores.
+ *  store_load_barrier: prevents future loads occurring before preceding stores.
+ *  load_load_barrier: prevents future loads occurring before earlier loads.
+ *
+ * Reference for these: "The JSR-133 Cookbook for Compiler Writers"
+ * http://gee.cs.oswego.edu/dl/jmm/cookbook.html
+ *
+ * To check whether you got these right, try the test in
+ *   testsuite/tests/ghc-regress/rts/testwsdeque.c
+ * This tests the work-stealing deque implementation, which relies on
+ * properly working store_load and load_load memory barriers.
  */
 EXTERN_INLINE void write_barrier(void);
+EXTERN_INLINE void store_load_barrier(void);
+EXTERN_INLINE void load_load_barrier(void);
 
 /* ----------------------------------------------------------------------------
    Implementations
    ------------------------------------------------------------------------- */
+
+#if !IN_STG_CODE
+
 /*
  * NB: the xchg instruction is implicitly locked, so we do not need
  * a lock prefix here.
@@ -149,15 +164,13 @@ cas(StgVolatilePtr p, StgWord o, StgWord n)
 #endif
 }
 
+#endif // !IN_STG_CODE
+
 /*
- * Write barrier - ensure that all preceding writes have happened
- * before all following writes.
- *
- * We need to tell both the compiler AND the CPU about the barrier.
- * This is a brute force solution; better results might be obtained by
- * using volatile type declarations to get fine-grained ordering
- * control in C, and optionally a memory barrier instruction on CPUs
- * that require it (not x86 or x86_64).
+ * We need to tell both the compiler AND the CPU about the barriers.
+ * It's no good preventing the CPU from reordering the operations if
+ * the compiler has already done so - hence the "memory" restriction
+ * on each of the barriers below.
  */
 EXTERN_INLINE void
 write_barrier(void) {
@@ -166,7 +179,42 @@ write_barrier(void) {
 #elif powerpc_HOST_ARCH
     __asm__ __volatile__ ("lwsync" : : : "memory");
 #elif sparc_HOST_ARCH
-    /* Sparc in TSO mode does not require write/write barriers. */
+    /* Sparc in TSO mode does not require store/store barriers. */
+    __asm__ __volatile__ ("" : : : "memory");
+#elif !defined(WITHSMP)
+    return;
+#else
+#error memory barriers unimplemented on this architecture
+#endif
+}
+
+EXTERN_INLINE void
+store_load_barrier(void) {
+#if i386_HOST_ARCH
+    __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory");
+#elif x86_64_HOST_ARCH
+    __asm__ __volatile__ ("lock; addq $0,0(%%rsp)" : : : "memory");
+#elif powerpc_HOST_ARCH
+    __asm__ __volatile__ ("sync" : : : "memory");
+#elif sparc_HOST_ARCH
+    __asm__ __volatile__ ("membar #StoreLoad" : : : "memory");
+#elif !defined(WITHSMP)
+    return;
+#else
+#error memory barriers unimplemented on this architecture
+#endif
+}
+
+EXTERN_INLINE void
+load_load_barrier(void) {
+#if i386_HOST_ARCH
+    __asm__ __volatile__ ("" : : : "memory");
+#elif x86_64_HOST_ARCH
+    __asm__ __volatile__ ("" : : : "memory");
+#elif powerpc_HOST_ARCH
+    __asm__ __volatile__ ("lwsync" : : : "memory");
+#elif sparc_HOST_ARCH
+    /* Sparc in TSO mode does not require load/load barriers. */
     __asm__ __volatile__ ("" : : : "memory");
 #elif !defined(WITHSMP)
     return;
@@ -178,7 +226,9 @@ write_barrier(void) {
 
 /* ---------------------------------------------------------------------- */
 #else /* !THREADED_RTS */
 
-#define write_barrier() /* nothing */
+#define write_barrier()      /* nothing */
+#define store_load_barrier() /* nothing */
+#define load_load_barrier()  /* nothing */
 
 INLINE_HEADER StgWord
 xchg(StgPtr p, StgWord w)
@@ -188,8 +238,17 @@ xchg(StgPtr p, StgWord w)
     return old;
 }
 
-#endif /* !THREADED_RTS */
+STATIC_INLINE StgWord
+cas(StgVolatilePtr p, StgWord o, StgWord n)
+{
+    StgWord result;
+    result = *p;
+    if (result == o) {
+        *p = n;
+    }
+    return result;
+}
 
-#endif /* CMINUSMINUS */
+#endif /* !THREADED_RTS */
 
 #endif /* SMP_H */
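
For context, a minimal sketch (not part of the patch) of the ordering idioms the new primitives are meant to support, in the spirit of the work-stealing deque test referenced in the comment above. The Mailbox type and the publish/consume/try_claim names are hypothetical, used only for illustration; the sketch assumes SMP.h and the usual RTS type definitions (StgWord etc.) are in scope.

/* Illustrative only -- not part of the patch. */

typedef struct {
    StgWord          payload;
    volatile StgWord ready;
} Mailbox;

/* Producer: the payload must become visible before the flag does. */
void publish(Mailbox *m, StgWord v)
{
    m->payload = v;
    write_barrier();           /* order the two stores (store/store) */
    m->ready = 1;
}

/* Consumer: the flag must be read before the payload is. */
int consume(Mailbox *m, StgWord *out)
{
    if (m->ready) {
        load_load_barrier();   /* order the two loads (load/load) */
        *out = m->payload;
        return 1;
    }
    return 0;
}

/* Dekker-style fragment: each thread stores its own flag, then loads
 * the other's.  Without a store/load barrier the load could be
 * satisfied before the store becomes visible, letting both threads
 * believe they claimed the resource. */
volatile StgWord claimed[2];

int try_claim(int me)
{
    claimed[me] = 1;
    store_load_barrier();      /* our store must precede the load below */
    return claimed[1 - me] == 0;
}

In the threaded RTS these calls compile to the per-architecture instructions added above; in the non-threaded RTS all three barriers expand to nothing, so the sketch degenerates to plain loads and stores.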