diff --git a/includes/SMP.h b/includes/SMP.h
index e3b4cd5..ac98feb 100644
--- a/includes/SMP.h
+++ b/includes/SMP.h
@@ -16,14 +16,6 @@
  * Unregisterised builds are ok, but only 1 CPU supported.
  */
 
-#ifdef CMINUSMINUS
-
-#define unlockClosure(ptr,info)                 \
-    prim %write_barrier() [];                   \
-    StgHeader_info(ptr) = info;
-
-#else
-
 #if defined(THREADED_RTS)
 
 #if defined(TICKY_TICKY)
@@ -63,10 +55,22 @@ EXTERN_INLINE StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
 #endif // !IN_STG_CODE
 
 /*
- * Prevents write operations from moving across this call in either
- * direction.
+ * Various kinds of memory barrier.
+ * write_barrier: prevents future stores occurring before preceding stores.
+ * store_load_barrier: prevents future loads occurring before preceding stores.
+ * load_load_barrier: prevents future loads occurring before earlier loads.
+ *
+ * Reference for these: "The JSR-133 Cookbook for Compiler Writers"
+ * http://gee.cs.oswego.edu/dl/jmm/cookbook.html
+ *
+ * To check whether you got these right, try the test in
+ * testsuite/tests/ghc-regress/rts/testwsdeque.c
+ * This tests the work-stealing deque implementation, which relies on
+ * properly working store_load and load_load memory barriers.
  */
 EXTERN_INLINE void write_barrier(void);
+EXTERN_INLINE void store_load_barrier(void);
+EXTERN_INLINE void load_load_barrier(void);
 
 /* ----------------------------------------------------------------------------
    Implementations
@@ -163,14 +167,10 @@ cas(StgVolatilePtr p, StgWord o, StgWord n)
 #endif // !IN_STG_CODE
 
 /*
- * Write barrier - ensure that all preceding writes have happened
- * before all following writes.
- *
- * We need to tell both the compiler AND the CPU about the barrier.
- * This is a brute force solution; better results might be obtained by
- * using volatile type declarations to get fine-grained ordering
- * control in C, and optionally a memory barrier instruction on CPUs
- * that require it (not x86 or x86_64).
+ * We need to tell both the compiler AND the CPU about the barriers.
+ * It's no good preventing the CPU from reordering the operations if
+ * the compiler has already done so - hence the "memory" restriction
+ * on each of the barriers below.
  */
 EXTERN_INLINE void
 write_barrier(void) {
@@ -179,7 +179,42 @@ write_barrier(void) {
 #elif powerpc_HOST_ARCH
     __asm__ __volatile__ ("lwsync" : : : "memory");
 #elif sparc_HOST_ARCH
-    /* Sparc in TSO mode does not require write/write barriers. */
+    /* Sparc in TSO mode does not require store/store barriers. */
+    __asm__ __volatile__ ("" : : : "memory");
+#elif !defined(WITHSMP)
+    return;
+#else
+#error memory barriers unimplemented on this architecture
+#endif
+}
+
+EXTERN_INLINE void
+store_load_barrier(void) {
+#if i386_HOST_ARCH
+    __asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory");
+#elif x86_64_HOST_ARCH
+    __asm__ __volatile__ ("lock; addq $0,0(%%rsp)" : : : "memory");
+#elif powerpc_HOST_ARCH
+    __asm__ __volatile__ ("sync" : : : "memory");
+#elif sparc_HOST_ARCH
+    __asm__ __volatile__ ("membar #StoreLoad" : : : "memory");
+#elif !defined(WITHSMP)
+    return;
+#else
+#error memory barriers unimplemented on this architecture
+#endif
+}
+
+EXTERN_INLINE void
+load_load_barrier(void) {
+#if i386_HOST_ARCH
+    __asm__ __volatile__ ("" : : : "memory");
+#elif x86_64_HOST_ARCH
+    __asm__ __volatile__ ("" : : : "memory");
+#elif powerpc_HOST_ARCH
+    __asm__ __volatile__ ("lwsync" : : : "memory");
+#elif sparc_HOST_ARCH
+    /* Sparc in TSO mode does not require load/load barriers. */
     __asm__ __volatile__ ("" : : : "memory");
 #elif !defined(WITHSMP)
     return;
@@ -191,7 +226,9 @@ write_barrier(void) {
 /* ---------------------------------------------------------------------- */
 #else /* !THREADED_RTS */
 
-#define write_barrier() /* nothing */
+#define write_barrier()      /* nothing */
+#define store_load_barrier() /* nothing */
+#define load_load_barrier()  /* nothing */
 
 INLINE_HEADER StgWord
 xchg(StgPtr p, StgWord w)
@@ -201,8 +238,17 @@ xchg(StgPtr p, StgWord w)
     return old;
 }
 
-#endif /* !THREADED_RTS */
+STATIC_INLINE StgWord
+cas(StgVolatilePtr p, StgWord o, StgWord n)
+{
+    StgWord result;
+    result = *p;
+    if (result == o) {
+        *p = n;
+    }
+    return result;
+}
 
-#endif /* CMINUSMINUS */
+#endif /* !THREADED_RTS */
 
 #endif /* SMP_H */
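
Note (not part of the patch): a sketch of how these barriers are meant to
pair up may help when reviewing. Everything below is illustrative only --
publish, consume, try_enter_A, payload, ready, flagA and flagB are made-up
names that do not appear in the RTS, and the include is assumed to bring
SMP.h into scope the way an RTS build does.

#include "Rts.h"   /* assumed to pull in SMP.h within an RTS build */

static StgWord payload;               /* data handed from writer to reader */
static volatile StgWord ready = 0;    /* publication flag */

/* Writer: fill in the payload, then publish it.  write_barrier()
 * keeps the payload store ahead of the flag store, so a reader that
 * observes ready == 1 also observes payload == 42. */
void publish(void)
{
    payload = 42;
    write_barrier();          /* store/store ordering */
    ready = 1;
}

/* Reader: wait for the flag, then read the payload.
 * load_load_barrier() keeps the payload load behind the flag load,
 * which matters on weakly-ordered CPUs such as PowerPC. */
StgWord consume(void)
{
    while (ready == 0) { }    /* spin until published */
    load_load_barrier();      /* load/load ordering */
    return payload;
}

/* Dekker-style fragment: store your own flag, then load the other
 * side's.  Without store_load_barrier() the CPU may hoist the load
 * above the store, and both sides can see the other's flag as 0.
 * This store-then-load shape is the one the work-stealing deque test
 * mentioned in the header comment exercises. */
static volatile StgWord flagA = 0, flagB = 0;

int try_enter_A(void)
{
    flagA = 1;
    store_load_barrier();     /* store/load ordering */
    return flagB == 0;
}

The same sketch still compiles in a non-THREADED_RTS build, where all
three barriers expand to nothing -- which is why the patch adds the
no-op #defines alongside the real implementations.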