diff --git a/ghc/includes/SMP.h b/ghc/includes/SMP.h
index cc95941..5974c96 100644
--- a/ghc/includes/SMP.h
+++ b/ghc/includes/SMP.h
@@ -2,30 +2,29 @@
  *
  * (c) The GHC Team, 2005
  *
- * Macros for SMP support
+ * Macros for THREADED_RTS support
  *
  * -------------------------------------------------------------------------- */
 
 #ifndef SMP_H
 #define SMP_H
 
-/* SMP is currently not compatible with the following options:
+/* THREADED_RTS is currently not compatible with the following options:
  *
- *      INTERPRETER
- *      PROFILING
+ *      PROFILING (but only 1 CPU supported)
  *      TICKY_TICKY
- * and unregisterised builds.
+ *      Unregisterised builds are ok, but only 1 CPU supported.
  */
 
-#if defined(SMP)
+#if defined(THREADED_RTS)
 
-#if defined(PROFILING) || defined(TICKY_TICKY)
-#error Build options incompatible with SMP.
+#if defined(TICKY_TICKY)
+#error Build options incompatible with THREADED_RTS.
 #endif
 
 /*
  * XCHG - the atomic exchange instruction.  Used for locking closures
- * during updates (see LOCK_CLOSURE below) and the MVar primops.
+ * during updates (see lockClosure() below) and the MVar primops.
  *
  * NB: the xchg instruction is implicitly locked, so we do not need
  * a lock prefix here.
@@ -34,12 +33,24 @@ INLINE_HEADER StgWord
 xchg(StgPtr p, StgWord w)
 {
     StgWord result;
+#if i386_HOST_ARCH || x86_64_HOST_ARCH
     result = w;
     __asm__ __volatile__ (
         "xchg %1,%0"
         :"+r" (result), "+m" (*p)
         : /* no input-only operands */
     );
+#elif powerpc_HOST_ARCH
+    __asm__ __volatile__ (
+        "1: lwarx %0, 0, %2\n"
+        "   stwcx. %1, 0, %2\n"
+        "   bne-  1b"
+        :"=r" (result)
+        :"r" (w), "r" (p)
+    );
+#else
+#error xchg() unimplemented on this architecture
+#endif
     return result;
 }
 
@@ -50,21 +61,69 @@ INLINE_HEADER StgWord
 cas(StgVolatilePtr p, StgWord o, StgWord n)
 {
+#if i386_HOST_ARCH || x86_64_HOST_ARCH
     __asm__ __volatile__ (
-        "lock cmpxchg %3,%1"
+        "lock/cmpxchg %3,%1"
         :"=a"(o), "=m" (*(volatile unsigned int *)p)
         :"0" (o), "r" (n));
     return o;
+#elif powerpc_HOST_ARCH
+    StgWord result;
+    __asm__ __volatile__ (
+        "1: lwarx %0, 0, %3\n"
+        "   cmpw  %0, %1\n"
+        "   bne   2f\n"
+        "   stwcx. %2, 0, %3\n"
+        "   bne-  1b\n"
+        "2:"
+        :"=r" (result)
+        :"r" (o), "r" (n), "r" (p)
+    );
+    return result;
+#else
+#error cas() unimplemented on this architecture
+#endif
+}
+
+/*
+ * Write barrier - ensure that all preceding writes have happened
+ * before all following writes.
+ *
+ * We need to tell both the compiler AND the CPU about the barrier.
+ * This is a brute force solution; better results might be obtained by
+ * using volatile type declarations to get fine-grained ordering
+ * control in C, and optionally a memory barrier instruction on CPUs
+ * that require it (not x86 or x86_64).
+ */
+INLINE_HEADER void
+wb(void) {
+#if i386_HOST_ARCH || x86_64_HOST_ARCH
+    __asm__ __volatile__ ("" : : : "memory");
+#elif powerpc_HOST_ARCH
+    __asm__ __volatile__ ("lwsync" : : : "memory");
+#else
+#error memory barriers unimplemented on this architecture
+#endif
 }
 
+/*
+ * Locking/unlocking closures
+ *
+ * This is used primarily in the implementation of MVars.
+ */
+#define SPIN_COUNT 4000
+
 INLINE_HEADER StgInfoTable *
 lockClosure(StgClosure *p)
 {
-#if i386_HOST_ARCH || x86_64_HOST_ARCH
+#if i386_HOST_ARCH || x86_64_HOST_ARCH || powerpc_HOST_ARCH
     StgWord info;
     do {
-        info = xchg((P_)&p->header.info, (W_)&stg_WHITEHOLE_info);
-        if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
+        nat i = 0;
+        do {
+            info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
+            if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
+        } while (++i < SPIN_COUNT);
         yieldThread();
     } while (1);
 #else
@@ -75,14 +134,27 @@ lockClosure(StgClosure *p)
 INLINE_HEADER void
 unlockClosure(StgClosure *p, StgInfoTable *info)
 {
-#if i386_HOST_ARCH || x86_64_HOST_ARCH
-    // This is safe enough, because lockClosure() does the memory barrier:
+#if i386_HOST_ARCH || x86_64_HOST_ARCH || powerpc_HOST_ARCH
+    // This is a strictly ordered write, so we need a wb():
+    wb();
     p->header.info = info;
 #else
     RELEASE_SM_LOCK;
 #endif
 }
 
-#endif /* SMP */
+#else /* !THREADED_RTS */
+
+#define wb() /* nothing */
+
+INLINE_HEADER StgWord
+xchg(StgPtr p, StgWord w)
+{
+    StgWord old = *p;
+    *p = w;
+    return old;
+}
+
+#endif /* !THREADED_RTS */
 
 #endif /* SMP_H */
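For readers following the new lockClosure()/unlockClosure() protocol, the sketch below restates the same spin-then-yield idea as a self-contained C11 program, using <stdatomic.h> and sched_yield() in place of the RTS's own xchg(), wb() and yieldThread(). It is an illustration under assumed names, not GHC code: LOCKED, lock_word() and unlock_word() are hypothetical stand-ins for &stg_WHITEHOLE_info, lockClosure() and unlockClosure().

/* Minimal sketch (not GHC code): a spin-then-yield lock in the style of
 * lockClosure()/unlockClosure(), written with C11 atomics instead of the
 * RTS's own xchg()/wb() primitives.  LOCKED, lock_word() and unlock_word()
 * are illustrative names only. */
#include <stdatomic.h>
#include <stdint.h>
#include <sched.h>
#include <stdio.h>

#define LOCKED     ((uintptr_t)0)   /* stands in for (W_)&stg_WHITEHOLE_info */
#define SPIN_COUNT 4000             /* same spin budget as the patch */

/* Acquire: atomically swap in the LOCKED value; the previous contents play
 * the role of the closure's real info pointer and are returned to the caller. */
static uintptr_t lock_word(_Atomic uintptr_t *p)
{
    for (;;) {
        for (int i = 0; i < SPIN_COUNT; i++) {
            uintptr_t old = atomic_exchange(p, LOCKED);
            if (old != LOCKED) return old;   /* we got the lock */
        }
        sched_yield();   /* analogous to yieldThread() after SPIN_COUNT tries */
    }
}

/* Release: publish any preceding writes, then restore the saved value.
 * The release fence corresponds to the wb() call added to unlockClosure(). */
static void unlock_word(_Atomic uintptr_t *p, uintptr_t saved)
{
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(p, saved, memory_order_relaxed);
}

int main(void)
{
    _Atomic uintptr_t word = 42;          /* pretend this is an info pointer */
    uintptr_t saved = lock_word(&word);   /* word now reads as LOCKED */
    /* ... mutate the protected structure here ... */
    unlock_word(&word, saved);            /* word reads as 42 again */
    printf("restored value: %lu\n", (unsigned long)atomic_load(&word));
    return 0;
}

The property that matters is the one the patch relies on: the atomic exchange both claims the lock and returns the previous contents, so the caller can hand the real info pointer back when unlocking, and the ordering fence before the final store plays the role of the wb() inserted into unlockClosure().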
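The cas() added for PowerPC keeps the usual compare-and-swap contract: it returns whatever was in memory before the operation, whether or not the store went ahead, so a caller detects contention by comparing the returned word with the value it expected. As a hedged sketch of how such a primitive is normally consumed (again not GHC code: word_t and atomic_add() are made-up names, and GCC's __sync_val_compare_and_swap builtin stands in for cas()), a typical retry loop looks like this:

/* Sketch only: a retry loop over a cas()-style primitive.  The GCC builtin
 * __sync_val_compare_and_swap stands in for the patch's cas(), since both
 * return the value that was in memory before the operation. */
#include <stdio.h>

typedef unsigned long word_t;

/* Atomically add 'n' to '*p' and return the value before the addition. */
static word_t atomic_add(volatile word_t *p, word_t n)
{
    word_t old, seen;
    do {
        old  = *p;                                       /* read current value */
        seen = __sync_val_compare_and_swap(p, old, old + n);
        /* If another thread changed *p between the read and the CAS, 'seen'
         * differs from 'old' and we go round again with the fresh value. */
    } while (seen != old);
    return old;
}

int main(void)
{
    volatile word_t counter = 0;
    atomic_add(&counter, 5);
    atomic_add(&counter, 7);
    printf("counter = %lu\n", (unsigned long)counter);   /* prints 12 */
    return 0;
}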