1 /* ----------------------------------------------------------------------------
3 * (c) The GHC Team, 2005
5 * Macros for THREADED_RTS support
7 * -------------------------------------------------------------------------- */
12 /* THREADED_RTS is currently not compatible with the following options:
14 * PROFILING (but only 1 CPU supported)
16 * Unregisterised builds are ok, but only 1 CPU supported.
19 #if defined(THREADED_RTS)
21 #if defined(TICKY_TICKY)
22 #error Build options incompatible with THREADED_RTS.
26 * XCHG - the atomic exchange instruction. Used for locking closures
27 * during updates (see lockClosure() below) and the MVar primops.
29 * NB: the xchg instruction is implicitly locked, so we do not need
// NOTE(review): fragmentary extraction -- the leading numerals are the
// original file's line numbers; the return-type line, braces, and most of
// the asm templates are missing.  Code left byte-identical; recover the
// complete definition from the original source before editing.
//
// Atomic exchange: swaps w into *p and (in the full version, presumably)
// returns the previous contents of *p -- TODO confirm against the elided
// lines.  Used for locking closures during updates and in the MVar primops.
33 xchg(StgPtr p, StgWord w)
// x86/x86-64: per the file's own comment above, the xchg instruction is
// implicitly locked, so no LOCK prefix is needed.
36 #if i386_HOST_ARCH || x86_64_HOST_ARCH
38     __asm__ __volatile__ (
40           :"+r" (result), "+m" (*p)
41           : /* no input-only operands */
// PowerPC: load-and-reserve (lwarx); the matching store-conditional and
// retry branch are among the elided lines -- TODO confirm.
43 #elif powerpc_HOST_ARCH
44     __asm__ __volatile__ (
45         "1: lwarx %0, 0, %2\n"
// Single-CPU fallback branch (body elided in this extraction).
51 #elif !defined(WITHSMP)
55 #error xchg() unimplemented on this architecture
61 * CMPXCHG - the single-word atomic compare-and-exchange instruction. Used
62 * in the STM implementation.
// NOTE(review): fragmentary extraction -- return type, braces, the x86
// cmpxchg template, and the PPC stwcx./branch lines are all elided.
// Left byte-identical.
//
// Single-word compare-and-swap: if *p == o, store n into *p; presumably
// returns the value observed at *p (o on success) -- TODO confirm against
// the elided lines.  Used by the STM implementation.
65 cas(StgVolatilePtr p, StgWord o, StgWord n)
// x86/x86-64: "=a"(o) ties the old value to EAX/RAX, as the cmpxchg
// instruction (elided here) requires.
67 #if i386_HOST_ARCH || x86_64_HOST_ARCH
68     __asm__ __volatile__ (
70           :"=a"(o), "=m" (*(volatile unsigned int *)p)
// PowerPC: lwarx/stwcx. reservation loop; the compare, conditional store,
// and retry branch are among the elided lines -- TODO confirm.
73 #elif powerpc_HOST_ARCH
75     __asm__ __volatile__ (
76         "1: lwarx %0, 0, %3\n"
83         :"r" (o), "r" (n), "r" (p)
// Single-CPU fallback branch (body elided in this extraction).
87 #elif !defined(WITHSMP)
95 #error cas() unimplemented on this architecture
100 * Write barrier - ensure that all preceding writes have happened
101 * before all following writes.
103 * We need to tell both the compiler AND the CPU about the barrier.
104 * This is a brute force solution; better results might be obtained by
105 * using volatile type declarations to get fine-grained ordering
106 * control in C, and optionally a memory barrier instruction on CPUs
107 * that require it (not x86 or x86_64).
// NOTE(review): fragmentary extraction -- the return-type line, the
// !WITHSMP fallback body, and the closing brace are elided.  Left
// byte-identical.
//
// Write barrier: orders all preceding stores before all following stores,
// for both the compiler and the CPU (see the comment block above).
110 write_barrier(void) {
// x86/x86-64 is store-ordered, so an empty asm with a "memory" clobber
// (compiler-only barrier) suffices.
111 #if i386_HOST_ARCH || x86_64_HOST_ARCH
112     __asm__ __volatile__ ("" : : : "memory");
// PowerPC needs an explicit lightweight sync instruction.
113 #elif powerpc_HOST_ARCH
114     __asm__ __volatile__ ("lwsync" : : : "memory");
// Single-CPU fallback branch (body elided in this extraction).
115 #elif !defined(WITHSMP)
118 #error memory barriers unimplemented on this architecture
123 * Locking/unlocking closures
125 * This is used primarily in the implementation of MVars.
127 #define SPIN_COUNT 4000
// NOTE(review): fragmentary extraction -- the opening brace, local
// declarations (info, spin counter i), the do-loop head, and the
// spin-exhausted retry path are elided.  Left byte-identical.
//
// Lock a closure by atomically swapping its info pointer with the
// WHITEHOLE info table; returns the previous info pointer on success.
// Spins up to SPIN_COUNT times if another thread holds the lock.
// Used primarily by the MVar implementation.
129 INLINE_HEADER StgInfoTable *
130 lockClosure(StgClosure *p)
136     info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
// If we did NOT read back WHITEHOLE, nobody else held the lock: we now
// own it, and return the real info pointer to the caller.
137     if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
138     } while (++i < SPIN_COUNT);
// NOTE(review): fragmentary extraction -- the return-type line, braces,
// and (per the comment below) the write_barrier() call itself are elided.
// Left byte-identical.
//
// Unlock a closure by restoring its real info pointer; the (elided)
// barrier must precede the store so the closure's fields are visible to
// other CPUs before the lock is released.
144 unlockClosure(StgClosure *p, StgInfoTable *info)
146     // This is a strictly ordered write, so we need a wb():
148     p->header.info = info;
151 #else /* !THREADED_RTS */
153 #define write_barrier() /* nothing */
// NOTE(review): non-THREADED_RTS stub of xchg; the body is entirely
// elided in this extraction (presumably a plain store-and-return, since
// no atomicity is needed on one CPU -- TODO confirm).
155 INLINE_HEADER StgWord
156 xchg(StgPtr p, StgWord w)
// Non-THREADED_RTS lockClosure: no locking needed on a single-threaded
// runtime, so just return the closure's info pointer unchanged.
// (Leading numerals are extraction artifacts, not code.)
163 INLINE_HEADER StgInfoTable *
164 lockClosure(StgClosure *p)
165 { return (StgInfoTable *)p->header.info; }
// Non-THREADED_RTS unlockClosure: a no-op (both parameters STG_UNUSED);
// the empty body line is elided in this extraction.
168 unlockClosure(StgClosure *p STG_UNUSED, StgInfoTable *info STG_UNUSED)
171 #endif /* !THREADED_RTS */
173 // Handy specialised versions of lockClosure()/unlockClosure()
// Lock a TSO (thread state object) via the generic lockClosure().  The
// returned info table is discarded -- TSO locking is lock/unlock only.
// (Leading numerals are extraction artifacts, not code.)
174 INLINE_HEADER void lockTSO(StgTSO *tso)
175 { lockClosure((StgClosure *)tso); }
// Unlock a TSO by restoring the canonical stg_TSO_info pointer (every
// locked TSO gets the same info table back, so none need be remembered).
// (Leading numerals are extraction artifacts, not code.)
177 INLINE_HEADER void unlockTSO(StgTSO *tso)
178 { unlockClosure((StgClosure*)tso, (StgInfoTable*)&stg_TSO_info); }