1 /* ----------------------------------------------------------------------------
3 * (c) The GHC Team, 2005
5 * Macros for THREADED_RTS support
7 * -------------------------------------------------------------------------- */
12 /* THREADED_RTS is currently not compatible with the following options:
14 * PROFILING (but only 1 CPU supported)
16 * Unregisterised builds are ok, but only 1 CPU supported.
/* C-- version of unlockClosure(): issue a write barrier so all prior
 * stores to the closure are published, then restore the real info
 * pointer — which releases the WHITEHOLE "lock" taken by lockClosure().
 * NOTE(review): order matters; the barrier must precede the info write.
 */
#define unlockClosure(ptr,info) \
prim %write_barrier() []; \
StgHeader_info(ptr) = info;
27 #if defined(THREADED_RTS)
29 #if defined(TICKY_TICKY)
30 #error Build options incompatible with THREADED_RTS.
33 /* ----------------------------------------------------------------------------
35 ------------------------------------------------------------------------- */
38 * The atomic exchange operation: xchg(p,w) exchanges the value
39 * pointed to by p with the value w, returning the old value.
41 * Used for locking closures during updates (see lockClosure() below)
42 * and the MVar primops.
44 INLINE_HEADER StgWord xchg(StgPtr p, StgWord w);
47 * Compare-and-swap. Atomically does this:
51 * if (r == o) { *p = n };
55 INLINE_HEADER StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
58 * Prevents write operations from moving across this call in either
61 INLINE_HEADER void write_barrier(void);
63 /* ----------------------------------------------------------------------------
65 ------------------------------------------------------------------------- */
67 * NB: the xchg instruction is implicitly locked, so we do not need
71 xchg(StgPtr p, StgWord w)
74 #if i386_HOST_ARCH || x86_64_HOST_ARCH
76 __asm__ __volatile__ (
78 :"+r" (result), "+m" (*p)
79 : /* no input-only operands */
81 #elif powerpc_HOST_ARCH
82 __asm__ __volatile__ (
83 "1: lwarx %0, 0, %2\n"
91 __asm__ __volatile__ (
93 : "+r" (result), "+m" (*p)
94 : /* no input-only operands */
96 #elif !defined(WITHSMP)
100 #error xchg() unimplemented on this architecture
106 * CMPXCHG - the single-word atomic compare-and-exchange instruction. Used
107 * in the STM implementation.
109 INLINE_HEADER StgWord
110 cas(StgVolatilePtr p, StgWord o, StgWord n)
112 #if i386_HOST_ARCH || x86_64_HOST_ARCH
113 __asm__ __volatile__ (
114 "lock\ncmpxchg %3,%1"
115 :"=a"(o), "=m" (*(volatile unsigned int *)p)
118 #elif powerpc_HOST_ARCH
120 __asm__ __volatile__ (
121 "1: lwarx %0, 0, %3\n"
124 " stwcx. %2, 0, %3\n"
128 :"r" (o), "r" (n), "r" (p)
132 #elif sparc_HOST_ARCH
133 __asm__ __volatile__ (
140 #elif !defined(WITHSMP)
148 #error cas() unimplemented on this architecture
153 * Write barrier - ensure that all preceding writes have happened
154 * before all following writes.
156 * We need to tell both the compiler AND the CPU about the barrier.
157 * This is a brute force solution; better results might be obtained by
158 * using volatile type declarations to get fine-grained ordering
159 * control in C, and optionally a memory barrier instruction on CPUs
160 * that require it (not x86 or x86_64).
163 write_barrier(void) {
164 #if i386_HOST_ARCH || x86_64_HOST_ARCH
165 __asm__ __volatile__ ("" : : : "memory");
166 #elif powerpc_HOST_ARCH
167 __asm__ __volatile__ ("lwsync" : : : "memory");
168 #elif sparc_HOST_ARCH
169 /* Sparc in TSO mode does not require write/write barriers. */
170 __asm__ __volatile__ ("" : : : "memory");
171 #elif !defined(WITHSMP)
174 #error memory barriers unimplemented on this architecture
178 /* -----------------------------------------------------------------------------
179 * Locking/unlocking closures
181 * This is used primarily in the implementation of MVars.
182 * -------------------------------------------------------------------------- */
184 #define SPIN_COUNT 4000
186 #ifdef KEEP_LOCKCLOSURE
187 // We want a callable copy of lockClosure() so that we can refer to it
188 // from .cmm files compiled using the native codegen.
189 extern StgInfoTable *lockClosure(StgClosure *p);
195 lockClosure(StgClosure *p)
201 info = xchg((P_)(void *)&p->header.info, (W_)&stg_WHITEHOLE_info);
202 if (info != (W_)&stg_WHITEHOLE_info) return (StgInfoTable *)info;
203 } while (++i < SPIN_COUNT);
209 unlockClosure(StgClosure *p, StgInfoTable *info)
211 // This is a strictly ordered write, so we need a write_barrier():
213 p->header.info = info;
216 /* -----------------------------------------------------------------------------
219 * These are simple spin-only locks as opposed to Mutexes which
220 * probably spin for a while before blocking in the kernel. We use
221 * these when we are sure that all our threads are actively running on
222 * a CPU, eg. in the GC.
224 * TODO: measure whether we really need these, or whether Mutexes
225 * would do (and be a bit safer if a CPU becomes loaded).
226 * -------------------------------------------------------------------------- */
229 typedef struct StgSync_
232 StgWord64 spin; // DEBUG version counts how much it spins
235 typedef StgWord StgSync;
238 typedef lnat StgSyncCount;
243 // Debug versions of spin locks maintain a spin count
246 // To use the debug version of the spin locks, a debug version of the program
247 // can be run under a debugger with a break point on stat_exit. At exit time
248 // of the program one can examine the spin counts of the various
249 // spin locks to check for contention.
252 INLINE_HEADER void ACQUIRE_SPIN_LOCK(StgSync * p)
257 r = cas((StgVolatilePtr)&(p->lock), 1, 0);
263 INLINE_HEADER void RELEASE_SPIN_LOCK(StgSync * p)
269 // initialise spin lock
270 INLINE_HEADER void initSpinLock(StgSync * p)
280 INLINE_HEADER void ACQUIRE_SPIN_LOCK(StgSync * p)
284 r = cas((StgVolatilePtr)p, 1, 0);
289 INLINE_HEADER void RELEASE_SPIN_LOCK(StgSync * p)
296 INLINE_HEADER void initSpinLock(StgSync * p)
304 /* ---------------------------------------------------------------------- */
305 #else /* !THREADED_RTS */
307 #define write_barrier() /* nothing */
309 INLINE_HEADER StgWord
310 xchg(StgPtr p, StgWord w)
317 INLINE_HEADER StgInfoTable *
318 lockClosure(StgClosure *p)
319 { return (StgInfoTable *)p->header.info; }
322 unlockClosure(StgClosure *p STG_UNUSED, StgInfoTable *info STG_UNUSED)
// Non-threaded RTS: spin locks compile away to nothing.
// Using macros here means we don't have to ensure the argument is in scope
#define ACQUIRE_SPIN_LOCK(p) /* nothing */
#define RELEASE_SPIN_LOCK(p) /* nothing */
329 INLINE_HEADER void initSpinLock(void * p STG_UNUSED)
332 #endif /* !THREADED_RTS */
334 // Handy specialised versions of lockClosure()/unlockClosure()
335 INLINE_HEADER void lockTSO(StgTSO *tso)
336 { lockClosure((StgClosure *)tso); }
338 INLINE_HEADER void unlockTSO(StgTSO *tso)
339 { unlockClosure((StgClosure*)tso, (StgInfoTable*)&stg_TSO_info); }
343 #endif /* CMINUSMINUS */