/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2005-2008
 *
 * Macros for multi-CPU support
 *
 * -------------------------------------------------------------------------- */

#ifndef SMP_H
#define SMP_H
/* THREADED_RTS is currently not compatible with the following options:
 *
 *      PROFILING (but only 1 CPU supported)
 *      TICKY_TICKY
 *
 * Unregisterised builds are ok, but only 1 CPU supported.
 */

#if defined(CMINUSMINUS)
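/*
 * unlockClosure() is the Cmm side of closure locking: lockClosure()
 * (in the C RTS) claims a closure by atomically swapping its info
 * pointer for a sentinel, and unlockClosure() releases it by writing
 * the real info pointer back.  The preceding write_barrier() ensures
 * that updates to the closure's payload become visible to other CPUs
 * before the info pointer store publishes the closure again.
 */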
#define unlockClosure(ptr,info)                 \
    prim %write_barrier() [];                   \
    StgHeader_info(ptr) = info;
#else /* !CMINUSMINUS */

#if defined(THREADED_RTS)
#if defined(TICKY_TICKY)
#error Build options incompatible with THREADED_RTS.
#endif
/* ----------------------------------------------------------------------------
   Atomic operations
   ------------------------------------------------------------------------- */
#if !IN_STG_CODE

// We only want write_barrier() declared in .hc files.  Defining the
// other inline functions here causes type mismatch errors from gcc,
// because the generated C code assumes that there are no prototypes
// in scope.
/*
 * The atomic exchange operation: xchg(p,w) exchanges the value
 * pointed to by p with the value w, returning the old value.
 *
 * Used for locking closures during updates (see lockClosure() below)
 * and the MVar primops.
 */
EXTERN_INLINE StgWord xchg(StgPtr p, StgWord w);
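/*
 * Illustrative sketch (not part of this header): a minimal spin-lock
 * built on xchg().  The names spin_lock_/spin_unlock_ and the
 * convention that 0 means "unlocked" are assumptions of the example
 * only; lockClosure() applies the same idea to a closure's info
 * pointer.
 */
#if 0 /* example only */
static void spin_lock_(StgPtr lock)
{
    /* Atomically store 1; if the old value was non-zero, somebody
     * else holds the lock, so retry until we observe 0. */
    while (xchg(lock, 1) != 0) {
        /* busy-wait */
    }
}

static void spin_unlock_(StgPtr lock)
{
    write_barrier();   /* make the critical section's writes visible */
    *lock = 0;         /* before the lock is seen to be free */
}
#endif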
/*
 * Compare-and-swap.  Atomically does this:
 *
 *   cas(p,o,n) {
 *       r = *p;
 *       if (r == o) { *p = n; }
 *       return r;
 *   }
 */
EXTERN_INLINE StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
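/*
 * Illustrative sketch (not part of this header): a lock-free
 * fetch-and-add built on cas().  atomic_add_ is a hypothetical name;
 * the loop retries whenever another CPU modifies *p between our read
 * and our compare-and-swap.
 */
#if 0 /* example only */
static StgWord atomic_add_(StgVolatilePtr p, StgWord incr)
{
    StgWord old, new_;
    do {
        old  = *p;
        new_ = old + incr;
        /* cas() returns the value it found in *p; if that is not the
         * value we read, we lost a race and must retry. */
    } while (cas(p, old, new_) != old);
    return new_;
}
#endif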
#endif // !IN_STG_CODE
/*
 * Write barrier: prevents write operations from moving across this
 * call in either direction.
 */
EXTERN_INLINE void write_barrier(void);
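/*
 * Illustrative sketch (not part of this header): the
 * initialise-then-publish pattern that write_barrier() exists to
 * support.  publish_ is a hypothetical name.  Without the barrier,
 * another CPU could observe the pointer in *slot before it observes
 * the payload writes.
 */
#if 0 /* example only */
static void publish_(StgVolatilePtr slot, StgPtr obj, StgWord field0)
{
    obj[0] = field0;        /* initialise the object's payload ...     */
    write_barrier();        /* ... order the payload before the ...    */
    *slot = (StgWord)obj;   /* ... store that makes the object visible */
}
#endif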
/* ----------------------------------------------------------------------------
   Implementations
   ------------------------------------------------------------------------- */

#if !IN_STG_CODE
/*
 * NB: the xchg instruction is implicitly locked, so we do not need
 * a lock prefix here.
 */
EXTERN_INLINE StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord result;
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    result = w;
    __asm__ __volatile__ (
        /* xchg with a memory operand is implicitly locked */
        "xchg %1,%0"
        :"+r" (result), "+m" (*p)
        : /* no input-only operands */
    );
#elif powerpc_HOST_ARCH
    __asm__ __volatile__ (
        /* load-reserve / store-conditional loop: retry until the
         * conditional store of w into *p succeeds */
        "1:     lwarx     %0, 0, %2\n"
        "       stwcx.    %1, 0, %2\n"
        "       bne-      1b"
        :"=&r" (result)
        :"r" (w), "r" (p)
        :"cc", "memory"
    );
#elif sparc_HOST_ARCH
    result = w;
    __asm__ __volatile__ (
        /* atomically swap the register holding result with *p */
        "swap %1,%0"
        : "+r" (result), "+m" (*p)
        : /* no input-only operands */
    );
#elif !defined(WITHSMP)
    /* uniprocessor build: a plain read-then-write will do */
    result = *p;
    *p = w;
#else
#error xchg() unimplemented on this architecture
#endif
    return result;
}
/*
 * CMPXCHG - the single-word atomic compare-and-exchange instruction.
 * Used in the STM implementation.
 */
EXTERN_INLINE StgWord
cas(StgVolatilePtr p, StgWord o, StgWord n)
{
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    __asm__ __volatile__ (
        /* compare the accumulator (o) with *p; if equal, store n */
        "lock\ncmpxchg %3,%1"
        :"=a" (o), "=m" (*p)
        :"0" (o), "r" (n));
    return o;
#elif powerpc_HOST_ARCH
    StgWord result;
    __asm__ __volatile__ (
        /* load-reserve *p; if it differs from o, give up; otherwise
         * try to store n, retrying if the reservation was lost */
        "1:     lwarx     %0, 0, %3\n"
        "       cmpw      %0, %1\n"
        "       bne       2f\n"
        "       stwcx.    %2, 0, %3\n"
        "       bne-      1b\n"
        "2:"
        :"=&r" (result)
        :"r" (o), "r" (n), "r" (p)
        :"cc", "memory"
    );
    return result;
#elif sparc_HOST_ARCH
    __asm__ __volatile__ (
        /* compare *p with o; if equal, swap n and *p; either way n
         * ends up holding the old contents of *p */
        "cas [%1], %2, %0"
        : "+r" (n)
        : "r" (p), "r" (o)
        : "memory"
    );
    return n;
#elif !defined(WITHSMP)
    /* uniprocessor build: exactly the pseudocode above */
    StgWord result;
    result = *p;
    if (result == o) {
        *p = n;
    }
    return result;
#else
#error cas() unimplemented on this architecture
#endif
}
#endif // !IN_STG_CODE
/*
 * Write barrier - ensure that all preceding writes have happened
 * before all following writes.
 *
 * We need to tell both the compiler AND the CPU about the barrier.
 * This is a brute-force solution; better results might be obtained by
 * using volatile type declarations to get fine-grained ordering
 * control in C, and optionally a memory barrier instruction on CPUs
 * that require it (not x86 or x86_64).
 */
EXTERN_INLINE void
write_barrier(void) {
#if i386_HOST_ARCH || x86_64_HOST_ARCH
    /* x86 and x86_64 do not reorder store-store, so only the compiler
     * needs to be told not to move writes across this point */
    __asm__ __volatile__ ("" : : : "memory");
#elif powerpc_HOST_ARCH
    __asm__ __volatile__ ("lwsync" : : : "memory");
#elif sparc_HOST_ARCH
    /* Sparc in TSO mode does not require write/write barriers. */
    __asm__ __volatile__ ("" : : : "memory");
#elif !defined(WITHSMP)
    return;
#else
#error memory barriers unimplemented on this architecture
#endif
}
/* ---------------------------------------------------------------------- */
#else /* !THREADED_RTS */

#define write_barrier() /* nothing */
INLINE_HEADER StgWord
xchg(StgPtr p, StgWord w)
{
    StgWord old;
    old = *p;
    *p = w;
    return old;
}
#endif /* !THREADED_RTS */

#endif /* CMINUSMINUS */

#endif /* SMP_H */