1 /* ----------------------------------------------------------------------------
3 * (c) The GHC Team, 2005-2008
5 * Macros for multi-CPU support
7 * -------------------------------------------------------------------------- */
12 /* THREADED_RTS is currently not compatible with the following options:
14 * PROFILING (but only 1 CPU supported)
16 * Unregisterised builds are ok, but only 1 CPU supported.
19 #if defined(THREADED_RTS)
21 #if defined(TICKY_TICKY)
22 #error Build options incompatible with THREADED_RTS.
25 /* ----------------------------------------------------------------------------
27 ------------------------------------------------------------------------- */
30 // We only want write_barrier() declared in .hc files. Defining the
31 // other inline functions here causes type mismatch errors from gcc,
32 // because the generated C code is assuming that there are no
33 // prototypes in scope.
/*
 * Prototypes for the atomic primitives used by the threaded RTS.
 * These are EXTERN_INLINE: declared here, with the inline bodies
 * provided further down in this file (outside of .hc compilation,
 * per the IN_STG_CODE note above).
 */
36  * The atomic exchange operation: xchg(p,w) exchanges the value
37  * pointed to by p with the value w, returning the old value.
39  * Used for locking closures during updates (see lockClosure() below)
40  * and the MVar primops.
42 EXTERN_INLINE StgWord xchg(StgPtr p, StgWord w);
/*
 * cas: atomic compare-and-swap.  Reads *p; if it equals o, stores n;
 * returns the value read (so the caller can tell whether the swap
 * happened by comparing the result against o).
 */
45  * Compare-and-swap. Atomically does this:
49  * if (r == o) { *p = n };
53 EXTERN_INLINE StgWord cas(StgVolatilePtr p, StgWord o, StgWord n);
55 #endif // !IN_STG_CODE
/* write_barrier is the one primitive also wanted in .hc files. */
58  * Prevents write operations from moving across this call in either
61 EXTERN_INLINE void write_barrier(void);
63 /* ----------------------------------------------------------------------------
65 ------------------------------------------------------------------------- */
/*
 * xchg: atomically exchange the word at *p with w and return the old
 * value (contract documented with the prototype earlier in this file).
 * One inline-asm implementation per supported architecture.
 */
70  * NB: the xchg instruction is implicitly locked, so we do not need
74 xchg(StgPtr p, StgWord w)
77 #if i386_HOST_ARCH || x86_64_HOST_ARCH
/* x86/x86-64: a single xchg instruction; it is implicitly LOCKed (see
 * the NB above), so no explicit lock prefix is required. */
79     __asm__ __volatile__ (
81       :"+r" (result), "+m" (*p)
82       : /* no input-only operands */
84 #elif powerpc_HOST_ARCH
/* PowerPC: lwarx (load-and-reserve) based retry loop — the store half
 * (stwcx.) is on a line not visible here; presumably the loop retries
 * until the reservation holds. */
85     __asm__ __volatile__ (
86 "1: lwarx %0, 0, %2\n"
94     __asm__ __volatile__ (
96 : "+r" (result), "+m" (*p)
97 : /* no input-only operands */
99 #elif !defined(WITHSMP)
/* Single-CPU threaded build: no atomicity needed, plain code path
 * (body not visible in this view). */
103 #error xchg() unimplemented on this architecture
109  * CMPXCHG - the single-word atomic compare-and-exchange instruction. Used
110  * in the STM implementation.
/*
 * cas(p, o, n): if *p == o, store n into *p; returns the value that was
 * read from *p (equal to o iff the swap took place).  See the prototype
 * comment earlier in this file.
 */
112 EXTERN_INLINE StgWord
113 cas(StgVolatilePtr p, StgWord o, StgWord n)
115 #if i386_HOST_ARCH || x86_64_HOST_ARCH
/* x86/x86-64: LOCK-prefixed cmpxchg; the old value comes back in
 * register a ("=a"(o)), which is also cmpxchg's implicit comparand. */
116     __asm__ __volatile__ (
117 "lock\ncmpxchg %3,%1"
118 :"=a"(o), "=m" (*(volatile unsigned int *)p)
121 #elif powerpc_HOST_ARCH
/* PowerPC: lwarx/stwcx. reservation loop — compare against o between
 * the load and the conditional store (comparison line not visible). */
123     __asm__ __volatile__ (
124 "1: lwarx %0, 0, %3\n"
127 " stwcx. %2, 0, %3\n"
131 :"r" (o), "r" (n), "r" (p)
135 #elif sparc_HOST_ARCH
/* SPARC: hardware compare-and-swap instruction (asm body not fully
 * visible in this view). */
136     __asm__ __volatile__ (
143 #elif !defined(WITHSMP)
/* Single-CPU threaded build: plain, non-atomic compare-and-set
 * (body not visible in this view). */
151 #error cas() unimplemented on this architecture
155 #endif // !IN_STG_CODE
158  * Write barrier - ensure that all preceding writes have happened
159  * before all following writes.
161  * We need to tell both the compiler AND the CPU about the barrier.
162  * This is a brute force solution; better results might be obtained by
163  * using volatile type declarations to get fine-grained ordering
164  * control in C, and optionally a memory barrier instruction on CPUs
165  * that require it (not x86 or x86_64).
168 write_barrier(void) {
169 #if i386_HOST_ARCH || x86_64_HOST_ARCH
/* x86/x86-64 need no hardware store-store barrier (see the comment
 * above); an empty asm with a "memory" clobber is a pure compiler
 * barrier, preventing the compiler from reordering writes across it. */
170     __asm__ __volatile__ ("" : : : "memory");
171 #elif powerpc_HOST_ARCH
/* PowerPC: lwsync issues a hardware barrier; the "memory" clobber
 * additionally stops compiler reordering. */
172     __asm__ __volatile__ ("lwsync" : : : "memory");
173 #elif sparc_HOST_ARCH
174     /* Sparc in TSO mode does not require write/write barriers. */
175     __asm__ __volatile__ ("" : : : "memory");
176 #elif !defined(WITHSMP)
/* Single-CPU threaded build: no barrier needed (branch body not
 * visible in this view). */
179 #error memory barriers unimplemented on this architecture
183 /* ---------------------------------------------------------------------- */
184 #else /* !THREADED_RTS */
/* Non-threaded RTS: only one CPU ever runs Haskell code, so the
 * barrier compiles away entirely. */
186 #define write_barrier() /* nothing */
/* Non-threaded xchg: a plain, non-atomic swap suffices (body not
 * visible in this view). */
188 INLINE_HEADER StgWord
189 xchg(StgPtr p, StgWord w)
196 #endif /* !THREADED_RTS */