1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
7 * ---------------------------------------------------------------------------*/
12 /* -----------------------------------------------------------------------------
15 We have two layers of update macros. The top layer, UPD_IND() and
16 friends perform all the work of an update. In detail:
18 - if the closure being updated is a blocking queue, then all the
19 threads waiting on the blocking queue are updated.
21 - then the lower level updateWithIndirection() macro is invoked
22 to actually replace the closure with an indirection (see below).
24 -------------------------------------------------------------------------- */
/* Top-level update entry points.  Two variants exist; the #if/#else
 * selecting between them is elided from this excerpt (per the note
 * further down, presumably TICKY_TICKY selects the first pair -- TODO
 * confirm against the full source).
 *
 *   UPD_IND(updclosure, heapptr)
 *     Update 'updclosure' with an indirection to 'heapptr'.
 *     First variant: routed through UPD_PERM_IND (permanent
 *     indirection); second variant: a real stg_IND indirection.
 *
 *   UPD_SPEC_IND(updclosure, ind_info, heapptr, and_then)
 *     As UPD_IND, but with a caller-specified indirection info table
 *     and an 'and_then' continuation pasted in after the update.
 *     NB: the first variant ignores 'ind_info' (always permanent).
 */
27 # define UPD_IND(updclosure, heapptr) \
28 UPD_PERM_IND(updclosure,heapptr)
29 # define UPD_SPEC_IND(updclosure, ind_info, heapptr, and_then) \
30 UPD_PERM_IND(updclosure,heapptr); and_then
/* Second variant: genuine indirections via UPD_REAL_IND.  SEMI is
 * presumably a bare-semicolon continuation token -- TODO confirm. */
33 # define UPD_IND(updclosure, heapptr) \
34 UPD_REAL_IND(updclosure,INFO_PTR(stg_IND_info),heapptr,SEMI)
35 # define UPD_SPEC_IND(updclosure, ind_info, heapptr, and_then) \
36 UPD_REAL_IND(updclosure,ind_info,heapptr,and_then)
39 /* These macros have to work in both C and C--, so here's the
/* C-- spellings (the #ifdef CMINUSMINUS line is elided from this
 * excerpt -- TODO confirm): info pointers are bare words (W_),
 * foreign calls need the "foreign C" syntax, and info-table symbols
 * are used directly (no address-of). */
45 #define DECLARE_IPTR(info) W_ info
46 #define FCALL foreign "C"
47 #define INFO_PTR(info) info
/* C spellings of the same abstractions: typed info-table pointer,
 * plain function call, address-of the info-table symbol.  The field
 * accessor and ARG_PTR exist so shared macro bodies expand to valid
 * code in both languages. */
52 #define DECLARE_IPTR(info) const StgInfoTable *(info)
53 #define FCALL /* nothing */
54 #define INFO_PTR(info) &info
55 #define StgBlockingQueue_blocking_queue(closure) \
56 (((StgBlockingQueue *)closure)->blocking_queue)
57 #define ARG_PTR /* nothing */
/* UPD_REAL_IND: always installs a genuine indirection (never a
 * permanent one).  Reads the current info pointer, wakes any blocked
 * threads via AWAKEN_BQ, then delegates to updateWithIndirection.
 * NOTE(review): GET_INFO(updclosure) appears to be evaluated twice
 * (once into 'info', once as an argument below) -- the elided lines
 * may explain this; verify against the full source.
 * (Macro body continues beyond the lines visible in this excerpt.) */
60 /* UPD_IND actually does a PERM_IND if TICKY_TICKY is on;
61 if you *really* need an IND use UPD_REAL_IND
63 #define UPD_REAL_IND(updclosure, ind_info, heapptr, and_then) \
66 info = GET_INFO(updclosure); \
67 AWAKEN_BQ(info,updclosure); \
68 updateWithIndirection(GET_INFO(updclosure), ind_info, \
/* UPD_PERM_IND: update with a *permanent* indirection, which the GC
 * does not short-circuit -- needed so profiling/ticky builds can keep
 * tracking the updated closure.  Same shape as UPD_REAL_IND: fetch
 * info, wake blocked threads, then updateWithPermIndirection.
 * (Macro body continues beyond the lines visible in this excerpt.) */
74 #if defined(PROFILING) || defined(TICKY_TICKY)
75 #define UPD_PERM_IND(updclosure, heapptr) \
78 info = GET_INFO(updclosure); \
79 AWAKEN_BQ(info,updclosure); \
80 updateWithPermIndirection(info, \
/* UPD_IND_NOLOCK: like UPD_IND but uses AWAKEN_BQ_NOLOCK, i.e. the
 * caller already holds whatever lock protects the blocking queue.
 * Only meaningful in the threaded RTS; otherwise it is just UPD_IND.
 * Two threaded variants appear below (one using
 * updateWithPermIndirection, one updateWithIndirection); the inner
 * conditional selecting between them is elided from this excerpt --
 * presumably PROFILING/TICKY again; TODO confirm.
 * (Macro bodies continue beyond the lines visible in this excerpt.) */
86 #if defined(RTS_SUPPORTS_THREADS)
89 # define UPD_IND_NOLOCK(updclosure, heapptr) \
92 info = GET_INFO(updclosure); \
93 AWAKEN_BQ_NOLOCK(info,updclosure); \
94 updateWithPermIndirection(info, \
99 # define UPD_IND_NOLOCK(updclosure, heapptr) \
101 DECLARE_IPTR(info); \
102 info = GET_INFO(updclosure); \
103 AWAKEN_BQ_NOLOCK(info,updclosure); \
104 updateWithIndirection(info, INFO_PTR(stg_IND_info), \
111 #define UPD_IND_NOLOCK(updclosure,heapptr) UPD_IND(updclosure,heapptr)
114 /* -----------------------------------------------------------------------------
115 Awaken any threads waiting on a blocking queue (BLACKHOLE_BQ).
116 -------------------------------------------------------------------------- */
121 In a parallel setup several types of closures might have a blocking queue:
122 BLACKHOLE_BQ ... same as in the default concurrent setup; it will be
123 reawakened via calling UPD_IND on that closure after
124 having finished the computation of the graph
125 FETCH_ME_BQ ... a global indirection (FETCH_ME) may be entered by a
126 local TSO, turning it into a FETCH_ME_BQ; it will be
127 reawakened via calling processResume
128 RBH ... a revertible black hole may be entered by another
129 local TSO, putting it onto its blocking queue; since
130 RBHs only exist while the corresponding closure is in
131 transit, they will be reawakened via calling
132 convertToFetchMe (upon processing an ACK message)
134 In a parallel setup a blocking queue may contain 3 types of closures:
135 TSO ... as in the default concurrent setup
136 BLOCKED_FETCH ... indicating that a TSO on another PE is waiting for
137 the result of the current computation
138 CONSTR ... an RBHSave closure (which contains data ripped out of
139 the closure to make room for a blocking queue; since
140 it only contains data we use the existing type of
141 a CONSTR closure); this closure is the end of a
142 blocking queue for an RBH closure; it only exists in
143 this kind of blocking queue and must be at the end
/* Parallel (GUM) version: a blocking queue may hang off a
 * BLACKHOLE_BQ, a FETCH_ME_BQ, or an RBH (see the long comment
 * above), so all three closure kinds are checked before waking the
 * queue.  RBHs have no dedicated info-table test here, hence the
 * get_itbl()->type check.
 * (Macro body continues beyond the lines visible in this excerpt.) */
146 extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
147 #define DO_AWAKEN_BQ(bqe, node) STGCALL2(awakenBlockedQueue, bqe, node);
149 #define AWAKEN_BQ(info,closure) \
150 if (info == &stg_BLACKHOLE_BQ_info || \
151 info == &stg_FETCH_ME_BQ_info || \
152 get_itbl(closure)->type == RBH) { \
153 DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
/* GranSim version: identical to the GUM version except FETCH_ME_BQ
 * closures do not exist in GranSim, so only BLACKHOLE_BQ and RBH are
 * checked (as the comment below says).
 * (Macro body continues beyond the lines visible in this excerpt.) */
158 extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
159 #define DO_AWAKEN_BQ(bq, node) STGCALL2(awakenBlockedQueue, bq, node);
161 /* In GranSim we don't have FETCH_ME or FETCH_ME_BQ closures, so they are
162 not checked. The rest of the code is the same as for GUM.
164 #define AWAKEN_BQ(info,closure) \
165 if (info == &stg_BLACKHOLE_BQ_info || \
166 get_itbl(closure)->type == RBH) { \
167 DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
/* Default (non-GRAN, non-PAR) versions.  These use the FCALL /
 * INFO_PTR / ARG_PTR abstraction macros so the same text expands
 * correctly in both C and C--.  Only BLACKHOLE_BQ closures can carry
 * a blocking queue here. */
171 #else /* !GRAN && !PAR */
173 #define DO_AWAKEN_BQ(closure) \
174 FCALL awakenBlockedQueue(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
/* AWAKEN_BQ: wake waiters on a dynamic BLACKHOLE_BQ.
 * AWAKEN_STATIC_BQ: same, for the static-closure info table.
 * (Macro bodies continue beyond the lines visible in this excerpt.) */
176 #define AWAKEN_BQ(info,closure) \
177 if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
178 DO_AWAKEN_BQ(closure); \
181 #define AWAKEN_STATIC_BQ(info,closure) \
182 if (info == INFO_PTR(stg_BLACKHOLE_BQ_STATIC_info)) { \
183 DO_AWAKEN_BQ(closure); \
186 #ifdef RTS_SUPPORTS_THREADS
/* Threaded RTS only: lock-free variant used when the caller already
 * holds the relevant lock (see UPD_IND_NOLOCK above). */
187 #define DO_AWAKEN_BQ_NOLOCK(closure) \
188 FCALL awakenBlockedQueueNoLock(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
190 #define AWAKEN_BQ_NOLOCK(info,closure) \
191 if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
192 DO_AWAKEN_BQ_NOLOCK(closure); \
195 #endif /* GRAN || PAR */
197 /* -----------------------------------------------------------------------------
198 Updates: lower-level macros which update a closure with an
199 indirection to another closure.
201 There are several variants of this code.
204 -------------------------------------------------------------------------- */
207 * We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
211 * After all, we do *NOT* need to call LDV_RECORD_CREATE() for both IND and
212 * IND_OLDGEN closures because they are inherently used. But, it corrupts
213 * the invariants that every closure keeps its creation time in the profiling
214 * field. So, we call LDV_RECORD_CREATE().
217 /* In the DEBUG case, we also zero out the slop of the old closure,
218 * so that the sanity checker can tell where the next closure is.
220 * Two important invariants: we should never try to update a closure
221 * to point to itself, and the closure being updated should not
222 * already have been updated (the mutable list will get messed up
/* Non-DEBUG builds: slop-filling is purely a sanity-checking aid
 * (see comment above), so it compiles to nothing. */
227 #define DEBUG_FILL_SLOP(p) /* nothing */
/* DEBUG, C-- version: read the closure's ptrs/nptrs counts from its
 * standard info table and zero the whole payload, so the heap-sanity
 * checker can find the next closure after an update.  THUNK_SELECTORs
 * are skipped (their layout differs -- see the C version below).
 * (Macro body continues beyond the lines visible in this excerpt;
 * the enclosing #if DEBUG line is elided.) */
233 #define DEBUG_FILL_SLOP(p) \
238 inf = %GET_STD_INFO(p); \
239 np = TO_W_(%INFO_PTRS(inf)); \
240 nw = TO_W_(%INFO_NPTRS(inf)); \
241 if (%INFO_TYPE(inf) != HALF_W_(THUNK_SELECTOR)) { \
245 StgClosure_payload(p,i) = 0; \
252 #else /* !CMINUSMINUS */
/* DEBUG, C version of DEBUG_FILL_SLOP: zero out payload words of the
 * closure being overwritten (ptrs + nptrs words), skipping
 * THUNK_SELECTORs.  The function header (storage class/return type)
 * and closing braces fall on lines elided from this excerpt. */
255 DEBUG_FILL_SLOP(StgClosure *p)
257 StgInfoTable *inf = get_itbl(p);
258 nat np = inf->layout.payload.ptrs,
259 nw = inf->layout.payload.nptrs, i;
260 if (inf->type != THUNK_SELECTOR) {
261 for (i = 0; i < np + nw; i++) {
262 ((StgClosure *)p)->payload[i] = 0;
267 #endif /* CMINUSMINUS */
270 /* We have two versions of this macro (sadly), one for use in C-- code,
271 * and the other for C.
273 * The and_then argument is a performance hack so that we can paste in
274 * the continuation code directly. It helps shave a couple of
275 * instructions off the common case in the update code, which is
276 * worthwhile (the update code is often part of the inner loop).
277 * (except that gcc now appears to common up this code again and
278 * invert the optimisation. Grrrr --SDM).
/* C-- address arithmetic for generations[n] (no array indexing in
 * C--): base of the generations table plus n * sizeof(generation). */
281 #define generation(n) (W_[generations] + n*SIZEOF_generation)
/* C-- version of the low-level update.  Two cases by the block
 * descriptor's generation:
 *   gen 0 (new space): just overwrite with 'ind_info' indirection --
 *     no remembered-set work needed.
 *   old gen: record p1 in the mutable list of its generation (unless
 *     it is a BLACKHOLE_BQ, which is presumably already on a mutable
 *     list -- TODO confirm), fill slop for sanity checking, and use
 *     stg_IND_OLDGEN_info regardless of 'ind_info'.
 * LDV_* calls keep the lag/drag/void profiler's invariants (see the
 * LDV comment earlier in the file).  'and_then' is the pasted-in
 * continuation.  (Body continues beyond the visible lines.) */
282 #define updateWithIndirection(info, ind_info, p1, p2, and_then) \
285 /* ASSERT( p1 != p2 && !closure_IND(p1) ); \
286 */ LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1); \
288 if (bdescr_gen_no(bd) == 0 :: CInt) { \
289 StgInd_indirectee(p1) = p2; \
290 SET_INFO(p1, ind_info); \
291 LDV_RECORD_CREATE(p1); \
292 TICK_UPD_NEW_IND(); \
295 if (info != stg_BLACKHOLE_BQ_info) { \
296 DEBUG_FILL_SLOP(p1); \
297 foreign "C" recordMutableGen(p1 "ptr", \
298 generation(TO_W_(bdescr_gen_no(bd))) "ptr"); \
300 StgInd_indirectee(p1) = p2; \
301 SET_INFO(p1, stg_IND_OLDGEN_info); \
302 LDV_RECORD_CREATE(p1); \
303 TICK_UPD_OLD_IND(); \
/* C version of updateWithIndirection; mirrors the C-- macro above:
 * gen-0 closures get 'ind_info' directly, old-gen closures are added
 * to their generation's mutable list (unless BLACKHOLE_BQ) and get
 * stg_IND_OLDGEN_info.  '_info' is the closure's pre-update info
 * pointer, p2 the value to indirect to.
 * NOTE(review): no LDV_RECORD_CREATE(p1) is visible in the old-gen
 * branch here, unlike the C-- variant and the perm-indirection
 * function below -- lines are elided from this excerpt, so verify
 * against the full source before assuming it is missing.
 * (Macro body continues beyond the lines visible in this excerpt.) */
307 #define updateWithIndirection(_info, ind_info, p1, p2, and_then) \
311 ASSERT( (P_)p1 != (P_)p2 && !closure_IND(p1) ); \
312 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1); \
313 bd = Bdescr((P_)p1); \
314 if (bd->gen_no == 0) { \
315 ((StgInd *)p1)->indirectee = p2; \
316 SET_INFO(p1, ind_info); \
317 LDV_RECORD_CREATE(p1); \
318 TICK_UPD_NEW_IND(); \
321 if (_info != &stg_BLACKHOLE_BQ_info) { \
322 DEBUG_FILL_SLOP(p1); \
323 recordMutableGen(p1, &generations[bd->gen_no]); \
325 ((StgInd *)p1)->indirectee = p2; \
326 SET_INFO(p1, &stg_IND_OLDGEN_info); \
327 TICK_UPD_OLD_IND(); \
333 /* The permanent indirection version isn't performance critical. We
334 * therefore use an inline C function instead of the C-- macro.
/* updateWithPermIndirection(info, p1, p2): overwrite p1 with a
 * *permanent* indirection to p2 (stg_IND_PERM_info /
 * stg_IND_OLDGEN_PERM_info), which the GC will not short-circuit --
 * used by profiling/ticky builds so updated closures stay visible.
 * Structure parallels updateWithIndirection: gen-0 vs old-gen, with
 * old-gen closures (other than BLACKHOLE_BQ) recorded in their
 * generation's mutable list.  The function's declaration line,
 * parameter list tail, 'bd' declaration, and closing braces fall on
 * lines elided from this excerpt. */
338 updateWithPermIndirection(const StgInfoTable *info,
344 ASSERT( p1 != p2 && !closure_IND(p1) );
347 // Destroy the old closure.
348 // Nb: LDV_* stuff cannot mix with ticky-ticky
349 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1);
352 if (bd->gen_no == 0) {
353 ((StgInd *)p1)->indirectee = p2;
354 SET_INFO(p1, &stg_IND_PERM_info);
356 // We have just created a new closure.
357 LDV_RECORD_CREATE(p1);
358 TICK_UPD_NEW_PERM_IND(p1);
360 if (info != &stg_BLACKHOLE_BQ_info) {
361 recordMutableGen(p1, &generations[bd->gen_no]);
363 ((StgInd *)p1)->indirectee = p2;
364 SET_INFO(p1, &stg_IND_OLDGEN_PERM_info);
366 // We have just created a new closure.
367 LDV_RECORD_CREATE(p1);
368 TICK_UPD_OLD_PERM_IND();
373 #endif /* UPDATES_H */