1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team, 1998-2004
7 * ---------------------------------------------------------------------------*/
12 /* -----------------------------------------------------------------------------
15 We have two layers of update macros. The top layer, UPD_IND() and
16 friends perform all the work of an update. In detail:
18 - if the closure being updated is a blocking queue, then all the
19 threads waiting on the blocking queue are updated.
21 - then the lower level updateWithIndirection() macro is invoked
22 to actually replace the closure with an indirection (see below).
24 -------------------------------------------------------------------------- */
/* Top-level update macros.  Two alternative definition sets follow.
 * NOTE(review): the #if/#else/#endif directives that select between them
 * are on lines elided from this listing — confirm against the full file.
 * First set: route every update through UPD_PERM_IND (so updates leave a
 * permanent, countable indirection); ind_info is ignored in this variant. */
27 # define UPD_IND(updclosure, heapptr) \
28 UPD_PERM_IND(updclosure,heapptr)
29 # define UPD_SPEC_IND(updclosure, ind_info, heapptr, and_then) \
30 UPD_PERM_IND(updclosure,heapptr); and_then
/* Second (normal) set: a real overwriting indirection via UPD_REAL_IND.
 * UPD_IND uses the standard stg_IND info table; UPD_SPEC_IND lets the
 * caller supply the indirection info table and a continuation (and_then)
 * pasted in after the update. */
33 # define UPD_IND(updclosure, heapptr) \
34 UPD_REAL_IND(updclosure,INFO_PTR(stg_IND_info),heapptr,SEMI)
35 # define UPD_SPEC_IND(updclosure, ind_info, heapptr, and_then) \
36 UPD_REAL_IND(updclosure,ind_info,heapptr,and_then)
39 /* These macros have to work in both C and C--, so here's the
/* Glue so the update macros expand correctly in both C-- and C.
 * C-- spellings: info pointers are plain words (W_), out-calls use
 * 'foreign "C"', and info tables are referenced by name directly. */
43 #define DECLARE_IPTR(info) W_ info
44 #define FCALL foreign "C"
45 #define INFO_PTR(info) info
/* C spellings: typed info-table pointers, no call decoration, and the
 * address-of operator to reference a statically declared info table. */
48 #define DECLARE_IPTR(info) const StgInfoTable *(info)
49 #define FCALL /* nothing */
50 #define INFO_PTR(info) &info
/* Field accessor matching the C-- accessor of the same name, so shared
 * macro bodies can fetch a blocking queue's wait list in either world. */
51 #define StgBlockingQueue_blocking_queue(closure) \
52 (((StgBlockingQueue *)closure)->blocking_queue)
53 #define ARG_PTR /* nothing */
/* UPD_REAL_IND: the genuine-indirection update.  Reads the closure's
 * current info pointer, wakes any threads blocked on it (AWAKEN_BQ),
 * then overwrites it with an indirection to heapptr via
 * updateWithIndirection, running and_then afterwards.
 * NOTE(review): the macro body below is truncated in this listing
 * (continuation lines elided) — see the full source for the remainder. */
56 /* UPD_IND actually does a PERM_IND if TICKY_TICKY is on;
57 if you *really* need an IND use UPD_REAL_IND
59 #define UPD_REAL_IND(updclosure, ind_info, heapptr, and_then) \
61 info = GET_INFO(updclosure); \
62 AWAKEN_BQ(info,updclosure); \
63 updateWithIndirection(GET_INFO(updclosure), ind_info, \
/* UPD_PERM_IND: update with a *permanent* indirection — one the GC will
 * not short-circuit — so profiling/ticky builds can attribute costs and
 * count updates.  Same shape as UPD_REAL_IND: fetch info, wake the
 * blocking queue, then install the indirection.
 * NOTE(review): body truncated in this listing (lines elided). */
68 #if defined(PROFILING) || defined(TICKY_TICKY)
69 #define UPD_PERM_IND(updclosure, heapptr) \
71 info = GET_INFO(updclosure); \
72 AWAKEN_BQ(info,updclosure); \
73 updateWithPermIndirection(info, \
/* UPD_IND_NOLOCK: as UPD_IND, but the caller already holds whatever lock
 * protects the closure, so the no-lock blocking-queue wakeup
 * (AWAKEN_BQ_NOLOCK) is used.  Three variants follow; the selecting
 * preprocessor conditionals and parts of each body are elided from this
 * listing — confirm against the full file.  The first (threaded RTS,
 * presumably combined with ticky/profiling — TODO confirm) uses a
 * permanent indirection; the second a plain stg_IND; the last simply
 * aliases UPD_IND where no locking distinction exists. */
78 #if defined(RTS_SUPPORTS_THREADS)
81 # define UPD_IND_NOLOCK(updclosure, heapptr) \
83 info = GET_INFO(updclosure); \
84 AWAKEN_BQ_NOLOCK(info,updclosure); \
85 updateWithPermIndirection(info, \
89 # define UPD_IND_NOLOCK(updclosure, heapptr) \
91 info = GET_INFO(updclosure); \
92 AWAKEN_BQ_NOLOCK(info,updclosure); \
93 updateWithIndirection(info, INFO_PTR(stg_IND_info), \
99 #define UPD_IND_NOLOCK(updclosure,heapptr) UPD_IND(updclosure,heapptr)
102 /* -----------------------------------------------------------------------------
103 Awaken any threads waiting on a blocking queue (BLACKHOLE_BQ).
104 -------------------------------------------------------------------------- */
109 In a parallel setup several types of closures might have a blocking queue:
110 BLACKHOLE_BQ ... same as in the default concurrent setup; it will be
111 reawakened via calling UPD_IND on that closure after
112 having finished the computation of the graph
113 FETCH_ME_BQ ... a global indirection (FETCH_ME) may be entered by a
114 local TSO, turning it into a FETCH_ME_BQ; it will be
115 reawakened via calling processResume
116 RBH ... a revertible black hole may be entered by another
117 local TSO, putting it onto its blocking queue; since
118 RBHs only exist while the corresponding closure is in
119 transit, they will be reawakened via calling
120 convertToFetchMe (upon processing an ACK message)
122 In a parallel setup a blocking queue may contain 3 types of closures:
123 TSO ... as in the default concurrent setup
124 BLOCKED_FETCH ... indicating that a TSO on another PE is waiting for
125 the result of the current computation
126 CONSTR ... an RBHSave closure (which contains data ripped out of
127 the closure to make room for a blocking queue; since
128 it only contains data we use the existing type of
129 a CONSTR closure); this closure is the end of a
130 blocking queue for an RBH closure; it only exists in
131 this kind of blocking queue and must be at the end
/* Parallel (GUM) wakeup: awakenBlockedQueue walks the queue of
 * StgBlockingQueueElements hanging off 'node' and restarts them. */
134 extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
135 #define DO_AWAKEN_BQ(bqe, node) STGCALL2(awakenBlockedQueue, bqe, node);
/* Wake waiters if the closure being updated carries a blocking queue:
 * BLACKHOLE_BQ, FETCH_ME_BQ, or an RBH (detected via its info-table type).
 * NOTE(review): macro body truncated in this listing (closing lines
 * elided). */
137 #define AWAKEN_BQ(info,closure) \
138 if (info == &stg_BLACKHOLE_BQ_info || \
139 info == &stg_FETCH_ME_BQ_info || \
140 get_itbl(closure)->type == RBH) { \
141 DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
/* GranSim variant of the same wakeup machinery. */
146 extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
147 #define DO_AWAKEN_BQ(bq, node) STGCALL2(awakenBlockedQueue, bq, node);
/* Only BLACKHOLE_BQ and RBH are tested here — GranSim has no FETCH_ME /
 * FETCH_ME_BQ closures (see comment below).  Body truncated in this
 * listing (closing lines elided). */
149 /* In GranSim we don't have FETCH_ME or FETCH_ME_BQ closures, so they are
150 not checked. The rest of the code is the same as for GUM.
152 #define AWAKEN_BQ(info,closure) \
153 if (info == &stg_BLACKHOLE_BQ_info || \
154 get_itbl(closure)->type == RBH) { \
155 DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
/* Default (non-GRAN, non-PAR) wakeup.  Written with the FCALL/ARG_PTR/
 * StgBlockingQueue_blocking_queue glue above so the same text expands in
 * both C and C--. */
159 #else /* !GRAN && !PAR */
161 #define DO_AWAKEN_BQ(closure) \
162 FCALL awakenBlockedQueue(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
/* AWAKEN_BQ handles dynamic BLACKHOLE_BQs; AWAKEN_STATIC_BQ is the twin
 * for statically allocated ones (stg_BLACKHOLE_BQ_STATIC_info).  Both
 * macro bodies are truncated in this listing (closing lines elided). */
164 #define AWAKEN_BQ(info,closure) \
165 if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
166 DO_AWAKEN_BQ(closure); \
169 #define AWAKEN_STATIC_BQ(info,closure) \
170 if (info == INFO_PTR(stg_BLACKHOLE_BQ_STATIC_info)) { \
171 DO_AWAKEN_BQ(closure); \
/* Threaded-RTS variant for callers that already hold the closure's lock:
 * awakenBlockedQueueNoLock must not re-take it. */
174 #ifdef RTS_SUPPORTS_THREADS
175 #define DO_AWAKEN_BQ_NOLOCK(closure) \
176 FCALL awakenBlockedQueueNoLock(StgBlockingQueue_blocking_queue(closure) ARG_PTR);
/* Body truncated in this listing (closing lines elided). */
178 #define AWAKEN_BQ_NOLOCK(info,closure) \
179 if (info == INFO_PTR(stg_BLACKHOLE_BQ_info)) { \
180 DO_AWAKEN_BQ_NOLOCK(closure); \
183 #endif /* GRAN || PAR */
185 /* -----------------------------------------------------------------------------
186 Updates: lower-level macros which update a closure with an
187 indirection to another closure.
189 There are several variants of this code.
192 -------------------------------------------------------------------------- */
195 * We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation in
199 * After all, we do *NOT* need to call LDV_RECORD_CREATE() for both IND and
200 * IND_OLDGEN closures because they are inherently used. But, it corrupts
201 * the invariants that every closure keeps its creation time in the profiling
202 * field. So, we call LDV_RECORD_CREATE().
205 /* In the DEBUG case, we also zero out the slop of the old closure,
206 * so that the sanity checker can tell where the next closure is.
208 * Two important invariants: we should never try to update a closure
209 * to point to itself, and the closure being updated should not
210 * already have been updated (the mutable list will get messed up
/* Non-DEBUG builds: zeroing slop is pure overhead, so compile to nothing. */
215 #define DEBUG_FILL_SLOP(p) /* nothing */
/* DEBUG, C-- version: read the closure's pointer/non-pointer payload
 * counts from its standard info table and zero the payload, so the heap
 * sanity checker can find the next closure.  THUNK_SELECTORs are skipped
 * (their layout field is not a ptrs/nptrs pair).  Body truncated in this
 * listing (loop header and closing lines elided). */
221 #define DEBUG_FILL_SLOP(p) \
226 inf = %GET_STD_INFO(p); \
227 np = TO_W_(%INFO_PTRS(inf)); \
228 nw = TO_W_(%INFO_NPTRS(inf)); \
229 if (%INFO_TYPE(inf) != THUNK_SELECTOR::I16) { \
233 StgClosure_payload(p,i) = 0; \
240 #else /* !CMINUSMINUS */
/* DEBUG, C version of the same: zero the whole payload (ptrs + nptrs
 * words) of the closure being overwritten, except for THUNK_SELECTORs.
 * NOTE(review): the function's storage class / return type line and its
 * closing braces are elided from this listing. */
243 DEBUG_FILL_SLOP(StgClosure *p)
245 StgInfoTable *inf = get_itbl(p);
246 nat np = inf->layout.payload.ptrs,
247 nw = inf->layout.payload.nptrs, i;
248 if (inf->type != THUNK_SELECTOR) {
249 for (i = 0; i < np + nw; i++) {
250 ((StgClosure *)p)->payload[i] = 0;
255 #endif /* CMINUSMINUS */
258 /* We have two versions of this macro (sadly), one for use in C-- code,
259 * and the other for C.
261 * The and_then argument is a performance hack so that we can paste in
262 * the continuation code directly. It helps shave a couple of
263 * instructions off the common case in the update code, which is
264 * worthwhile (the update code is often part of the inner loop).
265 * (except that gcc now appears to common up this code again and
266 * invert the optimisation. Grrrr --SDM).
/* generation(n): address of the n'th generation struct in the C array
 * 'generations', computed by hand since C-- has no struct indexing. */
269 #define generation(n) (W_[generations] + n*SIZEOF_generation)
/* C-- version of the low-level update.  New-generation closures are
 * simply overwritten with ind_info; old-generation closures become
 * IND_OLDGEN and are pushed onto their generation's mut_once_list so the
 * GC will trace the old-to-new pointer.  BLACKHOLE_BQs are already on a
 * mutable list, hence the info-table test before linking.
 * NOTE(review): several continuation lines (bd initialisation, else,
 * and_then, closing braces) are elided from this listing. */
270 #define updateWithIndirection(info, ind_info, p1, p2, and_then) \
273 /* ASSERT( p1 != p2 && !closure_IND(p1) ); \
274 */ LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1); \
276 if (bdescr_gen_no(bd) == 0) { \
277 StgInd_indirectee(p1) = p2; \
278 SET_INFO(p1, ind_info); \
279 LDV_RECORD_CREATE(p1); \
280 TICK_UPD_NEW_IND(); \
283 if (info != stg_BLACKHOLE_BQ_info) { \
284 DEBUG_FILL_SLOP(p1); \
285 W_ __mut_once_list; \
286 __mut_once_list = generation(bdescr_gen_no(bd)) + \
287 OFFSET_generation_mut_once_list; \
288 StgMutClosure_mut_link(p1) = W_[__mut_once_list]; \
289 W_[__mut_once_list] = p1; \
291 StgInd_indirectee(p1) = p2; \
292 SET_INFO(p1, stg_IND_OLDGEN_info); \
293 LDV_RECORD_CREATE(p1); \
294 TICK_UPD_OLD_IND(); \
/* C version of the low-level update; mirrors the C-- macro above:
 * gen 0 -> overwrite with ind_info; older gens -> IND_OLDGEN plus a link
 * onto generations[gen].mut_once_list (skipped for BLACKHOLE_BQ, which is
 * already on a mutable list).
 * NOTE(review): unlike the C-- variant, no LDV_RECORD_CREATE is visible in
 * the old-generation branch here — it may be on an elided line; confirm
 * against the full source.  Closing lines (and_then, braces) are elided. */
298 #define updateWithIndirection(_info, ind_info, p1, p2, and_then) \
302 ASSERT( (P_)p1 != (P_)p2 && !closure_IND(p1) ); \
303 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1); \
304 bd = Bdescr((P_)p1); \
305 if (bd->gen_no == 0) { \
306 ((StgInd *)p1)->indirectee = p2; \
307 SET_INFO(p1, ind_info); \
308 LDV_RECORD_CREATE(p1); \
309 TICK_UPD_NEW_IND(); \
312 if (_info != &stg_BLACKHOLE_BQ_info) { \
313 DEBUG_FILL_SLOP(p1); \
314 ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list; \
315 generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1; \
317 ((StgIndOldGen *)p1)->indirectee = p2; \
318 SET_INFO(p1, &stg_IND_OLDGEN_info); \
319 TICK_UPD_OLD_IND(); \
325 /* The permanent indirection version isn't performance critical. We
326 * therefore use an inline C function instead of the C-- macro.
/* Install a *permanent* indirection (IND_PERM / IND_OLDGEN_PERM), which
 * the GC will not short-circuit — used by profiling/ticky builds so
 * update counts and cost attribution survive GC.  Same generation logic
 * as updateWithIndirection above.
 * NOTE(review): the storage class / return type, remaining parameters,
 * bd initialisation, else, and closing braces are elided from this
 * listing. */
330 updateWithPermIndirection(const StgInfoTable *info,
335 ASSERT( p1 != p2 && !closure_IND(p1) );
339 // Destroy the old closure.
340 // Nb: LDV_* stuff cannot mix with ticky-ticky
341 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1);
/* New generation: plain overwrite with the permanent-IND info table. */
344 if (bd->gen_no == 0) {
345 ((StgInd *)p1)->indirectee = p2;
346 SET_INFO(p1, &stg_IND_PERM_info);
348 // We have just created a new closure.
349 LDV_RECORD_CREATE(p1);
350 TICK_UPD_NEW_PERM_IND(p1);
/* Old generation: record the old-to-new pointer on mut_once_list, except
 * for BLACKHOLE_BQs which are already on a mutable list. */
352 if (info != &stg_BLACKHOLE_BQ_info) {
353 ((StgIndOldGen *)p1)->mut_link = generations[bd->gen_no].mut_once_list;
354 generations[bd->gen_no].mut_once_list = (StgMutClosure *)p1;
356 ((StgIndOldGen *)p1)->indirectee = p2;
357 SET_INFO(p1, &stg_IND_OLDGEN_PERM_info);
359 // We have just created a new closure.
360 LDV_RECORD_CREATE(p1);
361 TICK_UPD_OLD_PERM_IND();
366 #endif /* UPDATES_H */