/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2004
 *
 * Performing updates.
 *
 * ---------------------------------------------------------------------------*/

#ifndef UPDATES_H
#define UPDATES_H
/* -----------------------------------------------------------------------------
   Updates

   We have two layers of update macros.  The top layer, UPD_IND() and
   friends, performs all the work of an update.  In detail:

      - if the closure being updated is a blocking queue, then all the
        threads waiting on the blocking queue are woken up;

      - then the lower-level updateWithIndirection() macro is invoked
        to actually replace the closure with an indirection (see below).

   -------------------------------------------------------------------------- */
/* SEMI is the empty continuation: it is passed as the and_then argument
   when there is nothing more to do after the update. */
#define SEMI ;

# define UPD_IND(updclosure, heapptr) \
   UPD_REAL_IND(updclosure,INFO_PTR(stg_IND_info),heapptr,SEMI)
# define UPD_SPEC_IND(updclosure, ind_info, heapptr, and_then) \
   UPD_REAL_IND(updclosure,ind_info,heapptr,and_then)
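
/* Illustrative sketch (hypothetical call site, not part of this header):
 * with 'updatee' naming the closure under update and 'result' the value
 * it evaluated to, the whole top-layer update is:
 *
 *     UPD_IND(updatee, result);
 *
 * This expands to UPD_REAL_IND with stg_IND_info as the indirection's
 * info table and SEMI (a bare semicolon) as the continuation.
 */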
/* These macros have to work in both C and C--, so here's the
 * impedance matching:
 */
#ifdef CMINUSMINUS
#define BLOCK_BEGIN
#define BLOCK_END
#define DECLARE_IPTR(info)  W_ info
#define FCALL               foreign "C"
#define INFO_PTR(info)      info
#define ARG_PTR             "ptr"
#else
#define BLOCK_BEGIN         {
#define BLOCK_END           }
#define DECLARE_IPTR(info)  const StgInfoTable *(info)
#define FCALL               /* nothing */
#define INFO_PTR(info)      &info
#define StgBlockingQueue_blocking_queue(closure) \
    (((StgBlockingQueue *)closure)->blocking_queue)
#define ARG_PTR             /* nothing */
#endif
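
/* Illustrative sketch of the impedance matching: the same source line,
 *
 *     updateWithIndirection(INFO_PTR(stg_IND_info), p1, p2, SEMI);
 *
 * expands with a bare 'stg_IND_info' label in C-- code, but with
 * '&stg_IND_info' (the address of the info table) when compiled as C.
 */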
/* krc: there used to be an UPD_REAL_IND and an
   UPD_PERM_IND, the latter of which was used for
   ticky and cost-centre profiling.
   For now, we just have UPD_REAL_IND. */
#define UPD_REAL_IND(updclosure, ind_info, heapptr, and_then)  \
        BLOCK_BEGIN                                            \
        DECLARE_IPTR(info);                                    \
        info = GET_INFO(updclosure);                           \
        updateWithIndirection(ind_info,                        \
                              updclosure,                      \
                              heapptr,                         \
                              and_then);                       \
        BLOCK_END
#if defined(RTS_SUPPORTS_THREADS)

# define UPD_IND_NOLOCK(updclosure, heapptr)            \
   BLOCK_BEGIN                                          \
        updateWithIndirection(INFO_PTR(stg_IND_info),   \
                              updclosure,               \
                              heapptr,);                \
   BLOCK_END

#else
#define UPD_IND_NOLOCK(updclosure,heapptr) UPD_IND(updclosure,heapptr)
#endif /* RTS_SUPPORTS_THREADS */
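
/* Illustrative sketch (hypothetical call site): in the threaded RTS,
 * an updater that does not need the info-pointer bookkeeping done by
 * UPD_REAL_IND can use the cheaper
 *
 *     UPD_IND_NOLOCK(updclosure, heapptr);
 *
 * which performs the bare updateWithIndirection with an empty
 * continuation; without thread support it is simply UPD_IND.
 */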
/* -----------------------------------------------------------------------------
   Awaken any threads waiting on a blocking queue (BLACKHOLE_BQ).
   -------------------------------------------------------------------------- */
#if defined(PAR)

/*
   In a parallel setup several types of closures might have a blocking queue:
     BLACKHOLE_BQ ... same as in the default concurrent setup; it will be
                      reawakened via calling UPD_IND on that closure after
                      having finished the computation of the graph
     FETCH_ME_BQ  ... a global indirection (FETCH_ME) may be entered by a
                      local TSO, turning it into a FETCH_ME_BQ; it will be
                      reawakened via calling processResume
     RBH          ... a revertible black hole may be entered by another
                      local TSO, putting it onto its blocking queue; since
                      RBHs only exist while the corresponding closure is in
                      transit, they will be reawakened via calling
                      convertToFetchMe (upon processing an ACK message)

   In a parallel setup a blocking queue may contain three types of closures:
     TSO           ... as in the default concurrent setup
     BLOCKED_FETCH ... indicating that a TSO on another PE is waiting for
                       the result of the current computation
     CONSTR        ... an RBHSave closure (which contains data ripped out of
                       the closure to make room for a blocking queue; since
                       it only contains data we use the existing type of
                       a CONSTR closure); this closure is the end of a
                       blocking queue for an RBH closure; it only exists in
                       this kind of blocking queue and must be at the end
*/
extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);

#define DO_AWAKEN_BQ(bqe, node)  STGCALL2(awakenBlockedQueue, bqe, node);
#define AWAKEN_BQ(info,closure)                                         \
        if (info == &stg_BLACKHOLE_BQ_info ||                           \
            info == &stg_FETCH_ME_BQ_info ||                            \
            get_itbl(closure)->type == RBH) {                           \
                DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
        }
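
/* Illustrative sketch (hypothetical caller in a PAR build): an updater
 * reads the closure's info pointer and wakes any queued blocking-queue
 * elements before overwriting the closure:
 *
 *     DECLARE_IPTR(info);
 *     info = GET_INFO(updclosure);
 *     AWAKEN_BQ(info, updclosure);
 *     ... then overwrite updclosure with an indirection ...
 */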
#elif defined(GRAN)

extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);

#define DO_AWAKEN_BQ(bq, node)  STGCALL2(awakenBlockedQueue, bq, node);

/* In GranSim we don't have FETCH_ME or FETCH_ME_BQ closures, so they are
   not checked.  The rest of the code is the same as for GUM.
*/
#define AWAKEN_BQ(info,closure)                                         \
        if (info == &stg_BLACKHOLE_BQ_info ||                           \
            get_itbl(closure)->type == RBH) {                           \
                DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
        }

#endif /* GRAN || PAR */
/* -----------------------------------------------------------------------------
   Updates: lower-level macros which update a closure with an
   indirection to another closure.

   There are several variants of this code.

   -------------------------------------------------------------------------- */
/* LDV profiling:
 * We call LDV_recordDead_FILL_SLOP_DYNAMIC(p1) regardless of the generation
 * in which p1 resides.
 *
 * Note: strictly speaking we do *not* need to call LDV_RECORD_CREATE()
 * for IND and IND_OLDGEN closures, because they are inherently "used".
 * However, omitting the call would break the invariant that every closure
 * keeps its creation time in its profiling field, so we call
 * LDV_RECORD_CREATE() anyway.
 */
/* In the DEBUG case, we also zero out the slop of the old closure,
 * so that the sanity checker can tell where the next closure is.
 *
 * Two important invariants: we should never try to update a closure
 * to point to itself, and the closure being updated should not
 * already have been updated (the mutable list will get messed up
 * otherwise).
 *
 * NB. We do *not* do this in THREADED_RTS mode, because when we have the
 * possibility of multiple threads entering the same closure, zeroing
 * the slop in one of the threads would have a disastrous effect on
 * the other (seen in the wild!).
 */
#ifdef CMINUSMINUS

#define FILL_SLOP(p)                                                    \
  W_ inf;                                                               \
  W_ sz;                                                                \
  W_ i;                                                                 \
  inf = %GET_STD_INFO(p);                                               \
  /* BLACKHOLEs: the slop was already filled when the thunk was blackholed */ \
  if (%INFO_TYPE(inf) != HALF_W_(BLACKHOLE)                             \
        && %INFO_TYPE(inf) != HALF_W_(CAF_BLACKHOLE)) {                 \
    if (%INFO_TYPE(inf) == HALF_W_(THUNK_SELECTOR)) {                   \
      sz = BYTES_TO_WDS(SIZEOF_StgSelector_NoThunkHdr);                 \
    } else { if (%INFO_TYPE(inf) == HALF_W_(AP_STACK)) {                \
      sz = StgAP_STACK_size(p) + BYTES_TO_WDS(SIZEOF_StgAP_STACK_NoThunkHdr); \
    } else { if (%INFO_TYPE(inf) == HALF_W_(AP)) {                      \
      sz = TO_W_(StgAP_n_args(p)) + BYTES_TO_WDS(SIZEOF_StgAP_NoThunkHdr); \
    } else {                                                            \
      sz = TO_W_(%INFO_PTRS(inf)) + TO_W_(%INFO_NPTRS(inf));            \
    } } }                                                               \
    i = 0;                                                              \
  for:                                                                  \
    if (i < sz) {                                                       \
      StgThunk_payload(p,i) = 0;                                        \
      i = i + 1;                                                        \
      goto for;                                                         \
    }                                                                   \
  }
#else /* !CMINUSMINUS */

INLINE_HEADER void
FILL_SLOP(StgClosure *p)
{
    StgInfoTable *inf = get_itbl(p);
    nat i, sz;

    switch (inf->type) {
    case BLACKHOLE:
    case CAF_BLACKHOLE:
        // we already filled in the slop when we overwrote the thunk
        // with BLACKHOLE, and also an evacuated BLACKHOLE is only the
        // size of an IND.
        return;
    case THUNK_SELECTOR:
        sz = sizeofW(StgSelector) - sizeofW(StgThunkHeader); break;
    case AP:
        sz = ((StgAP *)p)->n_args + sizeofW(StgAP) - sizeofW(StgThunkHeader); break;
    case AP_STACK:
        sz = ((StgAP_STACK *)p)->size + sizeofW(StgAP_STACK) - sizeofW(StgThunkHeader); break;
    default:
        sz = inf->layout.payload.ptrs + inf->layout.payload.nptrs; break;
    }
    for (i = 0; i < sz; i++) {
        ((StgThunk *)p)->payload[i] = 0;
    }
}

#endif /* CMINUSMINUS */
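
/* Worked example (illustrative): for an AP closure with n_args == 2,
 * FILL_SLOP zeroes
 *
 *     sz = 2 + sizeofW(StgAP) - sizeofW(StgThunkHeader)
 *
 * payload words: the AP's fixed fields beyond the thunk header (the
 * arity/n_args word and the fun pointer, on a platform where arity and
 * n_args pack into one word) plus the two argument words, so the sanity
 * checker sees a cleanly zeroed region up to the next closure.
 */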
#if !defined(DEBUG) || defined(THREADED_RTS)
#define DEBUG_FILL_SLOP(p) /* do nothing */
#else
#define DEBUG_FILL_SLOP(p) FILL_SLOP(p)
#endif
/* We have two versions of this macro (sadly), one for use in C-- code,
 * and the other for C.
 *
 * The and_then argument is a performance hack so that we can paste in
 * the continuation code directly.  It helps shave a couple of
 * instructions off the common case in the update code, which is
 * worthwhile (the update code is often part of the inner loop).
 * (Except that gcc now appears to common up this code again and
 * invert the optimisation.  Grrrr --SDM.)
 */
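
/* Illustrative sketch (hypothetical C-- call site): the continuation is
 * pasted directly into whichever branch is taken, e.g.
 *
 *     updateWithIndirection(stg_IND_info, updatee, ret,
 *                           jump %ENTRY_CODE(Sp(0)));
 *
 * so the common path falls through into the jump with no extra branch.
 */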
#ifdef CMINUSMINUS

#define generation(n) (W_[generations] + n*SIZEOF_generation)

#define updateWithIndirection(ind_info, p1, p2, and_then)       \
    W_ bd;                                                      \
                                                                \
    DEBUG_FILL_SLOP(p1);                                        \
    LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1);                      \
    StgInd_indirectee(p1) = p2;                                 \
    /* make the indirectee visible before the info pointer */   \
    prim %write_barrier() [];                                   \
    bd = Bdescr(p1);                                            \
    if (bdescr_gen_no(bd) != 0 :: CInt) {                       \
      /* p1 is old-gen: add it to the mutable list */           \
      recordMutableCap(p1, TO_W_(bdescr_gen_no(bd)), R1);       \
      SET_INFO(p1, stg_IND_OLDGEN_info);                        \
      LDV_RECORD_CREATE(p1);                                    \
      TICK_UPD_OLD_IND();                                       \
      and_then;                                                 \
    } else {                                                    \
      SET_INFO(p1, ind_info);                                   \
      LDV_RECORD_CREATE(p1);                                    \
      TICK_UPD_NEW_IND();                                       \
      and_then;                                                 \
    }
#else /* !CMINUSMINUS */

#define updateWithIndirection(ind_info, p1, p2, and_then)       \
  {                                                             \
    bdescr *bd;                                                 \
                                                                \
    ASSERT( (P_)p1 != (P_)p2 );                                 \
    /* not necessarily true: ASSERT( !closure_IND(p1) ); */     \
    /* occurs in RaiseAsync.c:raiseAsync() */                   \
    DEBUG_FILL_SLOP(p1);                                        \
    LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1);                      \
    ((StgInd *)p1)->indirectee = p2;                            \
    /* make the indirectee visible before the info pointer */   \
    write_barrier();                                            \
    bd = Bdescr((P_)p1);                                        \
    if (bd->gen_no != 0) {                                      \
      /* p1 is old-gen: add it to the mutable list */           \
      recordMutableGenLock(p1, &generations[bd->gen_no]);       \
      SET_INFO(p1, &stg_IND_OLDGEN_info);                       \
      LDV_RECORD_CREATE(p1);                                    \
      TICK_UPD_OLD_IND();                                       \
      and_then;                                                 \
    } else {                                                    \
      SET_INFO(p1, ind_info);                                   \
      LDV_RECORD_CREATE(p1);                                    \
      TICK_UPD_NEW_IND();                                       \
      and_then;                                                 \
    }                                                           \
  }
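
/* Illustrative sketch (hypothetical C caller): RTS code that has
 * blackholed a closure p1 can update it with an empty continuation:
 *
 *     updateWithIndirection(INFO_PTR(stg_IND_info), p1, p2, );
 *
 * This fills slop (DEBUG only), writes the indirectee, issues the write
 * barrier, and then publishes p1 as stg_IND or stg_IND_OLDGEN depending
 * on its generation.
 */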
#endif /* CMINUSMINUS */

#endif /* UPDATES_H */