1 /* -----------------------------------------------------------------------------
2 * $Id: Updates.h,v 1.32 2003/07/18 14:39:05 simonmar Exp $
4 * (c) The GHC Team, 1998-1999
6 * Definitions related to updates.
8 * ---------------------------------------------------------------------------*/
13 /* -----------------------------------------------------------------------------
Update a closure with an indirection.  This may also involve waking
up a queue of blocked threads waiting on the result of this
computation.
-------------------------------------------------------------------------- */
19 /* ToDo: overwrite slop words with something safe in case sanity checking
21 * (I think the fancy version of the GC is supposed to do this too.)
/* This expands to a fair chunk of code, what with waking up threads
 * and checking whether we're updating something in an old generation.
 * Preferably don't use this macro inline in compiled code.
 */
/* NOTE(review): the #if defined(TICKY_TICKY) / #else / #endif lines that
 * originally selected between these two pairs of definitions appear to be
 * missing from this chunk (cf. the comment below: UPD_IND does a PERM_IND
 * when TICKY_TICKY is on) -- confirm against the full file.
 */
/* Ticky/profiled path: updates go through a permanent indirection so they
 * can be accounted for; UPD_SPEC_IND ignores its ind_info argument here. */
# define UPD_IND(updclosure, heapptr) \
        UPD_PERM_IND(updclosure,heapptr)
# define UPD_SPEC_IND(updclosure, ind_info, heapptr, and_then) \
        UPD_PERM_IND(updclosure,heapptr); and_then
/* Normal path: a real indirection, using the standard stg_IND info table
 * (UPD_IND) or a caller-supplied one (UPD_SPEC_IND). */
# define UPD_IND(updclosure, heapptr) \
        UPD_REAL_IND(updclosure,&stg_IND_info,heapptr,)
# define UPD_SPEC_IND(updclosure, ind_info, heapptr, and_then) \
        UPD_REAL_IND(updclosure,ind_info,heapptr,and_then)
/* UPD_IND actually does a PERM_IND if TICKY_TICKY is on;
   if you *really* need an IND use UPD_REAL_IND.
 */
/* Overwrite updclosure with an indirection (info table ind_info) pointing
 * at heapptr.  This is the locking variant: when the updatee's block
 * descriptor back-pointer differs from BaseReg -- presumably meaning the
 * closure may be visible to other threads; confirm against the full RTS --
 * LOCK_CLOSURE is used to claim it before reading its info pointer;
 * otherwise the info pointer is read directly.  Any threads blocked on the
 * closure are then woken (AWAKEN_BQ) and updateWithIndirection performs
 * the actual update.
 * NOTE(review): several interior lines of this macro (the opening brace,
 * the "} else {" of the lock test, and the trailing "and_then);" argument
 * plus closing brace) are missing from this chunk -- do not edit without
 * consulting the full file.
 */
#define UPD_REAL_IND(updclosure, ind_info, heapptr, and_then) \
        const StgInfoTable *info; \
        if (Bdescr((P_)updclosure)->u.back != (bdescr *)BaseReg) { \
            info = LOCK_CLOSURE(updclosure); \
            info = updclosure->header.info; \
        AWAKEN_BQ(info,updclosure); \
        updateWithIndirection(info, ind_info, \
                              (StgClosure *)updclosure, \
                              (StgClosure *)heapptr, \
/* Non-locking variant of UPD_REAL_IND: read the info pointer, wake any
 * blocked threads (AWAKEN_BQ), then overwrite updclosure with an
 * indirection to heapptr via updateWithIndirection.
 * NOTE(review): the #if/#else selecting between this and the locking
 * variant above, the macro's braces, and the trailing "and_then);" line
 * are missing from this chunk.
 */
#define UPD_REAL_IND(updclosure, ind_info, heapptr, and_then) \
        const StgInfoTable *info; \
        info = ((StgClosure *)updclosure)->header.info; \
        AWAKEN_BQ(info,updclosure); \
        updateWithIndirection(((StgClosure *)updclosure)->header.info, ind_info, \
                              (StgClosure *)updclosure, \
                              (StgClosure *)heapptr, \
/* Update a static closure with an indirection to heapptr: wake any threads
 * blocked on it (AWAKEN_STATIC_BQ checks for a static BLACKHOLE_BQ) and
 * overwrite it via updateWithStaticIndirection.
 * NOTE(review): the macro's opening/closing brace lines are missing from
 * this chunk.
 */
#define UPD_STATIC_IND(updclosure, heapptr) \
        const StgInfoTable *info; \
        info = ((StgClosure *)updclosure)->header.info; \
        AWAKEN_STATIC_BQ(info,updclosure); \
        updateWithStaticIndirection(info, \
                                    (StgClosure *)updclosure, \
                                    (StgClosure *)heapptr); \
#if defined(PROFILING) || defined(TICKY_TICKY)
/* Update with a *permanent* indirection: like UPD_IND but goes through
 * updateWithPermIndirection -- presumably so the indirection is preserved
 * for profiling/ticky accounting (confirm in the GC/profiler sources).
 * NOTE(review): the macro's brace lines and the matching #endif are
 * missing from this chunk.
 */
#define UPD_PERM_IND(updclosure, heapptr) \
        const StgInfoTable *info; \
        info = ((StgClosure *)updclosure)->header.info; \
        AWAKEN_BQ(info,updclosure); \
        updateWithPermIndirection(info, \
                                  (StgClosure *)updclosure, \
                                  (StgClosure *)heapptr); \
/* UPD_IND_NOLOCK: update a closure with an indirection without taking the
 * closure lock -- presumably for contexts where locking is unnecessary or
 * already done; confirm at call sites.
 * NOTE(review): the #if that opens this conditional chain, the #else lines
 * between the variants, the macros' brace lines, and the closing #endifs
 * are all missing from this chunk; the chain below cannot be read as-is
 * without the full file.
 */
#define UPD_IND_NOLOCK(updclosure, heapptr) \
        const StgInfoTable *info; \
        info = updclosure->header.info; \
        AWAKEN_BQ(info,updclosure); \
        updateWithIndirection(info,&stg_IND_info, \
                              (StgClosure *)updclosure, \
                              (StgClosure *)heapptr,); \
#elif defined(RTS_SUPPORTS_THREADS)
/* NOTE(review): two threaded variants follow -- one using a permanent
 * indirection, one a real indirection -- but the #if/#else guard that
 * selects between them is missing from this chunk. */
# define UPD_IND_NOLOCK(updclosure, heapptr) \
        const StgInfoTable *info; \
        info = ((StgClosure *)updclosure)->header.info; \
        AWAKEN_BQ_NOLOCK(info,updclosure); \
        updateWithPermIndirection(info, \
                                  (StgClosure *)updclosure, \
                                  (StgClosure *)heapptr); \
# define UPD_IND_NOLOCK(updclosure, heapptr) \
        const StgInfoTable *info; \
        info = ((StgClosure *)updclosure)->header.info; \
        AWAKEN_BQ_NOLOCK(info,updclosure); \
        updateWithIndirection(info,&stg_IND_info, \
                              (StgClosure *)updclosure, \
                              (StgClosure *)heapptr,); \
#define UPD_IND_NOLOCK(updclosure,heapptr) UPD_IND(updclosure,heapptr)
132 /* -----------------------------------------------------------------------------
133 Awaken any threads waiting on this computation
134 -------------------------------------------------------------------------- */
/* Read `closure`'s info pointer and wake any threads blocked on it.
 * NOTE(review): the macro's opening/closing brace lines are missing from
 * this chunk. */
#define AWAKEN_BQ_CLOSURE(closure) \
        const StgInfoTable *info; \
        info = ((StgClosure *)closure)->header.info; \
        AWAKEN_BQ(info,closure); \
146 In a parallel setup several types of closures might have a blocking queue:
147 BLACKHOLE_BQ ... same as in the default concurrent setup; it will be
148 reawakened via calling UPD_IND on that closure after
149 having finished the computation of the graph
150 FETCH_ME_BQ ... a global indirection (FETCH_ME) may be entered by a
151 local TSO, turning it into a FETCH_ME_BQ; it will be
152 reawakened via calling processResume
153 RBH ... a revertible black hole may be entered by another
154 local TSO, putting it onto its blocking queue; since
155 RBHs only exist while the corresponding closure is in
156 transit, they will be reawakened via calling
157 convertToFetchMe (upon processing an ACK message)
159 In a parallel setup a blocking queue may contain 3 types of closures:
160 TSO ... as in the default concurrent setup
161 BLOCKED_FETCH ... indicating that a TSO on another PE is waiting for
162 the result of the current computation
163 CONSTR ... an RBHSave closure (which contains data ripped out of
164 the closure to make room for a blocking queue; since
it only contains data we use the existing type of
166 a CONSTR closure); this closure is the end of a
167 blocking queue for an RBH closure; it only exists in
168 this kind of blocking queue and must be at the end
/* Parallel (GUM) wakeup: a blocking queue may hang off a BLACKHOLE_BQ, a
 * FETCH_ME_BQ, or an RBH closure (see the long comment above), so all
 * three are checked before handing the queue to awakenBlockedQueue.
 * NOTE(review): the closing "}" of AWAKEN_BQ's if-statement is missing
 * from this chunk. */
extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#define DO_AWAKEN_BQ(bqe, node) STGCALL2(awakenBlockedQueue, bqe, node);
#define AWAKEN_BQ(info,closure) \
        if (info == &stg_BLACKHOLE_BQ_info || \
            info == &stg_FETCH_ME_BQ_info || \
            get_itbl(closure)->type == RBH) { \
            DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
/* GranSim wakeup. */
extern void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#define DO_AWAKEN_BQ(bq, node) STGCALL2(awakenBlockedQueue, bq, node);
/* In GranSim we don't have FETCH_ME or FETCH_ME_BQ closures, so they are
   not checked. The rest of the code is the same as for GUM.
 */
/* NOTE(review): AWAKEN_BQ's closing "}" line is missing from this chunk. */
#define AWAKEN_BQ(info,closure) \
        if (info == &stg_BLACKHOLE_BQ_info || \
            get_itbl(closure)->type == RBH) { \
            DO_AWAKEN_BQ(((StgBlockingQueue *)closure)->blocking_queue, closure); \
#else /* !GRAN && !PAR */
/* Default version: only BLACKHOLE_BQ closures carry a blocking queue (a
 * list of TSOs); AWAKEN_STATIC_BQ is the analogue for static black holes.
 * NOTE(review): the closing "}" lines of both if-statements are missing
 * from this chunk. */
extern void awakenBlockedQueue(StgTSO *q);
#define DO_AWAKEN_BQ(closure) \
        STGCALL1(awakenBlockedQueue, \
                 ((StgBlockingQueue *)closure)->blocking_queue);
#define AWAKEN_BQ(info,closure) \
        if (info == &stg_BLACKHOLE_BQ_info) { \
            DO_AWAKEN_BQ(closure); \
#define AWAKEN_STATIC_BQ(info,closure) \
        if (info == &stg_BLACKHOLE_BQ_STATIC_info) { \
            DO_AWAKEN_BQ(closure); \
#ifdef RTS_SUPPORTS_THREADS
/* No-lock variants for the threaded RTS: same BLACKHOLE_BQ check as
 * AWAKEN_BQ, but calls awakenBlockedQueueNoLock -- presumably for callers
 * that already hold the required lock (confirm at call sites).
 * NOTE(review): the closing "}" of the if-statement and the #endif that
 * matches this #ifdef are missing from this chunk; the #endif below closes
 * the outer GRAN/PAR conditional. */
extern void awakenBlockedQueueNoLock(StgTSO *q);
#define DO_AWAKEN_BQ_NOLOCK(closure) \
        STGCALL1(awakenBlockedQueueNoLock, \
                 ((StgBlockingQueue *)closure)->blocking_queue);
#define AWAKEN_BQ_NOLOCK(info,closure) \
        if (info == &stg_BLACKHOLE_BQ_info) { \
            DO_AWAKEN_BQ_NOLOCK(closure); \
#endif /* GRAN || PAR */
226 /* -------------------------------------------------------------------------
227 Push an update frame on the stack.
228 ------------------------------------------------------------------------- */
/* PUSH_STD_CCCS(frame): in profiling builds, record the current
 * cost-centre stack (CCCS) in a freshly pushed frame; otherwise it expands
 * to nothing.
 * NOTE(review): the #else line between the two definitions and the closing
 * #endif are missing from this chunk. */
#if defined(PROFILING)
// frame->header.prof.hp.rs = NULL (or frame->header.prof.hp.ldvw = 0) is
// unnecessary because it is not used anyhow.
#define PUSH_STD_CCCS(frame) (frame->header.prof.ccs = CCCS)
#define PUSH_STD_CCCS(frame)
/* Info tables for the standard update / no-update stack frames. */
extern DLL_IMPORT_RTS const StgPolyInfoTable stg_upd_frame_info;
extern DLL_IMPORT_RTS const StgPolyInfoTable stg_noupd_frame_info;
/* Push an update frame for `target` just below Sp + Sp_offset: set the
 * frame's info pointer to stg_upd_frame_info, record the updatee, and save
 * the cost-centre stack in profiling builds (PUSH_STD_CCCS).
 * TICK_UPDF_PUSHED is ticky-profiling bookkeeping.
 * NOTE(review): the macro's enclosing brace lines are missing from this
 * chunk. */
#define PUSH_UPD_FRAME(target, Sp_offset) \
        StgUpdateFrame *__frame; \
        TICK_UPDF_PUSHED(target, GET_INFO((StgClosure*)target)); \
        __frame = (StgUpdateFrame *)(Sp + (Sp_offset)) - 1; \
        SET_INFO(__frame, (StgInfoTable *)&stg_upd_frame_info); \
        __frame->updatee = (StgClosure *)(target); \
        PUSH_STD_CCCS(__frame); \
251 /* -----------------------------------------------------------------------------
254 When a CAF is first entered, it creates a black hole in the heap,
255 and updates itself with an indirection to this new black hole.
257 We update the CAF with an indirection to a newly-allocated black
258 hole in the heap. We also set the blocking queue on the newly
259 allocated black hole to be empty.
261 Why do we make a black hole in the heap when we enter a CAF?
263 - for a generational garbage collector, which needs a fast
264 test for whether an updatee is in an old generation or not
266 - for the parallel system, which can implement updates more
267 easily if the updatee is always in the heap. (allegedly).
269 When debugging, we maintain a separate CAF list so we can tell when
270 a CAF has been garbage collected.
271 -------------------------------------------------------------------------- */
/* ToDo: only call newCAF when debugging. */
extern void newCAF(StgClosure*);
/* newCAF must be called before the itbl ptr is overwritten, since
   newCAF records the old itbl ptr in order to do CAF reverting
   (which Hugs needs to do in order that combined mode works right.)
 */
/* Update a CAF with an indirection to bhptr: lock the CAF, register it
 * with the RTS via newCAF (before the info pointer changes -- see above),
 * store the indirectee, and finally overwrite the info pointer with
 * stg_IND_STATIC_info.
 * NOTE(review): the macro's opening/closing brace lines are missing from
 * this chunk. */
#define UPD_CAF(cafptr, bhptr) \
        LOCK_CLOSURE(cafptr); \
        STGCALL1(newCAF,(StgClosure *)cafptr); \
        ((StgInd *)cafptr)->indirectee = (StgClosure *)(bhptr); \
        SET_INFO((StgInd *)cafptr,(const StgInfoTable*)&stg_IND_STATIC_info);\
289 /* -----------------------------------------------------------------------------
290 Update-related prototypes
291 -------------------------------------------------------------------------- */
293 #endif /* UPDATES_H */