1 \section[SMupdate.h]{Update interface}
3 This interface provides a second level of abstraction from the storage
4 manager hiding all the nasties associated with updates, indirections,
12 %************************************************************************
14 \subsubsection[update-frames]{Pushing Update Frames}
16 %************************************************************************
18 If a closure is to be updated with the result of the computation an
19 update frame must be pushed onto the B stack.
21 A {\em Standard update frame} contains (in order from the top of the
24 \item The return vector.
25 \item The closure to be updated.
26 \item Saved @SuB@ (points to the next update frame).
30 Note: We used to keep the {\em offsets} smashed into one word, but the
31 introduction of strict evaluation meant we could overflow this.
33 [Don't really believe this cost-centre stuff WDP 94/07]
35 If we are keeping track of the current cost centre we have to make the
39 The current cost centre, @CCC@, is added as an additional field to the
40 update frames described above. It is the last field in the frame.
41 When the update is executed the cost centre is restored.
44 A special restore cost centre frame is introduced which does not
45 contain a closure to update, but just a cost centre to restore.
48 The different update frame sizes, @STD_UF_SIZE@, @CON_UF_SIZE@ and
49 optionally @RCC_UF_SIZE@, and the offsets within a frame (@UF_RET@,
50 @UF_UPDATEE@, etc) are declared in \tr{GhcConstants.lh}.
52 We now have the macros to push update frames. They are passed the
53 update @target@ and the A and B stack offsets at which the current top
54 of stack is found. E.g. @SpX + SpX_offset@ points to the top word on
55 the stack. The update frame is pushed directly above this position on
56 the B stack. @SpB + SpB_offset + xxx_UF_SIZE@ gives the topmost word
57 of the update frame, from which we subtract the offsets.
59 ``A picture is worth five thousand bytes.''
61 A stk | |-----------------------|
63 |-----------------------|
65 |-----------------------|
67 |-----------------------|
72 |=======================| (new update frame)
73 | upd code or vector | <- new SuB
74 |-----------------------|
76 |-----------------------|
77 | SuB (grip: SuB_off) | (SuX_off = abs ( new SuX - prev SuX ); e.g., 7 for SuB
78 |-----------------------|
79 | SuA (grip: SuA_off) |
80 |-----------------------|
82 |=======================|
83 | | <- SpB now [really (SpB + SpB_offset)]
84 |-----------------------|
86 |-----------------------|
88 |=======================| (prev update frame [e.g.])
89 | upd code or vector | <- prev SuB
90 |-----------------------|
92 |-----------------------|
94 |-----------------------|
96 |-----------------------|
98 |=======================|
100 ^ |-----------------------|
/* Collapses chains of adjacent update frames on the stack ("stack
 * squeezing"); the three P_ arguments are presumably stack/frame
 * pointers -- TODO confirm exact roles against its definition. */
105 I_ SqueezeUpdateFrames PROTO((P_, P_, P_));
/* Return vector and direct-return entry code for a standard update frame. */
107 EXTDATA_RO(vtbl_StdUpdFrame);
108 EXTFUN(StdUpdFrameDirectReturn);
110 EXTFUN(StdErrorCode); /* Where should this go? */
120 EXTFUN(IndUpdRetDir);
123 Note that UNVEC() is used to select whole statements (declarations) as
124 well as labels. Please don't put parentheses around the expansion.
/* Return-vector conventions.  With reversed info tables
 * (__STG_REV_TBLS__) vectored-return slots live at negative offsets and
 * a "direct" return address is used as-is; otherwise offsets are
 * positive and DIRECT() fetches the code pointer through the vector
 * slot.  UNVEC(direct,vector) selects whichever statement or label
 * applies -- and, per the note above, its expansion must NOT be
 * parenthesized because it can expand to whole declarations.
 * NOTE(review): the #else/#endif lines of this conditional appear to be
 * elided in this listing -- confirm against the full file. */
127 #ifdef __STG_REV_TBLS__
128 #define RVREL(offset) (-(offset)-1)
129 #define DIRECT(target) (target)
130 #define UNVEC(direct,vector) direct
132 #define RVREL(offset) (offset)
133 #define DIRECT(target) (*(target))
134 #define UNVEC(direct,vector) vector
138 /* for stack chunking */
/* Return vector and entry points used when execution returns off the
 * end of a stack chunk; the Underflow vtbl's entries switch back to the
 * previous chunk.  NOTE(review): the #if matching the #endif below is
 * outside this view -- its trailing comment says it is CONCURRENT-only;
 * confirm against the full file. */
139 extern const W_ vtbl_Underflow[];
140 EXTFUN(UnderflowDirectReturn);
141 EXTFUN(UnderflowVect0);
142 EXTFUN(UnderflowVect1);
143 EXTFUN(UnderflowVect2);
144 EXTFUN(UnderflowVect3);
145 EXTFUN(UnderflowVect4);
146 EXTFUN(UnderflowVect5);
147 EXTFUN(UnderflowVect6);
148 EXTFUN(UnderflowVect7);
149 EXTFUN(StackUnderflowEnterNode);
150 EXTFUN(CommonUnderflow);
151 EXTFUN(PrimUnderflow);
152 #endif /* CONCURRENT */
154 /* Now, we always use pointers in update frame, even in the threaded world */
/* Writers for the individual fields of an update frame: @frame@ points
 * to the frame, and each field is stored (cast to a plain word) at its
 * UF_* offset, which is declared in GhcConstants.lh. */
156 #define PUSH_RET(frame, rv) (frame)[BREL(UF_RET)] = (W_)(rv)
157 #define PUSH_UPDATEE(frame, updatee) (frame)[BREL(UF_UPDATEE)] = (W_)(updatee)
158 #define PUSH_SuB(frame, sub) (frame)[BREL(UF_SUB)] = (W_)(sub)
159 #define PUSH_SuA(frame, sua) (frame)[BREL(UF_SUA)] = (W_)(sua)
161 #if defined(USE_COST_CENTRES)
162 #define PUSH_STD_CCC(frame) (frame)[BREL(UF_COST_CENTRE)] = (W_)(CCC)
164 #define PUSH_STD_CCC(frame)
167 /* When GRABing, "frame" pts to an update frame */
/* Readers mirroring the PUSH_* writers above: fetch a field from an
 * update frame and cast the stored word back to its proper type. */
169 #define GRAB_RET(frame) ((void *)((frame)[BREL(UF_RET)]))
170 #define GRAB_SuB(frame) ((P_)((frame)[BREL(UF_SUB)]))
171 #define GRAB_SuA(frame) ((PP_)((frame)[BREL(UF_SUA)]))
172 #define GRAB_UPDATEE(frame) ((P_)((frame)[BREL(UF_UPDATEE)]))
173 #define GRAB_COST_CENTRE(frame) ((CostCentre)((frame)[BREL(UF_COST_CENTRE)]))
175 #define PUSH_STD_UPD_FRAME(target, SpA_offset, SpB_offset) \
178 UPDF_STD_PUSHED(); /* ticky-ticky, spat */ \
179 __frame = SpB - BREL(SpB_offset + STD_UF_SIZE); \
180 PUSH_RET(__frame, RetReg); \
181 PUSH_SuB(__frame, SuB); \
182 PUSH_SuA(__frame, SuA); \
183 PUSH_UPDATEE(__frame, target); \
184 PUSH_STD_CCC(__frame); \
186 SuA = SpA - AREL(SpA_offset); \
189 #define POP_STD_UPD_FRAME() \
191 RetReg = GRAB_RET(SpB); \
192 SuB = GRAB_SuB(SpB); \
193 SuA = GRAB_SuA(SpB); \
194 SpB += BREL(STD_UF_SIZE); \
200 %************************************************************************
202 \subsubsection[black-hole-overwrite]{Overwriting with Black Holes}
204 %************************************************************************
206 An updatable closure may be overwritten with a black hole so that
207 the free variables in the closure being evaluated are not kept alive.
208 This may be done on entering the closure or later by the garbage
/* Info tables and entry code for black holes: BH_UPD for updatable
 * closures, BH_SINGLE for single-entry closures. */
212 EXTDATA_RO(BH_UPD_info);
213 EXTFUN(BH_UPD_entry);
214 EXTDATA_RO(BH_SINGLE_info);
215 EXTFUN(BH_SINGLE_entry);
/* Overwrite the info pointer of @heapptr@ with @infolbl@, turning the
 * closure into a black hole; the payload words are left in place, but
 * the free variables they held are no longer traced. */
217 #define UPD_BH(heapptr,infolbl) INFO_PTR(heapptr) = (W_) infolbl
220 The following macros are actually planted by the code generator. They
221 allow us to delay the decision about if/when we black hole. It should
222 be noted that single entry closures do not have update frames which
223 can be traced by the garbage collector. It is only possible to
224 overwrite with a black hole on entry.
226 In the sequential system, updatable closures are not black-holed until GC.
227 When GC occurs, the only active updatable closures are those with update
228 frames on the stack, so the GC routine can walk the stack frames to find
229 the updatable closures to black hole (primarily for plugging space leaks).
230 This approach saves the overhead of black-holing every updatable closure on
233 In the parallel system, however, it is essential that updatable closures
234 be black-holed immediately on entry, so that other local threads will
235 block when attempting to enter a closure already under evaluation.
/* Black-holing policy macros, planted by the code generator (see prose
 * above).  In the CONCURRENT build updatable closures are black-holed
 * immediately on entry so other threads block on them; sequentially the
 * GC does it by walking update frames.  Single-entry closures have no
 * update frame, so they can only ever be black-holed on entry.
 * NOTE(review): the #else/#endif lines of this conditional appear to be
 * elided in this listing -- confirm against the full file. */
238 #if defined(CONCURRENT)
239 #define UPD_BH_UPDATABLE(heapptr) UPD_BH(heapptr,BH_UPD_info)
241 #define UPD_BH_UPDATABLE(heapptr) /* nothing -- BHed by GC */
244 #define UPD_BH_SINGLE_ENTRY(heapptr) UPD_BH(heapptr,BH_SINGLE_info)
245 /* BHed on entry -- GC can't do it */
248 Finally we indicate to the storage manager if it is required to trace
249 closures on the B stack and overwrite them with black holes.
252 /* define SM_DO_BH_UPDATE if B stack closures to be BHed by GC */
253 #if !defined(CONCURRENT)
254 #define SM_DO_BH_UPDATE
259 %************************************************************************
261 \subsubsection[caf-update]{Entering CAFs}
263 %************************************************************************
265 When we enter a CAF we update it with an indirection to a heap
266 allocated black hole. The @UPD_CAF@ macro updates the CAF with an
267 @CAF@ indirection to the heap allocated closure and adds the updated
268 CAF to the list of CAFs. It is up to the entry code to allocate the
271 The @CAF@ info table used is the @Caf_Return@ table. It will be
272 overwritten at the start of garbage collection with the @Caf_Evac_Upd@
273 and then reset to @Caf_Return@ during garbage collection.
275 In the parallel case, the new black hole will be a local node
276 (with a GA of 0). This means that the code to update indirections
277 does not need to check whether it's updating a CAF: the situation
278 simply never arises! If you change how this code works (e.g. to
279 update CAFs across the parallel machine), you should check @UPD_IND@
/* Info table for CAF indirections (the @Caf_Return@ family; see prose
 * above). */
284 EXTDATA_RO(Caf_info);
/* UPD_CAF(cafptr, bhptr): overwrite the entered CAF with a @Caf_info@
 * indirection to the heap-allocated black hole @bhptr@, and link the
 * CAF onto StorageMgrInfo.CAFlist so the garbage collector can find it.
 * The caller's entry code is responsible for allocating @bhptr@.
 * NOTE(review): the enclosing brace lines of this macro body appear to
 * be elided in this listing -- confirm against the full file. */
287 #define UPD_CAF(cafptr, bhptr) \
289 SET_INFO_PTR(cafptr, Caf_info); \
290 IND_CLOSURE_PTR(cafptr) = (W_) (bhptr); \
291 IND_CLOSURE_LINK(cafptr) = (W_) StorageMgrInfo.CAFlist; \
292 StorageMgrInfo.CAFlist = (P_) (cafptr); \
298 %************************************************************************
300 \subsection[updating-closures]{Updating Closures}
302 %************************************************************************
304 We provide three macros:
307 \item[@UPD_IND(updclosure, heapptr)@]\ \\
308 Overwrites the updatable closure @updclosure@ with an indirection to
311 \item[@UPD_INPLACE_NOPTRS(updclosure, livemask)@]\ \\
312 This prepares the closure pointed to by @updclosure@ to be updated in-place
313 with a closure of size @MIN_UPD_SIZE@ containing no pointers.
315 \item[@UPD_INPLACE_PTRS(updclosure, livemask)@]\ \\
316 This prepares the closure pointed to by @updclosure@ to be updated in-place
317 with a closure of size @MIN_UPD_SIZE@ which may contain pointers. It checks
318 whether @updclosure@ is allowed to be updated inplace. If it is not
321 \item Allocates space for a new closure of size @MIN_UPD_SIZE@ (by
322 calling @HEAP_CHK_RETRY@);
323 \item Overwrites @updclosure@ with an indirection to this new closure;
324 \item Modifies @updclosure@ to point to the newly-allocated closure.
327 All the macros ensure that @updclosure@ points to a closure of size
328 @MIN_UPD_SIZE@ ready to be filled in with the result of the update.
330 The code following @UPDATE_INPLACE@ is responsible for filling it in.
331 This requires the header to be set, with @INPLACE_UPD_HDR@, and the
332 body to be filled out.
335 The @UPD_IND@ and @UPDATE_INPLACE@ macros may have different
336 definitions depending on the garbage collection schemes in use.
338 First we have the declarations which trace updates. These are calls to
339 tracing routines inserted if @DO_RUNTIME_TRACE_UPDATES@ is defined and
340 printed if @traceUpdates@ is true.
343 #if defined(DO_RUNTIME_TRACE_UPDATES)
345 extern I_ traceUpdates;
346 extern void TRACE_UPDATE_Ind();
347 extern void TRACE_UPDATE_Inplace_NoPtrs();
348 extern void TRACE_UPDATE_Inplace_Ptrs();
350 #define TRACE_UPDATE(_trace) _trace
352 #define TRACE_UPDATE(_trace) /* nothing */
356 Before describing the update macros we declare the partial application
357 entry and update code (See \tr{StgUpdate.lhc}).
/* Info table for partial applications (PAPs); the entry and update code
 * lives in StgUpdate.lhc (see prose above). */
360 EXTDATA_RO(PAP_info);
365 %************************************************************************
367 \subsubsection[updates-standard]{Implementation of Standard Updates}
369 %************************************************************************
374 #define ALREADY_LINKED(closure) \
375 (IS_MUTABLE(INFO_PTR(closure)) && MUT_LINK(closure) != MUT_NOT_LINKED)
378 extern I_ AwakenBlockingQueue PROTO((P_));
380 extern void AwakenBlockingQueue PROTO((P_));
384 #define AWAKEN_BQ(updatee) \
385 do { if (IS_BQ_CLOSURE(updatee)) \
386 STGCALL1(void,(void *, P_), AwakenBlockingQueue, (P_) BQ_ENTRIES(updatee)); \
391 #define AWAKEN_BQ(updatee) \
392 do { if (IS_BQ_CLOSURE(updatee)) \
393 AwakenBlockingQueue((P_)BQ_ENTRIES(updatee)); \
397 #define AWAKEN_INPLACE_BQ()
401 #define ALREADY_LINKED(closure) 0
403 #define AWAKEN_BQ(updatee)
404 #define AWAKEN_INPLACE_BQ()
/* Info table for plain indirection closures, installed by UPD_IND. */
408 EXTDATA_RO(Ind_info);
411 #if defined(GC2s) || defined(GC1s) || defined(GCdu)
/* UPD_IND(updclosure, heapptr): overwrite @updclosure@ with an
 * indirection to @heapptr@.  Records the update for ticky/profiling
 * (UPDATED_SET_UPDATED, UPDATE_PROFILE_CLOSURE), wakes any threads
 * blocked on the closure (AWAKEN_BQ), then smashes the info pointer to
 * Ind_info and stores the indirectee.  No old-generation bookkeeping is
 * needed for these (non-generational) collectors -- contrast the
 * GCap/GCgn version later in the file. */
413 #define UPD_IND(updclosure, heapptr) \
414 TRACE_UPDATE(TRACE_UPDATE_Ind(updclosure,heapptr)); \
415 UPDATED_SET_UPDATED(updclosure); /* subs entry count */ \
416 UPDATE_PROFILE_CLOSURE((P_)updclosure); \
417 AWAKEN_BQ(updclosure); \
418 SET_INFO_PTR(updclosure, Ind_info); \
419 IND_CLOSURE_PTR(updclosure) = (W_)(heapptr)
421 #define UPD_INPLACE_NOPTRS(livemask) \
422 TRACE_UPDATE(TRACE_UPDATE_Inplace_NoPtrs(Node)); \
423 UPDATED_SET_UPDATED(Node); /* subs entry count */ \
424 UPDATE_PROFILE_CLOSURE(Node); \
427 #define UPD_INPLACE_PTRS(livemask) \
428 TRACE_UPDATE(TRACE_UPDATE_Inplace_Ptrs(Node,hp)); \
429 UPDATED_SET_UPDATED(Node); /* subs entry count */ \
430 UPDATE_PROFILE_CLOSURE(Node); \
/* Set the header of a closure being updated in place.  @size@ and
 * @ptrs@ are accepted for interface uniformity but unused by this
 * (fixed-header) implementation. */
433 #define INPLACE_UPD_HDR(closure,infolbl,cc,size,ptrs) \
434 UPD_FIXED_HDR(closure,infolbl,cc)
437 %************************************************************************
439 \subsubsection[updates-appel]{Implementation of Appel's Updates}
441 %************************************************************************
443 Appel's updates require the identification of old generation closures
444 which are updated. They must be updated with an indirection and linked
445 onto the list of old generation closures.
449 #if defined(GCap) || defined(GCgn)
451 #define UPD_IND(updclosure, heapptr) \
452 { TRACE_UPDATE(TRACE_UPDATE_Ind(updclosure,heapptr)); \
453 if ( ((P_)(updclosure)) <= StorageMgrInfo.OldLim) { \
455 if(!ALREADY_LINKED(updclosure)) { \
456 MUT_LINK(updclosure) \
457 = (W_) StorageMgrInfo.OldMutables; \
458 StorageMgrInfo.OldMutables = (P_) (updclosure); \
463 AWAKEN_BQ(updclosure); \
464 SET_INFO_PTR(updclosure, Ind_info); \
465 IND_CLOSURE_PTR(updclosure) = (W_)(heapptr); \
469 * In threaded-land, we have to do the same nonsense as UPD_INPLACE_PTRS if
470 * we were a blocking queue on the old mutables list.
472 #define UPD_INPLACE_NOPTRS(live_regs_mask) \
473 TRACE_UPDATE(TRACE_UPDATE_Inplace_NoPtrs(Node)); \
474 if ( Node <= StorageMgrInfo.OldLim) { \
475 UPD_OLD_IN_PLACE_NOPTRS(); \
476 if(ALREADY_LINKED(Node)) { \
477 /* We are already on the old mutables list, so we \
478 can't update in place any more */ \
479 HEAP_CHK(live_regs_mask, _FHS+MIN_UPD_SIZE, 0); \
480 /* ticky-ticky (NB: was ALLOC_UPD_CON) */ \
481 ALLOC_CON(_FHS,1,MIN_UPD_SIZE-1,_FHS+MIN_UPD_SIZE); \
482 CC_ALLOC(CCC,_FHS+MIN_UPD_SIZE,CON_K); \
483 /* must awaken after any possible GC */ \
485 SET_INFO_PTR(Node, Ind_info); \
486 IND_CLOSURE_PTR(Node) = \
487 (W_)(Hp-(_FHS+MIN_UPD_SIZE-1)); \
488 Node = Hp-(_FHS+MIN_UPD_SIZE-1); \
491 UPD_NEW_IN_PLACE_NOPTRS(); \
495 #define UPD_INPLACE_PTRS(live_regs_mask) \
496 TRACE_UPDATE(TRACE_UPDATE_Inplace_Ptrs(Node,hp)); \
497 if ( Node <= StorageMgrInfo.OldLim) { \
498 /* redirect update with indirection */ \
499 UPD_OLD_IN_PLACE_PTRS(); \
501 HEAP_CHK(live_regs_mask, _FHS+MIN_UPD_SIZE, 0); \
502 /* ticky-ticky (NB: was ALLOC_UPD_CON) */ \
503 ALLOC_CON(_FHS,1,MIN_UPD_SIZE-1,_FHS+MIN_UPD_SIZE); \
504 CC_ALLOC(CCC,_FHS+MIN_UPD_SIZE,CON_K); \
506 if (!ALREADY_LINKED(Node)) { \
508 = (W_) StorageMgrInfo.OldMutables; \
509 StorageMgrInfo.OldMutables = (P_) (Node); \
511 /* must awaken after any possible GC */ \
513 SET_INFO_PTR(Node, Ind_info); \
514 IND_CLOSURE_PTR(Node) \
515 = (W_)(Hp-(_FHS+MIN_UPD_SIZE-1)); \
516 Node = Hp-(_FHS+MIN_UPD_SIZE-1); \
518 UPD_NEW_IN_PLACE_PTRS(); \
/* Set the header of a closure being updated in place (GCap/GCgn
 * variant).  @size@ and @ptrs@ are accepted for interface uniformity
 * but unused by this (fixed-header) implementation. */
524 #define INPLACE_UPD_HDR(closure,infolbl,cc,size,ptrs) \
525 UPD_FIXED_HDR(closure,infolbl,cc)
527 #endif /* GCap || GCgn */
531 %************************************************************************
533 \subsection[freezing-arrays]{Changing Mutable Pointer closures into Immutable Closures}
535 %************************************************************************
537 When freezing an array of pointers we change the info table to
538 indicate it is now immutable to the garbage collector. The array will
539 be removed from the old generation mutable array list by the garbage collector.
542 This is only required for generational garbage collectors but we always
543 do it so better profiling information is provided.
/* Freeze a mutable pointer-array closure: swap its info table for the
 * immutable variant so the garbage collector stops treating it as
 * mutable.  NOTE(review): both arms of this conditional expand
 * identically -- consistent with the prose above saying freezing is
 * done unconditionally for better profiling; the #else line of the
 * conditional appears elided in this listing. */
546 #ifdef GC_MUT_REQUIRED
547 #define FREEZE_MUT_HDR(freezeclosure,immutinfo) \
548 SET_INFO_PTR(freezeclosure, immutinfo)
550 #define FREEZE_MUT_HDR(freezeclosure,immutinfo) \
551 SET_INFO_PTR(freezeclosure, immutinfo)
555 #endif /* SMUPDATE_H */