/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: evacuation functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

/* RTS-internal headers used below (reconstructed list; the exact set
 * may differ slightly between GHC versions): */
#include "Evac.h"
#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
#include "Prelude.h"
#include "LdvProfile.h"
#if defined(PROF_SPIN) && defined(THREADED_RTS) && defined(PARALLEL_GC)
StgWord64 whitehole_spin = 0;
#endif
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
#define evacuate(p) evacuate1(p)
#define HEAP_ALLOCED_GC(p) HEAP_ALLOCED(p)
#endif
#if !defined(PARALLEL_GC)
#define copy_tag_nolock(p, info, src, size, stp, tag) \
        copy_tag(p, info, src, size, stp, tag)
#endif
/* Used to avoid long recursion due to selector thunks
 */
#define MAX_THUNK_SELECTOR_DEPTH 16
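
/* Editor's note (illustrative, not from the original source): selector
 * thunks arise from lazy field selections in Haskell, e.g.
 *
 *     let x1 = fst p ; x2 = fst x1 ; x3 = fst x2 ; ...
 *
 * where each  fst  builds a THUNK_SELECTOR.  eval_thunk_selector()
 * below follows such chains recursively, so without this depth bound a
 * long enough chain could overflow the C stack during GC.
 */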
static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
STATIC_INLINE void evacuate_large(StgPtr p);
/* -----------------------------------------------------------------------------
   Allocate some space in which to copy an object.
   -------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
alloc_for_copy (nat size, step *stp)
{
    StgPtr to;
    step_workspace *ws;

    /* Find out where we're going, using the handy "to" pointer in
     * the step of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (stp < gct->evac_step) {
        if (gct->eager_promotion) {
            stp = gct->evac_step;
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->abs_no];
    // this compiles to a single mem access to stp->abs_no only

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    to = ws->todo_free;
    ws->todo_free += size;
    if (ws->todo_free > ws->todo_lim) {
        to = todo_block_full(size, ws);
    }
    ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);

    return to;
}
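
/* Editor's sketch (not part of the original source): the copy variants
 * below all follow the same pattern on top of alloc_for_copy():
 *
 *     StgPtr to = alloc_for_copy(size, stp);                // reserve to-space
 *     for (i = 0; i < size; i++) to[i] = ((StgPtr)src)[i];  // copy the words
 *     src->header.info = (StgInfoTable *)MK_FORWARDING_PTR(to);
 *
 * They differ only in how the copy is synchronised (cas/xchg or not),
 * whether the result pointer is re-tagged, and how much of the object
 * is actually copied.
 */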
/* -----------------------------------------------------------------------------
   The evacuate() code
   -------------------------------------------------------------------------- */
STATIC_INLINE GNUC_ATTR_HOT void
copy_tag(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,stp);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#if defined(PARALLEL_GC)
    {
        const StgInfoTable *new_info;
        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
        if (new_info != info) {
            return evacuate(p); // does the failed_to_evac stuff
        } else {
            *p = TAG_CLOSURE(tag,(StgClosure*)to);
        }
    }
#else
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
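
/* Editor's note: MK_FORWARDING_PTR and friends are defined in the RTS
 * headers; the representation is believed to use the low bit of the
 * to-space address (closure pointers are word-aligned, so bit 0 is
 * otherwise always clear), roughly:
 *
 *     MK_FORWARDING_PTR(p)   ((StgWord)(p) | 1)
 *     IS_FORWARDING_PTR(p)   ((((StgWord)(p)) & 1) != 0)
 *     UN_FORWARDING_PTR(p)   ((StgWord)(p) - 1)
 *
 * This is stated here as an assumption for illustration; the RTS
 * headers are authoritative.
 */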
#if defined(PARALLEL_GC)
STATIC_INLINE void
copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
                StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,stp);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
#endif
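
/* Editor's note (assumption, not in the original source): unlike
 * copy_tag(), this variant omits the cas() that claims the object, so
 * in a parallel GC two threads can both copy the same closure.  That
 * wastes a little to-space but is believed harmless for the immutable
 * closures (constructors, functions) it is applied to in evacuate()
 * below, since the duplicate copies are identical. */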
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static rtsBool
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if defined(PARALLEL_GC)
spin:
    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_WHITEHOLE_info) {
#ifdef PROF_SPIN
        whitehole_spin++;
#endif
        goto spin;
    }
    if (IS_FORWARDING_PTR(info)) {
        src->header.info = (const StgInfoTable *)info;
        evacuate(p); // does the failed_to_evac stuff
        return rtsFalse;
    }
#else
    info = (W_)src->header.info;
#endif

    to = alloc_for_copy(size_to_reserve, stp);
    *p = (StgClosure *)to;

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

#if defined(PARALLEL_GC)
    write_barrier();
#endif
    src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
#endif

    return rtsTrue;
}
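
/* Editor's note: the callers in this file (the BLACKHOLE and TSO cases
 * of evacuate() below) use the reserve/copy split like this:
 *
 *     copyPart(p, q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 *
 * i.e. reserve the closure's full size but copy only the header, since
 * a BLACKHOLE's payload is not live data; the uncopied words are slop
 * (filled in above when LDV profiling is on). */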
/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE GNUC_ATTR_HOT void
copy(StgClosure **p, const StgInfoTable *info,
     StgClosure *src, nat size, step *stp)
{
    copy_tag(p,info,src,size,stp,0);
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
STATIC_INLINE void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp, *new_stp;
  step_workspace *ws;

  stp = bd->step;
  ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the gct->failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (stp < gct->evac_step) {
        gct->failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
    }
    RELEASE_SPIN_LOCK(&stp->sync_large_objects);
    return;
  }

  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  new_stp = stp->to;
  if (new_stp < gct->evac_step) {
      if (gct->eager_promotion) {
          new_stp = gct->evac_step;
      } else {
          gct->failed_to_evac = rtsTrue;
      }
  }

  ws = &gct->steps[new_stp->abs_no];

  bd->flags |= BF_EVACUATED;
  bd->step = new_stp;
  bd->gen_no = new_stp->gen_no;

  // If this is a block of pinned objects, we don't have to scan
  // these objects, because they aren't allowed to contain any
  // pointers.  For these blocks, we skip the scavenge stage and put
  // them straight on the scavenged_large_objects list.
  if (bd->flags & BF_PINNED) {
      ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);
      if (new_stp != stp) { ACQUIRE_SPIN_LOCK(&new_stp->sync_large_objects); }
      dbl_link_onto(bd, &new_stp->scavenged_large_objects);
      new_stp->n_scavenged_large_blocks += bd->blocks;
      if (new_stp != stp) { RELEASE_SPIN_LOCK(&new_stp->sync_large_objects); }
  } else {
      bd->link = ws->todo_large_objects;
      ws->todo_large_objects = bd;
  }

  RELEASE_SPIN_LOCK(&stp->sync_large_objects);
}
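
/* Editor's note: sync_large_objects is held for the whole of
 * evacuate_large() because several GC threads may race to evacuate
 * large objects from the same source step, and the doubly-linked-list
 * surgery above is not atomic.  The BF_EVACUATED re-check made after
 * taking the lock is what turns a lost race into a no-op. */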
/* ----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   gct->evac_step thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N:

   if  M >= gct->evac_step
           if  M > N     do nothing
           else          evac to step->to

   if  M < gct->evac_step      evac to gct->evac_step, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_step     do nothing
   if  M <  gct->evac_step     set gct->failed_to_evac flag to indicate that we
                               didn't manage to evacuate this object into
                               gct->evac_step.


   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (e.g. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   ------------------------------------------------------------------------- */
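
/* Editor's sketch (not in the original): a concrete reading of the
 * rules above.  Suppose we are collecting only generation 0 (N = 0)
 * and gct->evac_step points at generation 1, step 0, because we are
 * evacuating something reachable from generation 1:
 *
 *   - live object in gen 0                 -> copied to its step's "to" space
 *   - object already in gen 1 (M > N)      -> left where it is
 *   - object that had already been copied
 *     into a gen-0 to-space                -> gct->failed_to_evac = rtsTrue,
 *                                             so the referrer stays on the
 *                                             mutable list (the write barrier)
 */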
REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  step *stp;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  if (!HEAP_ALLOCED_GC(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {
      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                  *THUNK_STATIC_LINK((StgClosure *)q) = gct->static_objects;
                  gct->static_objects = (StgClosure *)q;
#else
                  StgPtr link;
                  link = (StgPtr)cas((StgPtr)THUNK_STATIC_LINK((StgClosure *)q),
                                     (StgWord)NULL,
                                     (StgWord)gct->static_objects);
                  if (link == NULL) {
                      gct->static_objects = (StgClosure *)q;
                  }
#endif
              }
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *FUN_STATIC_LINK((StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)FUN_STATIC_LINK((StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL) {
              if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                  *IND_STATIC_LINK((StgClosure *)q) = gct->static_objects;
                  gct->static_objects = (StgClosure *)q;
#else
                  StgPtr link;
                  link = (StgPtr)cas((StgPtr)IND_STATIC_LINK((StgClosure *)q),
                                     (StgWord)NULL,
                                     (StgWord)gct->static_objects);
                  if (link == NULL) {
                      gct->static_objects = (StgClosure *)q;
                  }
#endif
              }
          }
          return;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *STATIC_LINK(info,(StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)STATIC_LINK(info,(StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          /* I am assuming that static_objects pointers are not
           * written to other objects, and thus, no need to retag. */
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }
  bd = Bdescr((P_)q);

  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->step < gct->evac_step) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }
      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->_link;
              *p = q;
              goto loop;
          }
          evacuate_large((P_)q);
          return;
      }
      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (!is_marked((P_)q,bd)) {
          mark((P_)q,bd);
          if (mark_stack_full()) {
              debugTrace(DEBUG_gc,"mark stack overflowed");
              mark_stack_overflowed = rtsTrue;
              reset_mark_stack();
          }
          push_mark_stack((P_)q);
      }
      return;
  }
  stp = bd->step->to;

  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
    /* Already evacuated, just return the forwarding address.
     *
     * HOWEVER: if the requested destination generation (gct->evac_step) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the gct->failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     *
     * Optimisation: the check is fairly expensive, but we can often
     * shortcut it if either the required generation is 0, or the
     * current object (the EVACUATED) is in a high enough generation.
     * We know that an EVACUATED always points to an object in the
     * same or an older generation.  stp is the lowest step that the
     * current object would be evacuated to, so we only do the full
     * check if stp is too low.
     */
    StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
    *p = TAG_CLOSURE(tag,e);
    if (stp < gct->evac_step) {  // optimisation
        if (Bdescr((P_)e)->step < gct->evac_step) {
            gct->failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
    }
    return;
  }
  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
      goto loop;

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR_CLEAN:
  case MVAR_DIRTY:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
      return;
  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                          );
      }
      else if (info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                          );
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
      }
      return;
  }
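
  /* Editor's note: the effect of the case above is to intern small
   * characters and integers rather than copy them: evacuating, say, a
   * C# closure for 'a' rewrites the reference to point at the static
   * CHARLIKE_CLOSURE entry for 'a', so all such closures end up shared
   * and the heap-allocated original is simply dropped. */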
  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,info,q,sizeofW(StgThunk)+1,stp);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    copy(p,info,q,sizeofW(StgThunk)+2,stp);
    return;

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
      return;

  case CONSTR_0_2:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag);
      return;
  case THUNK:
      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp);
      return;

  case FUN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case CONSTR:
      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
      return;

  case WEAK:
  case STABLE_NAME:
      copy_tag(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag);
      return;
  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),stp);
      return;

  case CAF_BLACKHOLE:
  case BLACKHOLE:
      copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
  case IND_OLDGEN:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);
  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),stp);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),stp);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrWords *)q),stp);
      return;
  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
      return;
  case TSO:
  {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->_link;
          *p = q;
          goto loop;
      }

      /* To evacuate a small TSO, we need to adjust the stack pointer
       */
      {
          StgTSO *new_tso;
          StgPtr r, s;
          rtsBool mine;

          mine = copyPart(p,(StgClosure *)tso, tso_sizeW(tso),
                          sizeofW(StgTSO), stp);
          if (mine) {
              new_tso = (StgTSO *)*p;
              move_TSO(tso, new_tso);
              for (r = tso->sp, s = new_tso->sp;
                   r < tso->stack+tso->stack_size;) {
                  *s++ = *r++;
              }
          }
          return;
      }
  }
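
  /* Editor's note: copyPart() is used here rather than copy() because
   * only the fixed StgTSO prefix needs copying verbatim; the loop
   * above then copies just the live portion of the stack (from sp to
   * the top), after move_TSO() has adjusted the new TSO's sp.  The
   * `mine' result confines this fix-up to the thread that actually
   * performed the copy. */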
  case TREC_HEADER:
      copy(p,info,q,sizeofW(StgTRecHeader),stp);
      return;

  case TVAR_WATCH_QUEUE:
      copy(p,info,q,sizeofW(StgTVarWatchQueue),stp);
      return;

  case TVAR:
      copy(p,info,q,sizeofW(StgTVar),stp);
      return;

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),stp);
      return;

  case ATOMIC_INVARIANT:
      copy(p,info,q,sizeofW(StgAtomicInvariant),stp);
      return;

  case INVARIANT_CHECK_QUEUE:
      copy(p,info,q,sizeofW(StgInvariantCheckQueue),stp);
      return;
  default:
      barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   p points to a THUNK_SELECTOR that we want to evaluate.  The
   result of "evaluating" it will be evacuated and a pointer to the
   to-space closure will be returned.

   If the THUNK_SELECTOR could not be evaluated (its selectee is still
   a THUNK, for example), then the THUNK_SELECTOR itself will be
   evacuated.
   -------------------------------------------------------------------------- */
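
/* Editor's sketch (not from the original source): a THUNK_SELECTOR is
 * the closure GHC builds for a lazy field selection, e.g.
 *
 *     x = fst pair      -- a THUNK_SELECTOR with selector_offset 0
 *
 * If  pair  is already evaluated to a constructor by the time the GC
 * reaches  x , eval_thunk_selector() can replace  x  by the selected
 * field directly, avoiding the classic space leak in which  x  keeps
 * the entire pair alive. */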
static void
unchain_thunk_selectors(StgSelector *p, StgClosure *val)
{
    StgSelector *prev;

    prev = NULL;
    while (p)
    {
#ifdef THREADED_RTS
        ASSERT(p->header.info == &stg_WHITEHOLE_info);
#else
        ASSERT(p->header.info == &stg_BLACKHOLE_info);
#endif
        // val must be in to-space.  Not always: when we recursively
        // invoke eval_thunk_selector(), the recursive calls will not
        // evacuate the value (because we want to select on the value,
        // not evacuate it), so in this case val is in from-space.
        // ASSERT(!HEAP_ALLOCED_GC(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));

        prev = (StgSelector*)((StgClosure *)p)->payload[0];

        // Update the THUNK_SELECTOR with an indirection to the
        // value.  The value is still in from-space at this stage.
        //
        // (old note: Why not do upd_evacuee(q,p)?  Because we have an
        // invariant that an EVACUATED closure always points to an
        // object in the same or an older generation (required by
        // the short-cut test in the EVACUATED case, below).)
        if ((StgClosure *)p == val) {
            // must be a loop; just leave a BLACKHOLE in place.  This
            // can happen when we have a chain of selectors that
            // eventually loops back on itself.  We can't leave an
            // indirection pointing to itself, and we want the program
            // to deadlock if it ever enters this closure, so
            // BLACKHOLE is correct.
            SET_INFO(p, &stg_BLACKHOLE_info);
        } else {
            ((StgInd *)p)->indirectee = val;
            write_barrier();
            SET_INFO(p, &stg_IND_info);
        }

        // For the purposes of LDV profiling, we have created an
        // indirection.
        LDV_RECORD_CREATE(p);

        p = prev;
    }
}
static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a BLACKHOLE, and points to the next in the
    // chain with payload[0].
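
    /* Editor's sketch of the chain (consistent with
     * unchain_thunk_selectors() above):
     *
     *     p -> BLACKHOLE --payload[0]--> BLACKHOLE --payload[0]--> NULL
     *
     * i.e. prev_thunk_selector is a NULL-terminated list threaded
     * through payload[0].  Once the value is known, each cell is
     * overwritten with an IND to it (or a BLACKHOLE on a loop). */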
selector_chain:

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED_GC(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if (bd->flags & BF_EVACUATED) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            // shortcut, behave as for:  if (evac) evacuate(q);
            if (evac && bd->step < gct->evac_step) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }
        // we don't update THUNK_SELECTORs in the compacted
        // generation, because compaction does not remove the INDs
        // that result, and this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_MARKED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        do {
            info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        } while (info_ptr == (W_)&stg_WHITEHOLE_info);
        // make sure someone else didn't get here first...
        if (IS_FORWARDING_PTR(info_ptr) ||
            INFO_PTR_TO_STRUCT(info_ptr)->type != THUNK_SELECTOR) {
            // v. tricky now.  The THUNK_SELECTOR has been evacuated
            // by another thread, and is now either a forwarding ptr or IND.
            // We need to extract ourselves from the current situation
            // as cleanly as possible.
            //   - unlock the closure
            //   - update *q, we may have done *some* evaluation
            //   - if evac, we need to call evacuate(), because we
            //     need the write-barrier stuff.
            //   - undo the chain we've built to point to p.
            SET_INFO(p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO(p,&stg_BLACKHOLE_info);
#endif
    field = INFO_PTR_TO_STRUCT(info_ptr)->layout.selector_offset;

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);
selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = (StgInfoTable*)selectee->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        goto bale_out;
    }

    info = INFO_PTR_TO_STRUCT(info);
    switch (info->type) {
      case WHITEHOLE:
          goto bale_out; // about to be evacuated by another thread (or a loop).
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
          {
              // check that the size is in range
              ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
                                         info->layout.payload.nptrs));

              // Select the right field from the constructor
              val = selectee->payload[field];

#ifdef PROFILING
              // For the purposes of LDV profiling, we have destroyed
              // the original selector thunk, p.
              SET_INFO(p, (StgInfoTable *)info_ptr);
              LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC((StgClosure *)p);
#if defined(THREADED_RTS)
              SET_INFO(p, &stg_WHITEHOLE_info);
#else
              SET_INFO(p, &stg_BLACKHOLE_info);
#endif
#endif
              // the closure in val is now the "value" of the
              // THUNK_SELECTOR in p.  However, val may itself be a
              // THUNK_SELECTOR, in which case we want to continue
              // evaluating until we find the real value, and then
              // update the whole chain to point to the value.
          val_loop:
              info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
              if (!IS_FORWARDING_PTR(info_ptr))
              {
                  info = INFO_PTR_TO_STRUCT(info_ptr);
                  switch (info->type) {
                  case IND:
                  case IND_PERM:
                  case IND_OLDGEN:
                  case IND_OLDGEN_PERM:
                  case IND_STATIC:
                      val = ((StgInd *)val)->indirectee;
                      goto val_loop;
                  case THUNK_SELECTOR:
                      ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                      prev_thunk_selector = p;
                      p = (StgSelector*)val;
                      goto selector_chain;
                  default:
                      break;
                  }
              }
              ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
              prev_thunk_selector = p;

              *q = val;

              // update the other selectors in the chain *before*
              // evacuating the value.  This is necessary in the case
              // where the value turns out to be one of the selectors
              // in the chain (i.e. we have a loop), and evacuating it
              // would corrupt the chain.
              unchain_thunk_selectors(prev_thunk_selector, val);

              // evacuate() cannot recurse through
              // eval_thunk_selector(), because we know val is not
              // a THUNK_SELECTOR.
              if (evac) evacuate(q);
              return;
          }
      case IND:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
      case IND_STATIC:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;
      case THUNK_SELECTOR:
      {
          StgClosure *val;

          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
          // not evaluated yet
          goto bale_out;

      default:
          barf("eval_thunk_selector: strange selectee %d",
               (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO(p, (const StgInfoTable *)info_ptr);
    // THREADED_RTS: we just unlocked the thunk, so another thread
    // might get in and update it.  copy() will lock it again and
    // check whether it was updated in the meantime.
    *q = (StgClosure *)p;
    if (evac) {
        copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
    }
    unchain_thunk_selectors(prev_thunk_selector, *q);
    return;
}