/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: evacuation functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "MarkStack.h"
#include "LdvProfile.h"
#if defined(PROF_SPIN) && defined(THREADED_RTS) && defined(PARALLEL_GC)
StgWord64 whitehole_spin = 0;
#endif
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
#define evacuate(p) evacuate1(p)
#define HEAP_ALLOCED_GC(p) HEAP_ALLOCED(p)
#endif
#if !defined(PARALLEL_GC)
#define copy_tag_nolock(p, info, src, size, stp, tag) \
        copy_tag(p, info, src, size, stp, tag)
#endif
/* Used to avoid long recursion due to selector thunks
 */
#define MAX_THUNK_SELECTOR_DEPTH 16
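
/* When the depth bound is reached, eval_thunk_selector() gives up (see
 * bale_out below): the THUNK_SELECTOR is copied to to-space unevaluated
 * rather than being reduced any further.
 */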
static void eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool);
STATIC_INLINE void evacuate_large(StgPtr p);
/* -----------------------------------------------------------------------------
   Allocate some space in which to copy an object.
   -------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
alloc_for_copy (nat size, generation *gen)
{
    StgPtr to;
    gen_workspace *ws;

    /* Find out where we're going, using the handy "to" pointer in
     * the gen of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (gen < gct->evac_gen) {
        if (gct->eager_promotion) {
            gen = gct->evac_gen;
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->gens[gen->no];
    // this compiles to a single mem access to gen->abs_no only

    /* chain a new block onto the to-space for the destination gen if
     * necessary.
     */
    to = ws->todo_free;
    ws->todo_free += size;
    if (ws->todo_free > ws->todo_lim) {
        to = todo_block_full(size, ws);
    }
    ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);

    return to;
}
/* -----------------------------------------------------------------------------
   The evacuate() code
   -------------------------------------------------------------------------- */
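
/* copy_tag() copies an object of 'size' words into to-space in 'gen',
 * overwrites the source's info pointer with a forwarding pointer to the
 * new copy, and writes the to-space address (re-tagged with 'tag') back
 * through p.  In the parallel GC the forwarding pointer is installed with
 * a cas(); if another thread forwarded the object first, we simply
 * re-evacuate through the forwarding pointer it left behind.
 */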
STATIC_INLINE GNUC_ATTR_HOT void
copy_tag(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, generation *gen, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,gen);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#if defined(PARALLEL_GC)
    {
        const StgInfoTable *new_info;
        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
        if (new_info != info) {
            return evacuate(p); // does the failed_to_evac stuff
        } else {
            *p = TAG_CLOSURE(tag,(StgClosure*)to);
        }
    }
#else
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
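
/* copy_tag_nolock() does the same job as copy_tag(), but publishes the
 * forwarding pointer with a plain store rather than a cas().  It is only
 * used for closures where it does no harm if two GC threads race and both
 * make a copy: at worst a little to-space is wasted.
 */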
#if defined(PARALLEL_GC)
STATIC_INLINE void
copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
         StgClosure *src, nat size, generation *gen, StgWord tag)
{
    StgPtr to, from;
    nat i;

    to = alloc_for_copy(size,gen);

    from = (StgPtr)src;
    to[0] = (W_)info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

    // if somebody else reads the forwarding pointer, we better make
    // sure there's a closure at the end of it.
    write_barrier();
    *p = TAG_CLOSURE(tag,(StgClosure*)to);
    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}
#endif
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of TSOs.
 */
static rtsBool
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve,
         nat size_to_copy, generation *gen)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if defined(PARALLEL_GC)
spin:
    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_WHITEHOLE_info) {
        goto spin;
    }
    if (IS_FORWARDING_PTR(info)) {
        src->header.info = (const StgInfoTable *)info;
        evacuate(p); // does the failed_to_evac stuff
        return rtsFalse;
    }
#else
    info = (W_)src->header.info;
#endif

    to = alloc_for_copy(size_to_reserve, gen);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

    src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
    *p = (StgClosure *)to;

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy, (int)(size_to_reserve - size_to_copy));
#endif

    return rtsTrue;
}
/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE GNUC_ATTR_HOT void
copy(StgClosure **p, const StgInfoTable *info,
     StgClosure *src, nat size, generation *gen)
{
    copy_tag(p,info,src,size,gen,0);
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   gen->large_objects list, and linking it on to the (singly-linked)
   gen->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
STATIC_INLINE void
evacuate_large(StgPtr p)
{
  bdescr *bd;
  generation *gen, *new_gen;
  gen_workspace *ws;

  bd = Bdescr((StgPtr)p);
  gen = bd->gen;
  ACQUIRE_SPIN_LOCK(&gen->sync_large_objects);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the gct->failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (gen < gct->evac_gen) {
      gct->failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    RELEASE_SPIN_LOCK(&gen->sync_large_objects);
    return;
  }

  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    gen->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination gen
   */
  new_gen = bd->dest;
  if (new_gen < gct->evac_gen) {
      if (gct->eager_promotion) {
          new_gen = gct->evac_gen;
      } else {
          gct->failed_to_evac = rtsTrue;
      }
  }

  ws = &gct->gens[new_gen->no];

  bd->flags |= BF_EVACUATED;
  initBdescr(bd, new_gen, new_gen->to);

  // If this is a block of pinned objects, we don't have to scan
  // these objects, because they aren't allowed to contain any
  // pointers.  For these blocks, we skip the scavenge stage and put
  // them straight on the scavenged_large_objects list.
  if (bd->flags & BF_PINNED) {
      ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);
      if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync_large_objects); }
      dbl_link_onto(bd, &new_gen->scavenged_large_objects);
      new_gen->n_scavenged_large_blocks += bd->blocks;
      if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync_large_objects); }
  } else {
      bd->link = ws->todo_large_objects;
      ws->todo_large_objects = bd;
  }

  RELEASE_SPIN_LOCK(&gen->sync_large_objects);
}
/* ----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   gct->evac_gen thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= gct->evac_gen
           if  M > N     do nothing
           else          evac to gen->to

   if  M < gct->evac_gen      evac to gct->evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_gen     do nothing
   if  M <  gct->evac_gen     set gct->failed_to_evac flag to indicate that we
                              didn't manage to evacuate this object into gct->evac_gen.

   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   ------------------------------------------------------------------------- */
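
/* The body of evacuate() below falls into four parts: static objects
 * (not HEAP_ALLOCED), which are chained onto gct->static_objects during a
 * major GC; large and compacted objects, which are relinked or marked in
 * place rather than copied; objects that have already been forwarded, for
 * which we just return the forwarding pointer (checking the destination
 * generation); and ordinary closures, which are copied with one of the
 * copy*() routines above, chosen by closure type.
 */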
REGPARM1 GNUC_ATTR_HOT void
evacuate(StgClosure **p)
{
  bdescr *bd;
  generation *gen;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info);

  if (!HEAP_ALLOCED_GC(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
                  *THUNK_STATIC_LINK((StgClosure *)q) = gct->static_objects;
                  gct->static_objects = (StgClosure *)q;
#else
                  StgPtr link;
                  link = (StgPtr)cas((StgPtr)THUNK_STATIC_LINK((StgClosure *)q),
                                     (StgWord)NULL,
                                     (StgWord)gct->static_objects);
                  if (link == NULL) {
                      gct->static_objects = (StgClosure *)q;
                  }
#endif
              }
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *FUN_STATIC_LINK((StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)FUN_STATIC_LINK((StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *IND_STATIC_LINK((StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)IND_STATIC_LINK((StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          return;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
#ifndef THREADED_RTS
              *STATIC_LINK(info,(StgClosure *)q) = gct->static_objects;
              gct->static_objects = (StgClosure *)q;
#else
              StgPtr link;
              link = (StgPtr)cas((StgPtr)STATIC_LINK(info,(StgClosure *)q),
                                 (StgWord)NULL,
                                 (StgWord)gct->static_objects);
              if (link == NULL) {
                  gct->static_objects = (StgClosure *)q;
              }
#endif
          }
          /* I am assuming that static_objects pointers are not
           * written to other objects, and thus, no need to retag. */
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {

      // pointer into to-space: just return it.  It might be a pointer
      // into a generation that we aren't collecting (> N), or it
      // might just be a pointer into to-space.  The latter doesn't
      // happen often, but allowing it makes certain things a bit
      // easier; e.g. scavenging an object is idempotent, so it's OK to
      // have an object on the mutable list multiple times.
      if (bd->flags & BF_EVACUATED) {
          // We aren't copying this object, so we have to check
          // whether it is already in the target generation.  (this is
          // the write barrier).
          if (bd->gen < gct->evac_gen) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a gen that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (!is_marked((P_)q,bd)) {
          mark((P_)q,bd);
          push_mark_stack((P_)q);
      }
      return;
  }

  gen = bd->dest;

  info = q->header.info;
  if (IS_FORWARDING_PTR(info))
  {
      /* Already evacuated, just return the forwarding address.
       *
       * HOWEVER: if the requested destination generation (gct->evac_gen) is
       * older than the actual generation (because the object was
       * already evacuated to a younger generation) then we have to
       * set the gct->failed_to_evac flag to indicate that we couldn't
       * manage to promote the object to the desired generation.
       *
       * Optimisation: the check is fairly expensive, but we can often
       * shortcut it if either the required generation is 0, or the
       * current object (the EVACUATED) is in a high enough generation.
       * We know that an EVACUATED always points to an object in the
       * same or an older generation.  gen is the lowest generation that the
       * current object would be evacuated to, so we only do the full
       * check if gen is too low.
       */
      StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info);
      *p = TAG_CLOSURE(tag,e);
      if (gen < gct->evac_gen) {  // optimisation
          if (Bdescr((P_)e)->gen < gct->evac_gen) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return;
  }
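
  /* Not yet evacuated: dispatch on the closure type.  Most cases copy the
   * object with one of the copy routines above; indirections, updated
   * BLACKHOLEs and selector thunks are short-circuited instead of copied.
   */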

  switch (INFO_PTR_TO_STRUCT(info)->type) {

  case WHITEHOLE:
      goto loop;

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR_CLEAN:
  case MVAR_DIRTY:
      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen);
      return;

  // For ints and chars of low value, save space by replacing references to
  // them with references to common, shared closures in the RTS.
  //
  // * Except when compiling into Windows DLLs, which don't support
  //   cross-package data references very well.
  //
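  // For example, a freshly allocated (I# 3) closure within the
  // MIN_INTLIKE..MAX_INTLIKE range is not copied at all: the reference is
  // simply redirected to the RTS's shared INTLIKE closure for 3, so all
  // small Ints (and Chars) of the same value share one object after GC.
  //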
  case CONSTR_0_1:
  {
#if defined(__PIC__) && defined(mingw32_HOST_OS)
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen,tag);
#else
      StgWord w = (StgWord)q->payload[0];
      if (info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)CHARLIKE_CLOSURE((StgChar)w));
      }
      else if (info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)INTLIKE_CLOSURE((StgInt)w));
      }
      else {
          copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen,tag);
      }
#endif
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen,tag);
      return;

      copy(p,info,q,sizeofW(StgThunk)+1,gen);

#ifdef NO_PROMOTE_THUNKS
      copy(p,info,q,sizeofW(StgThunk)+2,gen);

      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen,tag);

      copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen,tag);

      copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen);

      copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen,tag);
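
  // A BLACKHOLE that has already been updated with its value behaves like
  // an indirection: instead of copying the BLACKHOLE we follow the
  // indirectee and evacuate that.  Only BLACKHOLEs whose indirectee is
  // still a TSO, blocking queue or WHITEHOLE (i.e. still under
  // evaluation) are copied as-is.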
  case BLACKHOLE:
  {
      StgClosure *r;
      const StgInfoTable *i;
      r = ((StgInd*)q)->indirectee;
      if (GET_CLOSURE_TAG(r) == 0) {
          i = r->header.info;
          if (IS_FORWARDING_PTR(i)) {
              r = (StgClosure *)UN_FORWARDING_PTR(i);
              i = r->header.info;
          }
          if (i == &stg_TSO_info
              || i == &stg_WHITEHOLE_info
              || i == &stg_BLOCKING_QUEUE_CLEAN_info
              || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
              copy(p,info,q,sizeofW(StgInd),gen);
              return;
          }
          ASSERT(i != &stg_IND_info);
      }
      q = r;
      *p = r;
      goto loop;
  }

      copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen);
      return;

  case BCO:
      copy(p,info,q,bco_sizeW((StgBCO *)q),gen);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;
  case UNDERFLOW_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,info,q,pap_sizeW((StgPAP*)q),gen);
      return;

  case AP:
      copy(p,info,q,ap_sizeW((StgAP*)q),gen);
      return;

  case AP_STACK:
      copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen);
      return;

  case ARR_WORDS:
      // just copy the block
      copy(p,info,q,arr_words_sizeW((StgArrWords *)q),gen);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen);
      return;

  case TSO:
      copy(p,info,q,sizeofW(StgTSO),gen);
      return;

  case STACK:
  {
      StgStack *stack = (StgStack *)q;

      /* To evacuate a small STACK, we need to adjust the stack pointer
       */
      mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack),
                      sizeofW(StgStack), gen);

      new_stack = (StgStack *)*p;
      move_STACK(stack, new_stack);
      for (r = stack->sp, s = new_stack->sp;
           r < stack->stack + stack->stack_size;) {
          *s++ = *r++;
      }
  }

  case TREC_CHUNK:
      copy(p,info,q,sizeofW(StgTRecChunk),gen);
      return;

  default:
      barf("evacuate: strange closure type %d", (int)(INFO_PTR_TO_STRUCT(info)->type));
  }
}
/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   p points to a THUNK_SELECTOR that we want to evaluate.  The
   result of "evaluating" it will be evacuated and a pointer to the
   to-space closure will be returned.

   If the THUNK_SELECTOR could not be evaluated (its selectee is still
   a THUNK, for example), then the THUNK_SELECTOR itself will be
   evacuated.
   -------------------------------------------------------------------------- */
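
/* unchain_thunk_selectors() walks the chain of WHITEHOLE'd THUNK_SELECTORs
 * built up by eval_thunk_selector() (linked through payload[0]) and updates
 * each one to point to val: normally by overwriting it with an indirection,
 * or, when val is the selector itself (a loop), by turning it back into a
 * selector thunk so that entering it diverges rather than crashing.
 */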
static void
unchain_thunk_selectors(StgSelector *p, StgClosure *val)
{
    StgSelector *prev;

    prev = NULL;
    while (p)
    {
        ASSERT(p->header.info == &stg_WHITEHOLE_info);
        // val must be in to-space.  Not always: when we recursively
        // invoke eval_thunk_selector(), the recursive calls will not
        // evacuate the value (because we want to select on the value,
        // not evacuate it), so in this case val is in from-space.
        // ASSERT(!HEAP_ALLOCED_GC(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));

        prev = (StgSelector*)((StgClosure *)p)->payload[0];

        // Update the THUNK_SELECTOR with an indirection to the
        // value.  The value is still in from-space at this stage.
        //
        // (old note: Why not do upd_evacuee(q,p)?  Because we have an
        // invariant that an EVACUATED closure always points to an
        // object in the same or an older generation (required by
        // the short-cut test in the EVACUATED case, below).
        if ((StgClosure *)p == val) {
            // must be a loop; just leave a BLACKHOLE in place.  This
            // can happen when we have a chain of selectors that
            // eventually loops back on itself.  We can't leave an
            // indirection pointing to itself, and we want the program
            // to deadlock if it ever enters this closure, so
            // BLACKHOLE is correct.

            // XXX we do not have BLACKHOLEs any more; replace with
            // a THUNK_SELECTOR again.  This will go into a loop if it is
            // entered, and should result in a NonTermination exception.
            ((StgThunk *)p)->payload[0] = val;
            SET_INFO(p, &stg_sel_0_upd_info);
        } else {
            ((StgInd *)p)->indirectee = val;
            SET_INFO(p, &stg_IND_info);
        }

        // For the purposes of LDV profiling, we have created an
        // indirection.
        LDV_RECORD_CREATE(p);

        p = prev;
    }
}
static void
eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a WHITEHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:
    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED_GC(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if (bd->flags & BF_EVACUATED) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            // shortcut, behave as for:  if (evac) evacuate(q);
            if (evac && bd->gen < gct->evac_gen) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, and this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_MARKED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }

    // WHITEHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        do {
            info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        } while (info_ptr == (W_)&stg_WHITEHOLE_info);

        // make sure someone else didn't get here first...
        if (IS_FORWARDING_PTR(info_ptr) ||
            INFO_PTR_TO_STRUCT(info_ptr)->type != THUNK_SELECTOR) {
            // v. tricky now.  The THUNK_SELECTOR has been evacuated
            // by another thread, and is now either a forwarding ptr or IND.
            // We need to extract ourselves from the current situation
            // as cleanly as possible.
            //   - unlock the closure
            //   - update *q, we may have done *some* evaluation
            //   - if evac, we need to call evacuate(), because we
            //     need the write-barrier stuff.
            //   - undo the chain we've built to point to p.
            SET_INFO(p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO(p,&stg_WHITEHOLE_info);
#endif

    field = INFO_PTR_TO_STRUCT(info_ptr)->layout.selector_offset;

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = (StgInfoTable*)selectee->header.info;

    if (IS_FORWARDING_PTR(info)) {
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        goto bale_out;
    }

    info = INFO_PTR_TO_STRUCT(info);
    switch (info->type) {
      case WHITEHOLE:
          goto bale_out; // about to be evacuated by another thread (or a loop).
      case CONSTR_NOCAF_STATIC:
      {
          // check that the size is in range
          ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
                                     info->layout.payload.nptrs));

          // Select the right field from the constructor
          val = selectee->payload[field];

#ifdef PROFILING
          // For the purposes of LDV profiling, we have destroyed
          // the original selector thunk, p.
          SET_INFO(p, (StgInfoTable *)info_ptr);
          OVERWRITING_CLOSURE((StgClosure*)p);
          SET_INFO(p, &stg_WHITEHOLE_info);
#endif

          // the closure in val is now the "value" of the
          // THUNK_SELECTOR in p.  However, val may itself be a
          // THUNK_SELECTOR, in which case we want to continue
          // evaluating until we find the real value, and then
          // update the whole chain to point to the value.
      val_loop:
          info_ptr = (StgWord)UNTAG_CLOSURE(val)->header.info;
          if (!IS_FORWARDING_PTR(info_ptr))
          {
              info = INFO_PTR_TO_STRUCT(info_ptr);
              switch (info->type) {
              case IND:
              case IND_PERM:
              case IND_STATIC:
                  val = ((StgInd *)val)->indirectee;
                  goto val_loop;
              case THUNK_SELECTOR:
                  ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                  prev_thunk_selector = p;
                  p = (StgSelector*)val;
                  goto selector_chain;
              default:
                  break;
              }
          }
          ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
          prev_thunk_selector = p;

          *q = val;

          // update the other selectors in the chain *before*
          // evacuating the value.  This is necessary in the case
          // where the value turns out to be one of the selectors
          // in the chain (i.e. we have a loop), and evacuating it
          // would corrupt the chain.
          unchain_thunk_selectors(prev_thunk_selector, val);

          // evacuate() cannot recurse through
          // eval_thunk_selector(), because we know val is not
          // a THUNK_SELECTOR.
          if (evac) evacuate(q);
          return;
      }

      case IND:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;

      case BLACKHOLE:
      {
          StgClosure *r;
          const StgInfoTable *i;
          r = ((StgInd*)selectee)->indirectee;

          // establish whether this BH has been updated, and is now an
          // indirection, as in evacuate().
          if (GET_CLOSURE_TAG(r) == 0) {
              i = r->header.info;
              if (IS_FORWARDING_PTR(i)) {
                  r = (StgClosure *)UN_FORWARDING_PTR(i);
                  i = r->header.info;
              }
              if (i == &stg_TSO_info
                  || i == &stg_WHITEHOLE_info
                  || i == &stg_BLOCKING_QUEUE_CLEAN_info
                  || i == &stg_BLOCKING_QUEUE_DIRTY_info) {
                  goto bale_out;
              }
              ASSERT(i != &stg_IND_info);
          }

          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;
      }

      case THUNK_SELECTOR:
      {
          StgClosure *val;

          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }

          // not evaluated yet
          goto bale_out;

      default:
          barf("eval_thunk_selector: strange selectee %d",
               (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO(p, (const StgInfoTable *)info_ptr);
    // THREADED_RTS: we just unlocked the thunk, so another thread
    // might get in and update it.  copy() will lock it again and
    // check whether it was updated in the meantime.
    *q = (StgClosure *)p;
    if (evac) {
        copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest);
    }
    unchain_thunk_selectors(prev_thunk_selector, *q);
    return;
}