/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: evacuation functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "Rts.h"
#include "Storage.h"
#include "MBlock.h"
#include "Evac.h"
#include "GC.h"
#include "GCUtils.h"
#include "Compact.h"
#include "Prelude.h"
#include "LdvProfile.h"
/* Used to avoid long recursion due to selector thunks
 */
#define MAX_THUNK_SELECTOR_DEPTH 16

static StgClosure * eval_thunk_selector (StgSelector * p, rtsBool);
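
/* A THUNK_SELECTOR selects a single field from a constructor, e.g. the
 * thunk for (fst x).  Deep chains of these (fst (fst (fst ...))) arise
 * in lazy programs, and evaluating such a chain recursively could blow
 * the C stack, hence the depth bound above.
 */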
STATIC_INLINE StgPtr
alloc_for_copy (nat size, step *stp)
{
    StgPtr to;
    step_workspace *ws;
    bdescr *bd;

    /* Find out where we're going, using the handy "to" pointer in
     * the step of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (stp->gen_no < gct->evac_gen) {
        if (gct->eager_promotion) {
            stp = &generations[gct->evac_gen].steps[0];
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->gen_no][stp->no];

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    bd = ws->todo_bd;
    to = bd->free;
    if (to + size >= bd->start + BLOCK_SIZE_W) {
        bd = gc_alloc_todo_block(ws);
        to = bd->free;
    }
    bd->free = to + size;

    return to;
}
STATIC_INLINE StgPtr
alloc_for_copy_noscav (nat size, step *stp)
{
    StgPtr to;
    step_workspace *ws;
    bdescr *bd;

    /* Find out where we're going, using the handy "to" pointer in
     * the step of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (stp->gen_no < gct->evac_gen) {
        if (gct->eager_promotion) {
            stp = &generations[gct->evac_gen].steps[0];
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->gen_no][stp->no];

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    bd = ws->scavd_list;
    to = bd->free;
    if (to + size >= bd->start + BLOCK_SIZE_W) {
        bd = gc_alloc_scavd_block(ws);
        to = bd->free;
    }
    bd->free = to + size;

    return to;
}
STATIC_INLINE StgClosure *
copy_tag(StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if defined(THREADED_RTS)
    do {
        // atomically claim the object by swapping in a WHITEHOLE;
        // spin if another GC thread is copying it right now
        info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    } while (info == (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_EVACUATED_info) {
        src->header.info = (const StgInfoTable *)info;
        return evacuate(src); // does the failed_to_evac stuff
    }
#else
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif

    to = alloc_for_copy(size,stp);

    TICK_GC_WORDS_COPIED(size);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

    // retag pointer before updating EVACUATED closure and returning
    to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);

//  if (to+size+2 < bd->start + BLOCK_SIZE_W) {
//      __builtin_prefetch(to + size + 2, 1);
//  }

    ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
#if defined(THREADED_RTS)
    write_barrier();
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
    return (StgClosure *)to;
}
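
/* The copy above is a three-state protocol on src->header.info:
 *
 *    original info pointer  -- object not yet claimed
 *    stg_WHITEHOLE_info     -- claimed by a GC thread; copy in progress
 *    stg_EVACUATED_info     -- copy complete; evacuee holds the
 *                              forwarding pointer
 *
 * A GC thread that loses the xchg() race spins until the winner
 * publishes stg_EVACUATED_info (after the write barrier), then follows
 * the forwarding pointer by calling evacuate() again.
 */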
// Same as copy() above, except the object will be allocated in memory
// that will not be scavenged.  Used for objects that have no pointer
// fields.
STATIC_INLINE StgClosure *
copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if defined(THREADED_RTS)
    do {
        info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    } while (info == (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_EVACUATED_info) {
        src->header.info = (const StgInfoTable *)info;
        return evacuate(src); // does the failed_to_evac stuff
    }
#else
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif

    to = alloc_for_copy_noscav(size,stp);

    TICK_GC_WORDS_COPIED(size);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

    // retag pointer before updating EVACUATED closure and returning
    to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);

    ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
#if defined(THREADED_RTS)
    write_barrier();
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
    return (StgClosure *)to;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if defined(THREADED_RTS)
    do {
        info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    } while (info == (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_EVACUATED_info) {
        src->header.info = (const StgInfoTable *)info;
        return evacuate(src); // does the failed_to_evac stuff
    }
#else
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif

    to = alloc_for_copy(size_to_reserve, stp);

    TICK_GC_WORDS_COPIED(size_to_copy);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

    ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
#if defined(THREADED_RTS)
    write_barrier();
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;
#endif

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
    return (StgClosure *)to;
}
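
/* For example, evacuate() below turns a BLACKHOLE into
 *
 *     copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 *
 * copying only the header words and reserving the rest of the
 * closure's size as unused padding in to-space.
 */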
/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
    return copy_tag(src,size,stp,0);
}

STATIC_INLINE StgClosure *
copy_noscav(StgClosure *src, nat size, step *stp)
{
    return copy_noscav_tag(src,size,stp,0);
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
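
/* That is, we unlink bd from its step's doubly-linked large_objects list:
 *
 *     ... <-> bd->u.back <-> bd <-> bd->link <-> ...
 *
 * and push it on the front of the destination workspace's
 * todo_large_objects list; the object itself is never copied, only
 * re-linked and later scavenged in place.
 */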
STATIC_INLINE void
evacuate_large(StgPtr p)
{
    bdescr *bd = Bdescr(p);
    step *stp;
    step_workspace *ws;

    // object must be at the beginning of the block (or be a ByteArray)
    ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
           (((W_)p & BLOCK_MASK) == 0));

    // already evacuated?
    if (bd->flags & BF_EVACUATED) {
        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
         * the desired destination (see comments in evacuate()).
         */
        if (bd->gen_no < gct->evac_gen) {
            gct->failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
        return;
    }

    stp = bd->step;

    ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);
    // remove from large_object list
    if (bd->u.back) {
        bd->u.back->link = bd->link;
    } else { // first object in the list
        stp->large_objects = bd->link;
    }
    if (bd->link) {
        bd->link->u.back = bd->u.back;
    }
    RELEASE_SPIN_LOCK(&stp->sync_large_objects);

    /* link it on to the evacuated large object list of the destination step
     */
    stp = bd->step->to;
    if (stp->gen_no < gct->evac_gen) {
        if (gct->eager_promotion) {
            stp = &generations[gct->evac_gen].steps[0];
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->gen_no][stp->no];
    bd->gen_no = stp->gen_no;
    bd->link = ws->todo_large_objects;
    ws->todo_large_objects = bd;
    bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   gct->evac_gen thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= gct->evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < gct->evac_gen      evac to gct->evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_gen     do nothing
   if  M <  gct->evac_gen     set gct->failed_to_evac flag to indicate that we
                              didn't manage to evacuate this object into
                              gct->evac_gen.


   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   -------------------------------------------------------------------------- */
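
/* A worked instance of the conditions above: with N = 1 and
 * gct->evac_gen = 0, objects in generations 0 and 1 are copied to their
 * step's to-space, while an object in generation 2 is left alone
 * (M > N).  With gct->evac_gen = 1, an object in generation 0 must be
 * promoted to generation 1, step 0; if it has already been evacuated
 * into generation 0, all we can do is set gct->failed_to_evac (see the
 * EVACUATED case below).
 */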
REGPARM1 StgClosure *
evacuate(StgClosure *q)
{
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;
  StgWord tag;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  if (!HEAP_ALLOCED(q)) {

      if (!major_gc) return TAG_CLOSURE(tag,q);

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
                  *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return q;

      case FUN_STATIC:
          if (info->srt_bitmap != 0) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*FUN_STATIC_LINK((StgClosure *)q) == NULL) {
                  *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return q;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
                  *IND_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return q;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              // re-test, after acquiring lock
              if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
                  *STATIC_LINK(info,(StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
              /* I am assuming that static_objects pointers are not
               * written to other objects, and thus, no need to retag. */
          }
          return TAG_CLOSURE(tag,q);

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return TAG_CLOSURE(tag,q);

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }
  bd = Bdescr((P_)q);

  if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in gct->evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < gct->evac_gen) {
          // nope
          gct->failed_to_evac = rtsTrue;
          TICK_GC_FAILED_PROMOTION();
      }
      return TAG_CLOSURE(tag,q);
  }

  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

      /* pointer into to-space: just return it.  This normally
       * shouldn't happen, but allowing it makes certain things
       * slightly easier (eg. the mutable list can contain the same
       * object twice, for example).
       */
      if (bd->flags & BF_EVACUATED) {
          if (bd->gen_no < gct->evac_gen) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return TAG_CLOSURE(tag,q);
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->link;
              goto loop;
          }
          evacuate_large((P_)q);
          return TAG_CLOSURE(tag,q);
      }

      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (bd->flags & BF_COMPACTED) {
          if (!is_marked((P_)q,bd)) {
              mark((P_)q,bd);
              if (mark_stack_full()) {
                  mark_stack_overflowed = rtsTrue;
                  reset_mark_stack();
              }
              push_mark_stack((P_)q);
          }
          return TAG_CLOSURE(tag,q);
      }
  }

  stp = bd->step->to;
  info = get_itbl(q);

  switch (info->type) {

  case WHITEHOLE:
      // another GC thread is copying this object right now: spin
      goto loop;

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR:
      return copy(q,sizeW_fromITBL(info),stp);

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          // share a static CHARLIKE closure instead of copying
          return TAG_CLOSURE(tag,
                             (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                             );
      }
      if (q->header.info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          return TAG_CLOSURE(tag,
                             (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                             );
      }
      // else
      return copy_noscav_tag(q,sizeofW(StgHeader)+1,stp,tag);
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      return copy_tag(q,sizeofW(StgHeader)+1,stp,tag);

  case THUNK_1_0:
  case THUNK_0_1:
      return copy(q,sizeofW(StgThunk)+1,stp);

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == generations[bd->gen_no].n_steps-1) {
        stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgThunk)+2,stp);

  case FUN_1_1:
  case FUN_2_0:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2_0:
      return copy_tag(q,sizeofW(StgHeader)+2,stp,tag);

  case CONSTR_0_2:
      return copy_noscav_tag(q,sizeofW(StgHeader)+2,stp,tag);

  case THUNK:
      return copy(q,thunk_sizeW_fromITBL(info),stp);

  case FUN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case STABLE_NAME:
  case CONSTR:
      return copy_tag(q,sizeW_fromITBL(info),stp,tag);

  case BCO:
      return copy(q,bco_sizeW((StgBCO *)q),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case THUNK_SELECTOR:
      return eval_thunk_selector((StgSelector *)q, rtsTrue);

  case IND:
  case IND_OLDGEN:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);

  case PAP:
      return copy(q,pap_sizeW((StgPAP*)q),stp);

  case AP:
      return copy(q,ap_sizeW((StgAP*)q),stp);

  case AP_STACK:
      return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);

  case EVACUATED:
      /* Already evacuated, just return the forwarding address.
       * HOWEVER: if the requested destination generation (gct->evac_gen) is
       * older than the actual generation (because the object was
       * already evacuated to a younger generation) then we have to
       * set the gct->failed_to_evac flag to indicate that we couldn't
       * manage to promote the object to the desired generation.
       *
       * Optimisation: the check is fairly expensive, but we can often
       * shortcut it if either the required generation is 0, or the
       * current object (the EVACUATED) is in a high enough generation.
       * We know that an EVACUATED always points to an object in the
       * same or an older generation.  stp is the lowest step that the
       * current object would be evacuated to, so we only do the full
       * check if stp is too low.
       */
      if (gct->evac_gen > 0 && stp->gen_no < gct->evac_gen) { // optimisation
          StgClosure *p = ((StgEvacuated*)q)->evacuee;
          if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < gct->evac_gen) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
      // just copy the block
      return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

  case TSO:
  {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->link;
          goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
          StgTSO *new_tso;
          StgPtr p, q;

          new_tso = (StgTSO *)copyPart((StgClosure *)tso,
                                       tso_sizeW(tso),
                                       sizeofW(StgTSO), stp);
          move_TSO(tso, new_tso);
          for (p = tso->sp, q = new_tso->sp;
               p < tso->stack+tso->stack_size;) {
              *q++ = *p++;
          }

          return (StgClosure *)new_tso;
      }
  }

  case TREC_HEADER:
      return copy(q,sizeofW(StgTRecHeader),stp);

  case TVAR_WATCH_QUEUE:
      return copy(q,sizeofW(StgTVarWatchQueue),stp);

  case TVAR:
      return copy(q,sizeofW(StgTVar),stp);

  case TREC_CHUNK:
      return copy(q,sizeofW(StgTRecChunk),stp);

  case ATOMIC_INVARIANT:
      return copy(q,sizeofW(StgAtomicInvariant),stp);

  case INVARIANT_CHECK_QUEUE:
      return copy(q,sizeofW(StgInvariantCheckQueue),stp);

  default:
      barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
static void
unchain_thunk_selectors(StgSelector *p, StgClosure *val)
{
    StgSelector *prev;

    prev = NULL;
    while (p)
    {
        ASSERT(p->header.info == &stg_BLACKHOLE_info);
        prev = (StgSelector*)((StgClosure *)p)->payload[0];

        // Update the THUNK_SELECTOR with an indirection to the
        // EVACUATED closure now at p.  Why do this rather than
        // upd_evacuee(q,p)?  Because we have an invariant that an
        // EVACUATED closure always points to an object in the
        // same or an older generation (required by the short-cut
        // test in the EVACUATED case, above).
        SET_INFO(p, &stg_IND_info);
        ((StgInd *)p)->indirectee = val;

        // For the purposes of LDV profiling, we have created an
        // indirection.
        LDV_RECORD_CREATE(p);

        p = prev;
    }
}
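
/* Example: after evaluating a chain  p1 -> p2 -> p3 -> val  (each p(i)
 * a BLACKHOLEd THUNK_SELECTOR whose payload[0] links to the previously
 * visited selector), unchain_thunk_selectors walks the payload[0]
 * links and turns every p(i) into an IND pointing straight at val.
 */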
/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   p points to a THUNK_SELECTOR that we want to evaluate.  The
   result of "evaluating" it will be evacuated and a pointer to the
   to-space closure will be returned.

   If the THUNK_SELECTOR could not be evaluated (its selectee is still
   a THUNK, for example), then the THUNK_SELECTOR itself will be
   evacuated.
   -------------------------------------------------------------------------- */
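
/* For example, if p is the selector thunk for (fst x) and x is already
 * the constructor (a,b), eval_thunk_selector returns the (evacuated) a
 * and overwrites p with an indirection to it, so the pair itself can
 * die.  If x is still an unevaluated THUNK, p is evacuated as-is via
 * bale_out below.
 */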
static StgClosure *
eval_thunk_selector (StgSelector * p, rtsBool evac)
{
    StgWord32 field;
    StgInfoTable *info;
    const StgInfoTable *info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a BLACKHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = p->header.info;
    field = get_itbl(p)->layout.selector_offset;

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if ((bd->gen_no > N) || (bd->flags & BF_EVACUATED)) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return (StgClosure *)p;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_COMPACTED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            if (evac) p = (StgSelector *)evacuate((StgClosure *)p);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return (StgClosure *)p;
        }
    }

    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
    SET_INFO(p,&stg_BLACKHOLE_info);
selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = get_itbl(selectee);
    switch (info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
          {
              // check that the size is in range
              ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
                                         info->layout.payload.nptrs));

              // Select the right field from the constructor
              val = selectee->payload[field];

#ifdef PROFILING
              // For the purposes of LDV profiling, we have destroyed
              // the original selector thunk, p.
              SET_INFO(p, info_ptr);
              LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC((StgClosure *)p);
              SET_INFO(p, &stg_BLACKHOLE_info);
#endif

              // the closure in val is now the "value" of the
              // THUNK_SELECTOR in p.  However, val may itself be a
              // THUNK_SELECTOR, in which case we want to continue
              // evaluating until we find the real value, and then
              // update the whole chain to point to the value.
          val_loop:
              info = get_itbl(UNTAG_CLOSURE(val));
              switch (info->type) {
              case IND:
              case IND_PERM:
              case IND_OLDGEN:
              case IND_OLDGEN_PERM:
              case IND_STATIC:
                  val = ((StgInd *)val)->indirectee;
                  goto val_loop;
              case THUNK_SELECTOR:
                  // chain p onto the list of selectors to update, and
                  // go round again to evaluate val
                  ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                  prev_thunk_selector = p;
                  p = (StgSelector*)val;
                  goto selector_chain;
              default:
                  ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
                  prev_thunk_selector = p;

                  if (evac) val = evacuate(val);
                  // evacuate() cannot recurse through
                  // eval_thunk_selector(), because we know val is not
                  // a THUNK_SELECTOR.
                  unchain_thunk_selectors(prev_thunk_selector, val);
                  return val;
              }
          }
      case IND:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
      case IND_STATIC:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;

      case EVACUATED:
          // We don't follow pointers into to-space; the constructor
          // has already been evacuated, so we won't save any space
          // leaks by evaluating this selector thunk anyhow.
          goto bale_out;

      case THUNK_SELECTOR:
      {
          StgClosure *val;

          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          val = eval_thunk_selector((StgSelector *)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }

      case AP:
      case AP_STACK:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
          // not evaluated yet
          goto bale_out;

      default:
          barf("eval_thunk_selector: strange selectee %d",
               (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO(p, info_ptr);
    if (evac) {
        val = copy((StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
    } else {
        val = (StgClosure *)p;
    }
    unchain_thunk_selectors(prev_thunk_selector, val);
    return val;
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO (StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointer...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
}
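
/* For example, once the TSO case in evacuate() has copied a thread's
 * header to new_tso with copyPart(), move_TSO(tso, new_tso) shifts
 * new_tso->sp by the (word) distance the TSO moved, so that it points
 * into the relocated stack before the stack words themselves are
 * copied across.
 */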