/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: evacuation functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "Storage.h"
#include "MBlock.h"
#include "Evac.h"
#include "GC.h"
#include "GCUtils.h"
#include "Compact.h"
#include "Prelude.h"
#include "LdvProfile.h"

/* Used to avoid long recursion due to selector thunks
 */
#define MAX_THUNK_SELECTOR_DEPTH 16
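
/* Illustrative note (not from the original source): nested selections such
 * as the Haskell expression
 *
 *     fst (fst (fst x))
 *
 * compile to THUNK_SELECTORs whose selectees are themselves
 * THUNK_SELECTORs.  eval_thunk_selector() below recurses through such a
 * nest, so the depth bound above keeps it from overflowing the C stack.
 */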

static StgClosure * eval_thunk_selector (StgSelector * p, rtsBool);

STATIC_INLINE void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
    // not true: (ToDo: perhaps it should be)
    //  ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
    SET_INFO(p, &stg_EVACUATED_info);
    ((StgEvacuated *)p)->evacuee = dest;
}
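
/* Illustrative note (not from the original source): after upd_evacuee(),
 * the from-space copy of the object has been overwritten with a
 * forwarding closure:
 *
 *     header.info = &stg_EVACUATED_info
 *     evacuee     = address of the to-space copy
 *
 * Any later attempt to evacuate the same object hits the EVACUATED case
 * in evacuate() below and simply returns the saved forwarding address.
 */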

STATIC_INLINE StgClosure *
copy_tag(StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;
    step_workspace *ws;
    bdescr *bd;

    TICK_GC_WORDS_COPIED(size);

    /* Find out where we're going, using the handy "to" pointer in
     * the step of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (stp->gen_no < gct->evac_gen) {
        if (gct->eager_promotion) {
            stp = &generations[gct->evac_gen].steps[0];
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->gen_no][stp->no];

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    bd = ws->todo_bd;
    to = bd->free;
    if (to + size >= bd->start + BLOCK_SIZE_W) {
        bd = gc_alloc_todo_block(ws);
        to = bd->free;
    }

    from = (StgPtr)src;
    bd->free = to + size;
    for (i = 0; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

    /* retag pointer before updating EVACUATED closure and returning */
    to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);

    upd_evacuee((StgClosure *)from,(StgClosure *)to);

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
    return (StgClosure *)to;
}
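
/* Illustrative note (not from the original source): pointer tagging keeps
 * a small constructor/arity tag in the low bits of a closure pointer.
 * Closures are word-aligned, so on a 64-bit platform the bottom three
 * bits are free:
 *
 *     StgClosure *p = ...;                  // word-aligned closure
 *     StgClosure *t = TAG_CLOSURE(2, p);    // pack tag 2 into the low bits
 *     ASSERT(GET_CLOSURE_TAG(t) == 2);
 *     ASSERT(UNTAG_CLOSURE(t) == p);
 *
 * copy_tag() must therefore re-apply the caller's tag to the new
 * to-space address before returning it.
 */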

// Same as copy() above, except the object will be allocated in memory
// that will not be scavenged.  Used for objects that have no pointer
// fields.
STATIC_INLINE StgClosure *
copy_noscav_tag(StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, from;
    nat i;
    step_workspace *ws;
    bdescr *bd;

    TICK_GC_WORDS_COPIED(size);

    /* Find out where we're going, using the handy "to" pointer in
     * the step of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (stp->gen_no < gct->evac_gen) {
        if (gct->eager_promotion) {
            stp = &generations[gct->evac_gen].steps[0];
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->gen_no][stp->no];

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    bd = ws->scavd_list;
    to = bd->free;
    if (to + size >= bd->start + BLOCK_SIZE_W) {
        bd = gc_alloc_scavd_block(ws);
        to = bd->free;
    }

    from = (StgPtr)src;
    bd->free = to + size;
    for (i = 0; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

    /* retag pointer before updating EVACUATED closure and returning */
    to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);

    upd_evacuee((StgClosure *)from,(StgClosure *)to);

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
    return (StgClosure *)to;
}
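
/* Illustrative note (not from the original source): objects copied with
 * copy_noscav_tag() go to a separate, already-"scavenged" block list
 * precisely because they contain no pointers, so the scavenger never
 * needs to scan them.  ARR_WORDS (the payload of a ByteArray#) is the
 * typical client; evacuate() below calls
 *
 *     copy_noscav(q, arr_words_sizeW((StgArrWords *)q), stp);
 *
 * which copies the raw words once and then leaves the object alone.
 */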

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    StgPtr to, from;
    nat i;
    step_workspace *ws;
    bdescr *bd;

    TICK_GC_WORDS_COPIED(size_to_copy);
    if (stp->gen_no < gct->evac_gen) {
        if (gct->eager_promotion) {
            stp = &generations[gct->evac_gen].steps[0];
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->gen_no][stp->no];

    bd = ws->todo_bd;
    to = bd->free;
    if (to + size_to_reserve >= bd->start + BLOCK_SIZE_W) {
        bd = gc_alloc_todo_block(ws);
        to = bd->free;
    }

    from = (StgPtr)src;
    bd->free = to + size_to_reserve;
    for (i = 0; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

    upd_evacuee((StgClosure *)from,(StgClosure *)to);

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    // fill the slop
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
    return (StgClosure *)to;
}
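
/* Illustrative note (not from the original source): the BLACKHOLE case in
 * evacuate() below is the canonical caller:
 *
 *     copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 *
 * Only the header is live, so only the header is copied, but the full
 * BLACKHOLE_sizeW() words are reserved so that the closure can later be
 * updated in place with its value.
 */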

/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
    return copy_tag(src,size,stp,0);
}

STATIC_INLINE StgClosure *
copy_noscav(StgClosure *src, nat size, step *stp)
{
    return copy_noscav_tag(src,size,stp,0);
}

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   ws->todo_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

STATIC_INLINE void
evacuate_large(StgPtr p)
{
    bdescr *bd = Bdescr(p);
    step *stp;
    step_workspace *ws;

    // object must be at the beginning of the block (or be a ByteArray)
    ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
           (((W_)p & BLOCK_MASK) == 0));

    // already evacuated?
    if (bd->flags & BF_EVACUATED) {
        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
         * the desired destination (see comments in evacuate()).
         */
        if (bd->gen_no < gct->evac_gen) {
            gct->failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
        return;
    }

    stp = bd->step;

    ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);
    // remove from large_object list
    if (bd->u.back) {
        bd->u.back->link = bd->link;
    } else { // first object in the list
        stp->large_objects = bd->link;
    }
    if (bd->link) {
        bd->link->u.back = bd->u.back;
    }
    RELEASE_SPIN_LOCK(&stp->sync_large_objects);

    /* link it on to the evacuated large object list of the destination step
     */
    stp = bd->step->to;
    if (stp->gen_no < gct->evac_gen) {
        if (gct->eager_promotion) {
            stp = &generations[gct->evac_gen].steps[0];
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->gen_no][stp->no];
    bd->gen_no = stp->gen_no;
    bd->link = ws->todo_large_objects;
    ws->todo_large_objects = bd;
    bd->flags |= BF_EVACUATED;
}

/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   gct->evac_gen thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= gct->evac_gen
       if  M > N     do nothing
       else          evac to step->to

   if  M < gct->evac_gen      evac to gct->evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= gct->evac_gen     do nothing
   if  M <  gct->evac_gen     set gct->failed_to_evac flag to indicate that we
                              didn't manage to evacuate this object into
                              gct->evac_gen.

   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   -------------------------------------------------------------------------- */
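
/* Worked example (illustrative, not from the original source): suppose we
 * are collecting generations 0 and 1 (N = 1).  With gct->evac_gen = 0, a
 * live object found in generation 0 or 1 is copied to its step's
 * to-space, and an object in generation 2 is left where it is.  With
 * gct->evac_gen = 1 (e.g. when evacuating from a generation-1 mutable
 * list), an object found in generation 0 must be promoted straight to
 * generation 1, step 0; if that cannot be arranged, gct->failed_to_evac
 * is set so the caller knows to keep tracking the pointer via the
 * mutable list.
 */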

REGPARM1 StgClosure *
evacuate(StgClosure *q)
{
    bdescr *bd = NULL;
    step *stp;
    const StgInfoTable *info;
    StgWord tag;

loop:
    /* The tag and the pointer are split, to be merged after evacing */
    tag = GET_CLOSURE_TAG(q);
    q = UNTAG_CLOSURE(q);

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

    if (!HEAP_ALLOCED(q)) {

        if (!major_gc) return TAG_CLOSURE(tag,q);

        info = get_itbl(q);
        switch (info->type) {

        case THUNK_STATIC:
            if (info->srt_bitmap != 0 &&
                *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
                *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
                static_objects = (StgClosure *)q;
            }
            return TAG_CLOSURE(tag,q);

        case FUN_STATIC:
            if (info->srt_bitmap != 0 &&
                *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
                *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
                static_objects = (StgClosure *)q;
            }
            return TAG_CLOSURE(tag,q);

        case IND_STATIC:
            /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
             * on the CAF list, so don't do anything with it here (we'll
             * scavenge it later).
             */
            if (((StgIndStatic *)q)->saved_info == NULL
                && *IND_STATIC_LINK((StgClosure *)q) == NULL) {
                *IND_STATIC_LINK((StgClosure *)q) = static_objects;
                static_objects = (StgClosure *)q;
            }
            return TAG_CLOSURE(tag,q);

        case CONSTR_STATIC:
            if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
                *STATIC_LINK(info,(StgClosure *)q) = static_objects;
                static_objects = (StgClosure *)q;
                /* I am assuming that static_objects pointers are not
                 * written to other objects, and thus, no need to retag. */
            }
            return TAG_CLOSURE(tag,q);

        case CONSTR_NOCAF_STATIC:
            /* no need to put these on the static linked list, they don't need
             * to be scavenged.
             */
            return TAG_CLOSURE(tag,q);

        default:
            barf("evacuate(static): strange closure type %d", (int)(info->type));
        }
    }

    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
        /* Can't evacuate this object, because it's in a generation
         * older than the ones we're collecting.  Let's hope that it's
         * in gct->evac_gen or older, or we will have to arrange to track
         * this pointer using the mutable list.
         */
        if (bd->gen_no < gct->evac_gen) {
            // nope
            gct->failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
        return TAG_CLOSURE(tag,q);
    }

    if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

        /* pointer into to-space: just return it.  This normally
         * shouldn't happen, but allowing it makes certain things
         * slightly easier (eg. the mutable list can contain the same
         * object twice, for example).
         */
        if (bd->flags & BF_EVACUATED) {
            if (bd->gen_no < gct->evac_gen) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
            return TAG_CLOSURE(tag,q);
        }

        /* evacuate large objects by re-linking them onto a different list.
         */
        if (bd->flags & BF_LARGE) {
            info = get_itbl(q);
            if (info->type == TSO &&
                ((StgTSO *)q)->what_next == ThreadRelocated) {
                q = (StgClosure *)((StgTSO *)q)->link;
                goto loop;
            }
            evacuate_large((P_)q);
            return TAG_CLOSURE(tag,q);
        }

        /* If the object is in a step that we're compacting, then we
         * need to use an alternative evacuate procedure.
         */
        if (bd->flags & BF_COMPACTED) {
            if (!is_marked((P_)q,bd)) {
                mark((P_)q,bd);
                if (mark_stack_full()) {
                    mark_stack_overflowed = rtsTrue;
                    reset_mark_stack();
                }
                push_mark_stack((P_)q);
            }
            return TAG_CLOSURE(tag,q);
        }
    }

    stp = bd->step->to;

    info = get_itbl(q);

    switch (info->type) {

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
    case MVAR:
        return copy(q,sizeW_fromITBL(info),stp);

    case CONSTR_0_1:
    {
        StgWord w = (StgWord)q->payload[0];
        if (q->header.info == Czh_con_info &&
            // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
            (StgChar)w <= MAX_CHARLIKE) {
            return TAG_CLOSURE(tag,
                (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
            );
        }
        if (q->header.info == Izh_con_info &&
            (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
            return TAG_CLOSURE(tag,
                (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
            );
        }
        // else
        return copy_noscav_tag(q,sizeofW(StgHeader)+1,stp,tag);
    }
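
    /* Illustrative note (not from the original source): this is a sharing
     * optimisation.  Evacuating a heap-allocated (C# 'a') or (I# 3) does
     * not copy the closure at all; it returns a pointer into the RTS's
     * static table of CHARLIKE/INTLIKE closures, so every small Char# or
     * Int# in the covered range collapses to a single shared object
     * after GC.
     */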

    case FUN_0_1:
    case FUN_1_0:
    case CONSTR_1_0:
        return copy_tag(q,sizeofW(StgHeader)+1,stp,tag);

    case THUNK_1_0:
    case THUNK_0_1:
        return copy(q,sizeofW(StgThunk)+1,stp);

    case THUNK_1_1:
    case THUNK_2_0:
    case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
        if (bd->gen_no == 0 &&
            bd->step->no != 0 &&
            bd->step->no == generations[bd->gen_no].n_steps-1) {
            stp = bd->step;
        }
#endif
        return copy(q,sizeofW(StgThunk)+2,stp);

    case FUN_1_1:
    case FUN_2_0:
    case FUN_0_2:
    case CONSTR_1_1:
    case CONSTR_2_0:
        return copy_tag(q,sizeofW(StgHeader)+2,stp,tag);

    case CONSTR_0_2:
        return copy_noscav_tag(q,sizeofW(StgHeader)+2,stp,tag);

    case THUNK:
        return copy(q,thunk_sizeW_fromITBL(info),stp);

    case FUN:
    case IND_PERM:
    case IND_OLDGEN_PERM:
    case WEAK:
    case STABLE_NAME:
    case CONSTR:
        return copy_tag(q,sizeW_fromITBL(info),stp,tag);

    case BCO:
        return copy(q,bco_sizeW((StgBCO *)q),stp);

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

    case THUNK_SELECTOR:
        return eval_thunk_selector((StgSelector *)q, rtsTrue);

    case IND:
    case IND_OLDGEN:
        // follow chains of indirections, don't evacuate them
        q = ((StgInd*)q)->indirectee;
        goto loop;

    case RET_BCO:
    case RET_SMALL:
    case RET_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
        // shouldn't see these
        barf("evacuate: stack frame at %p\n", q);

    case PAP:
        return copy(q,pap_sizeW((StgPAP*)q),stp);

    case AP:
        return copy(q,ap_sizeW((StgAP*)q),stp);

    case AP_STACK:
        return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);

    case EVACUATED:
        /* Already evacuated, just return the forwarding address.
         * HOWEVER: if the requested destination generation (gct->evac_gen) is
         * older than the actual generation (because the object was
         * already evacuated to a younger generation) then we have to
         * set the gct->failed_to_evac flag to indicate that we couldn't
         * manage to promote the object to the desired generation.
         *
         * Optimisation: the check is fairly expensive, but we can often
         * shortcut it if either the required generation is 0, or the
         * current object (the EVACUATED) is in a high enough generation.
         * We know that an EVACUATED always points to an object in the
         * same or an older generation.  stp is the lowest step that the
         * current object would be evacuated to, so we only do the full
         * check if stp is too low.
         */
        if (gct->evac_gen > 0 && stp->gen_no < gct->evac_gen) {  // optimisation
            StgClosure *p = ((StgEvacuated*)q)->evacuee;
            if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < gct->evac_gen) {
                gct->failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
            }
        }
        return ((StgEvacuated*)q)->evacuee;

    case ARR_WORDS:
        // just copy the block
        return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // just copy the block
        return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

    case TSO:
    {
        StgTSO *tso = (StgTSO *)q;

        /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
         */
        if (tso->what_next == ThreadRelocated) {
            q = (StgClosure *)tso->link;
            goto loop;
        }

        /* To evacuate a small TSO, we need to relocate the update frame
         * list it contains.
         */
        {
            StgTSO *new_tso;
            StgPtr p, q;

            new_tso = (StgTSO *)copyPart((StgClosure *)tso,
                                         tso_sizeW(tso),
                                         sizeofW(StgTSO), stp);
            move_TSO(tso, new_tso);
            for (p = tso->sp, q = new_tso->sp;
                 p < tso->stack+tso->stack_size;) {
                *q++ = *p++;
            }

            return (StgClosure *)new_tso;
        }
    }

    case TREC_HEADER:
        return copy(q,sizeofW(StgTRecHeader),stp);

    case TVAR_WATCH_QUEUE:
        return copy(q,sizeofW(StgTVarWatchQueue),stp);

    case TVAR:
        return copy(q,sizeofW(StgTVar),stp);

    case TREC_CHUNK:
        return copy(q,sizeofW(StgTRecChunk),stp);

    case ATOMIC_INVARIANT:
        return copy(q,sizeofW(StgAtomicInvariant),stp);

    case INVARIANT_CHECK_QUEUE:
        return copy(q,sizeofW(StgInvariantCheckQueue),stp);

    default:
        barf("evacuate: strange closure type %d", (int)(info->type));
    }

    barf("evacuate");
}

static void
unchain_thunk_selectors(StgSelector *p, StgClosure *val)
{
    StgSelector *prev;

    prev = NULL;
    while (p)
    {
        ASSERT(p->header.info == &stg_BLACKHOLE_info);
        prev = (StgSelector*)((StgClosure *)p)->payload[0];

        // Update the THUNK_SELECTOR with an indirection to the
        // EVACUATED closure now at p.  Why do this rather than
        // upd_evacuee(q,p)?  Because we have an invariant that an
        // EVACUATED closure always points to an object in the
        // same or an older generation (required by the short-cut
        // test in the EVACUATED case, below).
        SET_INFO(p, &stg_IND_info);
        ((StgInd *)p)->indirectee = val;

        // For the purposes of LDV profiling, we have created an
        // indirection.
        LDV_RECORD_CREATE(p);

        p = prev;
    }
}
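
/* Illustrative note (not from the original source): while a chain of
 * selector thunks is being evaluated, each thunk is BLACKHOLEd and its
 * payload[0] is reused as a link to the previously visited thunk:
 *
 *     p -> BLACKHOLE --payload[0]--> BLACKHOLE --payload[0]--> NULL
 *
 * unchain_thunk_selectors() walks this list and rewrites every entry as
 * an IND pointing at the final value.
 */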

/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   p points to a THUNK_SELECTOR that we want to evaluate.  The
   result of "evaluating" it will be evacuated and a pointer to the
   to-space closure will be returned.

   If the THUNK_SELECTOR could not be evaluated (its selectee is still
   a THUNK, for example), then the THUNK_SELECTOR itself will be
   evacuated.
   -------------------------------------------------------------------------- */
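
/* Illustrative note (not from the original source): a THUNK_SELECTOR is
 * what the compiler produces for a lazy field selection such as the
 * Haskell expression
 *
 *     snd pair
 *
 * i.e. a thunk whose info table records a selector_offset (here 1) and
 * which points at the selectee.  If the selectee has already been
 * evaluated to a constructor by the time the GC sees the thunk, the GC
 * can perform the selection itself and stop retaining the whole pair:
 * a classic space-leak fix.
 */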

static StgClosure *
eval_thunk_selector (StgSelector * p, rtsBool evac)
{
    nat field;
    StgInfoTable *info;
    const StgInfoTable *info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a BLACKHOLE, and points to the next in the
    // chain with payload[0].

selector_chain:

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = p->header.info;
    field = get_itbl(p)->layout.selector_offset;

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if ((bd->gen_no > N) || (bd->flags & BF_EVACUATED)) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return (StgClosure *)p;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_COMPACTED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            if (evac) p = (StgSelector *)evacuate((StgClosure *)p);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return (StgClosure *)p;
        }
    }

    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
    SET_INFO(p,&stg_BLACKHOLE_info);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = get_itbl(selectee);
    switch (info->type) {
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_2_0:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    {
        // check that the size is in range
        ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
                                   info->layout.payload.nptrs));

        // Select the right field from the constructor
        val = selectee->payload[field];

#ifdef PROFILING
        // For the purposes of LDV profiling, we have destroyed
        // the original selector thunk, p.
        SET_INFO(p, info_ptr);
        LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC((StgClosure *)p);
        SET_INFO(p, &stg_BLACKHOLE_info);
#endif

        // the closure in val is now the "value" of the
        // THUNK_SELECTOR in p.  However, val may itself be a
        // THUNK_SELECTOR, in which case we want to continue
        // evaluating until we find the real value, and then
        // update the whole chain to point to the value.
    val_loop:
        info = get_itbl(UNTAG_CLOSURE(val));
        switch (info->type) {
        case IND:
        case IND_PERM:
        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
        case IND_STATIC:
            val = ((StgInd *)val)->indirectee;
            goto val_loop;
        case THUNK_SELECTOR:
            ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
            prev_thunk_selector = p;
            p = (StgSelector*)val;
            goto selector_chain;
        default:
            ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
            prev_thunk_selector = p;

            if (evac) val = evacuate(val);
            // evacuate() cannot recurse through
            // eval_thunk_selector(), because we know val is not
            // a THUNK_SELECTOR.
            unchain_thunk_selectors(prev_thunk_selector, val);
            return val;
        }
    }

    case IND:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
        // Again, we might need to untag a constructor.
        selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
        goto selector_loop;

    case EVACUATED:
        // We don't follow pointers into to-space; the constructor
        // has already been evacuated, so we won't save any space
        // leaks by evaluating this selector thunk anyhow.
        goto bale_out;

    case THUNK_SELECTOR:
    {
        StgClosure *val;

        // recursively evaluate this selector.  We don't want to
        // recurse indefinitely, so we impose a depth bound.
        if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
            goto bale_out;
        }

        gct->thunk_selector_depth++;
        // rtsFalse says "don't evacuate the result".  It will,
        // however, update any THUNK_SELECTORs that are evaluated
        // along the way.
        val = eval_thunk_selector((StgSelector *)selectee, rtsFalse);
        gct->thunk_selector_depth--;

        // did we actually manage to evaluate it?
        if (val == selectee) goto bale_out;

        // Of course this pointer might be tagged...
        selectee = UNTAG_CLOSURE(val);
        goto selector_loop;
    }

    case AP:
    case AP_STACK:
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_2_0:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_STATIC:
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        // not evaluated yet
        goto bale_out;

    default:
        barf("eval_thunk_selector: strange selectee %d",
             (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO(p, info_ptr);
    if (evac) {
        val = copy((StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
    } else {
        val = (StgClosure *)p;
    }
    unchain_thunk_selectors(prev_thunk_selector, val);
    return val;
}

/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO (StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointer...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
}
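
/* Illustrative note (not from the original source): both casts are to
 * StgPtr (a pointer to StgWord), so the subtraction yields the distance
 * in machine words, not bytes.  For example, on a 64-bit platform a TSO
 * moved from address 0x1000 to 0x3000 gives diff = 0x400 words, and
 * dest->sp is shifted by the same amount so that it still points at the
 * same offset within the relocated stack.
 */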