/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: evacuation functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "LdvProfile.h"

/* Used to avoid long recursion due to selector thunks
 */
#define MAX_THUNK_SELECTOR_DEPTH 16
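
/* Illustrative note (not from the original file): a THUNK_SELECTOR is
 * the closure built for a lazy field selection such as "fst p".
 * Nested selections, e.g.
 *
 *     fst (fst (fst (... (fst deepPair))))
 *
 * form a chain that eval_thunk_selector() below follows recursively,
 * so the depth bound above keeps that recursion from overflowing the
 * C stack.
 */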

static void eval_thunk_selector (StgClosure **q, StgSelector *p, rtsBool evac);

STATIC_INLINE StgPtr
alloc_for_copy (nat size, step *stp)
{
    StgPtr to;
    step_workspace *ws;
    bdescr *bd;

    /* Find out where we're going, using the handy "to" pointer in
     * the step of the source object.  If it turns out we need to
     * evacuate to an older generation, adjust it here (see comment
     * by evacuate()).
     */
    if (stp < gct->evac_step) {
        if (gct->eager_promotion) {
            stp = gct->evac_step;
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->steps[stp->gen_no][stp->no];

    /* chain a new block onto the to-space for the destination step if
     * necessary.
     */
    bd = ws->todo_bd;
    to = bd->free;
    if (to + size >= bd->start + BLOCK_SIZE_W) {
        bd = gc_alloc_todo_block(ws);
        to = bd->free;
    }
    bd->free = to + size;

    return to;
}

STATIC_INLINE void
copy_tag(StgClosure **p, StgClosure *src, nat size, step *stp, StgWord tag)
{
    StgPtr to, tagged_to, from;
    nat i;
    StgWord info;

#if defined(THREADED_RTS)
    do {
        info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    } while (info == (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_EVACUATED_info) {
        src->header.info = (const StgInfoTable *)info;
        return evacuate(p); // does the failed_to_evac stuff
    }
#else
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif

    to = alloc_for_copy(size,stp);
    tagged_to = (StgPtr)TAG_CLOSURE(tag,(StgClosure*)to);
    *p = (StgClosure *)tagged_to;

    TICK_GC_WORDS_COPIED(size);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size; i++) { // unroll for small i
        to[i] = from[i];
    }

    ((StgEvacuated*)from)->evacuee = (StgClosure *)tagged_to;

    // retag pointer before updating EVACUATE closure and returning

    // if (to+size+2 < bd->start + BLOCK_SIZE_W) {
    //     __builtin_prefetch(to + size + 2, 1);
    // }

    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size);
#endif
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static void
copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
    StgPtr to, from;
    nat i;
    StgWord info;

#if defined(THREADED_RTS)
    do {
        info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
    } while (info == (W_)&stg_WHITEHOLE_info);
    if (info == (W_)&stg_EVACUATED_info) {
        src->header.info = (const StgInfoTable *)info;
        return evacuate(p); // does the failed_to_evac stuff
    }
#else
    info = (W_)src->header.info;
    src->header.info = &stg_EVACUATED_info;
#endif

    to = alloc_for_copy(size_to_reserve, stp);
    *p = (StgClosure *)to;

    TICK_GC_WORDS_COPIED(size_to_copy);

    from = (StgPtr)src;
    to[0] = info;
    for (i = 1; i < size_to_copy; i++) { // unroll for small i
        to[i] = from[i];
    }

    ((StgEvacuated*)from)->evacuee = (StgClosure *)to;
    ((StgEvacuated*)from)->header.info = &stg_EVACUATED_info;

#ifdef PROFILING
    // We store the size of the just evacuated object in the LDV word so that
    // the profiler can guess the position of the next object later.
    SET_EVACUAEE_FOR_LDV(from, size_to_reserve);
    if (size_to_reserve - size_to_copy > 0)
        LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
#endif
}
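
/* Illustrative usage (this call appears in evacuate() below): a
 * BLACKHOLE's payload is dead, so we copy only its header and reserve
 * the remaining words without reading them from the source:
 *
 *     copyPart(p, q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 */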

/* Copy wrappers that don't tag the closure after copying */
STATIC_INLINE void
copy(StgClosure **p, StgClosure *src, nat size, step *stp)
{
    copy_tag(p,src,size,stp,0);
}

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
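
/* Sketch of the relink done below (illustrative, not part of the
 * original file):
 *
 *   before:  stp->large_objects:      ... <-> bd <-> ...     (doubly-linked)
 *   after:   ws->todo_large_objects:  bd -> old head -> ...  (singly-linked)
 *
 * Unlinking needs the back-pointer bd->u.back; pushing onto the new
 * list only needs bd->link, which is why the destination list can be
 * singly-linked.
 */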

STATIC_INLINE void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;
  step_workspace *ws;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
         (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the gct->failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->step < gct->evac_step) {
      gct->failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;

  ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }
  RELEASE_SPIN_LOCK(&stp->sync_large_objects);

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp < gct->evac_step) {
      if (gct->eager_promotion) {
          stp = gct->evac_step;
      } else {
          gct->failed_to_evac = rtsTrue;
      }
  }

  ws = &gct->steps[stp->gen_no][stp->no];

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = ws->todo_large_objects;
  ws->todo_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}

/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   gct->evac_step thread-local variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N:

   if  M >= gct->evac_step
           if  M > N     do nothing
           else          evac to step->to

   if  M < gct->evac_step      evac to gct->evac_step, step 0

   if the object is already evacuated, then we check which generation
   it is in:

   if  M >= gct->evac_step     do nothing
   if  M < gct->evac_step      set gct->failed_to_evac flag to indicate that we
                               didn't manage to evacuate this object into gct->evac_step.

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   -------------------------------------------------------------------------- */
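
/* Worked example (illustrative, not part of the original file): say we
 * are collecting up to generation N = 1 and gct->evac_step is gen 1,
 * step 0 (e.g. we arrived here from a gen-1 mutable list).  Then:
 *
 *   - an object in gen 0 is promoted to gen 1, step 0;
 *   - an object in gen 1 is copied to its step's to-space;
 *   - an object in gen 2 (> N) is left alone, which is fine because
 *     gen 2 is already older than evac_step;
 *   - an object that was already evacuated into gen 0 (younger than
 *     evac_step) cannot be moved again, so gct->failed_to_evac is set
 *     and the referring object stays on the mutable list.
 */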

REGPARM1 void
evacuate(StgClosure **p)
{
  bdescr *bd = NULL;
  step *stp;
  StgClosure *q;
  const StgInfoTable *info;
  StgWord tag;

  q = *p;

loop:
  /* The tag and the pointer are split, to be merged after evacing */
  tag = GET_CLOSURE_TAG(q);
  q = UNTAG_CLOSURE(q);

  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  if (!HEAP_ALLOCED(q)) {

      if (!major_gc) return;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0 &&
              *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
                  *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;

      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*FUN_STATIC_LINK((StgClosure *)q) == NULL) {
                  *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              if (*IND_STATIC_LINK((StgClosure *)q) == NULL) {
                  *IND_STATIC_LINK((StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          return;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
              ACQUIRE_SPIN_LOCK(&static_objects_sync);
              // re-test, after acquiring lock
              if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
                  *STATIC_LINK(info,(StgClosure *)q) = static_objects;
                  static_objects = (StgClosure *)q;
              }
              RELEASE_SPIN_LOCK(&static_objects_sync);
          }
          /* I am assuming that static_objects pointers are not
           * written to other objects, and thus, no need to retag. */
          return;

      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in gct->evac_step or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->step < gct->evac_step) {
          gct->failed_to_evac = rtsTrue;
          TICK_GC_FAILED_PROMOTION();
      }
      return;
  }

  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

      /* pointer into to-space: just return it.  This normally
       * shouldn't happen, but allowing it makes certain things
       * slightly easier (eg. the mutable list can contain the same
       * object twice, for example).
       */
      if (bd->flags & BF_EVACUATED) {
          if (bd->step < gct->evac_step) {
              gct->failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->link;
              *p = q;
              goto loop;
          }
          evacuate_large((P_)q);
          return;
      }

      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (bd->flags & BF_COMPACTED) {
          if (!is_marked((P_)q,bd)) {
              mark((P_)q,bd);
              if (mark_stack_full()) {
                  mark_stack_overflowed = rtsTrue;
                  reset_mark_stack();
              }
              push_mark_stack((P_)q);
          }
          return;
      }
  }

  stp = bd->step->to;

  info = get_itbl(q);

  switch (info->type) {

  case WHITEHOLE:
      goto loop;

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR:
      copy(p,q,sizeW_fromITBL(info),stp);
      return;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)CHARLIKE_CLOSURE((StgChar)w)
                          );
      }
      else if (q->header.info == Izh_con_info &&
               (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          *p = TAG_CLOSURE(tag,
                           (StgClosure *)INTLIKE_CLOSURE((StgInt)w)
                          );
      }
      else {
          copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
      }
      return;
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      copy_tag(p,q,sizeofW(StgHeader)+1,stp,tag);
      return;

  case THUNK_1_0:
  case THUNK_0_1:
      copy(p,q,sizeofW(StgThunk)+1,stp);
      return;

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
          bd->step->no != 0 &&
          bd->step->no == generations[bd->gen_no].n_steps-1) {
          stp = bd->step;
      }
#endif
      copy(p,q,sizeofW(StgThunk)+2,stp);
      return;

  case FUN_1_1:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_2_0:
      copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
      return;

  case CONSTR_0_2:
      copy_tag(p,q,sizeofW(StgHeader)+2,stp,tag);
      return;

  case THUNK:
      copy(p,q,thunk_sizeW_fromITBL(info),stp);
      return;

  case FUN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case STABLE_NAME:
  case CONSTR:
      copy_tag(p,q,sizeW_fromITBL(info),stp,tag);
      return;

  case BCO:
      copy(p,q,bco_sizeW((StgBCO *)q),stp);
      return;

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
      return;

  case THUNK_SELECTOR:
      eval_thunk_selector(p, (StgSelector *)q, rtsTrue);
      return;

  case IND:
  case IND_OLDGEN:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      *p = q;
      goto loop;

  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);

  case PAP:
      copy(p,q,pap_sizeW((StgPAP*)q),stp);
      return;

  case AP:
      copy(p,q,ap_sizeW((StgAP*)q),stp);
      return;

  case AP_STACK:
      copy(p,q,ap_stack_sizeW((StgAP_STACK*)q),stp);
      return;

  case EVACUATED:
      /* Already evacuated, just return the forwarding address.
       * HOWEVER: if the requested destination generation (gct->evac_step) is
       * older than the actual generation (because the object was
       * already evacuated to a younger generation) then we have to
       * set the gct->failed_to_evac flag to indicate that we couldn't
       * manage to promote the object to the desired generation.
       *
       * Optimisation: the check is fairly expensive, but we can often
       * shortcut it if either the required generation is 0, or the
       * current object (the EVACUATED) is in a high enough generation.
       * We know that an EVACUATED always points to an object in the
       * same or an older generation.  stp is the lowest step that the
       * current object would be evacuated to, so we only do the full
       * check if stp is too low.
       */
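      /* Concrete illustration (not from the original source): if
       * gct->evac_step is the youngest step (gen 0, step 0), then
       * "stp < gct->evac_step" is never true and the expensive
       * Bdescr() lookup on the evacuee is skipped.  Likewise, if the
       * EVACUATED closure would itself be evacuated to a step no
       * younger than gct->evac_step, the invariant above guarantees
       * its evacuee is in the same or an older generation, so the
       * full check is redundant there too.
       */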
      {
          StgClosure *e = ((StgEvacuated*)q)->evacuee;
          *p = e;
          if (stp < gct->evac_step) { // optimisation
              if (HEAP_ALLOCED(e) && Bdescr((P_)e)->step < gct->evac_step) {
                  gct->failed_to_evac = rtsTrue;
                  TICK_GC_FAILED_PROMOTION();
              }
          }
          return;
      }

  case ARR_WORDS:
      // just copy the block
      copy(p,q,arr_words_sizeW((StgArrWords *)q),stp);
      return;

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      copy(p,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
      return;

  case TSO:
  {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->link;
          *p = q;
          goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
          StgTSO *new_tso;
          StgPtr r, s;

          copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
          new_tso = (StgTSO *)*p;
          move_TSO(tso, new_tso);
          for (r = tso->sp, s = new_tso->sp;
               r < tso->stack+tso->stack_size;) {
              *s++ = *r++;
          }
          return;
      }
  }

  case TREC_HEADER:
      copy(p,q,sizeofW(StgTRecHeader),stp);
      return;

  case TVAR_WATCH_QUEUE:
      copy(p,q,sizeofW(StgTVarWatchQueue),stp);
      return;

  case TVAR:
      copy(p,q,sizeofW(StgTVar),stp);
      return;

  case TREC_CHUNK:
      copy(p,q,sizeofW(StgTRecChunk),stp);
      return;

  case ATOMIC_INVARIANT:
      copy(p,q,sizeofW(StgAtomicInvariant),stp);
      return;

  case INVARIANT_CHECK_QUEUE:
      copy(p,q,sizeofW(StgInvariantCheckQueue),stp);
      return;

  default:
      barf("evacuate: strange closure type %d", (int)(info->type));
  }
}

static void
unchain_thunk_selectors(StgSelector *p, StgClosure *val)
{
    StgSelector *prev;

    while (p)
    {
        ASSERT(p->header.info == &stg_BLACKHOLE_info);
        prev = (StgSelector*)((StgClosure *)p)->payload[0];

        // Update the THUNK_SELECTOR with an indirection to the
        // EVACUATED closure now at p.  Why do this rather than
        // upd_evacuee(q,p)?  Because we have an invariant that an
        // EVACUATED closure always points to an object in the
        // same or an older generation (required by the short-cut
        // test in the EVACUATED case, below).
        SET_INFO(p, &stg_IND_info);
        ((StgInd *)p)->indirectee = val;

        // For the purposes of LDV profiling, we have created an
        // indirection.
        LDV_RECORD_CREATE(p);

        p = prev;
    }
}

/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   p points to a THUNK_SELECTOR that we want to evaluate.  The
   result of "evaluating" it will be evacuated and a pointer to the
   to-space closure will be returned.

   If the THUNK_SELECTOR could not be evaluated (its selectee is still
   a THUNK, for example), then the THUNK_SELECTOR itself will be
   evacuated.
   -------------------------------------------------------------------------- */
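
/* Illustrative example (not from the original file): for

       let pair = (e1, e2) in fst pair

   "fst pair" is compiled to a THUNK_SELECTOR with selector_offset 0.
   Once pair's constructor is in the heap, the GC can replace the
   selector thunk by pair's first field, dropping the thunk's
   accidental reference to e2; this is the classic space leak that
   selector evaluation in the GC eliminates.
 */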

static void
eval_thunk_selector (StgClosure **q, StgSelector *p, rtsBool evac)
                 // NB. for legacy reasons, p & q are swapped around :(
{
    nat field;
    StgInfoTable *info;
    StgWord info_ptr;
    StgClosure *selectee;
    StgSelector *prev_thunk_selector;
    bdescr *bd;
    StgClosure *val;

    prev_thunk_selector = NULL;
    // this is a chain of THUNK_SELECTORs that we are going to update
    // to point to the value of the current THUNK_SELECTOR.  Each
    // closure on the chain is a BLACKHOLE, and points to the next in the
    // chain with payload[0].
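
    // Illustrative sketch (not from the original file): after following
    // selectors s1, s2, s3 the chain looks like
    //
    //     prev_thunk_selector -> s3 -> s2 -> s1 -> NULL
    //
    // with each link stored in payload[0] of a BLACKHOLE'd selector;
    // unchain_thunk_selectors() walks it and rewrites every entry into
    // an IND pointing at the final value.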

selector_chain:

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED(p)) {
        // If the THUNK_SELECTOR is in to-space or in a generation that we
        // are not collecting, then bale out early.  We won't be able to
        // save any space in any case, and updating with an indirection is
        // trickier in a non-collected gen: we would have to update the
        // mutable list.
        if ((bd->gen_no > N) || (bd->flags & BF_EVACUATED)) {
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            *q = (StgClosure *)p;
            return;
        }
        // we don't update THUNK_SELECTORS in the compacted
        // generation, because compaction does not remove the INDs
        // that result, this causes confusion later
        // (scavenge_mark_stack doesn't deal with IND).  BEWARE!  This
        // bit is very tricky to get right.  If you make changes
        // around here, test by compiling stage 3 with +RTS -c -RTS.
        if (bd->flags & BF_COMPACTED) {
            // must call evacuate() to mark this closure if evac==rtsTrue
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }

    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
#if defined(THREADED_RTS)
    // In threaded mode, we'll use WHITEHOLE to lock the selector
    // thunk while we evaluate it.
    {
        info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
        if (info_ptr == (W_)&stg_WHITEHOLE_info) {
            do {
                info_ptr = xchg((StgPtr)&p->header.info, (W_)&stg_WHITEHOLE_info);
            } while (info_ptr == (W_)&stg_WHITEHOLE_info);
        }
        // make sure someone else didn't get here first...
        if (INFO_PTR_TO_STRUCT(info_ptr)->type != THUNK_SELECTOR) {
            // the closure has been evacuated by another thread: unlock
            // it, undo the chain we have built, and let evacuate()
            // deal with it.
            SET_INFO(p, (const StgInfoTable *)info_ptr);
            *q = (StgClosure *)p;
            if (evac) evacuate(q);
            unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p);
            return;
        }
    }
#else
    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = (StgWord)p->header.info;
    SET_INFO(p,&stg_BLACKHOLE_info);
#endif

    field = INFO_PTR_TO_STRUCT(info_ptr)->layout.selector_offset;

    // The selectee might be a constructor closure,
    // so we untag the pointer.
    selectee = UNTAG_CLOSURE(p->selectee);

selector_loop:
    // selectee now points to the closure that we're trying to select
    // a field from.  It may or may not be in to-space: we try not to
    // end up in to-space, but it's impractical to avoid it in
    // general.  The compacting GC scatters to-space pointers in
    // from-space during marking, for example.  We rely on the property
    // that evacuate() doesn't mind if it gets passed a to-space pointer.

    info = get_itbl(selectee);
    switch (info->type) {

      case WHITEHOLE:
          goto bale_out; // about to be evacuated by another thread (or a loop).

      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
      case CONSTR_NOCAF_STATIC:
      {
          // check that the size is in range
          ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
                                     info->layout.payload.nptrs));

          // Select the right field from the constructor
          val = selectee->payload[field];

#ifdef PROFILING
          // For the purposes of LDV profiling, we have destroyed
          // the original selector thunk, p.
          SET_INFO(p, (StgInfoTable *)info_ptr);
          LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC((StgClosure *)p);
          SET_INFO(p, &stg_BLACKHOLE_info);
#endif

          // the closure in val is now the "value" of the
          // THUNK_SELECTOR in p.  However, val may itself be a
          // THUNK_SELECTOR, in which case we want to continue
          // evaluating until we find the real value, and then
          // update the whole chain to point to the value.
      val_loop:
          info = get_itbl(UNTAG_CLOSURE(val));
          switch (info->type) {
          case IND:
          case IND_PERM:
          case IND_OLDGEN:
          case IND_OLDGEN_PERM:
          case IND_STATIC:
              val = ((StgInd *)val)->indirectee;
              goto val_loop;

          case THUNK_SELECTOR:
              ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
              prev_thunk_selector = p;
              p = (StgSelector*)val;
              goto selector_chain;

          default:
              ((StgClosure*)p)->payload[0] = (StgClosure *)prev_thunk_selector;
              prev_thunk_selector = p;

              *q = val;
              if (evac) evacuate(q);
              val = *q;
              // evacuate() cannot recurse through
              // eval_thunk_selector(), because we know val is not
              // a THUNK_SELECTOR.
              unchain_thunk_selectors(prev_thunk_selector, val);
              return;
          }
      }

      case IND:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
      case IND_STATIC:
          // Again, we might need to untag a constructor.
          selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee );
          goto selector_loop;

      case EVACUATED:
          // We don't follow pointers into to-space; the constructor
          // has already been evacuated, so we won't save any space
          // leaks by evaluating this selector thunk anyhow.
          goto bale_out;

      case THUNK_SELECTOR:
      {
          // recursively evaluate this selector.  We don't want to
          // recurse indefinitely, so we impose a depth bound.
          if (gct->thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
              goto bale_out;
          }

          gct->thunk_selector_depth++;
          // rtsFalse says "don't evacuate the result".  It will,
          // however, update any THUNK_SELECTORs that are evaluated
          // along the way.
          eval_thunk_selector(&val, (StgSelector*)selectee, rtsFalse);
          gct->thunk_selector_depth--;

          // did we actually manage to evaluate it?
          if (val == selectee) goto bale_out;

          // Of course this pointer might be tagged...
          selectee = UNTAG_CLOSURE(val);
          goto selector_loop;
      }

      case THUNK:
      case THUNK_STATIC:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
          // not evaluated yet
          goto bale_out;

      default:
          barf("eval_thunk_selector: strange selectee %d",
               (int)(info->type));
    }

bale_out:
    // We didn't manage to evaluate this thunk; restore the old info
    // pointer.  But don't forget: we still need to evacuate the thunk itself.
    SET_INFO(p, (const StgInfoTable *)info_ptr);
    if (evac) {
        copy(&val,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to);
    } else {
        val = (StgClosure *)p;
    }
    *q = val;
    unchain_thunk_selectors(prev_thunk_selector, val);
    return;
}

/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */
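
/* Example (illustrative, not part of the original file): if a TSO is
 * copied from address 0x1000 to 0x3000 on a 64-bit machine, diff is
 * 0x2000 bytes = 1024 words, and every pointer the TSO keeps into its
 * own stack (here tso->sp) must be shifted by that same word delta;
 * that is all move_TSO does.
 */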

void
move_TSO (StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointer...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
}