/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size );
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p = (StgClosure **)large_srt->srt;
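    // One bitmap bit per SRT entry, LSB first: a set bit marks an entry
    // that must be evacuated.  After every BITS_IN(W_) entries we step
    // to the next bitmap word.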
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
STATIC_INLINE void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;
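    // SRT entries point only at static closures, and static objects are
    // only collected in a major GC, so there is nothing to do here in a
    // minor collection.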
    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}
STATIC_INLINE void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenging a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;
    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
       ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->link);
    }

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
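    // If any of the pointers above failed to evacuate (i.e. still point
    // into a younger generation), the TSO must be rescanned at the next
    // GC, so mark it dirty; otherwise it can be marked clean, which lets
    // scavenge_mutable_list() skip its stack entirely.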
    if (gct->failed_to_evac) {
        tso->flags |= TSO_DIRTY;
    } else {
        tso->flags &= ~TSO_DIRTY;
    }

    gct->eager_promotion = saved_eager;
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
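    // In arg/stack bitmaps a 0 bit marks a pointer word (evacuate it)
    // and a 1 bit marks a non-pointer word (skip it) -- the opposite
    // convention from the SRT bitmaps above.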
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;

    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;
linear_scan:
    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (((volatile StgWord *)info)[1] & 0xffff) {
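        // Many of the cases below use the CLEAN/DIRTY trick: after
        // scavenging a mutable object we rewrite its info pointer to the
        // _CLEAN variant if none of its fields still point into a younger
        // generation (so it can be dropped from the mutable list), and to
        // the _DIRTY variant otherwise.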
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;
        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;
        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;
        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;
        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;
        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;
        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }
        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }
        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }
        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }
        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }
        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }
        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            gct->failed_to_evac = rtsTrue; // always on the mutable list
            break;
        }
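        // The STM closure types below are treated as permanently mutable:
        // their fields are evacuated with gct->evac_step set to 0 (no
        // eager promotion), and failed_to_evac is forced to rtsTrue so
        // that they always stay on the mutable list.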
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
            }
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));
    } // while (!mark_stack_empty())
    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }
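    // The compacting collector keeps two mark bits per object: the bit at
    // q marks the object as live, and the bit at q+1 (set above) marks it
    // as already scavenged.  The linear scan below looks for objects that
    // are marked live but not yet scavenged.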
    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
    loop:
        // find a closure that is marked but not scavenged, and start
        // scavenging it.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {
            oldgen_scan++;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
                goto loop;
            }
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            goto linear_scan;
        }

        oldgen_scan_bd = oldgen_scan_bd->link;
        if (oldgen_scan_bd != NULL) {
            oldgen_scan = oldgen_scan_bd->start;
            goto loop;
        }
    }
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */

static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);
    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case FUN:
    case FUN_1_0:                       // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }
    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        gct->failed_to_evac = rtsTrue; // always on the mutable list
        break;
    }
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_step = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    {
        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects.  If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice.  evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
         */
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
            break;
        }
        evacuate(&((StgInd *)p)->indirectee);
    }
#if 0 && defined(DEBUG)
    if (RtsFlags.DebugFlags.gc)
    /* Debugging code to print out the size of the thing we just
     * promoted
     */
    {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
            size += (P_)BLOCK_ROUND_UP(start) - start;
            start_bd = start_bd->link;
            while (start_bd != gen->steps[0].scan_bd) {
                size += BLOCK_SIZE_W;
                start_bd = start_bd->link;
            }
            size += gen->steps[0].scan -
                (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
            size = gen->steps[0].scan - start;
        }
        debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
    }
#endif
    break;
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }
    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
void
scavenge_mutable_list(generation *gen)
{
    bdescr *bd;
    StgPtr p, q;

    bd = gen->saved_mut_list;

    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
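            // Debug-style accounting of what lives on the mutable list;
            // a clean MUT_VAR or MVAR should never appear here, because
            // clean objects are removed from (or never put on) the list.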
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
            // TSO, are always on the mutable list.
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // A clean TSO: we don't have to traverse its
                    // stack.  However, we *do* follow the link field:
                    // we don't want to have to mark a TSO dirty just
                    // because we put it on a different queue.
                    if (tso->why_blocked != BlockedOnBlackHole) {
                        evacuate((StgClosure **)&tso->link);
                    }
                    recordMutableGen_GC((StgClosure *)p,gen);
                    continue;
                }
            }
            default:
                ;
            }
            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen);
            }
        }
    }
    // free the old mut_list
    freeChain_sync(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Static objects that have been evacuated are kept on the
   gct->static_objects list; we scavenge each one, following any
   pointers it contains, and then move it onto the
   gct->scavenged_static_objects list.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
    StgClosure* p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_step = &oldest_gen->steps[0];
    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        if (info->type == RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;
        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen);
            }
            break;
        }
        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;
        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }
        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
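    // As in scavenge_small_bitmap() below: a 0 bit means "pointer", so
    // the word is evacuated; a 1 bit means "non-pointer", so it is
    // skipped.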
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
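/* A worked example (illustrative, not from the original source): for a
 * frame of size 3 with bitmap 0b101, the words are examined from the
 * bottom up: bit 1 -> non-pointer (skip), bit 0 -> pointer (evacuate),
 * bit 1 -> non-pointer (skip).  Only the middle word is evacuated.
 */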
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info  = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //       A             B
            //                  enter T
            //    enter T
            //                  blackhole T
            //    update T
            //    GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
        {
            nat type;
            type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
            if (type == IND) {
                ((StgUpdateFrame *)p)->updatee->header.info =
                    (StgInfoTable *)&stg_IND_PERM_info;
            } else if (type == IND_OLDGEN) {
                ((StgUpdateFrame *)p)->updatee->header.info =
                    (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
            }
            evacuate(&((StgUpdateFrame *)p)->updatee);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);
        follow_srt:
            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;
        case RET_BCO:
        {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }
        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area.  (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;
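            // A RET_FUN frame holds a function closure and its pending
            // arguments; the layout of the argument block is described by
            // the function's own info table, so we evacuate the function
            // first and then use its info table to walk the arguments.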
            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_step set by caller; similar games played with evac_step as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_step will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (step_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_step = ws->step;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {
        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
        dbl_link_onto(bd, &ws->step->scavenged_large_objects);
        ws->step->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->step->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->step->gen);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   ------------------------------------------------------------------------- */

#include "Scav.c-inc"

#include "Scav.c-inc"
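// Scav.c-inc contains the body of the block-scavenging code.  It is
// included twice, presumably to generate the two variants used by
// scavenge_find_work() below: scavenge_block1() for the single-threaded
// case and scavenge_block() for the parallel case (the #defines that
// select the variant for each inclusion are not shown here).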
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest step that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int s;
    step_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;
loop:
    did_something = rtsFalse;
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];

        gct->scan_bd = NULL;
        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            if (n_gc_threads == 1) {
                scavenge_block1(ws->todo_bd);
            } else {
                scavenge_block(ws->todo_bd);
            }
            did_something = rtsTrue;
            break;
        }
        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }
        if ((bd = grab_todo_block(ws)) != NULL) {
            // no need to assign this to ws->scan_bd, we're going
            // to scavenge the whole thing and then push it on
            // our scavd list.  This saves pushing out the
            // scan_bd block, which might be partial.
            if (n_gc_threads == 1) {
                scavenge_block1(bd);
            } else {
                scavenge_block(bd);
            }
            did_something = rtsTrue;
            break;
        }
    }
    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

    // only return when there is no more work to do

    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }
    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }
    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}
    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        return rtsTrue;
    }
    // Check for global work in any step.  We don't need to check for
    // local work, because we have already exited scavenge_loop(),
    // which means there is no local work for this thread.
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];
        if (ws->todo_large_objects) return rtsTrue;
        if (ws->step->todos) return rtsTrue;
    }