/* -----------------------------------------------------------------------------
 * (c) The GHC Team 1998-2006
 * Generational garbage collector: scavenging functions
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 * ---------------------------------------------------------------------------*/
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);
static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
    bitmap = large_srt->l.bitmap[b];
    size = (nat)large_srt->l.size;
    p = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
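        // Each bit corresponds to one SRT entry: a set bit means the entry
        // points to a closure that must be evacuated.  Whenever i reaches a
        // multiple of BITS_IN(W_), 'bitmap' is refilled from the next word
        // of the bitmap array (below).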
        if ((bitmap & 1) != 0) {
        if (i % BITS_IN(W_) == 0) {
            bitmap = large_srt->l.bitmap[b];
/* evacuate the SRT. If srt_bitmap is zero, then there isn't an
 * srt field in the info table. That's ok, because we'll
 * never dereference it.
scavenge_srt (StgClosure **srt, nat srt_bitmap)
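    // An srt_bitmap of (StgHalfWord)(-1) is a sentinel meaning the SRT is
    // too large for an inline bitmap; that case is handed off to
    // scavenge_large_srt_bitmap() below.  Otherwise, each set bit marks an
    // SRT entry that must be evacuated.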
    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
        bitmap = bitmap >> 1;
scavenge_thunk_srt(const StgInfoTable *info)
    StgThunkInfoTable *thunk_info;
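    // SRT entries refer to static closures (including CAFs), which are only
    // traversed and retained during a major collection, so a minor GC has
    // nothing to do here.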
    if (!major_gc) return;
    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
scavenge_fun_srt(const StgInfoTable *info)
    StgFunInfoTable *fun_info;
    if (!major_gc) return;
    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
scavengeTSO (StgTSO *tso)
    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list. We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;
    if ( tso->why_blocked == BlockedOnMVar
         || tso->why_blocked == BlockedOnBlackHole
         || tso->why_blocked == BlockedOnException
        evacuate(&tso->block_info.closure);
    evacuate((StgClosure **)&tso->blocked_exceptions);
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->link);
    // scavenge the current transaction record
    evacuate((StgClosure **)&tso->trec);
    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
    if (gct->failed_to_evac) {
        tso->flags |= TSO_DIRTY;
        tso->flags &= ~TSO_DIRTY;
    gct->eager_promotion = saved_eager;
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
    switch (fun_info->f.fun_type) {
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        bitmap = bitmap >> 1;
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
    StgFunInfoTable *fun_info;
    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    switch (fun_info->f.fun_type) {
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        bitmap = bitmap >> 1;
scavenge_PAP (StgPAP *pap)
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
scavenge_AP (StgAP *ap)
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
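/* Note: scavenge_PAP and scavenge_AP both defer to scavenge_PAP_payload
 * because a PAP and an AP carry their function closure and argument payload
 * in the same layout, so the same bitmap-driven walk serves for either.
 */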
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.
   This is slightly different from scavenge():
     - we don't walk linearly through the objects, so the scavenger
       doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
scavenge_mark_stack(void)
    step *saved_evac_step;
    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;
    while (!mark_stack_empty()) {
        p = pop_mark_stack();
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);
        switch (((volatile StgWord *)info)[1] & 0xffff) {
            rtsBool saved_eager_promotion = gct->eager_promotion;
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;
            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
                mvar->header.info = &stg_MVAR_CLEAN_info;
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            evacuate(&((StgClosure *)p)->payload[0]);
            scavenge_fun_srt(info);
            scavenge_thunk_srt(info);
            scavenge_fun_srt(info);
            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        case SE_CAF_BLACKHOLE:
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
        // A chunk of stack saved in a heap object
            StgAP_STACK *ap = (StgAP_STACK *)p;
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            scavenge_PAP((StgPAP *)p);
            scavenge_AP((StgAP *)p);
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            gct->eager_promotion = saved_eager;
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            gct->failed_to_evac = rtsTrue; // mutable anyhow.
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            scavengeTSO((StgTSO*)p);
            gct->failed_to_evac = rtsTrue; // always on the mutable list
        case TVAR_WATCH_QUEUE:
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            StgTVar *tvar = ((StgTVar *) p);
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
        case ATOMIC_INVARIANT:
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
        case INVARIANT_CHECK_QUEUE:
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));
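        // In the compacting collector there is one mark bit per word: the
        // bit for the object's first word means "live", and the bit for the
        // word after it (set here) means "already scavenged".  The linear
        // scan below tests that second bit with is_marked(oldgen_scan+1,...).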
    } // while (!mark_stack_empty())
    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
        // find a closure that is marked but not scavenged, and start
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {
        if (oldgen_scan < oldgen_scan_bd->free) {
            // already scavenged?
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            oldgen_scan_bd = oldgen_scan_bd->link;
            if (oldgen_scan_bd != NULL) {
                oldgen_scan = oldgen_scan_bd->start;
/* -----------------------------------------------------------------------------
   Scavenge one object.
   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers. Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
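/* scavenge_one() returns rtsTrue if, after scavenging, the object still
 * points into a younger generation (i.e. something could not be promoted),
 * in which case the caller must keep the object on the mutable list.
 */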
scavenge_one(StgPtr p)
    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);
    switch (info->type) {
        rtsBool saved_eager_promotion = gct->eager_promotion;
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;
        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
            mvar->header.info = &stg_MVAR_CLEAN_info;
        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
    case FUN_1_0:                       // hardly worth specialising these guys
        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
    case MUT_VAR_DIRTY: {
        rtsBool saved_eager_promotion = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
    case SE_CAF_BLACKHOLE:
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        StgAP_STACK *ap = (StgAP_STACK *)p;
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        p = scavenge_PAP((StgPAP *)p);
        p = scavenge_AP((StgAP *)p);
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        gct->eager_promotion = saved_eager;
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        gct->failed_to_evac = rtsTrue;
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        scavengeTSO((StgTSO*)p);
        gct->failed_to_evac = rtsTrue; // always on the mutable list
    case TVAR_WATCH_QUEUE:
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        StgTVar *tvar = ((StgTVar *) p);
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
    case ATOMIC_INVARIANT:
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
    case INVARIANT_CHECK_QUEUE:
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
    case IND_OLDGEN_PERM:
        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects. If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice. evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
        evacuate(&((StgInd *)p)->indirectee);
#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
            StgPtr start = gen->steps[0].scan;
            bdescr *start_bd = gen->steps[0].scan_bd;
            scavenge(&gen->steps[0]);
            if (start_bd != gen->steps[0].scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->steps[0].scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                size += gen->steps[0].scan -
                    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
                size = gen->steps[0].scan - start;
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        barf("scavenge_one: strange object %d", (int)(info->type));
    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.
   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots. We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
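/* For each entry on the saved mutable list we first bump a statistics
 * counter for its closure type, then either handle it specially (clean
 * objects such as MUT_ARR_PTRS_CLEAN and clean TSOs just go straight back
 * on the list) or scavenge it with scavenge_one(); anything that still
 * points into a younger generation is recorded back onto the generation's
 * mutable list.
 */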
scavenge_mutable_list(generation *gen)
    bd = gen->saved_mut_list;
    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
            switch (get_itbl((StgClosure *)p)->type) {
                barf("MUT_VAR_CLEAN on mutable list");
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
                barf("MVAR_CLEAN on mutable list");
                mutlist_MVARS++; break;
                mutlist_OTHERS++; break;
            // Check whether this object is "clean", that is, it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged. Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
            // TSO, are always on the mutable list.
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen);
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // A clean TSO: we don't have to traverse its
                    // stack. However, we *do* follow the link field:
                    // we don't want to have to mark a TSO dirty just
                    // because we put it on a different queue.
                    if (tso->why_blocked != BlockedOnBlackHole) {
                        evacuate((StgClosure **)&tso->link);
                    recordMutableGen_GC((StgClosure *)p,gen);
            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen);
    // free the old mut_list
    freeChain_sync(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
/* -----------------------------------------------------------------------------
   Scavenging the static objects.
   Static objects that have been evacuated during this GC are chained on
   the static_objects list; we scavenge each one, move it onto the
   scavenged_static_objects list, and evacuate anything it points to.
   -------------------------------------------------------------------------- */
scavenge_static(void)
    const StgInfoTable *info;
    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_step = &oldest_gen->steps[0];
    /* keep going until we've scavenged all the objects on the linked
     * list... */
        ACQUIRE_SPIN_LOCK(&static_objects_sync);
        /* get the next static object from the list. Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
        if (p == END_OF_STATIC_LIST) {
            RELEASE_SPIN_LOCK(&static_objects_sync);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        // make sure the info pointer is into text space
        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
        static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = scavenged_static_objects;
        scavenged_static_objects = p;
        RELEASE_SPIN_LOCK(&static_objects_sync);
        switch (info -> type) {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);
            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation. We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen);
            scavenge_thunk_srt(info);
            scavenge_fun_srt(info);
            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            barf("scavenge_static: strange closure %d", (int)(info->type));
        ASSERT(gct->failed_to_evac == rtsFalse);
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
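/* In these bitmaps a clear bit denotes a pointer field (which must be
 * evacuated) and a set bit denotes a non-pointer word (which is skipped).
 * For example, a three-field frame whose middle word is an unboxed literal
 * has size 3 and bitmap 0b010: fields 0 and 2 are evacuated, field 1 is
 * stepped over.
 */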
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        if (i % BITS_IN(W_) == 0) {
            bitmap = large_bitmap->bitmap[b];
            bitmap = bitmap >> 1;
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        bitmap = bitmap >> 1;
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it. We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
scavenge_stack(StgPtr p, StgPtr stack_end)
    const StgRetInfoTable* info;
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);
        switch (info->i.type) {
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk. We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused(). We could traverse the whole stack again
            // before GC, but that seems like overkill.
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value. So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
                ((StgUpdateFrame *)p)->updatee->header.info =
                    (StgInfoTable *)&stg_IND_PERM_info;
            } else if (type == IND_OLDGEN) {
                ((StgUpdateFrame *)p)->updatee->header.info =
                    (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
            evacuate(&((StgUpdateFrame *)p)->updatee);
            p += sizeofW(StgUpdateFrame);
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p = scavenge_small_bitmap(p, size, bitmap);
            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            evacuate((StgClosure **)p);
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
            size = GET_LARGE_BITMAP(&info->i)->size;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            // and don't forget to follow the SRT
        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area. (see StgMacros.h,
            dyn = ((StgRetDyn *)p)->liveness;
            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p = (P_)&((StgRetDyn *)p)->payload[0];
            size = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);
            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;
            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
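            // A RET_FUN frame carries a function closure plus its pending
            // arguments; the argument layout is taken from the function's
            // own info table, which is why ret_fun->fun is evacuated before
            // its info table is consulted above.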
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
/*-----------------------------------------------------------------------------
  scavenge the large object list.
  evac_step set by caller; similar games played with evac_step as with
  scavenge() - see comment at the top of scavenge(). Most large
  objects are (repeatedly) mutable, so most of the time evac_step will
  --------------------------------------------------------------------------- */
scavenge_large (step_workspace *ws)
    gct->evac_step = ws->stp;
    bd = ws->todo_large_objects;
    for (; bd != NULL; bd = ws->todo_large_objects) {
        // take this object *off* the large objects list and put it on
        // the scavenged large objects list. This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;
        ACQUIRE_SPIN_LOCK(&ws->stp->sync_large_objects);
        dbl_link_onto(bd, &ws->stp->scavenged_large_objects);
        ws->stp->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->stp->sync_large_objects);
        if (scavenge_one(p)) {
            if (ws->stp->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->stp->gen);
/* ----------------------------------------------------------------------------
   ------------------------------------------------------------------------- */
#include "Scav.c-inc"
#include "Scav.c-inc"
/* ----------------------------------------------------------------------------
   Find the oldest full block to scavenge, and scavenge it.
   ------------------------------------------------------------------------- */
scavenge_find_global_work (void)
    gct->scav_global_work++;
    for (s = total_steps-1; s>=0; s--)
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
        ws = &gct->steps[s];
        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
        if ((bd = grab_todo_block(ws)) != NULL) {
            // no need to assign this to ws->scan_bd, we're going
            // to scavenge the whole thing and then push it on
            // our scavd list. This saves pushing out the
            // scan_bd block, which might be partial.
            scavenge_block0(bd, bd->start);
            scavenge_block(bd, bd->start);
            push_scan_block(bd, ws);
    if (flag) return rtsTrue;
/* ----------------------------------------------------------------------------
   Look for local work to do.
   We can have outstanding scavenging to do if, for any of the workspaces,
     - the scan block is the same as the todo block, and new objects
       have been evacuated to the todo block.
     - the scan block *was* the same as the todo block, but the todo
       block filled up and a new one has been allocated.
   ------------------------------------------------------------------------- */
scavenge_find_local_work (void)
    gct->scav_local_work++;
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
        ws = &gct->steps[s];
        if (ws->todo_bd != NULL)
            ws->todo_bd->free = ws->todo_free;
        // If we have a todo block and no scan block, start
        // scanning the todo block.
        if (ws->scan_bd == NULL && ws->todo_bd != NULL)
            ws->scan_bd = ws->todo_bd;
            ws->scan = ws->scan_bd->start;
        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
            scavenge_block0(ws->scan_bd, ws->scan);
            scavenge_block(ws->scan_bd, ws->scan);
            ws->scan = ws->scan_bd->free;
        if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free
            && ws->scan_bd != ws->todo_bd)
            // we're not going to evac any more objects into
            // this block, so push it now.
            push_scan_block(ws->scan_bd, ws);
        // we might be able to scan the todo block now. But
        // don't do it right away: there might be full blocks
        // waiting to be scanned as a result of scavenge_block above.
    if (flag) return rtsTrue;
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */
    work_to_do = rtsFalse;
    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(static_objects));
    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work. Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    if (scavenge_find_global_work()) goto loop;
    if (scavenge_find_local_work()) goto loop;
    if (work_to_do) goto loop;
    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
    // Check for global work in any step. We don't need to check for
    // local work, because we have already exited scavenge_loop(),
    // which means there is no local work for this thread.
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
        ws = &gct->steps[s];
        if (ws->todo_large_objects) return rtsTrue;
        if (ws->stp->todos) return rtsTrue;