/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size );

#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_mutable_list(g) scavenge_mutable_list1(g)
#endif
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

STATIC_INLINE void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}
static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->flags |= TSO_DIRTY;
        scavenge_TSO_link(tso);
    } else {
        tso->flags &= ~TSO_DIRTY;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}
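/* Note: the two dirty bits give the mutable-list machinery two levels
 * of precision.  In outline (a summary of the logic above, not extra
 * behaviour):
 *
 *    evac of any TSO field failed  -> TSO_DIRTY      (rescan whole TSO)
 *    only the _link evac failed    -> TSO_LINK_DIRTY (rescan _link only)
 *    neither failed                -> both bits clear
 *
 * scavenge_mutable_list() below relies on exactly this invariant when
 * it meets a TSO whose TSO_DIRTY bit is clear.
 */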
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
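/* A worked example of the small-bitmap convention used above: a clear
 * bit means "pointer", a set bit means "non-pointer".  So an argument
 * block laid out as [ptr, word, ptr] would have (illustrative values)
 * size == 3 and bitmap 0b010: words 0 and 2 are evacuated, word 1 is
 * skipped, and the bitmap is shifted right once per word scanned.
 */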
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenge SRTs
   -------------------------------------------------------------------------- */

/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
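/* The SRT (Static Reference Table) attached to an info table is walked
 * with either the small inline bitmap in scavenge_srt() below or, when
 * srt_bitmap is the sentinel (StgHalfWord)(-1), the out-of-line
 * StgLargeSRT above.  Note the opposite polarity from argument
 * bitmaps: here a *set* bit means "evacuate this entry".
 */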
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs,
            // since double indirections are required to get at those. The code
            // generator knows which is which when generating the SRT, so it
            // stores the (indirect) reference to the DLL closure in the table
            // by first adding one to it. We check for this here, and undo the
            // addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_step is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_step back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
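/* The loop below is the classic Cheney scan: the scan pointer chases
 * the allocation pointer, and anything evacuate() copies into the same
 * block extends bd->free ahead of the scan.  A minimal sketch of the
 * shape (names illustrative, not the RTS API):
 *
 *     scan = bd->u.scan;
 *     while (scan < bd->free) {          // bd->free can grow under us
 *         scavenge the closure at scan;  // may evacuate into this block
 *         scan += size of that closure;
 *     }
 */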
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;
    rtsBool saved_eager_promotion;
    step_workspace *ws;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
               bd->start, bd->gen_no, bd->step->no, bd->u.scan);

    gct->scan_bd = bd;
    gct->evac_step = bd->step;
    saved_evac_step = gct->evac_step;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->steps[bd->step->abs_no];

    p = bd->u.scan;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
        case CONSTR_1_0:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;
        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }
        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // @LDV profiling
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            // fall through
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY:
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;

        case CAF_BLACKHOLE:
        case BLACKHOLE:
            p += BLACKHOLE_sizeW();
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }

        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;

        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            scavengeTSO(tso);
            p += tso_sizeW(tso);
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVarWatchQueue);
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVar);
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecHeader);
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgAtomicInvariant);
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgInvariantCheckQueue);
            break;
        }
        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
            }
        }
    }

    if (p > bd->free) {
        gct->copied += ws->todo_free - bd->free;
        bd->free = p;
    }

    debugTrace(DEBUG_gc, "   scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
    }

    gct->scan_bd = NULL;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
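/* The driving loop is, in outline (illustrative only):
 *
 *     while (!mark_stack_empty()) {
 *         p = pop_mark_stack();
 *         scavenge the closure at p;   // no size/advance bookkeeping
 *     }
 *
 * with a fallback linear scan over the compacted generation's old
 * blocks if the mark stack overflowed at some point.
 */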
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;

    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;

linear_scan:
    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }

        case CAF_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
            }
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())

    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
    loop:
        // find a closure that is marked but not scavenged, and start
        // from there.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {
            oldgen_scan++;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
                goto loop;
            }
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            goto linear_scan;
        }

        oldgen_scan_bd = oldgen_scan_bd->link;
        if (oldgen_scan_bd != NULL) {
            oldgen_scan = oldgen_scan_bd->start;
            goto loop;
        }
    }
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
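/* scavenge_one() returns rtsTrue iff some evacuation failed to reach
 * the desired generation, i.e. the object still points into a younger
 * generation.  Callers use it like this (see scavenge_mutable_list()
 * and scavenge_large() below):
 *
 *     if (scavenge_one(p)) {
 *         recordMutableGen_GC((StgClosure *)p, gen);  // keep it on the list
 *     }
 */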
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0:           // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case CAF_BLACKHOLE:
    case BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;
    case ARR_WORDS:
        // nothing to follow
        break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;
        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
        scavengeTSO((StgTSO*)p);
        break;

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_step = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    {
        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects.  If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice.  evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
         */
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
            break;
        }
        evacuate(&((StgInd *)p)->indirectee);
    }

#if 0 && defined(DEBUG)
    if (RtsFlags.DebugFlags.gc)
    /* Debugging code to print out the size of the thing we just
     * promoted
     */
    {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
            size += (P_)BLOCK_ROUND_UP(start) - start;
            start_bd = start_bd->link;
            while (start_bd != gen->steps[0].scan_bd) {
                size += BLOCK_SIZE_W;
                start_bd = start_bd->link;
            }
            size += gen->steps[0].scan -
                (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
            size = gen->steps[0].scan - start;
        }
        debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
    }
#endif
    break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
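/* Each generation's mutable list is a chain of blocks holding plain
 * arrays of pointers to (possibly) mutable objects, so the walk below
 * is simply (in outline):
 *
 *     for (bd = gen->saved_mut_list; bd != NULL; bd = bd->link)
 *         for (q = bd->start; q < bd->free; q++)
 *             scavenge the closure *q, re-recording it if still dirty;
 */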
void
scavenge_mutable_list(generation *gen)
{
    bdescr *bd;
    StgPtr p, q;

    bd = gen->saved_mut_list;

    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif

            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
            // TSO, are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // Must be on the mutable list because its link
                    // field is dirty.
                    ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen);
            }
        }
    }

    // free the old mut_list
    freeChain_sync(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Static objects that have been evacuated are chained together on
   gct->static_objects via their static-link fields.  We scavenge each
   one and move it onto gct->scavenged_static_objects; evacuating may
   push more objects onto static_objects, so we keep going until the
   list is empty.
   -------------------------------------------------------------------------- */
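/* Because the list is threaded through the closures themselves,
 * removing the head looks like this (the same steps as in the code
 * below):
 *
 *     p = gct->static_objects;
 *     gct->static_objects = *STATIC_LINK(info,p);         // unlink head
 *     *STATIC_LINK(info,p) = gct->scavenged_static_objects;
 *     gct->scavenged_static_objects = p;                  // push onto done list
 */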
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_step = &oldest_gen->steps[0];

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        /*
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        */
        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;

        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
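/* Large bitmaps span several words, so the walk below keeps two
 * counters: i, the field index, and b, the current bitmap word.  For
 * example, on a 64-bit machine field 64 lives in bit 0 of bitmap word
 * 1; whenever i reaches a multiple of BITS_IN(W_) we reload the next
 * bitmap word instead of shifting.
 */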
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
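/* Stack walking is frame-by-frame: each iteration reads the frame's
 * return-address info table and dispatches on the frame type.  In
 * outline (illustrative):
 *
 *     while (p < stack_end) {
 *         info = get_ret_itbl((StgClosure *)p);
 *         scavenge the frame's payload per its bitmap/layout;
 *         p += size of this frame;
 *     }
 */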
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */

    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //    enter T
            //                  blackhole T
            //    update T
            //    GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND.  In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND.  Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            nat type;
            const StgInfoTable *i;
            StgClosure *updatee;

            updatee = ((StgUpdateFrame *)p)->updatee;
            i = updatee->header.info;
            if (!IS_FORWARDING_PTR(i)) {
                type = get_itbl(updatee)->type;
                if (type == IND) {
                    updatee->header.info = &stg_IND_PERM_info;
                } else if (type == IND_OLDGEN) {
                    updatee->header.info = &stg_IND_OLDGEN_PERM_info;
                }
            }
            evacuate(&((StgUpdateFrame *)p)->updatee);
            ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            if (major_gc)
                scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;

        case RET_BCO: {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }

        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area.  (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_step set by caller; similar games played with evac_step as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_step will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (step_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_step = ws->step;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
        dbl_link_onto(bd, &ws->step->scavenged_large_objects);
        ws->step->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->step->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->step->gen);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest step that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int s;
    step_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

    // only return when there is no more work to do
    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */
void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}