/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "LdvProfile.h"

static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_mutable_list(g) scavenge_mutable_list1(g)
#endif
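
// In a THREADED_RTS build without PARALLEL_GC there is only one GC
// thread, so we can route these entry points to their single-threaded
// ("1"-suffixed) variants and avoid the atomic operations that a
// parallel GC would need.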

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}

static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
       ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
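
    // If anything scavenged above still points into a younger
    // generation, the TSO as a whole is dirty and must stay on the
    // mutable list (TSO_DIRTY); otherwise only the link field can be
    // dirty, which the cheaper TSO_LINK_DIRTY flag records for
    // scavenge_mutable_list.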
    if (gct->failed_to_evac) {
        tso->flags |= TSO_DIRTY;
        scavenge_TSO_link(tso);
    } else {
        tso->flags &= ~TSO_DIRTY;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}

/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
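
// In these argument/stack bitmaps a 1 bit marks a non-pointer word, so
// the walkers below evacuate exactly the words whose bit is 0.  (SRT
// bitmaps, by contrast, use a 1 bit to mark an entry that must be
// followed.)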

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p = (StgPtr)args;
    StgWord bitmap;
    nat size;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p = (StgPtr)payload;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}

/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */

/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size = (nat)large_srt->l.size;
    p = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
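
// SRT entries keep static closures (CAFs in particular) alive.  Static
// objects are only traversed during a major GC, which is why the two
// helpers below return immediately when !major_gc.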
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}

/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_step is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_step back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
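
// Invariant: everything below bd->u.scan has already been scavenged;
// the words between bd->u.scan and bd->free are copied objects whose
// interior pointers still need to be followed.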

static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;
    rtsBool saved_eager_promotion;
    step_workspace *ws;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
               bd->start, bd->gen_no, bd->step->no, bd->u.scan);

    gct->scan_bd = bd;
    gct->evac_step = bd->step;
    saved_evac_step = gct->evac_step;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->steps[bd->step->abs_no];

    p = bd->u.scan;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
        case CONSTR_1_0:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }

        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            // fall through
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY:
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;

        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
            p += BLACKHOLE_sizeW();
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }

        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;

        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
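            // (even a "clean" MUT_ARR_PTRS stays on the mutable list;
            // see the MUT_ARR_PTRS_CLEAN case in scavenge_mutable_list.)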
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;

            scavengeTSO(tso);
            p += tso_sizeW(tso);
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVarWatchQueue);
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVar);
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecHeader);
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgAtomicInvariant);
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgInvariantCheckQueue);
            break;
        }

        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
            }
        }
    }

    if (p > bd->free) {
        gct->copied += ws->todo_free - bd->free;
        bd->free = p;
    }

    debugTrace(DEBUG_gc, "   scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
    }

    gct->scan_bd = NULL;
}

/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;

    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;

linear_scan:
    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }

        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }

        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            gct->failed_to_evac = rtsTrue; // always on the mutable list
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
            }
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));
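        // (the compacting collector keeps two mark bits per object: the
        // bit at q means "live", the bit at q+1 means "already scavenged";
        // the linear scan below tests the second bit to skip objects that
        // have been done.)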

    } // while (!mark_stack_empty())

    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
        //
        // find a closure that is marked but not scavenged, and start
        // from there.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {
            oldgen_scan++;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
                goto linear_scan;
            }

            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            goto linear_scan;
        }

        oldgen_scan_bd = oldgen_scan_bd->link;
        if (oldgen_scan_bd != NULL) {
            oldgen_scan = oldgen_scan_bd->start;
            goto linear_scan;
        }
    }
}

/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
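
// (scavenge_one is also called for objects on the mutable list and for
// large objects, which is why the switch below covers many more closure
// types than just the mutable ones.)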

static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;
        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }

    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        break;
    }

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_step = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
    {
        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects.  If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice.  evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
         */
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
            break;
        }
        evacuate(&((StgInd *)p)->indirectee);
    }

#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->steps[0].scan;
            bdescr *start_bd = gen->steps[0].scan_bd;
            nat size = 0;
            scavenge(&gen->steps[0]);
            if (start_bd != gen->steps[0].scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->steps[0].scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->steps[0].scan -
                    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
            } else {
                size = gen->steps[0].scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}

/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
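
// gen->saved_mut_list is the generation's mutable list as it stood when
// this GC started; objects found to be still dirty are re-recorded onto
// the new mut_list via recordMutableGen_GC, and the old list is freed.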

void
scavenge_mutable_list(generation *gen)
{
    bdescr *bd;
    StgPtr p, q;

    bd = gen->saved_mut_list;

    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif

            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
            // TSO, are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // Must be on the mutable list because its link
                    // field is dirty.
                    ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen);
            }
        }
    }

    // free the old mut_list
    freeChain_sync(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
}

/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   We scavenge the static object list, i.e. the static objects that have
   been visited by evacuate() during this GC.  Objects move from
   static_objects to scavenged_static_objects as they are processed;
   evacuating their pointers may add further objects to static_objects.
   -------------------------------------------------------------------------- */

static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_step = &oldest_gen->steps[0];

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

        info = get_itbl(p);
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;

        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}

/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */

static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}

/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
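
// Each stack frame starts with an info pointer (the return address);
// its info table describes the frame's layout as a small bitmap, a
// large bitmap, a RET_DYN liveness mask, or a RET_FUN argument block,
// and the cases below dispatch on that.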

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */

    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //     enter T
            //                  blackhole T
            //     update T
            //     GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
        {
            nat type;
            const StgInfoTable *i;

            i = ((StgUpdateFrame *)p)->updatee->header.info;
            if (!IS_FORWARDING_PTR(i)) {
                type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
                if (type == IND) {
                    ((StgUpdateFrame *)p)->updatee->header.info =
                        (StgInfoTable *)&stg_IND_PERM_info;
                } else if (type == IND_OLDGEN) {
                    ((StgUpdateFrame *)p)->updatee->header.info =
                        (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
                }
                evacuate(&((StgUpdateFrame *)p)->updatee);
                p += sizeofW(StgUpdateFrame);
                continue;
            }
        }

            // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;

        case RET_BCO:
        {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

            // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }

            // Dynamic bitmap: the mask is stored on the stack, and
            // there are a number of non-pointers followed by a number
            // of pointers above the bitmapped area.  (see StgMacros.h,
            // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p = (P_)&((StgRetDyn *)p)->payload[0];
            size = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }

        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}

/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_step set by caller; similar games played with evac_step as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_step will
  be zero.
  --------------------------------------------------------------------------- */

static void
scavenge_large (step_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_step = ws->step;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
        dbl_link_onto(bd, &ws->step->scavenged_large_objects);
        ws->step->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->step->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->step->gen);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}

/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest step that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */

static rtsBool
scavenge_find_work (void)
{
    int s;
    step_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }
    // only return when there is no more work to do

    return did_anything;
}

/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}