/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "MarkStack.h"
#include "Capability.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);

#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
/* -----------------------------------------------------------------------------
   scavengeTSO
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list. We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    debugTrace(DEBUG_gc, "scavenging thread %d", (int)tso->id);

    // update the pointer from the Task.
    if (tso->bound != NULL) {
        tso->bound->tso = tso;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgWakeup
        || tso->why_blocked == BlockedOnMsgThrowTo
        ) {
        evacuate(&tso->block_info.closure);
    }
#ifdef THREADED_RTS
    // in the THREADED_RTS, block_info.closure must always point to a
    // valid closure, because we assume this in throwTo(). In the
    // non-threaded RTS it might be a FD (for
    // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
    else {
        tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    }
#endif

    evacuate((StgClosure **)&tso->blocked_exceptions);
    evacuate((StgClosure **)&tso->bq);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->dirty = 1;
        evacuate((StgClosure **)&tso->_link);
    } else {
        tso->dirty = 0;
        evacuate((StgClosure **)&tso->_link);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}
/* -----------------------------------------------------------------------------
   Mutable arrays of pointers
   -------------------------------------------------------------------------- */

static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
    W_ m;
    rtsBool any_failed;
    StgPtr p, q;

    any_failed = rtsFalse;
    p = (StgPtr)&a->payload[0];
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    q = (StgPtr)&a->payload[a->ptrs];
    if (p < q) {
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
// scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
{
    W_ m;
    StgPtr p, q;
    rtsBool any_failed;

    any_failed = rtsFalse;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
    {
        if (*mutArrPtrsCard(a,m) != 0) {
            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
                        (StgPtr)&a->payload[a->ptrs]);
            for (; p < q; p++) {
                evacuate((StgClosure**)p);
            }
            if (gct->failed_to_evac) {
                any_failed = rtsTrue;
                gct->failed_to_evac = rtsFalse;
            } else {
                *mutArrPtrsCard(a,m) = 0;
            }
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
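
/* A standalone sketch of the card arithmetic used by the two functions
 * above, assuming the usual MUT_ARR_PTRS_CARD_BITS == 7 (128-word cards).
 * CARD_BITS and cards_for() are stand-ins for the real RTS macros, not
 * part of the build.
 */
#if 0
#include <stdio.h>

#define CARD_BITS 7   /* stand-in for MUT_ARR_PTRS_CARD_BITS */

/* one card per 2^CARD_BITS elements, rounding up, like mutArrPtrsCards() */
static long cards_for (long nptrs) {
    return (nptrs + (1L << CARD_BITS) - 1) >> CARD_BITS;
}

int main (void) {
    /* an array of 300 pointers has 3 cards covering 128 + 128 + 44
     * elements; card m covers payload[m*128 .. m*128+127] */
    printf("%ld cards\n", cards_for(300));
    return 0;
}
#endif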
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
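
/* PAP and AP share a {fun, n_args, payload[]} prefix, which is why a
 * single scavenge_PAP_payload() serves both. A simplified sketch of that
 * shared shape (field names and types simplified; the real definitions
 * live in the RTS closure headers):
 */
#if 0
struct pap_like {
    void     *fun;        /* the function closure */
    unsigned  n_args;     /* number of words in payload[] */
    void     *payload[];  /* arguments (PAP) or a stack chunk (AP) */
};
#endif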
/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */

/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT. If srt_bitmap is zero, then there isn't an
 * srt field in the info table. That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
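
/* A standalone sketch of the low-bit tagging trick described above
 * (illustrative only, not RTS code): an SRT entry with bit 0 set is the
 * address of an import slot plus one, and must be untagged and
 * dereferenced once more before use. */
#if 0
#include <stdio.h>
#include <stdint.h>

int main (void) {
    int  closure = 42;        /* stands in for a closure in a DLL */
    int *slot    = &closure;  /* stands in for the DLL import slot */

    /* the code generator stores (address of slot) + 1 in the SRT: */
    uintptr_t srt_entry = (uintptr_t)&slot + 1;

    if (srt_entry & 0x1) {
        /* undo the +1, then follow the extra indirection */
        int **p = (int **)(srt_entry & ~(uintptr_t)0x1);
        printf("%d\n", **p);  /* prints 42 */
    }
    return 0;
}
#endif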
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
    StgPtr p, q;
    StgInfoTable *info;
    rtsBool saved_eager_promotion;
    gen_workspace *ws;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
               bd->start, bd->gen_no, bd->u.scan);

    gct->scan_bd = bd;
    gct->evac_gen = bd->gen;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->gens[bd->gen->no];

    p = bd->u.scan;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(bd->link == NULL);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
        case CONSTR_1_0:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case PRIM:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }
        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // @LDV profiling
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            // fall through
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY:
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;
        case BLOCKING_QUEUE:
        {
            StgBlockingQueue *bq = (StgBlockingQueue *)p;

            gct->eager_promotion = rtsFalse;
            evacuate(&bq->bh);
            evacuate((StgClosure**)&bq->owner);
            evacuate((StgClosure**)&bq->queue);
            evacuate((StgClosure**)&bq->link);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            } else {
                bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
            }
            p += sizeofW(StgBlockingQueue);
            break;
        }
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }

        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;

        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        {
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;

            p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            scavengeTSO(tso);
            p += tso_sizeW(tso);
            break;
        }

        case MUT_PRIM:
        {
            StgPtr end;

            gct->eager_promotion = rtsFalse;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }
        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, bd->gen_no);
            }
        }
    }

    if (p > bd->free) {
        gct->copied += ws->todo_free - bd->free;
        bd->free = p;
    }

    debugTrace(DEBUG_gc, "   scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
    }
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    rtsBool saved_eager_promotion;

    gct->evac_gen = oldest_gen;
    saved_eager_promotion = gct->eager_promotion;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case PRIM:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }

        case BLOCKING_QUEUE:
        {
            StgBlockingQueue *bq = (StgBlockingQueue *)p;

            gct->eager_promotion = rtsFalse;
            evacuate(&bq->bh);
            evacuate((StgClosure**)&bq->owner);
            evacuate((StgClosure**)&bq->queue);
            evacuate((StgClosure**)&bq->link);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            } else {
                bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
            }
            break;
        }

        case ARR_WORDS:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
            scavengeTSO((StgTSO*)p);
            break;

        case MUT_PRIM:
        {
            StgPtr end;

            gct->eager_promotion = rtsFalse;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
            }
        }
    } // while (p = pop_mark_stack())
}
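
/* A standalone sketch of the mark-stack discipline used above
 * (illustrative only; the real stack lives behind the MarkStack.h API):
 * objects are pushed as they are discovered and popped here for
 * scavenging, so there is no linear scan pointer to advance. */
#if 0
#include <stdio.h>

static void *stack[1024];
static int   sp = 0;

static void  push_demo (void *p) { stack[sp++] = p; }
static void *pop_demo  (void)    { return sp > 0 ? stack[--sp] : NULL; }

int main (void) {
    int a = 1, b = 2;
    push_demo(&a);
    push_demo(&b);

    void *p;
    while ((p = pop_demo())) {      /* LIFO order: b first, then a */
        printf("scavenging %d\n", *(int *)p);
    }
    return 0;
}
#endif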
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers. Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */

static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    rtsBool no_luck;
    rtsBool saved_eager_promotion;

    saved_eager_promotion = gct->eager_promotion;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case PRIM:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        gct->eager_promotion = rtsFalse;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
        } else {
            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
        }
        break;
    }

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }

    case TSO:
        scavengeTSO((StgTSO*)p);
        break;

    case MUT_PRIM:
    {
        StgPtr end;

        gct->eager_promotion = rtsFalse;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated. See #3424.
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
        evacuate(&((StgInd *)p)->indirectee);
#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->scan;
            bdescr *start_bd = gen->scan_bd;
            nat size = 0;
            scavenge(&gen);
            if (start_bd != gen->scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->scan -
                    (P_)BLOCK_ROUND_DOWN(gen->scan);
            } else {
                size = gen->scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots. We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
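
/* A minimal standalone sketch of the remembered-set idea described above
 * (invented data structures, not the RTS API): old-generation objects
 * that might point into a younger generation are recorded on a list, and
 * a minor GC scans only that list instead of the whole old generation. */
#if 0
#include <stdio.h>

#define OLD_COUNT 100

static int old_gen[OLD_COUNT];    /* pretend old-gen objects */
static int mut_list[OLD_COUNT];   /* indices of recorded (dirty) objects */
static int mut_len = 0;

static void record_mutable (int i) { mut_list[mut_len++] = i; }

int main (void) {
    old_gen[3]  = 1;              /* pretend these two objects were mutated */
    old_gen[42] = 1;
    record_mutable(3);
    record_mutable(42);

    /* minor GC: visit only the recorded objects, not all OLD_COUNT */
    for (int k = 0; k < mut_len; k++) {
        printf("scavenging old object %d\n", mut_list[k]);
    }
    return 0;
}
#endif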
static void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_gen = gen;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                // can happen due to concurrent writeMutVars
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif

            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged. Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as TSO,
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case MUT_ARR_PTRS_DIRTY:
            {
                rtsBool saved_eager_promotion;
                saved_eager_promotion = gct->eager_promotion;
                gct->eager_promotion = rtsFalse;

                scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);

                if (gct->failed_to_evac) {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                } else {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
                }

                gct->eager_promotion = saved_eager_promotion;
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            }
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Should be on the mutable list because its link
                    // field is dirty. However, in parallel GC we may
                    // have a thread on multiple mutable lists, so
                    // this assertion would be invalid:
                    // ASSERT(tso->flags & TSO_LINK_DIRTY);

                    evacuate((StgClosure **)&tso->_link);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   The static objects are treated as roots: each object on the
   static_objects list is scavenged, evacuating everything it points
   to (always into the oldest generation), and is then moved onto the
   scavenged_static_objects list.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen = oldest_gen;

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list. Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;

        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation. We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */

static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
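
/* A standalone sketch of the bitmap convention used by
 * scavenge_small_bitmap() and scavenge_large_bitmap(): bit i SET means
 * word i is a non-pointer and is skipped; bit i CLEAR means word i is a
 * closure pointer to follow. (Illustrative only.) */
#if 0
#include <stdio.h>

int main (void) {
    unsigned long bitmap = 0x5;   /* ...0101: words 0 and 2 are non-pointers */
    int size = 4;

    for (int i = 0; i < size; i++) {
        printf("word %d: %s\n", i,
               (bitmap & 1) ? "skip (non-pointer)" : "evacuate (pointer)");
        bitmap >>= 1;
    }
    return 0;
}
#endif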
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it. We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk. We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                   enter T
            //     enter T
            //                   blackhole T
            //     update T
            //     GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused(). We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value. So we
            // check whether the value after evacuation is a BLACKHOLE,
            // and if not, we change the update frame to an stg_enter
            // frame that simply returns the value. Hence, blackholing is
            // compulsory (otherwise we would have to check for thunks
            // too).
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND. In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND. Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            StgUpdateFrame *frame = (StgUpdateFrame *)p;
            StgClosure *v;

            evacuate(&frame->updatee);
            v = frame->updatee;
            if (GET_CLOSURE_TAG(v) != 0 ||
                (get_itbl(v)->type != BLACKHOLE)) {
                // blackholing is compulsory, see above.
                frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
            }
            ASSERT(v->header.info != &stg_TSO_info);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;
        case RET_BCO:
        {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }

        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area. (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d",
                 (int)(info->i.type));
        }
    }
}
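
/* scavenge_stack() advances frame by frame, with each frame's size
 * derived from its info table. A standalone sketch of the same walking
 * pattern over variable-size records, here with an explicit size prefix
 * instead of an info table (illustrative only): */
#if 0
#include <stdio.h>

int main (void) {
    /* each record is [size, payload...]; sizes include the header word */
    int mem[] = {3, 10, 11, 2, 20, 4, 30, 31, 32};
    int *p = mem, *end = mem + sizeof(mem)/sizeof(mem[0]);

    while (p < end) {
        printf("record of %d words at offset %ld\n", p[0], (long)(p - mem));
        p += p[0];   /* advance by the record's own size */
    }
    return 0;
}
#endif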
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge(). Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen = ws->gen;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list. This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted. This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int g;
    gen_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do

    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work. Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}
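
/* The driver above is a fixpoint loop: keep draining work sources
 * (static objects, the mark stack, todo blocks) and stop only when a
 * whole pass finds nothing. A minimal standalone sketch of that shape,
 * with invented work counters (illustrative only): */
#if 0
#include <stdio.h>

static int src_a = 3, src_b = 2;      /* pretend pending-work counters */

/* do one unit of work if any is pending; report whether we did any */
static int drain (int *src) {
    if (*src > 0) { (*src)--; return 1; }
    return 0;
}

int main (void) {
    int work_to_do;
    do {
        work_to_do  = 0;
        work_to_do |= drain(&src_a);  /* e.g. the static-object list */
        work_to_do |= drain(&src_b);  /* e.g. local todo blocks */
    } while (work_to_do);             /* a pass that does nothing ends it */
    puts("no more work");
    return 0;
}
#endif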