/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#include "MarkStack.h"
#include "Capability.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size );
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
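/* Editorial note (a sketch of the build scheme): in a THREADED_RTS build
 * this file is compiled twice, once with PARALLEL_GC defined and once
 * without.  The #defines above give the non-PARALLEL_GC compilation its
 * own names -- a call or definition of scavenge_block(bd) becomes
 * scavenge_block1(bd) -- so both variants can be linked, and the cheaper
 * single-threaded one used when only one GC thread is running.
 */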
/* -----------------------------------------------------------------------------
   scavengeTSO
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;
    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }
    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    // update the pointer from the Task.
    if (tso->bound != NULL) {
        tso->bound->tso = tso;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;
    evacuate((StgClosure **)&tso->blocked_exceptions);
    evacuate((StgClosure **)&tso->bq);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    tso->dirty = gct->failed_to_evac;

    evacuate((StgClosure **)&tso->_link);
    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgWakeup
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        evacuate(&tso->block_info.closure);
    }
#ifdef THREADED_RTS
    // in the THREADED_RTS, block_info.closure must always point to a
    // valid closure, because we assume this in throwTo().  In the
    // non-threaded RTS it might be a FD (for
    // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
    else {
        tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    }
#endif
    if (tso->dirty == 0 && gct->failed_to_evac) {
        tso->flags |= TSO_LINK_DIRTY;
    } else {
        tso->flags &= ~TSO_LINK_DIRTY;
    }

    gct->eager_promotion = saved_eager;
}
/* -----------------------------------------------------------------------------
   Mutable arrays of pointers
   -------------------------------------------------------------------------- */
static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
    W_ m;
    rtsBool any_failed;
    StgPtr p, q;

    any_failed = rtsFalse;
    p = (StgPtr)&a->payload[0];
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    // scavenge the final, partial card
    q = (StgPtr)&a->payload[a->ptrs];
    for (; p < q; p++) {
        evacuate((StgClosure**)p);
    }
    if (gct->failed_to_evac) {
        any_failed = rtsTrue;
        *mutArrPtrsCard(a,m) = 1;
        gct->failed_to_evac = rtsFalse;
    } else {
        *mutArrPtrsCard(a,m) = 0;
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
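/* Worked example for scavenge_mut_arr_ptrs() above (editorial sketch,
 * assuming MUT_ARR_PTRS_CARD_BITS is 7, i.e. 128-element cards): an array
 * with a->ptrs == 300 has mutArrPtrsCards(300) == 3 cards.  The main loop
 * scavenges the two full cards payload[0..127] and payload[128..255]; the
 * duplicated tail handles the partial card payload[256..299].  A card byte
 * is set to 1 only if some element in that card still points into a
 * younger generation, letting scavenge_mut_arr_ptrs_marked() below skip
 * the clean cards on later collections.
 */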
// scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
{
    W_ m;
    StgPtr p, q;
    rtsBool any_failed;

    any_failed = rtsFalse;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
    {
        if (*mutArrPtrsCard(a,m) != 0) {
            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
                        (StgPtr)&a->payload[a->ptrs]);
            for (; p < q; p++) {
                evacuate((StgClosure**)p);
            }
            if (gct->failed_to_evac) {
                any_failed = rtsTrue;
                gct->failed_to_evac = rtsFalse;
            } else {
                *mutArrPtrsCard(a,m) = 0;
            }
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
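/* Worked example (editorial): with size == 3 and bitmap == 0b010, the
 * small-bitmap walk above evacuates args[0] (bit clear: pointer), skips
 * args[1] (bit set: non-pointer), and evacuates args[2].  In argument and
 * stack bitmaps, a clear bit denotes a pointer slot.
 */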
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
  nat bitmap;
  StgClosure **p;

  bitmap = srt_bitmap;
  p = srt;

  if (bitmap == (StgHalfWord)(-1)) {
      scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
      return;
  }

  while (bitmap != 0) {
      if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
          // Special-case to handle references to closures hiding out in DLLs, since
          // double indirections are required to get at those. The code generator knows
          // which is which when generating the SRT, so it stores the (indirect)
          // reference to the DLL closure in the table by first adding one to it.
          // We check for this here, and undo the addition before evacuating it.
          //
          // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
          // closure that's fixed at link-time, and no extra magic is required.
          if ( (unsigned long)(*srt) & 0x1 ) {
              evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
          } else {
              evacuate(p);
          }
#else
          evacuate(p);
#endif
      }
      p++;
      bitmap = bitmap >> 1;
  }
}
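/* Editorial note: SRT bitmaps use the opposite convention from the
 * argument bitmaps above: here a *set* bit marks a live entry.  For
 * example, srt_bitmap == 0x5 evacuates srt[0] and srt[2] and skips
 * srt[1]; the all-ones value (StgHalfWord)(-1) instead redirects to
 * scavenge_large_srt_bitmap().
 */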
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
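/* Editorial example: when collecting generations 0..N, a todo block
 * belonging to generation g is scanned with gct->evac_gen set to g.  If
 * evacuating some field only manages to copy its target into a generation
 * younger than g, gct->failed_to_evac is set and the object is recorded
 * on g's mutable list at the bottom of the scan loop, so the old-to-new
 * pointer is revisited at the next collection.
 */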
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
  StgPtr p, q;
  StgInfoTable *info;
  rtsBool saved_eager_promotion;
  gen_workspace *ws;

  debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
             bd->start, bd->gen_no, bd->u.scan);

  gct->scan_bd = bd;
  gct->evac_gen = bd->gen;
  saved_eager_promotion = gct->eager_promotion;
  gct->failed_to_evac = rtsFalse;

  ws = &gct->gens[bd->gen->no];

  p = bd->u.scan;

  // we might be evacuating into the very object that we're
  // scavenging, so we have to check the real bd->free pointer each
  // time around the loop.
  while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

    ASSERT(bd->link == NULL);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    ASSERT(gct->thunk_selector_depth == 0);

    q = p;
    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        p += sizeofW(StgMVar);
        break;
    }
    case FUN_2_0:
        scavenge_fun_srt(info);
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;
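    /* Editorial note: the closure-type suffixes read <ptrs>_<nonptrs>, so
     * FUN_2_0 above carries two pointer words and no non-pointers: both
     * payload slots are evacuated and the scan pointer advances by
     * sizeofW(StgHeader) + 2.  The specialised cases that follow all
     * repeat this pattern for the other small layouts.
     */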
    case THUNK_2_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[1]);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case CONSTR_2_0:
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_1_0:
        scavenge_fun_srt(info);
    case CONSTR_1_0:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_1:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_0_1:
        scavenge_fun_srt(info);
    case CONSTR_0_1:
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_2:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_0_2:
        scavenge_fun_srt(info);
    case CONSTR_0_2:
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_1:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_1_1:
        scavenge_fun_srt(info);
    case CONSTR_1_1:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;
    case FUN:
        scavenge_fun_srt(info);
        goto gen_obj;

    case THUNK:
    {
        StgPtr end;

        scavenge_thunk_srt(info);
        end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    gen_obj:
    case CONSTR:
    case WEAK:
    case PRIM:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }
    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        evacuate((StgClosure **)&bco->instrs);
        evacuate((StgClosure **)&bco->literals);
        evacuate((StgClosure **)&bco->ptrs);
        p += bco_sizeW(bco);
        break;
    }
    case IND_PERM:
      if (bd->gen_no != 0) {
#ifdef PROFILING
        // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
        // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
        LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
        // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
        SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

        // We pretend that p has just been created.
        LDV_RECORD_CREATE((StgClosure *)p);
      }
      // fall through
    case IND_OLDGEN_PERM:
        evacuate(&((StgInd *)p)->indirectee);
        p += sizeofW(StgInd);
        break;
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        p += sizeofW(StgMutVar);
        break;
    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        gct->eager_promotion = rtsFalse;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
        } else {
            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
        }
        p += sizeofW(StgBlockingQueue);
        break;
    }
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
    }
    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        p += arr_words_sizeW((StgArrWords *)p);
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        scavengeTSO(tso);
        p += tso_sizeW(tso);
        break;
    }
    case MUT_PRIM:
    {
        StgPtr end;

        gct->eager_promotion = rtsFalse;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecChunk);
        break;
    }
    default:
        barf("scavenge: unimplemented/strange closure type %d @ %p",
             info->type, p);
    }
    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (gct->failed_to_evac) {
        gct->failed_to_evac = rtsFalse;
        if (bd->gen_no > 0) {
            recordMutableGen_GC((StgClosure *)q, bd->gen_no);
        }
    }
  }
  if (p > bd->free)  {
      gct->copied += ws->todo_free - bd->free;
      bd->free = p;
  }

  debugTrace(DEBUG_gc, " scavenged %ld bytes",
             (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

  // update stats: this is a block that has been scavenged
  gct->scanned += bd->free - bd->u.scan;
  bd->u.scan = bd->free;

  if (bd != ws->todo_bd) {
      // we're not going to evac any more objects into
      // this block, so push it now.
      push_scanned_block(bd, ws);
  }

  gct->scan_bd = NULL;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static GNUC_ATTR_HOT void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    rtsBool saved_eager_promotion;

    gct->evac_gen = oldest_gen;
    saved_eager_promotion = gct->eager_promotion;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
787 switch (info->type) {
792 StgMVar *mvar = ((StgMVar *)p);
793 gct->eager_promotion = rtsFalse;
794 evacuate((StgClosure **)&mvar->head);
795 evacuate((StgClosure **)&mvar->tail);
796 evacuate((StgClosure **)&mvar->value);
797 gct->eager_promotion = saved_eager_promotion;
799 if (gct->failed_to_evac) {
800 mvar->header.info = &stg_MVAR_DIRTY_info;
802 mvar->header.info = &stg_MVAR_CLEAN_info;
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }
        gen_obj:
        case CONSTR:
        case WEAK:
        case PRIM:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }
        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }
        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }
        case BLOCKING_QUEUE:
        {
            StgBlockingQueue *bq = (StgBlockingQueue *)p;

            gct->eager_promotion = rtsFalse;
            evacuate(&bq->bh);
            evacuate((StgClosure**)&bq->owner);
            evacuate((StgClosure**)&bq->queue);
            evacuate((StgClosure**)&bq->link);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            } else {
                bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
            }
            break;
        }
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        {
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            break;
        }
        case MUT_PRIM:
        {
            StgPtr end;

            gct->eager_promotion = rtsFalse;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
            }
        }
    } // while (p = pop_mark_stack())
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
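/* Editorial example: a gen-1 MUT_VAR whose var field was mutated to point
 * at a gen-0 closure sits on generation 1's mutable list.  At the next
 * minor GC, scavenge_one() evacuates the target; it returns rtsTrue only
 * if some pointer still refers to a younger generation, in which case the
 * caller puts the object back on the mutable list.
 */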
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    rtsBool no_luck;
    rtsBool saved_eager_promotion;

    saved_eager_promotion = gct->eager_promotion;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case PRIM:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        gct->eager_promotion = rtsFalse;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
        } else {
            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
        }
        break;
    }
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
        scavengeTSO((StgTSO*)p);
        break;
    case MUT_PRIM:
    {
        StgPtr end;

        gct->eager_promotion = rtsFalse;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);
#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->scan;
            bdescr *start_bd = gen->scan_bd;
            nat size = 0;
            scavenge(&gen);
            if (start_bd != gen->scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->scan -
                    (P_)BLOCK_ROUND_DOWN(gen->scan);
            } else {
                size = gen->scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
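/* Editorial example: with N == 0 (a minor GC), a gen-1 object that was
 * mutated to point into the nursery must appear on gen 1's mutable list;
 * scavenging that list as a root is what keeps the nursery object alive
 * without tracing all of generation 1.  Objects found to contain no
 * young-generation pointers are simply not re-recorded, which is how
 * non-mutable entries drop off the list.
 */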
void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_gen = gen;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                // can happen due to concurrent writeMutVars
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif
            // Check whether this object is "clean", that is, it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as TSOs, are always on the
            // mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case MUT_ARR_PTRS_DIRTY:
            {
                rtsBool saved_eager_promotion;
                saved_eager_promotion = gct->eager_promotion;
                gct->eager_promotion = rtsFalse;

                scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);

                if (gct->failed_to_evac) {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                } else {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
                }

                gct->eager_promotion = saved_eager_promotion;
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            }
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Should be on the mutable list because its link
                    // field is dirty.  However, in parallel GC we may
                    // have a thread on multiple mutable lists, so
                    // this assertion would be invalid:
                    // ASSERT(tso->flags & TSO_LINK_DIRTY);

                    evacuate((StgClosure **)&tso->_link);
                    if (   tso->why_blocked == BlockedOnMVar
                        || tso->why_blocked == BlockedOnBlackHole
                        || tso->why_blocked == BlockedOnMsgWakeup
                        || tso->why_blocked == BlockedOnMsgThrowTo
                        || tso->why_blocked == NotBlocked
                        ) {
                        evacuate((StgClosure **)&tso->block_info.prev);
                    }
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   We walk the static object list, evacuating everything each object
   points to, and move each scavenged object from the static_objects
   list onto the scavenged_static_objects list.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
  StgClosure *p;
  const StgInfoTable *info;

  debugTrace(DEBUG_gc, "scavenging static objects");

  /* Always evacuate straight to the oldest generation for static
   * objects */
  gct->evac_gen = oldest_gen;

  /* keep going until we've scavenged all the objects on the linked
     list... */

  while (1) {

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list after each evacuation...
     * (static_objects is a global)
     */
    p = gct->static_objects;
    if (p == END_OF_STATIC_LIST) {
        break;
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);
    /*
        if (info->type==RBH)
        info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
    */
    // make sure the info pointer is into text space

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    gct->static_objects = *STATIC_LINK(info,p);
    *STATIC_LINK(info,p) = gct->scavenged_static_objects;
    gct->scavenged_static_objects = p;
    switch (info -> type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        evacuate(&ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list of the oldest generation.  We
         * leave it *on* the scavenged_static_objects list, though,
         * in case we visit this object again.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
        }
        break;
      }

    case THUNK_STATIC:
      scavenge_thunk_srt(info);
      break;

    case FUN_STATIC:
      scavenge_fun_srt(info);
      break;

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        // evacuate the pointers
        for (q = (P_)p->payload; q < next; q++) {
            evacuate((StgClosure **)q);
        }
        break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(gct->failed_to_evac == rtsFalse);
  }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
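/* Editorial example: for a frame with 70 bitmap entries on a 64-bit
 * machine (BITS_IN(W_) == 64), large_bitmap->bitmap[0] describes slots
 * 0..63 and bitmap[1] the remaining 6; the i % BITS_IN(W_) == 0 test in
 * the walk below is what steps b on to the next bitmap word.
 */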
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
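/* Editorial example: a RET_SMALL frame with BITMAP_SIZE == 2 and
 * BITMAP_BITS == 0b01 is laid out as [info ptr][word0][word1]; the walk
 * below skips word0 (bit set: non-pointer), evacuates word1 (bit clear:
 * pointer), and then follows the frame's SRT.
 */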
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  const StgRetInfoTable* info;
  StgWord bitmap;
  nat size;

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with an activation record.
   */

  while (p < stack_end) {
    info  = get_ret_itbl((StgClosure *)p);

    switch (info->i.type) {
    case UPDATE_FRAME:
        // In SMP, we can get update frames that point to indirections
        // when two threads evaluate the same thunk.  We do attempt to
        // discover this situation in threadPaused(), but it's
        // possible that the following sequence occurs:
        //
        //        A             B
        //                  enter T
        //     enter T
        //     blackhole T
        //                  update T
        //     GC
        //
        // Now T is an indirection, and the update frame is already
        // marked on A's stack, so we won't traverse it again in
        // threadPaused().  We could traverse the whole stack again
        // before GC, but that seems like overkill.
        //
        // Scavenging this update frame as normal would be disastrous;
        // the updatee would end up pointing to the value.  So we
        // check whether the value after evacuation is a BLACKHOLE,
        // and if not, we change the update frame to an stg_enter
        // frame that simply returns the value.  Hence, blackholing is
        // compulsory (otherwise we would have to check for thunks
        // too).
        //
        // Note [upd-black-hole]
        // One slight hiccup is that the THUNK_SELECTOR machinery can
        // overwrite the updatee with an IND.  In parallel GC, this
        // could even be happening concurrently, so we can't check for
        // the IND.  Fortunately if we assume that blackholing is
        // happening (either lazy or eager), then we can be sure that
        // the updatee is never a THUNK_SELECTOR and we're ok.
        // NB. this is a new invariant: blackholing is not optional.
    {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *v;

        evacuate(&frame->updatee);
        v = frame->updatee;
        if (GET_CLOSURE_TAG(v) != 0 ||
            (get_itbl(v)->type != BLACKHOLE)) {
            // blackholing is compulsory, see above.
            frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
        }
        ASSERT(v->header.info != &stg_TSO_info);
        p += sizeofW(StgUpdateFrame);
        continue;
    }
      // small bitmap (< 32 entries, or 64 on a 64-bit machine)
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case RET_SMALL:
        bitmap = BITMAP_BITS(info->i.layout.bitmap);
        size   = BITMAP_SIZE(info->i.layout.bitmap);
        // NOTE: the payload starts immediately after the info-ptr, we
        // don't have an StgHeader in the same sense as a heap closure.
        p++;
        p = scavenge_small_bitmap(p, size, bitmap);
    follow_srt:
        scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
        continue;

    case RET_BCO: {
        StgBCO *bco;
        nat size;

        p++;
        evacuate((StgClosure **)p);
        bco = (StgBCO *)*p;
        p++;
        size = BCO_BITMAP_SIZE(bco);
        scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
        p += size;
        continue;
    }
      // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
    case RET_BIG:
    {
        nat size;

        size = GET_LARGE_BITMAP(&info->i)->size;
        p++;
        scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
        p += size;
        // and don't forget to follow the SRT
        goto follow_srt;
    }
      // Dynamic bitmap: the mask is stored on the stack, and
      // there are a number of non-pointers followed by a number
      // of pointers above the bitmapped area.  (see StgMacros.h,
      // HEAP_CHK_GEN).
    case RET_DYN:
    {
        StgWord dyn;
        dyn = ((StgRetDyn *)p)->liveness;

        // traverse the bitmap first
        bitmap = RET_DYN_LIVENESS(dyn);
        p      = (P_)&((StgRetDyn *)p)->payload[0];
        size   = RET_DYN_BITMAP_SIZE;
        p = scavenge_small_bitmap(p, size, bitmap);

        // skip over the non-ptr words
        p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            evacuate((StgClosure **)p);
            p++;
        }
        continue;
    }
    case RET_FUN:
    {
        StgRetFun *ret_fun = (StgRetFun *)p;
        StgFunInfoTable *fun_info;

        evacuate(&ret_fun->fun);
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        p = scavenge_arg_block(fun_info, ret_fun->payload);
        goto follow_srt;
    }

    default:
        barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen = ws->gen;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int g;
    gen_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do
    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}