/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#include "MarkStack.h"
#include "Capability.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_lists1(cap)
#endif
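/* Editor's note (added commentary): when the threaded RTS is built
 * without PARALLEL_GC, the defines above redirect each entry point to
 * its "1"-suffixed single-threaded variant, so this configuration can
 * skip the atomic operations that are only needed when several GC
 * threads scavenge concurrently.  For example, under these defines a
 * call such as
 *
 *     evacuate((StgClosure **)&tso->trec);
 *
 * compiles to evacuate1((StgClosure **)&tso->trec).
 */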
/* -----------------------------------------------------------------------------
   Scavenging a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    // update the pointer from the Task.
    if (tso->bound != NULL) {
        tso->bound->tso = tso;
    }
    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    evacuate((StgClosure **)&tso->blocked_exceptions);
    evacuate((StgClosure **)&tso->bq);

    // scavenge the current transaction record
    evacuate((StgClosure **)&tso->trec);
    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    tso->dirty = gct->failed_to_evac;

    evacuate((StgClosure **)&tso->_link);
    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgThrowTo
        || tso->why_blocked == NotBlocked
        ) {
        evacuate(&tso->block_info.closure);
    }
#ifdef THREADED_RTS
    // in the THREADED_RTS, block_info.closure must always point to a
    // valid closure, because we assume this in throwTo().  In the
    // non-threaded RTS it might be a FD (for
    // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
    else {
        tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
    }
#endif

    if (tso->dirty == 0 && gct->failed_to_evac) {
        tso->flags |= TSO_LINK_DIRTY;
    } else {
        tso->flags &= ~TSO_LINK_DIRTY;
    }

    gct->eager_promotion = saved_eager;
}
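/* Editor's note (added commentary): a TSO tracks two independent
 * dirtiness indicators.  tso->dirty covers the stack and the bulk of
 * the TSO itself, while the TSO_LINK_DIRTY flag covers only the _link
 * field; a thread whose link was mutated since the last GC can then
 * sit on a mutable list without forcing a rescan of its entire stack
 * (see the tso->dirty == 0 fast path in scavenge_mutable_list below).
 */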
/* -----------------------------------------------------------------------------
   Mutable arrays of pointers
   -------------------------------------------------------------------------- */

static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
    W_ m;
    rtsBool any_failed;
    StgPtr p, q;

    any_failed = rtsFalse;
    p = (StgPtr)&a->payload[0];
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    // scavenge the last card, which may be shorter than a full one
    q = (StgPtr)&a->payload[a->ptrs];
    for (; p < q; p++) {
        evacuate((StgClosure**)p);
    }
    if (gct->failed_to_evac) {
        any_failed = rtsTrue;
        *mutArrPtrsCard(a,m) = 1;
        gct->failed_to_evac = rtsFalse;
    } else {
        *mutArrPtrsCard(a,m) = 0;
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
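/* Editor's sketch (added, assuming the usual MUT_ARR_PTRS_CARD_BITS
 * value of 7): each card byte summarizes 2^7 = 128 consecutive payload
 * words, so an array of 300 pointers has mutArrPtrsCards(300) == 3
 * cards: two full cards of 128 elements and a final short card of 44.
 * A card value of 1 records that some element in that card still
 * points into a younger generation, which is what lets
 * scavenge_mut_arr_ptrs_marked() below skip the clean cards of a
 * dirty array.
 */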
// scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
{
    W_ m;
    rtsBool any_failed;
    StgPtr p, q;

    any_failed = rtsFalse;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
    {
        if (*mutArrPtrsCard(a,m) != 0) {
            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
                        (StgPtr)&a->payload[a->ptrs]);
            for (; p < q; p++) {
                evacuate((StgClosure**)p);
            }
            if (gct->failed_to_evac) {
                any_failed = rtsTrue;
                gct->failed_to_evac = rtsFalse;
            } else {
                *mutArrPtrsCard(a,m) = 0;
            }
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
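/* Editor's note (added commentary): in these layout bitmaps a 0 bit
 * means "pointer, evacuate it" and a 1 bit means "non-pointer, skip".
 * For example, walking four argument words with bitmap 0b0110
 * evacuates words 0 and 3 and skips the two raw words in between,
 * shifting the bitmap right once per word.
 */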
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */

/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
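/* Editor's note (added commentary): the bitmap word above is reloaded
 * each time i crosses a word boundary.  On a 64-bit machine, entries
 * 0..63 are described by l.bitmap[0]; once i reaches 64, bit 0 of
 * l.bitmap[1] describes entry 64, and so on.
 */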
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
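/* Editor's note (added commentary): SRTs reference static closures,
 * which live outside the generational heap and are only collected
 * during a major GC; that is why both functions above return
 * immediately when major_gc is false.
 */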
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
    StgPtr p, q;
    StgInfoTable *info;
    rtsBool saved_eager_promotion;
    gen_workspace *ws;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
               bd->start, bd->gen_no, bd->u.scan);

    gct->scan_bd = bd;
    gct->evac_gen = bd->gen;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->gens[bd->gen->no];

    p = bd->u.scan;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(bd->link == NULL);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
        case CONSTR_1_0:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case PRIM:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }
        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }
        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            // fall through
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY:
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;
        case BLOCKING_QUEUE:
        {
            StgBlockingQueue *bq = (StgBlockingQueue *)p;

            gct->eager_promotion = rtsFalse;
            evacuate(&bq->bh);
            evacuate((StgClosure**)&bq->owner);
            evacuate((StgClosure**)&bq->queue);
            evacuate((StgClosure**)&bq->link);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            } else {
                bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
            }
            p += sizeofW(StgBlockingQueue);
            break;
        }
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }

        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;

        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        {
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;

            p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
            p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            scavengeTSO(tso);
            p += tso_sizeW(tso);
            break;
        }

        case MUT_PRIM:
        {
            StgPtr end;

            gct->eager_promotion = rtsFalse;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }
        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, bd->gen_no);
            }
        }
    }

    if (p > bd->free)  {
        gct->copied += ws->todo_free - bd->free;
        bd->free = p;
    }

    debugTrace(DEBUG_gc, "   scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
    }

    gct->scan_bd = NULL;
}
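/* Editor's note (added commentary): on exit from scavenge_block(),
 * bd->u.scan == bd->free, i.e. every object copied into this block so
 * far has itself been scavenged.  If the block is still the
 * workspace's todo block, objects evacuated into it later reopen the
 * scan < free gap, and the block is picked up again by
 * scavenge_find_work().
 */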
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    rtsBool saved_eager_promotion;

    gct->evac_gen = oldest_gen;
    saved_eager_promotion = gct->eager_promotion;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case PRIM:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }
        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }
        case BLOCKING_QUEUE:
        {
            StgBlockingQueue *bq = (StgBlockingQueue *)p;

            gct->eager_promotion = rtsFalse;
            evacuate(&bq->bh);
            evacuate((StgClosure**)&bq->owner);
            evacuate((StgClosure**)&bq->queue);
            evacuate((StgClosure**)&bq->link);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
            } else {
                bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
            }
            break;
        }

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        {
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
        {
            // follow everything
            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            break;
        }

        case MUT_PRIM:
        {
            StgPtr end;

            gct->eager_promotion = rtsFalse;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
            }
        }
    } // while (p = pop_mark_stack())
}
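/* Editor's note (added commentary): the mark stack is only used when
 * the oldest generation is being compacted, which is why
 * gct->evac_gen is pinned to oldest_gen for every object popped
 * above: nothing scavenged here can ever need promoting beyond the
 * oldest generation.
 */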
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    rtsBool no_luck;
    rtsBool saved_eager_promotion;

    saved_eager_promotion = gct->eager_promotion;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case PRIM:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case BLOCKING_QUEUE:
    {
        StgBlockingQueue *bq = (StgBlockingQueue *)p;

        gct->eager_promotion = rtsFalse;
        evacuate(&bq->bh);
        evacuate((StgClosure**)&bq->owner);
        evacuate((StgClosure**)&bq->queue);
        evacuate((StgClosure**)&bq->link);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
        } else {
            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
        }
        break;
    }

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;
    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue;
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        break;
    }

    case MUT_PRIM:
    {
        StgPtr end;

        gct->eager_promotion = rtsFalse;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case BLACKHOLE:
        evacuate(&((StgInd *)p)->indirectee);

#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->scan;
            bdescr *start_bd = gen->scan_bd;
            nat size = 0;
            scavenge(&gen);
            if (start_bd != gen->scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->scan -
                    (P_)BLOCK_ROUND_DOWN(gen->scan);
            } else {
                size = gen->scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
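/* Editor's sketch (hypothetical usage, added): scavenge_one() returns
 * rtsTrue when the object still holds a pointer into a younger
 * generation, so a caller keeps such objects on a mutable list, e.g.
 *
 *     if (scavenge_one(p)) {
 *         recordMutableGen_GC((StgClosure *)p, gen->no);
 *     }
 *
 * This is exactly the pattern used by scavenge_mutable_list() and
 * scavenge_large() below.
 */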
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
static void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_gen = gen;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                // can happen due to concurrent writeMutVars
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif
            // Check whether this object is "clean", that is, it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as TSO, are always on the
            // mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case MUT_ARR_PTRS_DIRTY:
            {
                rtsBool saved_eager_promotion;
                saved_eager_promotion = gct->eager_promotion;
                gct->eager_promotion = rtsFalse;

                scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);

                if (gct->failed_to_evac) {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                } else {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
                }

                gct->eager_promotion = saved_eager_promotion;
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            }
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Should be on the mutable list because its link
                    // field is dirty.  However, in parallel GC we may
                    // have a thread on multiple mutable lists, so
                    // this assertion would be invalid:
                    // ASSERT(tso->flags & TSO_LINK_DIRTY);

                    evacuate((StgClosure **)&tso->_link);
                    if (   tso->why_blocked == BlockedOnMVar
                        || tso->why_blocked == BlockedOnBlackHole
                        || tso->why_blocked == BlockedOnMsgThrowTo
                        || tso->why_blocked == NotBlocked
                        ) {
                        evacuate((StgClosure **)&tso->block_info.prev);
                    }
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            // otherwise fall through to scavenge_one() below
            default:
                ;
            }
            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
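/* Editor's note (worked example, added): with
 * RtsFlags.GcFlags.generations == 3 and N == 0 (a minor GC), the loop
 * above scavenges the saved mutable list of generation 2 first, then
 * generation 1; generation 0 is the one being collected, so it has no
 * mutable list to treat as roots.
 */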
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   We scavenge the linked list of static objects that were evacuated
   during this GC.  Each object is taken off gct->static_objects, its
   pointers are followed, and it is moved onto the
   scavenged_static_objects list; evacuation may append further
   objects to static_objects, so we keep going until the list is
   empty.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen = oldest_gen;

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a field of the gc thread, gct)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        /*
            if (info->type==RBH)
                info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        */
        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;
        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */

    while (p < stack_end) {
        info  = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //     enter T
            //     blackhole T
            //                  update T
            //     GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we
            // check whether the value after evacuation is a BLACKHOLE,
            // and if not, we change the update frame to an stg_enter
            // frame that simply returns the value.  Hence, blackholing is
            // compulsory (otherwise we would have to check for thunks
            // too).
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND.  In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND.  Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            StgUpdateFrame *frame = (StgUpdateFrame *)p;
            StgClosure *v;

            evacuate(&frame->updatee);
            v = frame->updatee;
            if (GET_CLOSURE_TAG(v) != 0 ||
                (get_itbl(v)->type != BLACKHOLE)) {
                // blackholing is compulsory, see above.
                frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
            }
            ASSERT(v->header.info != &stg_TSO_info);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            if (major_gc)
                scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;
        case RET_BCO:
        {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }
        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area.  (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */

static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen = ws->gen;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
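/* Editor's note (added commentary): large objects are never copied,
 * only re-linked between lists, so scavenge_one() fixes up their
 * interior pointers in place and the object itself keeps its address
 * for the whole GC.
 */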
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int g;
    gen_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do

    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}