/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
#include "MarkStack.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "Sanity.h"
#include "Capability.h"
#include "LdvProfile.h"

static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size );

#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
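
/* NB. under THREADED_RTS this file appears to be compiled twice: once
 * as plain Scav.c (no PARALLEL_GC), where the defines above give these
 * entry points their '1'-suffixed single-threaded names, and once with
 * PARALLEL_GC (see Scav_thr.c) for the multi-threaded collector.
 */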

/* -----------------------------------------------------------------------------
   Scavenging
   -------------------------------------------------------------------------- */
STATIC_INLINE void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}

static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list. We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    // update the pointer from the Task.
    if (tso->bound != NULL) {
        tso->bound->tso = tso;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge the current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->dirty = 1;
        scavenge_TSO_link(tso);
    } else {
        tso->dirty = 0;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}

/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}

/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

/* evacuate the SRT. If srt_bitmap is zero, then there isn't an
 * srt field in the info table. That's ok, because we'll
 * never dereference it.
 */
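
/* For example: an srt_bitmap of 0x5 (binary 101) marks SRT entries 0
 * and 2 as live, so those two entries are evacuated and entry 1 is
 * skipped. An illustrative (not compiled) sketch of the walk:
 */
#if 0
    StgClosure **p = srt;
    nat bitmap = 0x5;                // entries 0 and 2 are live
    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
            evacuate(p);             // live entry: follow it
        }
        p++;                         // next SRT entry
        bitmap = bitmap >> 1;        // next bit
    }
#endif
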
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}

/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
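
/* An illustrative caller (cf. scavenge_find_work() below): a todo
 * block is handed to scavenge_block() whenever its scan pointer lags
 * behind its free pointer.
 */
#if 0
    if (ws->todo_bd->u.scan < ws->todo_free) {
        scavenge_block(ws->todo_bd);
    }
#endif
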
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
    StgPtr p, q;
    StgInfoTable *info;
    generation *saved_evac_gen;
    rtsBool saved_eager_promotion;
    gen_workspace *ws;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
               bd->start, bd->gen_no, bd->u.scan);

    gct->scan_bd = bd;
    gct->evac_gen = bd->gen;
    saved_evac_gen = gct->evac_gen;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->gens[bd->gen->no];

    p = bd->u.scan;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
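    //
    // Pictorially (illustrative):
    //
    //   bd->u.scan        p                bd->free (or ws->todo_free)
    //       |             |                    |
    //       v             v                    v
    //   +-------------+-----------------+-------------+
    //   |  scavenged  | to be scavenged | free space  |
    //   +-------------+-----------------+-------------+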
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(bd->link == NULL);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
        case CONSTR_1_0:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }

        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // @LDV profiling
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            // fall through
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY:
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;

        case CAF_BLACKHOLE:
        case BLACKHOLE:
            p += BLACKHOLE_sizeW();
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }

        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;

        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            scavengeTSO(tso);
            p += tso_sizeW(tso);
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVarWatchQueue);
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVar);
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecHeader);
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_gen = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgAtomicInvariant);
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgInvariantCheckQueue);
            break;
        }

        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
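        // Example of (b): an old-generation cons cell whose head still
        // points into a younger generation after scavenging must go on
        // the mutable list until that young object gets promoted.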
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, bd->gen_no);
            }
        }
    }

    if (p > bd->free) {
        gct->copied += ws->todo_free - bd->free;
        bd->free = p;
    }

    debugTrace(DEBUG_gc, " scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
    }

    gct->scan_bd = NULL;
}

/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    generation *saved_evac_gen;

    gct->evac_gen = oldest_gen;
    saved_evac_gen = gct->evac_gen;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }

        case CAF_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }

        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_gen = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
            }
        }
    } // while (p = pop_mark_stack())
}

/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers. Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    generation *saved_evac_gen = gct->evac_gen;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case STABLE_NAME:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case CAF_BLACKHOLE:
    case BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }

    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        break;
    }

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_gen = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated. See #3424.
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);
#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        {
            /* Debugging code to print out the size of the thing we just
             * promoted
             */
            StgPtr start = gen->scan;
            bdescr *start_bd = gen->scan_bd;
            nat size = 0;
            if (start_bd != gen->scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->scan -
                    (P_)BLOCK_ROUND_DOWN(gen->scan);
            } else {
                size = gen->scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}

/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots. We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
static void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_gen = gen;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif

            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged. Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Must be on the mutable list because its link
                    // field is dirty.
                    ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}

void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}

/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   We scavenge the static object list, evacuating the pointers in each
   static object and moving each object we scavenge from static_objects
   onto the scavenged_static_objects list.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen = oldest_gen;

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list. Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        /*
            if (info->type==RBH)
                info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        */
        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;

        switch (info->type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation. We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}

/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
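
/* In these stack bitmaps a 0 bit means "pointer" (evacuate it) and a
 * 1 bit means "non-pointer" (skip it), with bit 0 describing the first
 * payload word. For example (illustrative only), a three-word payload
 * [ptr, W_, ptr] has size 3 and bitmap 0b010: words 0 and 2 are
 * evacuated, word 1 is stepped over.
 */
#if 0
    p = scavenge_small_bitmap(p, 3, 0x2 /* 0b010 */);
#endif
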
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it. We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */

    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk. We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //     enter T
            //     blackhole T
            //                  update T
            //     GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused(). We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value. So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND. In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND. Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            nat type;
            const StgInfoTable *i;
            StgClosure *updatee;

            updatee = ((StgUpdateFrame *)p)->updatee;
            i = updatee->header.info;
            if (!IS_FORWARDING_PTR(i)) {
                type = get_itbl(updatee)->type;
                if (type == IND) {
                    updatee->header.info = &stg_IND_PERM_info;
                } else if (type == IND_OLDGEN) {
                    updatee->header.info = &stg_IND_OLDGEN_PERM_info;
                }
            }
            evacuate(&((StgUpdateFrame *)p)->updatee);
            ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
            p += sizeofW(StgUpdateFrame);
            continue;
        }

        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            if (major_gc)
                scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;

        case RET_BCO: {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }

        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area. (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }

        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}

/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge(). Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen = ws->gen;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list. This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}

/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted. This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int g;
    gen_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do
    return did_anything;
}

/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work. Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}