/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "MarkStack.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "Sanity.h"
#include "Capability.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
/* -----------------------------------------------------------------------------
   Scavenging a TSO.
   -------------------------------------------------------------------------- */

STATIC_INLINE void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}
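/* Editor's note (not in the original source): treating tso->_link as weak
 * here is what allows a thread blocked forever on a black hole to become
 * unreachable and be reclaimed, at which point the RTS can raise a
 * BlockedIndefinitely exception; keeping every queued TSO alive would
 * make such deadlocked threads immortal.
 */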
static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list. We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->dirty = 1;
        scavenge_TSO_link(tso);
    } else {
        tso->dirty = 0;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}
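/* Editor's note (not in the original source): the dirty-flag protocol used
 * above. tso->dirty == 1 means the whole TSO may contain pointers into a
 * younger generation and must be rescanned from a mutable list, whereas
 * TSO_LINK_DIRTY means only the _link field is suspect;
 * scavenge_mutable_list() below relies on this distinction to avoid
 * rescanning whole thread stacks on every minor GC.
 */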
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
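/* Editor's example (illustrative only, not part of the original source):
 * in these small-bitmap walks a 0 bit means "this word is a pointer" and a
 * 1 bit means "non-pointer", read LSB-first as p advances. A hypothetical
 * three-word argument block laid out as [ptr, nonptr, ptr] therefore has
 * size == 3 and bitmap == 0x2 (binary 010):
 */
#if 0
    StgPtr  p      = (StgPtr)args;  // hypothetical argument block
    StgWord bitmap = 0x2;           // only the middle word is a non-pointer
    nat     size   = 3;
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);   // follows words 0 and 2
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
#endif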
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */

/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size = (nat)large_srt->l.size;
    p = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT. If srt_bitmap is zero, then there isn't an
 * srt field in the info table. That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
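/* Editor's sketch (illustrative, not in the original source): the DLL
 * special case above relies on the code generator tagging indirect SRT
 * entries with bit 0. The names below are hypothetical:
 */
#if 0
    StgClosure **imp  = &some_dll_import_cell;          // extra indirection
    StgClosure *entry = (StgClosure *)((W_)imp | 0x1);  // as stored in the SRT
    // scavenge_srt() undoes the tag and follows the extra indirection:
    evacuate((StgClosure **)((W_)entry & ~0x1));
#endif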
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
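/* Editor's sketch (illustrative, not part of the original source): the
 * protocol between the scavenger and evacuate(). evacuate() consults
 * gct->evac_gen; when it cannot promote an object that far it sets
 * gct->failed_to_evac, and the scavenger must then record the object
 * being scanned on a mutable list so it is revisited at the next GC:
 */
#if 0
    gct->evac_gen = bd->gen;                    // desired destination
    gct->failed_to_evac = rtsFalse;
    evacuate(&((StgClosure *)p)->payload[0]);   // may set failed_to_evac
    if (gct->failed_to_evac) {
        gct->failed_to_evac = rtsFalse;
        recordMutableGen_GC((StgClosure *)p, bd->gen_no);
    }
#endif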
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
    StgPtr p, q;
    StgInfoTable *info;
    generation *saved_evac_gen;
    rtsBool saved_eager_promotion;
    gen_workspace *ws;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
               bd->start, bd->gen_no, bd->u.scan);

    gct->scan_bd = bd;
    gct->evac_gen = bd->gen;
    saved_evac_gen = gct->evac_gen;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->gens[bd->gen->no];

    p = bd->u.scan;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(bd->link == NULL);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
        case CONSTR_1_0:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;
        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }
        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }
        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            // fall through
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY:
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;

        case CAF_BLACKHOLE:
        case BLACKHOLE:
            p += BLACKHOLE_sizeW();
            break;
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }

        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;
        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            scavengeTSO(tso);
            p += tso_sizeW(tso);
            break;
        }
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVarWatchQueue);
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVar);
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecHeader);
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_gen = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgAtomicInvariant);
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgInvariantCheckQueue);
            break;
        }
        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }
        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, bd->gen_no);
            }
        }
    }
    if (p > bd->free) {
        gct->copied += ws->todo_free - bd->free;
        bd->free = p;
    }

    debugTrace(DEBUG_gc, "   scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
    }

    gct->scan_bd = NULL;
}
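/* Editor's note (illustrative, not in the original source): the invariant
 * maintained for each block being scavenged is
 *
 *     bd->start <= bd->u.scan <= bd->free
 *
 * Everything below u.scan has been scavenged; objects between u.scan and
 * free have been evacuated into the block but not yet scanned. The loop
 * above keeps re-reading bd->free precisely because scavenging can append
 * new objects to the very block being scanned.
 */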
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static GNUC_ATTR_HOT void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    generation *saved_evac_gen;

    gct->evac_gen = oldest_gen;
    saved_evac_gen = gct->evac_gen;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }
        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            break;
        }
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_gen = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
            }
        }
    } // while (p = pop_mark_stack())
}
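/* Editor's note (illustrative, not in the original source): the mark stack
 * only comes into play when a generation is being compacted. Because a
 * compacted generation is not copied, there is no linear scan pointer to
 * advance through it; instead evacuate() pushes each object that lands in
 * the compacted generation, and scavenge_loop() drains them through the
 * function above.
 */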
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers. Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    generation *saved_evac_gen = gct->evac_gen;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);
    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;
    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        break;
    }
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_gen = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated. See #3424.
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);

#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->scan;
            bdescr *start_bd = gen->scan_bd;
            nat size = 0;
            scavenge(&gen);
            if (start_bd != gen->scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->scan -
                    (P_)BLOCK_ROUND_DOWN(gen->scan);
            } else {
                size = gen->scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }
    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
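/* Editor's sketch (illustrative): how the callers below use the result of
 * scavenge_one(). rtsTrue means "some pointee could not be promoted far
 * enough", so the object must be kept on a mutable list and revisited at
 * the next GC; gen_no here is a hypothetical generation number:
 */
#if 0
    if (scavenge_one(p)) {
        recordMutableGen_GC((StgClosure *)p, gen_no);  // revisit next GC
    }
#endif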
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots. We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_gen = gen;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif
            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged. Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Must be on the mutable list because its link
                    // field is dirty.
                    ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }
            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
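/* Editor's note (illustrative, not in the original source): with, say,
 * three generations and N == 0, the loop above walks mut_lists[2] before
 * mut_lists[1], so any gen-1 objects dirtied while scavenging gen 2 are
 * still seen when gen 1's list is processed, which is how the reverse
 * order reduces spurious old-to-new pointers.
 */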
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Static objects reachable from the collected generations are treated as
   roots: each is scavenged in turn and moved from the static_objects list
   onto the scavenged_static_objects list.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen = oldest_gen;

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list. Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;
        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation. We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
        }
        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }
        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it. We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */

    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk. We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //     enter T
            //                  blackhole T
            //     update T
            //     GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused(). We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value. So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND. In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND. Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            nat type;
            const StgInfoTable *i;
            StgClosure *updatee;

            updatee = ((StgUpdateFrame *)p)->updatee;
            i = updatee->header.info;
            if (!IS_FORWARDING_PTR(i)) {
                type = get_itbl(updatee)->type;
                if (type == IND) {
                    updatee->header.info = &stg_IND_PERM_info;
                } else if (type == IND_OLDGEN) {
                    updatee->header.info = &stg_IND_OLDGEN_PERM_info;
                }
            }
            evacuate(&((StgUpdateFrame *)p)->updatee);
            ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            if (major_gc)
                scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;
        case RET_BCO:
        {
            StgBCO *bco;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }
        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area. (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p = (P_)&((StgRetDyn *)p)->payload[0];
            size = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
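        /* Editor's sketch (illustrative, not in the original source):
         * decoding a RET_DYN liveness word with the macros used above.
         * The macros are real RTS macros; the variable names are made up:
         */
#if 0
        StgWord dyn     = ((StgRetDyn *)frame)->liveness;  // hypothetical frame
        StgWord bmp     = RET_DYN_LIVENESS(dyn);  // bitmap for bitmapped area
        StgWord nonptrs = RET_DYN_NONPTRS(dyn);   // non-pointer words to skip
        StgWord ptrs    = RET_DYN_PTRS(dyn);      // pointer words to evacuate
#endif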
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge(). Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen = ws->gen;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list. This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted. This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int g;
    gen_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do

    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work. Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}
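/* Editor's sketch (illustrative, not in the original source): how a GC
 * thread drives this module, greatly simplified from the actual driver
 * in GC.c:
 */
#if 0
    scavenge_capability_mut_lists(cap);  // roots from older generations
    scavenge_loop();                     // run until no work remains anywhere
#endif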