/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
#include "MarkStack.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "Sanity.h"
#include "Capability.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size );
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
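
/* With THREADED_RTS but not PARALLEL_GC, the GC itself runs on a single
 * thread, so the names above are aliased to their sequential ("...1")
 * variants, which avoid the atomic operations needed by the parallel
 * versions.
 */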
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
STATIC_INLINE void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}
static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->dirty = 1;
        scavenge_TSO_link(tso);
    } else {
        tso->dirty = 0;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
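
/* A note on the bitmap convention used above (and in the other bitmap
 * walkers below): a 0 bit marks a pointer word, which we evacuate, and a
 * 1 bit marks a non-pointer; the bitmap is consumed least-significant
 * bit first as we step through the payload.  For example (illustrative
 * values only):
 *
 *     size = 4, bitmap = 0x6 (binary 0110)
 *       => words 0 and 3 are pointers, words 1 and 2 are not.
 */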
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}
STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
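
/* StgAP and StgPAP share the same shape (a fun field followed by an
 * argument payload), which is why a single scavenge_PAP_payload() can
 * serve both closure types.
 */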
/* -----------------------------------------------------------------------------
   Scavenge SRTs
   -------------------------------------------------------------------------- */
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
  nat bitmap;
  StgClosure **p;

  bitmap = srt_bitmap;
  p = srt;

  if (bitmap == (StgHalfWord)(-1)) {
      scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
      return;
  }

  while (bitmap != 0) {
      if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
          // Special-case to handle references to closures hiding out in DLLs, since
          // double indirections required to get at those. The code generator knows
          // which is which when generating the SRT, so it stores the (indirect)
          // reference to the DLL closure in the table by first adding one to it.
          // We check for this here, and undo the addition before evacuating it.
          //
          // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
          // closure that's fixed at link-time, and no extra magic is required.
          if ( (unsigned long)(*srt) & 0x1 ) {
              evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
          } else {
              evacuate(p);
          }
#else
          evacuate(p);
#endif
      }
      p++;
      bitmap = bitmap >> 1;
  }
}
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
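
/* SRTs (static reference tables) list the static closures reachable from
 * a function or thunk.  Static closures never move and are only
 * (re)collected during a major GC, which is why both functions above
 * return immediately when !major_gc.
 */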
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_step is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_step back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
  StgPtr p, q;
  StgInfoTable *info;
  step *saved_evac_step;
  rtsBool saved_eager_promotion;
  step_workspace *ws;

  debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
             bd->start, bd->gen_no, bd->step->no, bd->u.scan);

  gct->scan_bd = bd;
  gct->evac_step = bd->step;
  saved_evac_step = gct->evac_step;
  saved_eager_promotion = gct->eager_promotion;
  gct->failed_to_evac = rtsFalse;

  ws = &gct->steps[bd->step->abs_no];

  p = bd->u.scan;
  // we might be evacuating into the very object that we're
  // scavenging, so we have to check the real bd->free pointer each
  // time around the loop.
  while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

    ASSERT(bd->link == NULL);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    ASSERT(gct->thunk_selector_depth == 0);

    q = p;
    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        p += sizeofW(StgMVar);
        break;
    }
    case FUN_2_0:
        scavenge_fun_srt(info);
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_2_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[1]);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case CONSTR_2_0:
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_1_0:
        scavenge_fun_srt(info);
    case CONSTR_1_0:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_1:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_0_1:
        scavenge_fun_srt(info);
    case CONSTR_0_1:
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_2:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_0_2:
        scavenge_fun_srt(info);
    case CONSTR_0_2:
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_1:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_1_1:
        scavenge_fun_srt(info);
    case CONSTR_1_1:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;
    case FUN:
        scavenge_fun_srt(info);
        goto gen_obj;

    case THUNK:
    {
        StgPtr end;

        scavenge_thunk_srt(info);
        end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    gen_obj:
    case CONSTR:
    case WEAK:
    case STABLE_NAME:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }
    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        evacuate((StgClosure **)&bco->instrs);
        evacuate((StgClosure **)&bco->literals);
        evacuate((StgClosure **)&bco->ptrs);
        p += bco_sizeW(bco);
        break;
    }
    case IND_PERM:
        if (bd->gen_no != 0) {
#ifdef PROFILING
            // @LDV profiling
            // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
            // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
            LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
            //
            // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
            //
            SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

            // We pretend that p has just been created.
            LDV_RECORD_CREATE((StgClosure *)p);
        }
        // fall through
    case IND_OLDGEN_PERM:
        evacuate(&((StgInd *)p)->indirectee);
        p += sizeofW(StgInd);
        break;
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        p += sizeofW(StgMutVar);
        break;
    case CAF_BLACKHOLE:
    case BLACKHOLE:
        p += BLACKHOLE_sizeW();
        break;
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
    }
    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        p += arr_words_sizeW((StgArrWords *)p);
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
        // follow everything
    {
        StgPtr next;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        scavengeTSO(tso);
        p += tso_sizeW(tso);
        break;
    }
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTVarWatchQueue);
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTVar);
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecHeader);
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecChunk);
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_step = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgAtomicInvariant);
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgInvariantCheckQueue);
        break;
    }
    default:
        barf("scavenge: unimplemented/strange closure type %d @ %p",
             info->type, p);
    }

    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (gct->failed_to_evac) {
        gct->failed_to_evac = rtsFalse;
        if (bd->gen_no > 0) {
            recordMutableGen_GC((StgClosure *)q, bd->gen_no);
        }
    }
  }
  if (p > bd->free)  {
      gct->copied += ws->todo_free - bd->free;
      bd->free = p;
  }

  debugTrace(DEBUG_gc, "   scavenged %ld bytes",
             (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

  // update stats: this is a block that has been scavenged
  gct->scanned += bd->free - bd->u.scan;
  bd->u.scan = bd->free;

  if (bd != ws->todo_bd) {
      // we're not going to evac any more objects into
      // this block, so push it now.
      push_scanned_block(bd, ws);
  }

  gct->scan_bd = NULL;
}
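
/* scavenge_block() is the heart of a Cheney-style copying collector: the
 * scan pointer chases the allocation pointer through the to-space block,
 * and evacuating the objects under the scan pointer may allocate into the
 * very same block, which is why the loop re-reads bd->free on every
 * iteration.
 */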
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;

    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;
        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }
        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }

        case CAF_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            break;
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
            scavengeTSO((StgTSO*)p);
            break;
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no);
            }
        }
    } // while (p = pop_mark_stack())
}
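
/* The mark stack is only in use when the oldest generation is being
 * collected by mark/compact rather than by copying, so evac_step stays
 * pinned to the oldest generation for the whole loop: everything
 * reachable from here is promoted as far as it can go.
 */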
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
1078 switch (info->type) {
1083 rtsBool saved_eager_promotion = gct->eager_promotion;
1085 StgMVar *mvar = ((StgMVar *)p);
1086 gct->eager_promotion = rtsFalse;
1087 evacuate((StgClosure **)&mvar->head);
1088 evacuate((StgClosure **)&mvar->tail);
1089 evacuate((StgClosure **)&mvar->value);
1090 gct->eager_promotion = saved_eager_promotion;
1092 if (gct->failed_to_evac) {
1093 mvar->header.info = &stg_MVAR_DIRTY_info;
1095 mvar->header.info = &stg_MVAR_CLEAN_info;
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case FUN:
    case FUN_1_0:                       // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case CAF_BLACKHOLE:
    case BLACKHOLE:
        break;
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        break;
    }
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_step = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);

#if 0 && defined(DEBUG)
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
      }
#endif
      break;
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
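
/* Note on the return value: scavenge_one() returns rtsTrue ("no luck")
 * when some pointer in the object could not be promoted out of a younger
 * generation; callers respond by keeping the object on a mutable list so
 * that it is scavenged again at the next collection.
 */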
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif
            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Must be on the mutable list because its link
                    // field is dirty.
                    ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
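
/* The lists walked here are the capability's saved_mut_lists: the mutable
 * lists as they stood when this collection began (each capability gets a
 * fresh mut_list for mutations that happen during and after the GC).
 */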
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Evacuated static objects are chained onto gct->static_objects; we
   scavenge each one and move it onto gct->scavenged_static_objects.
   Scavenging may evacuate further static objects, so we keep going
   until the list is empty.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
  StgClosure* p;
  const StgInfoTable *info;

  debugTrace(DEBUG_gc, "scavenging static objects");

  /* Always evacuate straight to the oldest generation for static
   * objects */
  gct->evac_step = &oldest_gen->steps[0];

  /* keep going until we've scavenged all the objects on the linked
     list... */

  while (1) {

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list after each evacuation...
     * (static_objects is a global)
     */
    p = gct->static_objects;
    if (p == END_OF_STATIC_LIST) {
        break;
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);
    /*
        if (info->type==RBH)
        info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
    */
    // make sure the info pointer is into text space

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    gct->static_objects = *STATIC_LINK(info,p);
    *STATIC_LINK(info,p) = gct->scavenged_static_objects;
    gct->scavenged_static_objects = p;
    switch (info -> type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        evacuate(&ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list of the oldest generation.  We
         * leave it *on* the scavenged_static_objects list, though,
         * in case we visit this object again.
         */
        if (gct->failed_to_evac) {
          gct->failed_to_evac = rtsFalse;
          recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
        }
        break;
      }

    case THUNK_STATIC:
      scavenge_thunk_srt(info);
      break;

    case FUN_STATIC:
      scavenge_fun_srt(info);
      break;

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        // evacuate the pointers
        for (q = (P_)p->payload; q < next; q++) {
            evacuate((StgClosure **)q);
        }
        break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(gct->failed_to_evac == rtsFalse);
  }
}
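
/* Static objects live outside the heap, threaded together through the
 * STATIC_LINK field in each closure, so moving one between the
 * static_objects and scavenged_static_objects lists is just a pointer
 * swizzle: the object itself never moves.
 */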
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  const StgRetInfoTable* info;
  StgWord bitmap;
  nat size;

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with an activation record.
   */

  while (p < stack_end) {
    info  = get_ret_itbl((StgClosure *)p);

    switch (info->i.type) {
    case UPDATE_FRAME:
        // In SMP, we can get update frames that point to indirections
        // when two threads evaluate the same thunk.  We do attempt to
        // discover this situation in threadPaused(), but it's
        // possible that the following sequence occurs:
        //
        //        A             B
        //                  enter T
        //     enter T
        //     blackhole T
        //                  update T
        //     GC
        //
        // Now T is an indirection, and the update frame is already
        // marked on A's stack, so we won't traverse it again in
        // threadPaused().  We could traverse the whole stack again
        // before GC, but that seems like overkill.
        //
        // Scavenging this update frame as normal would be disastrous;
        // the updatee would end up pointing to the value.  So we turn
        // the indirection into an IND_PERM, so that evacuate will
        // copy the indirection into the old generation instead of
        // discarding it.
        //
        // Note [upd-black-hole]
        // One slight hiccup is that the THUNK_SELECTOR machinery can
        // overwrite the updatee with an IND.  In parallel GC, this
        // could even be happening concurrently, so we can't check for
        // the IND.  Fortunately if we assume that blackholing is
        // happening (either lazy or eager), then we can be sure that
        // the updatee is never a THUNK_SELECTOR and we're ok.
        // NB. this is a new invariant: blackholing is not optional.
    {
        nat type;
        const StgInfoTable *i;
        StgClosure *updatee;

        updatee = ((StgUpdateFrame *)p)->updatee;
        i = updatee->header.info;
        if (!IS_FORWARDING_PTR(i)) {
            type = get_itbl(updatee)->type;
            if (type == IND) {
                updatee->header.info = &stg_IND_PERM_info;
            } else if (type == IND_OLDGEN) {
                updatee->header.info = &stg_IND_OLDGEN_PERM_info;
            }
        }
        evacuate(&((StgUpdateFrame *)p)->updatee);
        ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
        p += sizeofW(StgUpdateFrame);
        continue;
    }
      // small bitmap (< 32 entries, or 64 on a 64-bit machine)
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case RET_SMALL:
        bitmap = BITMAP_BITS(info->i.layout.bitmap);
        size   = BITMAP_SIZE(info->i.layout.bitmap);
        // NOTE: the payload starts immediately after the info-ptr, we
        // don't have an StgHeader in the same sense as a heap closure.
        p++;
        p = scavenge_small_bitmap(p, size, bitmap);

    follow_srt:
        if (major_gc)
            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
        continue;
    case RET_BCO: {
        StgBCO *bco;
        nat size;

        p++;
        evacuate((StgClosure **)p);
        bco = (StgBCO *)*p;
        p++;
        size = BCO_BITMAP_SIZE(bco);
        scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
        p += size;
        continue;
    }
      // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
    case RET_BIG:
    {
        nat size;

        size = GET_LARGE_BITMAP(&info->i)->size;
        p++;
        scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
        p += size;
        // and don't forget to follow the SRT
        goto follow_srt;
    }
      // Dynamic bitmap: the mask is stored on the stack, and
      // there are a number of non-pointers followed by a number
      // of pointers above the bitmapped area.  (see StgMacros.h,
      // HEAP_CHK_GEN).
    case RET_DYN:
    {
        StgWord dyn;
        dyn = ((StgRetDyn *)p)->liveness;

        // traverse the bitmap first
        bitmap = RET_DYN_LIVENESS(dyn);
        p      = (P_)&((StgRetDyn *)p)->payload[0];
        size   = RET_DYN_BITMAP_SIZE;
        p = scavenge_small_bitmap(p, size, bitmap);

        // skip over the non-ptr words
        p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            evacuate((StgClosure **)p);
            p++;
        }
        continue;
    }
    case RET_FUN:
    {
        StgRetFun *ret_fun = (StgRetFun *)p;
        StgFunInfoTable *fun_info;

        evacuate(&ret_fun->fun);
        fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
        p = scavenge_arg_block(fun_info, ret_fun->payload);
        goto follow_srt;
    }

    default:
        barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
    }
  }
}
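
/* Each stack frame is self-describing: the info table of the return
 * address at the head of the frame carries the layout bitmap, so the
 * walker above can step from frame to frame without any separate
 * frame-size metadata.
 */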
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_step set by caller; similar games played with evac_step as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_step will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (step_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_step = ws->step;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
        dbl_link_onto(bd, &ws->step->scavenged_large_objects);
        ws->step->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->step->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
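
/* Large objects are never copied during GC; evacuating one merely unlinks
 * its block descriptor from one list and relinks it onto another.  That is
 * why scavenging a large object reduces to a single scavenge_one() call on
 * the object in place.
 */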
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest step that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that has
   work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int s;
    step_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (s = total_steps-1; s >= 0; s--) {
            if (s == 0 && RtsFlags.GcFlags.generations > 1) {
                continue;
            }
            if ((bd = steal_todo_block(s)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do

    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;
    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}