/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "MarkStack.h"
#include "Capability.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
/* -----------------------------------------------------------------------------
   Scavenging TSOs
   -------------------------------------------------------------------------- */
static void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}
static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    // update the pointer from the Task.
    if (tso->bound != NULL) {
        tso->bound->tso = tso;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->dirty = 1;
        scavenge_TSO_link(tso);
    } else {
        tso->dirty = 0;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}
/* -----------------------------------------------------------------------------
   Mutable arrays of pointers
   -------------------------------------------------------------------------- */
static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
    W_ m;
    rtsBool any_failed;
    StgPtr p, q;

    any_failed = rtsFalse;
    p = (StgPtr)&a->payload[0];
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    // the final card may cover only part of a full card's worth of elements
    q = (StgPtr)&a->payload[a->ptrs];
    for (; p < q; p++) {
        evacuate((StgClosure**)p);
    }
    if (gct->failed_to_evac) {
        any_failed = rtsTrue;
        *mutArrPtrsCard(a,m) = 1;
        gct->failed_to_evac = rtsFalse;
    } else {
        *mutArrPtrsCard(a,m) = 0;
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
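/* Worked example (editorial, illustrative only; assumes
 * MUT_ARR_PTRS_CARD_BITS == 7, i.e. 128 elements per card): an array of
 * 300 pointers spans mutArrPtrsCards(300) == 3 cards.  Cards 0 and 1
 * cover elements [0..127] and [128..255] and are handled by the loop
 * above; the partial card 2, covering [256..299], is handled by the
 * tail code.  A card byte is set to 1 iff some element in that card
 * still points into a younger generation.
 */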
// scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
{
    W_ m;
    StgPtr p, q;
    rtsBool any_failed;

    any_failed = rtsFalse;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
    {
        if (*mutArrPtrsCard(a,m) != 0) {
            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
                        (StgPtr)&a->payload[a->ptrs]);
            for (; p < q; p++) {
                evacuate((StgClosure**)p);
            }
            if (gct->failed_to_evac) {
                any_failed = rtsTrue;
                gct->failed_to_evac = rtsFalse;
            } else {
                *mutArrPtrsCard(a,m) = 0;
            }
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
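/* A note on the bitmap convention (editorial worked example, not from
 * the original source): in an argument/stack bitmap a 0 bit means
 * "pointer, follow it" and a 1 bit means "non-pointer, skip it".  So
 * with size == 4 and bitmap == 0b0110, slots 0 and 3 are evacuated
 * while slots 1 and 2 (the two 1 bits) are left alone.  The loops
 * below shift the bitmap right by one as they advance p one word at a
 * time.
 */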
STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p = (StgPtr)args;
    StgWord bitmap;
    nat size;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
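/* Editorial note: StgPAP and StgAP deliberately share the same
 * fun/n_args/payload layout, which is why a single
 * scavenge_PAP_payload() serves both; only the closure types (and
 * hence their entry behaviour outside the GC) differ.
 */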
/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
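/* Worked example (editorial, not in the original source): with
 * srt_bitmap == 0x5 and srt pointing at a 3-entry table, bits 0 and 2
 * are set, so entries 0 and 2 are evacuated and entry 1 is skipped.
 * A bitmap of (StgHalfWord)(-1) (all ones) is a sentinel meaning the
 * SRT is too big for a half-word bitmap, and a StgLargeSRT is used
 * instead.
 */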
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
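/* Block layout during scavenging (editorial sketch):
 *
 *    bd->start          bd->u.scan              bd->free
 *       |   scavenged       |    still to be       |   unused
 *       v    objects        v      scanned         v
 *      [====================|######################|...........]
 *
 * Objects are scavenged in place between u.scan and free; evacuating
 * their children may allocate into this very block past bd->free,
 * which is why the loop below re-reads bd->free (and ws->todo_free)
 * on every iteration.
 */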
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
    StgPtr p, q;
    StgInfoTable *info;
    generation *saved_evac_gen;
    rtsBool saved_eager_promotion;
    gen_workspace *ws;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
               bd->start, bd->gen_no, bd->u.scan);

    gct->evac_gen = bd->gen;
    saved_evac_gen = gct->evac_gen;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->gens[bd->gen->no];

    p = bd->u.scan;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(bd->link == NULL);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
        case CONSTR_1_0:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;
        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }
        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }
        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            // fall through
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY:
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;

        case CAF_BLACKHOLE:
        case BLACKHOLE:
            p += BLACKHOLE_sizeW();
            break;
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }
        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }
        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;

        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        {
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;

            p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            scavengeTSO(tso);
            p += tso_sizeW(tso);
            break;
        }
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVarWatchQueue);
            break;
        }
        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVar);
            break;
        }
        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecHeader);
            break;
        }
        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }
        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_gen = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgAtomicInvariant);
            break;
        }
        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgInvariantCheckQueue);
            break;
        }
        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }
        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, bd->gen_no);
            }
        }
    }
    if (p > bd->free)  {
        gct->copied += ws->todo_free - bd->free;
        bd->free = p;
    }

    debugTrace(DEBUG_gc, "   scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
    }
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
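/* Editorial sketch of the control flow (not part of the original
 * source):
 *
 *     while ((p = pop_mark_stack())) {
 *         ... scavenge the single object at p, in place ...
 *     }
 *
 * Objects land on the mark stack when they are evacuated into a
 * compacted generation, so scavenging one object may push more work
 * onto the stack; the loop simply runs until the stack is empty.
 */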
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    generation *saved_evac_gen;

    gct->evac_gen = oldest_gen;
    saved_evac_gen = gct->evac_gen;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;
        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;
        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }
        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }
        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }
        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }
        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }
        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager;
            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            break;
        }
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_gen = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
            }
        }
    } // while (p = pop_mark_stack())
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
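/* Editorial example of when scavenge_one() is used: an old-generation
 * MUT_VAR that has been written to point at a freshly allocated object
 * sits on its generation's mutable list; at the next minor GC we only
 * need to follow that single reference, which is exactly what
 * scavenge_one() does for each object handed to it from the list.
 */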
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    generation *saved_evac_gen = gct->evac_gen;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;
    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager;
        gct->failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        break;
    }
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_gen = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);
#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->scan;
            bdescr *start_bd = gen->scan_bd;
            nat size = 0;
            scavenge(&gen);
            if (start_bd != gen->scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->scan -
                    (P_)BLOCK_ROUND_DOWN(gen->scan);
            } else {
                size = gen->scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
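/* Editorial example: if an object in generation 1 is mutated to point
 * at an object in generation 0, the gen-1 object is recorded on a
 * mutable list (the "remembered set").  When generation 0 is collected,
 * scavenging that list evacuates the gen-0 target, so no old-to-young
 * pointer is missed even though generation 1 itself is not traced.
 */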
void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_gen = gen;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {

            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif
            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as TSOs,
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case MUT_ARR_PTRS_DIRTY:
            {
                rtsBool saved_eager;
                saved_eager = gct->eager_promotion;
                gct->eager_promotion = rtsFalse;

                scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);

                if (gct->failed_to_evac) {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                } else {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
                }

                gct->eager_promotion = saved_eager;
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            }
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Must be on the mutable list because its link
                    // field is dirty.
                    ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
                break;
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   We scavenge the static objects that have been evacuated so far;
   evacuating their pointers may push further static objects onto the
   static_objects list, so we keep going until that list is empty.
   -------------------------------------------------------------------------- */
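/* Editorial note: static (compile-time allocated) closures are never
 * copied, so instead of moving between to-space blocks they are
 * threaded onto lists through their static-link words.  Moving an
 * object from gct->static_objects onto gct->scavenged_static_objects
 * below is the static-object analogue of advancing the scan pointer
 * in a copied block.
 */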
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen = oldest_gen;

    /* keep going until we've scavenged all the objects on the linked
     * list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);

        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;

        switch (info -> type) {
        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
        }
        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
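/* Worked example (editorial): on a 64-bit machine a large bitmap of
 * size 70 uses two StgWords: bitmap[0] describes stack words 0..63 and
 * bitmap[1] describes words 64..69.  The walker below advances b and
 * reloads the bitmap word each time the index i crosses a word
 * boundary (i % BITS_IN(W_) == 0).
 */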
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
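/* Editorial sketch of what the walker sees (the exact frame layouts are
 * defined by the return-address info tables, not by this file):
 *
 *    p -->  [ RET_SMALL info | field | field ]   <- one activation record
 *           [ UPDATE_FRAME info | updatee ]
 *           ...
 *    stack_end
 *
 * Each frame's info table carries a bitmap (small, large, or
 * function-style for RET_FUN) saying which of its fields are pointers.
 */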
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that a sequence like the following occurs: threads
            // A and B both enter a thunk T, B blackholes it, A updates it,
            // and only then does a GC strike.
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND.  In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND.  Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            const StgInfoTable *i;
            StgClosure *updatee;

            updatee = ((StgUpdateFrame *)p)->updatee;
            i = updatee->header.info;
            if (!IS_FORWARDING_PTR(i)) {
                nat type;
                type = get_itbl(updatee)->type;
                if (type == IND) {
                    updatee->header.info = &stg_IND_PERM_info;
                } else if (type == IND_OLDGEN) {
                    updatee->header.info = &stg_IND_OLDGEN_PERM_info;
                }
            }
            evacuate(&((StgUpdateFrame *)p)->updatee);
            ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            if (major_gc)
                scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;
        case RET_BCO: {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }
        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area.  (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;

            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen = ws->gen;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
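/* Editorial summary of the search order implemented below, tried per
 * generation from oldest to youngest:
 *   1. a partially scanned todo block in our own workspace,
 *   2. large objects queued in our workspace (scavenge_large),
 *   3. a block from our local todo queue (grab_local_todo_block),
 *   4. and, with THREADED_RTS work-stealing enabled, a todo block
 *      stolen from another GC thread (steal_todo_block).
 */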
static rtsBool
scavenge_find_work (void)
{
    int g;
    gen_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }
#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do

    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */
void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}