/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "MarkStack.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "Sanity.h"
#include "Capability.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size );
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
/* -----------------------------------------------------------------------------
   Scavenging
   -------------------------------------------------------------------------- */
static void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}
static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    // update the pointer from the Task.
    if (tso->bound != NULL) {
        tso->bound->tso = tso;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge the current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->dirty = 1;
        scavenge_TSO_link(tso);
    } else {
        tso->dirty = 0;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}
/* -----------------------------------------------------------------------------
   Mutable arrays of pointers
   -------------------------------------------------------------------------- */
static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
    W_ m;
    rtsBool any_failed;
    StgPtr p, q;

    any_failed = rtsFalse;
    p = (StgPtr)&a->payload[0];
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    q = (StgPtr)&a->payload[a->ptrs];
    for (; p < q; p++) {
        evacuate((StgClosure**)p);
    }
    if (gct->failed_to_evac) {
        any_failed = rtsTrue;
        *mutArrPtrsCard(a,m) = 1;
        gct->failed_to_evac = rtsFalse;
    } else {
        *mutArrPtrsCard(a,m) = 0;
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
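/* Illustrative sketch (added commentary, not part of the original RTS
 * code): how the card table used above partitions a MUT_ARR_PTRS
 * payload.  Each card covers 2^MUT_ARR_PTRS_CARD_BITS elements, and a
 * non-zero card byte means "some element in this chunk may point into a
 * younger generation; rescan this chunk at the next minor GC".
 * card_index() is a hypothetical helper, written only to show the
 * arithmetic:
 */
#if 0
static W_ card_index (W_ element)
{
    // element i belongs to card i >> MUT_ARR_PTRS_CARD_BITS, so a
    // write barrier that dirties element i marks exactly one card byte.
    return element >> MUT_ARR_PTRS_CARD_BITS;
}
#endif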
// scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
{
    W_ m;
    StgPtr p, q;
    rtsBool any_failed;

    any_failed = rtsFalse;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
    {
        if (*mutArrPtrsCard(a,m) != 0) {
            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
                        (StgPtr)&a->payload[a->ptrs]);
            for (; p < q; p++) {
                evacuate((StgClosure**)p);
            }
            if (gct->failed_to_evac) {
                any_failed = rtsTrue;
                gct->failed_to_evac = rtsFalse;
            } else {
                *mutArrPtrsCard(a,m) = 0;
            }
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
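/* Worked example (added commentary, not RTS code): layout bitmaps use a
 * 0 bit for a pointer word (which must be evacuated) and a 1 bit for a
 * non-pointer word.  For a hypothetical three-word argument block
 *
 *     args:   [ closure ptr | unboxed int | closure ptr ]
 *     size:   3
 *     bitmap: 0b010          (bit i describes word i)
 *
 * the walk above evacuates words 0 and 2 and steps over word 1.
 */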
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
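// (Added note) A PAP and an AP have the same payload layout -- a
// function closure plus an argument block described by that function's
// bitmap -- which is why a single scavenge_PAP_payload() serves both
// closure types.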
/* -----------------------------------------------------------------------------
   Scavenging SRTs
   -------------------------------------------------------------------------- */
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
  nat bitmap;
  StgClosure **p;

  bitmap = srt_bitmap;
  p = srt;

  if (bitmap == (StgHalfWord)(-1)) {
      scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
      return;
  }

  while (bitmap != 0) {
      if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
          // Special-case to handle references to closures hiding out in DLLs, since
          // double indirections are required to get at those. The code generator knows
          // which is which when generating the SRT, so it stores the (indirect)
          // reference to the DLL closure in the table by first adding one to it.
          // We check for this here, and undo the addition before evacuating it.
          //
          // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
          // closure that's fixed at link-time, and no extra magic is required.
          if ( (unsigned long)(*srt) & 0x1 ) {
              evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
          } else {
              evacuate(p);
          }
#else
          evacuate(p);
#endif
      }
      p++;
      bitmap = bitmap >> 1;
  }
}
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
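// (Added note) Both SRT walkers above return immediately unless this is
// a major GC: SRT entries reference only static closures, and static
// objects are only scavenged when the whole heap is being collected, so
// SRTs can safely be ignored during minor collections.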
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
  StgPtr p, q;
  StgInfoTable *info;
  generation *saved_evac_gen;
  rtsBool saved_eager_promotion;
  gen_workspace *ws;

  debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
             bd->start, bd->gen_no, bd->u.scan);

  gct->scan_bd = bd;
  gct->evac_gen = bd->gen;
  saved_evac_gen = gct->evac_gen;
  saved_eager_promotion = gct->eager_promotion;
  gct->failed_to_evac = rtsFalse;

  ws = &gct->gens[bd->gen->no];

  p = bd->u.scan;

  // we might be evacuating into the very object that we're
  // scavenging, so we have to check the real bd->free pointer each
  // time around the loop.
  while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

    ASSERT(bd->link == NULL);
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    ASSERT(gct->thunk_selector_depth == 0);

    q = p;
    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        p += sizeofW(StgMVar);
        break;
    }
    case FUN_2_0:
        scavenge_fun_srt(info);
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_2_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[1]);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case CONSTR_2_0:
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_1_0:
        scavenge_fun_srt(info);
    case CONSTR_1_0:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_1:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_0_1:
        scavenge_fun_srt(info);
    case CONSTR_0_1:
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_2:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_0_2:
        scavenge_fun_srt(info);
    case CONSTR_0_2:
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_1:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_1_1:
        scavenge_fun_srt(info);
    case CONSTR_1_1:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;
    case FUN:
        scavenge_fun_srt(info);
        goto gen_obj;

    case THUNK:
    {
        StgPtr end;

        scavenge_thunk_srt(info);
        end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    gen_obj:
    case CONSTR:
    case WEAK:
    case PRIM:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    case BCO:
    {
        StgBCO *bco = (StgBCO *)p;
        evacuate((StgClosure **)&bco->instrs);
        evacuate((StgClosure **)&bco->literals);
        evacuate((StgClosure **)&bco->ptrs);
        p += bco_sizeW(bco);
        break;
    }
    case IND_PERM:
        if (bd->gen_no != 0) {
#ifdef PROFILING
            // @LDV profiling
            // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
            // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
            LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
            //
            // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
            //
            SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

            // We pretend that p has just been created.
            LDV_RECORD_CREATE((StgClosure *)p);
        }
        // fall through
    case IND_OLDGEN_PERM:
        evacuate(&((StgInd *)p)->indirectee);
        p += sizeofW(StgInd);
        break;
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        p += sizeofW(StgMutVar);
        break;
    case CAF_BLACKHOLE:
    case BLACKHOLE:
        p += BLACKHOLE_sizeW();
        break;
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
    }
    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }
    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        p += arr_words_sizeW((StgArrWords *)p);
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        scavengeTSO(tso);
        p += tso_sizeW(tso);
        break;
    }
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTVarWatchQueue);
        break;
    }
    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTVar);
        break;
    }
    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecHeader);
        break;
    }
    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecChunk);
        break;
    }
    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_gen = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgAtomicInvariant);
        break;
    }
    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgInvariantCheckQueue);
        break;
    }
    default:
        barf("scavenge: unimplemented/strange closure type %d @ %p",
             info->type, p);
    }
    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (gct->failed_to_evac) {
        gct->failed_to_evac = rtsFalse;
        if (bd->gen_no > 0) {
            recordMutableGen_GC((StgClosure *)q, bd->gen_no);
        }
    }
  }
  if (p > bd->free) {
      gct->copied += ws->todo_free - bd->free;
      bd->free = p;
  }

  debugTrace(DEBUG_gc, "   scavenged %ld bytes",
             (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

  // update stats: this is a block that has been scavenged
  gct->scanned += bd->free - bd->u.scan;
  bd->u.scan = bd->free;

  if (bd != ws->todo_bd) {
      // we're not going to evac any more objects into
      // this block, so push it now.
      push_scanned_block(bd, ws);
  }
}
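/* Summary sketch (added commentary, not RTS code): the CLEAN/DIRTY and
 * FROZEN/FROZEN0 info-pointer swaps in the cases above all follow one
 * pattern, driven by gct->failed_to_evac.  dirty_variant and
 * clean_variant below are placeholders for the matching pair of info
 * tables (e.g. &stg_MUT_ARR_PTRS_DIRTY_info / _CLEAN_info):
 */
#if 0
const StgInfoTable *new_info =
    gct->failed_to_evac ? dirty_variant   /* rescan at the next minor GC  */
                        : clean_variant;  /* minor GCs may skip it        */
SET_INFO((StgClosure *)q, new_info);
#endif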
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    generation *saved_evac_gen;

    gct->evac_gen = oldest_gen;
    saved_evac_gen = gct->evac_gen;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;
        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case PRIM:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }
        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }
        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }
        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager;
            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            break;
        }
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_gen = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_gen = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
            }
        }
    } // while (p = pop_mark_stack())
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    generation *saved_evac_gen = gct->evac_gen;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case FUN:
    case FUN_1_0:                       // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case PRIM:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;
    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager;
        gct->failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        break;
    }
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_gen = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_gen = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);
#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->scan;
            bdescr *start_bd = gen->scan_bd;
            nat size = 0;

            if (start_bd != gen->scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->scan -
                    (P_)BLOCK_ROUND_DOWN(gen->scan);
            } else {
                size = gen->scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
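/* Usage sketch (added commentary, not RTS code): scavenge_one() returns
 * rtsTrue when it failed to promote everything the object points to, in
 * which case the caller must keep the object on the mutable list --
 * exactly the pattern used by scavenge_mutable_list() and
 * scavenge_large() below:
 */
#if 0
if (scavenge_one(p)) {
    recordMutableGen_GC((StgClosure *)p, gen->no);  /* revisit at next GC */
}
#endif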
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
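/* Illustration (added commentary, not RTS code): why mutable lists must
 * be roots.  The names m and y below are hypothetical; m lives in an
 * old generation and y is freshly allocated in generation 0:
 */
#if 0
m->var = y;   /* mutation creates an old-to-young pointer */
/* A minor GC scans only the young generation, so y would be missed
 * unless m was recorded on its generation's mutable list by the write
 * barrier -- which is the list this function scavenges. */
#endif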
void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_gen = gen;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {

            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif
            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as TSO,
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case MUT_ARR_PTRS_DIRTY:
            {
                rtsBool saved_eager;
                saved_eager = gct->eager_promotion;
                gct->eager_promotion = rtsFalse;

                scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);

                if (gct->failed_to_evac) {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                } else {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
                }

                gct->eager_promotion = saved_eager;
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            }
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Must be on the mutable list because its link
                    // field is dirty.
                    ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }
            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   We scavenge the linked list of static objects that have been
   evacuated so far (gct->static_objects), moving each one onto the
   scavenged_static_objects list as we go.  Evacuating a pointer may
   push more static objects onto the list, so we keep going until the
   list is empty.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen = oldest_gen;

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        /*
            if (info->type==RBH)
                info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        */
        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;

        switch (info -> type) {
        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
        }
        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;
        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }
        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
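/* Usage sketch (added commentary, not RTS code): a caller scavenging a
 * small-bitmap stack frame.  frame_payload, frame_size and frame_bitmap
 * are hypothetical names for the values a real caller, such as the
 * RET_SMALL case in scavenge_stack() below, reads from the frame and
 * its info table:
 */
#if 0
StgPtr  frame_payload;   /* first word after the frame's info pointer   */
nat     frame_size;      /* number of payload words                      */
StgWord frame_bitmap;    /* 0 bits = pointers, 1 bits = non-pointers     */

frame_payload = scavenge_small_bitmap(frame_payload, frame_size, frame_bitmap);
/* frame_payload now points just past this frame's payload */
#endif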
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
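/* (Added illustration) A stack chunk is a sequence of activation
 * records, each of which begins with an info pointer describing its
 * layout:
 *
 *     p -->  [ info (e.g. RET_SMALL)    | payload words ... ]
 *            [ info (e.g. UPDATE_FRAME) | updatee           ]
 *            [ ...                                          ]
 *     stack_end
 *
 * The info table gives each frame's size, and hence where the next
 * frame starts; that is how the loop below advances p.
 */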
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */

    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //     enter T
            //                  update T
            //     update T
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND.  In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND.  Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            nat type;
            const StgInfoTable *i;
            StgClosure *updatee;

            updatee = ((StgUpdateFrame *)p)->updatee;
            i = updatee->header.info;
            if (!IS_FORWARDING_PTR(i)) {
                type = get_itbl(updatee)->type;
                if (type == IND) {
                    updatee->header.info = &stg_IND_PERM_info;
                } else if (type == IND_OLDGEN) {
                    updatee->header.info = &stg_IND_OLDGEN_PERM_info;
                }
            }
            evacuate(&((StgUpdateFrame *)p)->updatee);
            ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            if (major_gc)
                scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;
        case RET_BCO:
        {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }
        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area.  (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen = ws->gen;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int g;
    gen_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do

    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}