1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
27 #include "Capability.h"
/* NOTE(review): this file is a sampled listing -- the embedded line numbers
 * jump, so interior lines are elided. Code is left byte-identical below;
 * only comments have been added. */

/* Forward declarations for the stack/bitmap walkers defined later in the
 * original file (their definitions are not visible in this excerpt). */
29 static void scavenge_stack (StgPtr p, StgPtr stack_end);
31 static void scavenge_large_bitmap (StgPtr p,
32 StgLargeBitmap *large_bitmap,

/* In a THREADED_RTS build that is NOT doing parallel GC, redirect the
 * generic entry points to their sequential ("1"-suffixed) variants, so the
 * single-threaded code paths are used without run-time dispatch.
 * NOTE(review): the capital 'L' in scavenge_capability_mut_Lists1 is the
 * upstream spelling of that symbol -- verify against its definition before
 * "fixing" the case.  The matching #endif is elided from this excerpt. */
35 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
36 # define evacuate(a) evacuate1(a)
37 # define recordMutableGen_GC(a,b) recordMutableGen(a,b)
38 # define scavenge_loop(a) scavenge_loop1(a)
39 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
40 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
43 /* -----------------------------------------------------------------------------
45 -------------------------------------------------------------------------- */
/* Scavenge (conditionally evacuate) a TSO's _link field.
 * NOTE(review): sampled listing -- the function's storage class/braces are
 * elided here; code left untouched, comments only added. */
48 scavenge_TSO_link (StgTSO *tso)
50 // We don't always chase the link field: TSOs on the blackhole
51 // queue are not automatically alive, so the link field is a
52 // "weak" pointer in that case.
53 if (tso->why_blocked != BlockedOnBlackHole) {
54 evacuate((StgClosure **)&tso->_link);
/* Scavenge all the GC roots held inside a thread state object: its
 * block_info closure, blocked-exceptions queue, transaction record, and
 * stack, then update the TSO_DIRTY / TSO_LINK_DIRTY flags according to
 * whether anything failed to evacuate.
 * NOTE(review): sampled listing -- local declarations, braces and some
 * else-branches are elided (see numbering gaps); code left untouched. */
59 scavengeTSO (StgTSO *tso)
63 if (tso->what_next == ThreadRelocated) {
64 // the only way this can happen is if the old TSO was on the
65 // mutable list. We might have other links to this defunct
66 // TSO, so we must update its link field.
67 evacuate((StgClosure**)&tso->_link);
71 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
/* Disable eager promotion while scavenging a mutable object; restored at
 * the end of the function. */
73 saved_eager = gct->eager_promotion;
74 gct->eager_promotion = rtsFalse;
/* block_info.closure is only a GC root for these blocking reasons. */
76 if ( tso->why_blocked == BlockedOnMVar
77 || tso->why_blocked == BlockedOnBlackHole
78 || tso->why_blocked == BlockedOnException
80 evacuate(&tso->block_info.closure);
82 evacuate((StgClosure **)&tso->blocked_exceptions);
84 // scavenge current transaction record
85 evacuate((StgClosure **)&tso->trec);
87 // scavenge this thread's stack
88 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
/* If anything above pointed into a younger generation, the whole TSO is
 * dirty; otherwise only the link field may be (TSO_LINK_DIRTY). */
90 if (gct->failed_to_evac) {
91 tso->flags |= TSO_DIRTY;
92 scavenge_TSO_link(tso);
94 tso->flags &= ~TSO_DIRTY;
95 scavenge_TSO_link(tso);
96 if (gct->failed_to_evac) {
97 tso->flags |= TSO_LINK_DIRTY;
99 tso->flags &= ~TSO_LINK_DIRTY;
103 gct->eager_promotion = saved_eager;
106 /* -----------------------------------------------------------------------------
107 Blocks of function args occur on the stack (at the top) and
109 -------------------------------------------------------------------------- */
/* Walk an argument block using the function's bitmap: small bitmaps are
 * decoded inline (a 0 bit marks a pointer word to evacuate), large bitmaps
 * are handed to scavenge_large_bitmap().
 * NOTE(review): sampled listing -- the switch's case labels, locals and the
 * return are elided (see numbering gaps); code left untouched. */
112 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
119 switch (fun_info->f.fun_type) {
121 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
122 size = BITMAP_SIZE(fun_info->f.b.bitmap);
125 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
126 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
/* Default: look the bitmap up in the static per-fun_type table. */
130 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
131 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
/* A clear bit means this word is a pointer. */
134 if ((bitmap & 1) == 0) {
135 evacuate((StgClosure **)p);
138 bitmap = bitmap >> 1;
/* Scavenge the payload of a PAP or AP using the underlying function's
 * argument bitmap (small bitmap, large bitmap, or BCO bitmap).
 * NOTE(review): sampled listing -- case labels, locals and the return are
 * elided (see numbering gaps); code left untouched. */
146 STATIC_INLINE GNUC_ATTR_HOT StgPtr
147 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
151 StgFunInfoTable *fun_info;
153 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
/* fun must not itself be a PAP (PAPs of PAPs are flattened elsewhere). */
154 ASSERT(fun_info->i.type != PAP);
157 switch (fun_info->f.fun_type) {
159 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
162 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
166 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
170 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
/* A clear bit means this word is a pointer. */
173 if ((bitmap & 1) == 0) {
174 evacuate((StgClosure **)p);
177 bitmap = bitmap >> 1;
/* Scavenge a PAP closure: delegate to scavenge_PAP_payload over its
 * n_args payload words.  (Braces elided in this sampled listing.) */
185 STATIC_INLINE GNUC_ATTR_HOT StgPtr
186 scavenge_PAP (StgPAP *pap)
189 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
/* Scavenge an AP closure: same payload walk as a PAP.
 * (Return type line and braces elided in this sampled listing.) */
193 scavenge_AP (StgAP *ap)
196 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
199 /* -----------------------------------------------------------------------------
201 -------------------------------------------------------------------------- */
203 /* Similar to scavenge_large_bitmap(), but we don't write back the
204 * pointers we get back from evacuate().
/* Walk a large SRT bitmap one bit per entry, evacuating each entry whose
 * bit is set; reloads the bitmap word every BITS_IN(W_) entries.
 * NOTE(review): sampled listing -- locals, braces and the loop-body
 * increments are elided (see numbering gaps); code left untouched. */
207 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
214 bitmap = large_srt->l.bitmap[b];
215 size = (nat)large_srt->l.size;
216 p = (StgClosure **)large_srt->srt;
217 for (i = 0; i < size; ) {
218 if ((bitmap & 1) != 0) {
/* Refill the bitmap from the next word at each word boundary. */
223 if (i % BITS_IN(W_) == 0) {
225 bitmap = large_srt->l.bitmap[b];
227 bitmap = bitmap >> 1;
232 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
233 * srt field in the info table. That's ok, because we'll
234 * never dereference it.
/* Evacuate every closure referenced from a static reference table (SRT).
 * A bitmap of all-ones means the SRT uses the large-bitmap encoding.
 * NOTE(review): sampled listing -- locals, the non-PIC branch and loop
 * increments are elided (see numbering gaps); code left untouched. */
236 STATIC_INLINE GNUC_ATTR_HOT void
237 scavenge_srt (StgClosure **srt, nat srt_bitmap)
245 if (bitmap == (StgHalfWord)(-1)) {
246 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
250 while (bitmap != 0) {
251 if ((bitmap & 1) != 0) {
252 #if defined(__PIC__) && defined(mingw32_TARGET_OS)
253 // Special-case to handle references to closures hiding out in DLLs, since
254 // double indirections required to get at those. The code generator knows
255 // which is which when generating the SRT, so it stores the (indirect)
256 // reference to the DLL closure in the table by first adding one to it.
257 // We check for this here, and undo the addition before evacuating it.
259 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
260 // closure that's fixed at link-time, and no extra magic is required.
261 if ( (unsigned long)(*srt) & 0x1 ) {
262 evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
271 bitmap = bitmap >> 1;
/* Scavenge a thunk's SRT.  SRTs only matter for a major GC, since static
 * objects are only collected then -- hence the early return.
 * (Braces elided in this sampled listing.) */
276 STATIC_INLINE GNUC_ATTR_HOT void
277 scavenge_thunk_srt(const StgInfoTable *info)
279 StgThunkInfoTable *thunk_info;
281 if (!major_gc) return;
283 thunk_info = itbl_to_thunk_itbl(info);
284 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
/* Scavenge a function's SRT; mirror of scavenge_thunk_srt() for FUN
 * info tables.  (Braces elided in this sampled listing.) */
287 STATIC_INLINE GNUC_ATTR_HOT void
288 scavenge_fun_srt(const StgInfoTable *info)
290 StgFunInfoTable *fun_info;
292 if (!major_gc) return;
294 fun_info = itbl_to_fun_itbl(info);
295 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
298 /* -----------------------------------------------------------------------------
299 Scavenge a block from the given scan pointer up to bd->free.
301 evac_step is set by the caller to be either zero (for a step in a
302 generation < N) or G where G is the generation of the step being
305 We sometimes temporarily change evac_step back to zero if we're
306 scavenging a mutable object where eager promotion isn't such a good
308 -------------------------------------------------------------------------- */
/* The main linear scavenger: walks every closure in block bd from the scan
 * pointer to bd->free, dispatching on info->type to evacuate each pointer
 * field and advance p by the closure's size.  Objects that failed to be
 * fully promoted are recorded on the mutable list at the end of each
 * iteration.
 * NOTE(review): sampled listing -- many case labels, braces, `else`
 * keywords and local declarations are elided (see the jumps in the
 * embedded numbering).  Code left byte-identical; comments only added. */
310 static GNUC_ATTR_HOT void
311 scavenge_block (bdescr *bd)
315 step *saved_evac_step;
316 rtsBool saved_eager_promotion;
319 debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
320 bd->start, bd->gen_no, bd->step->no, bd->u.scan);
/* Objects scavenged here are evacuated into this block's own step. */
323 gct->evac_step = bd->step;
324 saved_evac_step = gct->evac_step;
325 saved_eager_promotion = gct->eager_promotion;
326 gct->failed_to_evac = rtsFalse;
328 ws = &gct->steps[bd->step->abs_no];
332 // we might be evacuating into the very object that we're
333 // scavenging, so we have to check the real bd->free pointer each
334 // time around the loop.
335 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
337 ASSERT(bd->link == NULL);
338 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
339 info = get_itbl((StgClosure *)p);
341 ASSERT(gct->thunk_selector_depth == 0);
344 switch (info->type) {
/* MVAR: mutable -- scavenge head/tail/value with eager promotion off,
 * then mark the header CLEAN/DIRTY depending on failed_to_evac. */
349 StgMVar *mvar = ((StgMVar *)p);
350 gct->eager_promotion = rtsFalse;
351 evacuate((StgClosure **)&mvar->head);
352 evacuate((StgClosure **)&mvar->tail);
353 evacuate((StgClosure **)&mvar->value);
354 gct->eager_promotion = saved_eager_promotion;
356 if (gct->failed_to_evac) {
357 mvar->header.info = &stg_MVAR_DIRTY_info;
359 mvar->header.info = &stg_MVAR_CLEAN_info;
361 p += sizeofW(StgMVar);
/* The following runs of cases are the size-specialised FUN/THUNK/CONSTR
 * layouts (2_0, 1_0, 0_1, 0_2, 1_1, ...); each evacuates its pointer
 * payload words and bumps p by the fixed closure size. */
366 scavenge_fun_srt(info);
367 evacuate(&((StgClosure *)p)->payload[1]);
368 evacuate(&((StgClosure *)p)->payload[0]);
369 p += sizeofW(StgHeader) + 2;
373 scavenge_thunk_srt(info);
374 evacuate(&((StgThunk *)p)->payload[1]);
375 evacuate(&((StgThunk *)p)->payload[0]);
376 p += sizeofW(StgThunk) + 2;
380 evacuate(&((StgClosure *)p)->payload[1]);
381 evacuate(&((StgClosure *)p)->payload[0]);
382 p += sizeofW(StgHeader) + 2;
386 scavenge_thunk_srt(info);
387 evacuate(&((StgThunk *)p)->payload[0]);
388 p += sizeofW(StgThunk) + 1;
392 scavenge_fun_srt(info);
394 evacuate(&((StgClosure *)p)->payload[0]);
395 p += sizeofW(StgHeader) + 1;
399 scavenge_thunk_srt(info);
400 p += sizeofW(StgThunk) + 1;
404 scavenge_fun_srt(info);
406 p += sizeofW(StgHeader) + 1;
410 scavenge_thunk_srt(info);
411 p += sizeofW(StgThunk) + 2;
415 scavenge_fun_srt(info);
417 p += sizeofW(StgHeader) + 2;
421 scavenge_thunk_srt(info);
422 evacuate(&((StgThunk *)p)->payload[0]);
423 p += sizeofW(StgThunk) + 2;
427 scavenge_fun_srt(info);
429 evacuate(&((StgClosure *)p)->payload[0]);
430 p += sizeofW(StgHeader) + 2;
434 scavenge_fun_srt(info);
/* Generic THUNK: evacuate ptrs words then skip nptrs words. */
441 scavenge_thunk_srt(info);
442 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
443 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
444 evacuate((StgClosure **)p);
446 p += info->layout.payload.nptrs;
/* Generic FUN/CONSTR/etc.: same ptrs-then-nptrs layout walk. */
457 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
458 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
459 evacuate((StgClosure **)p);
461 p += info->layout.payload.nptrs;
466 StgBCO *bco = (StgBCO *)p;
467 evacuate((StgClosure **)&bco->instrs);
468 evacuate((StgClosure **)&bco->literals);
469 evacuate((StgClosure **)&bco->ptrs);
/* IND_PERM in an old-gen block is retagged as IND_OLDGEN_PERM, with the
 * LDV profiler told about the "death" and "creation". */
475 if (bd->gen_no != 0) {
478 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
479 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
480 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
483 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
485 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
487 // We pretend that p has just been created.
488 LDV_RECORD_CREATE((StgClosure *)p);
491 case IND_OLDGEN_PERM:
492 evacuate(&((StgInd *)p)->indirectee);
493 p += sizeofW(StgInd);
/* MUT_VAR: like MVAR, scavenged with eager promotion off and retagged
 * CLEAN/DIRTY afterwards. */
498 gct->eager_promotion = rtsFalse;
499 evacuate(&((StgMutVar *)p)->var);
500 gct->eager_promotion = saved_eager_promotion;
502 if (gct->failed_to_evac) {
503 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
505 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
507 p += sizeofW(StgMutVar);
512 p += BLACKHOLE_sizeW();
517 StgSelector *s = (StgSelector *)p;
518 evacuate(&s->selectee);
519 p += THUNK_SELECTOR_sizeW();
523 // A chunk of stack saved in a heap object
526 StgAP_STACK *ap = (StgAP_STACK *)p;
529 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
530 p = (StgPtr)ap->payload + ap->size;
535 p = scavenge_PAP((StgPAP *)p);
539 p = scavenge_AP((StgAP *)p);
/* ARR_WORDS holds no pointers: just skip it. */
544 p += arr_words_sizeW((StgArrWords *)p);
547 case MUT_ARR_PTRS_CLEAN:
548 case MUT_ARR_PTRS_DIRTY:
553 // We don't eagerly promote objects pointed to by a mutable
554 // array, but if we find the array only points to objects in
555 // the same or an older generation, we mark it "clean" and
556 // avoid traversing it during minor GCs.
557 gct->eager_promotion = rtsFalse;
558 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
559 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
560 evacuate((StgClosure **)p);
562 gct->eager_promotion = saved_eager_promotion;
564 if (gct->failed_to_evac) {
565 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
567 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
570 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
574 case MUT_ARR_PTRS_FROZEN:
575 case MUT_ARR_PTRS_FROZEN0:
580 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
581 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
582 evacuate((StgClosure **)p);
585 // If we're going to put this object on the mutable list, then
586 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
587 if (gct->failed_to_evac) {
588 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
590 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
597 StgTSO *tso = (StgTSO *)p;
/* The STM cases below all temporarily force evac_step (elided set-up
 * lines), scavenge their fields, restore saved_evac_step, and mark
 * themselves mutable so they stay on the mutable list. */
603 case TVAR_WATCH_QUEUE:
605 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
607 evacuate((StgClosure **)&wq->closure);
608 evacuate((StgClosure **)&wq->next_queue_entry);
609 evacuate((StgClosure **)&wq->prev_queue_entry);
610 gct->evac_step = saved_evac_step;
611 gct->failed_to_evac = rtsTrue; // mutable
612 p += sizeofW(StgTVarWatchQueue);
618 StgTVar *tvar = ((StgTVar *) p);
620 evacuate((StgClosure **)&tvar->current_value);
621 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
622 gct->evac_step = saved_evac_step;
623 gct->failed_to_evac = rtsTrue; // mutable
624 p += sizeofW(StgTVar);
630 StgTRecHeader *trec = ((StgTRecHeader *) p);
632 evacuate((StgClosure **)&trec->enclosing_trec);
633 evacuate((StgClosure **)&trec->current_chunk);
634 evacuate((StgClosure **)&trec->invariants_to_check);
635 gct->evac_step = saved_evac_step;
636 gct->failed_to_evac = rtsTrue; // mutable
637 p += sizeofW(StgTRecHeader);
644 StgTRecChunk *tc = ((StgTRecChunk *) p);
645 TRecEntry *e = &(tc -> entries[0]);
647 evacuate((StgClosure **)&tc->prev_chunk);
648 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
649 evacuate((StgClosure **)&e->tvar);
650 evacuate((StgClosure **)&e->expected_value);
651 evacuate((StgClosure **)&e->new_value);
653 gct->evac_step = saved_evac_step;
654 gct->failed_to_evac = rtsTrue; // mutable
655 p += sizeofW(StgTRecChunk);
659 case ATOMIC_INVARIANT:
661 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
663 evacuate(&invariant->code);
664 evacuate((StgClosure **)&invariant->last_execution);
665 gct->evac_step = saved_evac_step;
666 gct->failed_to_evac = rtsTrue; // mutable
667 p += sizeofW(StgAtomicInvariant);
671 case INVARIANT_CHECK_QUEUE:
673 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
675 evacuate((StgClosure **)&queue->invariant);
676 evacuate((StgClosure **)&queue->my_execution);
677 evacuate((StgClosure **)&queue->next_queue_entry);
678 gct->evac_step = saved_evac_step;
679 gct->failed_to_evac = rtsTrue; // mutable
680 p += sizeofW(StgInvariantCheckQueue);
685 barf("scavenge: unimplemented/strange closure type %d @ %p",
690 * We need to record the current object on the mutable list if
691 * (a) It is actually mutable, or
692 * (b) It contains pointers to a younger generation.
693 * Case (b) arises if we didn't manage to promote everything that
694 * the current object points to into the current generation.
696 if (gct->failed_to_evac) {
697 gct->failed_to_evac = rtsFalse;
698 if (bd->gen_no > 0) {
699 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
/* Loop ended: account for copied/scanned words and hand the block on. */
705 gct->copied += ws->todo_free - bd->free;
709 debugTrace(DEBUG_gc, " scavenged %ld bytes",
710 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
712 // update stats: this is a block that has been scavenged
713 gct->scanned += bd->free - bd->u.scan;
714 bd->u.scan = bd->free;
716 if (bd != ws->todo_bd) {
717 // we're not going to evac any more objects into
718 // this block, so push it now.
719 push_scanned_block(bd, ws);
724 /* -----------------------------------------------------------------------------
725 Scavenge everything on the mark stack.
727 This is slightly different from scavenge():
728 - we don't walk linearly through the objects, so the scavenger
729 doesn't need to advance the pointer on to the next object.
730 -------------------------------------------------------------------------- */
/* Compaction-mode scavenger: pops closures off the mark stack one at a
 * time and scavenges them in place.  The per-type handling mirrors
 * scavenge_block() minus the pointer advancement; after the stack drains,
 * a linear re-scan of old_blocks is started if the mark stack overflowed.
 * NOTE(review): sampled listing -- many case labels, braces and local
 * declarations are elided (see numbering gaps); code left untouched. */
733 scavenge_mark_stack(void)
737 step *saved_evac_step;
/* Everything scavenged here is promoted to the oldest generation. */
739 gct->evac_step = &oldest_gen->steps[0];
740 saved_evac_step = gct->evac_step;
743 while (!mark_stack_empty()) {
744 p = pop_mark_stack();
746 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
747 info = get_itbl((StgClosure *)p);
750 switch (info->type) {
755 rtsBool saved_eager_promotion = gct->eager_promotion;
757 StgMVar *mvar = ((StgMVar *)p);
758 gct->eager_promotion = rtsFalse;
759 evacuate((StgClosure **)&mvar->head);
760 evacuate((StgClosure **)&mvar->tail);
761 evacuate((StgClosure **)&mvar->value);
762 gct->eager_promotion = saved_eager_promotion;
764 if (gct->failed_to_evac) {
765 mvar->header.info = &stg_MVAR_DIRTY_info;
767 mvar->header.info = &stg_MVAR_CLEAN_info;
/* Size-specialised FUN/THUNK/CONSTR cases, as in scavenge_block(). */
773 scavenge_fun_srt(info);
774 evacuate(&((StgClosure *)p)->payload[1]);
775 evacuate(&((StgClosure *)p)->payload[0]);
779 scavenge_thunk_srt(info);
780 evacuate(&((StgThunk *)p)->payload[1]);
781 evacuate(&((StgThunk *)p)->payload[0]);
785 evacuate(&((StgClosure *)p)->payload[1]);
786 evacuate(&((StgClosure *)p)->payload[0]);
791 scavenge_fun_srt(info);
792 evacuate(&((StgClosure *)p)->payload[0]);
797 scavenge_thunk_srt(info);
798 evacuate(&((StgThunk *)p)->payload[0]);
803 evacuate(&((StgClosure *)p)->payload[0]);
808 scavenge_fun_srt(info);
813 scavenge_thunk_srt(info);
821 scavenge_fun_srt(info);
828 scavenge_thunk_srt(info);
829 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
830 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
831 evacuate((StgClosure **)p);
843 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
844 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
845 evacuate((StgClosure **)p);
851 StgBCO *bco = (StgBCO *)p;
852 evacuate((StgClosure **)&bco->instrs);
853 evacuate((StgClosure **)&bco->literals);
854 evacuate((StgClosure **)&bco->ptrs);
859 // don't need to do anything here: the only possible case
860 // is that we're in a 1-space compacting collector, with
861 // no "old" generation.
865 case IND_OLDGEN_PERM:
866 evacuate(&((StgInd *)p)->indirectee);
870 case MUT_VAR_DIRTY: {
871 rtsBool saved_eager_promotion = gct->eager_promotion;
873 gct->eager_promotion = rtsFalse;
874 evacuate(&((StgMutVar *)p)->var);
875 gct->eager_promotion = saved_eager_promotion;
877 if (gct->failed_to_evac) {
878 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
880 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
892 StgSelector *s = (StgSelector *)p;
893 evacuate(&s->selectee);
897 // A chunk of stack saved in a heap object
900 StgAP_STACK *ap = (StgAP_STACK *)p;
903 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
908 scavenge_PAP((StgPAP *)p);
912 scavenge_AP((StgAP *)p);
915 case MUT_ARR_PTRS_CLEAN:
916 case MUT_ARR_PTRS_DIRTY:
922 // We don't eagerly promote objects pointed to by a mutable
923 // array, but if we find the array only points to objects in
924 // the same or an older generation, we mark it "clean" and
925 // avoid traversing it during minor GCs.
926 saved_eager = gct->eager_promotion;
927 gct->eager_promotion = rtsFalse;
928 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
929 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
930 evacuate((StgClosure **)p);
932 gct->eager_promotion = saved_eager;
934 if (gct->failed_to_evac) {
935 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
937 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
940 gct->failed_to_evac = rtsTrue; // mutable anyhow.
944 case MUT_ARR_PTRS_FROZEN:
945 case MUT_ARR_PTRS_FROZEN0:
950 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
951 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
952 evacuate((StgClosure **)p);
955 // If we're going to put this object on the mutable list, then
956 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
957 if (gct->failed_to_evac) {
958 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
960 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
967 scavengeTSO((StgTSO*)p);
/* STM closures: scavenge fields, restore evac_step, mark mutable. */
971 case TVAR_WATCH_QUEUE:
973 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
975 evacuate((StgClosure **)&wq->closure);
976 evacuate((StgClosure **)&wq->next_queue_entry);
977 evacuate((StgClosure **)&wq->prev_queue_entry);
978 gct->evac_step = saved_evac_step;
979 gct->failed_to_evac = rtsTrue; // mutable
985 StgTVar *tvar = ((StgTVar *) p);
987 evacuate((StgClosure **)&tvar->current_value);
988 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
989 gct->evac_step = saved_evac_step;
990 gct->failed_to_evac = rtsTrue; // mutable
997 StgTRecChunk *tc = ((StgTRecChunk *) p);
998 TRecEntry *e = &(tc -> entries[0]);
1000 evacuate((StgClosure **)&tc->prev_chunk);
1001 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1002 evacuate((StgClosure **)&e->tvar);
1003 evacuate((StgClosure **)&e->expected_value);
1004 evacuate((StgClosure **)&e->new_value);
1006 gct->evac_step = saved_evac_step;
1007 gct->failed_to_evac = rtsTrue; // mutable
1013 StgTRecHeader *trec = ((StgTRecHeader *) p);
1015 evacuate((StgClosure **)&trec->enclosing_trec);
1016 evacuate((StgClosure **)&trec->current_chunk);
1017 evacuate((StgClosure **)&trec->invariants_to_check);
1018 gct->evac_step = saved_evac_step;
1019 gct->failed_to_evac = rtsTrue; // mutable
1023 case ATOMIC_INVARIANT:
1025 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1027 evacuate(&invariant->code);
1028 evacuate((StgClosure **)&invariant->last_execution);
1029 gct->evac_step = saved_evac_step;
1030 gct->failed_to_evac = rtsTrue; // mutable
1034 case INVARIANT_CHECK_QUEUE:
1036 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1038 evacuate((StgClosure **)&queue->invariant);
1039 evacuate((StgClosure **)&queue->my_execution);
1040 evacuate((StgClosure **)&queue->next_queue_entry);
1041 gct->evac_step = saved_evac_step;
1042 gct->failed_to_evac = rtsTrue; // mutable
1047 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1051 if (gct->failed_to_evac) {
1052 gct->failed_to_evac = rtsFalse;
1053 if (gct->evac_step) {
1054 recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no);
1058 // mark the next bit to indicate "scavenged"
1059 mark(q+1, Bdescr(q));
1061 } // while (!mark_stack_empty())
1063 // start a new linear scan if the mark stack overflowed at some point
1064 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
1065 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
1066 mark_stack_overflowed = rtsFalse;
1067 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
1068 oldgen_scan = oldgen_scan_bd->start;
1071 if (oldgen_scan_bd) {
1072 // push a new thing on the mark stack
1074 // find a closure that is marked but not scavenged, and start
1076 while (oldgen_scan < oldgen_scan_bd->free
1077 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
1081 if (oldgen_scan < oldgen_scan_bd->free) {
1083 // already scavenged?
1084 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
1085 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1088 push_mark_stack(oldgen_scan);
1089 // ToDo: bump the linear scan by the actual size of the object
1090 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1094 oldgen_scan_bd = oldgen_scan_bd->link;
1095 if (oldgen_scan_bd != NULL) {
1096 oldgen_scan = oldgen_scan_bd->start;
1102 /* -----------------------------------------------------------------------------
1103 Scavenge one object.
1105 This is used for objects that are temporarily marked as mutable
1106 because they contain old-to-new generation pointers. Only certain
1107 objects can have this property.
1108 -------------------------------------------------------------------------- */
/* Scavenge a single closure (used from the mutable-list walk).  Returns
 * (via no_luck, set from failed_to_evac at the bottom) whether the object
 * still points into a younger generation and must stay on the list.
 * NOTE(review): sampled listing -- the return type, many case labels,
 * braces and locals are elided (see numbering gaps); code left untouched. */
1111 scavenge_one(StgPtr p)
1113 const StgInfoTable *info;
1114 step *saved_evac_step = gct->evac_step;
1117 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1118 info = get_itbl((StgClosure *)p);
1120 switch (info->type) {
1125 rtsBool saved_eager_promotion = gct->eager_promotion;
1127 StgMVar *mvar = ((StgMVar *)p);
1128 gct->eager_promotion = rtsFalse;
1129 evacuate((StgClosure **)&mvar->head);
1130 evacuate((StgClosure **)&mvar->tail);
1131 evacuate((StgClosure **)&mvar->value);
1132 gct->eager_promotion = saved_eager_promotion;
1134 if (gct->failed_to_evac) {
1135 mvar->header.info = &stg_MVAR_DIRTY_info;
1137 mvar->header.info = &stg_MVAR_CLEAN_info;
/* Generic THUNK layout: evacuate the ptrs words of the payload. */
1151 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1152 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1153 evacuate((StgClosure **)q);
1159 case FUN_1_0: // hardly worth specialising these guys
/* Generic FUN/CONSTR layout walk. */
1175 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1176 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1177 evacuate((StgClosure **)q);
1183 case MUT_VAR_DIRTY: {
1185 rtsBool saved_eager_promotion = gct->eager_promotion;
1187 gct->eager_promotion = rtsFalse;
1188 evacuate(&((StgMutVar *)p)->var);
1189 gct->eager_promotion = saved_eager_promotion;
1191 if (gct->failed_to_evac) {
1192 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1194 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1203 case THUNK_SELECTOR:
1205 StgSelector *s = (StgSelector *)p;
1206 evacuate(&s->selectee);
1212 StgAP_STACK *ap = (StgAP_STACK *)p;
1215 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1216 p = (StgPtr)ap->payload + ap->size;
1221 p = scavenge_PAP((StgPAP *)p);
1225 p = scavenge_AP((StgAP *)p);
1229 // nothing to follow
1232 case MUT_ARR_PTRS_CLEAN:
1233 case MUT_ARR_PTRS_DIRTY:
1236 rtsBool saved_eager;
1238 // We don't eagerly promote objects pointed to by a mutable
1239 // array, but if we find the array only points to objects in
1240 // the same or an older generation, we mark it "clean" and
1241 // avoid traversing it during minor GCs.
1242 saved_eager = gct->eager_promotion;
1243 gct->eager_promotion = rtsFalse;
1245 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1246 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1247 evacuate((StgClosure **)p);
1249 gct->eager_promotion = saved_eager;
1251 if (gct->failed_to_evac) {
1252 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1254 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1257 gct->failed_to_evac = rtsTrue;
1261 case MUT_ARR_PTRS_FROZEN:
1262 case MUT_ARR_PTRS_FROZEN0:
1264 // follow everything
1267 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1268 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1269 evacuate((StgClosure **)p);
1272 // If we're going to put this object on the mutable list, then
1273 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1274 if (gct->failed_to_evac) {
1275 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1277 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1284 scavengeTSO((StgTSO*)p);
/* STM closures: scavenge fields, restore evac_step, mark mutable. */
1288 case TVAR_WATCH_QUEUE:
1290 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
1292 evacuate((StgClosure **)&wq->closure);
1293 evacuate((StgClosure **)&wq->next_queue_entry);
1294 evacuate((StgClosure **)&wq->prev_queue_entry);
1295 gct->evac_step = saved_evac_step;
1296 gct->failed_to_evac = rtsTrue; // mutable
1302 StgTVar *tvar = ((StgTVar *) p);
1304 evacuate((StgClosure **)&tvar->current_value);
1305 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1306 gct->evac_step = saved_evac_step;
1307 gct->failed_to_evac = rtsTrue; // mutable
1313 StgTRecHeader *trec = ((StgTRecHeader *) p);
1315 evacuate((StgClosure **)&trec->enclosing_trec);
1316 evacuate((StgClosure **)&trec->current_chunk);
1317 evacuate((StgClosure **)&trec->invariants_to_check);
1318 gct->evac_step = saved_evac_step;
1319 gct->failed_to_evac = rtsTrue; // mutable
1326 StgTRecChunk *tc = ((StgTRecChunk *) p);
1327 TRecEntry *e = &(tc -> entries[0]);
1329 evacuate((StgClosure **)&tc->prev_chunk);
1330 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1331 evacuate((StgClosure **)&e->tvar);
1332 evacuate((StgClosure **)&e->expected_value);
1333 evacuate((StgClosure **)&e->new_value);
1335 gct->evac_step = saved_evac_step;
1336 gct->failed_to_evac = rtsTrue; // mutable
1340 case ATOMIC_INVARIANT:
1342 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1344 evacuate(&invariant->code);
1345 evacuate((StgClosure **)&invariant->last_execution);
1346 gct->evac_step = saved_evac_step;
1347 gct->failed_to_evac = rtsTrue; // mutable
1351 case INVARIANT_CHECK_QUEUE:
1353 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1355 evacuate((StgClosure **)&queue->invariant);
1356 evacuate((StgClosure **)&queue->my_execution);
1357 evacuate((StgClosure **)&queue->next_queue_entry);
1358 gct->evac_step = saved_evac_step;
1359 gct->failed_to_evac = rtsTrue; // mutable
1364 case IND_OLDGEN_PERM:
1366 evacuate(&((StgInd *)p)->indirectee);
/* Disabled (0 &&) debugging code measuring how much was scavenged. */
1368 #if 0 && defined(DEBUG)
1369 if (RtsFlags.DebugFlags.gc)
1370 /* Debugging code to print out the size of the thing we just
1374 StgPtr start = gen->steps[0].scan;
1375 bdescr *start_bd = gen->steps[0].scan_bd;
1377 scavenge(&gen->steps[0]);
1378 if (start_bd != gen->steps[0].scan_bd) {
1379 size += (P_)BLOCK_ROUND_UP(start) - start;
1380 start_bd = start_bd->link;
1381 while (start_bd != gen->steps[0].scan_bd) {
1382 size += BLOCK_SIZE_W;
1383 start_bd = start_bd->link;
1385 size += gen->steps[0].scan -
1386 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
1388 size = gen->steps[0].scan - start;
1390 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1396 barf("scavenge_one: strange object %d", (int)(info->type));
1399 no_luck = gct->failed_to_evac;
1400 gct->failed_to_evac = rtsFalse;
1404 /* -----------------------------------------------------------------------------
1405 Scavenging mutable lists.
1407 We treat the mutable list of each generation > N (i.e. all the
1408 generations older than the one being collected) as roots. We also
1409 remove non-mutable objects from the mutable list at this point.
1410 -------------------------------------------------------------------------- */
/* Walk one generation's mutable list (a chain of blocks of closure
 * pointers), scavenging each entry with scavenge_one() and re-recording
 * on the list only objects that still point into a younger generation.
 * NOTE(review): sampled listing -- locals, some case labels (the MUTLIST
 * stats block is clearly under a DEBUG-style condition whose #if/#endif
 * lines are elided) and braces are missing; code left untouched. */
1413 scavenge_mutable_list(bdescr *bd, generation *gen)
1417 gct->evac_step = &gen->steps[0];
1418 for (; bd != NULL; bd = bd->link) {
1419 for (q = bd->start; q < bd->free; q++) {
1421 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
/* Mutable-list composition statistics; CLEAN variants should never
 * appear here, hence the barf()s. */
1424 switch (get_itbl((StgClosure *)p)->type) {
1426 barf("MUT_VAR_CLEAN on mutable list");
1428 mutlist_MUTVARS++; break;
1429 case MUT_ARR_PTRS_CLEAN:
1430 case MUT_ARR_PTRS_DIRTY:
1431 case MUT_ARR_PTRS_FROZEN:
1432 case MUT_ARR_PTRS_FROZEN0:
1433 mutlist_MUTARRS++; break;
1435 barf("MVAR_CLEAN on mutable list");
1437 mutlist_MVARS++; break;
1439 mutlist_OTHERS++; break;
1443 // Check whether this object is "clean", that is it
1444 // definitely doesn't point into a young generation.
1445 // Clean objects don't need to be scavenged. Some clean
1446 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1447 // list at all; others, such as MUT_ARR_PTRS_CLEAN
1448 // are always on the mutable list.
1450 switch (get_itbl((StgClosure *)p)->type) {
1451 case MUT_ARR_PTRS_CLEAN:
1452 recordMutableGen_GC((StgClosure *)p,gen->no);
1455 StgTSO *tso = (StgTSO *)p;
1456 if ((tso->flags & TSO_DIRTY) == 0) {
1457 // Must be on the mutable list because its link
1459 ASSERT(tso->flags & TSO_LINK_DIRTY);
1461 scavenge_TSO_link(tso);
1462 if (gct->failed_to_evac) {
1463 recordMutableGen_GC((StgClosure *)p,gen->no);
1464 gct->failed_to_evac = rtsFalse;
1466 tso->flags &= ~TSO_LINK_DIRTY;
/* Default path: fully scavenge the object. */
1475 if (scavenge_one(p)) {
1476 // didn't manage to promote everything, so put the
1477 // object back on the list.
1478 recordMutableGen_GC((StgClosure *)p,gen->no);
/* Scavenge the saved mutable lists of every generation older than N for
 * one capability, then free each saved chain and clear its slot.
 * NOTE(review): sampled listing -- return type, braces and the loop
 * variable declaration are elided; code left untouched. */
1485 scavenge_capability_mut_lists (Capability *cap)
1489 /* Mutable lists from each generation > N
1490 * we want to *scavenge* these roots, not evacuate them: they're not
1491 * going to move in this GC.
1492 * Also do them in reverse generation order, for the usual reason:
1493 * namely to reduce the likelihood of spurious old->new pointers.
1495 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1496 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1497 freeChain_sync(cap->saved_mut_lists[g]);
1498 cap->saved_mut_lists[g] = NULL;
1502 /* -----------------------------------------------------------------------------
1503 Scavenging the static objects.
1505 We treat the mutable list of each generation > N (i.e. all the
1506 generations older than the one being collected) as roots. We also
1507 remove non-mutable objects from the mutable list at this point.
1508 -------------------------------------------------------------------------- */
/* Scavenge the static objects that have been evacuated so far.
 * Objects are unlinked from gct->static_objects and pushed onto
 * gct->scavenged_static_objects as they are processed; evacuation may
 * push more objects onto static_objects, so the caller loops until the
 * list reaches END_OF_STATIC_LIST.
 * NOTE(review): several lines (braces, case labels, loop heads) are
 * elided in this excerpt. */
1511 scavenge_static(void)
1514 const StgInfoTable *info;
1516 debugTrace(DEBUG_gc, "scavenging static objects");
1518 /* Always evacuate straight to the oldest generation for static
1520 gct->evac_step = &oldest_gen->steps[0];
1522 /* keep going until we've scavenged all the objects on the linked
1527 /* get the next static object from the list. Remember, there might
1528 * be more stuff on this list after each evacuation...
1529 * (static_objects is a global)
1531 p = gct->static_objects;
// END_OF_STATIC_LIST is the list terminator: nothing left to scavenge
1532 if (p == END_OF_STATIC_LIST) {
1536 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1539 if (info->type==RBH)
1540 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1542 // make sure the info pointer is into text space
1544 /* Take this object *off* the static_objects list,
1545 * and put it on the scavenged_static_objects list.
1547 gct->static_objects = *STATIC_LINK(info,p);
1548 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1549 gct->scavenged_static_objects = p;
1551 switch (info -> type) {
// indirection case (presumably IND_STATIC): follow the indirectee
1555 StgInd *ind = (StgInd *)p;
1556 evacuate(&ind->indirectee);
1558 /* might fail to evacuate it, in which case we have to pop it
1559 * back on the mutable list of the oldest generation. We
1560 * leave it *on* the scavenged_static_objects list, though,
1561 * in case we visit this object again.
1563 if (gct->failed_to_evac) {
1564 gct->failed_to_evac = rtsFalse;
1565 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
// thunk/function cases: scavenge their static reference tables (SRTs)
1571 scavenge_thunk_srt(info);
1575 scavenge_fun_srt(info);
// constructor-like case: evacuate every pointer word in the payload
1582 next = (P_)p->payload + info->layout.payload.ptrs;
1583 // evacuate the pointers
1584 for (q = (P_)p->payload; q < next; q++) {
1585 evacuate((StgClosure **)q);
1591 barf("scavenge_static: strange closure %d", (int)(info->type));
// only the indirection case above may leave failed_to_evac set
1594 ASSERT(gct->failed_to_evac == rtsFalse);
1598 /* -----------------------------------------------------------------------------
1599 scavenge a chunk of memory described by a bitmap
1600 -------------------------------------------------------------------------- */
/* Walk a region of memory described by a large bitmap, evacuating each
 * word whose bitmap bit is clear (a clear bit marks a pointer word; a
 * set bit marks a non-pointer).  The bitmap is consumed one bit at a
 * time and reloaded from the next bitmap word every BITS_IN(W_)
 * entries.
 * NOTE(review): the return type, local declarations, pointer/index
 * increments and closing braces are elided in this excerpt. */
1603 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1609 bitmap = large_bitmap->bitmap[b];
1610 for (i = 0; i < size; ) {
1611 if ((bitmap & 1) == 0) {
1612 evacuate((StgClosure **)p);
// crossed into the next bitmap word: reload it
1616 if (i % BITS_IN(W_) == 0) {
1618 bitmap = large_bitmap->bitmap[b];
1620 bitmap = bitmap >> 1;
/* Walk `size` slots at p described by an inline (small) bitmap,
 * evacuating slots whose bit is clear (pointers) and skipping slots
 * whose bit is set (non-pointers); returns the pointer past the
 * scanned area (per the StgPtr return type).
 * NOTE(review): the loop head, pointer increment and return statement
 * are elided in this excerpt. */
1625 STATIC_INLINE StgPtr
1626 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1629 if ((bitmap & 1) == 0) {
1630 evacuate((StgClosure **)p);
1633 bitmap = bitmap >> 1;
1639 /* -----------------------------------------------------------------------------
1640 scavenge_stack walks over a section of stack and evacuates all the
1641 objects pointed to by it. We can use the same code for walking
1642 AP_STACK_UPDs, since these are just sections of copied stack.
1643 -------------------------------------------------------------------------- */
/* Walk the stack section [p, stack_end), evacuating every closure
 * pointer held in each activation record.  The same walker is used for
 * AP_STACK payloads, which are copied stack sections.
 * NOTE(review): many lines (case labels, braces, loop tails) are
 * elided in this excerpt. */
1646 scavenge_stack(StgPtr p, StgPtr stack_end)
1648 const StgRetInfoTable* info;
1653 * Each time around this loop, we are looking at a chunk of stack
1654 * that starts with an activation record.
1657 while (p < stack_end) {
1658 info = get_ret_itbl((StgClosure *)p);
1660 switch (info->i.type) {
1663 // In SMP, we can get update frames that point to indirections
1664 // when two threads evaluate the same thunk. We do attempt to
1665 // discover this situation in threadPaused(), but it's
1666 // possible that the following sequence occurs:
1675 // Now T is an indirection, and the update frame is already
1676 // marked on A's stack, so we won't traverse it again in
1677 // threadPaused(). We could traverse the whole stack again
1678 // before GC, but that seems like overkill.
1680 // Scavenging this update frame as normal would be disastrous;
1681 // the updatee would end up pointing to the value. So we turn
1682 // the indirection into an IND_PERM, so that evacuate will
1683 // copy the indirection into the old generation instead of
1686 // Note [upd-black-hole]
1687 // One slight hiccup is that the THUNK_SELECTOR machinery can
1688 // overwrite the updatee with an IND. In parallel GC, this
1689 // could even be happening concurrently, so we can't check for
1690 // the IND. Fortunately if we assume that blackholing is
1691 // happening (either lazy or eager), then we can be sure that
1692 // the updatee is never a THUNK_SELECTOR and we're ok.
1693 // NB. this is a new invariant: blackholing is not optional.
// UPDATE_FRAME handling: rewrite an IND updatee to its *_PERM form
// (see the Note above) before evacuating the updatee pointer
1696 const StgInfoTable *i;
1697 StgClosure *updatee;
1699 updatee = ((StgUpdateFrame *)p)->updatee;
1700 i = updatee->header.info;
// a forwarding pointer means the updatee was already evacuated
1701 if (!IS_FORWARDING_PTR(i)) {
1702 type = get_itbl(updatee)->type;
1704 updatee->header.info = &stg_IND_PERM_info;
1705 } else if (type == IND_OLDGEN) {
1706 updatee->header.info = &stg_IND_OLDGEN_PERM_info;
1709 evacuate(&((StgUpdateFrame *)p)->updatee);
1710 ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
1711 p += sizeofW(StgUpdateFrame);
1715 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1716 case CATCH_STM_FRAME:
1717 case CATCH_RETRY_FRAME:
1718 case ATOMICALLY_FRAME:
1722 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1723 size = BITMAP_SIZE(info->i.layout.bitmap);
1724 // NOTE: the payload starts immediately after the info-ptr, we
1725 // don't have an StgHeader in the same sense as a heap closure.
1727 p = scavenge_small_bitmap(p, size, bitmap);
// and don't forget the frame's SRT
1731 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
// RET_BCO (presumably): evacuate the BCO itself, then walk its
// large bitmap to find the pointer slots
1739 evacuate((StgClosure **)p);
1742 size = BCO_BITMAP_SIZE(bco);
1743 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1748 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1753 size = GET_LARGE_BITMAP(&info->i)->size;
1755 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1757 // and don't forget to follow the SRT
1761 // Dynamic bitmap: the mask is stored on the stack, and
1762 // there are a number of non-pointers followed by a number
1763 // of pointers above the bitmapped area. (see StgMacros.h,
1768 dyn = ((StgRetDyn *)p)->liveness;
1770 // traverse the bitmap first
1771 bitmap = RET_DYN_LIVENESS(dyn);
1772 p = (P_)&((StgRetDyn *)p)->payload[0];
1773 size = RET_DYN_BITMAP_SIZE;
1774 p = scavenge_small_bitmap(p, size, bitmap);
1776 // skip over the non-ptr words
1777 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1779 // follow the ptr words
1780 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1781 evacuate((StgClosure **)p);
// RET_FUN frame: evacuate the function closure, then use its info
// table to scavenge the argument block
1789 StgRetFun *ret_fun = (StgRetFun *)p;
1790 StgFunInfoTable *fun_info;
1792 evacuate(&ret_fun->fun);
// the fun pointer may carry a tag; strip it before info lookup
1793 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1794 p = scavenge_arg_block(fun_info, ret_fun->payload);
1799 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1804 /*-----------------------------------------------------------------------------
1805 scavenge the large object list.
1807 evac_step set by caller; similar games played with evac_step as with
1808 scavenge() - see comment at the top of scavenge(). Most large
1809 objects are (repeatedly) mutable, so most of the time evac_step will
1811 --------------------------------------------------------------------------- */
/* Scavenge a workspace's todo_large_objects list.  Each block is
 * unlinked, moved onto the step's scavenged_large_objects list (under
 * the sync_large_objects spin lock for parallel GC), and scavenged
 * with scavenge_one; objects not fully promoted are recorded on the
 * mutable list of their generation.
 * NOTE(review): return type, local declarations and some braces are
 * elided in this excerpt. */
1814 scavenge_large (step_workspace *ws)
1819 gct->evac_step = ws->step;
1821 bd = ws->todo_large_objects;
// refetch from ws each iteration: scavenging may push new blocks
1823 for (; bd != NULL; bd = ws->todo_large_objects) {
1825 // take this object *off* the large objects list and put it on
1826 // the scavenged large objects list. This is so that we can
1827 // treat new_large_objects as a stack and push new objects on
1828 // the front when evacuating.
1829 ws->todo_large_objects = bd->link;
1831 ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
1832 dbl_link_onto(bd, &ws->step->scavenged_large_objects);
1833 ws->step->n_scavenged_large_blocks += bd->blocks;
1834 RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
// scavenge_one is truthy when it failed to promote everything,
// so the object must stay reachable via the mutable list
1837 if (scavenge_one(p)) {
1838 if (ws->step->gen_no > 0) {
1839 recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
// account the scanned words for GC statistics
1844 gct->scanned += closure_sizeW((StgClosure*)p);
1848 /* ----------------------------------------------------------------------------
1849 Look for work to do.
1851 We look for the oldest step that has either a todo block that can
1852 be scanned, or a block of work on the global queue that we can
1855 It is important to take work from the *oldest* generation that
1856 has work available, because that minimizes the likelihood of
1857 evacuating objects into a young generation when they should have
1858 been eagerly promoted. This really does make a difference (the
1859 cacheprof benchmark is one that is affected).
1861 We also want to scan the todo block if possible before grabbing
1862 work from the global queue, the reason being that we don't want to
1863 steal work from the global queue and starve other threads if there
1864 is other work we can usefully be doing.
1865 ------------------------------------------------------------------------- */
/* Look for scavenging work, oldest step first (see the comment block
 * above): partially-scanned todo blocks, then large objects, then full
 * blocks from the local todo queue; under THREADED_RTS with
 * work_stealing enabled, also try to steal todo blocks from other GC
 * threads.  Returns whether any work was done at all.
 * NOTE(review): the return type, some loop heads and braces are elided
 * in this excerpt. */
1868 scavenge_find_work (void)
1872 rtsBool did_something, did_anything;
1875 gct->scav_find_work++;
1877 did_anything = rtsFalse;
1880 did_something = rtsFalse;
// iterate from the oldest step downwards
1881 for (s = total_steps-1; s >= 0; s--) {
1882 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1885 ws = &gct->steps[s];
1887 gct->scan_bd = NULL;
1889 // If we have a scan block with some work to do,
1890 // scavenge everything up to the free pointer.
1891 if (ws->todo_bd->u.scan < ws->todo_free)
1893 scavenge_block(ws->todo_bd);
1894 did_something = rtsTrue;
1898 // If we have any large objects to scavenge, do them now.
1899 if (ws->todo_large_objects) {
1901 did_something = rtsTrue;
// otherwise try to grab a full block from the local todo queue
1905 if ((bd = grab_local_todo_block(ws)) != NULL) {
1907 did_something = rtsTrue;
1912 if (did_something) {
1913 did_anything = rtsTrue;
1917 #if defined(THREADED_RTS)
1918 if (work_stealing) {
1919 // look for work to steal
1920 for (s = total_steps-1; s >= 0; s--) {
1921 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1924 if ((bd = steal_todo_block(s)) != NULL) {
1926 did_something = rtsTrue;
1931 if (did_something) {
1932 did_anything = rtsTrue;
1938 // only return when there is no more work to do
1940 return did_anything;
1943 /* ----------------------------------------------------------------------------
1944 Scavenge until we can't find anything more to scavenge.
1945 ------------------------------------------------------------------------- */
1953 work_to_do = rtsFalse;
1955 // scavenge static objects
1956 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
1957 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
1961 // scavenge objects in compacted generation
1962 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1963 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1964 scavenge_mark_stack();
1965 work_to_do = rtsTrue;
1968 // Order is important here: we want to deal in full blocks as
1969 // much as possible, so go for global work in preference to
1970 // local work. Only if all the global work has been exhausted
1971 // do we start scavenging the fragments of blocks in the local
1973 if (scavenge_find_work()) goto loop;
1975 if (work_to_do) goto loop;