1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
27 #include "Capability.h"
28 #include "LdvProfile.h"
// Forward declarations for helpers defined later in this file.
30 static void scavenge_stack (StgPtr p, StgPtr stack_end);
32 static void scavenge_large_bitmap (StgPtr p,
33 StgLargeBitmap *large_bitmap,
// When the threaded RTS is built but this translation unit is compiled
// for the single-threaded GC (!PARALLEL_GC), remap the public names onto
// their non-parallel "...1" variants so both versions can coexist.
36 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
37 # define evacuate(a) evacuate1(a)
38 # define recordMutableGen_GC(a,b) recordMutableGen(a,b)
39 # define scavenge_loop(a) scavenge_loop1(a)
40 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
// NOTE(review): the capital 'L' in mut_Lists1 looks inconsistent with the
// other "...1" names — confirm it matches the definition site before "fixing".
41 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
44 /* -----------------------------------------------------------------------------
46 -------------------------------------------------------------------------- */
// Scavenge (evacuate) a TSO's _link field, but only when the thread is
// not blocked on a black hole: for threads on the blackhole queue the
// link behaves like a weak pointer and must not keep the target alive.
49 scavenge_TSO_link (StgTSO *tso)
51 // We don't always chase the link field: TSOs on the blackhole
52 // queue are not automatically alive, so the link field is a
53 // "weak" pointer in that case.
54 if (tso->why_blocked != BlockedOnBlackHole) {
55 evacuate((StgClosure **)&tso->_link);
// Scavenge every GC pointer held by a TSO (thread state object):
// the closure it is blocked on, its blocked-exceptions queue, its STM
// transaction record, its stack, and (via scavenge_TSO_link) its link.
// Eager promotion is disabled for the duration so pointers out of the
// TSO are not prematurely promoted.
60 scavengeTSO (StgTSO *tso)
64 if (tso->what_next == ThreadRelocated) {
65 // the only way this can happen is if the old TSO was on the
66 // mutable list. We might have other links to this defunct
67 // TSO, so we must update its link field.
68 evacuate((StgClosure**)&tso->_link);
72 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
74 saved_eager = gct->eager_promotion;
75 gct->eager_promotion = rtsFalse;
// The block_info.closure field is only a GC root for these blocking reasons.
77 if ( tso->why_blocked == BlockedOnMVar
78 || tso->why_blocked == BlockedOnBlackHole
79 || tso->why_blocked == BlockedOnException
81 evacuate(&tso->block_info.closure);
83 evacuate((StgClosure **)&tso->blocked_exceptions);
85 // scavenge current transaction record
86 evacuate((StgClosure **)&tso->trec);
88 // scavenge this thread's stack
89 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
// If anything above failed to promote, the whole TSO stays dirty (it
// points into a younger generation); otherwise only track whether the
// link field alone is dirty, via TSO_LINK_DIRTY.
91 if (gct->failed_to_evac) {
92 tso->flags |= TSO_DIRTY;
93 scavenge_TSO_link(tso);
95 tso->flags &= ~TSO_DIRTY;
96 scavenge_TSO_link(tso);
97 if (gct->failed_to_evac) {
98 tso->flags |= TSO_LINK_DIRTY;
100 tso->flags &= ~TSO_LINK_DIRTY;
104 gct->eager_promotion = saved_eager;
107 /* -----------------------------------------------------------------------------
108 Blocks of function args occur on the stack (at the top) and
110 -------------------------------------------------------------------------- */
// Scavenge a block of function arguments using the function's argument
// bitmap. Small bitmaps are walked inline (a 0 bit marks a pointer word,
// which is evacuated); large bitmaps are delegated to
// scavenge_large_bitmap(). Generic (non-specialised) calling conventions
// look their bitmap up in the stg_arg_bitmaps table.
113 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
120 switch (fun_info->f.fun_type) {
122 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
123 size = BITMAP_SIZE(fun_info->f.b.bitmap);
126 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
127 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
131 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
132 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
// A clear bit means this word is a heap pointer: evacuate it.
135 if ((bitmap & 1) == 0) {
136 evacuate((StgClosure **)p);
139 bitmap = bitmap >> 1;
// Scavenge the payload of a PAP or AP, using the bitmap of the function
// closure 'fun' to find the pointer words. BCOs carry their own bitmap
// (BCO_BITMAP); other functions use either their small bitmap, a large
// bitmap, or the generic stg_arg_bitmaps entry. Returns the pointer one
// past the payload (grounded by the callers using the return value as
// the next scan pointer).
147 STATIC_INLINE GNUC_ATTR_HOT StgPtr
148 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
152 StgFunInfoTable *fun_info;
154 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
155 ASSERT(fun_info->i.type != PAP);
158 switch (fun_info->f.fun_type) {
160 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
163 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
167 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
171 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
// A clear bit means this payload word is a pointer: evacuate it.
174 if ((bitmap & 1) == 0) {
175 evacuate((StgClosure **)p);
178 bitmap = bitmap >> 1;
// Scavenge a PAP (partial application): delegate to the shared payload
// walker using the PAP's function and argument count.
186 STATIC_INLINE GNUC_ATTR_HOT StgPtr
187 scavenge_PAP (StgPAP *pap)
190 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
// Scavenge an AP (thunk application); same payload layout as a PAP.
194 scavenge_AP (StgAP *ap)
197 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
200 /* -----------------------------------------------------------------------------
202 -------------------------------------------------------------------------- */
204 /* Similar to scavenge_large_bitmap(), but we don't write back the
205 * pointers we get back from evacuate().
// Scavenge an SRT (static reference table) governed by a large,
// multi-word bitmap: evacuate each entry whose bit is SET. Note the
// convention is the opposite of argument bitmaps — here a 1 bit marks
// a live SRT entry. The bitmap word is reloaded every BITS_IN(W_) entries.
208 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
215 bitmap = large_srt->l.bitmap[b];
216 size = (nat)large_srt->l.size;
217 p = (StgClosure **)large_srt->srt;
218 for (i = 0; i < size; ) {
219 if ((bitmap & 1) != 0) {
// Crossed into the next bitmap word: fetch it.
224 if (i % BITS_IN(W_) == 0) {
226 bitmap = large_srt->l.bitmap[b];
228 bitmap = bitmap >> 1;
233 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
234 * srt field in the info table. That's ok, because we'll
235 * never dereference it.
// Scavenge an SRT given its (half-word) bitmap. An all-ones bitmap is
// the sentinel meaning "large SRT"; otherwise walk the bits inline,
// evacuating entries whose bit is set.
237 STATIC_INLINE GNUC_ATTR_HOT void
238 scavenge_srt (StgClosure **srt, nat srt_bitmap)
246 if (bitmap == (StgHalfWord)(-1)) {
247 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
251 while (bitmap != 0) {
252 if ((bitmap & 1) != 0) {
253 #if defined(__PIC__) && defined(mingw32_TARGET_OS)
254 // Special-case to handle references to closures hiding out in DLLs, since
255 // double indirections are required to get at those. The code generator knows
256 // which is which when generating the SRT, so it stores the (indirect)
257 // reference to the DLL closure in the table by first adding one to it.
258 // We check for this here, and undo the addition before evacuating it.
260 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
261 // closure that's fixed at link-time, and no extra magic is required.
262 if ( (unsigned long)(*srt) & 0x1 ) {
263 evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
272 bitmap = bitmap >> 1;
// Scavenge the SRT attached to a thunk's info table. SRTs only keep
// static (CAF-reachable) objects alive, so this is needed — and done —
// only during a major GC.
277 STATIC_INLINE GNUC_ATTR_HOT void
278 scavenge_thunk_srt(const StgInfoTable *info)
280 StgThunkInfoTable *thunk_info;
282 if (!major_gc) return;
284 thunk_info = itbl_to_thunk_itbl(info);
285 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
// Scavenge the SRT attached to a function's info table; like
// scavenge_thunk_srt, a no-op outside a major GC.
288 STATIC_INLINE GNUC_ATTR_HOT void
289 scavenge_fun_srt(const StgInfoTable *info)
291 StgFunInfoTable *fun_info;
293 if (!major_gc) return;
295 fun_info = itbl_to_fun_itbl(info);
296 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
299 /* -----------------------------------------------------------------------------
300 Scavenge a block from the given scan pointer up to bd->free.
302 evac_step is set by the caller to be either zero (for a step in a
303 generation < N) or G where G is the generation of the step being
306 We sometimes temporarily change evac_step back to zero if we're
307 scavenging a mutable object where eager promotion isn't such a good
309 -------------------------------------------------------------------------- */
// Scavenge all objects in block 'bd' from bd->u.scan up to bd->free,
// advancing p over each closure by its size. The big switch dispatches
// on closure type; mutable types (MVAR, MUT_VAR, MUT_ARR_PTRS, TVars,
// TRecs, ...) temporarily disable eager promotion and are marked
// dirty/clean according to whether any pointee failed to promote.
// NOTE(review): this extract elides lines (case labels, braces, decls)
// from the original file; the structure below is a sampled view.
311 static GNUC_ATTR_HOT void
312 scavenge_block (bdescr *bd)
316 step *saved_evac_step;
317 rtsBool saved_eager_promotion;
320 debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
321 bd->start, bd->gen_no, bd->step->no, bd->u.scan);
// Objects scavenged here are evacuated into this block's own step.
324 gct->evac_step = bd->step;
325 saved_evac_step = gct->evac_step;
326 saved_eager_promotion = gct->eager_promotion;
327 gct->failed_to_evac = rtsFalse;
329 ws = &gct->steps[bd->step->abs_no];
333 // we might be evacuating into the very object that we're
334 // scavenging, so we have to check the real bd->free pointer each
335 // time around the loop.
336 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
338 ASSERT(bd->link == NULL);
339 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
340 info = get_itbl((StgClosure *)p);
342 ASSERT(gct->thunk_selector_depth == 0);
345 switch (info->type) {
// MVar: scavenge head/tail/value with eager promotion off, then mark
// the MVar dirty iff something failed to promote.
350 StgMVar *mvar = ((StgMVar *)p);
351 gct->eager_promotion = rtsFalse;
352 evacuate((StgClosure **)&mvar->head);
353 evacuate((StgClosure **)&mvar->tail);
354 evacuate((StgClosure **)&mvar->value);
355 gct->eager_promotion = saved_eager_promotion;
357 if (gct->failed_to_evac) {
358 mvar->header.info = &stg_MVAR_DIRTY_info;
360 mvar->header.info = &stg_MVAR_CLEAN_info;
362 p += sizeofW(StgMVar);
// Specialised small FUN/THUNK/CONSTR layouts: evacuate the known
// pointer fields and step p by the exact closure size.
367 scavenge_fun_srt(info);
368 evacuate(&((StgClosure *)p)->payload[1]);
369 evacuate(&((StgClosure *)p)->payload[0]);
370 p += sizeofW(StgHeader) + 2;
374 scavenge_thunk_srt(info);
375 evacuate(&((StgThunk *)p)->payload[1]);
376 evacuate(&((StgThunk *)p)->payload[0]);
377 p += sizeofW(StgThunk) + 2;
381 evacuate(&((StgClosure *)p)->payload[1]);
382 evacuate(&((StgClosure *)p)->payload[0]);
383 p += sizeofW(StgHeader) + 2;
387 scavenge_thunk_srt(info);
388 evacuate(&((StgThunk *)p)->payload[0]);
389 p += sizeofW(StgThunk) + 1;
393 scavenge_fun_srt(info);
395 evacuate(&((StgClosure *)p)->payload[0]);
396 p += sizeofW(StgHeader) + 1;
400 scavenge_thunk_srt(info);
401 p += sizeofW(StgThunk) + 1;
405 scavenge_fun_srt(info);
407 p += sizeofW(StgHeader) + 1;
411 scavenge_thunk_srt(info);
412 p += sizeofW(StgThunk) + 2;
416 scavenge_fun_srt(info);
418 p += sizeofW(StgHeader) + 2;
422 scavenge_thunk_srt(info);
423 evacuate(&((StgThunk *)p)->payload[0]);
424 p += sizeofW(StgThunk) + 2;
428 scavenge_fun_srt(info);
430 evacuate(&((StgClosure *)p)->payload[0]);
431 p += sizeofW(StgHeader) + 2;
435 scavenge_fun_srt(info);
// Generic layouts: walk the pointer words described by the info
// table, then skip the non-pointer words.
442 scavenge_thunk_srt(info);
443 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
444 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
445 evacuate((StgClosure **)p);
447 p += info->layout.payload.nptrs;
458 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
459 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
460 evacuate((StgClosure **)p);
462 p += info->layout.payload.nptrs;
467 StgBCO *bco = (StgBCO *)p;
468 evacuate((StgClosure **)&bco->instrs);
469 evacuate((StgClosure **)&bco->literals);
470 evacuate((StgClosure **)&bco->ptrs);
// Indirections in an old generation are retagged to the "permanent"
// old-gen variant; LDV profiling bookkeeping accompanies the retag.
476 if (bd->gen_no != 0) {
479 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
480 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
481 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
484 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
486 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
488 // We pretend that p has just been created.
489 LDV_RECORD_CREATE((StgClosure *)p);
492 case IND_OLDGEN_PERM:
493 evacuate(&((StgInd *)p)->indirectee);
494 p += sizeofW(StgInd);
// MUT_VAR: same dirty/clean protocol as MVar above.
499 gct->eager_promotion = rtsFalse;
500 evacuate(&((StgMutVar *)p)->var);
501 gct->eager_promotion = saved_eager_promotion;
503 if (gct->failed_to_evac) {
504 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
506 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
508 p += sizeofW(StgMutVar);
513 p += BLACKHOLE_sizeW();
518 StgSelector *s = (StgSelector *)p;
519 evacuate(&s->selectee);
520 p += THUNK_SELECTOR_sizeW();
524 // A chunk of stack saved in a heap object
527 StgAP_STACK *ap = (StgAP_STACK *)p;
530 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
531 p = (StgPtr)ap->payload + ap->size;
536 p = scavenge_PAP((StgPAP *)p);
540 p = scavenge_AP((StgAP *)p);
// ARR_WORDS holds no pointers; just skip it.
545 p += arr_words_sizeW((StgArrWords *)p);
548 case MUT_ARR_PTRS_CLEAN:
549 case MUT_ARR_PTRS_DIRTY:
554 // We don't eagerly promote objects pointed to by a mutable
555 // array, but if we find the array only points to objects in
556 // the same or an older generation, we mark it "clean" and
557 // avoid traversing it during minor GCs.
558 gct->eager_promotion = rtsFalse;
559 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
560 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
561 evacuate((StgClosure **)p);
563 gct->eager_promotion = saved_eager_promotion;
565 if (gct->failed_to_evac) {
566 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
568 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
571 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
575 case MUT_ARR_PTRS_FROZEN:
576 case MUT_ARR_PTRS_FROZEN0:
581 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
582 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
583 evacuate((StgClosure **)p);
586 // If we're going to put this object on the mutable list, then
587 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
588 if (gct->failed_to_evac) {
589 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
591 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
598 StgTSO *tso = (StgTSO *)p;
// STM objects: scavenged with the step temporarily reset, always
// treated as mutable (failed_to_evac forced to true).
604 case TVAR_WATCH_QUEUE:
606 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
608 evacuate((StgClosure **)&wq->closure);
609 evacuate((StgClosure **)&wq->next_queue_entry);
610 evacuate((StgClosure **)&wq->prev_queue_entry);
611 gct->evac_step = saved_evac_step;
612 gct->failed_to_evac = rtsTrue; // mutable
613 p += sizeofW(StgTVarWatchQueue);
619 StgTVar *tvar = ((StgTVar *) p);
621 evacuate((StgClosure **)&tvar->current_value);
622 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
623 gct->evac_step = saved_evac_step;
624 gct->failed_to_evac = rtsTrue; // mutable
625 p += sizeofW(StgTVar);
631 StgTRecHeader *trec = ((StgTRecHeader *) p);
633 evacuate((StgClosure **)&trec->enclosing_trec);
634 evacuate((StgClosure **)&trec->current_chunk);
635 evacuate((StgClosure **)&trec->invariants_to_check);
636 gct->evac_step = saved_evac_step;
637 gct->failed_to_evac = rtsTrue; // mutable
638 p += sizeofW(StgTRecHeader);
645 StgTRecChunk *tc = ((StgTRecChunk *) p);
646 TRecEntry *e = &(tc -> entries[0]);
648 evacuate((StgClosure **)&tc->prev_chunk);
649 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
650 evacuate((StgClosure **)&e->tvar);
651 evacuate((StgClosure **)&e->expected_value);
652 evacuate((StgClosure **)&e->new_value);
654 gct->evac_step = saved_evac_step;
655 gct->failed_to_evac = rtsTrue; // mutable
656 p += sizeofW(StgTRecChunk);
660 case ATOMIC_INVARIANT:
662 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
664 evacuate(&invariant->code);
665 evacuate((StgClosure **)&invariant->last_execution);
666 gct->evac_step = saved_evac_step;
667 gct->failed_to_evac = rtsTrue; // mutable
668 p += sizeofW(StgAtomicInvariant);
672 case INVARIANT_CHECK_QUEUE:
674 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
676 evacuate((StgClosure **)&queue->invariant);
677 evacuate((StgClosure **)&queue->my_execution);
678 evacuate((StgClosure **)&queue->next_queue_entry);
679 gct->evac_step = saved_evac_step;
680 gct->failed_to_evac = rtsTrue; // mutable
681 p += sizeofW(StgInvariantCheckQueue);
686 barf("scavenge: unimplemented/strange closure type %d @ %p",
691 * We need to record the current object on the mutable list if
692 * (a) It is actually mutable, or
693 * (b) It contains pointers to a younger generation.
694 * Case (b) arises if we didn't manage to promote everything that
695 * the current object points to into the current generation.
697 if (gct->failed_to_evac) {
698 gct->failed_to_evac = rtsFalse;
699 if (bd->gen_no > 0) {
700 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
// Update per-thread GC statistics and push the finished block.
706 gct->copied += ws->todo_free - bd->free;
710 debugTrace(DEBUG_gc, " scavenged %ld bytes",
711 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
713 // update stats: this is a block that has been scavenged
714 gct->scanned += bd->free - bd->u.scan;
715 bd->u.scan = bd->free;
717 if (bd != ws->todo_bd) {
718 // we're not going to evac any more objects into
719 // this block, so push it now.
720 push_scanned_block(bd, ws);
725 /* -----------------------------------------------------------------------------
726 Scavenge everything on the mark stack.
728 This is slightly different from scavenge():
729 - we don't walk linearly through the objects, so the scavenger
730 doesn't need to advance the pointer on to the next object.
731 -------------------------------------------------------------------------- */
// Scavenge objects popped off the mark stack (compacting collector).
// Unlike scavenge_block() there is no linear pointer to advance: each
// popped closure is scavenged in place, and objects that failed to
// promote are recorded on the oldest generation's mutable list. If the
// mark stack overflowed, fall back to a linear scan of the old blocks.
// NOTE(review): this extract elides lines from the original file.
734 scavenge_mark_stack(void)
738 step *saved_evac_step;
// Everything reachable from the mark stack is promoted to the oldest gen.
740 gct->evac_step = &oldest_gen->steps[0];
741 saved_evac_step = gct->evac_step;
744 while (!mark_stack_empty()) {
745 p = pop_mark_stack();
747 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
748 info = get_itbl((StgClosure *)p);
751 switch (info->type) {
756 rtsBool saved_eager_promotion = gct->eager_promotion;
758 StgMVar *mvar = ((StgMVar *)p);
759 gct->eager_promotion = rtsFalse;
760 evacuate((StgClosure **)&mvar->head);
761 evacuate((StgClosure **)&mvar->tail);
762 evacuate((StgClosure **)&mvar->value);
763 gct->eager_promotion = saved_eager_promotion;
765 if (gct->failed_to_evac) {
766 mvar->header.info = &stg_MVAR_DIRTY_info;
768 mvar->header.info = &stg_MVAR_CLEAN_info;
// Specialised FUN/THUNK/CONSTR layouts — no size bookkeeping needed here.
774 scavenge_fun_srt(info);
775 evacuate(&((StgClosure *)p)->payload[1]);
776 evacuate(&((StgClosure *)p)->payload[0]);
780 scavenge_thunk_srt(info);
781 evacuate(&((StgThunk *)p)->payload[1]);
782 evacuate(&((StgThunk *)p)->payload[0]);
786 evacuate(&((StgClosure *)p)->payload[1]);
787 evacuate(&((StgClosure *)p)->payload[0]);
792 scavenge_fun_srt(info);
793 evacuate(&((StgClosure *)p)->payload[0]);
798 scavenge_thunk_srt(info);
799 evacuate(&((StgThunk *)p)->payload[0]);
804 evacuate(&((StgClosure *)p)->payload[0]);
809 scavenge_fun_srt(info);
814 scavenge_thunk_srt(info);
822 scavenge_fun_srt(info);
// Generic layouts: walk the pointer words from the info table.
829 scavenge_thunk_srt(info);
830 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
831 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
832 evacuate((StgClosure **)p);
844 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
845 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
846 evacuate((StgClosure **)p);
852 StgBCO *bco = (StgBCO *)p;
853 evacuate((StgClosure **)&bco->instrs);
854 evacuate((StgClosure **)&bco->literals);
855 evacuate((StgClosure **)&bco->ptrs);
860 // don't need to do anything here: the only possible case
861 // is that we're in a 1-space compacting collector, with
862 // no "old" generation.
866 case IND_OLDGEN_PERM:
867 evacuate(&((StgInd *)p)->indirectee);
871 case MUT_VAR_DIRTY: {
872 rtsBool saved_eager_promotion = gct->eager_promotion;
874 gct->eager_promotion = rtsFalse;
875 evacuate(&((StgMutVar *)p)->var);
876 gct->eager_promotion = saved_eager_promotion;
878 if (gct->failed_to_evac) {
879 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
881 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
893 StgSelector *s = (StgSelector *)p;
894 evacuate(&s->selectee);
898 // A chunk of stack saved in a heap object
901 StgAP_STACK *ap = (StgAP_STACK *)p;
904 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
909 scavenge_PAP((StgPAP *)p);
913 scavenge_AP((StgAP *)p);
916 case MUT_ARR_PTRS_CLEAN:
917 case MUT_ARR_PTRS_DIRTY:
923 // We don't eagerly promote objects pointed to by a mutable
924 // array, but if we find the array only points to objects in
925 // the same or an older generation, we mark it "clean" and
926 // avoid traversing it during minor GCs.
927 saved_eager = gct->eager_promotion;
928 gct->eager_promotion = rtsFalse;
929 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
930 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
931 evacuate((StgClosure **)p);
933 gct->eager_promotion = saved_eager;
935 if (gct->failed_to_evac) {
936 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
938 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
941 gct->failed_to_evac = rtsTrue; // mutable anyhow.
945 case MUT_ARR_PTRS_FROZEN:
946 case MUT_ARR_PTRS_FROZEN0:
951 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
952 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
953 evacuate((StgClosure **)p);
956 // If we're going to put this object on the mutable list, then
957 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
958 if (gct->failed_to_evac) {
959 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
961 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
968 scavengeTSO((StgTSO*)p);
// STM objects: always treated as mutable.
972 case TVAR_WATCH_QUEUE:
974 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
976 evacuate((StgClosure **)&wq->closure);
977 evacuate((StgClosure **)&wq->next_queue_entry);
978 evacuate((StgClosure **)&wq->prev_queue_entry);
979 gct->evac_step = saved_evac_step;
980 gct->failed_to_evac = rtsTrue; // mutable
986 StgTVar *tvar = ((StgTVar *) p);
988 evacuate((StgClosure **)&tvar->current_value);
989 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
990 gct->evac_step = saved_evac_step;
991 gct->failed_to_evac = rtsTrue; // mutable
998 StgTRecChunk *tc = ((StgTRecChunk *) p);
999 TRecEntry *e = &(tc -> entries[0]);
1001 evacuate((StgClosure **)&tc->prev_chunk);
1002 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1003 evacuate((StgClosure **)&e->tvar);
1004 evacuate((StgClosure **)&e->expected_value);
1005 evacuate((StgClosure **)&e->new_value);
1007 gct->evac_step = saved_evac_step;
1008 gct->failed_to_evac = rtsTrue; // mutable
1014 StgTRecHeader *trec = ((StgTRecHeader *) p);
1016 evacuate((StgClosure **)&trec->enclosing_trec);
1017 evacuate((StgClosure **)&trec->current_chunk);
1018 evacuate((StgClosure **)&trec->invariants_to_check);
1019 gct->evac_step = saved_evac_step;
1020 gct->failed_to_evac = rtsTrue; // mutable
1024 case ATOMIC_INVARIANT:
1026 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1028 evacuate(&invariant->code);
1029 evacuate((StgClosure **)&invariant->last_execution);
1030 gct->evac_step = saved_evac_step;
1031 gct->failed_to_evac = rtsTrue; // mutable
1035 case INVARIANT_CHECK_QUEUE:
1037 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1039 evacuate((StgClosure **)&queue->invariant);
1040 evacuate((StgClosure **)&queue->my_execution);
1041 evacuate((StgClosure **)&queue->next_queue_entry);
1042 gct->evac_step = saved_evac_step;
1043 gct->failed_to_evac = rtsTrue; // mutable
1048 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1052 if (gct->failed_to_evac) {
1053 gct->failed_to_evac = rtsFalse;
1054 if (gct->evac_step) {
1055 recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no);
1059 // mark the next bit to indicate "scavenged"
1060 mark(q+1, Bdescr(q));
1062 } // while (!mark_stack_empty())
1064 // start a new linear scan if the mark stack overflowed at some point
1065 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
1066 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
1067 mark_stack_overflowed = rtsFalse;
1068 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
1069 oldgen_scan = oldgen_scan_bd->start;
1072 if (oldgen_scan_bd) {
1073 // push a new thing on the mark stack
1075 // find a closure that is marked but not scavenged, and start
1077 while (oldgen_scan < oldgen_scan_bd->free
1078 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
1082 if (oldgen_scan < oldgen_scan_bd->free) {
1084 // already scavenged?
1085 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
1086 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1089 push_mark_stack(oldgen_scan);
1090 // ToDo: bump the linear scan by the actual size of the object
1091 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
// End of this block: move the linear scan to the next old block.
1095 oldgen_scan_bd = oldgen_scan_bd->link;
1096 if (oldgen_scan_bd != NULL) {
1097 oldgen_scan = oldgen_scan_bd->start;
1103 /* -----------------------------------------------------------------------------
1104 Scavenge one object.
1106 This is used for objects that are temporarily marked as mutable
1107 because they contain old-to-new generation pointers. Only certain
1108 objects can have this property.
1109 -------------------------------------------------------------------------- */
// Scavenge a single object (used for objects on the mutable list that
// may hold old-to-new pointers). Returns whether anything failed to
// evacuate ("no_luck"), i.e. the object must stay on the mutable list;
// gct->failed_to_evac is reset before returning.
// NOTE(review): this extract elides lines from the original file.
1112 scavenge_one(StgPtr p)
1114 const StgInfoTable *info;
1115 step *saved_evac_step = gct->evac_step;
1118 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1119 info = get_itbl((StgClosure *)p);
1121 switch (info->type) {
1126 rtsBool saved_eager_promotion = gct->eager_promotion;
1128 StgMVar *mvar = ((StgMVar *)p);
1129 gct->eager_promotion = rtsFalse;
1130 evacuate((StgClosure **)&mvar->head);
1131 evacuate((StgClosure **)&mvar->tail);
1132 evacuate((StgClosure **)&mvar->value);
1133 gct->eager_promotion = saved_eager_promotion;
1135 if (gct->failed_to_evac) {
1136 mvar->header.info = &stg_MVAR_DIRTY_info;
1138 mvar->header.info = &stg_MVAR_CLEAN_info;
// Generic thunk/closure layouts: walk the pointer words.
1152 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1153 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1154 evacuate((StgClosure **)q);
1160 case FUN_1_0: // hardly worth specialising these guys
1176 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1177 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1178 evacuate((StgClosure **)q);
1184 case MUT_VAR_DIRTY: {
1186 rtsBool saved_eager_promotion = gct->eager_promotion;
1188 gct->eager_promotion = rtsFalse;
1189 evacuate(&((StgMutVar *)p)->var);
1190 gct->eager_promotion = saved_eager_promotion;
1192 if (gct->failed_to_evac) {
1193 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1195 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1204 case THUNK_SELECTOR:
1206 StgSelector *s = (StgSelector *)p;
1207 evacuate(&s->selectee);
1213 StgAP_STACK *ap = (StgAP_STACK *)p;
1216 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1217 p = (StgPtr)ap->payload + ap->size;
1222 p = scavenge_PAP((StgPAP *)p);
1226 p = scavenge_AP((StgAP *)p);
1230 // nothing to follow
1233 case MUT_ARR_PTRS_CLEAN:
1234 case MUT_ARR_PTRS_DIRTY:
1237 rtsBool saved_eager;
1239 // We don't eagerly promote objects pointed to by a mutable
1240 // array, but if we find the array only points to objects in
1241 // the same or an older generation, we mark it "clean" and
1242 // avoid traversing it during minor GCs.
1243 saved_eager = gct->eager_promotion;
1244 gct->eager_promotion = rtsFalse;
1246 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1247 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1248 evacuate((StgClosure **)p);
1250 gct->eager_promotion = saved_eager;
1252 if (gct->failed_to_evac) {
1253 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1255 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1258 gct->failed_to_evac = rtsTrue;
1262 case MUT_ARR_PTRS_FROZEN:
1263 case MUT_ARR_PTRS_FROZEN0:
1265 // follow everything
1268 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1269 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1270 evacuate((StgClosure **)p);
1273 // If we're going to put this object on the mutable list, then
1274 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1275 if (gct->failed_to_evac) {
1276 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1278 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1285 scavengeTSO((StgTSO*)p);
// STM objects: always treated as mutable.
1289 case TVAR_WATCH_QUEUE:
1291 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
1293 evacuate((StgClosure **)&wq->closure);
1294 evacuate((StgClosure **)&wq->next_queue_entry);
1295 evacuate((StgClosure **)&wq->prev_queue_entry);
1296 gct->evac_step = saved_evac_step;
1297 gct->failed_to_evac = rtsTrue; // mutable
1303 StgTVar *tvar = ((StgTVar *) p);
1305 evacuate((StgClosure **)&tvar->current_value);
1306 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1307 gct->evac_step = saved_evac_step;
1308 gct->failed_to_evac = rtsTrue; // mutable
1314 StgTRecHeader *trec = ((StgTRecHeader *) p);
1316 evacuate((StgClosure **)&trec->enclosing_trec);
1317 evacuate((StgClosure **)&trec->current_chunk);
1318 evacuate((StgClosure **)&trec->invariants_to_check);
1319 gct->evac_step = saved_evac_step;
1320 gct->failed_to_evac = rtsTrue; // mutable
1327 StgTRecChunk *tc = ((StgTRecChunk *) p);
1328 TRecEntry *e = &(tc -> entries[0]);
1330 evacuate((StgClosure **)&tc->prev_chunk);
1331 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1332 evacuate((StgClosure **)&e->tvar);
1333 evacuate((StgClosure **)&e->expected_value);
1334 evacuate((StgClosure **)&e->new_value);
1336 gct->evac_step = saved_evac_step;
1337 gct->failed_to_evac = rtsTrue; // mutable
1341 case ATOMIC_INVARIANT:
1343 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1345 evacuate(&invariant->code);
1346 evacuate((StgClosure **)&invariant->last_execution);
1347 gct->evac_step = saved_evac_step;
1348 gct->failed_to_evac = rtsTrue; // mutable
1352 case INVARIANT_CHECK_QUEUE:
1354 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1356 evacuate((StgClosure **)&queue->invariant);
1357 evacuate((StgClosure **)&queue->my_execution);
1358 evacuate((StgClosure **)&queue->next_queue_entry);
1359 gct->evac_step = saved_evac_step;
1360 gct->failed_to_evac = rtsTrue; // mutable
1365 case IND_OLDGEN_PERM:
1367 evacuate(&((StgInd *)p)->indirectee);
// Disabled (#if 0) debugging code: measures how much was scavenged
// as a result of following this indirection.
1369 #if 0 && defined(DEBUG)
1370 if (RtsFlags.DebugFlags.gc)
1371 /* Debugging code to print out the size of the thing we just
1375 StgPtr start = gen->steps[0].scan;
1376 bdescr *start_bd = gen->steps[0].scan_bd;
1378 scavenge(&gen->steps[0]);
1379 if (start_bd != gen->steps[0].scan_bd) {
1380 size += (P_)BLOCK_ROUND_UP(start) - start;
1381 start_bd = start_bd->link;
1382 while (start_bd != gen->steps[0].scan_bd) {
1383 size += BLOCK_SIZE_W;
1384 start_bd = start_bd->link;
1386 size += gen->steps[0].scan -
1387 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
1389 size = gen->steps[0].scan - start;
1391 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1397 barf("scavenge_one: strange object %d", (int)(info->type));
// Report whether anything failed to promote, and reset the flag.
1400 no_luck = gct->failed_to_evac;
1401 gct->failed_to_evac = rtsFalse;
1405 /* -----------------------------------------------------------------------------
1406 Scavenging mutable lists.
1408 We treat the mutable list of each generation > N (i.e. all the
1409 generations older than the one being collected) as roots. We also
1410 remove non-mutable objects from the mutable list at this point.
1411 -------------------------------------------------------------------------- */
// Scavenge a generation's mutable list (a chain of blocks of closure
// pointers). Gathers DEBUG statistics by closure type, then scavenges
// each entry; objects that still point into a younger generation are
// put back on the list via recordMutableGen_GC. TSOs that are only
// link-dirty get the cheap scavenge_TSO_link treatment.
// NOTE(review): this extract elides lines from the original file.
1414 scavenge_mutable_list(bdescr *bd, generation *gen)
1418 gct->evac_step = &gen->steps[0];
1419 for (; bd != NULL; bd = bd->link) {
1420 for (q = bd->start; q < bd->free; q++) {
1422 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
// DEBUG-only census of what lives on the mutable list; CLEAN variants
// should never appear here, hence the barfs.
1425 switch (get_itbl((StgClosure *)p)->type) {
1427 barf("MUT_VAR_CLEAN on mutable list");
1429 mutlist_MUTVARS++; break;
1430 case MUT_ARR_PTRS_CLEAN:
1431 case MUT_ARR_PTRS_DIRTY:
1432 case MUT_ARR_PTRS_FROZEN:
1433 case MUT_ARR_PTRS_FROZEN0:
1434 mutlist_MUTARRS++; break;
1436 barf("MVAR_CLEAN on mutable list");
1438 mutlist_MVARS++; break;
1440 mutlist_OTHERS++; break;
1444 // Check whether this object is "clean", that is it
1445 // definitely doesn't point into a young generation.
1446 // Clean objects don't need to be scavenged. Some clean
1447 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1448 // list at all; others, such as MUT_ARR_PTRS_CLEAN
1449 // are always on the mutable list.
1451 switch (get_itbl((StgClosure *)p)->type) {
1452 case MUT_ARR_PTRS_CLEAN:
1453 recordMutableGen_GC((StgClosure *)p,gen->no);
1456 StgTSO *tso = (StgTSO *)p;
1457 if ((tso->flags & TSO_DIRTY) == 0) {
1458 // Must be on the mutable list because its link
1460 ASSERT(tso->flags & TSO_LINK_DIRTY);
1462 scavenge_TSO_link(tso);
1463 if (gct->failed_to_evac) {
1464 recordMutableGen_GC((StgClosure *)p,gen->no);
1465 gct->failed_to_evac = rtsFalse;
1467 tso->flags &= ~TSO_LINK_DIRTY;
1476 if (scavenge_one(p)) {
1477 // didn't manage to promote everything, so put the
1478 // object back on the list.
1479 recordMutableGen_GC((StgClosure *)p,gen->no);
// Scavenge the saved mutable lists of every uncollected generation
// (> N) belonging to one capability, then free each saved chain.
1486 scavenge_capability_mut_lists (Capability *cap)
1490 /* Mutable lists from each generation > N
1491 * we want to *scavenge* these roots, not evacuate them: they're not
1492 * going to move in this GC.
1493 * Also do them in reverse generation order, for the usual reason:
1494 * namely to reduce the likelihood of spurious old->new pointers.
1496 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1497 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1498 freeChain_sync(cap->saved_mut_lists[g]);
1499 cap->saved_mut_lists[g] = NULL;
1503 /* -----------------------------------------------------------------------------
1504 Scavenging the static objects.
1506 We scavenge the list of static objects (gct->static_objects):
1507 each object is moved onto scavenged_static_objects and its pointer
1508 fields are evacuated to the oldest generation.
1509 -------------------------------------------------------------------------- */
// Scavenge the static objects queued on gct->static_objects: each
// object is unlinked from that list, pushed onto
// gct->scavenged_static_objects, and its pointer fields are
// evacuated.  Static objects never move themselves, so everything
// they reference is evacuated straight to the oldest generation.
1512 scavenge_static(void)
1515 const StgInfoTable *info;
1517 debugTrace(DEBUG_gc, "scavenging static objects");
1519 /* Always evacuate straight to the oldest generation for static
1521 gct->evac_step = &oldest_gen->steps[0];
1523 /* keep going until we've scavenged all the objects on the linked
1528 /* get the next static object from the list. Remember, there might
1529 * be more stuff on this list after each evacuation...
1530 * (static_objects is a global)
1532 p = gct->static_objects;
// List-end sentinel: nothing left to scavenge.
1533 if (p == END_OF_STATIC_LIST) {
1537 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1540 if (info->type==RBH)
1541 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1543 // make sure the info pointer is into text space
1545 /* Take this object *off* the static_objects list,
1546 * and put it on the scavenged_static_objects list.
1548 gct->static_objects = *STATIC_LINK(info,p);
1549 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1550 gct->scavenged_static_objects = p;
1552 switch (info -> type) {
// Static indirection (case label elided from this listing):
// follow and evacuate the indirectee.
1556 StgInd *ind = (StgInd *)p;
1557 evacuate(&ind->indirectee);
1559 /* might fail to evacuate it, in which case we have to pop it
1560 * back on the mutable list of the oldest generation. We
1561 * leave it *on* the scavenged_static_objects list, though,
1562 * in case we visit this object again.
1564 if (gct->failed_to_evac) {
1565 gct->failed_to_evac = rtsFalse;
1566 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
// Static thunk: only its SRT needs scavenging.
1572 scavenge_thunk_srt(info);
// Static function: likewise, scavenge its SRT.
1576 scavenge_fun_srt(info);
// Static constructor: evacuate every pointer in the payload.
1583 next = (P_)p->payload + info->layout.payload.ptrs;
1584 // evacuate the pointers
1585 for (q = (P_)p->payload; q < next; q++) {
1586 evacuate((StgClosure **)q);
1592 barf("scavenge_static: strange closure %d", (int)(info->type));
// Nothing here should ever fail to evacuate (everything goes to
// the oldest generation, which always succeeds).
1595 ASSERT(gct->failed_to_evac == rtsFalse);
1599 /* -----------------------------------------------------------------------------
1600 scavenge a chunk of memory described by a bitmap
1601 -------------------------------------------------------------------------- */
// Scavenge `size` words starting at p against a large (multi-word)
// bitmap.  A clear bit (0) marks a pointer word, which is evacuated;
// a set bit marks a non-pointer, which is skipped.  The current
// bitmap word is reloaded every BITS_IN(W_) entries.
1604 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1610 bitmap = large_bitmap->bitmap[b];
1611 for (i = 0; i < size; ) {
1612 if ((bitmap & 1) == 0) {
// bit clear => this word is a live pointer
1613 evacuate((StgClosure **)p);
// crossed into the next bitmap word; reload it
1617 if (i % BITS_IN(W_) == 0) {
1619 bitmap = large_bitmap->bitmap[b];
1621 bitmap = bitmap >> 1;
// Scavenge `size` words at p against a single-word bitmap: clear bit
// => pointer (evacuate), set bit => non-pointer (skip).  Returns the
// pointer advanced past the scanned words, as used by callers via
// `p = scavenge_small_bitmap(...)`.
1626 STATIC_INLINE StgPtr
1627 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1630 if ((bitmap & 1) == 0) {
1631 evacuate((StgClosure **)p);
1634 bitmap = bitmap >> 1;
1640 /* -----------------------------------------------------------------------------
1641 scavenge_stack walks over a section of stack and evacuates all the
1642 objects pointed to by it. We can use the same code for walking
1643 AP_STACK_UPDs, since these are just sections of copied stack.
1644 -------------------------------------------------------------------------- */
// Walk the stack chunk [p, stack_end), decoding each activation
// record via its return info table and evacuating the pointers it
// contains.  Update frames are handled specially (see the long
// comment below); the remaining frames are small-bitmap,
// large-bitmap, BCO, RET_DYN and RET_FUN layouts.
1647 scavenge_stack(StgPtr p, StgPtr stack_end)
1649 const StgRetInfoTable* info;
1654 * Each time around this loop, we are looking at a chunk of stack
1655 * that starts with an activation record.
1658 while (p < stack_end) {
1659 info = get_ret_itbl((StgClosure *)p);
1661 switch (info->i.type) {
1664 // In SMP, we can get update frames that point to indirections
1665 // when two threads evaluate the same thunk. We do attempt to
1666 // discover this situation in threadPaused(), but it's
1667 // possible that the following sequence occurs:
1676 // Now T is an indirection, and the update frame is already
1677 // marked on A's stack, so we won't traverse it again in
1678 // threadPaused(). We could traverse the whole stack again
1679 // before GC, but that seems like overkill.
1681 // Scavenging this update frame as normal would be disastrous;
1682 // the updatee would end up pointing to the value. So we turn
1683 // the indirection into an IND_PERM, so that evacuate will
1684 // copy the indirection into the old generation instead of
1687 // Note [upd-black-hole]
1688 // One slight hiccup is that the THUNK_SELECTOR machinery can
1689 // overwrite the updatee with an IND. In parallel GC, this
1690 // could even be happening concurrently, so we can't check for
1691 // the IND. Fortunately if we assume that blackholing is
1692 // happening (either lazy or eager), then we can be sure that
1693 // the updatee is never a THUNK_SELECTOR and we're ok.
1694 // NB. this is a new invariant: blackholing is not optional.
1697 const StgInfoTable *i;
1698 StgClosure *updatee;
1700 updatee = ((StgUpdateFrame *)p)->updatee;
1701 i = updatee->header.info;
// Only rewrite the updatee's info table if it has not already
// been forwarded by a previous evacuation.
1702 if (!IS_FORWARDING_PTR(i)) {
1703 type = get_itbl(updatee)->type;
1705 updatee->header.info = &stg_IND_PERM_info;
1706 } else if (type == IND_OLDGEN) {
1707 updatee->header.info = &stg_IND_OLDGEN_PERM_info;
1710 evacuate(&((StgUpdateFrame *)p)->updatee);
1711 ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
1712 p += sizeofW(StgUpdateFrame);
1716 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1717 case CATCH_STM_FRAME:
1718 case CATCH_RETRY_FRAME:
1719 case ATOMICALLY_FRAME:
1723 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1724 size = BITMAP_SIZE(info->i.layout.bitmap);
1725 // NOTE: the payload starts immediately after the info-ptr, we
1726 // don't have an StgHeader in the same sense as a heap closure.
1728 p = scavenge_small_bitmap(p, size, bitmap);
1732 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
// BCO return frame (case label elided from this listing):
// evacuate the BCO itself, then scavenge its bitmapped payload.
1740 evacuate((StgClosure **)p);
1743 size = BCO_BITMAP_SIZE(bco);
1744 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1749 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1754 size = GET_LARGE_BITMAP(&info->i)->size;
1756 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1758 // and don't forget to follow the SRT
1762 // Dynamic bitmap: the mask is stored on the stack, and
1763 // there are a number of non-pointers followed by a number
1764 // of pointers above the bitmapped area. (see StgMacros.h,
1769 dyn = ((StgRetDyn *)p)->liveness;
1771 // traverse the bitmap first
1772 bitmap = RET_DYN_LIVENESS(dyn);
1773 p = (P_)&((StgRetDyn *)p)->payload[0];
1774 size = RET_DYN_BITMAP_SIZE;
1775 p = scavenge_small_bitmap(p, size, bitmap);
1777 // skip over the non-ptr words
1778 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1780 // follow the ptr words
1781 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1782 evacuate((StgClosure **)p);
// RET_FUN frame: evacuate the function closure, then scavenge
// the argument block according to the function's info table.
1790 StgRetFun *ret_fun = (StgRetFun *)p;
1791 StgFunInfoTable *fun_info;
1793 evacuate(&ret_fun->fun);
1794 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1795 p = scavenge_arg_block(fun_info, ret_fun->payload);
1800 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1805 /*-----------------------------------------------------------------------------
1806 scavenge the large object list.
1808 evac_step set by caller; similar games played with evac_step as with
1809 scavenge() - see comment at the top of scavenge(). Most large
1810 objects are (repeatedly) mutable, so most of the time evac_step will
1812 --------------------------------------------------------------------------- */
// Scavenge the large objects on this workspace's todo list.  Each
// block is first moved onto the step's scavenged_large_objects list
// (under the step's spin lock, since other GC threads may touch the
// same step), then the object is scavenged with scavenge_one(); if
// that reports incomplete promotion, the object is recorded on the
// mutable list of its generation.
1815 scavenge_large (step_workspace *ws)
1820 gct->evac_step = ws->step;
1822 bd = ws->todo_large_objects;
// Re-read ws->todo_large_objects each iteration: evacuation may
// push new large objects onto the front of the list.
1824 for (; bd != NULL; bd = ws->todo_large_objects) {
1826 // take this object *off* the large objects list and put it on
1827 // the scavenged large objects list. This is so that we can
1828 // treat new_large_objects as a stack and push new objects on
1829 // the front when evacuating.
1830 ws->todo_large_objects = bd->link;
1832 ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
1833 dbl_link_onto(bd, &ws->step->scavenged_large_objects);
1834 ws->step->n_scavenged_large_blocks += bd->blocks;
1835 RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
1838 if (scavenge_one(p)) {
1839 if (ws->step->gen_no > 0) {
1840 recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
// account the scanned words for GC statistics
1845 gct->scanned += closure_sizeW((StgClosure*)p);
1849 /* ----------------------------------------------------------------------------
1850 Look for work to do.
1852 We look for the oldest step that has either a todo block that can
1853 be scanned, or a block of work on the global queue that we can
1856 It is important to take work from the *oldest* generation that
1857 has work available, because that minimizes the likelihood of
1858 evacuating objects into a young generation when they should have
1859 been eagerly promoted. This really does make a difference (the
1860 cacheprof benchmark is one that is affected).
1862 We also want to scan the todo block if possible before grabbing
1863 work from the global queue, the reason being that we don't want to
1864 steal work from the global queue and starve other threads if there
1865 is other work we can usefully be doing.
1866 ------------------------------------------------------------------------- */
// Look for scavenging work, oldest step first (see the comment
// above for why).  Returns rtsTrue iff any work was found and done.
// Under THREADED_RTS, if no local work is found and work_stealing is
// enabled, also try to steal todo blocks from other GC threads.
1869 scavenge_find_work (void)
1873 rtsBool did_something, did_anything;
// statistics: count calls to this function for this GC thread
1876 gct->scav_find_work++;
1878 did_anything = rtsFalse;
1881 did_something = rtsFalse;
1882 for (s = total_steps-1; s >= 0; s--) {
// skip step 0 when there is more than one generation
// (objects there are handled via the younger steps)
1883 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1886 ws = &gct->steps[s];
1888 gct->scan_bd = NULL;
1890 // If we have a scan block with some work to do,
1891 // scavenge everything up to the free pointer.
1892 if (ws->todo_bd->u.scan < ws->todo_free)
1894 scavenge_block(ws->todo_bd);
1895 did_something = rtsTrue;
1899 // If we have any large objects to scavenge, do them now.
1900 if (ws->todo_large_objects) {
1902 did_something = rtsTrue;
// otherwise, try for a full todo block from our local queue
1906 if ((bd = grab_local_todo_block(ws)) != NULL) {
1908 did_something = rtsTrue;
1913 if (did_something) {
1914 did_anything = rtsTrue;
1918 #if defined(THREADED_RTS)
1919 if (work_stealing) {
1920 // look for work to steal
1921 for (s = total_steps-1; s >= 0; s--) {
1922 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1925 if ((bd = steal_todo_block(s)) != NULL) {
1927 did_something = rtsTrue;
1932 if (did_something) {
1933 did_anything = rtsTrue;
1939 // only return when there is no more work to do
1941 return did_anything;
1944 /* ----------------------------------------------------------------------------
1945 Scavenge until we can't find anything more to scavenge.
1946 ------------------------------------------------------------------------- */
1954 work_to_do = rtsFalse;
1956 // scavenge static objects
1957 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
1958 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
1962 // scavenge objects in compacted generation
1963 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1964 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1965 scavenge_mark_stack();
1966 work_to_do = rtsTrue;
1969 // Order is important here: we want to deal in full blocks as
1970 // much as possible, so go for global work in preference to
1971 // local work. Only if all the global work has been exhausted
1972 // do we start scavenging the fragments of blocks in the local
1974 if (scavenge_find_work()) goto loop;
1976 if (work_to_do) goto loop;