1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
27 #include "Capability.h"
28 #include "LdvProfile.h"
/* Forward declarations for the stack walker and large-bitmap walker
 * defined later in this file. */
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,

#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
/* In the THREADED_RTS build this file is also compiled without
 * PARALLEL_GC to produce a single-threaded variant of the collector;
 * the entry points are renamed to their "...1" versions here so both
 * copies can be linked into the same RTS.
 * NOTE(review): "scavenge_capability_mut_Lists1" has a capital 'L' —
 * presumably it matches the definition at the bottom of this file;
 * confirm against the non-PARALLEL_GC build. */
# define evacuate(a) evacuate1(a)
# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
44 /* -----------------------------------------------------------------------------
46 -------------------------------------------------------------------------- */
// Scavenge just the _link field of a TSO.  Split out from scavengeTSO
// because the link is treated specially: it is a weak pointer when the
// TSO is blocked on a black hole (see comment below).
scavenge_TSO_link (StgTSO *tso)
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
// Scavenge a TSO (thread state object): evacuate every closure it
// points to (blocking info, blocked-exception queue, STM trec, the
// thread's own stack, and its link), with eager promotion disabled
// while doing so.  Sets/clears TSO_LINK_DIRTY according to whether the
// link field still points into a younger generation afterwards.
scavengeTSO (StgTSO *tso)
    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list. We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
    // Disable eager promotion for the duration: a TSO is mutable, so
    // promoting what it points at eagerly isn't a good idea.
    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;
    // block_info.closure is only a closure pointer for these blocking
    // reasons; for others it holds non-pointer data.
    if ( tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
        evacuate(&tso->block_info.closure);
    evacuate((StgClosure **)&tso->blocked_exceptions);
    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);
    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
    if (gct->failed_to_evac) {
        scavenge_TSO_link(tso);
    scavenge_TSO_link(tso);
    // Record whether the link field still needs scavenging on the next
    // minor GC (i.e. whether it failed to evacuate fully).
    if (gct->failed_to_evac) {
        tso->flags |= TSO_LINK_DIRTY;
        tso->flags &= ~TSO_LINK_DIRTY;
    gct->eager_promotion = saved_eager;
107 /* -----------------------------------------------------------------------------
108 Blocks of function args occur on the stack (at the top) and
110 -------------------------------------------------------------------------- */
// Scavenge a block of function arguments laid out according to the
// function's argument bitmap: small bitmaps are walked inline (a 0 bit
// marks a pointer to evacuate), large bitmaps are delegated to
// scavenge_large_bitmap().
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
    switch (fun_info->f.fun_type) {
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        // large-bitmap case: size comes from the bitmap itself
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        // standard argument pattern: bitmap looked up by fun_type
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
        // a clear bit means this word is a pointer
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        bitmap = bitmap >> 1;
// Scavenge the payload of a PAP or AP closure.  The layout of the
// payload is determined by the argument bitmap of the function being
// applied; BCOs carry their own bitmap.
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
    StgFunInfoTable *fun_info;

    // fun may carry a pointer tag; strip it before looking at the itbl
    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    switch (fun_info->f.fun_type) {
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        // BCO: use the bitmap stored in the BCO itself
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        // a clear bit means this word is a pointer
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        bitmap = bitmap >> 1;
// Scavenge a PAP: evacuate the function (done elsewhere in the elided
// lines — NOTE(review): confirm fun is evacuated before the payload),
// then walk the payload using the function's argument bitmap.
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
// Scavenge an AP: identical layout handling to a PAP.
scavenge_AP (StgAP *ap)
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
200 /* -----------------------------------------------------------------------------
202 -------------------------------------------------------------------------- */
204 /* Similar to scavenge_large_bitmap(), but we don't write back the
205 * pointers we get back from evacuate().
// Walk a large SRT: for each set bit in the multi-word bitmap,
// evacuate the corresponding SRT entry.  Unlike scavenge_large_bitmap
// the evacuated pointers are not written back (SRT entries are static).
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
    bitmap = large_srt->l.bitmap[b];
    size = (nat)large_srt->l.size;
    p = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        // here a SET bit marks a pointer (opposite of arg bitmaps)
        if ((bitmap & 1) != 0) {
        // fetch the next bitmap word when this one is exhausted
        if (i % BITS_IN(W_) == 0) {
            bitmap = large_srt->l.bitmap[b];
        bitmap = bitmap >> 1;
233 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
234 * srt field in the info table. That's ok, because we'll
235 * never dereference it.
// Scavenge an SRT (static reference table).  A bitmap value of all
// ones marks a large SRT, handled by scavenge_large_srt_bitmap();
// otherwise the bitmap is walked inline, a set bit meaning "evacuate
// this entry".
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.

            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
        bitmap = bitmap >> 1;
// Scavenge the SRT of a thunk.  SRTs only matter during a major GC —
// presumably because SRT entries point to static closures, which are
// only collected then — so this is a no-op in minor collections.
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
// Scavenge the SRT of a function closure.  Like scavenge_thunk_srt,
// a no-op outside of a major GC.
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
299 /* -----------------------------------------------------------------------------
300 Scavenge a block from the given scan pointer up to bd->free.
302 evac_step is set by the caller to be either zero (for a step in a
303 generation < N) or G where G is the generation of the step being
306 We sometimes temporarily change evac_step back to zero if we're
307 scavenging a mutable object where eager promotion isn't such a good
309 -------------------------------------------------------------------------- */
// Linearly scavenge one block: walk closures from bd->u.scan up to
// bd->free, evacuating every pointer they contain and advancing p by
// each closure's size.  Mutable objects are scavenged with eager
// promotion disabled and re-marked clean/dirty afterwards; any object
// that failed to fully evacuate its children is recorded on the
// mutable list of its generation.
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
    step *saved_evac_step;
    rtsBool saved_eager_promotion;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
               bd->start, bd->gen_no, bd->step->no, bd->u.scan);

    // objects scavenged here are evacuated to this block's own step
    gct->evac_step = bd->step;
    saved_evac_step = gct->evac_step;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->steps[bd->step->abs_no];

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(bd->link == NULL);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        switch (info->type) {

            // MVAR: mutable, so scavenge fields with eager promotion off,
            // then mark the MVar clean or dirty depending on whether all
            // its fields evacuated fully.
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
                mvar->header.info = &stg_MVAR_CLEAN_info;
            p += sizeofW(StgMVar);

            // Specialised cases for small FUN/THUNK/CONSTR layouts:
            // the suffix on p's increment encodes the payload size.
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;

            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;

            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;

            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;

            scavenge_fun_srt(info);
            p += sizeofW(StgHeader) + 1;

            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;

            scavenge_fun_srt(info);
            p += sizeofW(StgHeader) + 2;

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;

            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;

            scavenge_fun_srt(info);

            // generic THUNK: walk ptrs first, then skip nptrs words
            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            p += info->layout.payload.nptrs;

            // generic FUN/CONSTR layout: same ptrs-then-nptrs walk
            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            p += info->layout.payload.nptrs;

            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);

            // IND_PERM in an old generation becomes IND_OLDGEN_PERM;
            // profiling bookkeeping treats this as death+rebirth.
            if (bd->gen_no != 0) {
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);

        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);

            // MUT_VAR: same clean/dirty protocol as MVAR above
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            p += sizeofW(StgMutVar);

            p += BLACKHOLE_sizeW();

            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();

        // A chunk of stack saved in a heap object
            StgAP_STACK *ap = (StgAP_STACK *)p;

            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;

            p = scavenge_PAP((StgPAP *)p);

            p = scavenge_AP((StgAP *)p);

            // ARR_WORDS: no pointers, just skip
            p += arr_words_sizeW((StgArrWords *)p);

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;

            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;

            StgTSO *tso = (StgTSO *)p;

        // STM objects below: each restores evac_step (presumably
        // cleared before scavenging — lines elided; confirm) and is
        // unconditionally treated as mutable.
        case TVAR_WATCH_QUEUE:
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVarWatchQueue);

            StgTVar *tvar = ((StgTVar *) p);
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVar);

            StgTRecHeader *trec = ((StgTRecHeader *) p);
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecHeader);

            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);

        case ATOMIC_INVARIANT:
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgAtomicInvariant);

        case INVARIANT_CHECK_QUEUE:
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgInvariantCheckQueue);

            barf("scavenge: unimplemented/strange closure type %d @ %p",

        /*
         * We need to record the current object on the mutable list if
         * (a) It is actually mutable, or
         * (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, bd->gen_no);

    gct->copied += ws->todo_free - bd->free;

    debugTrace(DEBUG_gc, "   scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
725 /* -----------------------------------------------------------------------------
726 Scavenge everything on the mark stack.
728 This is slightly different from scavenge():
729 - we don't walk linearly through the objects, so the scavenger
730 doesn't need to advance the pointer on to the next object.
731 -------------------------------------------------------------------------- */
// Scavenge every object on the mark stack (used by the compacting
// collector for the oldest generation).  Unlike scavenge_block() we
// pop arbitrary objects, so no pointer is advanced past each one.
// After the mark stack empties, fall back to a linear scan of the old
// generation if the stack overflowed at some point.
scavenge_mark_stack(void)
    step *saved_evac_step;

    // everything scavenged here evacuates into the oldest generation
    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;

    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        switch (info->type) {

            // MVAR: scavenge fields with eager promotion off, then
            // mark clean/dirty (same protocol as scavenge_block)
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
                mvar->header.info = &stg_MVAR_CLEAN_info;

            // specialised small FUN/THUNK/CONSTR layouts
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);

            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);

            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_fun_srt(info);

            scavenge_thunk_srt(info);

            scavenge_fun_srt(info);

            // generic THUNK: walk pointer fields
            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);

            // generic FUN/CONSTR layout
            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);

            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);

            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.

        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);

        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;

            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);

        // A chunk of stack saved in a heap object
            StgAP_STACK *ap = (StgAP_STACK *)p;

            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);

            scavenge_PAP((StgPAP *)p);

            scavenge_AP((StgAP *)p);

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;

            gct->failed_to_evac = rtsTrue; // mutable anyhow.

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;

            scavengeTSO((StgTSO*)p);

        // STM objects: restore evac_step and always treat as mutable
        case TVAR_WATCH_QUEUE:
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable

            StgTVar *tvar = ((StgTVar *) p);
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable

            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable

            StgTRecHeader *trec = ((StgTRecHeader *) p);
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable

        case ATOMIC_INVARIANT:
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable

        case INVARIANT_CHECK_QUEUE:
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable

            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no);

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())

    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;

    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
        // find a closure that is marked but not scavenged, and start
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {

        if (oldgen_scan < oldgen_scan_bd->free) {
            // already scavenged?
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;

            // move on to the next block when this one is exhausted
            oldgen_scan_bd = oldgen_scan_bd->link;
            if (oldgen_scan_bd != NULL) {
                oldgen_scan = oldgen_scan_bd->start;
1103 /* -----------------------------------------------------------------------------
1104 Scavenge one object.
1106 This is used for objects that are temporarily marked as mutable
1107 because they contain old-to-new generation pointers. Only certain
1108 objects can have this property.
1109 -------------------------------------------------------------------------- */
// Scavenge a single object (used for objects on the mutable list that
// are only there because they contain old-to-new pointers).  Returns
// whether the object still points into a younger generation after
// scavenging (i.e. must stay on the mutable list).
scavenge_one(StgPtr p)
    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

        // MVAR: clean/dirty protocol, eager promotion off while
        // scavenging fields
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
            mvar->header.info = &stg_MVAR_CLEAN_info;

        // generic THUNK layout: walk the pointer fields
        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);

    case FUN_1_0:			// hardly worth specialising these guys
        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);

    case MUT_VAR_DIRTY: {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;

    case THUNK_SELECTOR:
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);

        StgAP_STACK *ap = (StgAP_STACK *)p;

        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;

        p = scavenge_PAP((StgPAP *)p);

        p = scavenge_AP((StgAP *)p);

        // nothing to follow

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;

        gct->failed_to_evac = rtsTrue;

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;

        scavengeTSO((StgTSO*)p);

    // STM objects: restore evac_step, always treated as mutable
    case TVAR_WATCH_QUEUE:
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable

        StgTVar *tvar = ((StgTVar *) p);
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable

        StgTRecHeader *trec = ((StgTRecHeader *) p);
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable

        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable

    case ATOMIC_INVARIANT:
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable

    case INVARIANT_CHECK_QUEUE:
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable

        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated. See #3424.
    case IND_OLDGEN_PERM:
        evacuate(&((StgInd *)p)->indirectee);

#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * (disabled: #if 0) */
            StgPtr start = gen->steps[0].scan;
            bdescr *start_bd = gen->steps[0].scan_bd;

            scavenge(&gen->steps[0]);
            if (start_bd != gen->steps[0].scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->steps[0].scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                size += gen->steps[0].scan -
                    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
                size = gen->steps[0].scan - start;
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));

        barf("scavenge_one: strange object %d", (int)(info->type));

    // report whether something failed to evacuate, then reset the flag
    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
1409 /* -----------------------------------------------------------------------------
1410 Scavenging mutable lists.
1412 We treat the mutable list of each generation > N (i.e. all the
1413 generations older than the one being collected) as roots. We also
1414 remove non-mutable objects from the mutable list at this point.
1415 -------------------------------------------------------------------------- */
// Scavenge one generation's mutable list (a chain of blocks of closure
// pointers).  Objects that still point into a younger generation after
// scavenging are put back on the list; others drop off it.
scavenge_mutable_list(bdescr *bd, generation *gen)
    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

            // mutable-list census counters (debug statistics); CLEAN
            // MVars/MutVars should never appear here
            switch (get_itbl((StgClosure *)p)->type) {
                barf("MUT_VAR_CLEAN on mutable list");
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
                barf("MVAR_CLEAN on mutable list");
                mutlist_MVARS++; break;
                mutlist_OTHERS++; break;

            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged. Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN
            // are always on the mutable list.
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);

                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Must be on the mutable list because its link
                    ASSERT(tso->flags & TSO_LINK_DIRTY);
                    // only the link is dirty: scavenge just that field
                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                        tso->flags &= ~TSO_LINK_DIRTY;

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
// Scavenge the saved mutable lists of one capability, oldest
// generation first, freeing each list's blocks once scavenged.
scavenge_capability_mut_lists (Capability *cap)
    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
1507 /* -----------------------------------------------------------------------------
1508 Scavenging the static objects.
1510 Static objects reachable in a major GC are treated as roots: we take
1511 each object off gct->static_objects, scavenge it, and move it onto
1512 the gct->scavenged_static_objects list.
1513 -------------------------------------------------------------------------- */
/* Scavenge the static objects evacuated so far.  Objects are popped one
 * at a time off gct->static_objects (evacuating a static object's
 * children may push more objects onto that list) and transferred to
 * gct->scavenged_static_objects.
 * NOTE(review): several case labels are elided from this listing; the
 * per-case comments below are based on the visible statements.
 */
1516 scavenge_static(void)
1519 const StgInfoTable *info;
1521 debugTrace(DEBUG_gc, "scavenging static objects");
1523 /* Always evacuate straight to the oldest generation for static
1525 gct->evac_step = &oldest_gen->steps[0];
1527 /* keep going until we've scavenged all the objects on the linked
1532 /* get the next static object from the list. Remember, there might
1533 * be more stuff on this list after each evacuation...
1534 * (static_objects is a global)
1536 p = gct->static_objects;
1537 if (p == END_OF_STATIC_LIST) {
1541 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1544 if (info->type==RBH)
1545 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1547 // make sure the info pointer is into text space
1549 /* Take this object *off* the static_objects list,
1550 * and put it on the scavenged_static_objects list.
1552 gct->static_objects = *STATIC_LINK(info,p);
1553 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1554 gct->scavenged_static_objects = p;
1556 switch (info -> type) {
/* Static indirection: evacuate the indirectee. */
1560 StgInd *ind = (StgInd *)p;
1561 evacuate(&ind->indirectee);
1563 /* might fail to evacuate it, in which case we have to pop it
1564 * back on the mutable list of the oldest generation. We
1565 * leave it *on* the scavenged_static_objects list, though,
1566 * in case we visit this object again.
1568 if (gct->failed_to_evac) {
1569 gct->failed_to_evac = rtsFalse;
1570 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
/* Static thunks/functions: only their SRTs can hold pointers. */
1576 scavenge_thunk_srt(info);
1580 scavenge_fun_srt(info);
/* Presumably the CONSTR_STATIC case (label elided): evacuate every
 * pointer word in the payload — TODO confirm against full source. */
1587 next = (P_)p->payload + info->layout.payload.ptrs;
1588 // evacuate the pointers
1589 for (q = (P_)p->payload; q < next; q++) {
1590 evacuate((StgClosure **)q);
1596 barf("scavenge_static: strange closure %d", (int)(info->type));
/* Other than IND_STATIC (handled above), evacuation into the oldest
 * generation cannot fail, so the flag must still be clear here. */
1599 ASSERT(gct->failed_to_evac == rtsFalse);
1603 /* -----------------------------------------------------------------------------
1604 scavenge a chunk of memory described by a bitmap
1605 -------------------------------------------------------------------------- */
/* Scavenge `size` words starting at p, guided by a large bitmap: a 0
 * bit marks a pointer word, which is evacuated; a 1 bit marks a
 * non-pointer.  A fresh bitmap word is loaded from
 * large_bitmap->bitmap[] every BITS_IN(W_) entries.
 * NOTE(review): lines are elided (i/p/b increments not all visible).
 */
1608 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1614 bitmap = large_bitmap->bitmap[b];
1615 for (i = 0; i < size; ) {
1616 if ((bitmap & 1) == 0) {
1617 evacuate((StgClosure **)p);
1621 if (i % BITS_IN(W_) == 0) {
/* Crossed a word boundary in the bitmap: reload the next word. */
1623 bitmap = large_bitmap->bitmap[b];
1625 bitmap = bitmap >> 1;
/* Scavenge `size` words at p under a single-word bitmap: a 0 bit marks
 * a pointer word, which is evacuated.  Returns the pointer past the
 * scanned region (presumably p advanced by size — loop header and
 * return are elided from this listing; confirm against full source).
 */
1630 STATIC_INLINE StgPtr
1631 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1634 if ((bitmap & 1) == 0) {
1635 evacuate((StgClosure **)p);
1638 bitmap = bitmap >> 1;
1644 /* -----------------------------------------------------------------------------
1645 scavenge_stack walks over a section of stack and evacuates all the
1646 objects pointed to by it. We can use the same code for walking
1647 AP_STACK_UPDs, since these are just sections of copied stack.
1648 -------------------------------------------------------------------------- */
/* Walk the stack section [p, stack_end), one activation record at a
 * time, evacuating every closure pointer found.  The same code handles
 * the copied stack sections inside AP_STACKs.
 * NOTE(review): lines are elided from this listing (several case labels
 * and increments are missing); comments cover the visible code only.
 */
1651 scavenge_stack(StgPtr p, StgPtr stack_end)
1653 const StgRetInfoTable* info;
1658 * Each time around this loop, we are looking at a chunk of stack
1659 * that starts with an activation record.
1662 while (p < stack_end) {
1663 info = get_ret_itbl((StgClosure *)p);
1665 switch (info->i.type) {
1668 // In SMP, we can get update frames that point to indirections
1669 // when two threads evaluate the same thunk. We do attempt to
1670 // discover this situation in threadPaused(), but it's
1671 // possible that the following sequence occurs:
1680 // Now T is an indirection, and the update frame is already
1681 // marked on A's stack, so we won't traverse it again in
1682 // threadPaused(). We could traverse the whole stack again
1683 // before GC, but that seems like overkill.
1685 // Scavenging this update frame as normal would be disastrous;
1686 // the updatee would end up pointing to the value. So we turn
1687 // the indirection into an IND_PERM, so that evacuate will
1688 // copy the indirection into the old generation instead of
1691 // Note [upd-black-hole]
1692 // One slight hiccup is that the THUNK_SELECTOR machinery can
1693 // overwrite the updatee with an IND. In parallel GC, this
1694 // could even be happening concurrently, so we can't check for
1695 // the IND. Fortunately if we assume that blackholing is
1696 // happening (either lazy or eager), then we can be sure that
1697 // the updatee is never a THUNK_SELECTOR and we're ok.
1698 // NB. this is a new invariant: blackholing is not optional.
/* UPDATE_FRAME case (label elided): peek at the updatee and make any
 * indirection permanent before evacuating it, per the note above. */
1701 const StgInfoTable *i;
1702 StgClosure *updatee;
1704 updatee = ((StgUpdateFrame *)p)->updatee;
1705 i = updatee->header.info;
1706 if (!IS_FORWARDING_PTR(i)) {
1707 type = get_itbl(updatee)->type;
/* Rewrite IND -> IND_PERM (and IND_OLDGEN -> IND_OLDGEN_PERM) so
 * evacuate copies the indirection rather than short-circuiting it. */
1709 updatee->header.info = &stg_IND_PERM_info;
1710 } else if (type == IND_OLDGEN) {
1711 updatee->header.info = &stg_IND_OLDGEN_PERM_info;
1714 evacuate(&((StgUpdateFrame *)p)->updatee);
1715 ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
1716 p += sizeofW(StgUpdateFrame);
1720 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1721 case CATCH_STM_FRAME:
1722 case CATCH_RETRY_FRAME:
1723 case ATOMICALLY_FRAME:
1727 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1728 size = BITMAP_SIZE(info->i.layout.bitmap);
1729 // NOTE: the payload starts immediately after the info-ptr, we
1730 // don't have an StgHeader in the same sense as a heap closure.
1732 p = scavenge_small_bitmap(p, size, bitmap);
/* Follow the frame's SRT as well (for RET_SMALL etc.). */
1736 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
/* RET_BCO (label elided): evacuate the BCO itself, then scavenge the
 * stack words it governs using the BCO's own bitmap. */
1744 evacuate((StgClosure **)p);
1747 size = BCO_BITMAP_SIZE(bco);
1748 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1753 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1758 size = GET_LARGE_BITMAP(&info->i)->size;
1760 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1762 // and don't forget to follow the SRT
1766 // Dynamic bitmap: the mask is stored on the stack, and
1767 // there are a number of non-pointers followed by a number
1768 // of pointers above the bitmapped area. (see StgMacros.h,
/* RET_DYN (label elided): liveness word read from the frame itself. */
1773 dyn = ((StgRetDyn *)p)->liveness;
1775 // traverse the bitmap first
1776 bitmap = RET_DYN_LIVENESS(dyn);
1777 p = (P_)&((StgRetDyn *)p)->payload[0];
1778 size = RET_DYN_BITMAP_SIZE;
1779 p = scavenge_small_bitmap(p, size, bitmap);
1781 // skip over the non-ptr words
1782 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1784 // follow the ptr words
1785 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1786 evacuate((StgClosure **)p);
/* RET_FUN: evacuate the function closure, then use its info table to
 * scavenge the argument block. */
1794 StgRetFun *ret_fun = (StgRetFun *)p;
1795 StgFunInfoTable *fun_info;
1797 evacuate(&ret_fun->fun);
1798 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1799 p = scavenge_arg_block(fun_info, ret_fun->payload);
1804 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1809 /*-----------------------------------------------------------------------------
1810 scavenge the large object list.
1812 evac_step set by caller; similar games played with evac_step as with
1813 scavenge() - see comment at the top of scavenge(). Most large
1814 objects are (repeatedly) mutable, so most of the time evac_step will
1816 --------------------------------------------------------------------------- */
/* Scavenge the large objects queued on a workspace's todo list.  Each
 * block is first moved (under the step's spin lock) onto the step's
 * scavenged_large_objects list, then its object is scavenged with
 * scavenge_one(); objects that retain young pointers are recorded on
 * the mutable list of their (old) generation.
 */
1819 scavenge_large (step_workspace *ws)
1824 gct->evac_step = ws->step;
1826 bd = ws->todo_large_objects;
1828 for (; bd != NULL; bd = ws->todo_large_objects) {
1830 // take this object *off* the large objects list and put it on
1831 // the scavenged large objects list. This is so that we can
1832 // treat new_large_objects as a stack and push new objects on
1833 // the front when evacuating.
1834 ws->todo_large_objects = bd->link;
1836 ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
1837 dbl_link_onto(bd, &ws->step->scavenged_large_objects);
1838 ws->step->n_scavenged_large_blocks += bd->blocks;
1839 RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
1842 if (scavenge_one(p)) {
/* Generation 0 has no mutable list; only record for older gens. */
1843 if (ws->step->gen_no > 0) {
1844 recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
/* Account the scanned words for GC statistics / load balancing. */
1849 gct->scanned += closure_sizeW((StgClosure*)p);
1853 /* ----------------------------------------------------------------------------
1854 Look for work to do.
1856 We look for the oldest step that has either a todo block that can
1857 be scanned, or a block of work on the global queue that we can
1860 It is important to take work from the *oldest* generation that we
1861 has work available, because that minimizes the likelihood of
1862 evacuating objects into a young generation when they should have
1863 been eagerly promoted. This really does make a difference (the
1864 cacheprof benchmark is one that is affected).
1866 We also want to scan the todo block if possible before grabbing
1867 work from the global queue, the reason being that we don't want to
1868 steal work from the global queue and starve other threads if there
1869 is other work we can usefully be doing.
1870 ------------------------------------------------------------------------- */
/* Look for scavenging work, preferring the oldest step: first scan any
 * partially-scanned todo block, then large objects, then a local todo
 * block; under THREADED_RTS, finally try to steal a todo block from
 * another GC thread.  Keeps looping while work is found; returns
 * rtsTrue iff any work was done at all.
 * NOTE(review): lines are elided (loop labels / continue statements not
 * all visible in this listing).
 */
1873 scavenge_find_work (void)
1877 rtsBool did_something, did_anything;
1880 gct->scav_find_work++;
1882 did_anything = rtsFalse;
1885 did_something = rtsFalse;
/* Walk steps oldest-first; step 0 of a multi-generation setup is
 * skipped here (handled via the condition below). */
1886 for (s = total_steps-1; s >= 0; s--) {
1887 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1890 ws = &gct->steps[s];
1892 gct->scan_bd = NULL;
1894 // If we have a scan block with some work to do,
1895 // scavenge everything up to the free pointer.
1896 if (ws->todo_bd->u.scan < ws->todo_free)
1898 scavenge_block(ws->todo_bd);
1899 did_something = rtsTrue;
1903 // If we have any large objects to scavenge, do them now.
1904 if (ws->todo_large_objects) {
1906 did_something = rtsTrue;
/* Otherwise, try to grab a full block from our local todo queue. */
1910 if ((bd = grab_local_todo_block(ws)) != NULL) {
1912 did_something = rtsTrue;
1917 if (did_something) {
1918 did_anything = rtsTrue;
1922 #if defined(THREADED_RTS)
1923 if (work_stealing) {
1924 // look for work to steal
1925 for (s = total_steps-1; s >= 0; s--) {
1926 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1929 if ((bd = steal_todo_block(s)) != NULL) {
1931 did_something = rtsTrue;
1936 if (did_something) {
1937 did_anything = rtsTrue;
1943 // only return when there is no more work to do
1945 return did_anything;
1948 /* ----------------------------------------------------------------------------
1949 Scavenge until we can't find anything more to scavenge.
1950 ------------------------------------------------------------------------- */
1958 work_to_do = rtsFalse;
1960 // scavenge static objects
1961 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
1962 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
1966 // scavenge objects in compacted generation
1967 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1968 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1969 scavenge_mark_stack();
1970 work_to_do = rtsTrue;
1973 // Order is important here: we want to deal in full blocks as
1974 // much as possible, so go for global work in preference to
1975 // local work. Only if all the global work has been exhausted
1976 // do we start scavenging the fragments of blocks in the local
1978 if (scavenge_find_work()) goto loop;
1980 if (work_to_do) goto loop;