/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "LdvProfile.h"
#include "Capability.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_lists1(cap)
#endif
/* -----------------------------------------------------------------------------
   Scavenging
   -------------------------------------------------------------------------- */

static void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}
static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list. We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
       ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->flags |= TSO_DIRTY;
        scavenge_TSO_link(tso);
    } else {
        tso->flags &= ~TSO_DIRTY;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}
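/* Illustrative note (not from the original source): the two dirty bits
 * record how much of a TSO the next GC must revisit.  TSO_DIRTY means the
 * whole TSO (including its stack) may point into a younger generation;
 * TSO_LINK_DIRTY means only the _link field does.  scavenge_mutable_list()
 * below exploits the distinction:
 *
 *     if ((tso->flags & TSO_DIRTY) == 0) {   // only the link is dirty,
 *         scavenge_TSO_link(tso);            // so skip the full scavengeTSO()
 *     }
 */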
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p = (StgPtr)args;
    StgWord bitmap;
    nat size;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
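/* Illustrative note (not from the original source): arg bitmaps pack
 * pointerhood into a word, one bit per argument word, and by convention a
 * 0 bit means "pointer" (hence the (bitmap & 1) == 0 tests above).  For a
 * hypothetical three-word arg block whose middle word is an unboxed
 * non-pointer, the bitmap would be 0b010 and the walk would evacuate
 * words 0 and 2 only:
 *
 *     StgWord bitmap = 0x2;          // 0b010: middle word is non-ptr
 *     for (nat i = 0; i < 3; i++, bitmap >>= 1) {
 *         if ((bitmap & 1) == 0) evacuate((StgClosure **)&args[i]);
 *     }
 */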
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenge SRTs
   -------------------------------------------------------------------------- */
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT. If srt_bitmap is zero, then there isn't an
 * srt field in the info table. That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_step is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_step back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
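/* Illustrative sketch (not from the original source): the idiom used
 * throughout this file for mutable objects.  Eager promotion is turned
 * off around the children, and the object is then forced onto the
 * mutable list:
 *
 *     saved_evac_step = gct->evac_step;
 *     gct->evac_step = 0;                  // don't eagerly promote
 *     evacuate((StgClosure **)&obj->field);
 *     gct->evac_step = saved_evac_step;
 *     gct->failed_to_evac = rtsTrue;       // keep obj on the mutable list
 */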
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;
    rtsBool saved_eager_promotion;
    step_workspace *ws;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
               bd->start, bd->gen_no, bd->step->no, bd->u.scan);

    gct->scan_bd = bd;
    gct->evac_step = bd->step;
    saved_evac_step = gct->evac_step;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->steps[bd->step->abs_no];

    p = bd->u.scan;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
        case CONSTR_1_0:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }
        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // @LDV profiling
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            // fall through
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY:
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;

        case CAF_BLACKHOLE:
        case BLACKHOLE:
            p += BLACKHOLE_sizeW();
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }

        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;

        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            scavengeTSO(tso);
            p += tso_sizeW(tso);
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVarWatchQueue);
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVar);
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecHeader);
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgAtomicInvariant);
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgInvariantCheckQueue);
            break;
        }
        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, bd->gen_no);
            }
        }
    }

    if (p > bd->free) {
        gct->copied += ws->todo_free - bd->free;
        bd->free = p;
    }

    debugTrace(DEBUG_gc, " scavenged %ld bytes",
               (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

    // update stats: this is a block that has been scavenged
    gct->scanned += bd->free - bd->u.scan;
    bd->u.scan = bd->free;

    if (bd != ws->todo_bd) {
        // we're not going to evac any more objects into
        // this block, so push it now.
        push_scanned_block(bd, ws);
    }

    gct->scan_bd = NULL;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;

    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;

linear_scan:
    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
            scavengeTSO((StgTSO*)p);
            break;

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no);
            }
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())
    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
    loop:
        // find a closure that is marked but not scavenged, and start
        // from there.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {
            oldgen_scan++;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
                goto loop;
            }
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            goto linear_scan;
        }
        else {
            oldgen_scan_bd = oldgen_scan_bd->link;
            if (oldgen_scan_bd != NULL) {
                oldgen_scan = oldgen_scan_bd->start;
                goto loop;
            }
        }
    }
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case CAF_BLACKHOLE:
    case BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
        scavengeTSO((StgTSO*)p);
        break;

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_step = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
    {
        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects. If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice. evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
         */
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
            break;
        }
        evacuate(&((StgInd *)p)->indirectee);
    }

#if 0 && defined(DEBUG)
    if (RtsFlags.DebugFlags.gc)
    {
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
            size += (P_)BLOCK_ROUND_UP(start) - start;
            start_bd = start_bd->link;
            while (start_bd != gen->steps[0].scan_bd) {
                size += BLOCK_SIZE_W;
                start_bd = start_bd->link;
            }
            size += gen->steps[0].scan -
                (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
            size = gen->steps[0].scan - start;
        }
        debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
    }
#endif
    break;
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
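/* Illustrative sketch (not from the original source): the writer side of
 * this protocol.  A mutator write barrier marks a clean object dirty and
 * records it on its generation's mutable list, which is what the walk
 * below picks up.  Roughly, for a MUT_VAR (hypothetical call site; the
 * real barrier lives elsewhere in the RTS, and gen_no_of() is made up):
 *
 *     if (mv->header.info == &stg_MUT_VAR_CLEAN_info) {
 *         mv->header.info = &stg_MUT_VAR_DIRTY_info;
 *         recordMutableGen((StgClosure *)mv, gen_no_of(mv));
 *     }
 */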
static void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif
            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged. Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // Must be on the mutable list because its link
                    // field is dirty.
                    ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N
     * we want to *scavenge* these roots, not evacuate them: they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   evacuate() puts every static object it visits for the first time on
   the gct->static_objects list; here we scavenge those objects, moving
   each one onto the gct->scavenged_static_objects list as we go.
   -------------------------------------------------------------------------- */
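/* Illustrative sketch (not from the original source): the producer side.
 * When evacuate() reaches a static closure for the first time during a
 * major GC, it threads the closure onto gct->static_objects through the
 * closure's own static-link word, roughly:
 *
 *     *STATIC_LINK(info, q) = gct->static_objects;
 *     gct->static_objects = q;
 *
 * which is why the loop below can keep finding new work on the list
 * after each evacuation.
 */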
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_step = &oldest_gen->steps[0];

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list. Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        /*
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        */
        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;
        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation. We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
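/* Illustrative note (not from the original source): a stack frame is an
 * info pointer followed by payload words, and for the common RET_SMALL
 * family the info table's small bitmap describes the payload.  For a
 * hypothetical two-word frame whose first payload word is a non-pointer
 * (bitmap 0b01):
 *
 *     frame:  [ info ] [ non-ptr ] [ ptr ]
 *     p++;                                  // step over the info pointer
 *     p = scavenge_small_bitmap(p, 2, 0x1); // evacuates only the ptr word
 */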
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk. We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //     enter T
            //     blackhole T
            //                  update T
            //     GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused(). We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value. So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND. In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND. Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            nat type;
            const StgInfoTable *i;
            StgClosure *updatee;

            updatee = ((StgUpdateFrame *)p)->updatee;
            i = updatee->header.info;
            if (!IS_FORWARDING_PTR(i)) {
                type = get_itbl(updatee)->type;
                if (type == IND) {
                    updatee->header.info = &stg_IND_PERM_info;
                } else if (type == IND_OLDGEN) {
                    updatee->header.info = &stg_IND_OLDGEN_PERM_info;
                }
            }
            evacuate(&((StgUpdateFrame *)p)->updatee);
            ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            if (major_gc)
                scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;

        case RET_BCO:
        {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }
        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area. (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;

            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_step set by caller; similar games played with evac_step as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_step will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (step_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_step = ws->step;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list. This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
        dbl_link_onto(bd, &ws->step->scavenged_large_objects);
        ws->step->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->step->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest step that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that has
   work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int s;
    step_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }
    // only return when there is no more work to do

    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work. Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}