1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
26 #include "LdvProfile.h"
28 #include "Capability.h"
30 static void scavenge_stack (StgPtr p, StgPtr stack_end);
32 static void scavenge_large_bitmap (StgPtr p,
33 StgLargeBitmap *large_bitmap,
36 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
37 # define evacuate(a) evacuate1(a)
38 # define recordMutableGen_GC(a,b) recordMutableGen(a,b)
39 # define scavenge_loop(a) scavenge_loop1(a)
40 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
41 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
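/* When the RTS is threaded but this file is compiled without PARALLEL_GC,
 * only a single thread runs the collector, so the macros above appear to
 * redirect the entry points to their "...1" counterparts -- sequential
 * specialisations that can skip the synchronisation the parallel versions
 * need.
 */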
44 /* -----------------------------------------------------------------------------
46 -------------------------------------------------------------------------- */
49 scavenge_TSO_link (StgTSO *tso)
51 // We don't always chase the link field: TSOs on the blackhole
52 // queue are not automatically alive, so the link field is a
53 // "weak" pointer in that case.
54 if (tso->why_blocked != BlockedOnBlackHole) {
55 evacuate((StgClosure **)&tso->_link);
60 scavengeTSO (StgTSO *tso)
64 if (tso->what_next == ThreadRelocated) {
65 // the only way this can happen is if the old TSO was on the
66 // mutable list. We might have other links to this defunct
67 // TSO, so we must update its link field.
68 evacuate((StgClosure**)&tso->_link);
72 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
74 saved_eager = gct->eager_promotion;
75 gct->eager_promotion = rtsFalse;
77 if ( tso->why_blocked == BlockedOnMVar
78 || tso->why_blocked == BlockedOnBlackHole
79 || tso->why_blocked == BlockedOnException
81 evacuate(&tso->block_info.closure);
83 evacuate((StgClosure **)&tso->blocked_exceptions);
85 // scavenge the current transaction record
86 evacuate((StgClosure **)&tso->trec);
88 // scavenge this thread's stack
89 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
91 if (gct->failed_to_evac) {
92 tso->flags |= TSO_DIRTY;
93 scavenge_TSO_link(tso);
95 tso->flags &= ~TSO_DIRTY;
96 scavenge_TSO_link(tso);
97 if (gct->failed_to_evac) {
98 tso->flags |= TSO_LINK_DIRTY;
100 tso->flags &= ~TSO_LINK_DIRTY;
104 gct->eager_promotion = saved_eager;
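// Summary of the dirty-flag handling above: if scavenging the TSO's fields
// left anything pointing into a younger generation, the whole TSO is marked
// TSO_DIRTY so that it stays on the mutable list; otherwise only the _link
// field is rescanned, and TSO_LINK_DIRTY records the case where just that
// field still points into a younger generation.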
107 /* -----------------------------------------------------------------------------
108 Blocks of function args occur on the stack (at the top) and in PAPs.
110 -------------------------------------------------------------------------- */
113 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
120 switch (fun_info->f.fun_type) {
122 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
123 size = BITMAP_SIZE(fun_info->f.b.bitmap);
126 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
127 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
131 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
132 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
135 if ((bitmap & 1) == 0) {
136 evacuate((StgClosure **)p);
139 bitmap = bitmap >> 1;
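// A worked example of the small-bitmap walk above (illustrative only): with
// size == 3 and bitmap == 0b100, the clear bits select words 0 and 1 as
// pointers to evacuate, word 2 is skipped as a non-pointer, and the bitmap
// is shifted right once per word processed.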
147 STATIC_INLINE GNUC_ATTR_HOT StgPtr
148 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
152 StgFunInfoTable *fun_info;
154 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
155 ASSERT(fun_info->i.type != PAP);
158 switch (fun_info->f.fun_type) {
160 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
163 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
167 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
171 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
174 if ((bitmap & 1) == 0) {
175 evacuate((StgClosure **)p);
178 bitmap = bitmap >> 1;
186 STATIC_INLINE GNUC_ATTR_HOT StgPtr
187 scavenge_PAP (StgPAP *pap)
190 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
194 scavenge_AP (StgAP *ap)
197 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
200 /* -----------------------------------------------------------------------------
202 -------------------------------------------------------------------------- */
204 /* Similar to scavenge_large_bitmap(), but we don't write back the
205 * pointers we get back from evacuate().
208 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
215 bitmap = large_srt->l.bitmap[b];
216 size = (nat)large_srt->l.size;
217 p = (StgClosure **)large_srt->srt;
218 for (i = 0; i < size; ) {
219 if ((bitmap & 1) != 0) {
224 if (i % BITS_IN(W_) == 0) {
226 bitmap = large_srt->l.bitmap[b];
228 bitmap = bitmap >> 1;
233 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
234 * srt field in the info table. That's ok, because we'll
235 * never dereference it.
237 STATIC_INLINE GNUC_ATTR_HOT void
238 scavenge_srt (StgClosure **srt, nat srt_bitmap)
246 if (bitmap == (StgHalfWord)(-1)) {
247 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
251 while (bitmap != 0) {
252 if ((bitmap & 1) != 0) {
253 #if defined(__PIC__) && defined(mingw32_TARGET_OS)
254 // Special-case to handle references to closures hiding out in DLLs, since
255 // double indirections are required to get at them. The code generator knows
256 // which is which when generating the SRT, so it stores the (indirect)
257 // reference to the DLL closure in the table by first adding one to it.
258 // We check for this here, and undo the addition before evacuating it.
260 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
261 // closure that's fixed at link-time, and no extra magic is required.
262 if ( (unsigned long)(*srt) & 0x1 ) {
263 evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
272 bitmap = bitmap >> 1;
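// Note the convention difference from arg/stack bitmaps: in an SRT bitmap a
// *set* bit marks an entry to evacuate.  An srt_bitmap of (StgHalfWord)(-1)
// is a sentinel meaning the SRT is too large for an inline bitmap, in which
// case scavenge_large_srt_bitmap() walks it instead.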
277 STATIC_INLINE GNUC_ATTR_HOT void
278 scavenge_thunk_srt(const StgInfoTable *info)
280 StgThunkInfoTable *thunk_info;
282 if (!major_gc) return;
284 thunk_info = itbl_to_thunk_itbl(info);
285 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
288 STATIC_INLINE GNUC_ATTR_HOT void
289 scavenge_fun_srt(const StgInfoTable *info)
291 StgFunInfoTable *fun_info;
293 if (!major_gc) return;
295 fun_info = itbl_to_fun_itbl(info);
296 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
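// Both wrappers above return early on a minor collection: SRT entries refer
// to static closures, which are only traced during a major GC, so scavenging
// SRTs on a minor GC would be wasted work.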
299 /* -----------------------------------------------------------------------------
300 Scavenge a block from the given scan pointer up to bd->free.
302 evac_step is set by the caller to be either zero (for a step in a
303 generation < N) or G where G is the generation of the step being scavenged.
306 We sometimes temporarily change evac_step back to zero if we're
307 scavenging a mutable object where eager promotion isn't such a good idea.
309 -------------------------------------------------------------------------- */
311 static GNUC_ATTR_HOT void
312 scavenge_block (bdescr *bd)
316 step *saved_evac_step;
317 rtsBool saved_eager_promotion;
320 debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
321 bd->start, bd->gen_no, bd->step->no, bd->u.scan);
324 gct->evac_step = bd->step;
325 saved_evac_step = gct->evac_step;
326 saved_eager_promotion = gct->eager_promotion;
327 gct->failed_to_evac = rtsFalse;
329 ws = &gct->steps[bd->step->abs_no];
333 // we might be evacuating into the very object that we're
334 // scavenging, so we have to check the real bd->free pointer each
335 // time around the loop.
336 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
338 ASSERT(bd->link == NULL);
339 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
340 info = get_itbl((StgClosure *)p);
342 ASSERT(gct->thunk_selector_depth == 0);
345 switch (info->type) {
350 StgMVar *mvar = ((StgMVar *)p);
351 gct->eager_promotion = rtsFalse;
352 evacuate((StgClosure **)&mvar->head);
353 evacuate((StgClosure **)&mvar->tail);
354 evacuate((StgClosure **)&mvar->value);
355 gct->eager_promotion = saved_eager_promotion;
357 if (gct->failed_to_evac) {
358 mvar->header.info = &stg_MVAR_DIRTY_info;
360 mvar->header.info = &stg_MVAR_CLEAN_info;
362 p += sizeofW(StgMVar);
367 scavenge_fun_srt(info);
368 evacuate(&((StgClosure *)p)->payload[1]);
369 evacuate(&((StgClosure *)p)->payload[0]);
370 p += sizeofW(StgHeader) + 2;
374 scavenge_thunk_srt(info);
375 evacuate(&((StgThunk *)p)->payload[1]);
376 evacuate(&((StgThunk *)p)->payload[0]);
377 p += sizeofW(StgThunk) + 2;
381 evacuate(&((StgClosure *)p)->payload[1]);
382 evacuate(&((StgClosure *)p)->payload[0]);
383 p += sizeofW(StgHeader) + 2;
387 scavenge_thunk_srt(info);
388 evacuate(&((StgThunk *)p)->payload[0]);
389 p += sizeofW(StgThunk) + 1;
393 scavenge_fun_srt(info);
395 evacuate(&((StgClosure *)p)->payload[0]);
396 p += sizeofW(StgHeader) + 1;
400 scavenge_thunk_srt(info);
401 p += sizeofW(StgThunk) + 1;
405 scavenge_fun_srt(info);
407 p += sizeofW(StgHeader) + 1;
411 scavenge_thunk_srt(info);
412 p += sizeofW(StgThunk) + 2;
416 scavenge_fun_srt(info);
418 p += sizeofW(StgHeader) + 2;
422 scavenge_thunk_srt(info);
423 evacuate(&((StgThunk *)p)->payload[0]);
424 p += sizeofW(StgThunk) + 2;
428 scavenge_fun_srt(info);
430 evacuate(&((StgClosure *)p)->payload[0]);
431 p += sizeofW(StgHeader) + 2;
435 scavenge_fun_srt(info);
442 scavenge_thunk_srt(info);
443 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
444 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
445 evacuate((StgClosure **)p);
447 p += info->layout.payload.nptrs;
458 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
459 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
460 evacuate((StgClosure **)p);
462 p += info->layout.payload.nptrs;
467 StgBCO *bco = (StgBCO *)p;
468 evacuate((StgClosure **)&bco->instrs);
469 evacuate((StgClosure **)&bco->literals);
470 evacuate((StgClosure **)&bco->ptrs);
476 if (bd->gen_no != 0) {
479 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
480 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
481 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
484 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
486 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
488 // We pretend that p has just been created.
489 LDV_RECORD_CREATE((StgClosure *)p);
492 case IND_OLDGEN_PERM:
493 evacuate(&((StgInd *)p)->indirectee);
494 p += sizeofW(StgInd);
499 gct->eager_promotion = rtsFalse;
500 evacuate(&((StgMutVar *)p)->var);
501 gct->eager_promotion = saved_eager_promotion;
503 if (gct->failed_to_evac) {
504 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
506 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
508 p += sizeofW(StgMutVar);
513 p += BLACKHOLE_sizeW();
518 StgSelector *s = (StgSelector *)p;
519 evacuate(&s->selectee);
520 p += THUNK_SELECTOR_sizeW();
524 // A chunk of stack saved in a heap object
527 StgAP_STACK *ap = (StgAP_STACK *)p;
530 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
531 p = (StgPtr)ap->payload + ap->size;
536 p = scavenge_PAP((StgPAP *)p);
540 p = scavenge_AP((StgAP *)p);
545 p += arr_words_sizeW((StgArrWords *)p);
548 case MUT_ARR_PTRS_CLEAN:
549 case MUT_ARR_PTRS_DIRTY:
554 // We don't eagerly promote objects pointed to by a mutable
555 // array, but if we find the array only points to objects in
556 // the same or an older generation, we mark it "clean" and
557 // avoid traversing it during minor GCs.
558 gct->eager_promotion = rtsFalse;
559 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
560 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
561 evacuate((StgClosure **)p);
563 gct->eager_promotion = saved_eager_promotion;
565 if (gct->failed_to_evac) {
566 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
568 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
571 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
575 case MUT_ARR_PTRS_FROZEN:
576 case MUT_ARR_PTRS_FROZEN0:
581 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
582 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
583 evacuate((StgClosure **)p);
586 // If we're going to put this object on the mutable list, then
587 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
588 if (gct->failed_to_evac) {
589 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
591 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
598 StgTSO *tso = (StgTSO *)p;
604 case TVAR_WATCH_QUEUE:
606 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
608 evacuate((StgClosure **)&wq->closure);
609 evacuate((StgClosure **)&wq->next_queue_entry);
610 evacuate((StgClosure **)&wq->prev_queue_entry);
611 gct->evac_step = saved_evac_step;
612 gct->failed_to_evac = rtsTrue; // mutable
613 p += sizeofW(StgTVarWatchQueue);
619 StgTVar *tvar = ((StgTVar *) p);
621 evacuate((StgClosure **)&tvar->current_value);
622 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
623 gct->evac_step = saved_evac_step;
624 gct->failed_to_evac = rtsTrue; // mutable
625 p += sizeofW(StgTVar);
631 StgTRecHeader *trec = ((StgTRecHeader *) p);
633 evacuate((StgClosure **)&trec->enclosing_trec);
634 evacuate((StgClosure **)&trec->current_chunk);
635 evacuate((StgClosure **)&trec->invariants_to_check);
636 gct->evac_step = saved_evac_step;
637 gct->failed_to_evac = rtsTrue; // mutable
638 p += sizeofW(StgTRecHeader);
645 StgTRecChunk *tc = ((StgTRecChunk *) p);
646 TRecEntry *e = &(tc -> entries[0]);
648 evacuate((StgClosure **)&tc->prev_chunk);
649 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
650 evacuate((StgClosure **)&e->tvar);
651 evacuate((StgClosure **)&e->expected_value);
652 evacuate((StgClosure **)&e->new_value);
654 gct->evac_step = saved_evac_step;
655 gct->failed_to_evac = rtsTrue; // mutable
656 p += sizeofW(StgTRecChunk);
660 case ATOMIC_INVARIANT:
662 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
664 evacuate(&invariant->code);
665 evacuate((StgClosure **)&invariant->last_execution);
666 gct->evac_step = saved_evac_step;
667 gct->failed_to_evac = rtsTrue; // mutable
668 p += sizeofW(StgAtomicInvariant);
672 case INVARIANT_CHECK_QUEUE:
674 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
676 evacuate((StgClosure **)&queue->invariant);
677 evacuate((StgClosure **)&queue->my_execution);
678 evacuate((StgClosure **)&queue->next_queue_entry);
679 gct->evac_step = saved_evac_step;
680 gct->failed_to_evac = rtsTrue; // mutable
681 p += sizeofW(StgInvariantCheckQueue);
686 barf("scavenge: unimplemented/strange closure type %d @ %p",
691 * We need to record the current object on the mutable list if
692 * (a) It is actually mutable, or
693 * (b) It contains pointers to a younger generation.
694 * Case (b) arises if we didn't manage to promote everything that
695 * the current object points to into the current generation.
697 if (gct->failed_to_evac) {
698 gct->failed_to_evac = rtsFalse;
699 if (bd->gen_no > 0) {
700 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
706 gct->copied += ws->todo_free - bd->free;
710 debugTrace(DEBUG_gc, " scavenged %ld bytes",
711 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
713 // update stats: this is a block that has been scavenged
714 gct->scanned += bd->free - bd->u.scan;
715 bd->u.scan = bd->free;
717 if (bd != ws->todo_bd) {
718 // we're not going to evac any more objects into
719 // this block, so push it now.
720 push_scanned_block(bd, ws);
725 /* -----------------------------------------------------------------------------
726 Scavenge everything on the mark stack.
728 This is slightly different from scavenge():
729 - we don't walk linearly through the objects, so the scavenger
730 doesn't need to advance the pointer on to the next object.
731 -------------------------------------------------------------------------- */
734 scavenge_mark_stack(void)
738 step *saved_evac_step;
740 gct->evac_step = &oldest_gen->steps[0];
741 saved_evac_step = gct->evac_step;
744 while (!mark_stack_empty()) {
745 p = pop_mark_stack();
747 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
748 info = get_itbl((StgClosure *)p);
751 switch (info->type) {
756 rtsBool saved_eager_promotion = gct->eager_promotion;
758 StgMVar *mvar = ((StgMVar *)p);
759 gct->eager_promotion = rtsFalse;
760 evacuate((StgClosure **)&mvar->head);
761 evacuate((StgClosure **)&mvar->tail);
762 evacuate((StgClosure **)&mvar->value);
763 gct->eager_promotion = saved_eager_promotion;
765 if (gct->failed_to_evac) {
766 mvar->header.info = &stg_MVAR_DIRTY_info;
768 mvar->header.info = &stg_MVAR_CLEAN_info;
774 scavenge_fun_srt(info);
775 evacuate(&((StgClosure *)p)->payload[1]);
776 evacuate(&((StgClosure *)p)->payload[0]);
780 scavenge_thunk_srt(info);
781 evacuate(&((StgThunk *)p)->payload[1]);
782 evacuate(&((StgThunk *)p)->payload[0]);
786 evacuate(&((StgClosure *)p)->payload[1]);
787 evacuate(&((StgClosure *)p)->payload[0]);
792 scavenge_fun_srt(info);
793 evacuate(&((StgClosure *)p)->payload[0]);
798 scavenge_thunk_srt(info);
799 evacuate(&((StgThunk *)p)->payload[0]);
804 evacuate(&((StgClosure *)p)->payload[0]);
809 scavenge_fun_srt(info);
814 scavenge_thunk_srt(info);
822 scavenge_fun_srt(info);
829 scavenge_thunk_srt(info);
830 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
831 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
832 evacuate((StgClosure **)p);
844 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
845 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
846 evacuate((StgClosure **)p);
852 StgBCO *bco = (StgBCO *)p;
853 evacuate((StgClosure **)&bco->instrs);
854 evacuate((StgClosure **)&bco->literals);
855 evacuate((StgClosure **)&bco->ptrs);
860 // don't need to do anything here: the only possible case
861 // is that we're in a 1-space compacting collector, with
862 // no "old" generation.
866 case IND_OLDGEN_PERM:
867 evacuate(&((StgInd *)p)->indirectee);
871 case MUT_VAR_DIRTY: {
872 rtsBool saved_eager_promotion = gct->eager_promotion;
874 gct->eager_promotion = rtsFalse;
875 evacuate(&((StgMutVar *)p)->var);
876 gct->eager_promotion = saved_eager_promotion;
878 if (gct->failed_to_evac) {
879 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
881 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
893 StgSelector *s = (StgSelector *)p;
894 evacuate(&s->selectee);
898 // A chunk of stack saved in a heap object
901 StgAP_STACK *ap = (StgAP_STACK *)p;
904 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
909 scavenge_PAP((StgPAP *)p);
913 scavenge_AP((StgAP *)p);
916 case MUT_ARR_PTRS_CLEAN:
917 case MUT_ARR_PTRS_DIRTY:
923 // We don't eagerly promote objects pointed to by a mutable
924 // array, but if we find the array only points to objects in
925 // the same or an older generation, we mark it "clean" and
926 // avoid traversing it during minor GCs.
927 saved_eager = gct->eager_promotion;
928 gct->eager_promotion = rtsFalse;
929 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
930 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
931 evacuate((StgClosure **)p);
933 gct->eager_promotion = saved_eager;
935 if (gct->failed_to_evac) {
936 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
938 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
941 gct->failed_to_evac = rtsTrue; // mutable anyhow.
945 case MUT_ARR_PTRS_FROZEN:
946 case MUT_ARR_PTRS_FROZEN0:
951 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
952 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
953 evacuate((StgClosure **)p);
956 // If we're going to put this object on the mutable list, then
957 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
958 if (gct->failed_to_evac) {
959 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
961 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
968 scavengeTSO((StgTSO*)p);
972 case TVAR_WATCH_QUEUE:
974 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
976 evacuate((StgClosure **)&wq->closure);
977 evacuate((StgClosure **)&wq->next_queue_entry);
978 evacuate((StgClosure **)&wq->prev_queue_entry);
979 gct->evac_step = saved_evac_step;
980 gct->failed_to_evac = rtsTrue; // mutable
986 StgTVar *tvar = ((StgTVar *) p);
988 evacuate((StgClosure **)&tvar->current_value);
989 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
990 gct->evac_step = saved_evac_step;
991 gct->failed_to_evac = rtsTrue; // mutable
998 StgTRecChunk *tc = ((StgTRecChunk *) p);
999 TRecEntry *e = &(tc -> entries[0]);
1001 evacuate((StgClosure **)&tc->prev_chunk);
1002 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1003 evacuate((StgClosure **)&e->tvar);
1004 evacuate((StgClosure **)&e->expected_value);
1005 evacuate((StgClosure **)&e->new_value);
1007 gct->evac_step = saved_evac_step;
1008 gct->failed_to_evac = rtsTrue; // mutable
1014 StgTRecHeader *trec = ((StgTRecHeader *) p);
1016 evacuate((StgClosure **)&trec->enclosing_trec);
1017 evacuate((StgClosure **)&trec->current_chunk);
1018 evacuate((StgClosure **)&trec->invariants_to_check);
1019 gct->evac_step = saved_evac_step;
1020 gct->failed_to_evac = rtsTrue; // mutable
1024 case ATOMIC_INVARIANT:
1026 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1028 evacuate(&invariant->code);
1029 evacuate((StgClosure **)&invariant->last_execution);
1030 gct->evac_step = saved_evac_step;
1031 gct->failed_to_evac = rtsTrue; // mutable
1035 case INVARIANT_CHECK_QUEUE:
1037 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1039 evacuate((StgClosure **)&queue->invariant);
1040 evacuate((StgClosure **)&queue->my_execution);
1041 evacuate((StgClosure **)&queue->next_queue_entry);
1042 gct->evac_step = saved_evac_step;
1043 gct->failed_to_evac = rtsTrue; // mutable
1048 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1052 if (gct->failed_to_evac) {
1053 gct->failed_to_evac = rtsFalse;
1054 if (gct->evac_step) {
1055 recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no);
1059 // mark the next bit to indicate "scavenged"
1060 mark(q+1, Bdescr(q));
1062 } // while (!mark_stack_empty())
1064 // start a new linear scan if the mark stack overflowed at some point
1065 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
1066 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
1067 mark_stack_overflowed = rtsFalse;
1068 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
1069 oldgen_scan = oldgen_scan_bd->start;
1072 if (oldgen_scan_bd) {
1073 // push a new thing on the mark stack
1075 // find a closure that is marked but not scavenged, and start scavenging it.
1077 while (oldgen_scan < oldgen_scan_bd->free
1078 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
1082 if (oldgen_scan < oldgen_scan_bd->free) {
1084 // already scavenged?
1085 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
1086 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1089 push_mark_stack(oldgen_scan);
1090 // ToDo: bump the linear scan by the actual size of the object
1091 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1095 oldgen_scan_bd = oldgen_scan_bd->link;
1096 if (oldgen_scan_bd != NULL) {
1097 oldgen_scan = oldgen_scan_bd->start;
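// The linear-scan fallback above relies on a second mark bit per object:
// mark(q+1, ...) sets the bit of the word *after* the header to mean
// "already scavenged", and is_marked(oldgen_scan+1, ...) checks that bit
// before pushing a closure back onto the mark stack.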
1103 /* -----------------------------------------------------------------------------
1104 Scavenge one object.
1106 This is used for objects that are temporarily marked as mutable
1107 because they contain old-to-new generation pointers. Only certain
1108 objects can have this property.
1109 -------------------------------------------------------------------------- */
1112 scavenge_one(StgPtr p)
1114 const StgInfoTable *info;
1115 step *saved_evac_step = gct->evac_step;
1118 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1119 info = get_itbl((StgClosure *)p);
1121 switch (info->type) {
1126 rtsBool saved_eager_promotion = gct->eager_promotion;
1128 StgMVar *mvar = ((StgMVar *)p);
1129 gct->eager_promotion = rtsFalse;
1130 evacuate((StgClosure **)&mvar->head);
1131 evacuate((StgClosure **)&mvar->tail);
1132 evacuate((StgClosure **)&mvar->value);
1133 gct->eager_promotion = saved_eager_promotion;
1135 if (gct->failed_to_evac) {
1136 mvar->header.info = &stg_MVAR_DIRTY_info;
1138 mvar->header.info = &stg_MVAR_CLEAN_info;
1152 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1153 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1154 evacuate((StgClosure **)q);
1160 case FUN_1_0: // hardly worth specialising these guys
1176 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1177 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1178 evacuate((StgClosure **)q);
1184 case MUT_VAR_DIRTY: {
1186 rtsBool saved_eager_promotion = gct->eager_promotion;
1188 gct->eager_promotion = rtsFalse;
1189 evacuate(&((StgMutVar *)p)->var);
1190 gct->eager_promotion = saved_eager_promotion;
1192 if (gct->failed_to_evac) {
1193 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1195 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1204 case THUNK_SELECTOR:
1206 StgSelector *s = (StgSelector *)p;
1207 evacuate(&s->selectee);
1213 StgAP_STACK *ap = (StgAP_STACK *)p;
1216 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1217 p = (StgPtr)ap->payload + ap->size;
1222 p = scavenge_PAP((StgPAP *)p);
1226 p = scavenge_AP((StgAP *)p);
1230 // nothing to follow
1233 case MUT_ARR_PTRS_CLEAN:
1234 case MUT_ARR_PTRS_DIRTY:
1237 rtsBool saved_eager;
1239 // We don't eagerly promote objects pointed to by a mutable
1240 // array, but if we find the array only points to objects in
1241 // the same or an older generation, we mark it "clean" and
1242 // avoid traversing it during minor GCs.
1243 saved_eager = gct->eager_promotion;
1244 gct->eager_promotion = rtsFalse;
1246 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1247 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1248 evacuate((StgClosure **)p);
1250 gct->eager_promotion = saved_eager;
1252 if (gct->failed_to_evac) {
1253 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1255 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1258 gct->failed_to_evac = rtsTrue;
1262 case MUT_ARR_PTRS_FROZEN:
1263 case MUT_ARR_PTRS_FROZEN0:
1265 // follow everything
1268 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1269 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1270 evacuate((StgClosure **)p);
1273 // If we're going to put this object on the mutable list, then
1274 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1275 if (gct->failed_to_evac) {
1276 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1278 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1285 scavengeTSO((StgTSO*)p);
1289 case TVAR_WATCH_QUEUE:
1291 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
1293 evacuate((StgClosure **)&wq->closure);
1294 evacuate((StgClosure **)&wq->next_queue_entry);
1295 evacuate((StgClosure **)&wq->prev_queue_entry);
1296 gct->evac_step = saved_evac_step;
1297 gct->failed_to_evac = rtsTrue; // mutable
1303 StgTVar *tvar = ((StgTVar *) p);
1305 evacuate((StgClosure **)&tvar->current_value);
1306 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
1307 gct->evac_step = saved_evac_step;
1308 gct->failed_to_evac = rtsTrue; // mutable
1314 StgTRecHeader *trec = ((StgTRecHeader *) p);
1316 evacuate((StgClosure **)&trec->enclosing_trec);
1317 evacuate((StgClosure **)&trec->current_chunk);
1318 evacuate((StgClosure **)&trec->invariants_to_check);
1319 gct->evac_step = saved_evac_step;
1320 gct->failed_to_evac = rtsTrue; // mutable
1327 StgTRecChunk *tc = ((StgTRecChunk *) p);
1328 TRecEntry *e = &(tc -> entries[0]);
1330 evacuate((StgClosure **)&tc->prev_chunk);
1331 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1332 evacuate((StgClosure **)&e->tvar);
1333 evacuate((StgClosure **)&e->expected_value);
1334 evacuate((StgClosure **)&e->new_value);
1336 gct->evac_step = saved_evac_step;
1337 gct->failed_to_evac = rtsTrue; // mutable
1341 case ATOMIC_INVARIANT:
1343 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1345 evacuate(&invariant->code);
1346 evacuate((StgClosure **)&invariant->last_execution);
1347 gct->evac_step = saved_evac_step;
1348 gct->failed_to_evac = rtsTrue; // mutable
1352 case INVARIANT_CHECK_QUEUE:
1354 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1356 evacuate((StgClosure **)&queue->invariant);
1357 evacuate((StgClosure **)&queue->my_execution);
1358 evacuate((StgClosure **)&queue->next_queue_entry);
1359 gct->evac_step = saved_evac_step;
1360 gct->failed_to_evac = rtsTrue; // mutable
1365 case IND_OLDGEN_PERM:
1368 /* Careful here: a THUNK can be on the mutable list because
1369 * it contains pointers to young gen objects. If such a thunk
1370 * is updated, the IND_OLDGEN will be added to the mutable
1371 * list again, and we'll scavenge it twice. evacuate()
1372 * doesn't check whether the object has already been
1373 * evacuated, so we perform that check here.
1375 StgClosure *q = ((StgInd *)p)->indirectee;
1376 if (HEAP_ALLOCED_GC(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
1379 evacuate(&((StgInd *)p)->indirectee);
1382 #if 0 && defined(DEBUG)
1383 if (RtsFlags.DebugFlags.gc)
1384 /* Debugging code to print out the size of the thing we just
1388 StgPtr start = gen->steps[0].scan;
1389 bdescr *start_bd = gen->steps[0].scan_bd;
1391 scavenge(&gen->steps[0]);
1392 if (start_bd != gen->steps[0].scan_bd) {
1393 size += (P_)BLOCK_ROUND_UP(start) - start;
1394 start_bd = start_bd->link;
1395 while (start_bd != gen->steps[0].scan_bd) {
1396 size += BLOCK_SIZE_W;
1397 start_bd = start_bd->link;
1399 size += gen->steps[0].scan -
1400 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
1402 size = gen->steps[0].scan - start;
1404 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1410 barf("scavenge_one: strange object %d", (int)(info->type));
1413 no_luck = gct->failed_to_evac;
1414 gct->failed_to_evac = rtsFalse;
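// The saved no_luck flag is what scavenge_one() reports back: rtsTrue means
// the object still points into a younger generation, so the caller has to
// keep it on the appropriate mutable list (see scavenge_mutable_list() and
// scavenge_large()).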
1418 /* -----------------------------------------------------------------------------
1419 Scavenging mutable lists.
1421 We treat the mutable list of each generation > N (i.e. all the
1422 generations older than the one being collected) as roots. We also
1423 remove non-mutable objects from the mutable list at this point.
1424 -------------------------------------------------------------------------- */
1427 scavenge_mutable_list(bdescr *bd, generation *gen)
1431 gct->evac_step = &gen->steps[0];
1432 for (; bd != NULL; bd = bd->link) {
1433 for (q = bd->start; q < bd->free; q++) {
1435 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1438 switch (get_itbl((StgClosure *)p)->type) {
1440 barf("MUT_VAR_CLEAN on mutable list");
1442 mutlist_MUTVARS++; break;
1443 case MUT_ARR_PTRS_CLEAN:
1444 case MUT_ARR_PTRS_DIRTY:
1445 case MUT_ARR_PTRS_FROZEN:
1446 case MUT_ARR_PTRS_FROZEN0:
1447 mutlist_MUTARRS++; break;
1449 barf("MVAR_CLEAN on mutable list");
1451 mutlist_MVARS++; break;
1453 mutlist_OTHERS++; break;
1457 // Check whether this object is "clean", that is, it
1458 // definitely doesn't point into a young generation.
1459 // Clean objects don't need to be scavenged. Some clean
1460 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1461 // list at all; others, such as MUT_ARR_PTRS_CLEAN,
1462 // are always on the mutable list.
1464 switch (get_itbl((StgClosure *)p)->type) {
1465 case MUT_ARR_PTRS_CLEAN:
1466 recordMutableGen_GC((StgClosure *)p,gen->no);
1469 StgTSO *tso = (StgTSO *)p;
1470 if ((tso->flags & TSO_DIRTY) == 0) {
1471 // Must be on the mutable list because its link field is dirty.
1473 ASSERT(tso->flags & TSO_LINK_DIRTY);
1475 scavenge_TSO_link(tso);
1476 if (gct->failed_to_evac) {
1477 recordMutableGen_GC((StgClosure *)p,gen->no);
1478 gct->failed_to_evac = rtsFalse;
1480 tso->flags &= ~TSO_LINK_DIRTY;
1489 if (scavenge_one(p)) {
1490 // didn't manage to promote everything, so put the
1491 // object back on the list.
1492 recordMutableGen_GC((StgClosure *)p,gen->no);
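// This is also how objects fall off the mutable list: an entry is only
// re-recorded (above) when scavenge_one() says it still points into a
// younger generation, so "clean" objects are simply not carried over to the
// new list.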
1499 scavenge_capability_mut_lists (Capability *cap)
1503 /* Mutable lists from each generation > N
1504 * we want to *scavenge* these roots, not evacuate them: they're not
1505 * going to move in this GC.
1506 * Also do them in reverse generation order, for the usual reason:
1507 * namely to reduce the likelihood of spurious old->new pointers.
1509 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1510 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1511 freeChain_sync(cap->saved_mut_lists[g]);
1512 cap->saved_mut_lists[g] = NULL;
1516 /* -----------------------------------------------------------------------------
1517 Scavenging the static objects.
1519 We scavenge the list of static objects collected so far
1520 (gct->static_objects), evacuating what they point to straight into the
1521 oldest generation, and move each one onto scavenged_static_objects.
1522 -------------------------------------------------------------------------- */
1525 scavenge_static(void)
1528 const StgInfoTable *info;
1530 debugTrace(DEBUG_gc, "scavenging static objects");
1532 /* Always evacuate straight to the oldest generation for static objects. */
1534 gct->evac_step = &oldest_gen->steps[0];
1536 /* keep going until we've scavenged all the objects on the linked list... */
1541 /* get the next static object from the list. Remember, there might
1542 * be more stuff on this list after each evacuation...
1543 * (static_objects is a global)
1545 p = gct->static_objects;
1546 if (p == END_OF_STATIC_LIST) {
1550 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1553 if (info->type==RBH)
1554 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1556 // make sure the info pointer is into text space
1558 /* Take this object *off* the static_objects list,
1559 * and put it on the scavenged_static_objects list.
1561 gct->static_objects = *STATIC_LINK(info,p);
1562 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1563 gct->scavenged_static_objects = p;
1565 switch (info -> type) {
1569 StgInd *ind = (StgInd *)p;
1570 evacuate(&ind->indirectee);
1572 /* might fail to evacuate it, in which case we have to pop it
1573 * back on the mutable list of the oldest generation. We
1574 * leave it *on* the scavenged_static_objects list, though,
1575 * in case we visit this object again.
1577 if (gct->failed_to_evac) {
1578 gct->failed_to_evac = rtsFalse;
1579 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1585 scavenge_thunk_srt(info);
1589 scavenge_fun_srt(info);
1596 next = (P_)p->payload + info->layout.payload.ptrs;
1597 // evacuate the pointers
1598 for (q = (P_)p->payload; q < next; q++) {
1599 evacuate((StgClosure **)q);
1605 barf("scavenge_static: strange closure %d", (int)(info->type));
1608 ASSERT(gct->failed_to_evac == rtsFalse);
1612 /* -----------------------------------------------------------------------------
1613 scavenge a chunk of memory described by a bitmap
1614 -------------------------------------------------------------------------- */
1617 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1623 bitmap = large_bitmap->bitmap[b];
1624 for (i = 0; i < size; ) {
1625 if ((bitmap & 1) == 0) {
1626 evacuate((StgClosure **)p);
1630 if (i % BITS_IN(W_) == 0) {
1632 bitmap = large_bitmap->bitmap[b];
1634 bitmap = bitmap >> 1;
1639 STATIC_INLINE StgPtr
1640 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1643 if ((bitmap & 1) == 0) {
1644 evacuate((StgClosure **)p);
1647 bitmap = bitmap >> 1;
1653 /* -----------------------------------------------------------------------------
1654 scavenge_stack walks over a section of stack and evacuates all the
1655 objects pointed to by it. We can use the same code for walking
1656 AP_STACK_UPDs, since these are just sections of copied stack.
1657 -------------------------------------------------------------------------- */
1660 scavenge_stack(StgPtr p, StgPtr stack_end)
1662 const StgRetInfoTable* info;
1667 * Each time around this loop, we are looking at a chunk of stack
1668 * that starts with an activation record.
1671 while (p < stack_end) {
1672 info = get_ret_itbl((StgClosure *)p);
1674 switch (info->i.type) {
1677 // In SMP, we can get update frames that point to indirections
1678 // when two threads evaluate the same thunk. We do attempt to
1679 // discover this situation in threadPaused(), but it's
1680 // possible that the following sequence occurs:
1689 // Now T is an indirection, and the update frame is already
1690 // marked on A's stack, so we won't traverse it again in
1691 // threadPaused(). We could traverse the whole stack again
1692 // before GC, but that seems like overkill.
1694 // Scavenging this update frame as normal would be disastrous;
1695 // the updatee would end up pointing to the value. So we turn
1696 // the indirection into an IND_PERM, so that evacuate will
1697 // copy the indirection into the old generation instead of discarding it.
1700 // Note [upd-black-hole]
1701 // One slight hiccup is that the THUNK_SELECTOR machinery can
1702 // overwrite the updatee with an IND. In parallel GC, this
1703 // could even be happening concurrently, so we can't check for
1704 // the IND. Fortunately if we assume that blackholing is
1705 // happening (either lazy or eager), then we can be sure that
1706 // the updatee is never a THUNK_SELECTOR and we're ok.
1707 // NB. this is a new invariant: blackholing is not optional.
1710 const StgInfoTable *i;
1711 StgClosure *updatee;
1713 updatee = ((StgUpdateFrame *)p)->updatee;
1714 i = updatee->header.info;
1715 if (!IS_FORWARDING_PTR(i)) {
1716 type = get_itbl(updatee)->type;
1718 updatee->header.info = &stg_IND_PERM_info;
1719 } else if (type == IND_OLDGEN) {
1720 updatee->header.info = &stg_IND_OLDGEN_PERM_info;
1723 evacuate(&((StgUpdateFrame *)p)->updatee);
1724 ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
1725 p += sizeofW(StgUpdateFrame);
1729 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
1730 case CATCH_STM_FRAME:
1731 case CATCH_RETRY_FRAME:
1732 case ATOMICALLY_FRAME:
1736 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1737 size = BITMAP_SIZE(info->i.layout.bitmap);
1738 // NOTE: the payload starts immediately after the info-ptr, we
1739 // don't have an StgHeader in the same sense as a heap closure.
1741 p = scavenge_small_bitmap(p, size, bitmap);
1745 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1753 evacuate((StgClosure **)p);
1756 size = BCO_BITMAP_SIZE(bco);
1757 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1762 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1767 size = GET_LARGE_BITMAP(&info->i)->size;
1769 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1771 // and don't forget to follow the SRT
1775 // Dynamic bitmap: the mask is stored on the stack, and
1776 // there are a number of non-pointers followed by a number
1777 // of pointers above the bitmapped area (see StgMacros.h).
1782 dyn = ((StgRetDyn *)p)->liveness;
1784 // traverse the bitmap first
1785 bitmap = RET_DYN_LIVENESS(dyn);
1786 p = (P_)&((StgRetDyn *)p)->payload[0];
1787 size = RET_DYN_BITMAP_SIZE;
1788 p = scavenge_small_bitmap(p, size, bitmap);
1790 // skip over the non-ptr words
1791 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1793 // follow the ptr words
1794 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1795 evacuate((StgClosure **)p);
1803 StgRetFun *ret_fun = (StgRetFun *)p;
1804 StgFunInfoTable *fun_info;
1806 evacuate(&ret_fun->fun);
1807 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1808 p = scavenge_arg_block(fun_info, ret_fun->payload);
1813 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
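/* Each frame case above has the same shape: evacuate the pointers the frame
 * holds (located via a small bitmap, a large bitmap, a BCO's bitmap, a
 * RET_DYN liveness word or, for RET_FUN, the function's info table), follow
 * the frame's SRT where it has one, and advance p past the frame to the next
 * activation record.
 */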
1818 /*-----------------------------------------------------------------------------
1819 scavenge the large object list.
1821 evac_step set by caller; similar games played with evac_step as with
1822 scavenge() - see comment at the top of scavenge(). Most large
1823 objects are (repeatedly) mutable, so most of the time evac_step will be zero.
1825 --------------------------------------------------------------------------- */
1828 scavenge_large (step_workspace *ws)
1833 gct->evac_step = ws->step;
1835 bd = ws->todo_large_objects;
1837 for (; bd != NULL; bd = ws->todo_large_objects) {
1839 // take this object *off* the large objects list and put it on
1840 // the scavenged large objects list. This is so that we can
1841 // treat new_large_objects as a stack and push new objects on
1842 // the front when evacuating.
1843 ws->todo_large_objects = bd->link;
1845 ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
1846 dbl_link_onto(bd, &ws->step->scavenged_large_objects);
1847 ws->step->n_scavenged_large_blocks += bd->blocks;
1848 RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
1851 if (scavenge_one(p)) {
1852 if (ws->step->gen_no > 0) {
1853 recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
1858 gct->scanned += closure_sizeW((StgClosure*)p);
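// Note that large objects are not copied: each one is scavenged in place by
// scavenge_one(), and only its block descriptor moves from the todo list to
// the step's scavenged_large_objects list above.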
1862 /* ----------------------------------------------------------------------------
1863 Look for work to do.
1865 We look for the oldest step that has either a todo block that can
1866 be scanned, or a block of work on the global queue that we can scan.
1869 It is important to take work from the *oldest* generation that
1870 has work available, because that minimizes the likelihood of
1871 evacuating objects into a young generation when they should have
1872 been eagerly promoted. This really does make a difference (the
1873 cacheprof benchmark is one that is affected).
1875 We also want to scan the todo block if possible before grabbing
1876 work from the global queue, the reason being that we don't want to
1877 steal work from the global queue and starve other threads if there
1878 is other work we can usefully be doing.
1879 ------------------------------------------------------------------------- */
1882 scavenge_find_work (void)
1886 rtsBool did_something, did_anything;
1889 gct->scav_find_work++;
1891 did_anything = rtsFalse;
1894 did_something = rtsFalse;
1895 for (s = total_steps-1; s >= 0; s--) {
1896 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1899 ws = &gct->steps[s];
1901 gct->scan_bd = NULL;
1903 // If we have a scan block with some work to do,
1904 // scavenge everything up to the free pointer.
1905 if (ws->todo_bd->u.scan < ws->todo_free)
1907 scavenge_block(ws->todo_bd);
1908 did_something = rtsTrue;
1912 // If we have any large objects to scavenge, do them now.
1913 if (ws->todo_large_objects) {
1915 did_something = rtsTrue;
1919 if ((bd = grab_local_todo_block(ws)) != NULL) {
1921 did_something = rtsTrue;
1926 if (did_something) {
1927 did_anything = rtsTrue;
1931 #if defined(THREADED_RTS)
1932 if (work_stealing) {
1933 // look for work to steal
1934 for (s = total_steps-1; s >= 0; s--) {
1935 if (s == 0 && RtsFlags.GcFlags.generations > 1) {
1938 if ((bd = steal_todo_block(s)) != NULL) {
1940 did_something = rtsTrue;
1945 if (did_something) {
1946 did_anything = rtsTrue;
1952 // only return when there is no more work to do
1954 return did_anything;
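// The search order implemented above, per step and oldest first: finish
// scanning our own todo block, then scavenge any pending large objects, then
// take a full block from the local todo queue; only in the THREADED_RTS
// case, and apparently only once no local work remains, do we try to steal a
// todo block from another GC thread.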
1957 /* ----------------------------------------------------------------------------
1958 Scavenge until we can't find anything more to scavenge.
1959 ------------------------------------------------------------------------- */
1967 work_to_do = rtsFalse;
1969 // scavenge static objects
1970 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
1971 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
1975 // scavenge objects in compacted generation
1976 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1977 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1978 scavenge_mark_stack();
1979 work_to_do = rtsTrue;
1982 // Order is important here: we want to deal in full blocks as
1983 // much as possible, so go for global work in preference to
1984 // local work. Only if all the global work has been exhausted
1985 // do we start scavenging the fragments of blocks in the local workspaces.
1987 if (scavenge_find_work()) goto loop;
1989 if (work_to_do) goto loop;