/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size = (nat)large_srt->l.size;
    p = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(*p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap = srt_bitmap;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#ifdef ENABLE_WIN32_DLL_SUPPORT
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
            } else {
                evacuate(*srt);
            }
#else
            evacuate(*srt);
#endif
        }
        srt++;
        bitmap = bitmap >> 1;
    }
}
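// Worked example: an srt_bitmap of 0x5 (binary 101) over a three-entry
// SRT means entries 0 and 2 hold closures that must be evacuated and
// entry 1 is skipped; the loop above shifts the bitmap right once per
// entry until no set bits remain.  The all-ones value (StgHalfWord)(-1)
// is reserved as a sentinel meaning "this SRT is described by a
// separate StgLargeSRT bitmap", and is never treated as a real bitmap.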
STATIC_INLINE void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}
STATIC_INLINE void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
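// Both scavenge_thunk_srt() and scavenge_fun_srt() bail out early unless
// this is a major collection: SRT entries refer to static closures, and
// the static object lists are only traversed during a major GC, so in a
// minor collection there is nothing for them to do.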
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    if ( tso->why_blocked == BlockedOnMVar
         || tso->why_blocked == BlockedOnBlackHole
         || tso->why_blocked == BlockedOnException
         ) {
        tso->block_info.closure = evacuate(tso->block_info.closure);
    }
    tso->blocked_exceptions =
        (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);

    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
    }

    // scavenge current transaction record
    tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
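// Note that block_info is a union: it holds a closure pointer only for
// the three why_blocked values tested above.  For conditions such as
// BlockedOnRead or BlockedOnDelay it holds a file descriptor or a wakeup
// time instead, which must not be followed as a heap pointer.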
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
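// Argument bitmaps use the opposite convention to SRT bitmaps: a *clear*
// bit marks a pointer.  For example, size = 3 with bitmap 0b010 means
// argument words 0 and 2 are pointers to evacuate, while word 1 is a
// non-pointer and is left untouched.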
STATIC_INLINE StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(fun);
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE StgPtr
scavenge_PAP (StgPAP *pap)
{
    pap->fun = evacuate(pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    ap->fun = evacuate(ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
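// An AP shares the PAP payload layout (a function closure followed by
// n_args argument words), so both reduce to scavenge_PAP_payload();
// the pointerhood of each argument word is recovered from the function's
// info table rather than being stored in the PAP/AP itself.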
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
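// Concretely: when collecting generations 0..N, a step in generation
// G >= N is scavenged with evac_gen = G, so everything it points to
// must end up in generation G or an older one (higher generation
// number); steps in generations below N use evac_gen = 0, which imposes
// no constraint.  When the constraint cannot be satisfied, evacuate()
// sets failed_to_evac and the object falls back onto the mutable list.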
static void
scavenge(step *stp)
{
    StgPtr p, q;
    const StgInfoTable *info;
    bdescr *bd;
    nat saved_evac_gen = evac_gen;

    p = stp->scan;
    bd = stp->scan_bd;

    failed_to_evac = rtsFalse;

    /* scavenge phase - standard breadth-first scavenging of the
     * evacuated area
     */
    while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
        bd = bd->link;
        p = bd->start;
        continue;
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    ASSERT(thunk_selector_depth == 0);

    q = p;
    switch (info->type) {
    case MVAR:
    {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
        mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
        mvar->value = evacuate((StgClosure *)mvar->value);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable.
        p += sizeofW(StgMVar);
        break;
    }
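    // The FUN_2_0/THUNK_2_0/CONSTR_2_0 (and _1_0, _0_1, _0_2, _1_1)
    // cases that follow are hand-specialised copies of the generic
    // FUN/THUNK/CONSTR code further down: the pointer/non-pointer
    // split is known from the closure type, so each case evacuates
    // its payload fields directly and bumps p by the exact size.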
    case FUN_2_0:
        scavenge_fun_srt(info);
        ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_2_0:
        scavenge_thunk_srt(info);
        ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
        ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case CONSTR_2_0:
        ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_0:
        scavenge_thunk_srt(info);
        ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_1_0:
        scavenge_fun_srt(info);
    case CONSTR_1_0:
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_1:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_0_1:
        scavenge_fun_srt(info);
    case CONSTR_0_1:
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_2:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_0_2:
        scavenge_fun_srt(info);
    case CONSTR_0_2:
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_1:
        scavenge_thunk_srt(info);
        ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_1_1:
        scavenge_fun_srt(info);
    case CONSTR_1_1:
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;
    case FUN:
        scavenge_fun_srt(info);
        goto gen_obj;

    case THUNK:
    {
        StgPtr end;

        scavenge_thunk_srt(info);
        end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    gen_obj:
    case CONSTR:
    case WEAK:
    case STABLE_NAME:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
    }
    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
        bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
        bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
        bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
        p += bco_sizeW(bco);
        break;
    }

    case IND_PERM:
        if (stp->gen->no != 0) {
#ifdef PROFILING
            // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
            // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
            LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
            //
            // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
            //
            SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

            // We pretend that p has just been created.
            LDV_RECORD_CREATE((StgClosure *)p);
        }
        // fall through
    case IND_OLDGEN_PERM:
        ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
        p += sizeofW(StgInd);
        break;
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        rtsBool saved_eager_promotion = eager_promotion;

        eager_promotion = rtsFalse;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        eager_promotion = saved_eager_promotion;

        if (failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        p += sizeofW(StgMutVar);
        break;
    }
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        p += BLACKHOLE_sizeW();
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
    }

    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        ap->fun = evacuate(ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        p += arr_words_sizeW((StgArrWords *)p);
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
        // follow everything
    {
        StgPtr next;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = eager_promotion;
        eager_promotion = rtsFalse;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
        eager_promotion = saved_eager;

        if (failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        failed_to_evac = rtsTrue; // always put it on the mutable list.
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        rtsBool saved_eager = eager_promotion;

        eager_promotion = rtsFalse;
        scavengeTSO(tso);
        eager_promotion = saved_eager;

        if (failed_to_evac) {
            tso->flags |= TSO_DIRTY;
        } else {
            tso->flags &= ~TSO_DIRTY;
        }

        failed_to_evac = rtsTrue; // always on the mutable list
        p += tso_sizeW(tso);
        break;
    }
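    // The STM closure types below (TVAR_WATCH_QUEUE, TVAR, TREC_HEADER,
    // TREC_CHUNK, ATOMIC_INVARIANT, INVARIANT_CHECK_QUEUE) are all
    // mutable: each evacuates its fields with evac_gen temporarily set
    // to 0 (restored from saved_evac_gen afterwards) and sets
    // failed_to_evac so that the object stays on the mutable list.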
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        evac_gen = 0;
        wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
        wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
        wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTVarWatchQueue);
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        evac_gen = 0;
        tvar->current_value = evacuate((StgClosure*)tvar->current_value);
        tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTVar);
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        evac_gen = 0;
        trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
        trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
        trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecHeader);
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        evac_gen = 0;
        tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
            e->expected_value = evacuate((StgClosure*)e->expected_value);
            e->new_value = evacuate((StgClosure*)e->new_value);
        }
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecChunk);
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        evac_gen = 0;
        invariant->code = (StgClosure *)evacuate(invariant->code);
        invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgAtomicInvariant);
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        evac_gen = 0;
        queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
        queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
        queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgInvariantCheckQueue);
        break;
    }

    default:
        barf("scavenge: unimplemented/strange closure type %d @ %p",
             info->type, p);
    }
    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        if (stp->gen_no > 0) {
            recordMutableGen((StgClosure *)q, stp->gen);
        }
    }
    }

    stp->scan_bd = bd;
    stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
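// The mark stack is used by the compacting collector: closures are
// popped and scavenged in whatever order they were pushed, and an extra
// mark bit (set on q+1 below) records that a closure has already been
// scavenged, so the linear fallback scan can skip it after a mark-stack
// overflow.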
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR:
        {
            StgMVar *mvar = ((StgMVar *)p);
            evac_gen = 0;
            mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
            mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
            mvar->value = evacuate((StgClosure *)mvar->value);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsTrue; // mutable.
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
            ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;
        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            break;
        }
        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
            bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
            bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
            bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            ((StgInd *)p)->indirectee =
                evacuate(((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = eager_promotion;

            eager_promotion = rtsFalse;
            ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
            eager_promotion = saved_eager_promotion;

            if (failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }
        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            s->selectee = evacuate(s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            ap->fun = evacuate(ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = eager_promotion;
            eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            eager_promotion = saved_eager;

            if (failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            rtsBool saved_eager = eager_promotion;

            eager_promotion = rtsFalse;
            scavengeTSO(tso);
            eager_promotion = saved_eager;

            if (failed_to_evac) {
                tso->flags |= TSO_DIRTY;
            } else {
                tso->flags &= ~TSO_DIRTY;
            }

            failed_to_evac = rtsTrue; // always on the mutable list
            break;
        }
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            evac_gen = 0;
            wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
            wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
            wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            evac_gen = 0;
            tvar->current_value = evacuate((StgClosure*)tvar->current_value);
            tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            evac_gen = 0;
            tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
                e->expected_value = evacuate((StgClosure*)e->expected_value);
                e->new_value = evacuate((StgClosure*)e->new_value);
            }
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            evac_gen = 0;
            trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
            trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
            trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            evac_gen = 0;
            invariant->code = (StgClosure *)evacuate(invariant->code);
            invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            evac_gen = 0;
            queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
            queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
            queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsTrue; // mutable
            break;
        }

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }
        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            recordMutableGen((StgClosure *)q, &generations[evac_gen]);
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())
    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }
    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
    loop:
        // find a closure that is marked but not scavenged, and start
        // from there.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {
            oldgen_scan++;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
                goto loop;
            }
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            goto linear_scan;
        }

        oldgen_scan_bd = oldgen_scan_bd->link;
        if (oldgen_scan_bd != NULL) {
            oldgen_scan = oldgen_scan_bd->start;
            goto linear_scan;
        }
    }
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
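// scavenge_one() is called from scavenge_mutable_list() and
// scavenge_large(), so the cases below cover the closure types that can
// appear on a mutable list or a large-object list.  It returns rtsTrue
// if something the object points to could not be promoted far enough,
// i.e. the object must be kept on the mutable list.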
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    nat saved_evac_gen = evac_gen;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case MVAR:
    {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
        mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
        mvar->value = evacuate((StgClosure *)mvar->value);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable.
        break;
    }
    case THUNK:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
        }
        break;
    }

    case FUN:
    case FUN_1_0:			// hardly worth specialising these guys
    case CONSTR:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = eager_promotion;

        eager_promotion = rtsFalse;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        eager_promotion = saved_eager_promotion;

        if (failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        ap->fun = evacuate(ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = eager_promotion;
        eager_promotion = rtsFalse;

        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
        eager_promotion = saved_eager;

        if (failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        rtsBool saved_eager = eager_promotion;

        eager_promotion = rtsFalse;
        scavengeTSO(tso);
        eager_promotion = saved_eager;

        if (failed_to_evac) {
            tso->flags |= TSO_DIRTY;
        } else {
            tso->flags &= ~TSO_DIRTY;
        }

        failed_to_evac = rtsTrue; // always on the mutable list
        break;
    }
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        evac_gen = 0;
        wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
        wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
        wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        evac_gen = 0;
        tvar->current_value = evacuate((StgClosure*)tvar->current_value);
        tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        evac_gen = 0;
        trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
        trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
        trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        evac_gen = 0;
        tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
            e->expected_value = evacuate((StgClosure*)e->expected_value);
            e->new_value = evacuate((StgClosure*)e->new_value);
        }
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        evac_gen = 0;
        invariant->code = (StgClosure *)evacuate(invariant->code);
        invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        evac_gen = 0;
        queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
        queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
        queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
        evac_gen = saved_evac_gen;
        failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
    {
        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects.  If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice.  evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
         */
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
            break;
        }
        ((StgInd *)p)->indirectee = evacuate(q);
#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->steps[0].scan;
            bdescr *start_bd = gen->steps[0].scan_bd;
            nat size = 0;
            scavenge(&gen->steps[0]);
            if (start_bd != gen->steps[0].scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->steps[0].scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->steps[0].scan -
                    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
            } else {
                size = gen->steps[0].scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;
    }
1373 barf("scavenge_one: strange object %d", (int)(info->type));
1376 no_luck = failed_to_evac;
1377 failed_to_evac = rtsFalse;
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
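// The mutable list itself is a chain of blocks whose payloads are plain
// arrays of closure pointers (bd->start up to bd->free).  We walk the
// snapshot saved in gen->saved_mut_list, while recordMutableGen()
// accumulates the new list in gen->mut_list.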
void
scavenge_mutable_list(generation *gen)
{
    bdescr *bd;
    StgPtr p, q;

    bd = gen->saved_mut_list;

    evac_gen = gen->no;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif
            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
            // TSO, are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen((StgClosure *)p,gen);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // A clean TSO: we don't have to traverse its
                    // stack.  However, we *do* follow the link field:
                    // we don't want to have to mark a TSO dirty just
                    // because we put it on a different queue.
                    if (tso->why_blocked != BlockedOnBlackHole) {
                        tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
                    }
                    recordMutableGen((StgClosure *)p,gen);
                    continue;
                }
            }
            default:
                ;
            }
            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen((StgClosure *)p,gen);
            }
        }
    }

    // free the old mut_list
    freeChain(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Static objects that have been evacuated so far sit on the
   static_objects list; we scavenge each one and move it onto the
   scavenged_static_objects list.  Evacuating can push more objects
   onto static_objects, so we keep going until the list drains.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
     list... */
  while (p != END_OF_STATIC_LIST) {

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    // make sure the info pointer is into text space

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = *STATIC_LINK(info,p);
    *STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;
    switch (info -> type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list of the oldest generation.  We
         * leave it *on* the scavenged_static_objects list, though,
         * in case we visit this object again.
         */
        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            recordMutableGen((StgClosure *)p,oldest_gen);
        }
        break;
      }

    case THUNK_STATIC:
      scavenge_thunk_srt(info);
      break;

    case FUN_STATIC:
      scavenge_fun_srt(info);
      break;

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        // evacuate the pointers
        for (q = (P_)p->payload; q < next; q++) {
            *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }
    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
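// A large bitmap is an array of bitmap words: after every BITS_IN(W_)
// stack words we move on to the next word of the array instead of
// shifting further, which is the only difference from the single-word
// scavenge_small_bitmap() below.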
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  const StgRetInfoTable* info;
  StgWord bitmap;
  nat size;

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with an activation record.
   */
  while (p < stack_end) {
    info  = get_ret_itbl((StgClosure *)p);

    switch (info->i.type) {
    case UPDATE_FRAME:
        // In SMP, we can get update frames that point to indirections
        // when two threads evaluate the same thunk.  We do attempt to
        // discover this situation in threadPaused(), but it's
        // possible that the following sequence occurs: thread B enters
        // thunk T; thread A enters T as well; B blackholes T and later
        // updates it; then a GC runs before A is scheduled again.
        //
        // Now T is an indirection, and the update frame is already
        // marked on A's stack, so we won't traverse it again in
        // threadPaused().  We could traverse the whole stack again
        // before GC, but that seems like overkill.
        //
        // Scavenging this update frame as normal would be disastrous;
        // the updatee would end up pointing to the value.  So we turn
        // the indirection into an IND_PERM, so that evacuate will
        // copy the indirection into the old generation instead of
        // discarding it.
        if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) {
            ((StgUpdateFrame *)p)->updatee->header.info =
                (StgInfoTable *)&stg_IND_PERM_info;
        }
        ((StgUpdateFrame *)p)->updatee
            = evacuate(((StgUpdateFrame *)p)->updatee);
        p += sizeofW(StgUpdateFrame);
        continue;
    // small bitmap (< 32 entries, or 64 on a 64-bit machine)
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case RET_SMALL:
        bitmap = BITMAP_BITS(info->i.layout.bitmap);
        size   = BITMAP_SIZE(info->i.layout.bitmap);
        // NOTE: the payload starts immediately after the info-ptr, we
        // don't have an StgHeader in the same sense as a heap closure.
        p++;
        p = scavenge_small_bitmap(p, size, bitmap);

    follow_srt:
        scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
        continue;
    case RET_BCO: {
        StgBCO *bco;

        p++;
        *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        bco = (StgBCO *)*p;
        p++;
        size = BCO_BITMAP_SIZE(bco);
        scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
        p += size;
        continue;
    }

    // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
    case RET_BIG:
    {
        size = GET_LARGE_BITMAP(&info->i)->size;
        p++;
        scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
        p += size;
        // and don't forget to follow the SRT
        goto follow_srt;
    }
    case RET_DYN:
    {
        StgWord dyn;

        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area.  (see StgMacros.h)
        dyn = ((StgRetDyn *)p)->liveness;

        // traverse the bitmap first
        bitmap = RET_DYN_LIVENESS(dyn);
        p      = (P_)&((StgRetDyn *)p)->payload[0];
        size   = RET_DYN_BITMAP_SIZE;
        p = scavenge_small_bitmap(p, size, bitmap);

        // skip over the non-ptr words
        p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

        // follow the ptr words
        for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            p++;
        }
        continue;
    }
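    // Note that, unlike scavenge_large_srt_bitmap(), stack scavenging
    // writes the result of evacuate() back into the stack slot: the
    // frame is live, so it must be updated to point at the object's
    // new location.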
    case RET_FUN:
    {
        StgRetFun *ret_fun = (StgRetFun *)p;
        StgFunInfoTable *fun_info;

        ret_fun->fun = evacuate(ret_fun->fun);
        fun_info = get_fun_itbl(ret_fun->fun);
        p = scavenge_arg_block(fun_info, ret_fun->payload);
        goto follow_srt;
    }

    default:
        barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
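// Large objects are never copied by the collector: evacuating one just
// unlinks its block descriptor and relinks it on the destination step's
// list.  "Scavenging" a large object therefore means running
// scavenge_one() over each newly linked block exactly once.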
static void
scavenge_large(step *stp)
{
  bdescr *bd;
  StgPtr p;

  bd = stp->new_large_objects;

  for (; bd != NULL; bd = stp->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    stp->new_large_objects = bd->link;
    dbl_link_onto(bd, &stp->scavenged_large_objects);

    // update the block count in this step.
    stp->n_scavenged_large_blocks += bd->blocks;

    p = bd->start;
    if (scavenge_one(p)) {
        if (stp->gen_no > 0) {
            recordMutableGen((StgClosure *)p, stp->gen);
        }
    }
  }
}