1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
23 #include "LdvProfile.h"
25 static void scavenge_stack (StgPtr p, StgPtr stack_end);
27 static void scavenge_large_bitmap (StgPtr p,
28 StgLargeBitmap *large_bitmap,
31 /* Similar to scavenge_large_bitmap(), but we don't write back the
32 * pointers we get back from evacuate().
// Walk a large SRT (static reference table): for each set bit in the
// bitmap, evacuate the corresponding entry.  The evacuate() results are
// deliberately discarded (SRT entries are static, not heap slots).
// NOTE(review): this listing is elided -- the declarations of b, i, p,
// size and the per-iteration increments are on lines not shown here.
35 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
// Load the first bitmap word, the total entry count, and the SRT base.
42 bitmap = large_srt->l.bitmap[b];
43 size = (nat)large_srt->l.size;
44 p = (StgClosure **)large_srt->srt;
45 for (i = 0; i < size; ) {
// Low bit set => this SRT entry is live; evacuate it.
46 if ((bitmap & 1) != 0) {
// Refill the bitmap word at each word boundary (every BITS_IN(W_) bits).
51 if (i % BITS_IN(W_) == 0) {
53 bitmap = large_srt->l.bitmap[b];
60 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
61 * srt field in the info table. That's ok, because we'll
62 * never dereference it.
// Scavenge a (small) SRT.  srt_bitmap carries one bit per entry; the
// all-ones value ((StgHalfWord)(-1)) is a sentinel meaning "large SRT",
// which is delegated to scavenge_large_srt_bitmap() instead.
// NOTE(review): elided listing -- loop header, bitmap shifting and the
// non-PIC evacuate call are on lines not shown here.
65 scavenge_srt (StgClosure **srt, nat srt_bitmap)
73 if (bitmap == (StgHalfWord)(-1)) {
74 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
// Walk the small bitmap, evacuating each entry whose bit is set.
79 if ((bitmap & 1) != 0) {
80 #if defined(__PIC__) && defined(mingw32_TARGET_OS)
81 // Special-case to handle references to closures hiding out in DLLs, since
82 // double indirections required to get at those. The code generator knows
83 // which is which when generating the SRT, so it stores the (indirect)
84 // reference to the DLL closure in the table by first adding one to it.
85 // We check for this here, and undo the addition before evacuating it.
87 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
88 // closure that's fixed at link-time, and no extra magic is required.
89 if ( (unsigned long)(*srt) & 0x1 ) {
// Strip the tag bit, then follow the extra indirection into the DLL.
90 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
// Scavenge the SRT attached to a thunk's info table.  SRTs keep static
// objects alive, and static objects are only collected during a major
// GC, so this is a no-op for minor collections.
105 scavenge_thunk_srt(const StgInfoTable *info)
107 StgThunkInfoTable *thunk_info;
109 if (!major_gc) return;
111 thunk_info = itbl_to_thunk_itbl(info);
112 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
// Scavenge the SRT attached to a function's info table.  As with
// scavenge_thunk_srt(), SRTs only matter in a major GC (static objects
// are not collected in minor GCs), so bail out early otherwise.
116 scavenge_fun_srt(const StgInfoTable *info)
118 StgFunInfoTable *fun_info;
120 if (!major_gc) return;
122 fun_info = itbl_to_fun_itbl(info);
123 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
126 /* -----------------------------------------------------------------------------
128 -------------------------------------------------------------------------- */
// Scavenge a TSO (thread state object): its blocking-info closure,
// blocked-exceptions queue, run-queue link, STM transaction record,
// and finally the thread's own stack.
131 scavengeTSO (StgTSO *tso)
// block_info.closure is only a heap closure for these blocking reasons;
// for other why_blocked values the field is not a pointer.
133 if ( tso->why_blocked == BlockedOnMVar
134 || tso->why_blocked == BlockedOnBlackHole
135 || tso->why_blocked == BlockedOnException
137 tso->block_info.closure = evacuate(tso->block_info.closure);
139 tso->blocked_exceptions =
140 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
142 // We don't always chase the link field: TSOs on the blackhole
143 // queue are not automatically alive, so the link field is a
144 // "weak" pointer in that case.
145 if (tso->why_blocked != BlockedOnBlackHole) {
146 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
149 // scavenge the current STM transaction record
150 tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
152 // scavenge this thread's stack
153 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
156 /* -----------------------------------------------------------------------------
157 Blocks of function args occur on the stack (at the top) and
159 -------------------------------------------------------------------------- */
// Scavenge a block of function arguments.  The pointerhood of each word
// comes from the function's argument bitmap: ARG_GEN carries a small
// inline bitmap, ARG_GEN_BIG an out-of-line large bitmap, and all other
// fun_types index the static stg_arg_bitmaps[] table.
// NOTE(review): elided listing -- case labels, declarations and the
// small-bitmap loop header are on lines not shown here.
162 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
169 switch (fun_info->f.fun_type) {
// ARG_GEN: small bitmap stored directly in the info table.
171 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
172 size = BITMAP_SIZE(fun_info->f.b.bitmap);
// ARG_GEN_BIG: out-of-line large bitmap.
175 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
176 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
// default: one of the standard argument patterns.
180 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
181 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
// Small-bitmap walk: a 0 bit marks a pointer word; evacuate it in place.
184 if ((bitmap & 1) == 0) {
185 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
188 bitmap = bitmap >> 1;
// Scavenge the 'size'-word payload of a PAP or AP, using the function's
// argument bitmap to locate pointer words.  The untagged function
// closure supplies the info table; a BCO carries its own bitmap.
// NOTE(review): elided listing -- case labels, declarations and the
// return of the past-the-payload pointer are on lines not shown here.
197 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
201 StgFunInfoTable *fun_info;
// fun may carry a pointer tag; untag it before fetching the info table.
203 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
204 ASSERT(fun_info->i.type != PAP);
207 switch (fun_info->f.fun_type) {
209 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
212 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
// BCO: bitmap lives in the byte-code object itself.
216 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
220 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
// Small-bitmap walk: a 0 bit marks a pointer word; evacuate it in place.
223 if ((bitmap & 1) == 0) {
224 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
227 bitmap = bitmap >> 1;
// Scavenge a PAP: evacuate the function closure first (its info table
// determines the payload layout), then scavenge the n_args-word payload.
236 scavenge_PAP (StgPAP *pap)
238 pap->fun = evacuate(pap->fun);
239 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
// Scavenge an AP (generic application): identical shape to
// scavenge_PAP() -- evacuate the function, then its argument payload.
243 scavenge_AP (StgAP *ap)
245 ap->fun = evacuate(ap->fun);
246 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
249 /* -----------------------------------------------------------------------------
250 Scavenge a given step until there are no more objects in this step
253 evac_gen is set by the caller to be either zero (for a step in a
254 generation < N) or G where G is the generation of the step being
257 We sometimes temporarily change evac_gen back to zero if we're
258 scavenging a mutable object where early promotion isn't such a good
260 -------------------------------------------------------------------------- */
// Main linear scavenger: walks the to-space of one step breadth-first,
// dispatching on each closure's info-table type, evacuating its pointer
// fields, and advancing p by the closure's size.  Mutable/STM closures
// set failed_to_evac so the object ends up on the generation's mutable
// list (see the tail of the loop).
// NOTE(review): this listing is elided -- the function signature, local
// declarations, case labels, break statements and closing braces are on
// lines not shown here; the code below must be read as a fragment.
268 nat saved_evac_gen = evac_gen;
273 failed_to_evac = rtsFalse;
275 /* scavenge phase - standard breadth-first scavenging of the
// Loop until the scan pointer catches up with the allocation pointer.
279 while (bd != stp->hp_bd || p < stp->hp) {
281 // If we're at the end of this block, move on to the next block
282 if (bd != stp->hp_bd && p == bd->free) {
288 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
289 info = get_itbl((StgClosure *)p);
291 ASSERT(thunk_selector_depth == 0);
294 switch (info->type) {
// MVar: fully mutable -- scavenge head/tail/value with eager promotion
// suppressed, then force the object onto the mutable list.
298 StgMVar *mvar = ((StgMVar *)p);
300 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
301 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
302 mvar->value = evacuate((StgClosure *)mvar->value);
303 evac_gen = saved_evac_gen;
304 failed_to_evac = rtsTrue; // mutable.
305 p += sizeofW(StgMVar);
// Specialised small-arity FUN/THUNK/CONSTR cases: scavenge the SRT (for
// FUN/THUNK), evacuate the fixed number of pointer fields, and step p
// by the exact closure size.  Note StgThunk has an extra header word,
// hence sizeofW(StgThunk) vs sizeofW(StgHeader).
310 scavenge_fun_srt(info);
311 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
312 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
313 p += sizeofW(StgHeader) + 2;
317 scavenge_thunk_srt(info);
318 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
319 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
320 p += sizeofW(StgThunk) + 2;
324 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
325 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
326 p += sizeofW(StgHeader) + 2;
330 scavenge_thunk_srt(info);
331 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
332 p += sizeofW(StgThunk) + 1;
336 scavenge_fun_srt(info);
338 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
339 p += sizeofW(StgHeader) + 1;
343 scavenge_thunk_srt(info);
344 p += sizeofW(StgThunk) + 1;
348 scavenge_fun_srt(info);
350 p += sizeofW(StgHeader) + 1;
354 scavenge_thunk_srt(info);
355 p += sizeofW(StgThunk) + 2;
359 scavenge_fun_srt(info);
361 p += sizeofW(StgHeader) + 2;
365 scavenge_thunk_srt(info);
366 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
367 p += sizeofW(StgThunk) + 2;
371 scavenge_fun_srt(info);
373 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
374 p += sizeofW(StgHeader) + 2;
378 scavenge_fun_srt(info);
// Generic THUNK / generic closure: evacuate layout.payload.ptrs pointer
// words, then skip the nptrs non-pointer words.
385 scavenge_thunk_srt(info);
386 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
387 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
388 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
390 p += info->layout.payload.nptrs;
401 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
402 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
403 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
405 p += info->layout.payload.nptrs;
// BCO: evacuate the three sub-objects of a byte-code object.
410 StgBCO *bco = (StgBCO *)p;
411 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
412 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
413 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
// IND_PERM in an old generation is retyped to IND_OLDGEN_PERM, with LDV
// profiling told the old closure died and a new one was created.
419 if (stp->gen->no != 0) {
422 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
423 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
424 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
427 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
429 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
431 // We pretend that p has just been created.
432 LDV_RECORD_CREATE((StgClosure *)p);
435 case IND_OLDGEN_PERM:
436 ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
437 p += sizeofW(StgInd);
// MUT_VAR: scavenge with eager promotion off; afterwards reset the
// info pointer to DIRTY or CLEAN depending on whether it still points
// into a younger generation.
441 case MUT_VAR_DIRTY: {
442 rtsBool saved_eager_promotion = eager_promotion;
444 eager_promotion = rtsFalse;
445 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
446 eager_promotion = saved_eager_promotion;
448 if (failed_to_evac) {
449 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
451 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
453 p += sizeofW(StgMutVar);
// Black holes have no pointer fields to follow; just skip them.
458 case SE_CAF_BLACKHOLE:
461 p += BLACKHOLE_sizeW();
466 StgSelector *s = (StgSelector *)p;
467 s->selectee = evacuate(s->selectee);
468 p += THUNK_SELECTOR_sizeW();
472 // A chunk of stack saved in a heap object
475 StgAP_STACK *ap = (StgAP_STACK *)p;
477 ap->fun = evacuate(ap->fun);
478 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
479 p = (StgPtr)ap->payload + ap->size;
484 p = scavenge_PAP((StgPAP *)p);
488 p = scavenge_AP((StgAP *)p);
// ARR_WORDS contains no pointers; just skip over it.
493 p += arr_words_sizeW((StgArrWords *)p);
496 case MUT_ARR_PTRS_CLEAN:
497 case MUT_ARR_PTRS_DIRTY:
503 // We don't eagerly promote objects pointed to by a mutable
504 // array, but if we find the array only points to objects in
505 // the same or an older generation, we mark it "clean" and
506 // avoid traversing it during minor GCs.
507 saved_eager = eager_promotion;
508 eager_promotion = rtsFalse;
509 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
510 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
511 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
513 eager_promotion = saved_eager;
515 if (failed_to_evac) {
516 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
518 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
521 failed_to_evac = rtsTrue; // always put it on the mutable list.
525 case MUT_ARR_PTRS_FROZEN:
526 case MUT_ARR_PTRS_FROZEN0:
531 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
532 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
533 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
536 // If we're going to put this object on the mutable list, then
537 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
538 if (failed_to_evac) {
539 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
541 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// TSO: scavenged via scavengeTSO() (call on an elided line); the dirty
// flag is recomputed from whether any pointer stayed young.
548 StgTSO *tso = (StgTSO *)p;
549 rtsBool saved_eager = eager_promotion;
551 eager_promotion = rtsFalse;
553 eager_promotion = saved_eager;
555 if (failed_to_evac) {
556 tso->flags |= TSO_DIRTY;
558 tso->flags &= ~TSO_DIRTY;
561 failed_to_evac = rtsTrue; // always on the mutable list
// STM closures (watch queues, TVars, TRec headers/chunks, invariants):
// all mutable -- evacuate each field, restore evac_gen, and force the
// object onto the mutable list.
566 case TVAR_WATCH_QUEUE:
568 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
570 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
571 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
572 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
573 evac_gen = saved_evac_gen;
574 failed_to_evac = rtsTrue; // mutable
575 p += sizeofW(StgTVarWatchQueue);
581 StgTVar *tvar = ((StgTVar *) p);
583 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
584 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
585 evac_gen = saved_evac_gen;
586 failed_to_evac = rtsTrue; // mutable
587 p += sizeofW(StgTVar);
593 StgTRecHeader *trec = ((StgTRecHeader *) p);
595 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
596 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
597 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
598 evac_gen = saved_evac_gen;
599 failed_to_evac = rtsTrue; // mutable
600 p += sizeofW(StgTRecHeader);
607 StgTRecChunk *tc = ((StgTRecChunk *) p);
608 TRecEntry *e = &(tc -> entries[0]);
610 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
611 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
612 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
613 e->expected_value = evacuate((StgClosure*)e->expected_value);
614 e->new_value = evacuate((StgClosure*)e->new_value);
616 evac_gen = saved_evac_gen;
617 failed_to_evac = rtsTrue; // mutable
618 p += sizeofW(StgTRecChunk);
622 case ATOMIC_INVARIANT:
624 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
626 invariant->code = (StgClosure *)evacuate(invariant->code);
627 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
628 evac_gen = saved_evac_gen;
629 failed_to_evac = rtsTrue; // mutable
630 p += sizeofW(StgAtomicInvariant);
634 case INVARIANT_CHECK_QUEUE:
636 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
638 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
639 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
640 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
641 evac_gen = saved_evac_gen;
642 failed_to_evac = rtsTrue; // mutable
643 p += sizeofW(StgInvariantCheckQueue);
648 barf("scavenge: unimplemented/strange closure type %d @ %p",
653 * We need to record the current object on the mutable list if
654 * (a) It is actually mutable, or
655 * (b) It contains pointers to a younger generation.
656 * Case (b) arises if we didn't manage to promote everything that
657 * the current object points to into the current generation.
659 if (failed_to_evac) {
660 failed_to_evac = rtsFalse;
661 if (stp->gen_no > 0) {
662 recordMutableGen((StgClosure *)q, stp->gen);
671 /* -----------------------------------------------------------------------------
672 Scavenge everything on the mark stack.
674 This is slightly different from scavenge():
675 - we don't walk linearly through the objects, so the scavenger
676 doesn't need to advance the pointer on to the next object.
677 -------------------------------------------------------------------------- */
// Mark-stack scavenger used by the compacting collector.  The per-type
// dispatch mirrors scavenge() above, except that p comes from the mark
// stack (no size-based advancing), and if the mark stack overflowed we
// fall back to a linear scan of the old generation's blocks looking for
// marked-but-not-yet-scavenged closures.
// NOTE(review): elided listing -- declarations, case labels, breaks and
// closing braces are on lines not shown here.
680 scavenge_mark_stack(void)
// Everything reached from here is promoted straight to the oldest gen.
686 evac_gen = oldest_gen->no;
687 saved_evac_gen = evac_gen;
690 while (!mark_stack_empty()) {
691 p = pop_mark_stack();
693 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
694 info = get_itbl((StgClosure *)p);
697 switch (info->type) {
// MVar: mutable -- scavenge all three fields, then restore evac_gen.
701 StgMVar *mvar = ((StgMVar *)p);
703 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
704 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
705 mvar->value = evacuate((StgClosure *)mvar->value);
706 evac_gen = saved_evac_gen;
707 failed_to_evac = rtsTrue; // mutable.
// Specialised small-arity FUN/THUNK/CONSTR cases, as in scavenge(),
// but with no pointer advancing.
712 scavenge_fun_srt(info);
713 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
714 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
718 scavenge_thunk_srt(info);
719 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
720 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
724 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
725 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
730 scavenge_fun_srt(info);
731 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
736 scavenge_thunk_srt(info);
737 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
742 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
747 scavenge_fun_srt(info);
752 scavenge_thunk_srt(info);
760 scavenge_fun_srt(info);
// Generic THUNK / generic closure: follow the ptrs-prefix of payload.
767 scavenge_thunk_srt(info);
768 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
769 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
770 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
782 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
783 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
784 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
790 StgBCO *bco = (StgBCO *)p;
791 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
792 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
793 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
798 // don't need to do anything here: the only possible case
799 // is that we're in a 1-space compacting collector, with
800 // no "old" generation.
804 case IND_OLDGEN_PERM:
805 ((StgInd *)p)->indirectee =
806 evacuate(((StgInd *)p)->indirectee);
// MUT_VAR: eager promotion off, then flip DIRTY/CLEAN per the outcome.
810 case MUT_VAR_DIRTY: {
811 rtsBool saved_eager_promotion = eager_promotion;
813 eager_promotion = rtsFalse;
814 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
815 eager_promotion = saved_eager_promotion;
817 if (failed_to_evac) {
818 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
820 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
826 case SE_CAF_BLACKHOLE:
834 StgSelector *s = (StgSelector *)p;
835 s->selectee = evacuate(s->selectee);
839 // A chunk of stack saved in a heap object
842 StgAP_STACK *ap = (StgAP_STACK *)p;
844 ap->fun = evacuate(ap->fun);
845 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
// Return values of scavenge_PAP/scavenge_AP are unused here: the mark-
// stack walker does not advance linearly.
850 scavenge_PAP((StgPAP *)p);
854 scavenge_AP((StgAP *)p);
857 case MUT_ARR_PTRS_CLEAN:
858 case MUT_ARR_PTRS_DIRTY:
864 // We don't eagerly promote objects pointed to by a mutable
865 // array, but if we find the array only points to objects in
866 // the same or an older generation, we mark it "clean" and
867 // avoid traversing it during minor GCs.
868 saved_eager = eager_promotion;
869 eager_promotion = rtsFalse;
870 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
871 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
872 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
874 eager_promotion = saved_eager;
876 if (failed_to_evac) {
877 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
879 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
882 failed_to_evac = rtsTrue; // mutable anyhow.
886 case MUT_ARR_PTRS_FROZEN:
887 case MUT_ARR_PTRS_FROZEN0:
892 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
893 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
894 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
897 // If we're going to put this object on the mutable list, then
898 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
899 if (failed_to_evac) {
900 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
902 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// TSO: scavenged via scavengeTSO() (call on an elided line).
909 StgTSO *tso = (StgTSO *)p;
910 rtsBool saved_eager = eager_promotion;
912 eager_promotion = rtsFalse;
914 eager_promotion = saved_eager;
916 if (failed_to_evac) {
917 tso->flags |= TSO_DIRTY;
919 tso->flags &= ~TSO_DIRTY;
922 failed_to_evac = rtsTrue; // always on the mutable list
// STM closures: all mutable, same handling as in scavenge().
926 case TVAR_WATCH_QUEUE:
928 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
930 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
931 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
932 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
933 evac_gen = saved_evac_gen;
934 failed_to_evac = rtsTrue; // mutable
940 StgTVar *tvar = ((StgTVar *) p);
942 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
943 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
944 evac_gen = saved_evac_gen;
945 failed_to_evac = rtsTrue; // mutable
952 StgTRecChunk *tc = ((StgTRecChunk *) p);
953 TRecEntry *e = &(tc -> entries[0]);
955 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
956 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
957 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
958 e->expected_value = evacuate((StgClosure*)e->expected_value);
959 e->new_value = evacuate((StgClosure*)e->new_value);
961 evac_gen = saved_evac_gen;
962 failed_to_evac = rtsTrue; // mutable
968 StgTRecHeader *trec = ((StgTRecHeader *) p);
970 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
971 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
972 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
973 evac_gen = saved_evac_gen;
974 failed_to_evac = rtsTrue; // mutable
978 case ATOMIC_INVARIANT:
980 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
982 invariant->code = (StgClosure *)evacuate(invariant->code);
983 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
984 evac_gen = saved_evac_gen;
985 failed_to_evac = rtsTrue; // mutable
989 case INVARIANT_CHECK_QUEUE:
991 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
993 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
994 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
995 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
996 evac_gen = saved_evac_gen;
997 failed_to_evac = rtsTrue; // mutable
1002 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
// Record the object on the oldest generation's mutable list if any of
// its pointers could not be promoted.
1006 if (failed_to_evac) {
1007 failed_to_evac = rtsFalse;
1009 recordMutableGen((StgClosure *)q, &generations[evac_gen]);
1013 // mark the next bit to indicate "scavenged"
1014 mark(q+1, Bdescr(q));
1016 } // while (!mark_stack_empty())
1018 // start a new linear scan if the mark stack overflowed at some point
1019 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
1020 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
1021 mark_stack_overflowed = rtsFalse;
1022 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
1023 oldgen_scan = oldgen_scan_bd->start;
1026 if (oldgen_scan_bd) {
1027 // push a new thing on the mark stack
1029 // find a closure that is marked but not scavenged, and start
1031 while (oldgen_scan < oldgen_scan_bd->free
1032 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
1036 if (oldgen_scan < oldgen_scan_bd->free) {
1038 // already scavenged?
1039 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
1040 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1043 push_mark_stack(oldgen_scan);
1044 // ToDo: bump the linear scan by the actual size of the object
1045 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
// End of this block: advance the linear scan to the next block, if any.
1049 oldgen_scan_bd = oldgen_scan_bd->link;
1050 if (oldgen_scan_bd != NULL) {
1051 oldgen_scan = oldgen_scan_bd->start;
1057 /* -----------------------------------------------------------------------------
1058 Scavenge one object.
1060 This is used for objects that are temporarily marked as mutable
1061 because they contain old-to-new generation pointers. Only certain
1062 objects can have this property.
1063 -------------------------------------------------------------------------- */
// Scavenge exactly one closure (used from the mutable-list walk).
// Returns rtsTrue ("no luck") if some pointer could not be promoted and
// the object must stay on the mutable list.
// NOTE(review): elided listing -- the return type, case labels, breaks,
// closing braces and the final return are on lines not shown here.
1066 scavenge_one(StgPtr p)
1068 const StgInfoTable *info;
1069 nat saved_evac_gen = evac_gen;
1072 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1073 info = get_itbl((StgClosure *)p);
1075 switch (info->type) {
// MVar: mutable -- scavenge all three fields, then restore evac_gen.
1079 StgMVar *mvar = ((StgMVar *)p);
1081 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
1082 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
1083 mvar->value = evacuate((StgClosure *)mvar->value);
1084 evac_gen = saved_evac_gen;
1085 failed_to_evac = rtsTrue; // mutable.
// Thunks: follow the ptrs-prefix of the payload.
1098 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1099 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1100 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
1106 case FUN_1_0: // hardly worth specialising these guys
// Generic FUN/CONSTR/etc.: follow the ptrs-prefix of the payload.
1122 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1123 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1124 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
// MUT_VAR: eager promotion off, then flip DIRTY/CLEAN per the outcome.
1130 case MUT_VAR_DIRTY: {
1132 rtsBool saved_eager_promotion = eager_promotion;
1134 eager_promotion = rtsFalse;
1135 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1136 eager_promotion = saved_eager_promotion;
1138 if (failed_to_evac) {
1139 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1141 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1147 case SE_CAF_BLACKHOLE:
1152 case THUNK_SELECTOR:
1154 StgSelector *s = (StgSelector *)p;
1155 s->selectee = evacuate(s->selectee);
// AP_STACK: scavenge the function and the saved stack chunk.
1161 StgAP_STACK *ap = (StgAP_STACK *)p;
1163 ap->fun = evacuate(ap->fun);
1164 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1165 p = (StgPtr)ap->payload + ap->size;
1170 p = scavenge_PAP((StgPAP *)p);
1174 p = scavenge_AP((StgAP *)p);
1178 // nothing to follow
1181 case MUT_ARR_PTRS_CLEAN:
1182 case MUT_ARR_PTRS_DIRTY:
1185 rtsBool saved_eager;
1187 // We don't eagerly promote objects pointed to by a mutable
1188 // array, but if we find the array only points to objects in
1189 // the same or an older generation, we mark it "clean" and
1190 // avoid traversing it during minor GCs.
1191 saved_eager = eager_promotion;
1192 eager_promotion = rtsFalse;
1194 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1195 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1196 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1198 eager_promotion = saved_eager;
1200 if (failed_to_evac) {
1201 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1203 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1206 failed_to_evac = rtsTrue;
1210 case MUT_ARR_PTRS_FROZEN:
1211 case MUT_ARR_PTRS_FROZEN0:
1213 // follow everything
1216 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1217 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1218 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1221 // If we're going to put this object on the mutable list, then
1222 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1223 if (failed_to_evac) {
1224 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1226 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// TSO: scavenged via scavengeTSO() (call on an elided line).
1233 StgTSO *tso = (StgTSO *)p;
1234 rtsBool saved_eager = eager_promotion;
1236 eager_promotion = rtsFalse;
1238 eager_promotion = saved_eager;
1240 if (failed_to_evac) {
1241 tso->flags |= TSO_DIRTY;
1243 tso->flags &= ~TSO_DIRTY;
1246 failed_to_evac = rtsTrue; // always on the mutable list
// STM closures: all mutable, same handling as in scavenge().
1250 case TVAR_WATCH_QUEUE:
1252 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
1254 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
1255 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
1256 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
1257 evac_gen = saved_evac_gen;
1258 failed_to_evac = rtsTrue; // mutable
1264 StgTVar *tvar = ((StgTVar *) p);
1266 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
1267 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
1268 evac_gen = saved_evac_gen;
1269 failed_to_evac = rtsTrue; // mutable
1275 StgTRecHeader *trec = ((StgTRecHeader *) p);
1277 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
1278 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
1279 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
1280 evac_gen = saved_evac_gen;
1281 failed_to_evac = rtsTrue; // mutable
1288 StgTRecChunk *tc = ((StgTRecChunk *) p);
1289 TRecEntry *e = &(tc -> entries[0]);
1291 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
1292 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1293 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
1294 e->expected_value = evacuate((StgClosure*)e->expected_value);
1295 e->new_value = evacuate((StgClosure*)e->new_value);
1297 evac_gen = saved_evac_gen;
1298 failed_to_evac = rtsTrue; // mutable
1302 case ATOMIC_INVARIANT:
1304 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1306 invariant->code = (StgClosure *)evacuate(invariant->code);
1307 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
1308 evac_gen = saved_evac_gen;
1309 failed_to_evac = rtsTrue; // mutable
1313 case INVARIANT_CHECK_QUEUE:
1315 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1317 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
1318 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
1319 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
1320 evac_gen = saved_evac_gen;
1321 failed_to_evac = rtsTrue; // mutable
1326 case IND_OLDGEN_PERM:
1329 /* Careful here: a THUNK can be on the mutable list because
1330 * it contains pointers to young gen objects. If such a thunk
1331 * is updated, the IND_OLDGEN will be added to the mutable
1332 * list again, and we'll scavenge it twice. evacuate()
1333 * doesn't check whether the object has already been
1334 * evacuated, so we perform that check here.
1336 StgClosure *q = ((StgInd *)p)->indirectee;
1337 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
1340 ((StgInd *)p)->indirectee = evacuate(q);
// Disabled (#if 0) debug code measuring how much was scavenged as a
// result of evacuating this indirection.
1343 #if 0 && defined(DEBUG)
1344 if (RtsFlags.DebugFlags.gc)
1345 /* Debugging code to print out the size of the thing we just
1349 StgPtr start = gen->steps[0].scan;
1350 bdescr *start_bd = gen->steps[0].scan_bd;
1352 scavenge(&gen->steps[0]);
1353 if (start_bd != gen->steps[0].scan_bd) {
1354 size += (P_)BLOCK_ROUND_UP(start) - start;
1355 start_bd = start_bd->link;
1356 while (start_bd != gen->steps[0].scan_bd) {
1357 size += BLOCK_SIZE_W;
1358 start_bd = start_bd->link;
1360 size += gen->steps[0].scan -
1361 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
1363 size = gen->steps[0].scan - start;
1365 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1371 barf("scavenge_one: strange object %d", (int)(info->type));
// Return whether promotion failed, and reset the global flag.
1374 no_luck = failed_to_evac;
1375 failed_to_evac = rtsFalse;
1379 /* -----------------------------------------------------------------------------
1380 Scavenging mutable lists.
1382 We treat the mutable list of each generation > N (i.e. all the
1383 generations older than the one being collected) as roots. We also
1384 remove non-mutable objects from the mutable list at this point.
1385 -------------------------------------------------------------------------- */
// Walk one generation's saved mutable list: count entries for GC stats,
// skip "clean" objects cheaply, scavenge the rest via scavenge_one(),
// and re-record any object that still points into a younger generation.
// NOTE(review): elided listing -- declarations (bd, q, p), the #ifdef
// around the statistics switch, and several braces/breaks are on lines
// not shown here.
1388 scavenge_mutable_list(generation *gen)
1393 bd = gen->saved_mut_list;
1396 for (; bd != NULL; bd = bd->link) {
1397 for (q = bd->start; q < bd->free; q++) {
1399 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
// Statistics: classify the mutable-list entry by closure type.
1402 switch (get_itbl((StgClosure *)p)->type) {
1404 barf("MUT_VAR_CLEAN on mutable list");
1406 mutlist_MUTVARS++; break;
1407 case MUT_ARR_PTRS_CLEAN:
1408 case MUT_ARR_PTRS_DIRTY:
1409 case MUT_ARR_PTRS_FROZEN:
1410 case MUT_ARR_PTRS_FROZEN0:
1411 mutlist_MUTARRS++; break;
1413 mutlist_OTHERS++; break;
1417 // Check whether this object is "clean", that is it
1418 // definitely doesn't point into a young generation.
1419 // Clean objects don't need to be scavenged. Some clean
1420 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1421 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
1422 // TSO, are always on the mutable list.
1424 switch (get_itbl((StgClosure *)p)->type) {
1425 case MUT_ARR_PTRS_CLEAN:
// Clean array: keep it on the list but skip scavenging it.
1426 recordMutableGen((StgClosure *)p,gen);
1429 StgTSO *tso = (StgTSO *)p;
1430 if ((tso->flags & TSO_DIRTY) == 0) {
1431 // A clean TSO: we don't have to traverse its
1432 // stack. However, we *do* follow the link field:
1433 // we don't want to have to mark a TSO dirty just
1434 // because we put it on a different queue.
1435 if (tso->why_blocked != BlockedOnBlackHole) {
1436 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
1438 recordMutableGen((StgClosure *)p,gen);
// Everything else: scavenge it; a rtsTrue result means promotion
// failed somewhere inside, so keep the object on the mutable list.
1446 if (scavenge_one(p)) {
1447 // didn't manage to promote everything, so put the
1448 // object back on the list.
1449 recordMutableGen((StgClosure *)p,gen);
1454 // free the old mut_list
1455 freeChain(gen->saved_mut_list);
1456 gen->saved_mut_list = NULL;
1459 /* -----------------------------------------------------------------------------
1460 Scavenging the static objects.
1462 We scavenge the static_objects list: each object is unlinked from
1463 static_objects, scavenged (with evac_gen set to the oldest
1464 generation), and pushed onto the scavenged_static_objects list.
1465 -------------------------------------------------------------------------- */
1468 scavenge_static(void)
// Scavenge the objects on the static_objects list, evacuating everything
// they reference into the oldest generation.  Scavenged objects are moved
// onto scavenged_static_objects so each is visited at most once per GC.
1470 StgClosure* p = static_objects;
1471 const StgInfoTable *info;
1473 /* Always evacuate straight to the oldest generation for static
1475 evac_gen = oldest_gen->no;
1477 /* keep going until we've scavenged all the objects on the linked
1479 while (p != END_OF_STATIC_LIST) {
1481 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1484 if (info->type==RBH)
1485 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1487 // make sure the info pointer is into text space
1489 /* Take this object *off* the static_objects list,
1490 * and put it on the scavenged_static_objects list.
1492 static_objects = *STATIC_LINK(info,p);
1493 *STATIC_LINK(info,p) = scavenged_static_objects;
1494 scavenged_static_objects = p;
1496 switch (info -> type) {
// Static indirection: follow and evacuate the indirectee.
1500 StgInd *ind = (StgInd *)p;
1501 ind->indirectee = evacuate(ind->indirectee);
1503 /* might fail to evacuate it, in which case we have to pop it
1504 * back on the mutable list of the oldest generation. We
1505 * leave it *on* the scavenged_static_objects list, though,
1506 * in case we visit this object again.
1508 if (failed_to_evac) {
1509 failed_to_evac = rtsFalse;
1510 recordMutableGen((StgClosure *)p,oldest_gen);
// Static thunk: only its SRT needs scavenging.
1516 scavenge_thunk_srt(info);
// Static function: likewise, scavenge its SRT.
1520 scavenge_fun_srt(info);
// Static constructor: evacuate every pointer in its payload
// (the pointer count comes from the info table layout).
1527 next = (P_)p->payload + info->layout.payload.ptrs;
1528 // evacuate the pointers
1529 for (q = (P_)p->payload; q < next; q++) {
1530 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
1536 barf("scavenge_static: strange closure %d", (int)(info->type));
// Only the IND_STATIC case above may legitimately set
// failed_to_evac (and it clears it); anything else is a bug.
1539 ASSERT(failed_to_evac == rtsFalse);
1541 /* get the next static object from the list. Remember, there might
1542 * be more stuff on this list now that we've done some evacuating!
1543 * (static_objects is a global)
1549 /* -----------------------------------------------------------------------------
1550 scavenge a chunk of memory described by a bitmap
1551 -------------------------------------------------------------------------- */
1554 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
// Scavenge `size` words starting at `p`, described by a multi-word
// bitmap.  A clear (0) bit marks a pointer word, which is evacuated and
// written back in place; a set bit marks a non-pointer word, which is
// skipped.  (Note: this polarity is the opposite of SRT bitmaps, where a
// set bit marks a pointer — see scavenge_large_srt_bitmap.)
1560 bitmap = large_bitmap->bitmap[b];
1561 for (i = 0; i < size; ) {
1562 if ((bitmap & 1) == 0) {
1563 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
// After every BITS_IN(W_) entries, move on to the next bitmap word;
// otherwise shift the current word down by one bit.
1567 if (i % BITS_IN(W_) == 0) {
1569 bitmap = large_bitmap->bitmap[b];
1571 bitmap = bitmap >> 1;
1576 STATIC_INLINE StgPtr
1577 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
// Scavenge `size` words at `p` described by a single-word bitmap
// (0 bit = pointer word, evacuated in place; 1 bit = non-pointer,
// skipped).  Returns the updated p, i.e. the word just past the
// scanned area — callers assign it back and continue from there.
1580 if ((bitmap & 1) == 0) {
1581 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1584 bitmap = bitmap >> 1;
1590 /* -----------------------------------------------------------------------------
1591 scavenge_stack walks over a section of stack and evacuates all the
1592 objects pointed to by it. We can use the same code for walking
1593 AP_STACK_UPDs, since these are just sections of copied stack.
1594 -------------------------------------------------------------------------- */
1597 scavenge_stack(StgPtr p, StgPtr stack_end)
// Walk a section of stack between p and stack_end, evacuating every
// closure pointer found in each stack frame.  Frame layout is derived
// from the return-address info table at the start of each frame.
1599 const StgRetInfoTable* info;
1604 * Each time around this loop, we are looking at a chunk of stack
1605 * that starts with an activation record.
1608 while (p < stack_end) {
1609 info = get_ret_itbl((StgClosure *)p);
1611 switch (info->i.type) {
1614 // In SMP, we can get update frames that point to indirections
1615 // when two threads evaluate the same thunk. We do attempt to
1616 // discover this situation in threadPaused(), but it's
1617 // possible that the following sequence occurs:
1626 // Now T is an indirection, and the update frame is already
1627 // marked on A's stack, so we won't traverse it again in
1628 // threadPaused(). We could traverse the whole stack again
1629 // before GC, but that seems like overkill.
1631 // Scavenging this update frame as normal would be disastrous;
1632 // the updatee would end up pointing to the value. So we turn
1633 // the indirection into an IND_PERM, so that evacuate will
1634 // copy the indirection into the old generation instead of
1636 if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) {
1637 ((StgUpdateFrame *)p)->updatee->header.info =
1638 (StgInfoTable *)&stg_IND_PERM_info;
1640 ((StgUpdateFrame *)p)->updatee
1641 = evacuate(((StgUpdateFrame *)p)->updatee);
1642 p += sizeofW(StgUpdateFrame);
1645 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1646 case CATCH_STM_FRAME:
1647 case CATCH_RETRY_FRAME:
1648 case ATOMICALLY_FRAME:
1652 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1653 size = BITMAP_SIZE(info->i.layout.bitmap);
1654 // NOTE: the payload starts immediately after the info-ptr, we
1655 // don't have an StgHeader in the same sense as a heap closure.
1657 p = scavenge_small_bitmap(p, size, bitmap);
// Frames can carry an SRT (static reference table) as well.
1661 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
// BCO return frame: evacuate the BCO reference itself, then
// scavenge the words described by the BCO's own bitmap.
1669 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1672 size = BCO_BITMAP_SIZE(bco);
1673 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1678 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1683 size = GET_LARGE_BITMAP(&info->i)->size;
1685 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1687 // and don't forget to follow the SRT
1691 // Dynamic bitmap: the mask is stored on the stack, and
1692 // there are a number of non-pointers followed by a number
1693 // of pointers above the bitmapped area. (see StgMacros.h,
1698 dyn = ((StgRetDyn *)p)->liveness;
1700 // traverse the bitmap first
1701 bitmap = RET_DYN_LIVENESS(dyn);
1702 p = (P_)&((StgRetDyn *)p)->payload[0];
1703 size = RET_DYN_BITMAP_SIZE;
1704 p = scavenge_small_bitmap(p, size, bitmap);
1706 // skip over the non-ptr words
1707 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1709 // follow the ptr words
1710 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1711 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
// RET_FUN frame: evacuate the function closure, then scavenge its
// argument block using the layout in the function's info table.
1719 StgRetFun *ret_fun = (StgRetFun *)p;
1720 StgFunInfoTable *fun_info;
1722 ret_fun->fun = evacuate(ret_fun->fun);
1723 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1724 p = scavenge_arg_block(fun_info, ret_fun->payload);
// Any other info-table type on the stack indicates corruption.
1729 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1734 /*-----------------------------------------------------------------------------
1735 scavenge the large object list.
1737 evac_gen set by caller; similar games played with evac_gen as with
1738 scavenge() - see comment at the top of scavenge(). Most large
1739 objects are (repeatedly) mutable, so most of the time evac_gen will
1741 --------------------------------------------------------------------------- */
1744 scavenge_large(step *stp)
1749 bd = stp->new_large_objects;
1751 for (; bd != NULL; bd = stp->new_large_objects) {
1753 /* take this object *off* the large objects list and put it on
1754 * the scavenged large objects list. This is so that we can
1755 * treat new_large_objects as a stack and push new objects on
1756 * the front when evacuating.
1758 stp->new_large_objects = bd->link;
1759 dbl_link_onto(bd, &stp->scavenged_large_objects);
1761 // update the block count in this step.
1762 stp->n_scavenged_large_blocks += bd->blocks;
1765 if (scavenge_one(p)) {
1766 if (stp->gen_no > 0) {
1767 recordMutableGen((StgClosure *)p, stp->gen);