1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
23 #include "LdvProfile.h"
25 static void scavenge_stack (StgPtr p, StgPtr stack_end);
27 static void scavenge_large_bitmap (StgPtr p,
28 StgLargeBitmap *large_bitmap,
31 /* Similar to scavenge_large_bitmap(), but we don't write back the
32 * pointers we get back from evacuate().
// Walk a large (multi-word-bitmap) SRT: for every set bit, evacuate the
// corresponding SRT entry. Unlike heap scavenging, the evacuated pointer is
// deliberately NOT stored back into the SRT (SRTs live in static info tables).
35 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
// NOTE(review): declarations of b, bitmap, size, p, i are on lines elided
// from this excerpt — the loop below relies on them.
42 bitmap = large_srt->l.bitmap[b];
43 size = (nat)large_srt->l.size;
44 p = (StgClosure **)large_srt->srt;
45 for (i = 0; i < size; ) {
46 if ((bitmap & 1) != 0) {
// Refill the bitmap word each time i crosses a word boundary
// (one bitmap word covers BITS_IN(W_) SRT entries).
51 if (i % BITS_IN(W_) == 0) {
53 bitmap = large_srt->l.bitmap[b];
60 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
61 * srt field in the info table. That's ok, because we'll
62 * never dereference it.
// Scavenge a (small) SRT guided by srt_bitmap: each set bit selects an SRT
// entry to evacuate. A bitmap of all-ones is the sentinel meaning "the SRT
// is too large for an inline bitmap" and delegates to
// scavenge_large_srt_bitmap().
65 scavenge_srt (StgClosure **srt, nat srt_bitmap)
// All-ones half-word marks a large SRT; handle it separately and return.
73 if (bitmap == (StgHalfWord)(-1)) {
74 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
79 if ((bitmap & 1) != 0) {
80 #if defined(__PIC__) && defined(mingw32_TARGET_OS)
81 // Special-case to handle references to closures hiding out in DLLs, since
82 // double indirections required to get at those. The code generator knows
83 // which is which when generating the SRT, so it stores the (indirect)
84 // reference to the DLL closure in the table by first adding one to it.
85 // We check for this here, and undo the addition before evacuating it.
87 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
88 // closure that's fixed at link-time, and no extra magic is required.
89 if ( (unsigned long)(*srt) & 0x1 ) {
90 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
// Scavenge the SRT attached to a THUNK info table. SRTs only keep CAFs and
// static closures alive, so this work is needed only during a major GC;
// minor collections skip it entirely.
105 scavenge_thunk_srt(const StgInfoTable *info)
107 StgThunkInfoTable *thunk_info;
109 if (!major_gc) return;
111 thunk_info = itbl_to_thunk_itbl(info);
112 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
// Scavenge the SRT attached to a FUN info table. Mirrors scavenge_thunk_srt:
// only relevant on a major GC, since SRT entries are static/CAF references.
116 scavenge_fun_srt(const StgInfoTable *info)
118 StgFunInfoTable *fun_info;
120 if (!major_gc) return;
122 fun_info = itbl_to_fun_itbl(info);
123 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
126 /* -----------------------------------------------------------------------------
// Scavenge a TSO (thread state object): its blocking info, blocked-exceptions
// queue, link field, transaction record, and finally the thread's stack.
128 -------------------------------------------------------------------------- */
131 scavengeTSO (StgTSO *tso)
// Only these blocking reasons store a closure pointer in block_info;
// for other why_blocked values block_info holds a non-pointer.
133 if ( tso->why_blocked == BlockedOnMVar
134 || tso->why_blocked == BlockedOnBlackHole
135 || tso->why_blocked == BlockedOnException
137 tso->block_info.closure = evacuate(tso->block_info.closure);
139 tso->blocked_exceptions =
140 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
142 // We don't always chase the link field: TSOs on the blackhole
143 // queue are not automatically alive, so the link field is a
144 // "weak" pointer in that case.
145 if (tso->why_blocked != BlockedOnBlackHole) {
146 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
149 // scavenge current transaction record
150 tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
152 // scavenge this thread's stack
153 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
156 /* -----------------------------------------------------------------------------
157 Blocks of function args occur on the stack (at the top) and
159 -------------------------------------------------------------------------- */
// Scavenge a block of function arguments laid out according to fun_info's
// argument bitmap. Bitmap convention: a 0 bit means "pointer" (evacuate and
// write back), a 1 bit means "non-pointer" (skip). Three layouts are handled:
// ARG_GEN (small inline bitmap), ARG_GEN_BIG (large out-of-line bitmap), and
// the default case of a standard pattern looked up in stg_arg_bitmaps[].
162 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
169 switch (fun_info->f.fun_type) {
171 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
172 size = BITMAP_SIZE(fun_info->f.b.bitmap);
175 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
176 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
180 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
181 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
// 0 bit == pointer word: evacuate it and store the new address back.
184 if ((bitmap & 1) == 0) {
185 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
188 bitmap = bitmap >> 1;
// Scavenge the payload of a PAP or AP closure: the arguments are laid out
// according to the bitmap of the underlying function (or, for a BCO, the
// BCO's own bitmap). Same 0-bit-means-pointer convention as
// scavenge_arg_block(). `size` is the payload size in words.
197 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
201 StgFunInfoTable *fun_info;
// fun may be tagged; strip the tag before looking up its info table.
// A PAP's fun can never itself be a PAP (PAPs are flattened on application).
203 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
204 ASSERT(fun_info->i.type != PAP);
207 switch (fun_info->f.fun_type) {
209 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
212 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
216 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
220 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
223 if ((bitmap & 1) == 0) {
224 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
227 bitmap = bitmap >> 1;
// Scavenge a PAP: evacuate the function closure, then its argument payload.
// Returns a pointer just past the PAP (via scavenge_PAP_payload's return).
236 scavenge_PAP (StgPAP *pap)
238 pap->fun = evacuate(pap->fun);
239 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
// Scavenge an AP (generic application): identical layout handling to a PAP.
243 scavenge_AP (StgAP *ap)
245 ap->fun = evacuate(ap->fun);
246 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
249 /* -----------------------------------------------------------------------------
250 Scavenge a given step until there are no more objects in this step
253 evac_gen is set by the caller to be either zero (for a step in a
254 generation < N) or G where G is the generation of the step being
257 We sometimes temporarily change evac_gen back to zero if we're
258 scavenging a mutable object where early promotion isn't such a good
260 -------------------------------------------------------------------------- */
// Main breadth-first scavenger for one step: walk linearly from the scan
// pointer to the allocation pointer, dispatching on each closure's info-table
// type, evacuating its pointer fields, and advancing p by the closure's size.
// `failed_to_evac` (set by evacuate()) tells us the object still points into
// a younger generation and must go on the mutable list.
268 nat saved_evac_gen = evac_gen;
273 failed_to_evac = rtsFalse;
275 /* scavenge phase - standard breadth-first scavenging of the
// Loop until the scan pointer catches up with the allocation pointer;
// evacuations performed inside the loop may extend the step's blocks.
279 while (bd != stp->hp_bd || p < stp->hp) {
281 // If we're at the end of this block, move on to the next block
282 if (bd != stp->hp_bd && p == bd->free) {
288 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
289 info = get_itbl((StgClosure *)p);
291 ASSERT(thunk_selector_depth == 0);
294 switch (info->type) {
// -- MVar: mutable; scavenged with eager promotion off, then marked
// CLEAN/DIRTY depending on whether everything was promoted.
299 rtsBool saved_eager_promotion = eager_promotion;
301 StgMVar *mvar = ((StgMVar *)p);
302 eager_promotion = rtsFalse;
303 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
304 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
305 mvar->value = evacuate((StgClosure *)mvar->value);
306 eager_promotion = saved_eager_promotion;
308 if (failed_to_evac) {
309 mvar->header.info = &stg_MVAR_DIRTY_info;
311 mvar->header.info = &stg_MVAR_CLEAN_info;
313 p += sizeofW(StgMVar);
// -- Specialised FUN/THUNK/CONSTR layouts (N ptrs / M nonptrs): the
// following cases hand-unroll the generic payload walk for speed.
318 scavenge_fun_srt(info);
319 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
320 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
321 p += sizeofW(StgHeader) + 2;
325 scavenge_thunk_srt(info);
326 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
327 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
328 p += sizeofW(StgThunk) + 2;
332 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
333 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
334 p += sizeofW(StgHeader) + 2;
338 scavenge_thunk_srt(info);
339 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
340 p += sizeofW(StgThunk) + 1;
344 scavenge_fun_srt(info);
346 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
347 p += sizeofW(StgHeader) + 1;
351 scavenge_thunk_srt(info);
352 p += sizeofW(StgThunk) + 1;
356 scavenge_fun_srt(info);
358 p += sizeofW(StgHeader) + 1;
362 scavenge_thunk_srt(info);
363 p += sizeofW(StgThunk) + 2;
367 scavenge_fun_srt(info);
369 p += sizeofW(StgHeader) + 2;
373 scavenge_thunk_srt(info);
374 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
375 p += sizeofW(StgThunk) + 2;
379 scavenge_fun_srt(info);
381 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
382 p += sizeofW(StgHeader) + 2;
386 scavenge_fun_srt(info);
// -- Generic THUNK: walk the pointer portion of the payload, then skip
// the non-pointer words.
393 scavenge_thunk_srt(info);
394 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
395 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
396 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
398 p += info->layout.payload.nptrs;
// -- Generic FUN/CONSTR/etc.: same ptrs-then-nptrs payload walk.
409 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
410 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
411 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
413 p += info->layout.payload.nptrs;
418 StgBCO *bco = (StgBCO *)p;
419 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
420 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
421 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
// -- IND_PERM promoted to the old generation becomes IND_OLDGEN_PERM
// (profiling builds must record the apparent death/birth for LDV).
427 if (stp->gen->no != 0) {
430 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
431 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
432 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
435 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
437 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
439 // We pretend that p has just been created.
440 LDV_RECORD_CREATE((StgClosure *)p);
443 case IND_OLDGEN_PERM:
444 ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
445 p += sizeofW(StgInd);
// -- MUT_VAR: like MVar, scavenged without eager promotion and then
// flipped to the CLEAN or DIRTY info table.
449 case MUT_VAR_DIRTY: {
450 rtsBool saved_eager_promotion = eager_promotion;
452 eager_promotion = rtsFalse;
453 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
454 eager_promotion = saved_eager_promotion;
456 if (failed_to_evac) {
457 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
459 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
461 p += sizeofW(StgMutVar);
466 case SE_CAF_BLACKHOLE:
469 p += BLACKHOLE_sizeW();
474 StgSelector *s = (StgSelector *)p;
475 s->selectee = evacuate(s->selectee);
476 p += THUNK_SELECTOR_sizeW();
480 // A chunk of stack saved in a heap object
483 StgAP_STACK *ap = (StgAP_STACK *)p;
485 ap->fun = evacuate(ap->fun);
486 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
487 p = (StgPtr)ap->payload + ap->size;
492 p = scavenge_PAP((StgPAP *)p);
496 p = scavenge_AP((StgAP *)p);
// ARR_WORDS contains no pointers; just skip over it.
501 p += arr_words_sizeW((StgArrWords *)p);
504 case MUT_ARR_PTRS_CLEAN:
505 case MUT_ARR_PTRS_DIRTY:
511 // We don't eagerly promote objects pointed to by a mutable
512 // array, but if we find the array only points to objects in
513 // the same or an older generation, we mark it "clean" and
514 // avoid traversing it during minor GCs.
515 saved_eager = eager_promotion;
516 eager_promotion = rtsFalse;
517 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
518 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
519 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
521 eager_promotion = saved_eager;
523 if (failed_to_evac) {
524 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
526 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
529 failed_to_evac = rtsTrue; // always put it on the mutable list.
533 case MUT_ARR_PTRS_FROZEN:
534 case MUT_ARR_PTRS_FROZEN0:
539 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
540 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
541 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
544 // If we're going to put this object on the mutable list, then
545 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
546 if (failed_to_evac) {
547 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
549 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// -- TSO: scavenged via scavengeTSO (call is on a line elided from this
// excerpt), then marked dirty/clean and kept on the mutable list.
556 StgTSO *tso = (StgTSO *)p;
557 rtsBool saved_eager = eager_promotion;
559 eager_promotion = rtsFalse;
561 eager_promotion = saved_eager;
563 if (failed_to_evac) {
564 tso->flags |= TSO_DIRTY;
566 tso->flags &= ~TSO_DIRTY;
569 failed_to_evac = rtsTrue; // always on the mutable list
// -- STM objects: all treated as permanently mutable (failed_to_evac is
// forced true so they stay on the mutable list).
574 case TVAR_WATCH_QUEUE:
576 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
578 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
579 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
580 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
581 evac_gen = saved_evac_gen;
582 failed_to_evac = rtsTrue; // mutable
583 p += sizeofW(StgTVarWatchQueue);
589 StgTVar *tvar = ((StgTVar *) p);
591 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
592 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
593 evac_gen = saved_evac_gen;
594 failed_to_evac = rtsTrue; // mutable
595 p += sizeofW(StgTVar);
601 StgTRecHeader *trec = ((StgTRecHeader *) p);
603 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
604 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
605 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
606 evac_gen = saved_evac_gen;
607 failed_to_evac = rtsTrue; // mutable
608 p += sizeofW(StgTRecHeader);
615 StgTRecChunk *tc = ((StgTRecChunk *) p);
616 TRecEntry *e = &(tc -> entries[0]);
618 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
619 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
620 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
621 e->expected_value = evacuate((StgClosure*)e->expected_value);
622 e->new_value = evacuate((StgClosure*)e->new_value);
624 evac_gen = saved_evac_gen;
625 failed_to_evac = rtsTrue; // mutable
626 p += sizeofW(StgTRecChunk);
630 case ATOMIC_INVARIANT:
632 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
634 invariant->code = (StgClosure *)evacuate(invariant->code);
635 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
636 evac_gen = saved_evac_gen;
637 failed_to_evac = rtsTrue; // mutable
638 p += sizeofW(StgAtomicInvariant);
642 case INVARIANT_CHECK_QUEUE:
644 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
646 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
647 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
648 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
649 evac_gen = saved_evac_gen;
650 failed_to_evac = rtsTrue; // mutable
651 p += sizeofW(StgInvariantCheckQueue);
656 barf("scavenge: unimplemented/strange closure type %d @ %p",
661 * We need to record the current object on the mutable list if
662 * (a) It is actually mutable, or
663 * (b) It contains pointers to a younger generation.
664 * Case (b) arises if we didn't manage to promote everything that
665 * the current object points to into the current generation.
667 if (failed_to_evac) {
668 failed_to_evac = rtsFalse;
669 if (stp->gen_no > 0) {
670 recordMutableGen((StgClosure *)q, stp->gen);
679 /* -----------------------------------------------------------------------------
680 Scavenge everything on the mark stack.
682 This is slightly different from scavenge():
683 - we don't walk linearly through the objects, so the scavenger
684 doesn't need to advance the pointer on to the next object.
685 -------------------------------------------------------------------------- */
// Used by the compacting collector: objects to scavenge are popped off the
// mark stack rather than found by a linear sweep, so the per-case code below
// mirrors scavenge() minus the `p += size` advancement. If the mark stack
// overflowed, fall back to a linear scan of the old generation at the end.
688 scavenge_mark_stack(void)
// Everything here is promoted to the oldest generation.
694 evac_gen = oldest_gen->no;
695 saved_evac_gen = evac_gen;
698 while (!mark_stack_empty()) {
699 p = pop_mark_stack();
701 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
702 info = get_itbl((StgClosure *)p);
705 switch (info->type) {
// MVar: scavenge fields with eager promotion off; mark CLEAN/DIRTY.
710 rtsBool saved_eager_promotion = eager_promotion;
712 StgMVar *mvar = ((StgMVar *)p);
713 eager_promotion = rtsFalse;
714 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
715 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
716 mvar->value = evacuate((StgClosure *)mvar->value);
717 eager_promotion = saved_eager_promotion;
719 if (failed_to_evac) {
720 mvar->header.info = &stg_MVAR_DIRTY_info;
722 mvar->header.info = &stg_MVAR_CLEAN_info;
// Specialised FUN/THUNK/CONSTR layouts, as in scavenge().
728 scavenge_fun_srt(info);
729 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
730 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
734 scavenge_thunk_srt(info);
735 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
736 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
740 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
741 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
746 scavenge_fun_srt(info);
747 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
752 scavenge_thunk_srt(info);
753 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
758 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
763 scavenge_fun_srt(info);
768 scavenge_thunk_srt(info);
776 scavenge_fun_srt(info);
// Generic THUNK payload walk (pointers only; nptrs need no work here
// since we don't advance a scan pointer).
783 scavenge_thunk_srt(info);
784 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
785 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
786 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
// Generic FUN/CONSTR payload walk.
798 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
799 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
800 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
806 StgBCO *bco = (StgBCO *)p;
807 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
808 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
809 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
814 // don't need to do anything here: the only possible case
815 // is that we're in a 1-space compacting collector, with
816 // no "old" generation.
820 case IND_OLDGEN_PERM:
821 ((StgInd *)p)->indirectee =
822 evacuate(((StgInd *)p)->indirectee);
826 case MUT_VAR_DIRTY: {
827 rtsBool saved_eager_promotion = eager_promotion;
829 eager_promotion = rtsFalse;
830 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
831 eager_promotion = saved_eager_promotion;
833 if (failed_to_evac) {
834 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
836 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
842 case SE_CAF_BLACKHOLE:
850 StgSelector *s = (StgSelector *)p;
851 s->selectee = evacuate(s->selectee);
855 // A chunk of stack saved in a heap object
858 StgAP_STACK *ap = (StgAP_STACK *)p;
860 ap->fun = evacuate(ap->fun);
861 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
// Return values of scavenge_PAP/scavenge_AP are ignored here: no scan
// pointer to advance in the mark-stack scheme.
866 scavenge_PAP((StgPAP *)p);
870 scavenge_AP((StgAP *)p);
873 case MUT_ARR_PTRS_CLEAN:
874 case MUT_ARR_PTRS_DIRTY:
880 // We don't eagerly promote objects pointed to by a mutable
881 // array, but if we find the array only points to objects in
882 // the same or an older generation, we mark it "clean" and
883 // avoid traversing it during minor GCs.
884 saved_eager = eager_promotion;
885 eager_promotion = rtsFalse;
886 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
887 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
888 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
890 eager_promotion = saved_eager;
892 if (failed_to_evac) {
893 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
895 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
898 failed_to_evac = rtsTrue; // mutable anyhow.
902 case MUT_ARR_PTRS_FROZEN:
903 case MUT_ARR_PTRS_FROZEN0:
908 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
909 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
910 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
913 // If we're going to put this object on the mutable list, then
914 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
915 if (failed_to_evac) {
916 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
918 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
925 StgTSO *tso = (StgTSO *)p;
926 rtsBool saved_eager = eager_promotion;
928 eager_promotion = rtsFalse;
930 eager_promotion = saved_eager;
932 if (failed_to_evac) {
933 tso->flags |= TSO_DIRTY;
935 tso->flags &= ~TSO_DIRTY;
938 failed_to_evac = rtsTrue; // always on the mutable list
// STM objects: always treated as mutable, as in scavenge().
942 case TVAR_WATCH_QUEUE:
944 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
946 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
947 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
948 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
949 evac_gen = saved_evac_gen;
950 failed_to_evac = rtsTrue; // mutable
956 StgTVar *tvar = ((StgTVar *) p);
958 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
959 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
960 evac_gen = saved_evac_gen;
961 failed_to_evac = rtsTrue; // mutable
968 StgTRecChunk *tc = ((StgTRecChunk *) p);
969 TRecEntry *e = &(tc -> entries[0]);
971 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
972 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
973 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
974 e->expected_value = evacuate((StgClosure*)e->expected_value);
975 e->new_value = evacuate((StgClosure*)e->new_value);
977 evac_gen = saved_evac_gen;
978 failed_to_evac = rtsTrue; // mutable
984 StgTRecHeader *trec = ((StgTRecHeader *) p);
986 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
987 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
988 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
989 evac_gen = saved_evac_gen;
990 failed_to_evac = rtsTrue; // mutable
994 case ATOMIC_INVARIANT:
996 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
998 invariant->code = (StgClosure *)evacuate(invariant->code);
999 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
1000 evac_gen = saved_evac_gen;
1001 failed_to_evac = rtsTrue; // mutable
1005 case INVARIANT_CHECK_QUEUE:
1007 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1009 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
1010 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
1011 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
1012 evac_gen = saved_evac_gen;
1013 failed_to_evac = rtsTrue; // mutable
1018 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1022 if (failed_to_evac) {
1023 failed_to_evac = rtsFalse;
1025 recordMutableGen((StgClosure *)q, &generations[evac_gen]);
1029 // mark the next bit to indicate "scavenged"
1030 mark(q+1, Bdescr(q));
1032 } // while (!mark_stack_empty())
1034 // start a new linear scan if the mark stack overflowed at some point
1035 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
1036 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
1037 mark_stack_overflowed = rtsFalse;
1038 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
1039 oldgen_scan = oldgen_scan_bd->start;
1042 if (oldgen_scan_bd) {
1043 // push a new thing on the mark stack
1045 // find a closure that is marked but not scavenged, and start
1047 while (oldgen_scan < oldgen_scan_bd->free
1048 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
1052 if (oldgen_scan < oldgen_scan_bd->free) {
1054 // already scavenged?
// Convention: the bit at q+1 records "scavenged" (set by mark(q+1,...)
// above), distinguishing scavenged from merely-marked closures.
1055 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
1056 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1059 push_mark_stack(oldgen_scan);
1060 // ToDo: bump the linear scan by the actual size of the object
1061 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1065 oldgen_scan_bd = oldgen_scan_bd->link;
1066 if (oldgen_scan_bd != NULL) {
1067 oldgen_scan = oldgen_scan_bd->start;
1073 /* -----------------------------------------------------------------------------
1074 Scavenge one object.
1076 This is used for objects that are temporarily marked as mutable
1077 because they contain old-to-new generation pointers. Only certain
1078 objects can have this property.
1079 -------------------------------------------------------------------------- */
// Scavenge a single closure in place (used from the mutable-list walk).
// Returns whether the object still points into a younger generation
// ("no_luck"), i.e. whether it must stay on the mutable list.
1082 scavenge_one(StgPtr p)
1084 const StgInfoTable *info;
1085 nat saved_evac_gen = evac_gen;
1088 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1089 info = get_itbl((StgClosure *)p);
1091 switch (info->type) {
// MVar: eager promotion off while scavenging; then CLEAN/DIRTY.
1096 rtsBool saved_eager_promotion = eager_promotion;
1098 StgMVar *mvar = ((StgMVar *)p);
1099 eager_promotion = rtsFalse;
1100 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
1101 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
1102 mvar->value = evacuate((StgClosure *)mvar->value);
1103 eager_promotion = saved_eager_promotion;
1105 if (failed_to_evac) {
1106 mvar->header.info = &stg_MVAR_DIRTY_info;
1108 mvar->header.info = &stg_MVAR_CLEAN_info;
// Generic THUNK payload walk.
1122 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1123 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1124 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
1130 case FUN_1_0: // hardly worth specialising these guys
// Generic FUN/CONSTR payload walk (specialised layouts fall through
// to here rather than getting unrolled cases).
1146 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1147 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1148 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
1154 case MUT_VAR_DIRTY: {
1156 rtsBool saved_eager_promotion = eager_promotion;
1158 eager_promotion = rtsFalse;
1159 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1160 eager_promotion = saved_eager_promotion;
1162 if (failed_to_evac) {
1163 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1165 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1171 case SE_CAF_BLACKHOLE:
1176 case THUNK_SELECTOR:
1178 StgSelector *s = (StgSelector *)p;
1179 s->selectee = evacuate(s->selectee);
1185 StgAP_STACK *ap = (StgAP_STACK *)p;
1187 ap->fun = evacuate(ap->fun);
1188 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1189 p = (StgPtr)ap->payload + ap->size;
1194 p = scavenge_PAP((StgPAP *)p);
1198 p = scavenge_AP((StgAP *)p);
1202 // nothing to follow
1205 case MUT_ARR_PTRS_CLEAN:
1206 case MUT_ARR_PTRS_DIRTY:
1209 rtsBool saved_eager;
1211 // We don't eagerly promote objects pointed to by a mutable
1212 // array, but if we find the array only points to objects in
1213 // the same or an older generation, we mark it "clean" and
1214 // avoid traversing it during minor GCs.
1215 saved_eager = eager_promotion;
1216 eager_promotion = rtsFalse;
1218 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1219 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1220 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1222 eager_promotion = saved_eager;
1224 if (failed_to_evac) {
1225 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1227 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1230 failed_to_evac = rtsTrue;
1234 case MUT_ARR_PTRS_FROZEN:
1235 case MUT_ARR_PTRS_FROZEN0:
1237 // follow everything
1240 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1241 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1242 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1245 // If we're going to put this object on the mutable list, then
1246 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1247 if (failed_to_evac) {
1248 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1250 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1257 StgTSO *tso = (StgTSO *)p;
1258 rtsBool saved_eager = eager_promotion;
1260 eager_promotion = rtsFalse;
1262 eager_promotion = saved_eager;
1264 if (failed_to_evac) {
1265 tso->flags |= TSO_DIRTY;
1267 tso->flags &= ~TSO_DIRTY;
1270 failed_to_evac = rtsTrue; // always on the mutable list
// STM objects: always mutable.
1274 case TVAR_WATCH_QUEUE:
1276 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
1278 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
1279 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
1280 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
1281 evac_gen = saved_evac_gen;
1282 failed_to_evac = rtsTrue; // mutable
1288 StgTVar *tvar = ((StgTVar *) p);
1290 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
1291 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
1292 evac_gen = saved_evac_gen;
1293 failed_to_evac = rtsTrue; // mutable
1299 StgTRecHeader *trec = ((StgTRecHeader *) p);
1301 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
1302 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
1303 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
1304 evac_gen = saved_evac_gen;
1305 failed_to_evac = rtsTrue; // mutable
1312 StgTRecChunk *tc = ((StgTRecChunk *) p);
1313 TRecEntry *e = &(tc -> entries[0]);
1315 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
1316 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1317 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
1318 e->expected_value = evacuate((StgClosure*)e->expected_value);
1319 e->new_value = evacuate((StgClosure*)e->new_value);
1321 evac_gen = saved_evac_gen;
1322 failed_to_evac = rtsTrue; // mutable
1326 case ATOMIC_INVARIANT:
1328 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1330 invariant->code = (StgClosure *)evacuate(invariant->code);
1331 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
1332 evac_gen = saved_evac_gen;
1333 failed_to_evac = rtsTrue; // mutable
1337 case INVARIANT_CHECK_QUEUE:
1339 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1341 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
1342 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
1343 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
1344 evac_gen = saved_evac_gen;
1345 failed_to_evac = rtsTrue; // mutable
1350 case IND_OLDGEN_PERM:
1353 /* Careful here: a THUNK can be on the mutable list because
1354 * it contains pointers to young gen objects. If such a thunk
1355 * is updated, the IND_OLDGEN will be added to the mutable
1356 * list again, and we'll scavenge it twice. evacuate()
1357 * doesn't check whether the object has already been
1358 * evacuated, so we perform that check here.
1360 StgClosure *q = ((StgInd *)p)->indirectee;
1361 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
1364 ((StgInd *)p)->indirectee = evacuate(q);
1367 #if 0 && defined(DEBUG)
1368 if (RtsFlags.DebugFlags.gc)
1369 /* Debugging code to print out the size of the thing we just
// (Disabled debugging block: measures how much was scavenged, accounting
// for the scan pointer possibly having crossed block boundaries.)
1373 StgPtr start = gen->steps[0].scan;
1374 bdescr *start_bd = gen->steps[0].scan_bd;
1376 scavenge(&gen->steps[0]);
1377 if (start_bd != gen->steps[0].scan_bd) {
1378 size += (P_)BLOCK_ROUND_UP(start) - start;
1379 start_bd = start_bd->link;
1380 while (start_bd != gen->steps[0].scan_bd) {
1381 size += BLOCK_SIZE_W;
1382 start_bd = start_bd->link;
1384 size += gen->steps[0].scan -
1385 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
1387 size = gen->steps[0].scan - start;
1389 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1395 barf("scavenge_one: strange object %d", (int)(info->type));
// Report (and reset) whether some pointee stayed in a younger generation.
1398 no_luck = failed_to_evac;
1399 failed_to_evac = rtsFalse;
1403 /* -----------------------------------------------------------------------------
1404 Scavenging mutable lists.
1406 We treat the mutable list of each generation > N (i.e. all the
1407 generations older than the one being collected) as roots. We also
1408 remove non-mutable objects from the mutable list at this point.
1409 -------------------------------------------------------------------------- */
// Walk gen's saved mutable list, scavenging each entry with scavenge_one()
// and re-recording on the (new) mutable list only the entries that are
// genuinely mutable or still point into a younger generation.
1412 scavenge_mutable_list(generation *gen)
1417 bd = gen->saved_mut_list;
1420 for (; bd != NULL; bd = bd->link) {
1421 for (q = bd->start; q < bd->free; q++) {
1423 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
// Mutable-list census counters (mutlist_*), kept for GC statistics.
1426 switch (get_itbl((StgClosure *)p)->type) {
1428 barf("MUT_VAR_CLEAN on mutable list");
1430 mutlist_MUTVARS++; break;
1431 case MUT_ARR_PTRS_CLEAN:
1432 case MUT_ARR_PTRS_DIRTY:
1433 case MUT_ARR_PTRS_FROZEN:
1434 case MUT_ARR_PTRS_FROZEN0:
1435 mutlist_MUTARRS++; break;
1437 barf("MVAR_CLEAN on mutable list");
1439 mutlist_MVARS++; break;
1441 mutlist_OTHERS++; break;
1445 // Check whether this object is "clean", that is it
1446 // definitely doesn't point into a young generation.
1447 // Clean objects don't need to be scavenged. Some clean
1448 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1449 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
1450 // TSO, are always on the mutable list.
1452 switch (get_itbl((StgClosure *)p)->type) {
1453 case MUT_ARR_PTRS_CLEAN:
1454 recordMutableGen((StgClosure *)p,gen);
1457 StgTSO *tso = (StgTSO *)p;
1458 if ((tso->flags & TSO_DIRTY) == 0) {
1459 // A clean TSO: we don't have to traverse its
1460 // stack. However, we *do* follow the link field:
1461 // we don't want to have to mark a TSO dirty just
1462 // because we put it on a different queue.
1463 if (tso->why_blocked != BlockedOnBlackHole) {
1464 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
1466 recordMutableGen((StgClosure *)p,gen);
1474 if (scavenge_one(p)) {
1475 // didn't manage to promote everything, so put the
1476 // object back on the list.
1477 recordMutableGen((StgClosure *)p,gen);
1482 // free the old mut_list
1483 freeChain(gen->saved_mut_list);
1484 gen->saved_mut_list = NULL;
1487 /* -----------------------------------------------------------------------------
1488 Scavenging the static objects.
1490 We treat the static object list as an extension of the root set:
1491 each object on static_objects is scavenged here (evacuating what it
1492 points to) and then moved onto scavenged_static_objects.
1493 -------------------------------------------------------------------------- */
/* Scavenge all objects on the static_objects list, moving each one to
 * the scavenged_static_objects list as it is processed.  Pointers found
 * in static objects are evacuated straight to the oldest generation.
 * Note: evacuating may push *more* objects onto static_objects, so we
 * re-read the list head on every iteration.
 */
1496 scavenge_static(void)
1498 StgClosure* p = static_objects;
1499 const StgInfoTable *info;
1501 /* Always evacuate straight to the oldest generation for static
1503 evac_gen = oldest_gen->no;
1505 /* keep going until we've scavenged all the objects on the linked
1507 while (p != END_OF_STATIC_LIST) {
1509 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1512 if (info->type==RBH)
1513 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1515 // make sure the info pointer is into text space
1517 /* Take this object *off* the static_objects list,
1518 * and put it on the scavenged_static_objects list.
1520 static_objects = *STATIC_LINK(info,p);
1521 *STATIC_LINK(info,p) = scavenged_static_objects;
1522 scavenged_static_objects = p;
1524 switch (info -> type) {
// Static indirection (e.g. a CAF that has been updated): follow and
// evacuate the indirectee.
1528 StgInd *ind = (StgInd *)p;
1529 ind->indirectee = evacuate(ind->indirectee);
1531 /* might fail to evacuate it, in which case we have to pop it
1532 * back on the mutable list of the oldest generation. We
1533 * leave it *on* the scavenged_static_objects list, though,
1534 * in case we visit this object again.
1536 if (failed_to_evac) {
1537 failed_to_evac = rtsFalse;
1538 recordMutableGen((StgClosure *)p,oldest_gen);
// Static thunks and functions only carry an SRT to scavenge.
1544 scavenge_thunk_srt(info);
1548 scavenge_fun_srt(info);
// Static constructors: evacuate every pointer word in the payload.
1555 next = (P_)p->payload + info->layout.payload.ptrs;
1556 // evacuate the pointers
1557 for (q = (P_)p->payload; q < next; q++) {
1558 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
1564 barf("scavenge_static: strange closure %d", (int)(info->type));
// Evacuating to the oldest generation can never fail, except for the
// IND_STATIC case handled explicitly above.
1567 ASSERT(failed_to_evac == rtsFalse);
1569 /* get the next static object from the list. Remember, there might
1570 * be more stuff on this list now that we've done some evacuating!
1571 * (static_objects is a global)
1577 /* -----------------------------------------------------------------------------
1578 scavenge a chunk of memory described by a bitmap
1579 -------------------------------------------------------------------------- */
/* Scavenge `size` words starting at p, using a multi-word bitmap.
 * A *clear* bit marks a pointer word: those words are evacuated and the
 * results written back.  The bitmap word is refreshed each time the
 * index crosses a word boundary (every BITS_IN(W_) entries).
 */
1582 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1588 bitmap = large_bitmap->bitmap[b];
1589 for (i = 0; i < size; ) {
1590 if ((bitmap & 1) == 0) {
1591 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
// Advance to the next bitmap word when this one is exhausted.
1595 if (i % BITS_IN(W_) == 0) {
1597 bitmap = large_bitmap->bitmap[b];
1599 bitmap = bitmap >> 1;
/* Scavenge `size` words at p using a single-word bitmap.  As in
 * scavenge_large_bitmap(), a clear bit marks a pointer word, which is
 * evacuated in place.  Returns the pointer just past the scanned area.
 */
1604 STATIC_INLINE StgPtr
1605 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1608 if ((bitmap & 1) == 0) {
1609 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1612 bitmap = bitmap >> 1;
1618 /* -----------------------------------------------------------------------------
1619 scavenge_stack walks over a section of stack and evacuates all the
1620 objects pointed to by it. We can use the same code for walking
1621 AP_STACK_UPDs, since these are just sections of copied stack.
1622 -------------------------------------------------------------------------- */
/* Walk a section of stack between p and stack_end, evacuating every
 * pointer found in each activation record.  Dispatches on the frame
 * type from the return-address info table; each case advances p past
 * the frame it handled.
 */
1625 scavenge_stack(StgPtr p, StgPtr stack_end)
1627 const StgRetInfoTable* info;
1632 * Each time around this loop, we are looking at a chunk of stack
1633 * that starts with an activation record.
1636 while (p < stack_end) {
1637 info = get_ret_itbl((StgClosure *)p);
1639 switch (info->i.type) {
1642 // In SMP, we can get update frames that point to indirections
1643 // when two threads evaluate the same thunk. We do attempt to
1644 // discover this situation in threadPaused(), but it's
1645 // possible that the following sequence occurs:
1654 // Now T is an indirection, and the update frame is already
1655 // marked on A's stack, so we won't traverse it again in
1656 // threadPaused(). We could traverse the whole stack again
1657 // before GC, but that seems like overkill.
1659 // Scavenging this update frame as normal would be disastrous;
1660 // the updatee would end up pointing to the value. So we turn
1661 // the indirection into an IND_PERM, so that evacuate will
1662 // copy the indirection into the old generation instead of
1666 type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
// Overwrite the indirection's info pointer with the _PERM variant
// before evacuating, preserving the IND vs IND_OLDGEN distinction.
1668 ((StgUpdateFrame *)p)->updatee->header.info =
1669 (StgInfoTable *)&stg_IND_PERM_info;
1670 } else if (type == IND_OLDGEN) {
1671 ((StgUpdateFrame *)p)->updatee->header.info =
1672 (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
1674 ((StgUpdateFrame *)p)->updatee
1675 = evacuate(((StgUpdateFrame *)p)->updatee);
1676 p += sizeofW(StgUpdateFrame);
1680 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1681 case CATCH_STM_FRAME:
1682 case CATCH_RETRY_FRAME:
1683 case ATOMICALLY_FRAME:
1687 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1688 size = BITMAP_SIZE(info->i.layout.bitmap);
1689 // NOTE: the payload starts immediately after the info-ptr, we
1690 // don't have an StgHeader in the same sense as a heap closure.
1692 p = scavenge_small_bitmap(p, size, bitmap);
// Frames with a small bitmap may also carry an SRT to follow.
1696 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
// BCO frame: evacuate the BCO itself, then scan its payload using
// the bitmap stored in the BCO.
1704 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1707 size = BCO_BITMAP_SIZE(bco);
1708 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1713 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1718 size = GET_LARGE_BITMAP(&info->i)->size;
1720 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1722 // and don't forget to follow the SRT
1726 // Dynamic bitmap: the mask is stored on the stack, and
1727 // there are a number of non-pointers followed by a number
1728 // of pointers above the bitmapped area. (see StgMacros.h,
1733 dyn = ((StgRetDyn *)p)->liveness;
1735 // traverse the bitmap first
1736 bitmap = RET_DYN_LIVENESS(dyn);
1737 p = (P_)&((StgRetDyn *)p)->payload[0];
1738 size = RET_DYN_BITMAP_SIZE;
1739 p = scavenge_small_bitmap(p, size, bitmap);
1741 // skip over the non-ptr words
1742 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1744 // follow the ptr words
1745 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1746 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
// RET_FUN frame: evacuate the function closure, then use its info
// table to scavenge the argument block.
1754 StgRetFun *ret_fun = (StgRetFun *)p;
1755 StgFunInfoTable *fun_info;
1757 ret_fun->fun = evacuate(ret_fun->fun);
1758 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1759 p = scavenge_arg_block(fun_info, ret_fun->payload);
1764 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1769 /*-----------------------------------------------------------------------------
1770 scavenge the large object list.
1772 evac_gen set by caller; similar games played with evac_gen as with
1773 scavenge() - see comment at the top of scavenge(). Most large
1774 objects are (repeatedly) mutable, so most of the time evac_gen will
1776 --------------------------------------------------------------------------- */
1779 scavenge_large(step *stp)
1784 bd = stp->new_large_objects;
1786 for (; bd != NULL; bd = stp->new_large_objects) {
1788 /* take this object *off* the large objects list and put it on
1789 * the scavenged large objects list. This is so that we can
1790 * treat new_large_objects as a stack and push new objects on
1791 * the front when evacuating.
1793 stp->new_large_objects = bd->link;
1794 dbl_link_onto(bd, &stp->scavenged_large_objects);
1796 // update the block count in this step.
1797 stp->n_scavenged_large_blocks += bd->blocks;
1800 if (scavenge_one(p)) {
1801 if (stp->gen_no > 0) {
1802 recordMutableGen((StgClosure *)p, stp->gen);