1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: scavenging functions
7 * ---------------------------------------------------------------------------*/
18 #include "LdvProfile.h"
20 static void scavenge_stack (StgPtr p, StgPtr stack_end);
22 static void scavenge_large_bitmap (StgPtr p,
23 StgLargeBitmap *large_bitmap,
26 /* Similar to scavenge_large_bitmap(), but we don't write back the
27 * pointers we get back from evacuate().
// Walk a "large" SRT (static reference table) whose liveness bitmap is
// stored out of line: each set bit selects an SRT entry to evacuate.
// NOTE(review): this view of the file is elided — the local declarations,
// the evacuate() call on set bits, and the loop close are not visible here.
30 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
37 bitmap = large_srt->l.bitmap[b];
38 size = (nat)large_srt->l.size;
39 p = (StgClosure **)large_srt->srt;
40 for (i = 0; i < size; ) {
41 if ((bitmap & 1) != 0) {
// crossed a word boundary: reload the next bitmap word (b presumably
// incremented in an elided line — TODO confirm)
46 if (i % BITS_IN(W_) == 0) {
48 bitmap = large_srt->l.bitmap[b];
55 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
56 * srt field in the info table. That's ok, because we'll
57 * never dereference it.
// NOTE(review): elided view — locals and the bitmap-advance loop are not
// visible; comments below cover only the visible statements.
60 scavenge_srt (StgClosure **srt, nat srt_bitmap)
// An all-ones half-word is the sentinel for a "large" SRT whose bitmap
// lives out of line; hand it to scavenge_large_srt_bitmap() instead of
// walking bits here.
68 if (bitmap == (StgHalfWord)(-1)) {
69 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
// a set bit means this SRT entry is live and must be evacuated
74 if ((bitmap & 1) != 0) {
75 #ifdef ENABLE_WIN32_DLL_SUPPORT
76 // Special-case to handle references to closures hiding out in DLLs, since
77 // double indirections required to get at those. The code generator knows
78 // which is which when generating the SRT, so it stores the (indirect)
79 // reference to the DLL closure in the table by first adding one to it.
80 // We check for this here, and undo the addition before evacuating it.
82 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
83 // closure that's fixed at link-time, and no extra magic is required.
84 if ( (unsigned long)(*srt) & 0x1 ) {
// strip the tag bit, then follow the extra indirection into the DLL
85 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
// Scavenge the SRT hanging off a thunk's info table.
100 scavenge_thunk_srt(const StgInfoTable *info)
102 StgThunkInfoTable *thunk_info;
// SRT entries are static closures; they only need traversing on a
// major GC, so minor collections bail out immediately.
104 if (!major_gc) return;
106 thunk_info = itbl_to_thunk_itbl(info);
107 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
// Scavenge the SRT hanging off a function's info table.
// Mirrors scavenge_thunk_srt() but uses the function-flavoured accessors.
111 scavenge_fun_srt(const StgInfoTable *info)
113 StgFunInfoTable *fun_info;
// SRTs reference static closures, which only move on a major GC.
115 if (!major_gc) return;
117 fun_info = itbl_to_fun_itbl(info);
118 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
121 /* -----------------------------------------------------------------------------
123 -------------------------------------------------------------------------- */
// Scavenge the mutable fields of a thread (TSO): its blocking info,
// blocked-exceptions queue, link, transaction record, and finally the
// whole stack between sp and the stack top.
126 scavengeTSO (StgTSO *tso)
// block_info.closure only holds a real closure for these blocking
// reasons, so only then does it need evacuating.
128 if ( tso->why_blocked == BlockedOnMVar
129 || tso->why_blocked == BlockedOnBlackHole
130 || tso->why_blocked == BlockedOnException
132 tso->block_info.closure = evacuate(tso->block_info.closure);
134 tso->blocked_exceptions =
135 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
137 // We don't always chase the link field: TSOs on the blackhole
138 // queue are not automatically alive, so the link field is a
139 // "weak" pointer in that case.
140 if (tso->why_blocked != BlockedOnBlackHole) {
141 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
144 // scavenge current transaction record
145 tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
147 // scavenge this thread's stack
148 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
151 /* -----------------------------------------------------------------------------
152 Blocks of function args occur on the stack (at the top) and
154 -------------------------------------------------------------------------- */
// Scavenge a block of function arguments laid out according to the
// function's argument bitmap (a 0 bit = pointer word to evacuate).
// NOTE(review): elided view — the case labels of this switch and the small
// bitmap-walking loop header are not visible; inferred roles are hedged.
157 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
164 switch (fun_info->f.fun_type) {
// presumably the ARG_GEN case: small bitmap stored inline — confirm
166 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
167 size = BITMAP_SIZE(fun_info->f.b.bitmap);
// presumably ARG_GEN_BIG: bitmap too large for the inline encoding,
// delegate to the large-bitmap walker — confirm
170 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
171 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
// standard argument pattern: bitmap comes from the static table
175 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
176 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
// a clear bit marks a pointer word: evacuate and write back the new address
179 if ((bitmap & 1) == 0) {
180 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
183 bitmap = bitmap >> 1;
// Scavenge the payload of a PAP/AP: the pointer layout of the payload is
// determined by the *function* the PAP applies, via its argument bitmap.
// NOTE(review): elided view — case labels and loop headers of this switch
// are missing here; comments are limited to the visible statements.
192 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
196 StgFunInfoTable *fun_info;
198 fun_info = get_fun_itbl(fun);
// fun must already have been evacuated by the caller, so it cannot
// itself still be a PAP.
199 ASSERT(fun_info->i.type != PAP);
202 switch (fun_info->f.fun_type) {
204 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
207 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
// BCO case: the bitmap is attached to the byte-code object itself
211 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
215 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
// clear bit = pointer word: evacuate and write back
218 if ((bitmap & 1) == 0) {
219 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
222 bitmap = bitmap >> 1;
// Scavenge a partial application: evacuate the function first (its info
// table drives the payload layout), then scavenge the payload.
// Returns a pointer just past the PAP (see scavenge_PAP_payload).
231 scavenge_PAP (StgPAP *pap)
233 pap->fun = evacuate(pap->fun);
234 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
// Scavenge a generic application; identical shape to scavenge_PAP but
// for the StgAP layout.
238 scavenge_AP (StgAP *ap)
240 ap->fun = evacuate(ap->fun);
241 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
244 /* -----------------------------------------------------------------------------
245 Scavenge a given step until there are no more objects in this step
248 evac_gen is set by the caller to be either zero (for a step in a
249 generation < N) or G where G is the generation of the step being
252 We sometimes temporarily change evac_gen back to zero if we're
253 scavenging a mutable object where early promotion isn't such a good
255 -------------------------------------------------------------------------- */
// NOTE(review): this view of the file is elided — the function signature,
// local declarations, many case labels, brace closes and break statements
// are not visible. Comments below describe only the visible statements;
// which case each fragment belongs to is inferred and should be confirmed
// against the full source.
263 nat saved_evac_gen = evac_gen;
268 failed_to_evac = rtsFalse;
270 /* scavenge phase - standard breadth-first scavenging of the
// main loop: advance p through the step's to-space until it catches up
// with the allocation pointer, scavenging one closure per iteration
274 while (bd != stp->hp_bd || p < stp->hp) {
276 // If we're at the end of this block, move on to the next block
277 if (bd != stp->hp_bd && p == bd->free) {
283 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
284 info = get_itbl((StgClosure *)p);
286 ASSERT(thunk_selector_depth == 0);
289 switch (info->type) {
293 StgMVar *mvar = ((StgMVar *)p);
// MVars are always mutable: scavenge all three fields and then force the
// object onto the mutable list via failed_to_evac.
295 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
296 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
297 mvar->value = evacuate((StgClosure *)mvar->value);
298 evac_gen = saved_evac_gen;
299 failed_to_evac = rtsTrue; // mutable.
300 p += sizeofW(StgMVar);
// the following fragments are the specialised small-arity FUN/THUNK/CONSTR
// cases: scavenge the SRT where applicable, evacuate the payload pointers
// (in reverse order), and bump p by header + payload size
305 scavenge_fun_srt(info);
306 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
307 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
308 p += sizeofW(StgHeader) + 2;
312 scavenge_thunk_srt(info);
313 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
314 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
315 p += sizeofW(StgThunk) + 2;
319 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
320 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
321 p += sizeofW(StgHeader) + 2;
325 scavenge_thunk_srt(info);
326 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
327 p += sizeofW(StgThunk) + 1;
331 scavenge_fun_srt(info);
333 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
334 p += sizeofW(StgHeader) + 1;
338 scavenge_thunk_srt(info);
339 p += sizeofW(StgThunk) + 1;
343 scavenge_fun_srt(info);
345 p += sizeofW(StgHeader) + 1;
349 scavenge_thunk_srt(info);
350 p += sizeofW(StgThunk) + 2;
354 scavenge_fun_srt(info);
356 p += sizeofW(StgHeader) + 2;
360 scavenge_thunk_srt(info);
361 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
362 p += sizeofW(StgThunk) + 2;
366 scavenge_fun_srt(info);
368 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
369 p += sizeofW(StgHeader) + 2;
373 scavenge_fun_srt(info);
380 scavenge_thunk_srt(info);
// generic THUNK layout: evacuate the leading pointer words, then skip
// over the trailing non-pointer words
381 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
382 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
383 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
385 p += info->layout.payload.nptrs;
// generic FUN/CONSTR/etc. layout: same ptrs-then-nptrs walk
396 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
397 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
398 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
400 p += info->layout.payload.nptrs;
405 StgBCO *bco = (StgBCO *)p;
406 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
407 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
408 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
409 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
// IND_PERM in an old generation is retagged as IND_OLDGEN_PERM (with LDV
// profiling bookkeeping), then falls through to the indirectee case below
415 if (stp->gen->no != 0) {
418 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
419 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
420 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
423 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
425 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
427 // We pretend that p has just been created.
428 LDV_RECORD_CREATE((StgClosure *)p);
431 case IND_OLDGEN_PERM:
432 ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
433 p += sizeofW(StgInd);
437 case MUT_VAR_DIRTY: {
438 rtsBool saved_eager_promotion = eager_promotion;
// turn off eager promotion while scavenging a mutable object
440 eager_promotion = rtsFalse;
441 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
442 eager_promotion = saved_eager_promotion;
// re-mark clean/dirty depending on whether everything got promoted
444 if (failed_to_evac) {
445 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
447 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
449 p += sizeofW(StgMutVar);
// black-hole-like closures contain no pointers: just skip them
454 case SE_CAF_BLACKHOLE:
457 p += BLACKHOLE_sizeW();
462 StgSelector *s = (StgSelector *)p;
463 s->selectee = evacuate(s->selectee);
464 p += THUNK_SELECTOR_sizeW();
468 // A chunk of stack saved in a heap object
471 StgAP_STACK *ap = (StgAP_STACK *)p;
473 ap->fun = evacuate(ap->fun);
474 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
475 p = (StgPtr)ap->payload + ap->size;
480 p = scavenge_PAP((StgPAP *)p);
484 p = scavenge_AP((StgAP *)p);
// raw byte arrays: nothing to follow, just skip
489 p += arr_words_sizeW((StgArrWords *)p);
492 case MUT_ARR_PTRS_CLEAN:
493 case MUT_ARR_PTRS_DIRTY:
499 // We don't eagerly promote objects pointed to by a mutable
500 // array, but if we find the array only points to objects in
501 // the same or an older generation, we mark it "clean" and
502 // avoid traversing it during minor GCs.
503 saved_eager = eager_promotion;
504 eager_promotion = rtsFalse;
505 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
506 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
507 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
509 eager_promotion = saved_eager;
511 if (failed_to_evac) {
512 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
514 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
517 failed_to_evac = rtsTrue; // always put it on the mutable list.
521 case MUT_ARR_PTRS_FROZEN:
522 case MUT_ARR_PTRS_FROZEN0:
527 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
528 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
529 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
532 // If we're going to put this object on the mutable list, then
533 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
534 if (failed_to_evac) {
535 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
537 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
544 StgTSO *tso = (StgTSO *)p;
545 rtsBool saved_eager = eager_promotion;
547 eager_promotion = rtsFalse;
549 eager_promotion = saved_eager;
// dirty/clean flag mirrors whether all of the TSO's pointers promoted
551 if (failed_to_evac) {
552 tso->flags |= TSO_DIRTY;
554 tso->flags &= ~TSO_DIRTY;
557 failed_to_evac = rtsTrue; // always on the mutable list
// the STM cases below share one pattern: evacuate every field, restore
// evac_gen, mark mutable, and bump p by the record size
562 case TVAR_WATCH_QUEUE:
564 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
566 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
567 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
568 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
569 evac_gen = saved_evac_gen;
570 failed_to_evac = rtsTrue; // mutable
571 p += sizeofW(StgTVarWatchQueue);
577 StgTVar *tvar = ((StgTVar *) p);
579 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
580 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
581 evac_gen = saved_evac_gen;
582 failed_to_evac = rtsTrue; // mutable
583 p += sizeofW(StgTVar);
589 StgTRecHeader *trec = ((StgTRecHeader *) p);
591 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
592 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
593 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
594 evac_gen = saved_evac_gen;
595 failed_to_evac = rtsTrue; // mutable
596 p += sizeofW(StgTRecHeader);
603 StgTRecChunk *tc = ((StgTRecChunk *) p);
604 TRecEntry *e = &(tc -> entries[0]);
606 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
607 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
608 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
609 e->expected_value = evacuate((StgClosure*)e->expected_value);
610 e->new_value = evacuate((StgClosure*)e->new_value);
612 evac_gen = saved_evac_gen;
613 failed_to_evac = rtsTrue; // mutable
614 p += sizeofW(StgTRecChunk);
618 case ATOMIC_INVARIANT:
620 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
622 invariant->code = (StgClosure *)evacuate(invariant->code);
623 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
624 evac_gen = saved_evac_gen;
625 failed_to_evac = rtsTrue; // mutable
626 p += sizeofW(StgAtomicInvariant);
630 case INVARIANT_CHECK_QUEUE:
632 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
634 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
635 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
636 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
637 evac_gen = saved_evac_gen;
638 failed_to_evac = rtsTrue; // mutable
639 p += sizeofW(StgInvariantCheckQueue);
644 barf("scavenge: unimplemented/strange closure type %d @ %p",
649 * We need to record the current object on the mutable list if
650 * (a) It is actually mutable, or
651 * (b) It contains pointers to a younger generation.
652 * Case (b) arises if we didn't manage to promote everything that
653 * the current object points to into the current generation.
655 if (failed_to_evac) {
656 failed_to_evac = rtsFalse;
657 if (stp->gen_no > 0) {
658 recordMutableGen((StgClosure *)q, stp->gen);
667 /* -----------------------------------------------------------------------------
668 Scavenge everything on the mark stack.
670 This is slightly different from scavenge():
671 - we don't walk linearly through the objects, so the scavenger
672 doesn't need to advance the pointer on to the next object.
673 -------------------------------------------------------------------------- */
// Scavenge every object on the mark stack (compacting collector).
// Unlike scavenge(), objects arrive via pop_mark_stack() rather than a
// linear walk, so no case needs to advance a scan pointer.
// NOTE(review): elided view — local declarations, many case labels, brace
// closes and breaks are not visible; per-case comments are inferred and
// should be confirmed against the full source.
676 scavenge_mark_stack(void)
// compaction only runs in the oldest generation, so everything is
// evacuated there
682 evac_gen = oldest_gen->no;
683 saved_evac_gen = evac_gen;
686 while (!mark_stack_empty()) {
687 p = pop_mark_stack();
689 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
690 info = get_itbl((StgClosure *)p);
693 switch (info->type) {
// MVar: always mutable — scavenge all fields, then flag for the
// mutable list
697 StgMVar *mvar = ((StgMVar *)p);
699 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
700 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
701 mvar->value = evacuate((StgClosure *)mvar->value);
702 evac_gen = saved_evac_gen;
703 failed_to_evac = rtsTrue; // mutable.
// specialised small-arity FUN/THUNK/CONSTR cases, as in scavenge()
// but with no pointer advance
708 scavenge_fun_srt(info);
709 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
710 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
714 scavenge_thunk_srt(info);
715 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
716 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
720 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
721 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
726 scavenge_fun_srt(info);
727 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
732 scavenge_thunk_srt(info);
733 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
738 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
743 scavenge_fun_srt(info);
748 scavenge_thunk_srt(info);
756 scavenge_fun_srt(info);
// generic THUNK: walk the pointer words of the payload
763 scavenge_thunk_srt(info);
764 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
765 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
766 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
// generic FUN/CONSTR/etc.: same pointer walk
778 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
779 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
780 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
786 StgBCO *bco = (StgBCO *)p;
787 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
788 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
789 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
790 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
795 // don't need to do anything here: the only possible case
796 // is that we're in a 1-space compacting collector, with
797 // no "old" generation.
801 case IND_OLDGEN_PERM:
802 ((StgInd *)p)->indirectee =
803 evacuate(((StgInd *)p)->indirectee);
807 case MUT_VAR_DIRTY: {
808 rtsBool saved_eager_promotion = eager_promotion;
810 eager_promotion = rtsFalse;
811 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
812 eager_promotion = saved_eager_promotion;
// re-mark clean/dirty depending on whether everything promoted
814 if (failed_to_evac) {
815 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
817 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
823 case SE_CAF_BLACKHOLE:
831 StgSelector *s = (StgSelector *)p;
832 s->selectee = evacuate(s->selectee);
836 // A chunk of stack saved in a heap object
839 StgAP_STACK *ap = (StgAP_STACK *)p;
841 ap->fun = evacuate(ap->fun);
842 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
// return values of scavenge_PAP/scavenge_AP are unused here: no scan
// pointer to advance in mark-stack mode
847 scavenge_PAP((StgPAP *)p);
851 scavenge_AP((StgAP *)p);
854 case MUT_ARR_PTRS_CLEAN:
855 case MUT_ARR_PTRS_DIRTY:
861 // We don't eagerly promote objects pointed to by a mutable
862 // array, but if we find the array only points to objects in
863 // the same or an older generation, we mark it "clean" and
864 // avoid traversing it during minor GCs.
865 saved_eager = eager_promotion;
866 eager_promotion = rtsFalse;
867 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
868 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
869 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
871 eager_promotion = saved_eager;
873 if (failed_to_evac) {
874 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
876 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
879 failed_to_evac = rtsTrue; // mutable anyhow.
883 case MUT_ARR_PTRS_FROZEN:
884 case MUT_ARR_PTRS_FROZEN0:
889 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
890 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
891 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
894 // If we're going to put this object on the mutable list, then
895 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
896 if (failed_to_evac) {
897 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
899 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
906 StgTSO *tso = (StgTSO *)p;
907 rtsBool saved_eager = eager_promotion;
909 eager_promotion = rtsFalse;
911 eager_promotion = saved_eager;
913 if (failed_to_evac) {
914 tso->flags |= TSO_DIRTY;
916 tso->flags &= ~TSO_DIRTY;
919 failed_to_evac = rtsTrue; // always on the mutable list
// STM cases: evacuate every field, restore evac_gen, flag as mutable
923 case TVAR_WATCH_QUEUE:
925 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
927 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
928 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
929 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
930 evac_gen = saved_evac_gen;
931 failed_to_evac = rtsTrue; // mutable
937 StgTVar *tvar = ((StgTVar *) p);
939 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
940 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
941 evac_gen = saved_evac_gen;
942 failed_to_evac = rtsTrue; // mutable
949 StgTRecChunk *tc = ((StgTRecChunk *) p);
950 TRecEntry *e = &(tc -> entries[0]);
952 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
953 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
954 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
955 e->expected_value = evacuate((StgClosure*)e->expected_value);
956 e->new_value = evacuate((StgClosure*)e->new_value);
958 evac_gen = saved_evac_gen;
959 failed_to_evac = rtsTrue; // mutable
965 StgTRecHeader *trec = ((StgTRecHeader *) p);
967 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
968 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
969 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
970 evac_gen = saved_evac_gen;
971 failed_to_evac = rtsTrue; // mutable
975 case ATOMIC_INVARIANT:
977 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
979 invariant->code = (StgClosure *)evacuate(invariant->code);
980 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
981 evac_gen = saved_evac_gen;
982 failed_to_evac = rtsTrue; // mutable
986 case INVARIANT_CHECK_QUEUE:
988 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
990 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
991 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
992 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
993 evac_gen = saved_evac_gen;
994 failed_to_evac = rtsTrue; // mutable
999 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1003 if (failed_to_evac) {
1004 failed_to_evac = rtsFalse;
1006 recordMutableGen((StgClosure *)q, &generations[evac_gen]);
1010 // mark the next bit to indicate "scavenged"
1011 mark(q+1, Bdescr(q));
1013 } // while (!mark_stack_empty())
1015 // start a new linear scan if the mark stack overflowed at some point
1016 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
1017 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
1018 mark_stack_overflowed = rtsFalse;
1019 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
1020 oldgen_scan = oldgen_scan_bd->start;
1023 if (oldgen_scan_bd) {
1024 // push a new thing on the mark stack
1026 // find a closure that is marked but not scavenged, and start
1028 while (oldgen_scan < oldgen_scan_bd->free
1029 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
1033 if (oldgen_scan < oldgen_scan_bd->free) {
1035 // already scavenged?
1036 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
1037 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1040 push_mark_stack(oldgen_scan);
1041 // ToDo: bump the linear scan by the actual size of the object
1042 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
// exhausted this block: advance the linear scan to the next one
1046 oldgen_scan_bd = oldgen_scan_bd->link;
1047 if (oldgen_scan_bd != NULL) {
1048 oldgen_scan = oldgen_scan_bd->start;
1054 /* -----------------------------------------------------------------------------
1055 Scavenge one object.
1057 This is used for objects that are temporarily marked as mutable
1058 because they contain old-to-new generation pointers. Only certain
1059 objects can have this property.
1060 -------------------------------------------------------------------------- */
// Scavenge a single closure (used for objects on the mutable list that
// may hold old-to-new generation pointers). Returns (via no_luck /
// failed_to_evac) whether some pointer could not be promoted, i.e.
// whether the object must stay on the mutable list.
// NOTE(review): elided view — many case labels, brace closes and breaks
// are missing; per-case comments are inferred and should be confirmed.
1063 scavenge_one(StgPtr p)
1065 const StgInfoTable *info;
1066 nat saved_evac_gen = evac_gen;
1069 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1070 info = get_itbl((StgClosure *)p);
1072 switch (info->type) {
// MVar: always mutable
1076 StgMVar *mvar = ((StgMVar *)p);
1078 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
1079 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
1080 mvar->value = evacuate((StgClosure *)mvar->value);
1081 evac_gen = saved_evac_gen;
1082 failed_to_evac = rtsTrue; // mutable.
// generic THUNK: evacuate the pointer words of the payload
1095 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1096 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1097 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
1103 case FUN_1_0: // hardly worth specialising these guys
// generic FUN/CONSTR/etc.: same pointer walk over the payload
1119 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1120 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1121 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
1127 case MUT_VAR_DIRTY: {
1129 rtsBool saved_eager_promotion = eager_promotion;
1131 eager_promotion = rtsFalse;
1132 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1133 eager_promotion = saved_eager_promotion;
// re-mark clean/dirty depending on whether everything promoted
1135 if (failed_to_evac) {
1136 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1138 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1144 case SE_CAF_BLACKHOLE:
1149 case THUNK_SELECTOR:
1151 StgSelector *s = (StgSelector *)p;
1152 s->selectee = evacuate(s->selectee);
1158 StgAP_STACK *ap = (StgAP_STACK *)p;
1160 ap->fun = evacuate(ap->fun);
1161 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1162 p = (StgPtr)ap->payload + ap->size;
1167 p = scavenge_PAP((StgPAP *)p);
1171 p = scavenge_AP((StgAP *)p);
1175 // nothing to follow
1178 case MUT_ARR_PTRS_CLEAN:
1179 case MUT_ARR_PTRS_DIRTY:
1182 rtsBool saved_eager;
1184 // We don't eagerly promote objects pointed to by a mutable
1185 // array, but if we find the array only points to objects in
1186 // the same or an older generation, we mark it "clean" and
1187 // avoid traversing it during minor GCs.
1188 saved_eager = eager_promotion;
1189 eager_promotion = rtsFalse;
1191 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1192 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1193 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1195 eager_promotion = saved_eager;
1197 if (failed_to_evac) {
1198 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1200 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1203 failed_to_evac = rtsTrue;
1207 case MUT_ARR_PTRS_FROZEN:
1208 case MUT_ARR_PTRS_FROZEN0:
1210 // follow everything
1213 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1214 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1215 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1218 // If we're going to put this object on the mutable list, then
1219 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1220 if (failed_to_evac) {
1221 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1223 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1230 StgTSO *tso = (StgTSO *)p;
1231 rtsBool saved_eager = eager_promotion;
1233 eager_promotion = rtsFalse;
1235 eager_promotion = saved_eager;
1237 if (failed_to_evac) {
1238 tso->flags |= TSO_DIRTY;
1240 tso->flags &= ~TSO_DIRTY;
1243 failed_to_evac = rtsTrue; // always on the mutable list
// STM cases: evacuate every field, restore evac_gen, flag as mutable
1247 case TVAR_WATCH_QUEUE:
1249 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
1251 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
1252 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
1253 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
1254 evac_gen = saved_evac_gen;
1255 failed_to_evac = rtsTrue; // mutable
1261 StgTVar *tvar = ((StgTVar *) p);
1263 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
1264 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
1265 evac_gen = saved_evac_gen;
1266 failed_to_evac = rtsTrue; // mutable
1272 StgTRecHeader *trec = ((StgTRecHeader *) p);
1274 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
1275 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
1276 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
1277 evac_gen = saved_evac_gen;
1278 failed_to_evac = rtsTrue; // mutable
1285 StgTRecChunk *tc = ((StgTRecChunk *) p);
1286 TRecEntry *e = &(tc -> entries[0]);
1288 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
1289 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1290 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
1291 e->expected_value = evacuate((StgClosure*)e->expected_value);
1292 e->new_value = evacuate((StgClosure*)e->new_value);
1294 evac_gen = saved_evac_gen;
1295 failed_to_evac = rtsTrue; // mutable
1299 case ATOMIC_INVARIANT:
1301 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1303 invariant->code = (StgClosure *)evacuate(invariant->code);
1304 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
1305 evac_gen = saved_evac_gen;
1306 failed_to_evac = rtsTrue; // mutable
1310 case INVARIANT_CHECK_QUEUE:
1312 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1314 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
1315 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
1316 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
1317 evac_gen = saved_evac_gen;
1318 failed_to_evac = rtsTrue; // mutable
1323 case IND_OLDGEN_PERM:
1326 /* Careful here: a THUNK can be on the mutable list because
1327 * it contains pointers to young gen objects. If such a thunk
1328 * is updated, the IND_OLDGEN will be added to the mutable
1329 * list again, and we'll scavenge it twice. evacuate()
1330 * doesn't check whether the object has already been
1331 * evacuated, so we perform that check here.
1333 StgClosure *q = ((StgInd *)p)->indirectee;
// already evacuated (to-space block): nothing to do
1334 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
1337 ((StgInd *)p)->indirectee = evacuate(q);
// compiled-out debug aid: measures how much the evacuation above caused
// the oldest step's scan pointer to advance
1340 #if 0 && defined(DEBUG)
1341 if (RtsFlags.DebugFlags.gc)
1342 /* Debugging code to print out the size of the thing we just
1346 StgPtr start = gen->steps[0].scan;
1347 bdescr *start_bd = gen->steps[0].scan_bd;
1349 scavenge(&gen->steps[0]);
1350 if (start_bd != gen->steps[0].scan_bd) {
1351 size += (P_)BLOCK_ROUND_UP(start) - start;
1352 start_bd = start_bd->link;
1353 while (start_bd != gen->steps[0].scan_bd) {
1354 size += BLOCK_SIZE_W;
1355 start_bd = start_bd->link;
1357 size += gen->steps[0].scan -
1358 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
1360 size = gen->steps[0].scan - start;
1362 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1368 barf("scavenge_one: strange object %d", (int)(info->type));
// report whether some pointer failed to promote, and reset the flag for
// the next caller
1371 no_luck = failed_to_evac;
1372 failed_to_evac = rtsFalse;
1376 /* -----------------------------------------------------------------------------
1377 Scavenging mutable lists.
1379 We treat the mutable list of each generation > N (i.e. all the
1380 generations older than the one being collected) as roots. We also
1381 remove non-mutable objects from the mutable list at this point.
1382 -------------------------------------------------------------------------- */
// Scavenge a generation's saved mutable list: each entry is scavenged
// with scavenge_one(), and re-recorded on the (new) mutable list only if
// it still points into a younger generation. Recognisably-clean objects
// are short-circuited without a full scavenge.
// NOTE(review): elided view — some case labels, braces and the q->p
// assignment are not visible here.
1385 scavenge_mutable_list(generation *gen)
1390 bd = gen->saved_mut_list;
1393 for (; bd != NULL; bd = bd->link) {
1394 for (q = bd->start; q < bd->free; q++) {
1396 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
// first switch: statistics only (mutlist_* counters), plus a sanity
// check that a MUT_VAR_CLEAN never lands on the mutable list
1399 switch (get_itbl((StgClosure *)p)->type) {
1401 barf("MUT_VAR_CLEAN on mutable list");
1403 mutlist_MUTVARS++; break;
1404 case MUT_ARR_PTRS_CLEAN:
1405 case MUT_ARR_PTRS_DIRTY:
1406 case MUT_ARR_PTRS_FROZEN:
1407 case MUT_ARR_PTRS_FROZEN0:
1408 mutlist_MUTARRS++; break;
1410 mutlist_OTHERS++; break;
1414 // Check whether this object is "clean", that is it
1415 // definitely doesn't point into a young generation.
1416 // Clean objects don't need to be scavenged. Some clean
1417 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1418 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
1419 // TSO, are always on the mutable list.
1421 switch (get_itbl((StgClosure *)p)->type) {
// a clean array stays on the mutable list but needs no traversal
1422 case MUT_ARR_PTRS_CLEAN:
1423 recordMutableGen((StgClosure *)p,gen);
1426 StgTSO *tso = (StgTSO *)p;
1427 if ((tso->flags & TSO_DIRTY) == 0) {
1428 // A clean TSO: we don't have to traverse its
1429 // stack. However, we *do* follow the link field:
1430 // we don't want to have to mark a TSO dirty just
1431 // because we put it on a different queue.
1432 if (tso->why_blocked != BlockedOnBlackHole) {
1433 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
1435 recordMutableGen((StgClosure *)p,gen);
// not recognisably clean: do a full single-object scavenge
1443 if (scavenge_one(p)) {
1444 // didn't manage to promote everything, so put the
1445 // object back on the list.
1446 recordMutableGen((StgClosure *)p,gen);
1451 // free the old mut_list
1452 freeChain(gen->saved_mut_list);
1453 gen->saved_mut_list = NULL;
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Every static object reachable in this GC sits on the global
   static_objects list; we scavenge each one (evacuating its pointees
   straight into the oldest generation) and move it onto the
   scavenged_static_objects list as we go.
   -------------------------------------------------------------------------- */
/* Drain the global static_objects list: evacuate everything each
 * static object points to, moving each processed object onto
 * scavenged_static_objects.  Evacuation may push more static objects
 * onto static_objects, so the outer loop runs until the list is empty.
 *
 * NOTE(review): excerpt is elided — the assignment of 'info', several
 * case labels and closing braces are on lines not shown here.
 */
scavenge_static(void)
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
  while (p != END_OF_STATIC_LIST) {
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
    // make sure the info pointer is into text space

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = *STATIC_LINK(info,p);
    *STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info -> type) {
	// IND_STATIC: just follow the indirectee.
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list of the oldest generation.  We
	 * leave it *on* the scavenged_static_objects list, though,
	 * in case we visit this object again.
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutableGen((StgClosure *)p,oldest_gen);

      // THUNK_STATIC: only its SRT needs following.
      scavenge_thunk_srt(info);
      // FUN_STATIC: likewise, only the SRT.
      scavenge_fun_srt(info);

	// CONSTR-style static object: evacuate each pointer word in
	// the payload (layout gives the pointer count up front).
	next = (P_)p->payload + info->layout.payload.ptrs;
	// evacuate the pointers
	for (q = (P_)p->payload; q < next; q++) {
	  *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);

      barf("scavenge_static: strange closure %d", (int)(info->type));

    // Only IND_STATIC (handled above) may legitimately fail to evacuate.
    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
1546 /* -----------------------------------------------------------------------------
1547 scavenge a chunk of memory described by a bitmap
1548 -------------------------------------------------------------------------- */
/* Scavenge 'size' words starting at p using a multi-word layout
 * bitmap: a clear bit marks the word as a pointer (evacuate in place),
 * a set bit marks a non-pointer (skip).  Note the opposite convention
 * from SRT bitmaps, where a set bit marks a pointer.
 *
 * NOTE(review): excerpt is elided — declarations of i/b/bitmap, the
 * per-word increments and the loop's closing braces are not shown.
 */
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
	if ((bitmap & 1) == 0) {
	    // clear bit => this word is a pointer: evacuate and write back
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	if (i % BITS_IN(W_) == 0) {
	    // crossed a word boundary: fetch the next bitmap word
	    bitmap = large_bitmap->bitmap[b];
	    // consume one bit per scanned word
	    bitmap = bitmap >> 1;
/* Scavenge 'size' stack words at p, guided by a single-word bitmap:
 * clear bit => pointer (evacuate in place), set bit => non-pointer.
 * Presumably returns the pointer just past the scanned words — the
 * return statement is on an elided line; confirm against full source.
 */
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
    if ((bitmap & 1) == 0) {
	*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
    // shift to expose the bit for the next word
    bitmap = bitmap >> 1;
1587 /* -----------------------------------------------------------------------------
1588 scavenge_stack walks over a section of stack and evacuates all the
1589 objects pointed to by it. We can use the same code for walking
1590 AP_STACK_UPDs, since these are just sections of copied stack.
1591 -------------------------------------------------------------------------- */
/* Walk the stack section [p, stack_end) frame by frame, evacuating
 * every pointer found.  Each iteration of the outer loop starts at an
 * activation record; the frame's return-info table tells us the
 * frame's layout (small bitmap, large bitmap, BCO, RET_DYN, RET_FUN).
 * AP_STACK chunks reuse this code since they are copied stack.
 *
 * NOTE(review): excerpt is elided — case labels, declarations of
 * bitmap/size/bco/dyn and several braces live on lines not shown.
 */
scavenge_stack(StgPtr p, StgPtr stack_end)
    const StgRetInfoTable* info;
    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
	info  = get_ret_itbl((StgClosure *)p);

	switch (info->i.type) {

	// UPDATE_FRAME:
	// In SMP, we can get update frames that point to indirections
	// when two threads evaluate the same thunk.  We do attempt to
	// discover this situation in threadPaused(), but it's
	// possible that the following sequence occurs:
	//
	// Now T is an indirection, and the update frame is already
	// marked on A's stack, so we won't traverse it again in
	// threadPaused().  We could traverse the whole stack again
	// before GC, but that seems like overkill.
	//
	// Scavenging this update frame as normal would be disastrous;
	// the updatee would end up pointing to the value.  So we turn
	// the indirection into an IND_PERM, so that evacuate will
	// copy the indirection into the old generation instead of
	    if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) {
		((StgUpdateFrame *)p)->updatee->header.info =
		    (StgInfoTable *)&stg_IND_PERM_info;
	    // evacuate the updatee and advance past the frame
	    ((StgUpdateFrame *)p)->updatee
		= evacuate(((StgUpdateFrame *)p)->updatee);
	    p += sizeofW(StgUpdateFrame);

	    // small bitmap (< 32 entries, or 64 on a 64-bit machine)
	case CATCH_STM_FRAME:
	case CATCH_RETRY_FRAME:
	case ATOMICALLY_FRAME:
	    bitmap = BITMAP_BITS(info->i.layout.bitmap);
	    size   = BITMAP_SIZE(info->i.layout.bitmap);
	    // NOTE: the payload starts immediately after the info-ptr, we
	    // don't have an StgHeader in the same sense as a heap closure.
	    p = scavenge_small_bitmap(p, size, bitmap);

	    // after the bitmapped words, follow the frame's SRT
	    scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);

	    // RET_BCO: the first word is the BCO itself; evacuate it,
	    // then scavenge the args using the BCO's own bitmap.
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    size = BCO_BITMAP_SIZE(bco);
	    scavenge_large_bitmap(p, BCO_BITMAP(bco), size);

	    // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
	    size = GET_LARGE_BITMAP(&info->i)->size;
	    scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
	    // and don't forget to follow the SRT

	    // RET_DYN:
	    // Dynamic bitmap: the mask is stored on the stack, and
	    // there are a number of non-pointers followed by a number
	    // of pointers above the bitmapped area.  (see StgMacros.h,
	    dyn = ((StgRetDyn *)p)->liveness;

	    // traverse the bitmap first
	    bitmap = RET_DYN_LIVENESS(dyn);
	    p      = (P_)&((StgRetDyn *)p)->payload[0];
	    size   = RET_DYN_BITMAP_SIZE;
	    p = scavenge_small_bitmap(p, size, bitmap);

	    // skip over the non-ptr words
	    p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

	    // follow the ptr words
	    for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
		*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);

	    // RET_FUN: evacuate the function closure, then use its
	    // info table to scavenge the argument block.
	    StgRetFun *ret_fun = (StgRetFun *)p;
	    StgFunInfoTable *fun_info;

	    ret_fun->fun = evacuate(ret_fun->fun);
	    fun_info = get_fun_itbl(ret_fun->fun);
	    p = scavenge_arg_block(fun_info, ret_fun->payload);

	    barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1733 /*-----------------------------------------------------------------------------
1734 scavenge the large object list.
1736 evac_gen set by caller; similar games played with evac_gen as with
1737 scavenge() - see comment at the top of scavenge(). Most large
1738 objects are (repeatedly) mutable, so most of the time evac_gen will
1740 --------------------------------------------------------------------------- */
1743 scavenge_large(step *stp)
1748 bd = stp->new_large_objects;
1750 for (; bd != NULL; bd = stp->new_large_objects) {
1752 /* take this object *off* the large objects list and put it on
1753 * the scavenged large objects list. This is so that we can
1754 * treat new_large_objects as a stack and push new objects on
1755 * the front when evacuating.
1757 stp->new_large_objects = bd->link;
1758 dbl_link_onto(bd, &stp->scavenged_large_objects);
1760 // update the block count in this step.
1761 stp->n_scavenged_large_blocks += bd->blocks;
1764 if (scavenge_one(p)) {
1765 if (stp->gen_no > 0) {
1766 recordMutableGen((StgClosure *)p, stp->gen);