1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: scavenging functions
7 * ---------------------------------------------------------------------------*/
18 #include "LdvProfile.h"
20 static void scavenge_stack (StgPtr p, StgPtr stack_end);
22 static void scavenge_large_bitmap (StgPtr p,
23 StgLargeBitmap *large_bitmap,
26 /* Similar to scavenge_large_bitmap(), but we don't write back the
27 * pointers we get back from evacuate().
// NOTE(review): this file is a sparse extract -- interior lines are elided and
// each line carries a stray original line number, so the fragments below are
// annotated in place rather than rewritten.
30 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
// Walk the large SRT one bitmap bit per entry; a set bit marks an SRT slot
// holding a static closure pointer that must be kept alive via evacuate().
37 bitmap = large_srt->l.bitmap[b];
38 size = (nat)large_srt->l.size;
39 p = (StgClosure **)large_srt->srt;
40 for (i = 0; i < size; ) {
41 if ((bitmap & 1) != 0) {
// Refill the current bitmap word whenever i crosses a word boundary
// (BITS_IN(W_) bits per word; the increment of `b` is in lines elided
// from this extract -- presumably b++ just before the reload).
46 if (i % BITS_IN(W_) == 0) {
48 bitmap = large_srt->l.bitmap[b];
55 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
56 * srt field in the info table. That's ok, because we'll
57 * never dereference it.
58 *
59 * A bitmap of all ones ((StgHalfWord)(-1)) is the sentinel meaning the
60 * SRT is too big for an inline bitmap: `srt` then actually points at an
61 * StgLargeSRT and is handed off to scavenge_large_srt_bitmap().
62 */
60 scavenge_srt (StgClosure **srt, nat srt_bitmap)
68 if (bitmap == (StgHalfWord)(-1)) {
69 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
74 if ((bitmap & 1) != 0) {
75 #ifdef ENABLE_WIN32_DLL_SUPPORT
76 // Special-case to handle references to closures hiding out in DLLs, since
77 // double indirections required to get at those. The code generator knows
78 // which is which when generating the SRT, so it stores the (indirect)
79 // reference to the DLL closure in the table by first adding one to it.
80 // We check for this here, and undo the addition before evacuating it.
82 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
83 // closure that's fixed at link-time, and no extra magic is required.
84 if ( (unsigned long)(*srt) & 0x1 ) {
// Strip the low tag bit, then follow the extra indirection into the DLL's
// import table before evacuating the real closure.
85 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
// Scavenge the SRT attached to a thunk's info table.
100 scavenge_thunk_srt(const StgInfoTable *info)
102 StgThunkInfoTable *thunk_info;
// SRTs only determine CAF liveness, which is only recomputed on a major GC;
// minor collections can skip them entirely.
104 if (!major_gc) return;
106 thunk_info = itbl_to_thunk_itbl(info);
107 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
// Scavenge the SRT attached to a function's info table.
// Mirrors scavenge_thunk_srt() above, but goes through the fun info table.
111 scavenge_fun_srt(const StgInfoTable *info)
113 StgFunInfoTable *fun_info;
// SRTs only matter on major GCs (CAF liveness); skip otherwise.
115 if (!major_gc) return;
117 fun_info = itbl_to_fun_itbl(info);
118 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
121 /* -----------------------------------------------------------------------------
122 Scavenge a TSO: evacuate everything the thread object points to,
123 -------------------------------------------------------------------------- */
126 scavengeTSO (StgTSO *tso)
// block_info.closure is only a GC pointer for these why_blocked states
// (for other states block_info holds non-pointer data), so only evacuate
// it when one of them is set.
128 if ( tso->why_blocked == BlockedOnMVar
129 || tso->why_blocked == BlockedOnBlackHole
130 || tso->why_blocked == BlockedOnException
132 || tso->why_blocked == BlockedOnGA
133 || tso->why_blocked == BlockedOnGA_NoSend
136 tso->block_info.closure = evacuate(tso->block_info.closure);
138 tso->blocked_exceptions =
139 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
141 // We don't always chase the link field: TSOs on the blackhole
142 // queue are not automatically alive, so the link field is a
143 // "weak" pointer in that case.
144 if (tso->why_blocked != BlockedOnBlackHole) {
145 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
148 // scavenge current transaction record
149 tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
151 // scavenge this thread's stack
152 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
155 /* -----------------------------------------------------------------------------
156 Blocks of function args occur on the stack (at the top) and
157 in PAPs; scavenge them using the function's argument bitmap.
158 -------------------------------------------------------------------------- */
161 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
// Dispatch on how pointerhood of the arg block is described: ARG_GEN has a
// small inline bitmap, ARG_GEN_BIG an out-of-line large bitmap, and every
// other fun_type indexes a standard pattern in stg_arg_bitmaps[].
168 switch (fun_info->f.fun_type) {
170 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
171 size = BITMAP_SIZE(fun_info->f.b.bitmap);
174 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
175 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
179 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
180 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
// In stack/arg bitmaps a CLEAR bit means "pointer": evacuate the word and
// write the forwarded address back in place.
183 if ((bitmap & 1) == 0) {
184 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
187 bitmap = bitmap >> 1;
// Scavenge the argument payload of a PAP or AP, using the bitmap of the
// underlying function `fun` to decide which payload words are pointers.
// `size` is the payload length in words.  Structure parallels
// scavenge_arg_block() above.
196 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
200 StgFunInfoTable *fun_info;
202 fun_info = get_fun_itbl(fun);
// fun must be the real function, never another PAP (PAPs of PAPs are
// flattened elsewhere).
203 ASSERT(fun_info->i.type != PAP);
206 switch (fun_info->f.fun_type) {
208 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
211 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
// BCOs describe their argument pointerhood with their own bitmap.
215 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
219 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
// Clear bit == pointer; evacuate and write back.
222 if ((bitmap & 1) == 0) {
223 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
226 bitmap = bitmap >> 1;
// Scavenge a PAP: evacuate the function closure, then walk the saved
// argument payload via the function's bitmap.  Returns the address just
// past the PAP (used by callers to advance their scan pointer).
235 scavenge_PAP (StgPAP *pap)
237 pap->fun = evacuate(pap->fun);
238 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
// Scavenge an AP (thunk applying a function to arguments); identical in
// shape to scavenge_PAP() above.  Returns the address just past the AP.
242 scavenge_AP (StgAP *ap)
244 ap->fun = evacuate(ap->fun);
245 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
248 /* -----------------------------------------------------------------------------
249 Scavenge a given step until there are no more objects in this step
252 evac_gen is set by the caller to be either zero (for a step in a
253 generation < N) or G where G is the generation of the step being
256 We sometimes temporarily change evac_gen back to zero if we're
257 scavenging a mutable object where early promotion isn't such a good
259 -------------------------------------------------------------------------- */
// Main breadth-first scavenger for one step.  (The function header and
// several locals -- bd, p, q, stp -- are in lines elided from this extract;
// q presumably saves the start address of the current object so the
// clean/dirty info-table rewrites below can target the object header.)
267 nat saved_evac_gen = evac_gen;
272 failed_to_evac = rtsFalse;
274 /* scavenge phase - standard breadth-first scavenging of the
// Loop until the scan pointer catches up with the allocation pointer:
// scavenging evacuates more objects into this step, extending the frontier.
278 while (bd != stp->hp_bd || p < stp->hp) {
280 // If we're at the end of this block, move on to the next block
281 if (bd != stp->hp_bd && p == bd->free) {
287 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
288 info = get_itbl((StgClosure *)p);
290 ASSERT(thunk_selector_depth == 0);
// One case per closure type; each case evacuates the object's pointer
// fields in place and advances p past the object.
293 switch (info->type) {
// MVars are always mutable: scavenge with evac_gen temporarily lowered
// (set in an elided line), then restore it and force a mutable-list entry.
297 StgMVar *mvar = ((StgMVar *)p);
299 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
300 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
301 mvar->value = evacuate((StgClosure *)mvar->value);
302 evac_gen = saved_evac_gen;
303 failed_to_evac = rtsTrue; // mutable.
304 p += sizeofW(StgMVar);
// Specialised small-arity FUN/THUNK/CONSTR cases: fixed payload layouts
// (the _2_0 / _1_0 / _0_1 / _0_2 / _1_1 case labels are elided here).
309 scavenge_fun_srt(info);
310 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
311 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
312 p += sizeofW(StgHeader) + 2;
316 scavenge_thunk_srt(info);
317 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
318 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
319 p += sizeofW(StgThunk) + 2;
323 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
324 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
325 p += sizeofW(StgHeader) + 2;
329 scavenge_thunk_srt(info);
330 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
331 p += sizeofW(StgThunk) + 1;
335 scavenge_fun_srt(info);
337 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
338 p += sizeofW(StgHeader) + 1;
342 scavenge_thunk_srt(info);
343 p += sizeofW(StgThunk) + 1;
347 scavenge_fun_srt(info);
349 p += sizeofW(StgHeader) + 1;
353 scavenge_thunk_srt(info);
354 p += sizeofW(StgThunk) + 2;
358 scavenge_fun_srt(info);
360 p += sizeofW(StgHeader) + 2;
364 scavenge_thunk_srt(info);
365 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
366 p += sizeofW(StgThunk) + 2;
370 scavenge_fun_srt(info);
372 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
373 p += sizeofW(StgHeader) + 2;
377 scavenge_fun_srt(info);
// Generic THUNK: pointers first (per layout.payload.ptrs), then skip the
// non-pointer words.
384 scavenge_thunk_srt(info);
385 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
386 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
387 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
389 p += info->layout.payload.nptrs;
// Generic FUN/CONSTR/etc.: same pointers-then-nonpointers walk.
400 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
401 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
402 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
404 p += info->layout.payload.nptrs;
// BCOs carry four sub-objects, each evacuated individually.
409 StgBCO *bco = (StgBCO *)p;
410 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
411 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
412 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
413 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
// IND_PERM promoted into an old generation becomes IND_OLDGEN_PERM;
// profiling bookkeeping treats this as death + rebirth of the closure.
419 if (stp->gen->no != 0) {
422 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
423 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
424 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
427 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
429 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
431 // We pretend that p has just been created.
432 LDV_RECORD_CREATE((StgClosure *)p);
435 case IND_OLDGEN_PERM:
436 ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
437 p += sizeofW(StgInd);
// MUT_VARs: scavenge the contents with eager promotion off, then rewrite
// the header to DIRTY/CLEAN depending on whether any pointee stayed young.
441 case MUT_VAR_DIRTY: {
442 rtsBool saved_eager_promotion = eager_promotion;
444 eager_promotion = rtsFalse;
445 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
446 eager_promotion = saved_eager_promotion;
448 if (failed_to_evac) {
449 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
451 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
453 p += sizeofW(StgMutVar);
// Black holes contain no live pointers; just skip over them.
458 case SE_CAF_BLACKHOLE:
461 p += BLACKHOLE_sizeW();
466 StgSelector *s = (StgSelector *)p;
467 s->selectee = evacuate(s->selectee);
468 p += THUNK_SELECTOR_sizeW();
472 // A chunk of stack saved in a heap object
475 StgAP_STACK *ap = (StgAP_STACK *)p;
477 ap->fun = evacuate(ap->fun);
478 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
479 p = (StgPtr)ap->payload + ap->size;
// PAP/AP helpers return the address past the object.
484 p = scavenge_PAP((StgPAP *)p);
488 p = scavenge_AP((StgAP *)p);
// ARR_WORDS holds no pointers: skip the whole array.
493 p += arr_words_sizeW((StgArrWords *)p);
496 case MUT_ARR_PTRS_CLEAN:
497 case MUT_ARR_PTRS_DIRTY:
503 // We don't eagerly promote objects pointed to by a mutable
504 // array, but if we find the array only points to objects in
505 // the same or an older generation, we mark it "clean" and
506 // avoid traversing it during minor GCs.
507 saved_eager = eager_promotion;
508 eager_promotion = rtsFalse;
509 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
510 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
511 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
513 eager_promotion = saved_eager;
515 if (failed_to_evac) {
516 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
518 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
521 failed_to_evac = rtsTrue; // always put it on the mutable list.
// Frozen arrays are immutable, but may still need one trip round the
// mutable list if they point into a younger generation (FROZEN0).
525 case MUT_ARR_PTRS_FROZEN:
526 case MUT_ARR_PTRS_FROZEN0:
531 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
532 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
533 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
536 // If we're going to put this object on the mutable list, then
537 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
538 if (failed_to_evac) {
539 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
541 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// TSOs: scavengeTSO (call elided here) runs with eager promotion off; the
// DIRTY flag records whether the TSO still points into a younger gen.
548 StgTSO *tso = (StgTSO *)p;
549 rtsBool saved_eager = eager_promotion;
551 eager_promotion = rtsFalse;
553 eager_promotion = saved_eager;
555 if (failed_to_evac) {
556 tso->flags |= TSO_DIRTY;
558 tso->flags &= ~TSO_DIRTY;
561 failed_to_evac = rtsTrue; // always on the mutable list
// ---- Parallel (PAR/GRAN) closure types below; normally compiled out ----
570 nat size, ptrs, nonptrs, vhs;
572 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
574 StgRBH *rbh = (StgRBH *)p;
575 (StgClosure *)rbh->blocking_queue =
576 evacuate((StgClosure *)rbh->blocking_queue);
577 failed_to_evac = rtsTrue; // mutable anyhow.
578 debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
579 p, info_type(p), (StgClosure *)rbh->blocking_queue);
580 // ToDo: use size of reverted closure here!
581 p += BLACKHOLE_sizeW();
587 StgBlockedFetch *bf = (StgBlockedFetch *)p;
588 // follow the pointer to the node which is being demanded
589 (StgClosure *)bf->node =
590 evacuate((StgClosure *)bf->node);
591 // follow the link to the rest of the blocking queue
592 (StgClosure *)bf->link =
593 evacuate((StgClosure *)bf->link);
// NOTE(review): the debugTrace call below ends in "))" -- one more closing
// paren than it opens (a leftover from the old belch-style macros).  This
// would not compile if the PAR tracing path were enabled; verify upstream.
594 debugTrace(DEBUG_gc, "scavenge: %p (%s); node is now %p; exciting, isn't it",
595 bf, info_type((StgClosure *)bf),
596 bf->node, info_type(bf->node)));
597 p += sizeofW(StgBlockedFetch);
605 p += sizeofW(StgFetchMe);
606 break; // nothing to do in this case
610 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
611 (StgClosure *)fmbq->blocking_queue =
612 evacuate((StgClosure *)fmbq->blocking_queue);
// NOTE(review): same unbalanced "))" as above -- PAR-only code.
613 debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
614 p, info_type((StgClosure *)p)));
615 p += sizeofW(StgFetchMeBlockingQueue);
// ---- STM closure types: all mutable, scavenged with evac_gen lowered ----
620 case TVAR_WATCH_QUEUE:
622 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
624 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
625 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
626 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
627 evac_gen = saved_evac_gen;
628 failed_to_evac = rtsTrue; // mutable
629 p += sizeofW(StgTVarWatchQueue);
635 StgTVar *tvar = ((StgTVar *) p);
637 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
638 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
639 evac_gen = saved_evac_gen;
640 failed_to_evac = rtsTrue; // mutable
641 p += sizeofW(StgTVar);
647 StgTRecHeader *trec = ((StgTRecHeader *) p);
649 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
650 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
651 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
652 evac_gen = saved_evac_gen;
653 failed_to_evac = rtsTrue; // mutable
654 p += sizeofW(StgTRecHeader);
661 StgTRecChunk *tc = ((StgTRecChunk *) p);
662 TRecEntry *e = &(tc -> entries[0]);
664 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
// Only entries up to next_entry_idx are in use; the rest are garbage.
665 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
666 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
667 e->expected_value = evacuate((StgClosure*)e->expected_value);
668 e->new_value = evacuate((StgClosure*)e->new_value);
670 evac_gen = saved_evac_gen;
671 failed_to_evac = rtsTrue; // mutable
672 p += sizeofW(StgTRecChunk);
676 case ATOMIC_INVARIANT:
678 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
680 invariant->code = (StgClosure *)evacuate(invariant->code);
681 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
682 evac_gen = saved_evac_gen;
683 failed_to_evac = rtsTrue; // mutable
684 p += sizeofW(StgAtomicInvariant);
688 case INVARIANT_CHECK_QUEUE:
690 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
692 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
693 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
694 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
695 evac_gen = saved_evac_gen;
696 failed_to_evac = rtsTrue; // mutable
697 p += sizeofW(StgInvariantCheckQueue);
702 barf("scavenge: unimplemented/strange closure type %d @ %p",
707 * We need to record the current object on the mutable list if
708 * (a) It is actually mutable, or
709 * (b) It contains pointers to a younger generation.
710 * Case (b) arises if we didn't manage to promote everything that
711 * the current object points to into the current generation.
713 if (failed_to_evac) {
714 failed_to_evac = rtsFalse;
// Generation 0 has no mutable list: old-to-new pointers from gen 0 are
// impossible, so only record in older generations.
715 if (stp->gen_no > 0) {
716 recordMutableGen((StgClosure *)q, stp->gen);
725 /* -----------------------------------------------------------------------------
726 Scavenge everything on the mark stack.
728 This is slightly different from scavenge():
729 - we don't walk linearly through the objects, so the scavenger
730 doesn't need to advance the pointer on to the next object.
731 -------------------------------------------------------------------------- */
// Used by the compacting collector: objects arrive via the mark stack, one
// at a time, so the case bodies below evacuate pointer fields but never
// advance a scan pointer.  (Locals p, q, info etc. are declared in lines
// elided from this extract; q presumably aliases the popped object.)
734 scavenge_mark_stack(void)
// Everything on the mark stack lives in the oldest generation.
740 evac_gen = oldest_gen->no;
741 saved_evac_gen = evac_gen;
744 while (!mark_stack_empty()) {
745 p = pop_mark_stack();
747 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
748 info = get_itbl((StgClosure *)p);
// Same closure-type dispatch as scavenge(), minus the p increments.
751 switch (info->type) {
// MVars: always mutable; evac_gen is lowered (elided line) and restored.
755 StgMVar *mvar = ((StgMVar *)p);
757 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
758 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
759 mvar->value = evacuate((StgClosure *)mvar->value);
760 evac_gen = saved_evac_gen;
761 failed_to_evac = rtsTrue; // mutable.
// Specialised small-arity FUN/THUNK/CONSTR cases (labels elided).
766 scavenge_fun_srt(info);
767 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
768 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
772 scavenge_thunk_srt(info);
773 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
774 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
778 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
779 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
784 scavenge_fun_srt(info);
785 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
790 scavenge_thunk_srt(info);
791 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
796 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
801 scavenge_fun_srt(info);
806 scavenge_thunk_srt(info);
814 scavenge_fun_srt(info);
// Generic THUNK: walk the pointer words of the payload.
821 scavenge_thunk_srt(info);
822 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
823 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
824 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
// Generic FUN/CONSTR/etc.
836 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
837 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
838 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
844 StgBCO *bco = (StgBCO *)p;
845 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
846 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
847 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
848 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
853 // don't need to do anything here: the only possible case
854 // is that we're in a 1-space compacting collector, with
855 // no "old" generation.
859 case IND_OLDGEN_PERM:
860 ((StgInd *)p)->indirectee =
861 evacuate(((StgInd *)p)->indirectee);
// MUT_VARs: eager promotion off while scavenging; header rewritten to
// DIRTY/CLEAN afterwards depending on failed_to_evac.
865 case MUT_VAR_DIRTY: {
866 rtsBool saved_eager_promotion = eager_promotion;
868 eager_promotion = rtsFalse;
869 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
870 eager_promotion = saved_eager_promotion;
872 if (failed_to_evac) {
873 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
875 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
// Black holes: no live pointers inside.
881 case SE_CAF_BLACKHOLE:
889 StgSelector *s = (StgSelector *)p;
890 s->selectee = evacuate(s->selectee);
894 // A chunk of stack saved in a heap object
897 StgAP_STACK *ap = (StgAP_STACK *)p;
899 ap->fun = evacuate(ap->fun);
900 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
// Return values of the PAP/AP helpers are discarded here: no scan pointer
// to advance in mark-stack mode.
905 scavenge_PAP((StgPAP *)p);
909 scavenge_AP((StgAP *)p);
912 case MUT_ARR_PTRS_CLEAN:
913 case MUT_ARR_PTRS_DIRTY:
919 // We don't eagerly promote objects pointed to by a mutable
920 // array, but if we find the array only points to objects in
921 // the same or an older generation, we mark it "clean" and
922 // avoid traversing it during minor GCs.
923 saved_eager = eager_promotion;
924 eager_promotion = rtsFalse;
925 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
926 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
927 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
929 eager_promotion = saved_eager;
931 if (failed_to_evac) {
932 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
934 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
937 failed_to_evac = rtsTrue; // mutable anyhow.
941 case MUT_ARR_PTRS_FROZEN:
942 case MUT_ARR_PTRS_FROZEN0:
947 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
948 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
949 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
952 // If we're going to put this object on the mutable list, then
953 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
954 if (failed_to_evac) {
955 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
957 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// TSOs: scavengeTSO call is in an elided line between the two
// eager_promotion assignments.
964 StgTSO *tso = (StgTSO *)p;
965 rtsBool saved_eager = eager_promotion;
967 eager_promotion = rtsFalse;
969 eager_promotion = saved_eager;
971 if (failed_to_evac) {
972 tso->flags |= TSO_DIRTY;
974 tso->flags &= ~TSO_DIRTY;
977 failed_to_evac = rtsTrue; // always on the mutable list
// ---- Parallel (PAR/GRAN) closure types; normally compiled out ----
985 nat size, ptrs, nonptrs, vhs;
987 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
989 StgRBH *rbh = (StgRBH *)p;
// NOTE(review): the line below reads bh->blocking_queue but the variable
// declared in this scope is rbh -- almost certainly should be
// rbh->blocking_queue (cf. the identical RBH case in scavenge() and
// scavenge_one()); also the result is discarded rather than written back.
// PAR-only code, so it never compiles in standard builds -- verify upstream.
991 (StgTSO *)evacuate((StgClosure *)bh->blocking_queue);
992 failed_to_evac = rtsTrue; // mutable anyhow.
// NOTE(review): unbalanced "))" at the end of this debugTrace (legacy of
// the old belch macros); would not compile if PAR tracing were enabled.
993 debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
994 p, info_type(p), (StgClosure *)rbh->blocking_queue));
1000 StgBlockedFetch *bf = (StgBlockedFetch *)p;
1001 // follow the pointer to the node which is being demanded
1002 (StgClosure *)bf->node =
1003 evacuate((StgClosure *)bf->node);
1004 // follow the link to the rest of the blocking queue
1005 (StgClosure *)bf->link =
1006 evacuate((StgClosure *)bf->link);
1007 debugTrace(DEBUG_gc, "scavenge: %p (%s); node is now %p; exciting, isn't it",
1008 bf, info_type((StgClosure *)bf),
1009 bf->node, info_type(bf->node)));
1017 break; // nothing to do in this case
1021 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
1022 (StgClosure *)fmbq->blocking_queue =
1023 evacuate((StgClosure *)fmbq->blocking_queue);
1024 debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
1025 p, info_type((StgClosure *)p)));
// ---- STM closure types: all mutable ----
1030 case TVAR_WATCH_QUEUE:
1032 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
1034 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
1035 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
1036 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
1037 evac_gen = saved_evac_gen;
1038 failed_to_evac = rtsTrue; // mutable
1044 StgTVar *tvar = ((StgTVar *) p);
1046 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
1047 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
1048 evac_gen = saved_evac_gen;
1049 failed_to_evac = rtsTrue; // mutable
1056 StgTRecChunk *tc = ((StgTRecChunk *) p);
1057 TRecEntry *e = &(tc -> entries[0]);
1059 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
1060 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1061 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
1062 e->expected_value = evacuate((StgClosure*)e->expected_value);
1063 e->new_value = evacuate((StgClosure*)e->new_value);
1065 evac_gen = saved_evac_gen;
1066 failed_to_evac = rtsTrue; // mutable
1072 StgTRecHeader *trec = ((StgTRecHeader *) p);
1074 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
1075 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
1076 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
1077 evac_gen = saved_evac_gen;
1078 failed_to_evac = rtsTrue; // mutable
1082 case ATOMIC_INVARIANT:
1084 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1086 invariant->code = (StgClosure *)evacuate(invariant->code);
1087 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
1088 evac_gen = saved_evac_gen;
1089 failed_to_evac = rtsTrue; // mutable
1093 case INVARIANT_CHECK_QUEUE:
1095 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1097 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
1098 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
1099 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
1100 evac_gen = saved_evac_gen;
1101 failed_to_evac = rtsTrue; // mutable
1106 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
// Record on the oldest generation's mutable list if anything stayed young.
1110 if (failed_to_evac) {
1111 failed_to_evac = rtsFalse;
1113 recordMutableGen((StgClosure *)q, &generations[evac_gen]);
1117 // mark the next bit to indicate "scavenged"
1118 mark(q+1, Bdescr(q));
1120 } // while (!mark_stack_empty())
1122 // start a new linear scan if the mark stack overflowed at some point
1123 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
1124 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
1125 mark_stack_overflowed = rtsFalse;
1126 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
1127 oldgen_scan = oldgen_scan_bd->start;
1130 if (oldgen_scan_bd) {
1131 // push a new thing on the mark stack
1133 // find a closure that is marked but not scavenged, and start
1135 while (oldgen_scan < oldgen_scan_bd->free
1136 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
1140 if (oldgen_scan < oldgen_scan_bd->free) {
// The bit at q+1 (set above) distinguishes scavenged from merely marked.
1142 // already scavenged?
1143 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
1144 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
1147 push_mark_stack(oldgen_scan);
1148 // ToDo: bump the linear scan by the actual size of the object
1149 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
// End of this block: move the linear scan on to the next block, if any.
1153 oldgen_scan_bd = oldgen_scan_bd->link;
1154 if (oldgen_scan_bd != NULL) {
1155 oldgen_scan = oldgen_scan_bd->start;
1161 /* -----------------------------------------------------------------------------
1162 Scavenge one object.
1164 This is used for objects that are temporarily marked as mutable
1165 because they contain old-to-new generation pointers. Only certain
1166 objects can have this property.
1167 -------------------------------------------------------------------------- */
// Scavenge a single object at p (called from the mutable-list walk).
// Returns a flag in elided code; the visible cases mirror scavenge() but
// operate on exactly one object.  (q here is presumably set to p in an
// elided line, for the header rewrites below.)
1170 scavenge_one(StgPtr p)
1172 const StgInfoTable *info;
1173 nat saved_evac_gen = evac_gen;
1176 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1177 info = get_itbl((StgClosure *)p);
1179 switch (info->type) {
// MVars: always mutable.
1183 StgMVar *mvar = ((StgMVar *)p);
1185 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
1186 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
1187 mvar->value = evacuate((StgClosure *)mvar->value);
1188 evac_gen = saved_evac_gen;
1189 failed_to_evac = rtsTrue; // mutable.
// Generic THUNK: walk the pointer portion of the payload.
1202 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1203 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1204 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
1210 case FUN_1_0: // hardly worth specialising these guys
// Generic FUN/CONSTR/etc.
1226 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1227 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1228 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
// MUT_VARs: scavenge contents with eager promotion off, then mark the
// header DIRTY/CLEAN according to failed_to_evac.
1234 case MUT_VAR_DIRTY: {
1236 rtsBool saved_eager_promotion = eager_promotion;
1238 eager_promotion = rtsFalse;
1239 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
1240 eager_promotion = saved_eager_promotion;
1242 if (failed_to_evac) {
1243 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1245 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
// Black holes: nothing to follow.
1251 case SE_CAF_BLACKHOLE:
1256 case THUNK_SELECTOR:
1258 StgSelector *s = (StgSelector *)p;
1259 s->selectee = evacuate(s->selectee);
// AP_STACK: evacuate the function, then treat the payload as stack.
1265 StgAP_STACK *ap = (StgAP_STACK *)p;
1267 ap->fun = evacuate(ap->fun);
1268 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1269 p = (StgPtr)ap->payload + ap->size;
1274 p = scavenge_PAP((StgPAP *)p);
1278 p = scavenge_AP((StgAP *)p);
1282 // nothing to follow
1285 case MUT_ARR_PTRS_CLEAN:
1286 case MUT_ARR_PTRS_DIRTY:
1289 rtsBool saved_eager;
1291 // We don't eagerly promote objects pointed to by a mutable
1292 // array, but if we find the array only points to objects in
1293 // the same or an older generation, we mark it "clean" and
1294 // avoid traversing it during minor GCs.
1295 saved_eager = eager_promotion;
1296 eager_promotion = rtsFalse;
1298 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1299 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1300 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1302 eager_promotion = saved_eager;
1304 if (failed_to_evac) {
1305 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1307 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
// Mutable arrays always go back on the mutable list.
1310 failed_to_evac = rtsTrue;
1314 case MUT_ARR_PTRS_FROZEN:
1315 case MUT_ARR_PTRS_FROZEN0:
1317 // follow everything
1320 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
1321 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
1322 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1325 // If we're going to put this object on the mutable list, then
1326 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1327 if (failed_to_evac) {
1328 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1330 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
// TSOs: the scavengeTSO call sits in an elided line between the two
// eager_promotion assignments; dirty flag tracks remaining young pointers.
1337 StgTSO *tso = (StgTSO *)p;
1338 rtsBool saved_eager = eager_promotion;
1340 eager_promotion = rtsFalse;
1342 eager_promotion = saved_eager;
1344 if (failed_to_evac) {
1345 tso->flags |= TSO_DIRTY;
1347 tso->flags &= ~TSO_DIRTY;
1350 failed_to_evac = rtsTrue; // always on the mutable list
// ---- Parallel (PAR/GRAN) closure types; normally compiled out ----
1358 nat size, ptrs, nonptrs, vhs;
1360 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
1362 StgRBH *rbh = (StgRBH *)p;
1363 (StgClosure *)rbh->blocking_queue =
1364 evacuate((StgClosure *)rbh->blocking_queue);
1365 failed_to_evac = rtsTrue; // mutable anyhow.
// NOTE(review): unbalanced "))" terminating this debugTrace (and the ones
// below) -- leftover from the old belch-style macros in PAR code; would
// not compile if this path were ever enabled.  Verify upstream.
1366 debugTrace(DEBUG_gc, "scavenge: RBH %p (%s) (new blocking_queue link=%p)",
1367 p, info_type(p), (StgClosure *)rbh->blocking_queue));
1368 // ToDo: use size of reverted closure here!
1374 StgBlockedFetch *bf = (StgBlockedFetch *)p;
1375 // follow the pointer to the node which is being demanded
1376 (StgClosure *)bf->node =
1377 evacuate((StgClosure *)bf->node);
1378 // follow the link to the rest of the blocking queue
1379 (StgClosure *)bf->link =
1380 evacuate((StgClosure *)bf->link);
1381 debugTrace(DEBUG_gc,
1382 "scavenge: %p (%s); node is now %p; exciting, isn't it",
1383 bf, info_type((StgClosure *)bf),
1384 bf->node, info_type(bf->node)));
1392 break; // nothing to do in this case
1396 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
1397 (StgClosure *)fmbq->blocking_queue =
1398 evacuate((StgClosure *)fmbq->blocking_queue);
1399 debugTrace(DEBUG_gc, "scavenge: %p (%s) exciting, isn't it",
1400 p, info_type((StgClosure *)p)));
// ---- STM closure types: all mutable ----
1405 case TVAR_WATCH_QUEUE:
1407 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
1409 wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
1410 wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
1411 wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
1412 evac_gen = saved_evac_gen;
1413 failed_to_evac = rtsTrue; // mutable
1419 StgTVar *tvar = ((StgTVar *) p);
1421 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
1422 tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
1423 evac_gen = saved_evac_gen;
1424 failed_to_evac = rtsTrue; // mutable
1430 StgTRecHeader *trec = ((StgTRecHeader *) p);
1432 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
1433 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
1434 trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
1435 evac_gen = saved_evac_gen;
1436 failed_to_evac = rtsTrue; // mutable
1443 StgTRecChunk *tc = ((StgTRecChunk *) p);
1444 TRecEntry *e = &(tc -> entries[0]);
1446 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
// Only the first next_entry_idx entries are live.
1447 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1448 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
1449 e->expected_value = evacuate((StgClosure*)e->expected_value);
1450 e->new_value = evacuate((StgClosure*)e->new_value);
1452 evac_gen = saved_evac_gen;
1453 failed_to_evac = rtsTrue; // mutable
1457 case ATOMIC_INVARIANT:
1459 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
1461 invariant->code = (StgClosure *)evacuate(invariant->code);
1462 invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
1463 evac_gen = saved_evac_gen;
1464 failed_to_evac = rtsTrue; // mutable
1468 case INVARIANT_CHECK_QUEUE:
1470 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
1472 queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
1473 queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
1474 queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
1475 evac_gen = saved_evac_gen;
1476 failed_to_evac = rtsTrue; // mutable
1481 case IND_OLDGEN_PERM:
1484 /* Careful here: a THUNK can be on the mutable list because
1485 * it contains pointers to young gen objects. If such a thunk
1486 * is updated, the IND_OLDGEN will be added to the mutable
1487 * list again, and we'll scavenge it twice. evacuate()
1488 * doesn't check whether the object has already been
1489 * evacuated, so we perform that check here.
1491 StgClosure *q = ((StgInd *)p)->indirectee;
// If the indirectee has already been evacuated, leave it alone; otherwise
// evacuate it now.
1492 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
1495 ((StgInd *)p)->indirectee = evacuate(q);
1498 #if 0 && defined(DEBUG)
1499 if (RtsFlags.DebugFlags.gc)
1500 /* Debugging code to print out the size of the thing we just
1504 StgPtr start = gen->steps[0].scan;
1505 bdescr *start_bd = gen->steps[0].scan_bd;
1507 scavenge(&gen->steps[0]);
1508 if (start_bd != gen->steps[0].scan_bd) {
1509 size += (P_)BLOCK_ROUND_UP(start) - start;
1510 start_bd = start_bd->link;
1511 while (start_bd != gen->steps[0].scan_bd) {
1512 size += BLOCK_SIZE_W;
1513 start_bd = start_bd->link;
1515 size += gen->steps[0].scan -
1516 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
1518 size = gen->steps[0].scan - start;
1520 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1526 barf("scavenge_one: strange object %d", (int)(info->type));
1529 no_luck = failed_to_evac;
1530 failed_to_evac = rtsFalse;
1534 /* -----------------------------------------------------------------------------
1535 Scavenging mutable lists.
1537 We treat the mutable list of each generation > N (i.e. all the
1538 generations older than the one being collected) as roots. We also
1539 remove non-mutable objects from the mutable list at this point.
1540 -------------------------------------------------------------------------- */
1543 scavenge_mutable_list(generation *gen)
// Scavenge every object on this generation's saved mutable list (the
// list is detached into gen->saved_mut_list before collection starts).
// Objects that are found to be "clean" (no pointers into a younger
// generation) are dropped from the list; everything else is put back
// via recordMutableGen().
1548 bd = gen->saved_mut_list;
// NOTE(review): iteration variables bd/q/p are declared in elided lines;
// each word q on a mut_list block presumably holds one closure pointer p
// -- confirm against the full source.
1551 for (; bd != NULL; bd = bd->link) {
1552 for (q = bd->start; q < bd->free; q++) {
1554 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
// First switch: gather mutable-list statistics by closure type
// (mutlist_MUTVARS / mutlist_MUTARRS / mutlist_OTHERS counters).
1557 switch (get_itbl((StgClosure *)p)->type) {
// MUT_VAR_CLEAN must never appear on a mutable list, so treat it as
// an internal error.
1559 barf("MUT_VAR_CLEAN on mutable list");
1561 mutlist_MUTVARS++; break;
1562 case MUT_ARR_PTRS_CLEAN:
1563 case MUT_ARR_PTRS_DIRTY:
1564 case MUT_ARR_PTRS_FROZEN:
1565 case MUT_ARR_PTRS_FROZEN0:
1566 mutlist_MUTARRS++; break;
1568 mutlist_OTHERS++; break;
1572 // Check whether this object is "clean", that is it
1573 // definitely doesn't point into a young generation.
1574 // Clean objects don't need to be scavenged. Some clean
1575 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1576 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
1577 // TSO, are always on the mutable list.
1579 switch (get_itbl((StgClosure *)p)->type) {
1580 case MUT_ARR_PTRS_CLEAN:
// Clean array: skip scavenging, but it stays on the mutable list.
1581 recordMutableGen((StgClosure *)p,gen);
1584 StgTSO *tso = (StgTSO *)p;
1585 if ((tso->flags & TSO_DIRTY) == 0) {
1586 // A clean TSO: we don't have to traverse its
1587 // stack. However, we *do* follow the link field:
1588 // we don't want to have to mark a TSO dirty just
1589 // because we put it on a different queue.
1590 if (tso->why_blocked != BlockedOnBlackHole) {
1591 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
1593 recordMutableGen((StgClosure *)p,gen);
// Default case: fully scavenge the object.  scavenge_one() returns
// true when the object still points into a younger generation.
1601 if (scavenge_one(p)) {
1602 // didn't manage to promote everything, so put the
1603 // object back on the list.
1604 recordMutableGen((StgClosure *)p,gen);
1609 // free the old mut_list
1610 freeChain(gen->saved_mut_list);
1611 gen->saved_mut_list = NULL;
1614 /* -----------------------------------------------------------------------------
1615 Scavenging the static objects.
1617    We treat the static objects as roots.  Each live static object is
1618    scavenged once per GC: it is moved from the static_objects list to
1619    the scavenged_static_objects list as we go, so we never visit it twice.
1620 -------------------------------------------------------------------------- */
1623 scavenge_static(void)
// Scavenge the global static_objects list, moving each object onto
// scavenged_static_objects after processing it.  New static objects may
// be pushed onto static_objects by evacuate() while we run, so we loop
// until the list is drained.
1625 StgClosure* p = static_objects;
1626 const StgInfoTable *info;
1628 /* Always evacuate straight to the oldest generation for static
1630 evac_gen = oldest_gen->no;
1632 /* keep going until we've scavenged all the objects on the linked
1634 while (p != END_OF_STATIC_LIST) {
1636 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
// GranSim/parallel RTS: a revertible black hole masks the original
// closure's info table, so look through it.
1639 if (info->type==RBH)
1640 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1642 // make sure the info pointer is into text space
1644 /* Take this object *off* the static_objects list,
1645 * and put it on the scavenged_static_objects list.
1647 static_objects = *STATIC_LINK(info,p);
1648 *STATIC_LINK(info,p) = scavenged_static_objects;
1649 scavenged_static_objects = p;
1651 switch (info -> type) {
// IND_STATIC case (label elided): follow the indirection.
1655 StgInd *ind = (StgInd *)p;
1656 ind->indirectee = evacuate(ind->indirectee);
1658 /* might fail to evacuate it, in which case we have to pop it
1659 * back on the mutable list of the oldest generation. We
1660 * leave it *on* the scavenged_static_objects list, though,
1661 * in case we visit this object again.
1663 if (failed_to_evac) {
1664 failed_to_evac = rtsFalse;
1665 recordMutableGen((StgClosure *)p,oldest_gen);
// THUNK_STATIC / FUN_STATIC cases (labels elided): only their SRTs
// need scavenging.
1671 scavenge_thunk_srt(info);
1675 scavenge_fun_srt(info);
// CONSTR_STATIC-style case (label elided): evacuate each pointer in
// the payload; ptrs count comes from the info table layout.
1682 next = (P_)p->payload + info->layout.payload.ptrs;
1683 // evacuate the pointers
1684 for (q = (P_)p->payload; q < next; q++) {
1685 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
1691 barf("scavenge_static: strange closure %d", (int)(info->type));
// Only the IND_STATIC case above may set failed_to_evac; everything
// else must leave it false.
1694 ASSERT(failed_to_evac == rtsFalse);
1696 /* get the next static object from the list. Remember, there might
1697 * be more stuff on this list now that we've done some evacuating!
1698 * (static_objects is a global)
1704 /* -----------------------------------------------------------------------------
1705 scavenge a chunk of memory described by a bitmap
1706 -------------------------------------------------------------------------- */
1709 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
// Scavenge `size` words starting at p, guided by a multi-word bitmap.
// Stack-frame bitmap convention: a CLEAR bit marks a pointer word (to be
// evacuated), a SET bit marks a non-pointer.  `b` indexes the current
// bitmap word, `i` counts words processed (declarations elided).
1715 bitmap = large_bitmap->bitmap[b];
1716 for (i = 0; i < size; ) {
1717 if ((bitmap & 1) == 0) {
// Pointer word: evacuate it and write the new address back in place.
1718 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
// When i crosses a word boundary, advance to the next bitmap word
// (the b++ is in an elided line).
1722 if (i % BITS_IN(W_) == 0) {
1724 bitmap = large_bitmap->bitmap[b];
1726 bitmap = bitmap >> 1;
// Scavenge `size` words at p using a single-word bitmap (clear bit ==
// pointer, same convention as scavenge_large_bitmap).  Returns the
// pointer just past the scanned area (return statement elided).
1731 STATIC_INLINE StgPtr
1732 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
// Loop header (elided) presumably iterates `size` times, advancing p.
1735 if ((bitmap & 1) == 0) {
1736 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1739 bitmap = bitmap >> 1;
1745 /* -----------------------------------------------------------------------------
1746 scavenge_stack walks over a section of stack and evacuates all the
1747 objects pointed to by it. We can use the same code for walking
1748 AP_STACK_UPDs, since these are just sections of copied stack.
1749 -------------------------------------------------------------------------- */
1752 scavenge_stack(StgPtr p, StgPtr stack_end)
// Walk a section of stack [p, stack_end) frame by frame, evacuating
// every pointer it contains.  Also used for AP_STACK chunks, which are
// copied sections of stack.  Frame-local variables (bitmap, size, dyn,
// bco, ...) are declared in elided lines.
1754 const StgRetInfoTable* info;
1759 * Each time around this loop, we are looking at a chunk of stack
1760 * that starts with an activation record.
1763 while (p < stack_end) {
1764 info = get_ret_itbl((StgClosure *)p);
1766 switch (info->i.type) {
// UPDATE_FRAME case (label elided):
1769 // In SMP, we can get update frames that point to indirections
1770 // when two threads evaluate the same thunk. We do attempt to
1771 // discover this situation in threadPaused(), but it's
1772 // possible that the following sequence occurs:
1781 // Now T is an indirection, and the update frame is already
1782 // marked on A's stack, so we won't traverse it again in
1783 // threadPaused(). We could traverse the whole stack again
1784 // before GC, but that seems like overkill.
1786 // Scavenging this update frame as normal would be disastrous;
1787 // the updatee would end up pointing to the value. So we turn
1788 // the indirection into an IND_PERM, so that evacuate will
1789 // copy the indirection into the old generation instead of
1791 if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) {
1792 ((StgUpdateFrame *)p)->updatee->header.info =
1793 (StgInfoTable *)&stg_IND_PERM_info;
1795 ((StgUpdateFrame *)p)->updatee
1796 = evacuate(((StgUpdateFrame *)p)->updatee);
1797 p += sizeofW(StgUpdateFrame);
1800 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1801 case CATCH_STM_FRAME:
1802 case CATCH_RETRY_FRAME:
1803 case ATOMICALLY_FRAME:
// (further small-bitmap case labels, e.g. RET_SMALL, are elided)
1808 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1809 size = BITMAP_SIZE(info->i.layout.bitmap);
1810 // NOTE: the payload starts immediately after the info-ptr, we
1811 // don't have an StgHeader in the same sense as a heap closure.
1813 p = scavenge_small_bitmap(p, size, bitmap);
// After the bitmapped payload, evacuate anything reachable via the
// frame's static reference table.
1817 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
// RET_BCO case (label elided): the frame's layout is described by
// the BCO's own bitmap; first evacuate the BCO pointer itself.
1825 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
1828 size = BCO_BITMAP_SIZE(bco);
1829 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1834 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1840 size = GET_LARGE_BITMAP(&info->i)->size;
1842 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1844 // and don't forget to follow the SRT
// RET_DYN case (label elided):
1848 // Dynamic bitmap: the mask is stored on the stack, and
1849 // there are a number of non-pointers followed by a number
1850 // of pointers above the bitmapped area. (see StgMacros.h,
1855 dyn = ((StgRetDyn *)p)->liveness;
1857 // traverse the bitmap first
1858 bitmap = RET_DYN_LIVENESS(dyn);
1859 p = (P_)&((StgRetDyn *)p)->payload[0];
1860 size = RET_DYN_BITMAP_SIZE;
1861 p = scavenge_small_bitmap(p, size, bitmap);
1863 // skip over the non-ptr words
1864 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1866 // follow the ptr words
1867 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1868 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
// RET_FUN case (label elided): layout of the argument block is taken
// from the function's info table.
1876 StgRetFun *ret_fun = (StgRetFun *)p;
1877 StgFunInfoTable *fun_info;
1879 ret_fun->fun = evacuate(ret_fun->fun);
1880 fun_info = get_fun_itbl(ret_fun->fun);
1881 p = scavenge_arg_block(fun_info, ret_fun->payload);
1886 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1891 /*-----------------------------------------------------------------------------
1892 scavenge the large object list.
1894 evac_gen set by caller; similar games played with evac_gen as with
1895 scavenge() - see comment at the top of scavenge(). Most large
1896 objects are (repeatedly) mutable, so most of the time evac_gen will
1898 --------------------------------------------------------------------------- */
1901 scavenge_large(step *stp)
1906 bd = stp->new_large_objects;
1908 for (; bd != NULL; bd = stp->new_large_objects) {
1910 /* take this object *off* the large objects list and put it on
1911 * the scavenged large objects list. This is so that we can
1912 * treat new_large_objects as a stack and push new objects on
1913 * the front when evacuating.
1915 stp->new_large_objects = bd->link;
1916 dbl_link_onto(bd, &stp->scavenged_large_objects);
1918 // update the block count in this step.
1919 stp->n_scavenged_large_blocks += bd->blocks;
1922 if (scavenge_one(p)) {
1923 if (stp->gen_no > 0) {
1924 recordMutableGen((StgClosure *)p, stp->gen);