/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "LdvProfile.h"

static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);

static void scavenge_block (bdescr *bd, StgPtr scan);
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size = (nat)large_srt->l.size;
    p = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        // a set bit marks an SRT entry that must be scavenged
        if ((bitmap & 1) != 0) {
            evacuate(*p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
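
/* Worked example (illustration only): with 64-bit words, SRT entry 63
 * is bit 63 of l.bitmap[0] and entry 64 is bit 0 of l.bitmap[1]; the
 * "i % BITS_IN(W_)" test above steps b to the next bitmap word at
 * exactly that boundary.  The same indexing written as a direct
 * lookup (entry_is_pointer is hypothetical, not part of this file):
 */
#if 0
static rtsBool
entry_is_pointer (StgLargeSRT *large_srt, nat i)
{
    StgWord w = large_srt->l.bitmap[i / BITS_IN(W_)];
    return ((w >> (i % BITS_IN(W_))) & 1) != 0;
}
#endif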
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at them.  The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
            } else {
                evacuate(*p);
            }
#else
            evacuate(*p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}

STATIC_INLINE void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
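
/* Worked example (illustration only): a small srt_bitmap of 0x5
 * (binary 101) over an SRT of three entries marks entries 0 and 2 as
 * live, so scavenge_srt() evacuates those two closures and skips
 * entry 1.  The sentinel (StgHalfWord)(-1) can never be a real small
 * bitmap; it redirects scavenge_srt() to the large-bitmap variant.
 */
#if 0
    scavenge_srt(srt, 0x5);   // evacuates *srt[0] and *srt[2] only
#endif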
/* -----------------------------------------------------------------------------
   Scavenging a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        tso->block_info.closure = evacuate(tso->block_info.closure);
    }
    tso->blocked_exceptions =
        (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);

    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
    }

    // scavenge current transaction record
    tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
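
/* Callers bracket scavengeTSO() with an eager-promotion save/restore
 * and then flag the TSO dirty or clean, exactly as the TSO cases in
 * scavenge_block() and scavenge_one() below do.  Condensed sketch of
 * that calling convention (illustration only):
 */
#if 0
    rtsBool saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;
    scavengeTSO(tso);
    gct->eager_promotion = saved_eager;
    if (gct->failed_to_evac) {
        tso->flags |= TSO_DIRTY;    // stack must be re-traversed next GC
    } else {
        tso->flags &= ~TSO_DIRTY;   // clean: only the link field is followed
    }
#endif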
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
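
/* Note the polarity: in argument/stack bitmaps a *clear* bit marks a
 * pointer and a set bit marks a non-pointer word, the opposite of the
 * SRT bitmaps above.  Worked example (illustration only): three stack
 * words [ptr, int, ptr] are described by size = 3 and bitmap = 0x2
 * (binary 010), so words 0 and 2 are evacuated and word 1 is skipped.
 */
#if 0
    p = scavenge_small_bitmap(p, 3, 0x2);   // see scavenge_small_bitmap below
#endif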
static StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE StgPtr
scavenge_PAP (StgPAP *pap)
{
    pap->fun = evacuate(pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    ap->fun = evacuate(ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge_block (bdescr *bd, StgPtr scan)
{
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    p = scan;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
               bd->start, bd->gen_no, bd->step->no, scan);

    gct->evac_gen = bd->gen_no;
    saved_evac_gen = gct->evac_gen;
    gct->failed_to_evac = rtsFalse;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
            mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
            mvar->value = evacuate((StgClosure *)mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
            ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
        case CONSTR_1_0:
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
        case CONSTR_1_1:
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
            bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
            bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }

        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                //
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                //
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            // fall through
        case IND_OLDGEN_PERM:
            ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;
        }

        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
            p += BLACKHOLE_sizeW();
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            s->selectee = evacuate(s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            ap->fun = evacuate(ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }

        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;

        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            rtsBool saved_eager = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            scavengeTSO(tso);
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                tso->flags |= TSO_DIRTY;
            } else {
                tso->flags &= ~TSO_DIRTY;
            }

            gct->failed_to_evac = rtsTrue; // always on the mutable list
            p += tso_sizeW(tso);
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_gen = 0;
            wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
            wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
            wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVarWatchQueue);
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_gen = 0;
            tvar->current_value = evacuate((StgClosure*)tvar->current_value);
            tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVar);
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_gen = 0;
            trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
            trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
            trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecHeader);
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_gen = 0;
            tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
                e->expected_value = evacuate((StgClosure*)e->expected_value);
                e->new_value = evacuate((StgClosure*)e->new_value);
            }
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_gen = 0;
            invariant->code = (StgClosure *)evacuate(invariant->code);
            invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgAtomicInvariant);
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_gen = 0;
            queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
            queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
            queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgInvariantCheckQueue);
            break;
        }

        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
            }
        }
    }

    debugTrace(DEBUG_gc, " scavenged %ld bytes", (bd->free - scan) * sizeof(W_));
}
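
/* The tail of the loop above is the generic old-to-young tracking
 * protocol used throughout this file: clear failed_to_evac, scavenge,
 * then record the object on its generation's mutable list if the flag
 * came back set.  Distilled sketch (illustration only; "obj" and
 * scavenge_fields() are placeholders, not real RTS names):
 */
#if 0
    gct->failed_to_evac = rtsFalse;
    scavenge_fields(obj);                   // may evacuate into a younger gen
    if (gct->failed_to_evac) {
        gct->failed_to_evac = rtsFalse;
        if (bd->gen_no > 0) {
            // obj still points into a younger generation: revisit it
            recordMutableGen_GC((StgClosure *)obj, &generations[bd->gen_no]);
        }
    }
#endif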
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    gct->evac_gen = oldest_gen->no;
    saved_evac_gen = gct->evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
            mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
            mvar->value = evacuate((StgClosure *)mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
            ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
            bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
            bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            ((StgInd *)p)->indirectee =
                evacuate(((StgInd *)p)->indirectee);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }

        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            s->selectee = evacuate(s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            ap->fun = evacuate(ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            rtsBool saved_eager = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            scavengeTSO(tso);
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                tso->flags |= TSO_DIRTY;
            } else {
                tso->flags &= ~TSO_DIRTY;
            }

            gct->failed_to_evac = rtsTrue; // always on the mutable list
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_gen = 0;
            wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
            wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
            wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_gen = 0;
            tvar->current_value = evacuate((StgClosure*)tvar->current_value);
            tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_gen = 0;
            tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
                e->expected_value = evacuate((StgClosure*)e->expected_value);
                e->new_value = evacuate((StgClosure*)e->new_value);
            }
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_gen = 0;
            trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
            trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
            trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_gen = 0;
            invariant->code = (StgClosure *)evacuate(invariant->code);
            invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_gen = 0;
            queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
            queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
            queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
            gct->evac_gen = saved_evac_gen;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen > 0) {
                recordMutableGen_GC((StgClosure *)q, &generations[gct->evac_gen]);
            }
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())

    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
    loop:
        // find a closure that is marked but not scavenged, and start
        // from there.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
                goto loop;
            }
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            goto linear_scan;
        } else {
            oldgen_scan_bd = oldgen_scan_bd->link;
            if (oldgen_scan_bd != NULL) {
                oldgen_scan = oldgen_scan_bd->start;
                goto loop;
            }
        }
    }
}
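
/* When the mark stack overflows we fall back to the linear sweep
 * above: walk the old generation's blocks and, for each closure whose
 * mark bit is set but whose "scavenged" bit (the next bit) is not,
 * push it back on the mark stack.  Condensed sketch of one scan step
 * (illustration only):
 */
#if 0
    while (oldgen_scan < oldgen_scan_bd->free) {
        if (is_marked(oldgen_scan, oldgen_scan_bd)
            && !is_marked(oldgen_scan+1, oldgen_scan_bd)) {
            push_mark_stack(oldgen_scan);   // marked but not yet scavenged
        }
        oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
    }
#endif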
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */

static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    nat saved_evac_gen = gct->evac_gen;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
        mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
        mvar->value = evacuate((StgClosure *)mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
        }
        break;
    }

    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
        }
        break;
    }

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        ap->fun = evacuate(ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q;

        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }

    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        rtsBool saved_eager = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        scavengeTSO(tso);
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            tso->flags |= TSO_DIRTY;
        } else {
            tso->flags &= ~TSO_DIRTY;
        }

        gct->failed_to_evac = rtsTrue; // always on the mutable list
        break;
    }

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_gen = 0;
        wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
        wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
        wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_gen = 0;
        tvar->current_value = evacuate((StgClosure*)tvar->current_value);
        tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_gen = 0;
        trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
        trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
        trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_gen = 0;
        tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
            e->expected_value = evacuate((StgClosure*)e->expected_value);
            e->new_value = evacuate((StgClosure*)e->new_value);
        }
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_gen = 0;
        invariant->code = (StgClosure *)evacuate(invariant->code);
        invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_gen = 0;
        queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
        queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
        queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
        gct->evac_gen = saved_evac_gen;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    {
        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects. If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice. evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
         */
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
            break;
        }
        ((StgInd *)p)->indirectee = evacuate(q);
    }

#if 0 && defined(DEBUG)
    if (RtsFlags.DebugFlags.gc)
    /* Debugging code to print out the size of the thing we just
     * evacuated
     */
    {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;

        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
            size += (P_)BLOCK_ROUND_UP(start) - start;
            start_bd = start_bd->link;
            while (start_bd != gen->steps[0].scan_bd) {
                size += BLOCK_SIZE_W;
                start_bd = start_bd->link;
            }
            size += gen->steps[0].scan -
                (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
            size = gen->steps[0].scan - start;
        }
        debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
    }
#endif
    break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
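
/* scavenge_one() returns rtsTrue when it failed to promote everything
 * the object points to, in which case the caller must keep the object
 * on a mutable list.  Typical call site (illustration only; this is
 * the pattern used by scavenge_mutable_list() below):
 */
#if 0
    if (scavenge_one(p)) {
        recordMutableGen_GC((StgClosure *)p, gen);   // revisit at next GC
    }
#endif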
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */

void
scavenge_mutable_list(generation *gen)
{
    bdescr *bd;
    StgPtr p, q;

    bd = gen->saved_mut_list;

    gct->evac_gen = gen->no;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }

            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
            // TSO, are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // A clean TSO: we don't have to traverse its
                    // stack.  However, we *do* follow the link field:
                    // we don't want to have to mark a TSO dirty just
                    // because we put it on a different queue.
                    if (tso->why_blocked != BlockedOnBlackHole) {
                        tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
                    }
                    recordMutableGen_GC((StgClosure *)p,gen);
                    continue;
                }
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen);
            }
        }
    }

    // free the old mut_list
    freeChain(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
}
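
/* A generation's mutable list is a chain of blocks whose payloads are
 * plain arrays of closure pointers, which is why the walk above is a
 * pair of nested loops over bd->link and bd->start..bd->free.  The
 * traversal skeleton in isolation (illustration only):
 */
#if 0
    for (bd = gen->saved_mut_list; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;    // each entry is a pointer to a closure
            // ... decide whether to scavenge p or re-record it ...
        }
    }
#endif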
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Static objects that are reachable from the collected generations
   are treated as roots: each one is moved off the static_objects list
   and onto the scavenged_static_objects list as it is scavenged.
   -------------------------------------------------------------------------- */

static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen = oldest_gen->no;

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        ACQUIRE_SPIN_LOCK(&static_objects_sync);

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = static_objects;
        if (p == END_OF_STATIC_LIST) {
            RELEASE_SPIN_LOCK(&static_objects_sync);
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);

        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = scavenged_static_objects;
        scavenged_static_objects = p;

        RELEASE_SPIN_LOCK(&static_objects_sync);

        switch (info->type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            ind->indirectee = evacuate(ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
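
/* static_objects and scavenged_static_objects are intrusive lists
 * threaded through the static closures themselves via STATIC_LINK, so
 * moving an object from one list to the other is just two pointer
 * updates done under the spin lock.  The splice in isolation
 * (illustration only):
 */
#if 0
    static_objects = *STATIC_LINK(info,p);           // pop from pending list
    *STATIC_LINK(info,p) = scavenged_static_objects; // link onto done list
    scavenged_static_objects = p;
#endif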
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */

static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info  = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs: thread B
            // updates thunk T to a value while thread A still has an
            // update frame for T (already marked) on its stack.
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // shorting it out.
        {
            nat type;
            type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
            if (type == IND) {
                ((StgUpdateFrame *)p)->updatee->header.info =
                    (StgInfoTable *)&stg_IND_PERM_info;
            } else if (type == IND_OLDGEN) {
                ((StgUpdateFrame *)p)->updatee->header.info =
                    (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
            }
            ((StgUpdateFrame *)p)->updatee
                = evacuate(((StgUpdateFrame *)p)->updatee);
            p += sizeofW(StgUpdateFrame);
            continue;
        }

            // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            if (major_gc)
                scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;

        case RET_BCO: {
            StgBCO *bco;
            nat size;

            p++;
            *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

            // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }

            // Dynamic bitmap: the mask is stored on the stack, and
            // there are a number of non-pointers followed by a number
            // of pointers above the bitmapped area.  (see StgMacros.h,
            // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
                p++;
            }
            continue;
        }

        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            ret_fun->fun = evacuate(ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */

static void
scavenge_large (step_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen = ws->stp->gen_no;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->stp->sync_large_objects);
        dbl_link_onto(bd, &ws->stp->scavenged_large_objects);
        ws->stp->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->stp->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->stp->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->stp->gen);
            }
        }
    }
}
/* ----------------------------------------------------------------------------
   Find the oldest full block to scavenge, and scavenge it.
   ------------------------------------------------------------------------- */

static rtsBool
scavenge_find_global_work (void)
{
    bdescr *bd;
    int g, s;
    rtsBool flag;
    step_workspace *ws;

    flag = rtsFalse;
    for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
        for (s = generations[g].n_steps; --s >= 0; ) {
            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
                continue;
            }
            ws = &gct->steps[g][s];

            // If we have any large objects to scavenge, do them now.
            if (ws->todo_large_objects) {
                scavenge_large(ws);
                flag = rtsTrue;
            }

            if ((bd = grab_todo_block(ws)) != NULL) {
                // no need to assign this to ws->scan_bd, we're going
                // to scavenge the whole thing and then push it on
                // our scavd list.  This saves pushing out the
                // scan_bd block, which might be partial.
                scavenge_block(bd, bd->start);
                push_scan_block(bd, ws);
                flag = rtsTrue;
            }

            if (flag) return rtsTrue;
        }
    }
    return rtsFalse;
}
/* ----------------------------------------------------------------------------
   Look for local work to do.

   We can have outstanding scavenging to do if, for any of the workspaces,

     - the scan block is the same as the todo block, and new objects
       have been evacuated to the todo block.

     - the scan block *was* the same as the todo block, but the todo
       block filled up and a new one has been allocated.
   ------------------------------------------------------------------------- */

static rtsBool
scavenge_find_local_work (void)
{
    int g, s;
    step_workspace *ws;
    rtsBool flag;

    flag = rtsFalse;
    for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
        for (s = generations[g].n_steps; --s >= 0; ) {
            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
                continue;
            }
            ws = &gct->steps[g][s];

            // If we have a todo block and no scan block, start
            // scanning the todo block.
            if (ws->scan_bd == NULL && ws->todo_bd != NULL)
            {
                ws->scan_bd = ws->todo_bd;
                ws->scan = ws->scan_bd->start;
            }

            // If we have a scan block with some work to do,
            // scavenge everything up to the free pointer.
            if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
            {
                scavenge_block(ws->scan_bd, ws->scan);
                ws->scan = ws->scan_bd->free;
                flag = rtsTrue;
            }

            if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free
                && ws->scan_bd != ws->todo_bd)
            {
                // we're not going to evac any more objects into
                // this block, so push it now.
                push_scan_block(ws->scan_bd, ws);
                ws->scan_bd = NULL;
                ws->scan = NULL;
                // we might be able to scan the todo block now.  But
                // don't do it right away: there might be full blocks
                // waiting to be scanned as a result of scavenge_block above.
                flag = rtsTrue;
            }

            if (flag) return rtsTrue;
        }
    }
    return rtsFalse;
}
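
/* The two conditions described in the comment above, written as a
 * predicate over a single workspace (illustration only;
 * workspace_has_local_work is hypothetical, not part of this file):
 */
#if 0
static rtsBool
workspace_has_local_work (step_workspace *ws)
{
    if (ws->scan_bd == NULL && ws->todo_bd != NULL)
        return rtsTrue;   // a fresh todo block is waiting to be scanned
    if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
        return rtsTrue;   // objects were evacuated past the scan pointer
    return rtsFalse;
}
#endif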
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(static_objects));
        scavenge_static();
        work_to_do = rtsTrue;
    }

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_global_work()) goto loop;
    if (scavenge_find_local_work())  goto loop;

    if (work_to_do) goto loop;
}

/* ----------------------------------------------------------------------------
   Checking for any remaining work to do.
   ------------------------------------------------------------------------- */

rtsBool
any_work (void)
{
    int g, s;
    step_workspace *ws;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        return rtsTrue;
    }

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        return rtsTrue;
    }

    // Check for global work in any step.  We don't need to check for
    // local work, because we have already exited scavenge_loop(),
    // which means there is no local work for this thread.
    for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
        for (s = generations[g].n_steps; --s >= 0; ) {
            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
                continue;
            }
            ws = &gct->steps[g][s];
            if (ws->todo_large_objects) return rtsTrue;
            if (ws->stp->todos) return rtsTrue;
        }
    }

    return rtsFalse;
}