/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "MarkStack.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "Sanity.h"
#include "Capability.h"
#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}
static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);

    // update the pointer from the Task.
    if (tso->bound != NULL) {
        tso->bound->tso = tso;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnMsgWakeup
        || tso->why_blocked == BlockedOnMsgThrowTo
        ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->dirty = 1;
        scavenge_TSO_link(tso);
    } else {
        tso->dirty = 0;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}
/* -----------------------------------------------------------------------------
   Mutable arrays of pointers
   -------------------------------------------------------------------------- */
static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
{
    W_ m;
    rtsBool any_failed;
    StgPtr p, q;

    any_failed = rtsFalse;
    p = (StgPtr)&a->payload[0];
    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
    {
        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
        for (; p < q; p++) {
            evacuate((StgClosure**)p);
        }
        if (gct->failed_to_evac) {
            any_failed = rtsTrue;
            *mutArrPtrsCard(a,m) = 1;
            gct->failed_to_evac = rtsFalse;
        } else {
            *mutArrPtrsCard(a,m) = 0;
        }
    }

    // scavenge the last (possibly partial) card
    q = (StgPtr)&a->payload[a->ptrs];
    for (; p < q; p++) {
        evacuate((StgClosure**)p);
    }
    if (gct->failed_to_evac) {
        any_failed = rtsTrue;
        *mutArrPtrsCard(a,m) = 1;
        gct->failed_to_evac = rtsFalse;
    } else {
        *mutArrPtrsCard(a,m) = 0;
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
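
/* Illustrative sketch (not compiled in): the card-table arithmetic the
 * code above relies on.  Each card covers 2^MUT_ARR_PTRS_CARD_BITS
 * payload words, and mutArrPtrsCard(a,m) addresses the byte recording
 * whether card m may still hold a pointer into a younger generation.
 */
#if 0
static void mark_card_for_index (StgMutArrPtrs *a, W_ i)
{
    W_ m = i >> MUT_ARR_PTRS_CARD_BITS;   // card containing payload[i]
    *mutArrPtrsCard(a,m) = 1;             // re-scan this card at the next minor GC
}
#endif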
// scavenge only the marked areas of a MUT_ARR_PTRS
static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
{
    W_ m;
    StgPtr p, q;
    rtsBool any_failed;

    any_failed = rtsFalse;
    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
    {
        if (*mutArrPtrsCard(a,m) != 0) {
            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
                        (StgPtr)&a->payload[a->ptrs]);
            for (; p < q; p++) {
                evacuate((StgClosure**)p);
            }
            if (gct->failed_to_evac) {
                any_failed = rtsTrue;
                gct->failed_to_evac = rtsFalse;
            } else {
                *mutArrPtrsCard(a,m) = 0;
            }
        }
    }

    gct->failed_to_evac = any_failed;
    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
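
/* Illustrative sketch (not compiled in): how the small-bitmap loops
 * below decode an argument bitmap.  Note the sense of the bits: a
 * *clear* bit means "this word is a pointer, follow it", while a set
 * bit marks a non-pointer word that is skipped.
 */
#if 0
while (size > 0) {
    if ((bitmap & 1) == 0) {
        evacuate((StgClosure **)p);   // pointer word
    }                                 // else: non-pointer, leave it alone
    p++;
    bitmap = bitmap >> 1;
    size--;
}
#endif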
STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenging the SRTs
   -------------------------------------------------------------------------- */
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }
    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
STATIC_INLINE GNUC_ATTR_HOT void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE GNUC_ATTR_HOT void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
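
/* Illustrative sketch (not compiled in) of the failed_to_evac protocol
 * used throughout this file: evacuate() sets gct->failed_to_evac when it
 * could not move an object into gct->evac_gen, and the scavenger responds
 * by recording the *containing* object on the mutable list, so that the
 * old-to-new pointer is found again at the next GC.
 */
#if 0
gct->failed_to_evac = rtsFalse;
evacuate(&((StgClosure *)p)->payload[0]);
if (gct->failed_to_evac) {
    gct->failed_to_evac = rtsFalse;
    recordMutableGen_GC((StgClosure *)p, bd->gen_no);
}
#endif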
static GNUC_ATTR_HOT void
scavenge_block (bdescr *bd)
{
    StgPtr p, q;
    StgInfoTable *info;
    rtsBool saved_eager_promotion;
    gen_workspace *ws;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
               bd->start, bd->gen_no, bd->u.scan);

    gct->scan_bd = bd;
    gct->evac_gen = bd->gen;
    saved_eager_promotion = gct->eager_promotion;
    gct->failed_to_evac = rtsFalse;

    ws = &gct->gens[bd->gen->no];

    p = bd->u.scan;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {

        ASSERT(bd->link == NULL);
        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);

        q = p;
    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        p += sizeofW(StgMVar);
        break;
    }
    case FUN_2_0:
        scavenge_fun_srt(info);
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_2_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[1]);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case CONSTR_2_0:
        evacuate(&((StgClosure *)p)->payload[1]);
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_0:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_1_0:
        scavenge_fun_srt(info);
    case CONSTR_1_0:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_1:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 1;
        break;

    case FUN_0_1:
        scavenge_fun_srt(info);
    case CONSTR_0_1:
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_2:
        scavenge_thunk_srt(info);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_0_2:
        scavenge_fun_srt(info);
    case CONSTR_0_2:
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_1:
        scavenge_thunk_srt(info);
        evacuate(&((StgThunk *)p)->payload[0]);
        p += sizeofW(StgThunk) + 2;
        break;

    case FUN_1_1:
        scavenge_fun_srt(info);
    case CONSTR_1_1:
        evacuate(&((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;
    case FUN:
        scavenge_fun_srt(info);
        goto gen_obj;

    case THUNK:
    {
        StgPtr end;

        scavenge_thunk_srt(info);
        end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    gen_obj:
    case CONSTR:
    case WEAK:
    case PRIM:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    case BCO: {
        StgBCO *bco = (StgBCO *)p;
        evacuate((StgClosure **)&bco->instrs);
        evacuate((StgClosure **)&bco->literals);
        evacuate((StgClosure **)&bco->ptrs);
        p += bco_sizeW(bco);
        break;
    }
    case IND_PERM:
        if (bd->gen_no != 0) {
#ifdef PROFILING
            // @LDV profiling
            // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
            // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
            LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
            //
            // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
            //
            SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

            // We pretend that p has just been created.
            LDV_RECORD_CREATE((StgClosure *)p);
        }
        // fall through
    case IND_OLDGEN_PERM:
        evacuate(&((StgInd *)p)->indirectee);
        p += sizeofW(StgInd);
        break;
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY:
        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        p += sizeofW(StgMutVar);
        break;

    case BLACKHOLE:
        p += BLACKHOLE_sizeW();
        break;
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
    }

    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        p += arr_words_sizeW((StgArrWords *)p);
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        scavengeTSO(tso);
        p += tso_sizeW(tso);
        break;
    }

    case MUT_PRIM:
    {
        StgPtr end;

        gct->eager_promotion = rtsFalse;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }
        p += info->layout.payload.nptrs;

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        p += sizeofW(StgTRecChunk);
        break;
    }

    default:
        barf("scavenge: unimplemented/strange closure type %d @ %p",
             info->type, p);
    }
    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (gct->failed_to_evac) {
        gct->failed_to_evac = rtsFalse;
        if (bd->gen_no > 0) {
            recordMutableGen_GC((StgClosure *)q, bd->gen_no);
        }
    }
  }
  if (p > bd->free)  {
      gct->copied += ws->todo_free - bd->free;
      bd->free = p;
  }

  debugTrace(DEBUG_gc, "   scavenged %ld bytes",
             (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));

  // update stats: this is a block that has been scavenged
  gct->scanned += bd->free - bd->u.scan;
  bd->u.scan = bd->free;

  if (bd != ws->todo_bd) {
      // we're not going to evac any more objects into
      // this block, so push it now.
      push_scanned_block(bd, ws);
  }

  gct->scan_bd = NULL;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    rtsBool saved_eager_promotion;

    gct->evac_gen = oldest_gen;
    saved_eager_promotion = gct->eager_promotion;

    while ((p = pop_mark_stack())) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;
        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case PRIM:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }
        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
        {
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            gct->eager_promotion = rtsFalse;

            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
        {
            // follow everything
            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
            scavengeTSO((StgTSO*)p);
            break;

        case MUT_PRIM:
        {
            StgPtr end;

            gct->eager_promotion = rtsFalse;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_gen) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
            }
        }
    } // while (p = pop_mark_stack())
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
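
/* Caller's contract, illustrated (not compiled in): scavenge_one()
 * returns rtsTrue iff some pointer could not be promoted, in which
 * case the object must stay on the mutable list:
 */
#if 0
if (scavenge_one(p)) {
    recordMutableGen_GC((StgClosure *)p, gen->no);
}
#endif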
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    rtsBool no_luck;
    rtsBool saved_eager_promotion;

    saved_eager_promotion = gct->eager_promotion;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0: // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case PRIM:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;
    case ARR_WORDS:
        // nothing to follow
        break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        gct->eager_promotion = rtsFalse;

        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
        scavengeTSO((StgTSO*)p);
        break;

    case MUT_PRIM:
    {
        StgPtr end;

        gct->eager_promotion = rtsFalse;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            evacuate((StgClosure **)p);
        }

        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);
#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->scan;
            bdescr *start_bd = gen->scan_bd;
            nat size = 0;
            scavenge(&gen);
            if (start_bd != gen->scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->scan -
                    (P_)BLOCK_ROUND_DOWN(gen->scan);
            } else {
                size = gen->scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
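
/* Illustrative sketch (not compiled in): the shape of the traversal
 * below.  Each mutable-list entry is re-examined; entries that no
 * longer point into a younger generation are dropped, and the rest
 * are scavenged and re-recorded on the new mutable list.
 */
#if 0
for (q = bd->start; q < bd->free; q++) {
    p = (StgPtr)*q;
    if (scavenge_one(p)) {
        recordMutableGen_GC((StgClosure *)p, gen->no);  // still dirty: keep it
    }
}
#endif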
static void
scavenge_mutable_list(bdescr *bd, generation *gen)
{
    StgPtr p, q;

    gct->evac_gen = gen;
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif
            // Check whether this object is "clean", that is, it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as TSOs,
            // are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            case MUT_ARR_PTRS_DIRTY:
            {
                rtsBool saved_eager_promotion;
                saved_eager_promotion = gct->eager_promotion;
                gct->eager_promotion = rtsFalse;

                scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);

                if (gct->failed_to_evac) {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
                } else {
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
                }

                gct->eager_promotion = saved_eager_promotion;
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
            }
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
                    // Should be on the mutable list because its link
                    // field is dirty.  However, in parallel GC we may
                    // have a thread on multiple mutable lists, so
                    // this assertion would be invalid:
                    // ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            // fall through

            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen->no);
            }
        }
    }
}
void
scavenge_capability_mut_lists (Capability *cap)
{
    nat g;

    /* Mutable lists from each generation > N:
     * we want to *scavenge* these roots, not evacuate them; they're not
     * going to move in this GC.
     * Also do them in reverse generation order, for the usual reason:
     * namely to reduce the likelihood of spurious old->new pointers.
     */
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
        freeChain_sync(cap->saved_mut_lists[g]);
        cap->saved_mut_lists[g] = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   We scavenge the static objects that have been evacuated so far:
   each one is unlinked from the static_objects list and moved onto
   the scavenged_static_objects list, and its pointers are scavenged
   as we go.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_gen = oldest_gen;

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        /*
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        */
        // make sure the info pointer is into text space
        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;
        switch (info->type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
            }
            break;
        }
        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
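
/* Illustrative sketch (not compiled in): the overall shape of the stack
 * walk.  The stack is a sequence of activation records, each beginning
 * with an info pointer; the frame's bitmap tells us both which words
 * are pointers and how far to advance to the next frame.
 */
#if 0
while (p < stack_end) {
    info = get_ret_itbl((StgClosure *)p);
    // scavenge this frame's pointer slots according to its bitmap,
    // then advance p past the frame to the next activation record
}
#endif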
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {
        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //     enter T
            //     blackhole T
            //                  update T
            //     GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
            // overwrite the updatee with an IND.  In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND.  Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
        {
            const StgInfoTable *i;
            StgClosure *updatee;
            nat type;

            updatee = ((StgUpdateFrame *)p)->updatee;
            i = updatee->header.info;
            if (!IS_FORWARDING_PTR(i)) {
                type = get_itbl(updatee)->type;
                if (type == IND) {
                    updatee->header.info = &stg_IND_PERM_info;
                } else if (type == IND_OLDGEN) {
                    updatee->header.info = &stg_IND_OLDGEN_PERM_info;
                }
            }
            evacuate(&((StgUpdateFrame *)p)->updatee);
            ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
            p += sizeofW(StgUpdateFrame);
            continue;
        }
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;

        case RET_BCO:
        {
            StgBCO *bco;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }
        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area.  (see StgMacros.h,
        // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen = ws->gen;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest gen that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
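
/* The search order implemented below, working from the oldest
 * generation down:
 *   1. our own partially-scanned todo block (finish what we started);
 *   2. any large objects queued for this generation;
 *   3. a fresh block from our local todo queue;
 *   4. (THREADED_RTS only) a block stolen from another GC thread.
 * Local work is preferred over stealing so that we don't take work
 * away from other threads while we still have work of our own.
 */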
static rtsBool
scavenge_find_work (void)
{
    int g;
    gen_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
        ws = &gct->gens[g];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            scavenge_block(ws->todo_bd);
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_local_todo_block(ws)) != NULL) {
            scavenge_block(bd);
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }
#if defined(THREADED_RTS)
    if (work_stealing) {
        // look for work to steal
        for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
            if ((bd = steal_todo_block(g)) != NULL) {
                scavenge_block(bd);
                did_something = rtsTrue;
                break;
            }
        }

        if (did_something) {
            did_anything = rtsTrue;
            goto loop;
        }
    }
#endif

    // only return when there is no more work to do
    return did_anything;
}
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bd != NULL && !mark_stack_empty()) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}