1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
22 #include "MarkStack.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
31 static void scavenge_stack (StgPtr p, StgPtr stack_end);
33 static void scavenge_large_bitmap (StgPtr p,
34 StgLargeBitmap *large_bitmap,
37 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
38 # define evacuate(a) evacuate1(a)
39 # define scavenge_loop(a) scavenge_loop1(a)
40 # define scavenge_block(a) scavenge_block1(a)
41 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
42 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
45 /* -----------------------------------------------------------------------------
47 -------------------------------------------------------------------------- */
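// In outline (a summary of the code below): with eager promotion
// switched off, we update the bound Task's back-pointer, evacuate the
// TSO's pointer fields (blocked_exceptions, bq, trec, stackobj, _link
// and, when the thread is blocked on a closure, block_info.closure),
// and then record in tso->dirty whether anything failed to evacuate.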
50 scavengeTSO (StgTSO *tso)
54 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
56 // update the pointer from the Task.
57 if (tso->bound != NULL) {
58 tso->bound->tso = tso;
61 saved_eager = gct->eager_promotion;
62 gct->eager_promotion = rtsFalse;
64 evacuate((StgClosure **)&tso->blocked_exceptions);
65 evacuate((StgClosure **)&tso->bq);
67 // scavenge the current transaction record
68 evacuate((StgClosure **)&tso->trec);
70 evacuate((StgClosure **)&tso->stackobj);
72 evacuate((StgClosure **)&tso->_link);
73 if ( tso->why_blocked == BlockedOnMVar
74 || tso->why_blocked == BlockedOnBlackHole
75 || tso->why_blocked == BlockedOnMsgThrowTo
76 || tso->why_blocked == NotBlocked
78 evacuate(&tso->block_info.closure);
81 // in the THREADED_RTS, block_info.closure must always point to a
82 // valid closure, because we assume this in throwTo(). In the
83 // non-threaded RTS it might be an FD (for
84 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
86 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
90 tso->dirty = gct->failed_to_evac;
92 gct->eager_promotion = saved_eager;
95 /* -----------------------------------------------------------------------------
96 Mutable arrays of pointers
97 -------------------------------------------------------------------------- */
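// Roughly: the payload of a MUT_ARR_PTRS is divided into cards of
// 2^MUT_ARR_PTRS_CARD_BITS elements, with one mark per card (see
// mutArrPtrsCard()).  scavenge_mut_arr_ptrs() visits every element and
// marks a card whenever something in it failed to evacuate;
// scavenge_mut_arr_ptrs_marked() revisits only the marked cards, which
// lets a minor GC skip the clean parts of a dirty array.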
99 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
105 any_failed = rtsFalse;
106 p = (StgPtr)&a->payload[0];
107 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
109 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
111 evacuate((StgClosure**)p);
113 if (gct->failed_to_evac) {
114 any_failed = rtsTrue;
115 *mutArrPtrsCard(a,m) = 1;
116 gct->failed_to_evac = rtsFalse;
118 *mutArrPtrsCard(a,m) = 0;
122 q = (StgPtr)&a->payload[a->ptrs];
125 evacuate((StgClosure**)p);
127 if (gct->failed_to_evac) {
128 any_failed = rtsTrue;
129 *mutArrPtrsCard(a,m) = 1;
130 gct->failed_to_evac = rtsFalse;
132 *mutArrPtrsCard(a,m) = 0;
136 gct->failed_to_evac = any_failed;
137 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
140 // scavenge only the marked areas of a MUT_ARR_PTRS
141 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
147 any_failed = rtsFalse;
148 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
150 if (*mutArrPtrsCard(a,m) != 0) {
151 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
152 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
153 (StgPtr)&a->payload[a->ptrs]);
155 evacuate((StgClosure**)p);
157 if (gct->failed_to_evac) {
158 any_failed = rtsTrue;
159 gct->failed_to_evac = rtsFalse;
161 *mutArrPtrsCard(a,m) = 0;
166 gct->failed_to_evac = any_failed;
167 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
170 /* -----------------------------------------------------------------------------
171 Blocks of function args occur on the stack (at the top) and in PAPs.
173 -------------------------------------------------------------------------- */
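// The shape of an argument block comes from the function's info table:
// either a small bitmap (fun_info->f.b.bitmap), a large bitmap
// (GET_FUN_LARGE_BITMAP), or one of the canned bitmaps in
// stg_arg_bitmaps[] indexed by the function's arg type.  A clear bitmap
// bit means "pointer", so the words whose bit is 0 are the ones we
// evacuate.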
176 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
183 switch (fun_info->f.fun_type) {
185 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
186 size = BITMAP_SIZE(fun_info->f.b.bitmap);
189 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
190 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
194 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
195 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
198 if ((bitmap & 1) == 0) {
199 evacuate((StgClosure **)p);
202 bitmap = bitmap >> 1;
210 STATIC_INLINE GNUC_ATTR_HOT StgPtr
211 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
215 StgFunInfoTable *fun_info;
217 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
218 ASSERT(fun_info->i.type != PAP);
221 switch (fun_info->f.fun_type) {
223 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
226 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
230 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
234 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
237 if ((bitmap & 1) == 0) {
238 evacuate((StgClosure **)p);
241 bitmap = bitmap >> 1;
249 STATIC_INLINE GNUC_ATTR_HOT StgPtr
250 scavenge_PAP (StgPAP *pap)
253 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
257 scavenge_AP (StgAP *ap)
260 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
263 /* -----------------------------------------------------------------------------
265 -------------------------------------------------------------------------- */
267 /* Similar to scavenge_large_bitmap(), but we don't write back the
268 * pointers we get back from evacuate().
271 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
278 bitmap = large_srt->l.bitmap[b];
279 size = (nat)large_srt->l.size;
280 p = (StgClosure **)large_srt->srt;
281 for (i = 0; i < size; ) {
282 if ((bitmap & 1) != 0) {
287 if (i % BITS_IN(W_) == 0) {
289 bitmap = large_srt->l.bitmap[b];
291 bitmap = bitmap >> 1;
296 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
297 * srt field in the info table. That's ok, because we'll
298 * never dereference it.
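 *
 * srt_bitmap is a bitmap over the SRT entries: a set bit marks an entry
 * to evacuate, and the special value (StgHalfWord)(-1) means the SRT is
 * described by a StgLargeSRT instead, handled by
 * scavenge_large_srt_bitmap() above.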
300 STATIC_INLINE GNUC_ATTR_HOT void
301 scavenge_srt (StgClosure **srt, nat srt_bitmap)
309 if (bitmap == (StgHalfWord)(-1)) {
310 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
314 while (bitmap != 0) {
315 if ((bitmap & 1) != 0) {
316 #if defined(__PIC__) && defined(mingw32_HOST_OS)
317 // Special-case to handle references to closures hiding out in DLLs, since
318 // double indirections are required to get at them.  The code generator knows
319 // which is which when generating the SRT, so it stores the (indirect)
320 // reference to the DLL closure in the table by first adding one to it.
321 // We check for this here, and undo the addition before evacuating it.
323 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
324 // closure that's fixed at link-time, and no extra magic is required.
325 if ( (unsigned long)(*srt) & 0x1 ) {
326 evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
335 bitmap = bitmap >> 1;
340 STATIC_INLINE GNUC_ATTR_HOT void
341 scavenge_thunk_srt(const StgInfoTable *info)
343 StgThunkInfoTable *thunk_info;
345 if (!major_gc) return;
347 thunk_info = itbl_to_thunk_itbl(info);
348 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
351 STATIC_INLINE GNUC_ATTR_HOT void
352 scavenge_fun_srt(const StgInfoTable *info)
354 StgFunInfoTable *fun_info;
356 if (!major_gc) return;
358 fun_info = itbl_to_fun_itbl(info);
359 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
362 /* -----------------------------------------------------------------------------
363 Scavenge a block from the given scan pointer up to bd->free.
365 evac_gen is set by the caller to be either zero (for a step in a
366 generation < N) or G where G is the generation of the step being
369 We sometimes temporarily change evac_gen back to zero if we're
370 scavenging a mutable object where eager promotion isn't such a good
372 -------------------------------------------------------------------------- */
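// Within the scan loop below, p always points at the start of the
// closure being scanned; each case must advance p past that closure so
// that the next iteration sees the following object in the block.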
374 static GNUC_ATTR_HOT void
375 scavenge_block (bdescr *bd)
379 rtsBool saved_eager_promotion;
382 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
383 bd->start, bd->gen_no, bd->u.scan);
386 gct->evac_gen = bd->gen;
387 saved_eager_promotion = gct->eager_promotion;
388 gct->failed_to_evac = rtsFalse;
390 ws = &gct->gens[bd->gen->no];
394 // we might be evacuating into the very object that we're
395 // scavenging, so we have to check the real bd->free pointer each
396 // time around the loop.
397 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
399 ASSERT(bd->link == NULL);
400 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
401 info = get_itbl((StgClosure *)p);
403 ASSERT(gct->thunk_selector_depth == 0);
406 switch (info->type) {
411 StgMVar *mvar = ((StgMVar *)p);
412 gct->eager_promotion = rtsFalse;
413 evacuate((StgClosure **)&mvar->head);
414 evacuate((StgClosure **)&mvar->tail);
415 evacuate((StgClosure **)&mvar->value);
416 gct->eager_promotion = saved_eager_promotion;
418 if (gct->failed_to_evac) {
419 mvar->header.info = &stg_MVAR_DIRTY_info;
421 mvar->header.info = &stg_MVAR_CLEAN_info;
423 p += sizeofW(StgMVar);
428 scavenge_fun_srt(info);
429 evacuate(&((StgClosure *)p)->payload[1]);
430 evacuate(&((StgClosure *)p)->payload[0]);
431 p += sizeofW(StgHeader) + 2;
435 scavenge_thunk_srt(info);
436 evacuate(&((StgThunk *)p)->payload[1]);
437 evacuate(&((StgThunk *)p)->payload[0]);
438 p += sizeofW(StgThunk) + 2;
442 evacuate(&((StgClosure *)p)->payload[1]);
443 evacuate(&((StgClosure *)p)->payload[0]);
444 p += sizeofW(StgHeader) + 2;
448 scavenge_thunk_srt(info);
449 evacuate(&((StgThunk *)p)->payload[0]);
450 p += sizeofW(StgThunk) + 1;
454 scavenge_fun_srt(info);
456 evacuate(&((StgClosure *)p)->payload[0]);
457 p += sizeofW(StgHeader) + 1;
461 scavenge_thunk_srt(info);
462 p += sizeofW(StgThunk) + 1;
466 scavenge_fun_srt(info);
468 p += sizeofW(StgHeader) + 1;
472 scavenge_thunk_srt(info);
473 p += sizeofW(StgThunk) + 2;
477 scavenge_fun_srt(info);
479 p += sizeofW(StgHeader) + 2;
483 scavenge_thunk_srt(info);
484 evacuate(&((StgThunk *)p)->payload[0]);
485 p += sizeofW(StgThunk) + 2;
489 scavenge_fun_srt(info);
491 evacuate(&((StgClosure *)p)->payload[0]);
492 p += sizeofW(StgHeader) + 2;
496 scavenge_fun_srt(info);
503 scavenge_thunk_srt(info);
504 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
505 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
506 evacuate((StgClosure **)p);
508 p += info->layout.payload.nptrs;
519 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
520 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
521 evacuate((StgClosure **)p);
523 p += info->layout.payload.nptrs;
528 StgBCO *bco = (StgBCO *)p;
529 evacuate((StgClosure **)&bco->instrs);
530 evacuate((StgClosure **)&bco->literals);
531 evacuate((StgClosure **)&bco->ptrs);
538 evacuate(&((StgInd *)p)->indirectee);
539 p += sizeofW(StgInd);
544 gct->eager_promotion = rtsFalse;
545 evacuate(&((StgMutVar *)p)->var);
546 gct->eager_promotion = saved_eager_promotion;
548 if (gct->failed_to_evac) {
549 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
551 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
553 p += sizeofW(StgMutVar);
558 StgBlockingQueue *bq = (StgBlockingQueue *)p;
560 gct->eager_promotion = rtsFalse;
562 evacuate((StgClosure**)&bq->owner);
563 evacuate((StgClosure**)&bq->queue);
564 evacuate((StgClosure**)&bq->link);
565 gct->eager_promotion = saved_eager_promotion;
567 if (gct->failed_to_evac) {
568 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
570 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
572 p += sizeofW(StgBlockingQueue);
578 StgSelector *s = (StgSelector *)p;
579 evacuate(&s->selectee);
580 p += THUNK_SELECTOR_sizeW();
584 // A chunk of stack saved in a heap object
587 StgAP_STACK *ap = (StgAP_STACK *)p;
590 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
591 p = (StgPtr)ap->payload + ap->size;
596 p = scavenge_PAP((StgPAP *)p);
600 p = scavenge_AP((StgAP *)p);
605 p += arr_words_sizeW((StgArrWords *)p);
608 case MUT_ARR_PTRS_CLEAN:
609 case MUT_ARR_PTRS_DIRTY:
611 // We don't eagerly promote objects pointed to by a mutable
612 // array, but if we find the array only points to objects in
613 // the same or an older generation, we mark it "clean" and
614 // avoid traversing it during minor GCs.
615 gct->eager_promotion = rtsFalse;
617 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
619 if (gct->failed_to_evac) {
620 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
622 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
625 gct->eager_promotion = saved_eager_promotion;
626 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
630 case MUT_ARR_PTRS_FROZEN:
631 case MUT_ARR_PTRS_FROZEN0:
634 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
636 // If we're going to put this object on the mutable list, then
637 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
638 if (gct->failed_to_evac) {
639 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
641 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
648 scavengeTSO((StgTSO *)p);
649 p += sizeofW(StgTSO);
655 StgStack *stack = (StgStack*)p;
657 gct->eager_promotion = rtsFalse;
659 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
660 stack->dirty = gct->failed_to_evac;
661 p += stack_sizeW(stack);
663 gct->eager_promotion = saved_eager_promotion;
671 gct->eager_promotion = rtsFalse;
673 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
674 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
675 evacuate((StgClosure **)p);
677 p += info->layout.payload.nptrs;
679 gct->eager_promotion = saved_eager_promotion;
680 gct->failed_to_evac = rtsTrue; // mutable
687 StgTRecChunk *tc = ((StgTRecChunk *) p);
688 TRecEntry *e = &(tc -> entries[0]);
689 gct->eager_promotion = rtsFalse;
690 evacuate((StgClosure **)&tc->prev_chunk);
691 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
692 evacuate((StgClosure **)&e->tvar);
693 evacuate((StgClosure **)&e->expected_value);
694 evacuate((StgClosure **)&e->new_value);
696 gct->eager_promotion = saved_eager_promotion;
697 gct->failed_to_evac = rtsTrue; // mutable
698 p += sizeofW(StgTRecChunk);
703 barf("scavenge: unimplemented/strange closure type %d @ %p",
708 * We need to record the current object on the mutable list if
709 * (a) It is actually mutable, or
710 * (b) It contains pointers to a younger generation.
711 * Case (b) arises if we didn't manage to promote everything that
712 * the current object points to into the current generation.
714 if (gct->failed_to_evac) {
715 gct->failed_to_evac = rtsFalse;
716 if (bd->gen_no > 0) {
717 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
723 gct->copied += ws->todo_free - bd->free;
727 debugTrace(DEBUG_gc, " scavenged %ld bytes",
728 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
730 // update stats: this is a block that has been scavenged
731 gct->scanned += bd->free - bd->u.scan;
732 bd->u.scan = bd->free;
734 if (bd != ws->todo_bd) {
735 // we're not going to evac any more objects into
736 // this block, so push it now.
737 push_scanned_block(bd, ws);
742 /* -----------------------------------------------------------------------------
743 Scavenge everything on the mark stack.
745 This is slightly different from scavenge():
746 - we don't walk linearly through the objects, so the scavenger
747 doesn't need to advance the pointer on to the next object.
748 -------------------------------------------------------------------------- */
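// Objects popped from the mark stack live in the generation being
// compacted, so evac_gen is set to oldest_gen; anything that still
// points into a younger generation after scavenging is recorded on the
// oldest generation's mutable list (see the failed_to_evac check at the
// bottom of the loop).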
751 scavenge_mark_stack(void)
755 rtsBool saved_eager_promotion;
757 gct->evac_gen = oldest_gen;
758 saved_eager_promotion = gct->eager_promotion;
760 while ((p = pop_mark_stack())) {
762 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
763 info = get_itbl((StgClosure *)p);
766 switch (info->type) {
771 StgMVar *mvar = ((StgMVar *)p);
772 gct->eager_promotion = rtsFalse;
773 evacuate((StgClosure **)&mvar->head);
774 evacuate((StgClosure **)&mvar->tail);
775 evacuate((StgClosure **)&mvar->value);
776 gct->eager_promotion = saved_eager_promotion;
778 if (gct->failed_to_evac) {
779 mvar->header.info = &stg_MVAR_DIRTY_info;
781 mvar->header.info = &stg_MVAR_CLEAN_info;
787 scavenge_fun_srt(info);
788 evacuate(&((StgClosure *)p)->payload[1]);
789 evacuate(&((StgClosure *)p)->payload[0]);
793 scavenge_thunk_srt(info);
794 evacuate(&((StgThunk *)p)->payload[1]);
795 evacuate(&((StgThunk *)p)->payload[0]);
799 evacuate(&((StgClosure *)p)->payload[1]);
800 evacuate(&((StgClosure *)p)->payload[0]);
805 scavenge_fun_srt(info);
806 evacuate(&((StgClosure *)p)->payload[0]);
811 scavenge_thunk_srt(info);
812 evacuate(&((StgThunk *)p)->payload[0]);
817 evacuate(&((StgClosure *)p)->payload[0]);
822 scavenge_fun_srt(info);
827 scavenge_thunk_srt(info);
835 scavenge_fun_srt(info);
842 scavenge_thunk_srt(info);
843 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
844 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
845 evacuate((StgClosure **)p);
857 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
858 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
859 evacuate((StgClosure **)p);
865 StgBCO *bco = (StgBCO *)p;
866 evacuate((StgClosure **)&bco->instrs);
867 evacuate((StgClosure **)&bco->literals);
868 evacuate((StgClosure **)&bco->ptrs);
873 // don't need to do anything here: the only possible case
874 // is that we're in a 1-space compacting collector, with
875 // no "old" generation.
880 evacuate(&((StgInd *)p)->indirectee);
884 case MUT_VAR_DIRTY: {
885 gct->eager_promotion = rtsFalse;
886 evacuate(&((StgMutVar *)p)->var);
887 gct->eager_promotion = saved_eager_promotion;
889 if (gct->failed_to_evac) {
890 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
892 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
899 StgBlockingQueue *bq = (StgBlockingQueue *)p;
901 gct->eager_promotion = rtsFalse;
903 evacuate((StgClosure**)&bq->owner);
904 evacuate((StgClosure**)&bq->queue);
905 evacuate((StgClosure**)&bq->link);
906 gct->eager_promotion = saved_eager_promotion;
908 if (gct->failed_to_evac) {
909 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
911 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
921 StgSelector *s = (StgSelector *)p;
922 evacuate(&s->selectee);
926 // A chunk of stack saved in a heap object
929 StgAP_STACK *ap = (StgAP_STACK *)p;
932 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
937 scavenge_PAP((StgPAP *)p);
941 scavenge_AP((StgAP *)p);
944 case MUT_ARR_PTRS_CLEAN:
945 case MUT_ARR_PTRS_DIRTY:
948 // We don't eagerly promote objects pointed to by a mutable
949 // array, but if we find the array only points to objects in
950 // the same or an older generation, we mark it "clean" and
951 // avoid traversing it during minor GCs.
952 gct->eager_promotion = rtsFalse;
954 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
956 if (gct->failed_to_evac) {
957 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
959 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
962 gct->eager_promotion = saved_eager_promotion;
963 gct->failed_to_evac = rtsTrue; // mutable anyhow.
967 case MUT_ARR_PTRS_FROZEN:
968 case MUT_ARR_PTRS_FROZEN0:
973 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
975 // If we're going to put this object on the mutable list, then
976 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
977 if (gct->failed_to_evac) {
978 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
980 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
987 scavengeTSO((StgTSO*)p);
993 StgStack *stack = (StgStack*)p;
995 gct->eager_promotion = rtsFalse;
997 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
998 stack->dirty = gct->failed_to_evac;
1000 gct->eager_promotion = saved_eager_promotion;
1008 gct->eager_promotion = rtsFalse;
1010 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1011 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1012 evacuate((StgClosure **)p);
1015 gct->eager_promotion = saved_eager_promotion;
1016 gct->failed_to_evac = rtsTrue; // mutable
1023 StgTRecChunk *tc = ((StgTRecChunk *) p);
1024 TRecEntry *e = &(tc -> entries[0]);
1025 gct->eager_promotion = rtsFalse;
1026 evacuate((StgClosure **)&tc->prev_chunk);
1027 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1028 evacuate((StgClosure **)&e->tvar);
1029 evacuate((StgClosure **)&e->expected_value);
1030 evacuate((StgClosure **)&e->new_value);
1032 gct->eager_promotion = saved_eager_promotion;
1033 gct->failed_to_evac = rtsTrue; // mutable
1038 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
1042 if (gct->failed_to_evac) {
1043 gct->failed_to_evac = rtsFalse;
1044 if (gct->evac_gen) {
1045 recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
1048 } // while (p = pop_mark_stack())
1051 /* -----------------------------------------------------------------------------
1052 Scavenge one object.
1054 This is used for objects that are temporarily marked as mutable
1055 because they contain old-to-new generation pointers. Only certain
1056 objects can have this property.
1057 -------------------------------------------------------------------------- */
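// scavenge_one() reports, via its return value, whether the object
// still points into a younger generation after scavenging (i.e.
// failed_to_evac was set); callers such as scavenge_mutable_list() and
// scavenge_large() use this to decide whether to keep the object on the
// mutable list.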
1060 scavenge_one(StgPtr p)
1062 const StgInfoTable *info;
1064 rtsBool saved_eager_promotion;
1066 saved_eager_promotion = gct->eager_promotion;
1068 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1069 info = get_itbl((StgClosure *)p);
1071 switch (info->type) {
1076 StgMVar *mvar = ((StgMVar *)p);
1077 gct->eager_promotion = rtsFalse;
1078 evacuate((StgClosure **)&mvar->head);
1079 evacuate((StgClosure **)&mvar->tail);
1080 evacuate((StgClosure **)&mvar->value);
1081 gct->eager_promotion = saved_eager_promotion;
1083 if (gct->failed_to_evac) {
1084 mvar->header.info = &stg_MVAR_DIRTY_info;
1086 mvar->header.info = &stg_MVAR_CLEAN_info;
1100 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1101 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1102 evacuate((StgClosure **)q);
1108 case FUN_1_0: // hardly worth specialising these guys
1125 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1126 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1127 evacuate((StgClosure **)q);
1133 case MUT_VAR_DIRTY: {
1136 gct->eager_promotion = rtsFalse;
1137 evacuate(&((StgMutVar *)p)->var);
1138 gct->eager_promotion = saved_eager_promotion;
1140 if (gct->failed_to_evac) {
1141 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1143 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1148 case BLOCKING_QUEUE:
1150 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1152 gct->eager_promotion = rtsFalse;
1154 evacuate((StgClosure**)&bq->owner);
1155 evacuate((StgClosure**)&bq->queue);
1156 evacuate((StgClosure**)&bq->link);
1157 gct->eager_promotion = saved_eager_promotion;
1159 if (gct->failed_to_evac) {
1160 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1162 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1167 case THUNK_SELECTOR:
1169 StgSelector *s = (StgSelector *)p;
1170 evacuate(&s->selectee);
1176 StgAP_STACK *ap = (StgAP_STACK *)p;
1179 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1180 p = (StgPtr)ap->payload + ap->size;
1185 p = scavenge_PAP((StgPAP *)p);
1189 p = scavenge_AP((StgAP *)p);
1193 // nothing to follow
1196 case MUT_ARR_PTRS_CLEAN:
1197 case MUT_ARR_PTRS_DIRTY:
1199 // We don't eagerly promote objects pointed to by a mutable
1200 // array, but if we find the array only points to objects in
1201 // the same or an older generation, we mark it "clean" and
1202 // avoid traversing it during minor GCs.
1203 gct->eager_promotion = rtsFalse;
1205 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1207 if (gct->failed_to_evac) {
1208 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1210 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1213 gct->eager_promotion = saved_eager_promotion;
1214 gct->failed_to_evac = rtsTrue;
1218 case MUT_ARR_PTRS_FROZEN:
1219 case MUT_ARR_PTRS_FROZEN0:
1221 // follow everything
1222 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1224 // If we're going to put this object on the mutable list, then
1225 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1226 if (gct->failed_to_evac) {
1227 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1229 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1236 scavengeTSO((StgTSO*)p);
1242 StgStack *stack = (StgStack*)p;
1244 gct->eager_promotion = rtsFalse;
1246 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1247 stack->dirty = gct->failed_to_evac;
1249 gct->eager_promotion = saved_eager_promotion;
1257 gct->eager_promotion = rtsFalse;
1259 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1260 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1261 evacuate((StgClosure **)p);
1264 gct->eager_promotion = saved_eager_promotion;
1265 gct->failed_to_evac = rtsTrue; // mutable
1273 StgTRecChunk *tc = ((StgTRecChunk *) p);
1274 TRecEntry *e = &(tc -> entries[0]);
1275 gct->eager_promotion = rtsFalse;
1276 evacuate((StgClosure **)&tc->prev_chunk);
1277 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1278 evacuate((StgClosure **)&e->tvar);
1279 evacuate((StgClosure **)&e->expected_value);
1280 evacuate((StgClosure **)&e->new_value);
1282 gct->eager_promotion = saved_eager_promotion;
1283 gct->failed_to_evac = rtsTrue; // mutable
1288 // IND can happen, for example, when the interpreter allocates
1289 // a gigantic AP closure (more than one block), which ends up
1290 // on the large-object list and then gets updated. See #3424.
1293 evacuate(&((StgInd *)p)->indirectee);
1295 #if 0 && defined(DEBUG)
1296 if (RtsFlags.DebugFlags.gc)
1297 /* Debugging code to print out the size of the thing we just
1301 StgPtr start = gen->scan;
1302 bdescr *start_bd = gen->scan_bd;
1305 if (start_bd != gen->scan_bd) {
1306 size += (P_)BLOCK_ROUND_UP(start) - start;
1307 start_bd = start_bd->link;
1308 while (start_bd != gen->scan_bd) {
1309 size += BLOCK_SIZE_W;
1310 start_bd = start_bd->link;
1313 (P_)BLOCK_ROUND_DOWN(gen->scan);
1315 size = gen->scan - start;
1317 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1323 barf("scavenge_one: strange object %d", (int)(info->type));
1326 no_luck = gct->failed_to_evac;
1327 gct->failed_to_evac = rtsFalse;
1331 /* -----------------------------------------------------------------------------
1332 Scavenging mutable lists.
1334 We treat the mutable list of each generation > N (i.e. all the
1335 generations older than the one being collected) as roots. We also
1336 remove non-mutable objects from the mutable list at this point.
1337 -------------------------------------------------------------------------- */
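// Each mut_list block holds a sequence of closure pointers between
// bd->start and bd->free.  For each entry we either scavenge the object
// here (the MUT_ARR_PTRS_CLEAN/DIRTY fast paths below) or fall back to
// scavenge_one(); if the object still points into a younger generation
// it is put back on the new mutable list with recordMutableGen_GC().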
1340 scavenge_mutable_list(bdescr *bd, generation *gen)
1344 gct->evac_gen = gen;
1345 for (; bd != NULL; bd = bd->link) {
1346 for (q = bd->start; q < bd->free; q++) {
1348 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1351 switch (get_itbl((StgClosure *)p)->type) {
1353 // can happen due to concurrent writeMutVars
1355 mutlist_MUTVARS++; break;
1356 case MUT_ARR_PTRS_CLEAN:
1357 case MUT_ARR_PTRS_DIRTY:
1358 case MUT_ARR_PTRS_FROZEN:
1359 case MUT_ARR_PTRS_FROZEN0:
1360 mutlist_MUTARRS++; break;
1362 barf("MVAR_CLEAN on mutable list");
1364 mutlist_MVARS++; break;
1366 mutlist_OTHERS++; break;
1370 // Check whether this object is "clean", that is, it
1371 // definitely doesn't point into a young generation.
1372 // Clean objects don't need to be scavenged. Some clean
1373 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1374 // list at all; others, such as TSO,
1375 // are always on the mutable list.
1377 switch (get_itbl((StgClosure *)p)->type) {
1378 case MUT_ARR_PTRS_CLEAN:
1379 recordMutableGen_GC((StgClosure *)p,gen->no);
1381 case MUT_ARR_PTRS_DIRTY:
1383 rtsBool saved_eager_promotion;
1384 saved_eager_promotion = gct->eager_promotion;
1385 gct->eager_promotion = rtsFalse;
1387 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1389 if (gct->failed_to_evac) {
1390 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1392 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1395 gct->eager_promotion = saved_eager_promotion;
1396 gct->failed_to_evac = rtsFalse;
1397 recordMutableGen_GC((StgClosure *)p,gen->no);
1404 if (scavenge_one(p)) {
1405 // didn't manage to promote everything, so put the
1406 // object back on the list.
1407 recordMutableGen_GC((StgClosure *)p,gen->no);
1414 scavenge_capability_mut_lists (Capability *cap)
1418 /* Mutable lists from each generation > N
1419 * we want to *scavenge* these roots, not evacuate them: they're not
1420 * going to move in this GC.
1421 * Also do them in reverse generation order, for the usual reason:
1422 * namely to reduce the likelihood of spurious old->new pointers.
1424 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1425 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1426 freeChain_sync(cap->saved_mut_lists[g]);
1427 cap->saved_mut_lists[g] = NULL;
1431 /* -----------------------------------------------------------------------------
1432 Scavenging the static objects.
1434 We walk the gct->static_objects list, evacuating the pointers that
1435 each static closure contains and moving the closure onto the
1436 scavenged_static_objects list as we go.
1437 -------------------------------------------------------------------------- */
1440 scavenge_static(void)
1443 const StgInfoTable *info;
1445 debugTrace(DEBUG_gc, "scavenging static objects");
1447 /* Always evacuate straight to the oldest generation for static
1449 gct->evac_gen = oldest_gen;
1451 /* keep going until we've scavenged all the objects on the linked
1456 /* get the next static object from the list. Remember, there might
1457 * be more stuff on this list after each evacuation...
1458 * (evacuations may add more objects to gct->static_objects)
1460 p = gct->static_objects;
1461 if (p == END_OF_STATIC_LIST) {
1465 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1468 if (info->type==RBH)
1469 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1471 // make sure the info pointer is into text space
1473 /* Take this object *off* the static_objects list,
1474 * and put it on the scavenged_static_objects list.
1476 gct->static_objects = *STATIC_LINK(info,p);
1477 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1478 gct->scavenged_static_objects = p;
1480 switch (info -> type) {
1484 StgInd *ind = (StgInd *)p;
1485 evacuate(&ind->indirectee);
1487 /* might fail to evacuate it, in which case we have to pop it
1488 * back on the mutable list of the oldest generation. We
1489 * leave it *on* the scavenged_static_objects list, though,
1490 * in case we visit this object again.
1492 if (gct->failed_to_evac) {
1493 gct->failed_to_evac = rtsFalse;
1494 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1500 scavenge_thunk_srt(info);
1504 scavenge_fun_srt(info);
1511 next = (P_)p->payload + info->layout.payload.ptrs;
1512 // evacuate the pointers
1513 for (q = (P_)p->payload; q < next; q++) {
1514 evacuate((StgClosure **)q);
1520 barf("scavenge_static: strange closure %d", (int)(info->type));
1523 ASSERT(gct->failed_to_evac == rtsFalse);
1527 /* -----------------------------------------------------------------------------
1528 scavenge a chunk of memory described by a bitmap
1529 -------------------------------------------------------------------------- */
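// Bitmap convention, as used below: the bitmap is consumed a word at a
// time, and a clear bit means the corresponding word is a pointer and
// gets evacuated; set bits are non-pointers and are skipped.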
1532 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1539 for (i = 0; i < size; b++) {
1540 bitmap = large_bitmap->bitmap[b];
1541 j = stg_min(size-i, BITS_IN(W_));
1543 for (; j > 0; j--, p++) {
1544 if ((bitmap & 1) == 0) {
1545 evacuate((StgClosure **)p);
1547 bitmap = bitmap >> 1;
1552 STATIC_INLINE StgPtr
1553 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1556 if ((bitmap & 1) == 0) {
1557 evacuate((StgClosure **)p);
1560 bitmap = bitmap >> 1;
1566 /* -----------------------------------------------------------------------------
1567 scavenge_stack walks over a section of stack and evacuates all the
1568 objects pointed to by it. We can use the same code for walking
1569 AP_STACK_UPDs, since these are just sections of copied stack.
1570 -------------------------------------------------------------------------- */
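// The loop below dispatches on each activation record's return info
// table: update frames (StgUpdateFrame) are special-cased (see the long
// note), most frames are described by a small bitmap in their info
// table, BCO return frames use the BCO's own bitmap, large-bitmap
// frames use GET_LARGE_BITMAP, StgRetDyn frames carry their liveness
// mask on the stack, and StgRetFun frames defer to the called
// function's argument bitmap via scavenge_arg_block().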
1573 scavenge_stack(StgPtr p, StgPtr stack_end)
1575 const StgRetInfoTable* info;
1580 * Each time around this loop, we are looking at a chunk of stack
1581 * that starts with an activation record.
1584 while (p < stack_end) {
1585 info = get_ret_itbl((StgClosure *)p);
1587 switch (info->i.type) {
1590 // In SMP, we can get update frames that point to indirections
1591 // when two threads evaluate the same thunk. We do attempt to
1592 // discover this situation in threadPaused(), but it's
1593 // possible that the following sequence occurs:
1602 // Now T is an indirection, and the update frame is already
1603 // marked on A's stack, so we won't traverse it again in
1604 // threadPaused(). We could traverse the whole stack again
1605 // before GC, but that seems like overkill.
1607 // Scavenging this update frame as normal would be disastrous;
1608 // the updatee would end up pointing to the value. So we
1609 // check whether the value after evacuation is a BLACKHOLE,
1610 // and if not, we change the update frame to an stg_enter
1611 // frame that simply returns the value. Hence, blackholing is
1612 // compulsory (otherwise we would have to check for thunks
1615 // Note [upd-black-hole]
1616 // One slight hiccup is that the THUNK_SELECTOR machinery can
1617 // overwrite the updatee with an IND. In parallel GC, this
1618 // could even be happening concurrently, so we can't check for
1619 // the IND. Fortunately if we assume that blackholing is
1620 // happening (either lazy or eager), then we can be sure that
1621 // the updatee is never a THUNK_SELECTOR and we're ok.
1622 // NB. this is a new invariant: blackholing is not optional.
1624 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1627 evacuate(&frame->updatee);
1629 if (GET_CLOSURE_TAG(v) != 0 ||
1630 (get_itbl(v)->type != BLACKHOLE)) {
1631 // blackholing is compulsory, see above.
1632 frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
1634 ASSERT(v->header.info != &stg_TSO_info);
1635 p += sizeofW(StgUpdateFrame);
1639 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
1640 case CATCH_STM_FRAME:
1641 case CATCH_RETRY_FRAME:
1642 case ATOMICALLY_FRAME:
1643 case UNDERFLOW_FRAME:
1647 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1648 size = BITMAP_SIZE(info->i.layout.bitmap);
1649 // NOTE: the payload starts immediately after the info-ptr, we
1650 // don't have an StgHeader in the same sense as a heap closure.
1652 p = scavenge_small_bitmap(p, size, bitmap);
1656 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1664 evacuate((StgClosure **)p);
1667 size = BCO_BITMAP_SIZE(bco);
1668 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1673 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1678 size = GET_LARGE_BITMAP(&info->i)->size;
1680 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1682 // and don't forget to follow the SRT
1686 // Dynamic bitmap: the mask is stored on the stack, and
1687 // there are a number of non-pointers followed by a number
1688 // of pointers above the bitmapped area. (see StgMacros.h,
1693 dyn = ((StgRetDyn *)p)->liveness;
1695 // traverse the bitmap first
1696 bitmap = RET_DYN_LIVENESS(dyn);
1697 p = (P_)&((StgRetDyn *)p)->payload[0];
1698 size = RET_DYN_BITMAP_SIZE;
1699 p = scavenge_small_bitmap(p, size, bitmap);
1701 // skip over the non-ptr words
1702 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1704 // follow the ptr words
1705 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1706 evacuate((StgClosure **)p);
1714 StgRetFun *ret_fun = (StgRetFun *)p;
1715 StgFunInfoTable *fun_info;
1717 evacuate(&ret_fun->fun);
1718 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1719 p = scavenge_arg_block(fun_info, ret_fun->payload);
1724 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1729 /*-----------------------------------------------------------------------------
1730 scavenge the large object list.
1732 evac_gen set by caller; similar games played with evac_gen as with
1733 scavenge() - see comment at the top of scavenge(). Most large
1734 objects are (repeatedly) mutable, so most of the time evac_gen will
1736 --------------------------------------------------------------------------- */
1739 scavenge_large (gen_workspace *ws)
1744 gct->evac_gen = ws->gen;
1746 bd = ws->todo_large_objects;
1748 for (; bd != NULL; bd = ws->todo_large_objects) {
1750 // take this object *off* the large objects list and put it on
1751 // the scavenged large objects list. This is so that we can
1752 // treat new_large_objects as a stack and push new objects on
1753 // the front when evacuating.
1754 ws->todo_large_objects = bd->link;
1756 ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
1757 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1758 ws->gen->n_scavenged_large_blocks += bd->blocks;
1759 RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);
1762 if (scavenge_one(p)) {
1763 if (ws->gen->no > 0) {
1764 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
1769 gct->scanned += closure_sizeW((StgClosure*)p);
1773 /* ----------------------------------------------------------------------------
1774 Look for work to do.
1776 We look for the oldest gen that has either a todo block that can
1777 be scanned, or a block of work on the global queue that we can
1780 It is important to take work from the *oldest* generation that
1781 has work available, because that minimizes the likelihood of
1782 evacuating objects into a young generation when they should have
1783 been eagerly promoted. This really does make a difference (the
1784 cacheprof benchmark is one that is affected).
1786 We also want to scan the todo block if possible before grabbing
1787 work from the global queue, the reason being that we don't want to
1788 steal work from the global queue and starve other threads if there
1789 is other work we can usefully be doing.
1790 ------------------------------------------------------------------------- */
1793 scavenge_find_work (void)
1797 rtsBool did_something, did_anything;
1800 gct->scav_find_work++;
1802 did_anything = rtsFalse;
1805 did_something = rtsFalse;
1806 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1809 gct->scan_bd = NULL;
1811 // If we have a scan block with some work to do,
1812 // scavenge everything up to the free pointer.
1813 if (ws->todo_bd->u.scan < ws->todo_free)
1815 scavenge_block(ws->todo_bd);
1816 did_something = rtsTrue;
1820 // If we have any large objects to scavenge, do them now.
1821 if (ws->todo_large_objects) {
1823 did_something = rtsTrue;
1827 if ((bd = grab_local_todo_block(ws)) != NULL) {
1829 did_something = rtsTrue;
1834 if (did_something) {
1835 did_anything = rtsTrue;
1839 #if defined(THREADED_RTS)
1840 if (work_stealing) {
1841 // look for work to steal
1842 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1843 if ((bd = steal_todo_block(g)) != NULL) {
1845 did_something = rtsTrue;
1850 if (did_something) {
1851 did_anything = rtsTrue;
1857 // only return when there is no more work to do
1859 return did_anything;
1862 /* ----------------------------------------------------------------------------
1863 Scavenge until we can't find anything more to scavenge.
1864 ------------------------------------------------------------------------- */
1872 work_to_do = rtsFalse;
1874 // scavenge static objects
1875 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
1876 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
1880 // scavenge objects in compacted generation
1881 if (mark_stack_bd != NULL && !mark_stack_empty()) {
1882 scavenge_mark_stack();
1883 work_to_do = rtsTrue;
1886 // Order is important here: we want to deal in full blocks as
1887 // much as possible, so go for global work in preference to
1888 // local work. Only if all the global work has been exhausted
1889 // do we start scavenging the fragments of blocks in the local
1891 if (scavenge_find_work()) goto loop;
1893 if (work_to_do) goto loop;