1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
22 #include "MarkStack.h"
28 #include "Capability.h"
29 #include "LdvProfile.h"
31 static void scavenge_stack (StgPtr p, StgPtr stack_end);
33 static void scavenge_large_bitmap (StgPtr p,
34 StgLargeBitmap *large_bitmap,
37 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
38 # define evacuate(a) evacuate1(a)
39 # define scavenge_loop(a) scavenge_loop1(a)
40 # define scavenge_block(a) scavenge_block1(a)
41 # define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
42 # define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
45 /* -----------------------------------------------------------------------------
47 -------------------------------------------------------------------------- */
/* Scavenge a TSO (thread state object): evacuate every GC pointer it
 * holds. TSOs are mutable, so eager promotion is disabled for the
 * duration and the TSO's dirty flag is refreshed from the evacuation
 * outcome at the end. */
50 scavengeTSO (StgTSO *tso)
54 debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
56 // update the pointer from the Task.
57 if (tso->bound != NULL) {
58 tso->bound->tso = tso;
// Save and clear eager promotion: objects reachable only from a
// mutable TSO should not be eagerly promoted.
61 saved_eager = gct->eager_promotion;
62 gct->eager_promotion = rtsFalse;
64 evacuate((StgClosure **)&tso->blocked_exceptions);
65 evacuate((StgClosure **)&tso->bq);
67 // scavenge the current transaction record
68 evacuate((StgClosure **)&tso->trec);
70 evacuate((StgClosure **)&tso->stackobj);
72 evacuate((StgClosure **)&tso->_link);
// block_info.closure is only a heap pointer for these blocking states;
// for others (see comment below) it may be a plain FD or delay value.
73 if ( tso->why_blocked == BlockedOnMVar
74 || tso->why_blocked == BlockedOnBlackHole
75 || tso->why_blocked == BlockedOnMsgThrowTo
76 || tso->why_blocked == NotBlocked
78 evacuate(&tso->block_info.closure);
81 // in the THREADED_RTS, block_info.closure must always point to a
82 // valid closure, because we assume this in throwTo(). In the
83 // non-threaded RTS it might be a FD (for
84 // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
86 tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
// A failed evacuation means the TSO still points into a younger
// generation, so it stays dirty (and on a mutable list).
90 tso->dirty = gct->failed_to_evac;
92 gct->eager_promotion = saved_eager;
95 /* -----------------------------------------------------------------------------
96 Mutable arrays of pointers
97 -------------------------------------------------------------------------- */
/* Scavenge an entire MUT_ARR_PTRS, evacuating every element and
 * maintaining the per-card dirty bits: a card is marked 1 if any
 * element in it failed to evacuate (still points young), else 0.
 * Returns a pointer just past the array (the next object). */
99 static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
105 any_failed = rtsFalse;
106 p = (StgPtr)&a->payload[0];
// All full cards except the last (partial) one.
107 for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
109 q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
111 evacuate((StgClosure**)p);
113 if (gct->failed_to_evac) {
114 any_failed = rtsTrue;
115 *mutArrPtrsCard(a,m) = 1;
116 gct->failed_to_evac = rtsFalse;
118 *mutArrPtrsCard(a,m) = 0;
// The final card, which may be only partially filled: stop at the
// true end of the payload rather than a full card boundary.
122 q = (StgPtr)&a->payload[a->ptrs];
125 evacuate((StgClosure**)p);
127 if (gct->failed_to_evac) {
128 any_failed = rtsTrue;
129 *mutArrPtrsCard(a,m) = 1;
130 gct->failed_to_evac = rtsFalse;
132 *mutArrPtrsCard(a,m) = 0;
// Report failure to the caller only if any card failed.
136 gct->failed_to_evac = any_failed;
137 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
140 // scavenge only the marked areas of a MUT_ARR_PTRS: cards whose
// dirty bit is set. Used when re-scanning a dirty array from the
// mutable list, so clean cards can be skipped entirely. Clears each
// card bit after scavenging it; returns a pointer past the array.
141 static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
147 any_failed = rtsFalse;
148 for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
150 if (*mutArrPtrsCard(a,m) != 0) {
151 p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
// Clamp the card's end to the real end of the payload (last card
// may be partial).
152 q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
153 (StgPtr)&a->payload[a->ptrs]);
155 evacuate((StgClosure**)p);
157 if (gct->failed_to_evac) {
158 any_failed = rtsTrue;
159 gct->failed_to_evac = rtsFalse;
161 *mutArrPtrsCard(a,m) = 0;
166 gct->failed_to_evac = any_failed;
167 return (StgPtr)a + mut_arr_ptrs_sizeW(a);
170 /* -----------------------------------------------------------------------------
171 Blocks of function args occur on the stack (at the top) and
173 -------------------------------------------------------------------------- */
/* Scavenge a block of function arguments laid out according to the
 * function's argument bitmap (small or large). A 0 bit in the bitmap
 * marks a pointer word, which is evacuated; 1 bits are non-pointers
 * and are skipped. */
176 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
183 switch (fun_info->f.fun_type) {
// ARG_GEN: bitmap stored inline in the info table.
185 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
186 size = BITMAP_SIZE(fun_info->f.b.bitmap);
// ARG_GEN_BIG: bitmap too large for a word; use the out-of-line form.
189 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
190 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
// default: one of the standard argument patterns, looked up in the
// static stg_arg_bitmaps table.
194 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
195 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
// Walk the small bitmap: bit clear => pointer, evacuate it.
198 if ((bitmap & 1) == 0) {
199 evacuate((StgClosure **)p);
202 bitmap = bitmap >> 1;
/* Scavenge the payload of a PAP or AP: 'size' argument words laid out
 * according to the bitmap of the function 'fun'. Shared by
 * scavenge_PAP() and scavenge_AP() below. */
210 STATIC_INLINE GNUC_ATTR_HOT StgPtr
211 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
215 StgFunInfoTable *fun_info;
// fun may be tagged; untag before fetching its info table.
217 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
218 ASSERT(fun_info->i.type != PAP);
221 switch (fun_info->f.fun_type) {
223 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
226 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
// BCO case: the bitmap lives in the byte-code object itself.
230 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
234 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
// Small-bitmap walk: 0 bit => pointer word, evacuate.
237 if ((bitmap & 1) == 0) {
238 evacuate((StgClosure **)p);
241 bitmap = bitmap >> 1;
/* Scavenge a PAP: evacuate its function and scavenge the argument
 * payload; returns a pointer past the PAP. */
249 STATIC_INLINE GNUC_ATTR_HOT StgPtr
250 scavenge_PAP (StgPAP *pap)
253 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
/* Scavenge an AP (same layout handling as a PAP). */
257 scavenge_AP (StgAP *ap)
260 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
263 /* -----------------------------------------------------------------------------
265 -------------------------------------------------------------------------- */
267 /* Similar to scavenge_large_bitmap(), but we don't write back the
268 * pointers we get back from evacuate(): SRT entries refer to static
 * closures, so the table itself is never updated.
 */
271 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
278 bitmap = large_srt->l.bitmap[b];
279 size = (nat)large_srt->l.size;
280 p = (StgClosure **)large_srt->srt;
281 for (i = 0; i < size; ) {
// A set bit marks a live SRT entry to evacuate.
282 if ((bitmap & 1) != 0) {
// Refill the bitmap word at each word boundary.
287 if (i % BITS_IN(W_) == 0) {
289 bitmap = large_srt->l.bitmap[b];
291 bitmap = bitmap >> 1;
296 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
297 * srt field in the info table. That's ok, because we'll
298 * never dereference it.
 *
 * An all-ones half-word bitmap is the sentinel meaning the bitmap is
 * too big to fit inline, and a StgLargeSRT is used instead.
 */
300 STATIC_INLINE GNUC_ATTR_HOT void
301 scavenge_srt (StgClosure **srt, nat srt_bitmap)
309 if (bitmap == (StgHalfWord)(-1)) {
310 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
// Small-bitmap case: walk set bits, evacuating each SRT entry.
314 while (bitmap != 0) {
315 if ((bitmap & 1) != 0) {
316 #if defined(__PIC__) && defined(mingw32_HOST_OS)
317 // Special-case to handle references to closures hiding out in DLLs, since
318 // double indirections required to get at those. The code generator knows
319 // which is which when generating the SRT, so it stores the (indirect)
320 // reference to the DLL closure in the table by first adding one to it.
321 // We check for this here, and undo the addition before evacuating it.
323 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
324 // closure that's fixed at link-time, and no extra magic is required.
325 if ( (unsigned long)(*srt) & 0x1 ) {
326 evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
335 bitmap = bitmap >> 1;
/* Scavenge the SRT of a thunk. SRTs only keep CAFs alive, which only
 * matters during a major GC, so this is a no-op otherwise. */
340 STATIC_INLINE GNUC_ATTR_HOT void
341 scavenge_thunk_srt(const StgInfoTable *info)
343 StgThunkInfoTable *thunk_info;
345 if (!major_gc) return;
347 thunk_info = itbl_to_thunk_itbl(info);
348 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
/* Scavenge the SRT of a function closure; like scavenge_thunk_srt,
 * only needed during a major GC. */
351 STATIC_INLINE GNUC_ATTR_HOT void
352 scavenge_fun_srt(const StgInfoTable *info)
354 StgFunInfoTable *fun_info;
356 if (!major_gc) return;
358 fun_info = itbl_to_fun_itbl(info);
359 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
362 /* -----------------------------------------------------------------------------
363 Scavenge a block from the given scan pointer up to bd->free.
365 evac_gen_no is set by the caller to be either zero (for a step in a
366 generation < N) or G where G is the generation of the step being
369 We sometimes temporarily change evac_gen_no back to zero if we're
370 scavenging a mutable object where eager promotion isn't such a good
372 -------------------------------------------------------------------------- */
/* Linearly scavenge all objects in a block from bd->u.scan up to
 * bd->free, dispatching on each closure's info-table type. For
 * mutable objects, eager promotion is temporarily disabled and the
 * object's CLEAN/DIRTY info pointer is updated from the evacuation
 * outcome. Objects that still point into a younger generation are
 * recorded on the mutable list of this block's generation. */
374 static GNUC_ATTR_HOT void
375 scavenge_block (bdescr *bd)
379 rtsBool saved_eager_promotion;
382 debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
383 bd->start, bd->gen_no, bd->u.scan);
// Objects scavenged here are evacuated to this block's generation.
386 gct->evac_gen_no = bd->gen_no;
387 saved_eager_promotion = gct->eager_promotion;
388 gct->failed_to_evac = rtsFalse;
390 ws = &gct->gens[bd->gen->no];
394 // we might be evacuating into the very object that we're
395 // scavenging, so we have to check the real bd->free pointer each
396 // time around the loop.
397 while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
399 ASSERT(bd->link == NULL);
400 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
401 info = get_itbl((StgClosure *)p);
403 ASSERT(gct->thunk_selector_depth == 0);
406 switch (info->type) {
// MVar: mutable, so suppress eager promotion and mark CLEAN/DIRTY
// depending on whether all its fields evacuated successfully.
411 StgMVar *mvar = ((StgMVar *)p);
412 gct->eager_promotion = rtsFalse;
413 evacuate((StgClosure **)&mvar->head);
414 evacuate((StgClosure **)&mvar->tail);
415 evacuate((StgClosure **)&mvar->value);
416 gct->eager_promotion = saved_eager_promotion;
418 if (gct->failed_to_evac) {
419 mvar->header.info = &stg_MVAR_DIRTY_info;
421 mvar->header.info = &stg_MVAR_CLEAN_info;
423 p += sizeofW(StgMVar);
// Specialised small FUN/THUNK/CONSTR layouts: evacuate the known
// number of pointer fields and advance p by the fixed size.
428 scavenge_fun_srt(info);
429 evacuate(&((StgClosure *)p)->payload[1]);
430 evacuate(&((StgClosure *)p)->payload[0]);
431 p += sizeofW(StgHeader) + 2;
435 scavenge_thunk_srt(info);
436 evacuate(&((StgThunk *)p)->payload[1]);
437 evacuate(&((StgThunk *)p)->payload[0]);
438 p += sizeofW(StgThunk) + 2;
442 evacuate(&((StgClosure *)p)->payload[1]);
443 evacuate(&((StgClosure *)p)->payload[0]);
444 p += sizeofW(StgHeader) + 2;
448 scavenge_thunk_srt(info);
449 evacuate(&((StgThunk *)p)->payload[0]);
450 p += sizeofW(StgThunk) + 1;
454 scavenge_fun_srt(info);
456 evacuate(&((StgClosure *)p)->payload[0]);
457 p += sizeofW(StgHeader) + 1;
461 scavenge_thunk_srt(info);
462 p += sizeofW(StgThunk) + 1;
466 scavenge_fun_srt(info);
468 p += sizeofW(StgHeader) + 1;
472 scavenge_thunk_srt(info);
473 p += sizeofW(StgThunk) + 2;
477 scavenge_fun_srt(info);
479 p += sizeofW(StgHeader) + 2;
483 scavenge_thunk_srt(info);
484 evacuate(&((StgThunk *)p)->payload[0]);
485 p += sizeofW(StgThunk) + 2;
489 scavenge_fun_srt(info);
491 evacuate(&((StgClosure *)p)->payload[0]);
492 p += sizeofW(StgHeader) + 2;
496 scavenge_fun_srt(info);
// Generic layouts: walk ptrs pointer fields, then skip nptrs
// non-pointer words.
503 scavenge_thunk_srt(info);
504 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
505 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
506 evacuate((StgClosure **)p);
508 p += info->layout.payload.nptrs;
519 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
520 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
521 evacuate((StgClosure **)p);
523 p += info->layout.payload.nptrs;
// BCO: evacuate the byte-code object's three component arrays.
528 StgBCO *bco = (StgBCO *)p;
529 evacuate((StgClosure **)&bco->instrs);
530 evacuate((StgClosure **)&bco->literals);
531 evacuate((StgClosure **)&bco->ptrs);
538 evacuate(&((StgInd *)p)->indirectee);
539 p += sizeofW(StgInd);
// MUT_VAR: mutable; CLEAN/DIRTY info updated via q (the object start).
544 gct->eager_promotion = rtsFalse;
545 evacuate(&((StgMutVar *)p)->var);
546 gct->eager_promotion = saved_eager_promotion;
548 if (gct->failed_to_evac) {
549 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
551 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
553 p += sizeofW(StgMutVar);
558 StgBlockingQueue *bq = (StgBlockingQueue *)p;
560 gct->eager_promotion = rtsFalse;
562 evacuate((StgClosure**)&bq->owner);
563 evacuate((StgClosure**)&bq->queue);
564 evacuate((StgClosure**)&bq->link);
565 gct->eager_promotion = saved_eager_promotion;
567 if (gct->failed_to_evac) {
568 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
570 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
572 p += sizeofW(StgBlockingQueue);
578 StgSelector *s = (StgSelector *)p;
579 evacuate(&s->selectee);
580 p += THUNK_SELECTOR_sizeW();
584 // A chunk of stack saved in a heap object
587 StgAP_STACK *ap = (StgAP_STACK *)p;
590 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
591 p = (StgPtr)ap->payload + ap->size;
596 p = scavenge_PAP((StgPAP *)p);
600 p = scavenge_AP((StgAP *)p);
// ARR_WORDS: no pointers inside, just skip over it.
605 p += arr_words_sizeW((StgArrWords *)p);
608 case MUT_ARR_PTRS_CLEAN:
609 case MUT_ARR_PTRS_DIRTY:
611 // We don't eagerly promote objects pointed to by a mutable
612 // array, but if we find the array only points to objects in
613 // the same or an older generation, we mark it "clean" and
614 // avoid traversing it during minor GCs.
615 gct->eager_promotion = rtsFalse;
617 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
619 if (gct->failed_to_evac) {
620 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
622 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
625 gct->eager_promotion = saved_eager_promotion;
626 gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
630 case MUT_ARR_PTRS_FROZEN:
631 case MUT_ARR_PTRS_FROZEN0:
634 p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
636 // If we're going to put this object on the mutable list, then
637 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
638 if (gct->failed_to_evac) {
639 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
641 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
648 scavengeTSO((StgTSO *)p);
649 p += sizeofW(StgTSO);
// STACK: scavenge the live part (sp .. end) and refresh its dirty bit.
655 StgStack *stack = (StgStack*)p;
657 gct->eager_promotion = rtsFalse;
659 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
660 stack->dirty = gct->failed_to_evac;
661 p += stack_sizeW(stack);
663 gct->eager_promotion = saved_eager_promotion;
// Generic mutable closure: walk its pointer fields, always mutable.
671 gct->eager_promotion = rtsFalse;
673 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
674 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
675 evacuate((StgClosure **)p);
677 p += info->layout.payload.nptrs;
679 gct->eager_promotion = saved_eager_promotion;
680 gct->failed_to_evac = rtsTrue; // mutable
// TREC_CHUNK: an STM transaction-record chunk; evacuate the chunk
// link and each entry's tvar / expected / new values.
687 StgTRecChunk *tc = ((StgTRecChunk *) p);
688 TRecEntry *e = &(tc -> entries[0]);
689 gct->eager_promotion = rtsFalse;
690 evacuate((StgClosure **)&tc->prev_chunk);
691 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
692 evacuate((StgClosure **)&e->tvar);
693 evacuate((StgClosure **)&e->expected_value);
694 evacuate((StgClosure **)&e->new_value);
696 gct->eager_promotion = saved_eager_promotion;
697 gct->failed_to_evac = rtsTrue; // mutable
698 p += sizeofW(StgTRecChunk);
703 barf("scavenge: unimplemented/strange closure type %d @ %p",
708 * We need to record the current object on the mutable list if
709 * (a) It is actually mutable, or
710 * (b) It contains pointers to a younger generation.
711 * Case (b) arises if we didn't manage to promote everything that
712 * the current object points to into the current generation.
714 if (gct->failed_to_evac) {
715 gct->failed_to_evac = rtsFalse;
716 if (bd->gen_no > 0) {
717 recordMutableGen_GC((StgClosure *)q, bd->gen_no);
// Account for data copied into the current todo block.
723 gct->copied += ws->todo_free - bd->free;
727 debugTrace(DEBUG_gc, " scavenged %ld bytes",
728 (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
730 // update stats: this is a block that has been scavenged
731 gct->scanned += bd->free - bd->u.scan;
732 bd->u.scan = bd->free;
734 if (bd != ws->todo_bd) {
735 // we're not going to evac any more objects into
736 // this block, so push it now.
737 push_scanned_block(bd, ws);
742 /* -----------------------------------------------------------------------------
743 Scavenge everything on the mark stack.
745 This is slightly different from scavenge():
746 - we don't walk linearly through the objects, so the scavenger
747 doesn't need to advance the pointer on to the next object.
748 -------------------------------------------------------------------------- */
/* Scavenge every object popped off the mark stack. Unlike
 * scavenge_block(), objects arrive individually, so there is no need
 * to advance a scan pointer past each object; otherwise the per-type
 * handling mirrors scavenge_block(). Everything is evacuated to the
 * oldest generation. */
751 scavenge_mark_stack(void)
755 rtsBool saved_eager_promotion;
757 gct->evac_gen_no = oldest_gen->no;
758 saved_eager_promotion = gct->eager_promotion;
760 while ((p = pop_mark_stack())) {
762 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
763 info = get_itbl((StgClosure *)p);
766 switch (info->type) {
// MVar: mutable, suppress eager promotion, mark CLEAN/DIRTY.
771 StgMVar *mvar = ((StgMVar *)p);
772 gct->eager_promotion = rtsFalse;
773 evacuate((StgClosure **)&mvar->head);
774 evacuate((StgClosure **)&mvar->tail);
775 evacuate((StgClosure **)&mvar->value);
776 gct->eager_promotion = saved_eager_promotion;
778 if (gct->failed_to_evac) {
779 mvar->header.info = &stg_MVAR_DIRTY_info;
781 mvar->header.info = &stg_MVAR_CLEAN_info;
// Specialised small FUN/THUNK/CONSTR layouts.
787 scavenge_fun_srt(info);
788 evacuate(&((StgClosure *)p)->payload[1]);
789 evacuate(&((StgClosure *)p)->payload[0]);
793 scavenge_thunk_srt(info);
794 evacuate(&((StgThunk *)p)->payload[1]);
795 evacuate(&((StgThunk *)p)->payload[0]);
799 evacuate(&((StgClosure *)p)->payload[1]);
800 evacuate(&((StgClosure *)p)->payload[0]);
805 scavenge_fun_srt(info);
806 evacuate(&((StgClosure *)p)->payload[0]);
811 scavenge_thunk_srt(info);
812 evacuate(&((StgThunk *)p)->payload[0]);
817 evacuate(&((StgClosure *)p)->payload[0]);
822 scavenge_fun_srt(info);
827 scavenge_thunk_srt(info);
835 scavenge_fun_srt(info);
// Generic layouts: walk the declared pointer fields.
842 scavenge_thunk_srt(info);
843 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
844 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
845 evacuate((StgClosure **)p);
857 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
858 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
859 evacuate((StgClosure **)p);
865 StgBCO *bco = (StgBCO *)p;
866 evacuate((StgClosure **)&bco->instrs);
867 evacuate((StgClosure **)&bco->literals);
868 evacuate((StgClosure **)&bco->ptrs);
873 // don't need to do anything here: the only possible case
874 // is that we're in a 1-space compacting collector, with
875 // no "old" generation.
880 evacuate(&((StgInd *)p)->indirectee);
884 case MUT_VAR_DIRTY: {
885 gct->eager_promotion = rtsFalse;
886 evacuate(&((StgMutVar *)p)->var);
887 gct->eager_promotion = saved_eager_promotion;
889 if (gct->failed_to_evac) {
890 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
892 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
899 StgBlockingQueue *bq = (StgBlockingQueue *)p;
901 gct->eager_promotion = rtsFalse;
903 evacuate((StgClosure**)&bq->owner);
904 evacuate((StgClosure**)&bq->queue);
905 evacuate((StgClosure**)&bq->link);
906 gct->eager_promotion = saved_eager_promotion;
908 if (gct->failed_to_evac) {
909 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
911 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
921 StgSelector *s = (StgSelector *)p;
922 evacuate(&s->selectee);
926 // A chunk of stack saved in a heap object
929 StgAP_STACK *ap = (StgAP_STACK *)p;
932 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
// Return values of scavenge_PAP/AP are unused here: no scan pointer
// to advance when working from the mark stack.
937 scavenge_PAP((StgPAP *)p);
941 scavenge_AP((StgAP *)p);
944 case MUT_ARR_PTRS_CLEAN:
945 case MUT_ARR_PTRS_DIRTY:
948 // We don't eagerly promote objects pointed to by a mutable
949 // array, but if we find the array only points to objects in
950 // the same or an older generation, we mark it "clean" and
951 // avoid traversing it during minor GCs.
952 gct->eager_promotion = rtsFalse;
954 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
956 if (gct->failed_to_evac) {
957 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
959 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
962 gct->eager_promotion = saved_eager_promotion;
963 gct->failed_to_evac = rtsTrue; // mutable anyhow.
967 case MUT_ARR_PTRS_FROZEN:
968 case MUT_ARR_PTRS_FROZEN0:
973 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
975 // If we're going to put this object on the mutable list, then
976 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
977 if (gct->failed_to_evac) {
978 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
980 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
987 scavengeTSO((StgTSO*)p);
993 StgStack *stack = (StgStack*)p;
995 gct->eager_promotion = rtsFalse;
997 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
998 stack->dirty = gct->failed_to_evac;
1000 gct->eager_promotion = saved_eager_promotion;
// Generic mutable closure: always mutable.
1008 gct->eager_promotion = rtsFalse;
1010 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1011 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1012 evacuate((StgClosure **)p);
1015 gct->eager_promotion = saved_eager_promotion;
1016 gct->failed_to_evac = rtsTrue; // mutable
// STM transaction-record chunk.
1023 StgTRecChunk *tc = ((StgTRecChunk *) p);
1024 TRecEntry *e = &(tc -> entries[0]);
1025 gct->eager_promotion = rtsFalse;
1026 evacuate((StgClosure **)&tc->prev_chunk);
1027 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1028 evacuate((StgClosure **)&e->tvar);
1029 evacuate((StgClosure **)&e->expected_value);
1030 evacuate((StgClosure **)&e->new_value);
1032 gct->eager_promotion = saved_eager_promotion;
1033 gct->failed_to_evac = rtsTrue; // mutable
1038 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
// As in scavenge_block(): if anything stayed young, record the object
// on the mutable list of the generation we evacuated into.
1042 if (gct->failed_to_evac) {
1043 gct->failed_to_evac = rtsFalse;
1044 if (gct->evac_gen_no) {
1045 recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
1048 } // while (p = pop_mark_stack())
1051 /* -----------------------------------------------------------------------------
1052 Scavenge one object.
1054 This is used for objects that are temporarily marked as mutable
1055 because they contain old-to-new generation pointers. Only certain
1056 objects can have this property.
1057 -------------------------------------------------------------------------- */
/* Scavenge a single object: used for objects that were temporarily
 * placed on a mutable list because they held old-to-new pointers.
 * Returns (via no_luck / failed_to_evac) whether the object still
 * points into a younger generation and must stay on the list. */
1060 scavenge_one(StgPtr p)
1062 const StgInfoTable *info;
1064 rtsBool saved_eager_promotion;
1066 saved_eager_promotion = gct->eager_promotion;
1068 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1069 info = get_itbl((StgClosure *)p);
1071 switch (info->type) {
// MVar: mutable; suppress eager promotion and mark CLEAN/DIRTY.
1076 StgMVar *mvar = ((StgMVar *)p);
1077 gct->eager_promotion = rtsFalse;
1078 evacuate((StgClosure **)&mvar->head);
1079 evacuate((StgClosure **)&mvar->tail);
1080 evacuate((StgClosure **)&mvar->value);
1081 gct->eager_promotion = saved_eager_promotion;
1083 if (gct->failed_to_evac) {
1084 mvar->header.info = &stg_MVAR_DIRTY_info;
1086 mvar->header.info = &stg_MVAR_CLEAN_info;
// Thunks: walk the declared pointer fields.
1100 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
1101 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
1102 evacuate((StgClosure **)q);
1108 case FUN_1_0: // hardly worth specialising these guys
// Functions/constructors: same generic pointer walk.
1125 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1126 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
1127 evacuate((StgClosure **)q);
1133 case MUT_VAR_DIRTY: {
1136 gct->eager_promotion = rtsFalse;
1137 evacuate(&((StgMutVar *)p)->var);
1138 gct->eager_promotion = saved_eager_promotion;
1140 if (gct->failed_to_evac) {
1141 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
1143 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
1148 case BLOCKING_QUEUE:
1150 StgBlockingQueue *bq = (StgBlockingQueue *)p;
1152 gct->eager_promotion = rtsFalse;
1154 evacuate((StgClosure**)&bq->owner);
1155 evacuate((StgClosure**)&bq->queue);
1156 evacuate((StgClosure**)&bq->link);
1157 gct->eager_promotion = saved_eager_promotion;
1159 if (gct->failed_to_evac) {
1160 bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
1162 bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
1167 case THUNK_SELECTOR:
1169 StgSelector *s = (StgSelector *)p;
1170 evacuate(&s->selectee);
// AP_STACK: scavenge the saved stack chunk it carries.
1176 StgAP_STACK *ap = (StgAP_STACK *)p;
1179 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
1180 p = (StgPtr)ap->payload + ap->size;
1185 p = scavenge_PAP((StgPAP *)p);
1189 p = scavenge_AP((StgAP *)p);
1193 // nothing to follow
1196 case MUT_ARR_PTRS_CLEAN:
1197 case MUT_ARR_PTRS_DIRTY:
1199 // We don't eagerly promote objects pointed to by a mutable
1200 // array, but if we find the array only points to objects in
1201 // the same or an older generation, we mark it "clean" and
1202 // avoid traversing it during minor GCs.
1203 gct->eager_promotion = rtsFalse;
1205 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1207 if (gct->failed_to_evac) {
1208 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1210 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1213 gct->eager_promotion = saved_eager_promotion;
1214 gct->failed_to_evac = rtsTrue;
1218 case MUT_ARR_PTRS_FROZEN:
1219 case MUT_ARR_PTRS_FROZEN0:
1221 // follow everything
1222 scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
1224 // If we're going to put this object on the mutable list, then
1225 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
1226 if (gct->failed_to_evac) {
1227 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
1229 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
1236 scavengeTSO((StgTSO*)p);
1242 StgStack *stack = (StgStack*)p;
1244 gct->eager_promotion = rtsFalse;
1246 scavenge_stack(stack->sp, stack->stack + stack->stack_size);
1247 stack->dirty = gct->failed_to_evac;
1249 gct->eager_promotion = saved_eager_promotion;
// Generic mutable closure: always mutable.
1257 gct->eager_promotion = rtsFalse;
1259 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1260 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1261 evacuate((StgClosure **)p);
1264 gct->eager_promotion = saved_eager_promotion;
1265 gct->failed_to_evac = rtsTrue; // mutable
// STM transaction-record chunk.
1273 StgTRecChunk *tc = ((StgTRecChunk *) p);
1274 TRecEntry *e = &(tc -> entries[0]);
1275 gct->eager_promotion = rtsFalse;
1276 evacuate((StgClosure **)&tc->prev_chunk);
1277 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
1278 evacuate((StgClosure **)&e->tvar);
1279 evacuate((StgClosure **)&e->expected_value);
1280 evacuate((StgClosure **)&e->new_value);
1282 gct->eager_promotion = saved_eager_promotion;
1283 gct->failed_to_evac = rtsTrue; // mutable
1288 // IND can happen, for example, when the interpreter allocates
1289 // a gigantic AP closure (more than one block), which ends up
1290 // on the large-object list and then gets updated. See #3424.
1293 evacuate(&((StgInd *)p)->indirectee);
1295 #if 0 && defined(DEBUG)
1296 if (RtsFlags.DebugFlags.gc)
1297 /* Debugging code to print out the size of the thing we just
1301 StgPtr start = gen->scan;
1302 bdescr *start_bd = gen->scan_bd;
1305 if (start_bd != gen->scan_bd) {
1306 size += (P_)BLOCK_ROUND_UP(start) - start;
1307 start_bd = start_bd->link;
1308 while (start_bd != gen->scan_bd) {
1309 size += BLOCK_SIZE_W;
1310 start_bd = start_bd->link;
1313 (P_)BLOCK_ROUND_DOWN(gen->scan);
1315 size = gen->scan - start;
1317 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
1323 barf("scavenge_one: strange object %d", (int)(info->type));
// Report whether anything failed to evacuate, and reset the flag for
// the next object.
1326 no_luck = gct->failed_to_evac;
1327 gct->failed_to_evac = rtsFalse;
1331 /* -----------------------------------------------------------------------------
1332 Scavenging mutable lists.
1334 We treat the mutable list of each generation > N (i.e. all the
1335 generations older than the one being collected) as roots. We also
1336 remove non-mutable objects from the mutable list at this point.
1337 -------------------------------------------------------------------------- */
/* Scavenge one generation's mutable list (a chain of blocks of
 * closure pointers). Clean objects are skipped or re-recorded
 * cheaply; dirty arrays are scavenged card-by-card; everything else
 * goes through scavenge_one(), and objects still holding young
 * pointers are put back on the list. */
1340 scavenge_mutable_list(bdescr *bd, generation *gen)
1346 gct->evac_gen_no = gen_no;
1347 for (; bd != NULL; bd = bd->link) {
1348 for (q = bd->start; q < bd->free; q++) {
1350 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
// Statistics-only classification of what lives on the mutable list
// (mutlist_* counters; presumably compiled under DEBUG — the
// surrounding #ifdef is not visible here).
1353 switch (get_itbl((StgClosure *)p)->type) {
1355 // can happen due to concurrent writeMutVars
1357 mutlist_MUTVARS++; break;
1358 case MUT_ARR_PTRS_CLEAN:
1359 case MUT_ARR_PTRS_DIRTY:
1360 case MUT_ARR_PTRS_FROZEN:
1361 case MUT_ARR_PTRS_FROZEN0:
1362 mutlist_MUTARRS++; break;
1364 barf("MVAR_CLEAN on mutable list");
1366 mutlist_MVARS++; break;
1368 mutlist_OTHERS++; break;
1372 // Check whether this object is "clean", that is it
1373 // definitely doesn't point into a young generation.
1374 // Clean objects don't need to be scavenged. Some clean
1375 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1376 // list at all; others, such as TSO
1377 // are always on the mutable list.
1379 switch (get_itbl((StgClosure *)p)->type) {
1380 case MUT_ARR_PTRS_CLEAN:
// Clean array: nothing to scavenge, but it must stay on the list.
1381 recordMutableGen_GC((StgClosure *)p,gen_no);
1383 case MUT_ARR_PTRS_DIRTY:
1385 rtsBool saved_eager_promotion;
1386 saved_eager_promotion = gct->eager_promotion;
1387 gct->eager_promotion = rtsFalse;
// Only the cards marked dirty need to be re-scanned.
1389 scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
1391 if (gct->failed_to_evac) {
1392 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
1394 ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
1397 gct->eager_promotion = saved_eager_promotion;
1398 gct->failed_to_evac = rtsFalse;
1399 recordMutableGen_GC((StgClosure *)p,gen_no);
1406 if (scavenge_one(p)) {
1407 // didn't manage to promote everything, so put the
1408 // object back on the list.
1409 recordMutableGen_GC((StgClosure *)p,gen_no);
/* Scavenge the saved mutable lists of every uncollected generation
 * belonging to one capability, freeing each list once done. */
1416 scavenge_capability_mut_lists (Capability *cap)
1420 /* Mutable lists from each generation > N
1421 * we want to *scavenge* these roots, not evacuate them: they're not
1422 * going to move in this GC.
1423 * Also do them in reverse generation order, for the usual reason:
1424 * namely to reduce the likelihood of spurious old->new pointers.
1426 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
1427 scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
1428 freeChain_sync(cap->saved_mut_lists[g]);
1429 cap->saved_mut_lists[g] = NULL;
1433 /* -----------------------------------------------------------------------------
1434 Scavenging the static objects.
1436 We treat the mutable list of each generation > N (i.e. all the
1437 generations older than the one being collected) as roots. We also
1438 remove non-mutable objects from the mutable list at this point.
1439 -------------------------------------------------------------------------- */
/* Scavenge the static-object list. Each object is moved from
 * static_objects onto scavenged_static_objects before being
 * scavenged; evacuation may append new objects to static_objects, so
 * the list head is re-read every iteration. Static objects evacuate
 * straight to the oldest generation. */
1442 scavenge_static(void)
1445 const StgInfoTable *info;
1447 debugTrace(DEBUG_gc, "scavenging static objects");
1449 /* Always evacuate straight to the oldest generation for static
1451 gct->evac_gen_no = oldest_gen->no;
1453 /* keep going until we've scavenged all the objects on the linked
1458 /* get the next static object from the list. Remember, there might
1459 * be more stuff on this list after each evacuation...
1460 * (static_objects is a global)
1462 p = gct->static_objects;
1463 if (p == END_OF_STATIC_LIST) {
1467 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1470 if (info->type==RBH)
1471 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1473 // make sure the info pointer is into text space
1475 /* Take this object *off* the static_objects list,
1476 * and put it on the scavenged_static_objects list.
1478 gct->static_objects = *STATIC_LINK(info,p);
1479 *STATIC_LINK(info,p) = gct->scavenged_static_objects;
1480 gct->scavenged_static_objects = p;
1482 switch (info -> type) {
// IND_STATIC (a CAF that has been evaluated): follow the indirectee.
1486 StgInd *ind = (StgInd *)p;
1487 evacuate(&ind->indirectee);
1489 /* might fail to evacuate it, in which case we have to pop it
1490 * back on the mutable list of the oldest generation. We
1491 * leave it *on* the scavenged_static_objects list, though,
1492 * in case we visit this object again.
1494 if (gct->failed_to_evac) {
1495 gct->failed_to_evac = rtsFalse;
1496 recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
1502 scavenge_thunk_srt(info);
1506 scavenge_fun_srt(info);
// Static constructor: walk its pointer fields.
1513 next = (P_)p->payload + info->layout.payload.ptrs;
1514 // evacuate the pointers
1515 for (q = (P_)p->payload; q < next; q++) {
1516 evacuate((StgClosure **)q);
1522 barf("scavenge_static: strange closure %d", (int)(info->type));
1525 ASSERT(gct->failed_to_evac == rtsFalse);
1529 /* -----------------------------------------------------------------------------
1530 scavenge a chunk of memory described by a bitmap
1531 -------------------------------------------------------------------------- */
/* Scavenge 'size' words starting at p, using a large (multi-word)
 * bitmap: a 0 bit marks a pointer to evacuate, a 1 bit a non-pointer
 * to skip. */
1534 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1541 for (i = 0; i < size; b++) {
1542 bitmap = large_bitmap->bitmap[b];
// Process at most one bitmap word's worth of entries per iteration.
1543 j = stg_min(size-i, BITS_IN(W_));
1545 for (; j > 0; j--, p++) {
1546 if ((bitmap & 1) == 0) {
1547 evacuate((StgClosure **)p);
1549 bitmap = bitmap >> 1;
/* Scavenge `size` payload words at `p` described by a single-word bitmap;
 * as with the large-bitmap case, a CLEAR bit marks a pointer slot to be
 * evacuated.  Returns the pointer just past the scanned words (StgPtr).
 * NOTE(review): fragmentary extract -- the enclosing loop over `size`,
 * the `p++` advance, and the `return p` are elided from this listing.
 */
1554 STATIC_INLINE StgPtr
1555 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
// Bit clear => pointer slot: evacuate the closure it points to.
1558 if ((bitmap & 1) == 0) {
1559 evacuate((StgClosure **)p);
// Shift to examine the next slot's bit on the following iteration.
1562 bitmap = bitmap >> 1;
1568 /* -----------------------------------------------------------------------------
1569 scavenge_stack walks over a section of stack and evacuates all the
1570 objects pointed to by it. We can use the same code for walking
1571 AP_STACK_UPDs, since these are just sections of copied stack.
1572 -------------------------------------------------------------------------- */
/* Walk the stack chunk [p, stack_end), one activation record (stack frame)
 * at a time, evacuating every pointer each frame contains.  Dispatches on
 * the frame's info-table type: update frames get special BLACKHOLE
 * handling; bitmap frames use small/large bitmap scavenging; RET_DYN and
 * RET_FUN frames have their own layouts.
 * NOTE(review): this extract is fragmentary -- several case labels,
 * local declarations (bitmap, size, bco, dyn, v, q), `break`s and closing
 * braces are elided; comments describe only the visible code.
 */
1575 scavenge_stack(StgPtr p, StgPtr stack_end)
1577 const StgRetInfoTable* info;
1582 * Each time around this loop, we are looking at a chunk of stack
1583 * that starts with an activation record.
1586 while (p < stack_end) {
1587 info = get_ret_itbl((StgClosure *)p);
1589 switch (info->i.type) {
1592 // In SMP, we can get update frames that point to indirections
1593 // when two threads evaluate the same thunk. We do attempt to
1594 // discover this situation in threadPaused(), but it's
1595 // possible that the following sequence occurs:
1604 // Now T is an indirection, and the update frame is already
1605 // marked on A's stack, so we won't traverse it again in
1606 // threadPaused(). We could traverse the whole stack again
1607 // before GC, but that seems like overkill.
1609 // Scavenging this update frame as normal would be disastrous;
1610 // the updatee would end up pointing to the value. So we
1611 // check whether the value after evacuation is a BLACKHOLE,
1612 // and if not, we change the update frame to an stg_enter
1613 // frame that simply returns the value. Hence, blackholing is
1614 // compulsory (otherwise we would have to check for thunks
1617 // Note [upd-black-hole]
1618 // One slight hiccup is that the THUNK_SELECTOR machinery can
1619 // overwrite the updatee with an IND. In parallel GC, this
1620 // could even be happening concurrently, so we can't check for
1621 // the IND. Fortunately if we assume that blackholing is
1622 // happening (either lazy or eager), then we can be sure that
1623 // the updatee is never a THUNK_SELECTOR and we're ok.
1624 // NB. this is a new invariant: blackholing is not optional.
1626 StgUpdateFrame *frame = (StgUpdateFrame *)p;
1629 evacuate(&frame->updatee);
// NOTE(review): `v` is presumably the evacuated updatee; its
// assignment is elided from this extract -- confirm against the
// full source.  A tagged pointer or non-BLACKHOLE means the thunk
// was already evaluated, so the frame is rewritten to just return.
1631 if (GET_CLOSURE_TAG(v) != 0 ||
1632 (get_itbl(v)->type != BLACKHOLE)) {
1633 // blackholing is compulsory, see above.
1634 frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
1636 ASSERT(v->header.info != &stg_TSO_info);
1637 p += sizeofW(StgUpdateFrame);
1641 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
1642 case CATCH_STM_FRAME:
1643 case CATCH_RETRY_FRAME:
1644 case ATOMICALLY_FRAME:
1645 case UNDERFLOW_FRAME:
1649 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1650 size = BITMAP_SIZE(info->i.layout.bitmap);
1651 // NOTE: the payload starts immediately after the info-ptr, we
1652 // don't have an StgHeader in the same sense as a heap closure.
1654 p = scavenge_small_bitmap(p, size, bitmap);
// After the payload, follow the frame's static reference table.
1658 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
// BCO frame (case label elided): evacuate the BCO itself, then
// scavenge the frame payload using the BCO's own large bitmap.
1666 evacuate((StgClosure **)p);
1669 size = BCO_BITMAP_SIZE(bco);
1670 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1675 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1680 size = GET_LARGE_BITMAP(&info->i)->size;
1682 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1684 // and don't forget to follow the SRT
1688 // Dynamic bitmap: the mask is stored on the stack, and
1689 // there are a number of non-pointers followed by a number
1690 // of pointers above the bitmapped area. (see StgMacros.h,
1695 dyn = ((StgRetDyn *)p)->liveness;
1697 // traverse the bitmap first
1698 bitmap = RET_DYN_LIVENESS(dyn);
1699 p = (P_)&((StgRetDyn *)p)->payload[0];
1700 size = RET_DYN_BITMAP_SIZE;
1701 p = scavenge_small_bitmap(p, size, bitmap);
1703 // skip over the non-ptr words
1704 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1706 // follow the ptr words
1707 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1708 evacuate((StgClosure **)p);
// RET_FUN frame (case label elided): evacuate the function closure,
// then scavenge its argument block per the function's info table.
1716 StgRetFun *ret_fun = (StgRetFun *)p;
1717 StgFunInfoTable *fun_info;
1719 evacuate(&ret_fun->fun);
// Untag before looking up the info table: evacuate may return a
// tagged pointer.
1720 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1721 p = scavenge_arg_block(fun_info, ret_fun->payload);
// Unknown frame type on the stack: unrecoverable RTS bug.
1726 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1731 /*-----------------------------------------------------------------------------
1732 scavenge the large object list.
1734 evac_gen set by caller; similar games played with evac_gen as with
1735 scavenge() - see comment at the top of scavenge(). Most large
1736 objects are (repeatedly) mutable, so most of the time evac_gen will be zero.
1738 --------------------------------------------------------------------------- */
/* Scavenge the pending large objects of one generation workspace:
 * each block is moved from ws->todo_large_objects onto the generation's
 * scavenged_large_objects list (under the generation's spin lock, since
 * multiple GC threads may touch it), then its object is scavenged.
 * NOTE(review): fragmentary extract -- local declarations (bd, p), the
 * assignment of `p` from `bd`, and closing braces are elided here.
 */
1741 scavenge_large (gen_workspace *ws)
// Evacuations from here promote into this workspace's generation.
1746 gct->evac_gen_no = ws->gen->no;
1748 bd = ws->todo_large_objects;
// Re-read ws->todo_large_objects each iteration: scavenge_one() may
// push newly evacuated large objects onto the front of the list.
1750 for (; bd != NULL; bd = ws->todo_large_objects) {
1752 // take this object *off* the large objects list and put it on
1753 // the scavenged large objects list. This is so that we can
1754 // treat new_large_objects as a stack and push new objects on
1755 // the front when evacuating.
1756 ws->todo_large_objects = bd->link;
1758 ACQUIRE_SPIN_LOCK(&ws->gen->sync);
1759 dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
1760 ws->gen->n_scavenged_large_blocks += bd->blocks;
1761 RELEASE_SPIN_LOCK(&ws->gen->sync);
// scavenge_one() returns nonzero if the object must stay on a
// mutable list (it retained a pointer into a younger generation).
1764 if (scavenge_one(p)) {
1765 if (ws->gen->no > 0) {
1766 recordMutableGen_GC((StgClosure *)p, ws->gen->no);
// Account the scanned words for GC statistics.
1771 gct->scanned += closure_sizeW((StgClosure*)p);
1775 /* ----------------------------------------------------------------------------
1776 Look for work to do.
1778 We look for the oldest gen that has either a todo block that can
1779 be scanned, or a block of work on the global queue that we can
1782 It is important to take work from the *oldest* generation that
1783 has work available, because that minimizes the likelihood of
1784 evacuating objects into a young generation when they should have
1785 been eagerly promoted. This really does make a difference (the
1786 cacheprof benchmark is one that is affected).
1788 We also want to scan the todo block if possible before grabbing
1789 work from the global queue, the reason being that we don't want to
1790 steal work from the global queue and starve other threads if there
1791 is other work we can usefully be doing.
1792 ------------------------------------------------------------------------- */
/* Look for scavenging work for this GC thread, oldest generation first
 * (see the comment block above).  Returns rtsTrue iff any work was done;
 * the caller loops until no thread can find anything more to do.
 * NOTE(review): fragmentary extract -- local declarations (g, bd, ws),
 * the inner `loop:` label, the calls that actually process large objects
 * and grabbed/stolen todo blocks, and closing braces are elided here.
 */
1795 scavenge_find_work (void)
1799 rtsBool did_something, did_anything;
// Statistics: count how many times this thread searched for work.
1802 gct->scav_find_work++;
1804 did_anything = rtsFalse;
1807 did_something = rtsFalse;
// Scan generations oldest-first to favour eager promotion (see above).
1808 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1811 gct->scan_bd = NULL;
1813 // If we have a scan block with some work to do,
1814 // scavenge everything up to the free pointer.
1815 if (ws->todo_bd->u.scan < ws->todo_free)
1817 scavenge_block(ws->todo_bd);
1818 did_something = rtsTrue;
1822 // If we have any large objects to scavenge, do them now.
1823 if (ws->todo_large_objects) {
1825 did_something = rtsTrue;
// Otherwise try to take a full todo block from our local queue.
1829 if ((bd = grab_local_todo_block(ws)) != NULL) {
1831 did_something = rtsTrue;
1835 if (did_something) {
1836 did_anything = rtsTrue;
1841 #if defined(THREADED_RTS)
1842 if (work_stealing) {
1843 // look for work to steal
1844 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1845 if ((bd = steal_todo_block(g)) != NULL) {
1846 did_something = rtsTrue;
1852 if (did_something) {
1853 did_anything = rtsTrue;
1859 // only return when there is no more work to do
1861 return did_anything;
1864 /* ----------------------------------------------------------------------------
1865 Scavenge until we can't find anything more to scavenge.
1866 ------------------------------------------------------------------------- */
1874 work_to_do = rtsFalse;
1876 // scavenge static objects
1877 if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
1878 IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
1882 // scavenge objects in compacted generation
1883 if (mark_stack_bd != NULL && !mark_stack_empty()) {
1884 scavenge_mark_stack();
1885 work_to_do = rtsTrue;
1888 // Order is important here: we want to deal in full blocks as
1889 // much as possible, so go for global work in preference to
1890 // local work. Only if all the global work has been exhausted
1891 // do we start scavenging the fragments of blocks in the local
1893 if (scavenge_find_work()) goto loop;
1895 if (work_to_do) goto loop;