/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "Rts.h"
#include "Storage.h"
#include "GC.h"
#include "GCUtils.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "LdvProfile.h"
#include "Sanity.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections are required to get at those.  The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
            // reference to the DLL closure in the table by first adding one to it.
            // We check for this here, and undo the addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
STATIC_INLINE void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
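/* Illustrative sketch (not part of the RTS, and not compiled: note the
 * #if 0): the small-SRT walk above in miniature.  A SET bit means "this
 * entry is live and must be visited", and the loop stops as soon as no
 * bits remain.  The names walk_srt and visit_closure are hypothetical,
 * invented for this example only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void visit_closure(void **entry) {
    printf("visit entry %p\n", (void *)entry);
}

static void walk_srt(void **srt, uintptr_t bitmap) {
    void **p = srt;
    while (bitmap != 0) {          /* no bits left: nothing more to visit */
        if (bitmap & 1) {
            visit_closure(p);      /* bit set: evacuate this entry */
        }
        p++;
        bitmap >>= 1;
    }
}

int main(void) {
    void *table[4] = { (void *)1, (void *)2, (void *)3, (void *)4 };
    walk_srt(table, 0x5);          /* visits entries 0 and 2 */
    return 0;
}
#endif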
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavenge_TSO_link (StgTSO *tso)
{
    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->_link);
    }
}
static void
scavengeTSO (StgTSO *tso)
{
    rtsBool saved_eager;

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);
        return;
    }

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // scavenge current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->flags |= TSO_DIRTY;
        scavenge_TSO_link(tso);
    } else {
        tso->flags &= ~TSO_DIRTY;
        scavenge_TSO_link(tso);
        if (gct->failed_to_evac) {
            tso->flags |= TSO_LINK_DIRTY;
        } else {
            tso->flags &= ~TSO_LINK_DIRTY;
        }
    }

    gct->eager_promotion = saved_eager;
}
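/* Illustrative sketch (hypothetical helper, not RTS code, not compiled):
 * the dirty-flag bookkeeping above reduced to its decision table.
 * "Failed to evacuate" means the object still points into a younger
 * generation, so the TSO must stay on the mutable list; TSO_DIRTY covers
 * the whole TSO, TSO_LINK_DIRTY covers only the link field.
 */
#if 0
#include <stdio.h>

#define TSO_DIRTY      1
#define TSO_LINK_DIRTY 2

static int dirty_flags(int body_failed, int link_failed) {
    int flags = 0;
    if (body_failed) {
        flags |= TSO_DIRTY;          /* whole TSO dirty; link state subsumed */
    } else if (link_failed) {
        flags |= TSO_LINK_DIRTY;     /* only the link field points young */
    }
    return flags;
}

int main(void) {
    printf("%d %d %d\n",
           dirty_flags(1, 0),        /* 1: TSO_DIRTY */
           dirty_flags(0, 1),        /* 2: TSO_LINK_DIRTY */
           dirty_flags(0, 0));       /* 0: clean */
    return 0;
}
#endif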
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
STATIC_INLINE StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;

    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;

linear_scan:
    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (((volatile StgWord *)info)[1] & 0xffff) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }
        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            break;
        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case WEAK:
        case STABLE_NAME:
        case CONSTR:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }
        case BCO:
        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }
        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }
        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }
        case TSO:
        {
            scavengeTSO((StgTSO*)p);
            gct->failed_to_evac = rtsTrue; // always on the mutable list
            break;
        }
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
            }
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())
    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
    loop:
        // find a closure that is marked but not scavenged, and start
        // from there.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan,oldgen_scan_bd)) {
            oldgen_scan++;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?
            if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
                goto loop;
            }
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            goto linear_scan;
        }

        oldgen_scan_bd = oldgen_scan_bd->link;
        if (oldgen_scan_bd != NULL) {
            oldgen_scan = oldgen_scan_bd->start;
            goto loop;
        }
    }
}
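/* Illustrative sketch (hypothetical, simplified, not compiled): the
 * overflow fallback above in miniature.  Each object carries two mark
 * bits: the first says "live", the second says "already scavenged".  The
 * linear scan looks for an object whose first bit is set but whose second
 * is not, and feeds it back to the mark stack.
 */
#if 0
#include <stdio.h>

#define NOBJ 8

int main(void) {
    /* bits[2*i] = marked, bits[2*i+1] = scavenged */
    int bits[2 * NOBJ] = {0};
    bits[2 * 3] = 1;                        /* object 3: marked, not scavenged */
    bits[2 * 5] = bits[2 * 5 + 1] = 1;      /* object 5: marked and scavenged  */

    for (int i = 0; i < NOBJ; i++) {
        if (bits[2 * i] && !bits[2 * i + 1]) {
            printf("re-push object %d on the mark stack\n", i);
            bits[2 * i + 1] = 1;            /* record it as scavenged */
        }
    }
    return 0;
}
#endif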
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0:                       // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case STABLE_NAME:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
        // follow everything
    {
        StgPtr next, q = p;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
        // follow everything
    {
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }
    case TSO:
    {
        scavengeTSO((StgTSO*)p);
        gct->failed_to_evac = rtsTrue; // always on the mutable list
        break;
    }
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_step = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    {
        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects.  If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice.  evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
         */
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
            break;
        }
        evacuate(&((StgInd *)p)->indirectee);
    }
#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->steps[0].scan;
            bdescr *start_bd = gen->steps[0].scan_bd;
            nat size = 0;
            scavenge(&gen->steps[0]);
            if (start_bd != gen->steps[0].scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->steps[0].scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->steps[0].scan -
                    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
            } else {
                size = gen->steps[0].scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
void
scavenge_mutable_list(generation *gen)
{
    bdescr *bd;
    StgPtr p, q;

    bd = gen->saved_mut_list;

    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif

            // Check whether this object is "clean", that is it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
            // TSO, are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // Must be on the mutable list because its link
                    // field is dirty.
                    ASSERT(tso->flags & TSO_LINK_DIRTY);

                    scavenge_TSO_link(tso);
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen);
                        gct->failed_to_evac = rtsFalse;
                    } else {
                        tso->flags &= ~TSO_LINK_DIRTY;
                    }
                    continue;
                }
            }
            default:
                ;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen);
            }
        }
    }

    // free the old mut_list
    freeChain_sync(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
}
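/* Illustrative sketch (hypothetical structures, not RTS code, not
 * compiled): the mutable list as a remembered set.  An old-generation
 * object that might point into a younger generation is recorded; at GC
 * time the list is drained, each entry is scavenged, and only entries
 * that still point young are re-recorded, mirroring how scavenge_one()'s
 * return value is used above.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

typedef struct MutEntry { void *obj; struct MutEntry *next; } MutEntry;

static MutEntry *mut_list = NULL;

static void record_mutable(void *obj) {
    MutEntry *e = malloc(sizeof *e);
    e->obj = obj; e->next = mut_list; mut_list = e;
}

/* returns nonzero if obj still points into a younger generation */
static int scavenge_entry(void *obj) { (void)obj; return 0; }

static void scavenge_mut_list(void) {
    MutEntry *old = mut_list;
    mut_list = NULL;                    /* start a fresh list for this GC */
    while (old) {
        MutEntry *next = old->next;
        if (scavenge_entry(old->obj)) {
            record_mutable(old->obj);   /* still dirty: keep it remembered */
        }
        free(old);
        old = next;
    }
}

int main(void) { record_mutable((void *)0x1); scavenge_mut_list(); return 0; }
#endif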
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Static objects are roots in a major GC: we evacuate every pointer
   reachable from the static object list, moving each object from
   static_objects onto scavenged_static_objects as it is processed.
   -------------------------------------------------------------------------- */
static void
scavenge_static(void)
{
    StgClosure *p;
    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_step = &oldest_gen->steps[0];

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        /*
            if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
        */
        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;

        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}
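/* Illustrative sketch (hypothetical fields, not compiled): moving an
 * object from the static_objects list to the scavenged_static_objects
 * list by relinking its static-link field, as scavenge_static() does
 * above.  In the real collector, evacuating the object's pointers may
 * push more objects onto static_objects, which is why the loop re-reads
 * the list head every iteration.
 */
#if 0
#include <stdio.h>

typedef struct SObj { const char *name; struct SObj *static_link; } SObj;

int main(void) {
    SObj a = { "a", NULL }, b = { "b", &a };
    SObj *static_objects = &b, *scavenged_static_objects = NULL;

    while (static_objects != NULL) {
        SObj *p = static_objects;
        static_objects = p->static_link;          /* pop off the pending list */
        p->static_link = scavenged_static_objects;
        scavenged_static_objects = p;             /* push onto the done list  */
        printf("scavenged static object %s\n", p->name);
    }
    return 0;
}
#endif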
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */

static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
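/* Illustrative sketch (hypothetical visit function, not compiled): the
 * small-bitmap walk above.  Note the polarity: in stack and argument
 * bitmaps a CLEAR bit marks a pointer slot (the opposite of SRT bitmaps,
 * where a set bit marks a live entry), and the walk is bounded by an
 * explicit size rather than by the bitmap running out of bits.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void visit(uintptr_t *slot) { printf("pointer slot %p\n", (void *)slot); }

static uintptr_t *walk_small_bitmap(uintptr_t *p, unsigned size, uintptr_t bitmap) {
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            visit(p);            /* clear bit: this word is a heap pointer */
        }
        p++;
        bitmap >>= 1;
        size--;
    }
    return p;                    /* first word past the bitmapped area */
}

int main(void) {
    uintptr_t frame[3] = {0, 42, 0};
    walk_small_bitmap(frame, 3, 0x2);   /* slot 1 is a non-pointer */
    return 0;
}
#endif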
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info  = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //     enter T
            //     blackhole T
            //                  update T
            //     GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
            {
                nat type;
                const StgInfoTable *i;

                i = ((StgUpdateFrame *)p)->updatee->header.info;
                if (!IS_FORWARDING_PTR(i)) {
                    type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
                    if (type == IND) {
                        ((StgUpdateFrame *)p)->updatee->header.info =
                            (StgInfoTable *)&stg_IND_PERM_info;
                    } else if (type == IND_OLDGEN) {
                        ((StgUpdateFrame *)p)->updatee->header.info =
                            (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
                    }
                }
                evacuate(&((StgUpdateFrame *)p)->updatee);
                p += sizeofW(StgUpdateFrame);
                continue;
            }
            // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;

        case RET_BCO: {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }
            // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }
            // Dynamic bitmap: the mask is stored on the stack, and
            // there are a number of non-pointers followed by a number
            // of pointers above the bitmapped area.  (see StgMacros.h,
            // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}
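/* Illustrative sketch (hypothetical frame layout, not compiled): walking
 * a stack one activation record at a time, as scavenge_stack() does
 * above.  In the sketch the first word of each frame records its total
 * size, so the walk jumps from frame header to frame header instead of
 * inspecting every word blindly; in the real RTS that size comes from
 * the frame's info table.
 */
#if 0
#include <stdio.h>

static void walk_stack(unsigned *sp, unsigned *stack_end) {
    while (sp < stack_end) {
        unsigned frame_size = sp[0];    /* first word of the frame: its size */
        printf("frame at %p, %u words\n", (void *)sp, frame_size);
        sp += frame_size;               /* advance past this activation record */
    }
}

int main(void) {
    unsigned stack[6] = { 2, 0, 4, 0, 0, 0 };   /* two frames: 2 + 4 words */
    walk_stack(stack, stack + 6);
    return 0;
}
#endif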
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_step set by caller; similar games played with evac_step as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_step will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large (step_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_step = ws->step;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
        dbl_link_onto(bd, &ws->step->scavenged_large_objects);
        ws->step->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->step->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->step->gen);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
/* ----------------------------------------------------------------------------
   Scavenge a block.

   Scav.c-inc is textually included twice, producing the two variants
   called from scavenge_find_work() below: scavenge_block1() for the
   single-threaded GC and scavenge_block() for the parallel GC.  (The
   exact preprocessor switches selecting each variant are assumed here.)
   ------------------------------------------------------------------------- */

#undef  PARALLEL_GC
#include "Scav.c-inc"

#define PARALLEL_GC
#include "Scav.c-inc"
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest step that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that
   has work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
static rtsBool
scavenge_find_work (void)
{
    int s;
    step_workspace *ws;
    rtsBool did_something, did_anything;
    bdescr *bd;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            if (n_gc_threads == 1) {
                scavenge_block1(ws->todo_bd);
            } else {
                scavenge_block(ws->todo_bd);
            }
            did_something = rtsTrue;
            break;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
            break;
        }

        if ((bd = grab_todo_block(ws)) != NULL) {
            if (n_gc_threads == 1) {
                scavenge_block1(bd);
            } else {
                scavenge_block(bd);
            }
            did_something = rtsTrue;
            break;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }
    // only return when there is no more work to do

    return did_anything;
}
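/* Illustrative sketch (hypothetical workspace array, not compiled): the
 * oldest-first search order used by scavenge_find_work() above.  Taking
 * work from the oldest step first minimizes the chance of copying an
 * object into a young generation when it was about to be promoted
 * anyway.
 */
#if 0
#include <stdio.h>

#define NSTEPS 4

int main(void) {
    int has_work[NSTEPS] = { 1, 0, 0, 1 };  /* pending work per step, young..old */

    for (int s = NSTEPS - 1; s >= 0; s--) { /* oldest step first */
        if (has_work[s]) {
            printf("scavenging step %d\n", s);
            has_work[s] = 0;
        }
    }
    return 0;
}
#endif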
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;
}
rtsBool
any_work (void)
{
    int s;
    step_workspace *ws;

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        return rtsTrue;
    }

    // Check for global work in any step.  We don't need to check for
    // local work, because we have already exited scavenge_loop(),
    // which means there is no local work for this thread.
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];
        if (ws->todo_large_objects) return rtsTrue;
        if (ws->step->todos) return rtsTrue;
    }

    return rtsFalse;
}