/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size);
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )

    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
/* Evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
scavenge_srt (StgClosure **srt, nat srt_bitmap)

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );

    if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
        // Special case to handle references to closures hiding out in DLLs, since
        // double indirections are required to get at them.  The code generator knows
        // which is which when generating the SRT, so it stores the (indirect)
        // reference to the DLL closure in the table by first adding one to it.
        // We check for this here, and undo the addition before evacuating it.
        //
        // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
        // closure that's fixed at link-time, and no extra magic is required.
        if ( (unsigned long)(*srt) & 0x1 ) {
            evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));

        bitmap = bitmap >> 1;
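
        // Worked example: with srt_bitmap = 0x5 (binary 101), the loop
        // above evacuates SRT entries 0 and 2 and skips entry 1.  A
        // bitmap of (StgHalfWord)(-1), i.e. all ones, is the sentinel
        // checked at the top meaning the SRT is too big for an in-line
        // bitmap and lives in an StgLargeSRT instead.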
scavenge_thunk_srt(const StgInfoTable *info)

    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);

scavenge_fun_srt(const StgInfoTable *info)

    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
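
// SRTs only refer to static closures, and static objects are only
// traversed during a major GC, so there is nothing for either function
// to do in a minor collection -- hence the early return above.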
/* -----------------------------------------------------------------------------
   scavengeTSO
   -------------------------------------------------------------------------- */
scavengeTSO (StgTSO *tso)

    if (tso->what_next == ThreadRelocated) {
        // the only way this can happen is if the old TSO was on the
        // mutable list.  We might have other links to this defunct
        // TSO, so we must update its link field.
        evacuate((StgClosure**)&tso->_link);

    saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;

    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException

        evacuate(&tso->block_info.closure);

    evacuate((StgClosure **)&tso->blocked_exceptions);

    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->link);
    }

    // scavenge the current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

    if (gct->failed_to_evac) {
        tso->flags |= TSO_DIRTY;
    } else {
        tso->flags &= ~TSO_DIRTY;
    }

    gct->eager_promotion = saved_eager;
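
// The save/restore pattern above is used for every mutable object in
// this file.  A minimal sketch of the protocol (illustrative only;
// "obj" and "mutable_field" are made-up names):
#if 0
    rtsBool saved_eager = gct->eager_promotion;
    gct->eager_promotion = rtsFalse;    // children reached via a mutable
                                        // field must not be eagerly promoted
    evacuate(&obj->mutable_field);      // may set gct->failed_to_evac
    gct->eager_promotion = saved_eager;
    if (gct->failed_to_evac) {
        // obj still points into a younger generation: leave it marked
        // dirty so it stays on the mutable list for the next GC
    }
#endif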
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)

    switch (fun_info->f.fun_type) {

        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size   = BITMAP_SIZE(fun_info->f.b.bitmap);

        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);

        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size   = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);

        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        bitmap = bitmap >> 1;
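
        // Bitmap convention, by example: a clear bit means "this word is
        // a pointer".  With size = 3 and bitmap = 0b010, words 0 and 2
        // are evacuated and word 1 is skipped as a non-pointer.  (SRT
        // bitmaps above use the opposite sense: a set bit means "live
        // entry".)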
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)

    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);

    switch (fun_info->f.fun_type) {

        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);

        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);

        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);

        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);

        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        bitmap = bitmap >> 1;

scavenge_PAP (StgPAP *pap)

    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);

scavenge_AP (StgAP *ap)

    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
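
// An AP (thunk built by applying a function to arguments) and a PAP
// (partial application) share the same fun/payload layout, so a single
// payload scavenger serves both closure types.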
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
scavenge_mark_stack(void)

    step *saved_evac_step;

    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;

    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        switch (((volatile StgWord *)info)[1] & 0xffff) {
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);

            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);

            evacuate(&((StgClosure *)p)->payload[0]);

            scavenge_fun_srt(info);

            scavenge_thunk_srt(info);

            scavenge_fun_srt(info);

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }

        {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);

            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }

        case SE_CAF_BLACKHOLE:

        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);

        // A chunk of stack saved in a heap object
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);

            scavenge_PAP((StgPAP *)p);

            scavenge_AP((StgAP *)p);
        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
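
            // That is: FROZEN0 means "frozen, but still pointing into a
            // younger generation, so keep it on the mutable list"; plain
            // FROZEN arrays need no further attention from the GC.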

        case TSO:
            scavengeTSO((StgTSO*)p);
            gct->failed_to_evac = rtsTrue; // always on the mutable list
        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);

            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);

            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc->entries[0]);

            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc->next_entry_idx; i++, e++) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);

            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);

            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);

            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
            }
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));
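
        // Mark-bit convention used by the compacting collector: the bit
        // at q means "this closure is live", and the bit at q+1 means
        // "already scavenged".  The linear-scan code below tests
        // is_marked(oldgen_scan+1) for exactly this reason.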
    } // while (!mark_stack_empty())

    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }
    if (oldgen_scan_bd) {

        // push a new thing on the mark stack

        // find a closure that is marked but not scavenged, and start
        // scavenging from there.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan, oldgen_scan_bd)) {
            oldgen_scan++;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?
            if (is_marked(oldgen_scan+1, oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            }

            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
        }

        oldgen_scan_bd = oldgen_scan_bd->link;
        if (oldgen_scan_bd != NULL) {
            oldgen_scan = oldgen_scan_bd->start;
        }
    }
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
scavenge_one(StgPtr p)

    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }

    case FUN_1_0: // hardly worth specialising these guys

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {

        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }

    case SE_CAF_BLACKHOLE:

    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);

    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;

        p = scavenge_PAP((StgPAP *)p);

        p = scavenge_AP((StgAP *)p);
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }

    case TSO:
        scavengeTSO((StgTSO*)p);
        gct->failed_to_evac = rtsTrue; // always on the mutable list
    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);

        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);

        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);

        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc->entries[0]);

        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc->next_entry_idx; i++, e++) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);

        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);

        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }
    case IND_OLDGEN_PERM:

        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects.  If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice.  evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
         */
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
            break;
        }
        evacuate(&((StgInd *)p)->indirectee);
#if 0 && defined(DEBUG)
    if (RtsFlags.DebugFlags.gc)
    /* Debugging code to print out the size of the thing we just
     * evacuated
     */
    {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;

        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
            size += (P_)BLOCK_ROUND_UP(start) - start;
            start_bd = start_bd->link;
            while (start_bd != gen->steps[0].scan_bd) {
                size += BLOCK_SIZE_W;
                start_bd = start_bd->link;
            }
            size += gen->steps[0].scan -
                (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
            size = gen->steps[0].scan - start;
        }
        debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
    }
#endif
    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
scavenge_mutable_list(generation *gen)

    bd = gen->saved_mut_list;

    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {

            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }

            // Check whether this object is "clean", that is, it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
            // TSO, are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p, gen);
                continue;

            case TSO:
            {
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // A clean TSO: we don't have to traverse its
                    // stack.  However, we *do* follow the link field:
                    // we don't want to have to mark a TSO dirty just
                    // because we put it on a different queue.
                    if (tso->why_blocked != BlockedOnBlackHole) {
                        evacuate((StgClosure **)&tso->link);
                    }
                    recordMutableGen_GC((StgClosure *)p, gen);
                    continue;
                }
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p, gen);
            }
    // free the old mut_list
    freeChain_sync(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   Static objects waiting to be scavenged live on the static_objects
   list; as each one is dealt with it is moved onto the
   scavenged_static_objects list.  Evacuation can add new objects to
   static_objects, so we keep going until the list is empty.
   -------------------------------------------------------------------------- */
scavenge_static(void)

    const StgInfoTable *info;

    debugTrace(DEBUG_gc, "scavenging static objects");

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_step = &oldest_gen->steps[0];

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = gct->static_objects;
        if (p == END_OF_STATIC_LIST) {
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

        info = get_itbl((StgClosure *)p);
        if (info->type == RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        gct->static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = gct->scavenged_static_objects;
        gct->scavenged_static_objects = p;
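
        // The three assignments above are just a list-cons: p's static
        // link is made to point at the old head of
        // scavenged_static_objects, p becomes the new head, and
        // static_objects advances to the next pending object.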

        switch (info->type) {

        case IND_STATIC:
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p, oldest_gen);
            }
            break;

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )

    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }

STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)

        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        bitmap = bitmap >> 1;
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
scavenge_stack(StgPtr p, StgPtr stack_end)

    const StgRetInfoTable* info;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // short-circuiting it to the value underneath.
            type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
            if (type == IND) {
                ((StgUpdateFrame *)p)->updatee->header.info =
                    (StgInfoTable *)&stg_IND_PERM_info;
            } else if (type == IND_OLDGEN) {
                ((StgUpdateFrame *)p)->updatee->header.info =
                    (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
            }
            evacuate(&((StgUpdateFrame *)p)->updatee);
            p += sizeofW(StgUpdateFrame);
            continue;
        // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:

            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.

            p = scavenge_small_bitmap(p, size, bitmap);

            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);

            evacuate((StgClosure **)p);

            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
        // large bitmap (> 32 entries, or > 64 on a 64-bit machine)

            size = GET_LARGE_BITMAP(&info->i)->size;

            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);

            // and don't forget to follow the SRT
        // Dynamic bitmap: the mask is stored on the stack, and
        // there are a number of non-pointers followed by a number
        // of pointers above the bitmapped area.  (see StgMacros.h,

            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p      = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
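
            // Frame layout implied by the code above, from low to high
            // addresses: the liveness word, then RET_DYN_BITMAP_SIZE
            // bitmapped words, then RET_DYN_NONPTRS(dyn) non-pointers
            // plus the register save area, then RET_DYN_PTRS(dyn)
            // pointers.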
        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d",
                 (int)(info->i.type));
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_step set by caller; similar games played with evac_step as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_step will
  be zero.
  --------------------------------------------------------------------------- */
scavenge_large (step_workspace *ws)

    gct->evac_step = ws->step;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
        dbl_link_onto(bd, &ws->step->scavenged_large_objects);
        ws->step->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->step->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->step->gen);
            }
        }

        gct->scanned += closure_sizeW((StgClosure*)p);
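
        // Note the accounting: large objects are scavenged in place
        // (they are relinked between lists rather than copied), so the
        // per-object work recorded here is just the closure's size.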
/* ----------------------------------------------------------------------------
   Scavenge a block.

   Scav.c-inc is included twice: the two inclusions generate the
   scavenge_block1() (single GC thread) and scavenge_block() (parallel)
   variants used by scavenge_find_work() below.
   ------------------------------------------------------------------------- */

#include "Scav.c-inc"

#include "Scav.c-inc"
/* ----------------------------------------------------------------------------
   Look for work to do.

   We look for the oldest step that has either a todo block that can
   be scanned, or a block of work on the global queue that we can
   scan.

   It is important to take work from the *oldest* generation that has
   work available, because that minimizes the likelihood of
   evacuating objects into a young generation when they should have
   been eagerly promoted.  This really does make a difference (the
   cacheprof benchmark is one that is affected).

   We also want to scan the todo block if possible before grabbing
   work from the global queue, the reason being that we don't want to
   steal work from the global queue and starve other threads if there
   is other work we can usefully be doing.
   ------------------------------------------------------------------------- */
scavenge_find_work (void)

    rtsBool did_something, did_anything;

    gct->scav_find_work++;

    did_anything = rtsFalse;

loop:
    did_something = rtsFalse;
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];

        gct->scan_bd = NULL;

        // If we have a scan block with some work to do,
        // scavenge everything up to the free pointer.
        if (ws->todo_bd->u.scan < ws->todo_free)
        {
            if (n_gc_threads == 1) {
                scavenge_block1(ws->todo_bd);
            } else {
                scavenge_block(ws->todo_bd);
            }
            did_something = rtsTrue;
        }

        // If we have any large objects to scavenge, do them now.
        if (ws->todo_large_objects) {
            scavenge_large(ws);
            did_something = rtsTrue;
        }

        if ((bd = grab_todo_block(ws)) != NULL) {
            if (n_gc_threads == 1) {
                scavenge_block1(bd);
            } else {
                scavenge_block(bd);
            }
            did_something = rtsTrue;
        }
    }

    if (did_something) {
        did_anything = rtsTrue;
        goto loop;
    }

    // only return when there is no more work to do
    return did_anything;
/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */
scavenge_loop(void)

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
        scavenge_static();
        work_to_do = rtsTrue;
    }

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // queues.
    if (scavenge_find_work()) goto loop;

    if (work_to_do) goto loop;

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        return rtsTrue;
    }

    // Check for global work in any step.  We don't need to check for
    // local work, because we have already exited scavenge_loop(),
    // which means there is no local work for this thread.
    for (s = total_steps-1; s >= 0; s--) {
        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
        ws = &gct->steps[s];
        if (ws->todo_large_objects) return rtsTrue;
        if (ws->step->todos) return rtsTrue;
    }