1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2006
5 * Generational garbage collector: scavenging functions
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
25 #include "LdvProfile.h"
28 static void scavenge_stack (StgPtr p, StgPtr stack_end);
30 static void scavenge_large_bitmap (StgPtr p,
31 StgLargeBitmap *large_bitmap,
35 /* Similar to scavenge_large_bitmap(), but we don't write back the
36 * pointers we get back from evacuate().
39 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
46 bitmap = large_srt->l.bitmap[b];
47 size = (nat)large_srt->l.size;
48 p = (StgClosure **)large_srt->srt;
49 for (i = 0; i < size; ) {
50 if ((bitmap & 1) != 0) {
55 if (i % BITS_IN(W_) == 0) {
57 bitmap = large_srt->l.bitmap[b];
64 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
65 * srt field in the info table. That's ok, because we'll
66 * never dereference it.
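 *
 * Roughly (an illustrative sketch only; the real loop below also has a
 * special case for DLL-indirected entries on Windows): a SET bit marks
 * a live SRT entry that must be evacuated, and a bitmap of
 * (StgHalfWord)(-1) selects the large-bitmap variant above:
 *
 *     p = srt;
 *     if (bitmap == (StgHalfWord)(-1)) {
 *         scavenge_large_srt_bitmap((StgLargeSRT *)srt);
 *         return;
 *     }
 *     while (bitmap != 0) {
 *         if ((bitmap & 1) != 0) evacuate(p);
 *         p++;
 *         bitmap = bitmap >> 1;
 *     }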
69 scavenge_srt (StgClosure **srt, nat srt_bitmap)
77 if (bitmap == (StgHalfWord)(-1)) {
78 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
83 if ((bitmap & 1) != 0) {
84 #if defined(__PIC__) && defined(mingw32_TARGET_OS)
85 // Special-case to handle references to closures hiding out in DLLs, since
86 // double indirections are required to get at those. The code generator knows
87 // which is which when generating the SRT, so it stores the (indirect)
88 // reference to the DLL closure in the table by first adding one to it.
89 // We check for this here, and undo the addition before evacuating it.
91 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
92 // closure that's fixed at link-time, and no extra magic is required.
93 if ( (unsigned long)(*srt) & 0x1 ) {
94 evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
103 bitmap = bitmap >> 1;
109 scavenge_thunk_srt(const StgInfoTable *info)
111 StgThunkInfoTable *thunk_info;
113 if (!major_gc) return;
115 thunk_info = itbl_to_thunk_itbl(info);
116 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
120 scavenge_fun_srt(const StgInfoTable *info)
122 StgFunInfoTable *fun_info;
124 if (!major_gc) return;
126 fun_info = itbl_to_fun_itbl(info);
127 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
130 /* -----------------------------------------------------------------------------
132 -------------------------------------------------------------------------- */
135 scavengeTSO (StgTSO *tso)
137 rtsBool saved_eager = gct->eager_promotion;
139 gct->eager_promotion = rtsFalse;
141 if ( tso->why_blocked == BlockedOnMVar
142 || tso->why_blocked == BlockedOnBlackHole
143 || tso->why_blocked == BlockedOnException
145 evacuate(&tso->block_info.closure);
147 evacuate((StgClosure **)&tso->blocked_exceptions);
149 // We don't always chase the link field: TSOs on the blackhole
150 // queue are not automatically alive, so the link field is a
151 // "weak" pointer in that case.
152 if (tso->why_blocked != BlockedOnBlackHole) {
153 evacuate((StgClosure **)&tso->link);
156 // scavenge the current transaction record
157 evacuate((StgClosure **)&tso->trec);
159 // scavenge this thread's stack
160 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
162 if (gct->failed_to_evac) {
163 tso->flags |= TSO_DIRTY;
165 tso->flags &= ~TSO_DIRTY;
168 gct->eager_promotion = saved_eager;
171 /* -----------------------------------------------------------------------------
172 Blocks of function args occur on the stack (at the top) and
174 -------------------------------------------------------------------------- */
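// Note on bitmap conventions (summarising the loops below): in the
// argument/stack bitmaps walked here and in scavenge_stack(), a CLEAR
// bit marks a pointer word that must be evacuated, whereas in the SRT
// bitmaps above (scavenge_srt) a SET bit marks the live entry.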
177 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
184 switch (fun_info->f.fun_type) {
186 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
187 size = BITMAP_SIZE(fun_info->f.b.bitmap);
190 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
191 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
195 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
196 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
199 if ((bitmap & 1) == 0) {
200 evacuate((StgClosure **)p);
203 bitmap = bitmap >> 1;
212 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
216 StgFunInfoTable *fun_info;
218 fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
219 ASSERT(fun_info->i.type != PAP);
222 switch (fun_info->f.fun_type) {
224 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
227 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
231 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
235 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
238 if ((bitmap & 1) == 0) {
239 evacuate((StgClosure **)p);
242 bitmap = bitmap >> 1;
251 scavenge_PAP (StgPAP *pap)
254 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
258 scavenge_AP (StgAP *ap)
261 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
264 /* -----------------------------------------------------------------------------
265 Scavenge everything on the mark stack.
267 This is slightly different from scavenge():
268 - we don't walk linearly through the objects, so the scavenger
269 doesn't need to advance the pointer on to the next object.
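
   Roughly: pop a closure off the mark stack, evacuate the pointers it
   contains (turning off eager promotion while scavenging mutable
   objects), put it on a mutable list (recordMutableGen_GC) if anything
   failed to evacuate, and finally set the "scavenged" mark bit on the
   word after the closure (the mark(q+1, ...) call below).  If the mark
   stack overflowed at some point, we fall back to a linear scan of the
   old generation's blocks afterwards.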
270 -------------------------------------------------------------------------- */
273 scavenge_mark_stack(void)
277 step *saved_evac_step;
279 gct->evac_step = &oldest_gen->steps[0];
280 saved_evac_step = gct->evac_step;
283 while (!mark_stack_empty()) {
284 p = pop_mark_stack();
286 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
287 info = get_itbl((StgClosure *)p);
290 switch (((volatile StgWord *)info)[1] & 0xffff) {
295 rtsBool saved_eager_promotion = gct->eager_promotion;
297 StgMVar *mvar = ((StgMVar *)p);
298 gct->eager_promotion = rtsFalse;
299 evacuate((StgClosure **)&mvar->head);
300 evacuate((StgClosure **)&mvar->tail);
301 evacuate((StgClosure **)&mvar->value);
302 gct->eager_promotion = saved_eager_promotion;
304 if (gct->failed_to_evac) {
305 mvar->header.info = &stg_MVAR_DIRTY_info;
307 mvar->header.info = &stg_MVAR_CLEAN_info;
313 scavenge_fun_srt(info);
314 evacuate(&((StgClosure *)p)->payload[1]);
315 evacuate(&((StgClosure *)p)->payload[0]);
319 scavenge_thunk_srt(info);
320 evacuate(&((StgThunk *)p)->payload[1]);
321 evacuate(&((StgThunk *)p)->payload[0]);
325 evacuate(&((StgClosure *)p)->payload[1]);
326 evacuate(&((StgClosure *)p)->payload[0]);
331 scavenge_fun_srt(info);
332 evacuate(&((StgClosure *)p)->payload[0]);
337 scavenge_thunk_srt(info);
338 evacuate(&((StgThunk *)p)->payload[0]);
343 evacuate(&((StgClosure *)p)->payload[0]);
348 scavenge_fun_srt(info);
353 scavenge_thunk_srt(info);
361 scavenge_fun_srt(info);
368 scavenge_thunk_srt(info);
369 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
370 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
371 evacuate((StgClosure **)p);
383 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
384 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
385 evacuate((StgClosure **)p);
391 StgBCO *bco = (StgBCO *)p;
392 evacuate((StgClosure **)&bco->instrs);
393 evacuate((StgClosure **)&bco->literals);
394 evacuate((StgClosure **)&bco->ptrs);
399 // don't need to do anything here: the only possible case
400 // is that we're in a 1-space compacting collector, with
401 // no "old" generation.
405 case IND_OLDGEN_PERM:
406 evacuate(&((StgInd *)p)->indirectee);
410 case MUT_VAR_DIRTY: {
411 rtsBool saved_eager_promotion = gct->eager_promotion;
413 gct->eager_promotion = rtsFalse;
414 evacuate(&((StgMutVar *)p)->var);
415 gct->eager_promotion = saved_eager_promotion;
417 if (gct->failed_to_evac) {
418 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
420 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
426 case SE_CAF_BLACKHOLE:
434 StgSelector *s = (StgSelector *)p;
435 evacuate(&s->selectee);
439 // A chunk of stack saved in a heap object
442 StgAP_STACK *ap = (StgAP_STACK *)p;
445 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
450 scavenge_PAP((StgPAP *)p);
454 scavenge_AP((StgAP *)p);
457 case MUT_ARR_PTRS_CLEAN:
458 case MUT_ARR_PTRS_DIRTY:
464 // We don't eagerly promote objects pointed to by a mutable
465 // array, but if we find the array only points to objects in
466 // the same or an older generation, we mark it "clean" and
467 // avoid traversing it during minor GCs.
468 saved_eager = gct->eager_promotion;
469 gct->eager_promotion = rtsFalse;
470 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
471 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
472 evacuate((StgClosure **)p);
474 gct->eager_promotion = saved_eager;
476 if (gct->failed_to_evac) {
477 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
479 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
482 gct->failed_to_evac = rtsTrue; // mutable anyhow.
486 case MUT_ARR_PTRS_FROZEN:
487 case MUT_ARR_PTRS_FROZEN0:
492 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
493 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
494 evacuate((StgClosure **)p);
497 // If we're going to put this object on the mutable list, then
498 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
499 if (gct->failed_to_evac) {
500 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
502 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
509 scavengeTSO((StgTSO*)p);
510 gct->failed_to_evac = rtsTrue; // always on the mutable list
514 case TVAR_WATCH_QUEUE:
516 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
518 evacuate((StgClosure **)&wq->closure);
519 evacuate((StgClosure **)&wq->next_queue_entry);
520 evacuate((StgClosure **)&wq->prev_queue_entry);
521 gct->evac_step = saved_evac_step;
522 gct->failed_to_evac = rtsTrue; // mutable
528 StgTVar *tvar = ((StgTVar *) p);
530 evacuate((StgClosure **)&tvar->current_value);
531 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
532 gct->evac_step = saved_evac_step;
533 gct->failed_to_evac = rtsTrue; // mutable
540 StgTRecChunk *tc = ((StgTRecChunk *) p);
541 TRecEntry *e = &(tc -> entries[0]);
543 evacuate((StgClosure **)&tc->prev_chunk);
544 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
545 evacuate((StgClosure **)&e->tvar);
546 evacuate((StgClosure **)&e->expected_value);
547 evacuate((StgClosure **)&e->new_value);
549 gct->evac_step = saved_evac_step;
550 gct->failed_to_evac = rtsTrue; // mutable
556 StgTRecHeader *trec = ((StgTRecHeader *) p);
558 evacuate((StgClosure **)&trec->enclosing_trec);
559 evacuate((StgClosure **)&trec->current_chunk);
560 evacuate((StgClosure **)&trec->invariants_to_check);
561 gct->evac_step = saved_evac_step;
562 gct->failed_to_evac = rtsTrue; // mutable
566 case ATOMIC_INVARIANT:
568 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
570 evacuate(&invariant->code);
571 evacuate((StgClosure **)&invariant->last_execution);
572 gct->evac_step = saved_evac_step;
573 gct->failed_to_evac = rtsTrue; // mutable
577 case INVARIANT_CHECK_QUEUE:
579 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
581 evacuate((StgClosure **)&queue->invariant);
582 evacuate((StgClosure **)&queue->my_execution);
583 evacuate((StgClosure **)&queue->next_queue_entry);
584 gct->evac_step = saved_evac_step;
585 gct->failed_to_evac = rtsTrue; // mutable
590 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
594 if (gct->failed_to_evac) {
595 gct->failed_to_evac = rtsFalse;
596 if (gct->evac_step) {
597 recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
601 // mark the next bit to indicate "scavenged"
602 mark(q+1, Bdescr(q));
604 } // while (!mark_stack_empty())
606 // start a new linear scan if the mark stack overflowed at some point
607 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
608 debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
609 mark_stack_overflowed = rtsFalse;
610 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
611 oldgen_scan = oldgen_scan_bd->start;
614 if (oldgen_scan_bd) {
615 // push a new thing on the mark stack
617 // find a closure that is marked but not scavenged, and start
619 while (oldgen_scan < oldgen_scan_bd->free
620 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
624 if (oldgen_scan < oldgen_scan_bd->free) {
626 // already scavenged?
627 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
628 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
631 push_mark_stack(oldgen_scan);
632 // ToDo: bump the linear scan by the actual size of the object
633 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
637 oldgen_scan_bd = oldgen_scan_bd->link;
638 if (oldgen_scan_bd != NULL) {
639 oldgen_scan = oldgen_scan_bd->start;
645 /* -----------------------------------------------------------------------------
648 This is used for objects that are temporarily marked as mutable
649 because they contain old-to-new generation pointers. Only certain
650 objects can have this property.
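
   The boolean result (see the callers in scavenge_mutable_list() and
   scavenge_large()) reports whether some pointer failed to be promoted,
   i.e. whether the object still has to stay on a mutable list.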
651 -------------------------------------------------------------------------- */
654 scavenge_one(StgPtr p)
656 const StgInfoTable *info;
657 step *saved_evac_step = gct->evac_step;
660 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
661 info = get_itbl((StgClosure *)p);
663 switch (info->type) {
668 rtsBool saved_eager_promotion = gct->eager_promotion;
670 StgMVar *mvar = ((StgMVar *)p);
671 gct->eager_promotion = rtsFalse;
672 evacuate((StgClosure **)&mvar->head);
673 evacuate((StgClosure **)&mvar->tail);
674 evacuate((StgClosure **)&mvar->value);
675 gct->eager_promotion = saved_eager_promotion;
677 if (gct->failed_to_evac) {
678 mvar->header.info = &stg_MVAR_DIRTY_info;
680 mvar->header.info = &stg_MVAR_CLEAN_info;
694 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
695 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
696 evacuate((StgClosure **)q);
702 case FUN_1_0: // hardly worth specialising these guys
718 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
719 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
720 evacuate((StgClosure **)q);
726 case MUT_VAR_DIRTY: {
728 rtsBool saved_eager_promotion = gct->eager_promotion;
730 gct->eager_promotion = rtsFalse;
731 evacuate(&((StgMutVar *)p)->var);
732 gct->eager_promotion = saved_eager_promotion;
734 if (gct->failed_to_evac) {
735 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
737 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
743 case SE_CAF_BLACKHOLE:
750 StgSelector *s = (StgSelector *)p;
751 evacuate(&s->selectee);
757 StgAP_STACK *ap = (StgAP_STACK *)p;
760 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
761 p = (StgPtr)ap->payload + ap->size;
766 p = scavenge_PAP((StgPAP *)p);
770 p = scavenge_AP((StgAP *)p);
777 case MUT_ARR_PTRS_CLEAN:
778 case MUT_ARR_PTRS_DIRTY:
783 // We don't eagerly promote objects pointed to by a mutable
784 // array, but if we find the array only points to objects in
785 // the same or an older generation, we mark it "clean" and
786 // avoid traversing it during minor GCs.
787 saved_eager = gct->eager_promotion;
788 gct->eager_promotion = rtsFalse;
790 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
791 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
792 evacuate((StgClosure **)p);
794 gct->eager_promotion = saved_eager;
796 if (gct->failed_to_evac) {
797 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
799 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
802 gct->failed_to_evac = rtsTrue;
806 case MUT_ARR_PTRS_FROZEN:
807 case MUT_ARR_PTRS_FROZEN0:
812 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
813 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
814 evacuate((StgClosure **)p);
817 // If we're going to put this object on the mutable list, then
818 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
819 if (gct->failed_to_evac) {
820 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
822 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
829 scavengeTSO((StgTSO*)p);
830 gct->failed_to_evac = rtsTrue; // always on the mutable list
834 case TVAR_WATCH_QUEUE:
836 StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
838 evacuate((StgClosure **)&wq->closure);
839 evacuate((StgClosure **)&wq->next_queue_entry);
840 evacuate((StgClosure **)&wq->prev_queue_entry);
841 gct->evac_step = saved_evac_step;
842 gct->failed_to_evac = rtsTrue; // mutable
848 StgTVar *tvar = ((StgTVar *) p);
850 evacuate((StgClosure **)&tvar->current_value);
851 evacuate((StgClosure **)&tvar->first_watch_queue_entry);
852 gct->evac_step = saved_evac_step;
853 gct->failed_to_evac = rtsTrue; // mutable
859 StgTRecHeader *trec = ((StgTRecHeader *) p);
861 evacuate((StgClosure **)&trec->enclosing_trec);
862 evacuate((StgClosure **)&trec->current_chunk);
863 evacuate((StgClosure **)&trec->invariants_to_check);
864 gct->evac_step = saved_evac_step;
865 gct->failed_to_evac = rtsTrue; // mutable
872 StgTRecChunk *tc = ((StgTRecChunk *) p);
873 TRecEntry *e = &(tc -> entries[0]);
875 evacuate((StgClosure **)&tc->prev_chunk);
876 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
877 evacuate((StgClosure **)&e->tvar);
878 evacuate((StgClosure **)&e->expected_value);
879 evacuate((StgClosure **)&e->new_value);
881 gct->evac_step = saved_evac_step;
882 gct->failed_to_evac = rtsTrue; // mutable
886 case ATOMIC_INVARIANT:
888 StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
890 evacuate(&invariant->code);
891 evacuate((StgClosure **)&invariant->last_execution);
892 gct->evac_step = saved_evac_step;
893 gct->failed_to_evac = rtsTrue; // mutable
897 case INVARIANT_CHECK_QUEUE:
899 StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
901 evacuate((StgClosure **)&queue->invariant);
902 evacuate((StgClosure **)&queue->my_execution);
903 evacuate((StgClosure **)&queue->next_queue_entry);
904 gct->evac_step = saved_evac_step;
905 gct->failed_to_evac = rtsTrue; // mutable
910 case IND_OLDGEN_PERM:
913 /* Careful here: a THUNK can be on the mutable list because
914 * it contains pointers to young gen objects. If such a thunk
915 * is updated, the IND_OLDGEN will be added to the mutable
916 * list again, and we'll scavenge it twice. evacuate()
917 * doesn't check whether the object has already been
918 * evacuated, so we perform that check here.
920 StgClosure *q = ((StgInd *)p)->indirectee;
921 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
924 evacuate(&((StgInd *)p)->indirectee);
927 #if 0 && defined(DEBUG)
928 if (RtsFlags.DebugFlags.gc)
929 /* Debugging code to print out the size of the thing we just
933 StgPtr start = gen->steps[0].scan;
934 bdescr *start_bd = gen->steps[0].scan_bd;
936 scavenge(&gen->steps[0]);
937 if (start_bd != gen->steps[0].scan_bd) {
938 size += (P_)BLOCK_ROUND_UP(start) - start;
939 start_bd = start_bd->link;
940 while (start_bd != gen->steps[0].scan_bd) {
941 size += BLOCK_SIZE_W;
942 start_bd = start_bd->link;
944 size += gen->steps[0].scan -
945 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
947 size = gen->steps[0].scan - start;
949 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
955 barf("scavenge_one: strange object %d", (int)(info->type));
958 no_luck = gct->failed_to_evac;
959 gct->failed_to_evac = rtsFalse;
963 /* -----------------------------------------------------------------------------
964 Scavenging mutable lists.
966 We treat the mutable list of each generation > N (i.e. all the
967 generations older than the one being collected) as roots. We also
968 remove non-mutable objects from the mutable list at this point.
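
   ("Removing" here simply means not calling recordMutableGen_GC() for
   an object again: anything scavenge_one() reports as clean is not
   copied onto the generation's new mutable list.)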
969 -------------------------------------------------------------------------- */
972 scavenge_mutable_list(generation *gen)
977 bd = gen->saved_mut_list;
979 gct->evac_step = &gen->steps[0];
980 for (; bd != NULL; bd = bd->link) {
981 for (q = bd->start; q < bd->free; q++) {
983 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
986 switch (get_itbl((StgClosure *)p)->type) {
988 barf("MUT_VAR_CLEAN on mutable list");
990 mutlist_MUTVARS++; break;
991 case MUT_ARR_PTRS_CLEAN:
992 case MUT_ARR_PTRS_DIRTY:
993 case MUT_ARR_PTRS_FROZEN:
994 case MUT_ARR_PTRS_FROZEN0:
995 mutlist_MUTARRS++; break;
997 barf("MVAR_CLEAN on mutable list");
999 mutlist_MVARS++; break;
1001 mutlist_OTHERS++; break;
1005 // Check whether this object is "clean", that is it
1006 // definitely doesn't point into a young generation.
1007 // Clean objects don't need to be scavenged. Some clean
1008 // objects (MUT_VAR_CLEAN) are not kept on the mutable
1009 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
1010 // TSO, are always on the mutable list.
1012 switch (get_itbl((StgClosure *)p)->type) {
1013 case MUT_ARR_PTRS_CLEAN:
1014 recordMutableGen_GC((StgClosure *)p,gen);
1017 StgTSO *tso = (StgTSO *)p;
1018 if ((tso->flags & TSO_DIRTY) == 0) {
1019 // A clean TSO: we don't have to traverse its
1020 // stack. However, we *do* follow the link field:
1021 // we don't want to have to mark a TSO dirty just
1022 // because we put it on a different queue.
1023 if (tso->why_blocked != BlockedOnBlackHole) {
1024 evacuate((StgClosure **)&tso->link);
1026 recordMutableGen_GC((StgClosure *)p,gen);
1034 if (scavenge_one(p)) {
1035 // didn't manage to promote everything, so put the
1036 // object back on the list.
1037 recordMutableGen_GC((StgClosure *)p,gen);
1042 // free the old mut_list
1043 freeChain_sync(gen->saved_mut_list);
1044 gen->saved_mut_list = NULL;
1047 /* -----------------------------------------------------------------------------
1048 Scavenging the static objects.
1050 Static objects evacuated during this GC live on the static_objects
1051 list; we take each one off that list, scavenge it, and move it onto
1052 the scavenged_static_objects list, evacuating into the oldest generation.
1053 -------------------------------------------------------------------------- */
1056 scavenge_static(void)
1059 const StgInfoTable *info;
1061 /* Always evacuate straight to the oldest generation for static
1063 gct->evac_step = &oldest_gen->steps[0];
1065 /* keep going until we've scavenged all the objects on the linked
1070 ACQUIRE_SPIN_LOCK(&static_objects_sync);
1072 /* get the next static object from the list. Remember, there might
1073 * be more stuff on this list after each evacuation...
1074 * (static_objects is a global)
1077 if (p == END_OF_STATIC_LIST) {
1078 RELEASE_SPIN_LOCK(&static_objects_sync);
1082 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1085 if (info->type==RBH)
1086 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
1088 // make sure the info pointer is into text space
1090 /* Take this object *off* the static_objects list,
1091 * and put it on the scavenged_static_objects list.
1093 static_objects = *STATIC_LINK(info,p);
1094 *STATIC_LINK(info,p) = scavenged_static_objects;
1095 scavenged_static_objects = p;
1097 RELEASE_SPIN_LOCK(&static_objects_sync);
1099 switch (info -> type) {
1103 StgInd *ind = (StgInd *)p;
1104 evacuate(&ind->indirectee);
1106 /* might fail to evacuate it, in which case we have to pop it
1107 * back on the mutable list of the oldest generation. We
1108 * leave it *on* the scavenged_static_objects list, though,
1109 * in case we visit this object again.
1111 if (gct->failed_to_evac) {
1112 gct->failed_to_evac = rtsFalse;
1113 recordMutableGen_GC((StgClosure *)p,oldest_gen);
1119 scavenge_thunk_srt(info);
1123 scavenge_fun_srt(info);
1130 next = (P_)p->payload + info->layout.payload.ptrs;
1131 // evacuate the pointers
1132 for (q = (P_)p->payload; q < next; q++) {
1133 evacuate((StgClosure **)q);
1139 barf("scavenge_static: strange closure %d", (int)(info->type));
1142 ASSERT(gct->failed_to_evac == rtsFalse);
1146 /* -----------------------------------------------------------------------------
1147 scavenge a chunk of memory described by a bitmap
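
   The bitmap is an array of words: after every BITS_IN(W_) entries we
   move on to the next word (the i % BITS_IN(W_) test below).  As in
   scavenge_arg_block(), a clear bit marks a pointer word that needs to
   be evacuated.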
1148 -------------------------------------------------------------------------- */
1151 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
1157 bitmap = large_bitmap->bitmap[b];
1158 for (i = 0; i < size; ) {
1159 if ((bitmap & 1) == 0) {
1160 evacuate((StgClosure **)p);
1164 if (i % BITS_IN(W_) == 0) {
1166 bitmap = large_bitmap->bitmap[b];
1168 bitmap = bitmap >> 1;
1173 STATIC_INLINE StgPtr
1174 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
1177 if ((bitmap & 1) == 0) {
1178 evacuate((StgClosure **)p);
1181 bitmap = bitmap >> 1;
1187 /* -----------------------------------------------------------------------------
1188 scavenge_stack walks over a section of stack and evacuates all the
1189 objects pointed to by it. We can use the same code for walking
1190 AP_STACK_UPDs, since these are just sections of copied stack.
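
   Each iteration of the loop below deals with one activation record:
   update frames, small- and large-bitmap return frames, RET_BCO,
   RET_DYN and RET_FUN frames, using the bitmap helpers above to locate
   the pointer words in each frame's payload.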
1191 -------------------------------------------------------------------------- */
1194 scavenge_stack(StgPtr p, StgPtr stack_end)
1196 const StgRetInfoTable* info;
1201 * Each time around this loop, we are looking at a chunk of stack
1202 * that starts with an activation record.
1205 while (p < stack_end) {
1206 info = get_ret_itbl((StgClosure *)p);
1208 switch (info->i.type) {
1211 // In SMP, we can get update frames that point to indirections
1212 // when two threads evaluate the same thunk. We do attempt to
1213 // discover this situation in threadPaused(), but it's
1214 // possible that the following sequence occurs:
1223 // Now T is an indirection, and the update frame is already
1224 // marked on A's stack, so we won't traverse it again in
1225 // threadPaused(). We could traverse the whole stack again
1226 // before GC, but that seems like overkill.
1228 // Scavenging this update frame as normal would be disastrous;
1229 // the updatee would end up pointing to the value. So we turn
1230 // the indirection into an IND_PERM, so that evacuate will
1231 // copy the indirection into the old generation instead of
1235 type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
1237 ((StgUpdateFrame *)p)->updatee->header.info =
1238 (StgInfoTable *)&stg_IND_PERM_info;
1239 } else if (type == IND_OLDGEN) {
1240 ((StgUpdateFrame *)p)->updatee->header.info =
1241 (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
1243 evacuate(&((StgUpdateFrame *)p)->updatee);
1244 p += sizeofW(StgUpdateFrame);
1248 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
1249 case CATCH_STM_FRAME:
1250 case CATCH_RETRY_FRAME:
1251 case ATOMICALLY_FRAME:
1255 bitmap = BITMAP_BITS(info->i.layout.bitmap);
1256 size = BITMAP_SIZE(info->i.layout.bitmap);
1257 // NOTE: the payload starts immediately after the info-ptr; we
1258 // don't have an StgHeader in the same sense as a heap closure.
1260 p = scavenge_small_bitmap(p, size, bitmap);
1264 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
1272 evacuate((StgClosure **)p);
1275 size = BCO_BITMAP_SIZE(bco);
1276 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
1281 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
1286 size = GET_LARGE_BITMAP(&info->i)->size;
1288 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
1290 // and don't forget to follow the SRT
1294 // Dynamic bitmap: the mask is stored on the stack, and
1295 // there are a number of non-pointers followed by a number
1296 // of pointers above the bitmapped area. (see StgMacros.h,
1301 dyn = ((StgRetDyn *)p)->liveness;
1303 // traverse the bitmap first
1304 bitmap = RET_DYN_LIVENESS(dyn);
1305 p = (P_)&((StgRetDyn *)p)->payload[0];
1306 size = RET_DYN_BITMAP_SIZE;
1307 p = scavenge_small_bitmap(p, size, bitmap);
1309 // skip over the non-ptr words
1310 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
1312 // follow the ptr words
1313 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
1314 evacuate((StgClosure **)p);
1322 StgRetFun *ret_fun = (StgRetFun *)p;
1323 StgFunInfoTable *fun_info;
1325 evacuate(&ret_fun->fun);
1326 fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
1327 p = scavenge_arg_block(fun_info, ret_fun->payload);
1332 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
1337 /*-----------------------------------------------------------------------------
1338 scavenge the large object list.
1340 evac_step set by caller; similar games played with evac_step as with
1341 scavenge() - see comment at the top of scavenge(). Most large
1342 objects are (repeatedly) mutable, so most of the time evac_step will
1344 --------------------------------------------------------------------------- */
1347 scavenge_large (step_workspace *ws)
1352 gct->evac_step = ws->stp;
1354 bd = ws->todo_large_objects;
1356 for (; bd != NULL; bd = ws->todo_large_objects) {
1358 // take this object *off* the large objects list and put it on
1359 // the scavenged large objects list. This is so that we can
1360 // treat new_large_objects as a stack and push new objects on
1361 // the front when evacuating.
1362 ws->todo_large_objects = bd->link;
1364 ACQUIRE_SPIN_LOCK(&ws->stp->sync_large_objects);
1365 dbl_link_onto(bd, &ws->stp->scavenged_large_objects);
1366 ws->stp->n_scavenged_large_blocks += bd->blocks;
1367 RELEASE_SPIN_LOCK(&ws->stp->sync_large_objects);
1370 if (scavenge_one(p)) {
1371 if (ws->stp->gen_no > 0) {
1372 recordMutableGen_GC((StgClosure *)p, ws->stp->gen);
1378 /* ----------------------------------------------------------------------------
1380 ------------------------------------------------------------------------- */
1383 #include "Scav.c-inc"
1385 #include "Scav.c-inc"
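// Scav.c-inc is included twice, presumably under different preprocessor
// settings, so that this file provides both block-scavenging variants
// (scavenge_block() and scavenge_block0()) used below.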
1387 /* ----------------------------------------------------------------------------
1388 Find the oldest full block to scavenge, and scavenge it.
1389 ------------------------------------------------------------------------- */
1392 scavenge_find_global_work (void)
1400 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1401 for (s = generations[g].n_steps-1; s >= 0; s--) {
1402 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
1405 ws = &gct->steps[g][s];
1407 // If we have any large objects to scavenge, do them now.
1408 if (ws->todo_large_objects) {
1413 if ((bd = grab_todo_block(ws)) != NULL) {
1414 // no need to assign this to ws->scan_bd, we're going
1415 // to scavenge the whole thing and then push it on
1416 // our scavd list. This saves pushing out the
1417 // scan_bd block, which might be partial.
1419 scavenge_block0(bd, bd->start);
1421 scavenge_block(bd, bd->start);
1423 push_scan_block(bd, ws);
1427 if (flag) return rtsTrue;
1433 /* ----------------------------------------------------------------------------
1434 Look for local work to do.
1436 We can have outstanding scavenging to do if, for any of the workspaces,
1438 - the scan block is the same as the todo block, and new objects
1439 have been evacuated to the todo block.
1441 - the scan block *was* the same as the todo block, but the todo
1442 block filled up and a new one has been allocated.
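
   If either case applies, we scavenge the block up to its free pointer
   and return rtsTrue so that scavenge_loop() will come round again.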
1443 ------------------------------------------------------------------------- */
1446 scavenge_find_local_work (void)
1453 for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
1454 for (s = generations[g].n_steps; --s >= 0; ) {
1455 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
1458 ws = &gct->steps[g][s];
1460 if (ws->todo_bd != NULL)
1462 ws->todo_bd->free = ws->todo_free;
1465 // If we have a todo block and no scan block, start
1466 // scanning the todo block.
1467 if (ws->scan_bd == NULL && ws->todo_bd != NULL)
1469 ws->scan_bd = ws->todo_bd;
1470 ws->scan = ws->scan_bd->start;
1473 // If we have a scan block with some work to do,
1474 // scavenge everything up to the free pointer.
1475 if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
1478 scavenge_block0(ws->scan_bd, ws->scan);
1480 scavenge_block(ws->scan_bd, ws->scan);
1482 ws->scan = ws->scan_bd->free;
1486 if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free
1487 && ws->scan_bd != ws->todo_bd)
1489 // we're not going to evac any more objects into
1490 // this block, so push it now.
1491 push_scan_block(ws->scan_bd, ws);
1494 // we might be able to scan the todo block now. But
1495 // don't do it right away: there might be full blocks
1496 // waiting to be scanned as a result of scavenge_block above.
1500 if (flag) return rtsTrue;
1506 /* ----------------------------------------------------------------------------
1507 Scavenge until we can't find anything more to scavenge.
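
   The loop below repeatedly tries, in order: static objects (major GC
   only), the mark stack of a compacted generation, whole todo blocks
   from any step (global work), and finally the partly-filled local
   blocks, until a complete pass finds nothing left to do.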
1508 ------------------------------------------------------------------------- */
1516 work_to_do = rtsFalse;
1518 // scavenge static objects
1519 if (major_gc && static_objects != END_OF_STATIC_LIST) {
1520 IF_DEBUG(sanity, checkStaticObjects(static_objects));
1524 // scavenge objects in compacted generation
1525 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1526 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1527 scavenge_mark_stack();
1528 work_to_do = rtsTrue;
1531 // Order is important here: we want to deal in full blocks as
1532 // much as possible, so go for global work in preference to
1533 // local work. Only if all the global work has been exhausted
1534 // do we start scavenging the fragments of blocks in the local
1536 if (scavenge_find_global_work()) goto loop;
1537 if (scavenge_find_local_work()) goto loop;
1539 if (work_to_do) goto loop;
1550 // scavenge static objects
1551 if (major_gc && static_objects != END_OF_STATIC_LIST) {
1555 // scavenge objects in compacted generation
1556 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
1557 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
1561 // Check for global work in any step. We don't need to check for
1562 // local work, because we have already exited scavenge_loop(),
1563 // which means there is no local work for this thread.
1564 for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
1565 for (s = generations[g].n_steps-1; s >= 0; s--) {
1566 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
1569 ws = &gct->steps[g][s];
1570 if (ws->todo_large_objects) return rtsTrue;
1571 if (ws->stp->todos) return rtsTrue;