/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector: scavenging functions
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/
#include "Rts.h"
#include "RtsFlags.h"
#include "Storage.h"
#include "GC.h"
#include "GCUtils.h"
#include "Compact.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "LdvProfile.h"
#include "Sanity.h"

static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size );

static void scavenge_block (bdescr *bd, StgPtr scan);

/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
        if ((bitmap & 1) != 0) {
            evacuate(p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_srt->l.bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

/* Evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's OK, because we'll
 * never dereference it.
 */
STATIC_INLINE void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
    nat bitmap;
    StgClosure **p;

    bitmap = srt_bitmap;
    p = srt;

    if (bitmap == (StgHalfWord)(-1)) {
        scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
        return;
    }

    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
#if defined(__PIC__) && defined(mingw32_TARGET_OS)
            // Special case to handle references to closures hiding out in DLLs,
            // since double indirections are required to get at those. The code
            // generator knows which is which when generating the SRT, so it
            // stores the (indirect) reference to the DLL closure in the table
            // by first adding one to it. We check for this here, and undo the
            // addition before evacuating it.
            //
            // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
            // closure that's fixed at link-time, and no extra magic is required.
            if ( (unsigned long)(*srt) & 0x1 ) {
                evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
            } else {
                evacuate(p);
            }
#else
            evacuate(p);
#endif
        }
        p++;
        bitmap = bitmap >> 1;
    }
}

STATIC_INLINE void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
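
/* Worked example (a sketch, not part of the collector): how a small SRT
 * bitmap drives the loop in scavenge_srt() above.  Bit i of srt_bitmap set
 * means SRT entry i is live and must be evacuated; an all-ones half-word
 * instead selects the large-bitmap path.  The bitmap value below is
 * hypothetical, and example_srt_decode() is illustrative only.
 */
#if 0
static void
example_srt_decode (StgClosure **srt)
{
    // bitmap 0x5 = binary 101: entries 0 and 2 are live, entry 1 is not.
    StgWord bitmap = 0x5;
    StgClosure **p = srt;
    while (bitmap != 0) {
        if ((bitmap & 1) != 0) {
            evacuate(p);        // live SRT entry: keep its closure alive
        }
        p++;
        bitmap = bitmap >> 1;
    }
}
#endif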

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    if (   tso->why_blocked == BlockedOnMVar
        || tso->why_blocked == BlockedOnBlackHole
        || tso->why_blocked == BlockedOnException
        ) {
        evacuate(&tso->block_info.closure);
    }
    evacuate((StgClosure **)&tso->blocked_exceptions);

    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
        evacuate((StgClosure **)&tso->link);
    }

    // scavenge the current transaction record
    evacuate((StgClosure **)&tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}

/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        size = BITMAP_SIZE(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        size = GET_FUN_LARGE_BITMAP(fun_info)->size;
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
        size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}
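
/* Note the bitmap polarity here, which is the opposite of the SRT bitmaps
 * above: in argument/stack bitmaps a 0 bit means "this word is a pointer"
 * (and is evacuated), while a 1 bit means "non-pointer, skip".  A sketch
 * with a hypothetical bitmap, not part of the collector:
 */
#if 0
static void
example_arg_bitmap (StgPtr p)
{
    // size 3, bitmap 0x2 = binary 010: words 0 and 2 are pointers,
    // word 1 is a raw non-pointer (e.g. an unboxed Int#).
    nat size = 3;
    StgWord bitmap = 0x2;
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);   // pointer word
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
}
#endif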

STATIC_INLINE StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
        bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
        goto small_bitmap;
    case ARG_GEN_BIG:
        scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
        p += size;
        break;
    case ARG_BCO:
        scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
        p += size;
        break;
    default:
        bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
        while (size > 0) {
            if ((bitmap & 1) == 0) {
                evacuate((StgClosure **)p);
            }
            p++;
            bitmap = bitmap >> 1;
            size--;
        }
        break;
    }
    return p;
}

STATIC_INLINE StgPtr
scavenge_PAP (StgPAP *pap)
{
    evacuate(&pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    evacuate(&ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
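
/* PAPs and APs share the same payload layout -- a function closure followed
 * by a block of pending arguments -- which is why both wrappers above defer
 * to scavenge_PAP_payload(), differing only in the type of the container.
 */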

/* -----------------------------------------------------------------------------
   Scavenge a block from the given scan pointer up to bd->free.

   evac_step is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_step back to zero if we're
   scavenging a mutable object where eager promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge_block (bdescr *bd, StgPtr scan)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;

    p = scan;

    debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
               bd->start, bd->gen_no, bd->step->no, scan);

    gct->evac_step = bd->step;
    saved_evac_step = gct->evac_step;
    gct->failed_to_evac = rtsFalse;

    // we might be evacuating into the very object that we're
    // scavenging, so we have to check the real bd->free pointer each
    // time around the loop.
    while (p < bd->free) {

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        ASSERT(gct->thunk_selector_depth == 0);
        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            p += sizeofW(StgMVar);
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_1_0:
            scavenge_fun_srt(info);
            /* fall through */
        case CONSTR_1_0:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_1:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 1;
            break;

        case FUN_0_1:
            scavenge_fun_srt(info);
            /* fall through */
        case CONSTR_0_1:
            p += sizeofW(StgHeader) + 1;
            break;

        case THUNK_0_2:
            scavenge_thunk_srt(info);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_0_2:
            scavenge_fun_srt(info);
            /* fall through */
        case CONSTR_0_2:
            p += sizeofW(StgHeader) + 2;
            break;

        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            p += sizeofW(StgThunk) + 2;
            break;

        case FUN_1_1:
            scavenge_fun_srt(info);
            /* fall through */
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            p += sizeofW(StgHeader) + 2;
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            p += info->layout.payload.nptrs;
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            p += bco_sizeW(bco);
            break;
        }

        case IND_PERM:
            if (bd->gen_no != 0) {
#ifdef PROFILING
                // @LDV profiling
                // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
                // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
                LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
                // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
                SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

                // We pretend that p has just been created.
                LDV_RECORD_CREATE((StgClosure *)p);
            }
            /* fall through */
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            p += sizeofW(StgInd);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            p += sizeofW(StgMutVar);
            break;
        }

        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
            p += BLACKHOLE_sizeW();
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            p += THUNK_SELECTOR_sizeW();
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            p = (StgPtr)ap->payload + ap->size;
            break;
        }

        case PAP:
            p = scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            p = scavenge_AP((StgAP *)p);
            break;

        case ARR_WORDS:
            // nothing to follow
            p += arr_words_sizeW((StgArrWords *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            rtsBool saved_eager = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            scavengeTSO(tso);
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                tso->flags |= TSO_DIRTY;
            } else {
                tso->flags &= ~TSO_DIRTY;
            }

            gct->failed_to_evac = rtsTrue; // always on the mutable list
            p += tso_sizeW(tso);
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVarWatchQueue);
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTVar);
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecHeader);
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc->entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc->next_entry_idx; i++, e++) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgTRecChunk);
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgAtomicInvariant);
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            p += sizeofW(StgInvariantCheckQueue);
            break;
        }

        default:
            barf("scavenge: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        /*
         * We need to record the current object on the mutable list if
         *  (a) It is actually mutable, or
         *  (b) It contains pointers to a younger generation.
         * Case (b) arises if we didn't manage to promote everything that
         * the current object points to into the current generation.
         */
        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (bd->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
            }
        }
    }

    debugTrace(DEBUG_gc, "   scavenged %ld bytes", (bd->free - scan) * sizeof(W_));
}
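
/* scavenge_block() is called from two places below: scavenge_find_global_work()
 * hands it a whole todo block, and scavenge_find_local_work() uses it to scan
 * the partially-filled block we are currently evacuating into -- which is why
 * the loop above re-reads bd->free on every iteration.
 */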

/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    step *saved_evac_step;

    gct->evac_step = &oldest_gen->steps[0];
    saved_evac_step = gct->evac_step;

linear_scan:
    while (!mark_stack_empty()) {
        p = pop_mark_stack();

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl((StgClosure *)p);

        q = p;
        switch (info->type) {

        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
            evacuate((StgClosure **)&mvar->tail);
            evacuate((StgClosure **)&mvar->value);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                mvar->header.info = &stg_MVAR_DIRTY_info;
            } else {
                mvar->header.info = &stg_MVAR_CLEAN_info;
            }
            break;
        }

        case FUN_2_0:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_2_0:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[1]);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_2_0:
            evacuate(&((StgClosure *)p)->payload[1]);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case FUN_1_1:
            scavenge_fun_srt(info);
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case THUNK_1_0:
        case THUNK_1_1:
            scavenge_thunk_srt(info);
            evacuate(&((StgThunk *)p)->payload[0]);
            break;

        case CONSTR_1_0:
        case CONSTR_1_1:
            evacuate(&((StgClosure *)p)->payload[0]);
            break;

        case FUN_0_1:
        case FUN_0_2:
            scavenge_fun_srt(info);
            break;

        case THUNK_0_1:
        case THUNK_0_2:
            scavenge_thunk_srt(info);
            break;

        case CONSTR_0_1:
        case CONSTR_0_2:
            break;

        case FUN:
            scavenge_fun_srt(info);
            goto gen_obj;

        case THUNK:
        {
            StgPtr end;

            scavenge_thunk_srt(info);
            end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        gen_obj:
        case CONSTR:
        case WEAK:
        case STABLE_NAME:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                evacuate((StgClosure **)p);
            }
            break;
        }

        case BCO: {
            StgBCO *bco = (StgBCO *)p;
            evacuate((StgClosure **)&bco->instrs);
            evacuate((StgClosure **)&bco->literals);
            evacuate((StgClosure **)&bco->ptrs);
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;

        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            evacuate(&((StgInd *)p)->indirectee);
            break;

        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
            rtsBool saved_eager_promotion = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
            }
            break;
        }

        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            break;

        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            evacuate(&s->selectee);
            break;
        }

        // A chunk of stack saved in a heap object
        case AP_STACK:
        {
            StgAP_STACK *ap = (StgAP_STACK *)p;

            evacuate(&ap->fun);
            scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
            break;
        }

        case PAP:
            scavenge_PAP((StgPAP *)p);
            break;

        case AP:
            scavenge_AP((StgAP *)p);
            break;

        case MUT_ARR_PTRS_CLEAN:
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
            StgPtr next;
            rtsBool saved_eager;

            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
            saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }

            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }

        case MUT_ARR_PTRS_FROZEN:
        case MUT_ARR_PTRS_FROZEN0:
            // follow everything
        {
            StgPtr next;

            next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
            for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
                evacuate((StgClosure **)p);
            }

            // If we're going to put this object on the mutable list, then
            // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
            if (gct->failed_to_evac) {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
            } else {
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
            }
            break;
        }

        case TSO:
        {
            StgTSO *tso = (StgTSO *)p;
            rtsBool saved_eager = gct->eager_promotion;

            gct->eager_promotion = rtsFalse;
            scavengeTSO(tso);
            gct->eager_promotion = saved_eager;

            if (gct->failed_to_evac) {
                tso->flags |= TSO_DIRTY;
            } else {
                tso->flags &= ~TSO_DIRTY;
            }

            gct->failed_to_evac = rtsTrue; // always on the mutable list
            break;
        }

        case TVAR_WATCH_QUEUE:
        {
            StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&wq->closure);
            evacuate((StgClosure **)&wq->next_queue_entry);
            evacuate((StgClosure **)&wq->prev_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TVAR:
        {
            StgTVar *tvar = ((StgTVar *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tvar->current_value);
            evacuate((StgClosure **)&tvar->first_watch_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_CHUNK:
        {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc->entries[0]);
            gct->evac_step = 0;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc->next_entry_idx; i++, e++) {
                evacuate((StgClosure **)&e->tvar);
                evacuate((StgClosure **)&e->expected_value);
                evacuate((StgClosure **)&e->new_value);
            }
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case TREC_HEADER:
        {
            StgTRecHeader *trec = ((StgTRecHeader *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&trec->enclosing_trec);
            evacuate((StgClosure **)&trec->current_chunk);
            evacuate((StgClosure **)&trec->invariants_to_check);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case ATOMIC_INVARIANT:
        {
            StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
            gct->evac_step = 0;
            evacuate(&invariant->code);
            evacuate((StgClosure **)&invariant->last_execution);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        case INVARIANT_CHECK_QUEUE:
        {
            StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
            gct->evac_step = 0;
            evacuate((StgClosure **)&queue->invariant);
            evacuate((StgClosure **)&queue->my_execution);
            evacuate((StgClosure **)&queue->next_queue_entry);
            gct->evac_step = saved_evac_step;
            gct->failed_to_evac = rtsTrue; // mutable
            break;
        }

        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
        }

        if (gct->failed_to_evac) {
            gct->failed_to_evac = rtsFalse;
            if (gct->evac_step) {
                recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
            }
        }

        // mark the next bit to indicate "scavenged"
        mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())

    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
        debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
        mark_stack_overflowed = rtsFalse;
        oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
        oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
        // push a new thing on the mark stack
    loop:
        // find a closure that is marked but not scavenged, and start
        // from there.
        while (oldgen_scan < oldgen_scan_bd->free
               && !is_marked(oldgen_scan, oldgen_scan_bd)) {
            oldgen_scan++;
        }

        if (oldgen_scan < oldgen_scan_bd->free) {

            // already scavenged?
            if (is_marked(oldgen_scan+1, oldgen_scan_bd)) {
                oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
                goto loop;
            }
            push_mark_stack(oldgen_scan);
            // ToDo: bump the linear scan by the actual size of the object
            oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
            goto linear_scan;
        }

        oldgen_scan_bd = oldgen_scan_bd->link;
        if (oldgen_scan_bd != NULL) {
            oldgen_scan = oldgen_scan_bd->start;
            goto loop;
        }
    }
}
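
/* A sketch of the mark-bit convention assumed by the linear scan above
 * (illustrative only): the compacting collector keeps a mark bit per word,
 * and each object uses two consecutive bits -- the bit for the object's
 * first word means "marked", and the bit for the following word means
 * "already scavenged" (set by the mark(q+1, ...) call in the loop above).
 */
#if 0
static rtsBool
example_needs_scavenging (StgPtr obj, bdescr *bd)
{
    // marked but not yet scavenged?
    return (is_marked(obj, bd) && !is_marked(obj+1, bd)) ? rtsTrue : rtsFalse;
}
#endif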

/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */

static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    step *saved_evac_step = gct->evac_step;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case MVAR_CLEAN:
    case MVAR_DIRTY:
    {
        rtsBool saved_eager_promotion = gct->eager_promotion;

        StgMVar *mvar = ((StgMVar *)p);
        gct->eager_promotion = rtsFalse;
        evacuate((StgClosure **)&mvar->head);
        evacuate((StgClosure **)&mvar->tail);
        evacuate((StgClosure **)&mvar->value);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            mvar->header.info = &stg_MVAR_DIRTY_info;
        } else {
            mvar->header.info = &stg_MVAR_CLEAN_info;
        }
        break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
        StgPtr q, end;

        end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case FUN:
    case FUN_1_0:                       // hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case IND_PERM:
    {
        StgPtr q, end;

        end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
            evacuate((StgClosure **)q);
        }
        break;
    }

    case MUT_VAR_CLEAN:
    case MUT_VAR_DIRTY: {
        StgPtr q = p;
        rtsBool saved_eager_promotion = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        evacuate(&((StgMutVar *)p)->var);
        gct->eager_promotion = saved_eager_promotion;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
        }
        break;
    }

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        break;

    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        evacuate(&s->selectee);
        break;
    }

    case AP_STACK:
    {
        StgAP_STACK *ap = (StgAP_STACK *)p;

        evacuate(&ap->fun);
        scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
        p = (StgPtr)ap->payload + ap->size;
        break;
    }

    case PAP:
        p = scavenge_PAP((StgPAP *)p);
        break;

    case AP:
        p = scavenge_AP((StgAP *)p);
        break;

    case ARR_WORDS:
        // nothing to follow
        break;

    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    {
        StgPtr next, q;
        rtsBool saved_eager;

        // We don't eagerly promote objects pointed to by a mutable
        // array, but if we find the array only points to objects in
        // the same or an older generation, we mark it "clean" and
        // avoid traversing it during minor GCs.
        saved_eager = gct->eager_promotion;
        gct->eager_promotion = rtsFalse;
        q = p;
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }

        gct->failed_to_evac = rtsTrue;
        break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
        // follow everything
        StgPtr next, q = p;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            evacuate((StgClosure **)p);
        }

        // If we're going to put this object on the mutable list, then
        // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
        if (gct->failed_to_evac) {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
        } else {
            ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
        }
        break;
    }

    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        rtsBool saved_eager = gct->eager_promotion;

        gct->eager_promotion = rtsFalse;
        scavengeTSO(tso);
        gct->eager_promotion = saved_eager;

        if (gct->failed_to_evac) {
            tso->flags |= TSO_DIRTY;
        } else {
            tso->flags &= ~TSO_DIRTY;
        }

        gct->failed_to_evac = rtsTrue; // always on the mutable list
        break;
    }

    case TVAR_WATCH_QUEUE:
    {
        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&wq->closure);
        evacuate((StgClosure **)&wq->next_queue_entry);
        evacuate((StgClosure **)&wq->prev_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TVAR:
    {
        StgTVar *tvar = ((StgTVar *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tvar->current_value);
        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_HEADER:
    {
        StgTRecHeader *trec = ((StgTRecHeader *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&trec->enclosing_trec);
        evacuate((StgClosure **)&trec->current_chunk);
        evacuate((StgClosure **)&trec->invariants_to_check);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case TREC_CHUNK:
    {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc->entries[0]);
        gct->evac_step = 0;
        evacuate((StgClosure **)&tc->prev_chunk);
        for (i = 0; i < tc->next_entry_idx; i++, e++) {
            evacuate((StgClosure **)&e->tvar);
            evacuate((StgClosure **)&e->expected_value);
            evacuate((StgClosure **)&e->new_value);
        }
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case ATOMIC_INVARIANT:
    {
        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
        gct->evac_step = 0;
        evacuate(&invariant->code);
        evacuate((StgClosure **)&invariant->last_execution);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case INVARIANT_CHECK_QUEUE:
    {
        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
        gct->evac_step = 0;
        evacuate((StgClosure **)&queue->invariant);
        evacuate((StgClosure **)&queue->my_execution);
        evacuate((StgClosure **)&queue->next_queue_entry);
        gct->evac_step = saved_evac_step;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
    }

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
    {
        /* Careful here: a THUNK can be on the mutable list because
         * it contains pointers to young gen objects.  If such a thunk
         * is updated, the IND_OLDGEN will be added to the mutable
         * list again, and we'll scavenge it twice.  evacuate()
         * doesn't check whether the object has already been
         * evacuated, so we perform that check here.
         */
        StgClosure *q = ((StgInd *)p)->indirectee;
        if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
            break;
        }
        evacuate(&((StgInd *)p)->indirectee);
    }

#if 0 && defined(DEBUG)
        if (RtsFlags.DebugFlags.gc)
        /* Debugging code to print out the size of the thing we just
         * promoted
         */
        {
            StgPtr start = gen->steps[0].scan;
            bdescr *start_bd = gen->steps[0].scan_bd;
            nat size = 0;
            scavenge(&gen->steps[0]);
            if (start_bd != gen->steps[0].scan_bd) {
                size += (P_)BLOCK_ROUND_UP(start) - start;
                start_bd = start_bd->link;
                while (start_bd != gen->steps[0].scan_bd) {
                    size += BLOCK_SIZE_W;
                    start_bd = start_bd->link;
                }
                size += gen->steps[0].scan -
                    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
            } else {
                size = gen->steps[0].scan - start;
            }
            debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
        }
#endif
        break;

    default:
        barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = gct->failed_to_evac;
    gct->failed_to_evac = rtsFalse;
    return (no_luck);
}
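
/* Callers use scavenge_one()'s return value as a "keep me on the mutable
 * list" flag: rtsTrue means some pointer could not be promoted out of a
 * younger generation, so the object must stay on the mutable list and be
 * visited again at the next GC.  For example (see scavenge_mutable_list
 * below):
 *
 *     if (scavenge_one(p)) {
 *         recordMutableGen_GC((StgClosure *)p, gen);
 *     }
 */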

/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */

void
scavenge_mutable_list(generation *gen)
{
    bdescr *bd;
    StgPtr p, q;

    bd = gen->saved_mut_list;

    gct->evac_step = &gen->steps[0];
    for (; bd != NULL; bd = bd->link) {
        for (q = bd->start; q < bd->free; q++) {
            p = (StgPtr)*q;
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));

#ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
                barf("MUT_VAR_CLEAN on mutable list");
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
            case MUT_ARR_PTRS_DIRTY:
            case MUT_ARR_PTRS_FROZEN:
            case MUT_ARR_PTRS_FROZEN0:
                mutlist_MUTARRS++; break;
            case MVAR_CLEAN:
                barf("MVAR_CLEAN on mutable list");
            case MVAR_DIRTY:
                mutlist_MVARS++; break;
            default:
                mutlist_OTHERS++; break;
            }
#endif

            // Check whether this object is "clean", that is, it
            // definitely doesn't point into a young generation.
            // Clean objects don't need to be scavenged.  Some clean
            // objects (MUT_VAR_CLEAN) are not kept on the mutable
            // list at all; others, such as MUT_ARR_PTRS_CLEAN and
            // TSO, are always on the mutable list.
            //
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_ARR_PTRS_CLEAN:
                recordMutableGen_GC((StgClosure *)p,gen);
                continue;
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if ((tso->flags & TSO_DIRTY) == 0) {
                    // A clean TSO: we don't have to traverse its
                    // stack.  However, we *do* follow the link field:
                    // we don't want to have to mark a TSO dirty just
                    // because we put it on a different queue.
                    if (tso->why_blocked != BlockedOnBlackHole) {
                        evacuate((StgClosure **)&tso->link);
                    }
                    recordMutableGen_GC((StgClosure *)p,gen);
                    continue;
                }
            }
            default:
                break;
            }

            if (scavenge_one(p)) {
                // didn't manage to promote everything, so put the
                // object back on the list.
                recordMutableGen_GC((StgClosure *)p,gen);
            }
        }
    }

    // free the old mut_list
    freeChain(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
}
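
/* The CLEAN/DIRTY info tables used above implement the generational write
 * barrier: mutating a clean object (e.g. writing to a MUT_VAR_CLEAN) flips
 * it to DIRTY and puts it on the mutable list, and scavenging flips it back
 * to CLEAN when all of its pointers were promoted successfully.
 */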

/* -----------------------------------------------------------------------------
   Scavenging the static objects.

   We scavenge the static object list, evacuating everything the static
   objects point to.  Each object is removed from static_objects and put
   on scavenged_static_objects as it is processed, because evacuating a
   static object may push more objects onto static_objects.
   -------------------------------------------------------------------------- */

static void
scavenge_static(void)
{
    StgClosure* p;
    const StgInfoTable *info;

    /* Always evacuate straight to the oldest generation for static
     * objects */
    gct->evac_step = &oldest_gen->steps[0];

    /* keep going until we've scavenged all the objects on the linked
       list... */

    while (1) {

        ACQUIRE_SPIN_LOCK(&static_objects_sync);

        /* get the next static object from the list.  Remember, there might
         * be more stuff on this list after each evacuation...
         * (static_objects is a global)
         */
        p = static_objects;
        if (p == END_OF_STATIC_LIST) {
            RELEASE_SPIN_LOCK(&static_objects_sync);
            break;
        }

        ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
        info = get_itbl(p);
        if (info->type==RBH)
            info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

        // make sure the info pointer is into text space

        /* Take this object *off* the static_objects list,
         * and put it on the scavenged_static_objects list.
         */
        static_objects = *STATIC_LINK(info,p);
        *STATIC_LINK(info,p) = scavenged_static_objects;
        scavenged_static_objects = p;

        RELEASE_SPIN_LOCK(&static_objects_sync);

        switch (info -> type) {

        case IND_STATIC:
        {
            StgInd *ind = (StgInd *)p;
            evacuate(&ind->indirectee);

            /* might fail to evacuate it, in which case we have to pop it
             * back on the mutable list of the oldest generation.  We
             * leave it *on* the scavenged_static_objects list, though,
             * in case we visit this object again.
             */
            if (gct->failed_to_evac) {
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,oldest_gen);
            }
            break;
        }

        case THUNK_STATIC:
            scavenge_thunk_srt(info);
            break;

        case FUN_STATIC:
            scavenge_fun_srt(info);
            break;

        case CONSTR_STATIC:
        {
            StgPtr q, next;

            next = (P_)p->payload + info->layout.payload.ptrs;
            // evacuate the pointers
            for (q = (P_)p->payload; q < next; q++) {
                evacuate((StgClosure **)q);
            }
            break;
        }

        default:
            barf("scavenge_static: strange closure %d", (int)(info->type));
        }

        ASSERT(gct->failed_to_evac == rtsFalse);
    }
}

/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */

static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        i++;
        p++;
        if (i % BITS_IN(W_) == 0) {
            b++;
            bitmap = large_bitmap->bitmap[b];
        } else {
            bitmap = bitmap >> 1;
        }
    }
}

STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evacuate((StgClosure **)p);
        }
        p++;
        bitmap = bitmap >> 1;
        size--;
    }
    return p;
}
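
/* A small bitmap packs its size and its bits into a single StgWord: the
 * size lives in the low-order bits and the bitmap proper is shifted above
 * it, which is why callers split a bitmap field with BITMAP_SIZE() and
 * BITMAP_BITS() before walking it.  A hypothetical sketch, not part of the
 * collector:
 */
#if 0
static StgPtr
example_walk_small_bitmap (StgPtr p, StgWord packed)
{
    nat     size   = BITMAP_SIZE(packed);   // low bits: number of words
    StgWord bitmap = BITMAP_BITS(packed);   // high bits: pointerhood mask
    return scavenge_small_bitmap(p, size, bitmap);
}
#endif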

/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    const StgRetInfoTable* info;
    StgWord bitmap;
    nat size;

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with an activation record.
     */
    while (p < stack_end) {
        info  = get_ret_itbl((StgClosure *)p);

        switch (info->i.type) {

        case UPDATE_FRAME:
            // In SMP, we can get update frames that point to indirections
            // when two threads evaluate the same thunk.  We do attempt to
            // discover this situation in threadPaused(), but it's
            // possible that the following sequence occurs:
            //
            //        A             B
            //                  enter T
            //        enter T
            //        blackhole T
            //                  update T
            //        GC
            //
            // Now T is an indirection, and the update frame is already
            // marked on A's stack, so we won't traverse it again in
            // threadPaused().  We could traverse the whole stack again
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
            // the updatee would end up pointing to the value.  So we turn
            // the indirection into an IND_PERM, so that evacuate will
            // copy the indirection into the old generation instead of
            // discarding it.
            {
                nat type;
                type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
                if (type == IND) {
                    ((StgUpdateFrame *)p)->updatee->header.info =
                        (StgInfoTable *)&stg_IND_PERM_info;
                } else if (type == IND_OLDGEN) {
                    ((StgUpdateFrame *)p)->updatee->header.info =
                        (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
                }
                evacuate(&((StgUpdateFrame *)p)->updatee);
                p += sizeofW(StgUpdateFrame);
                continue;
            }

            // small bitmap (< 32 entries, or 64 on a 64-bit machine)
        case CATCH_STM_FRAME:
        case CATCH_RETRY_FRAME:
        case ATOMICALLY_FRAME:
        case STOP_FRAME:
        case CATCH_FRAME:
        case RET_SMALL:
            bitmap = BITMAP_BITS(info->i.layout.bitmap);
            size   = BITMAP_SIZE(info->i.layout.bitmap);
            // NOTE: the payload starts immediately after the info-ptr, we
            // don't have an StgHeader in the same sense as a heap closure.
            p++;
            p = scavenge_small_bitmap(p, size, bitmap);

        follow_srt:
            scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
            continue;

        case RET_BCO: {
            StgBCO *bco;
            nat size;

            p++;
            evacuate((StgClosure **)p);
            bco = (StgBCO *)*p;
            p++;
            size = BCO_BITMAP_SIZE(bco);
            scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
            p += size;
            continue;
        }

            // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
        case RET_BIG:
        {
            nat size;

            size = GET_LARGE_BITMAP(&info->i)->size;
            p++;
            scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
            p += size;
            // and don't forget to follow the SRT
            goto follow_srt;
        }

            // Dynamic bitmap: the mask is stored on the stack, and
            // there are a number of non-pointers followed by a number
            // of pointers above the bitmapped area.  (see StgMacros.h,
            // HEAP_CHK_GEN).
        case RET_DYN:
        {
            StgWord dyn;
            dyn = ((StgRetDyn *)p)->liveness;

            // traverse the bitmap first
            bitmap = RET_DYN_LIVENESS(dyn);
            p      = (P_)&((StgRetDyn *)p)->payload[0];
            size   = RET_DYN_BITMAP_SIZE;
            p = scavenge_small_bitmap(p, size, bitmap);

            // skip over the non-ptr words
            p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

            // follow the ptr words
            for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
                evacuate((StgClosure **)p);
                p++;
            }
            continue;
        }

        case RET_FUN:
        {
            StgRetFun *ret_fun = (StgRetFun *)p;
            StgFunInfoTable *fun_info;

            evacuate(&ret_fun->fun);
            fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
            p = scavenge_arg_block(fun_info, ret_fun->payload);
            goto follow_srt;
        }

        default:
            barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
        }
    }
}

/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_step set by caller; similar games played with evac_step as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_step will
  be zero.
  --------------------------------------------------------------------------- */

static void
scavenge_large (step_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_step = ws->stp;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        ACQUIRE_SPIN_LOCK(&ws->stp->sync_large_objects);
        dbl_link_onto(bd, &ws->stp->scavenged_large_objects);
        ws->stp->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->stp->sync_large_objects);

        p = bd->start;
        if (scavenge_one(p)) {
            if (ws->stp->gen_no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->stp->gen);
            }
        }
    }
}

/* ----------------------------------------------------------------------------
   Find the oldest full block to scavenge, and scavenge it.
   ------------------------------------------------------------------------- */

static rtsBool
scavenge_find_global_work (void)
{
    bdescr *bd;
    int g, s;
    rtsBool flag;
    step_workspace *ws;

    flag = rtsFalse;
    for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
        for (s = generations[g].n_steps; --s >= 0; ) {
            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
                continue;
            }
            ws = &gct->steps[g][s];

            // If we have any large objects to scavenge, do them now.
            if (ws->todo_large_objects) {
                scavenge_large(ws);
                flag = rtsTrue;
            }

            if ((bd = grab_todo_block(ws)) != NULL) {
                // no need to assign this to ws->scan_bd, we're going
                // to scavenge the whole thing and then push it on
                // our scavd list.  This saves pushing out the
                // scan_bd block, which might be partial.
                scavenge_block(bd, bd->start);
                push_scan_block(bd, ws);
                return rtsTrue;
            }

            if (flag) return rtsTrue;
        }
    }
    return rtsFalse;
}

/* ----------------------------------------------------------------------------
   Look for local work to do.

   We can have outstanding scavenging to do if, for any of the workspaces,

     - the scan block is the same as the todo block, and new objects
       have been evacuated to the todo block.

     - the scan block *was* the same as the todo block, but the todo
       block filled up and a new one has been allocated.
   ------------------------------------------------------------------------- */

static rtsBool
scavenge_find_local_work (void)
{
    int g, s;
    step_workspace *ws;
    rtsBool flag;

    flag = rtsFalse;
    for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
        for (s = generations[g].n_steps; --s >= 0; ) {
            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
                continue;
            }
            ws = &gct->steps[g][s];

            // If we have a todo block and no scan block, start
            // scanning the todo block.
            if (ws->scan_bd == NULL && ws->todo_bd != NULL)
            {
                ws->scan_bd = ws->todo_bd;
                ws->scan = ws->scan_bd->start;
            }

            // If we have a scan block with some work to do,
            // scavenge everything up to the free pointer.
            if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
            {
                scavenge_block(ws->scan_bd, ws->scan);
                ws->scan = ws->scan_bd->free;
                flag = rtsTrue;
            }

            if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free
                && ws->scan_bd != ws->todo_bd)
            {
                // we're not going to evac any more objects into
                // this block, so push it now.
                push_scan_block(ws->scan_bd, ws);
                ws->scan_bd = NULL;
                ws->scan = NULL;
                // we might be able to scan the todo block now.  But
                // don't do it right away: there might be full blocks
                // waiting to be scanned as a result of scavenge_block above.
                flag = rtsTrue;
            }

            if (flag) return rtsTrue;
        }
    }
    return rtsFalse;
}

/* ----------------------------------------------------------------------------
   Scavenge until we can't find anything more to scavenge.
   ------------------------------------------------------------------------- */

void
scavenge_loop(void)
{
    rtsBool work_to_do;

loop:
    work_to_do = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(static_objects));
        scavenge_static();
        work_to_do = rtsTrue;
    }

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        scavenge_mark_stack();
        work_to_do = rtsTrue;
    }

    // Order is important here: we want to deal in full blocks as
    // much as possible, so go for global work in preference to
    // local work.  Only if all the global work has been exhausted
    // do we start scavenging the fragments of blocks in the local
    // workspaces.
    if (scavenge_find_global_work()) goto loop;
    if (scavenge_find_local_work())  goto loop;

    if (work_to_do) goto loop;
}

/* ----------------------------------------------------------------------------
   Is there work left to do?
   ------------------------------------------------------------------------- */

rtsBool
any_work (void)
{
    int g, s;
    step_workspace *ws;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        return rtsTrue;
    }

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
        return rtsTrue;
    }

    // Check for global work in any step.  We don't need to check for
    // local work, because we have already exited scavenge_loop(),
    // which means there is no local work for this thread.
    for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
        for (s = generations[g].n_steps; --s >= 0; ) {
            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
                continue;
            }
            ws = &gct->steps[g][s];
            if (ws->todo_large_objects) return rtsTrue;
            if (ws->stp->todos) return rtsTrue;
        }
    }

    return rtsFalse;
}
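
/* Sketch (not compiled) of how a GC thread might drive the entry points in
 * this file; example_gc_thread_work() is hypothetical, and the real driver
 * lives in GC.c.  Each thread scavenges until it runs out of local and
 * global work, then polls any_work() to see whether other threads have
 * produced more.
 */
#if 0
static void
example_gc_thread_work (void)
{
    scavenge_loop();            // drain local + global work
    while (any_work()) {        // another thread pushed more work?
        scavenge_loop();
    }
}
#endif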