X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FScav.c;h=466b9b44f7fb6b93f8d63c1f2669bd5bab827151;hb=ce7bf1839d868fd829f0224b226da54612ac0e88;hp=cd200f3dbb4701f4354db0ca9148fb6336c4ef7a;hpb=64c17c4561cf419a4c70511bafc0815ea670bb2e;p=ghc-hetmet.git diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c index cd200f3..466b9b4 100644 --- a/rts/sm/Scav.c +++ b/rts/sm/Scav.c @@ -1,6 +1,6 @@ /* ----------------------------------------------------------------------------- * - * (c) The GHC Team 1998-2006 + * (c) The GHC Team 1998-2008 * * Generational garbage collector: scavenging functions * @@ -11,15 +11,21 @@ * * ---------------------------------------------------------------------------*/ +#include "PosixSource.h" #include "Rts.h" + #include "Storage.h" -#include "MBlock.h" #include "GC.h" +#include "GCThread.h" +#include "GCUtils.h" #include "Compact.h" +#include "MarkStack.h" #include "Evac.h" #include "Scav.h" #include "Apply.h" #include "Trace.h" +#include "Sanity.h" +#include "Capability.h" #include "LdvProfile.h" static void scavenge_stack (StgPtr p, StgPtr stack_end); @@ -28,129 +34,155 @@ static void scavenge_large_bitmap (StgPtr p, StgLargeBitmap *large_bitmap, nat size ); -/* Similar to scavenge_large_bitmap(), but we don't write back the - * pointers we get back from evacuate(). - */ -static void -scavenge_large_srt_bitmap( StgLargeSRT *large_srt ) -{ - nat i, b, size; - StgWord bitmap; - StgClosure **p; - - b = 0; - bitmap = large_srt->l.bitmap[b]; - size = (nat)large_srt->l.size; - p = (StgClosure **)large_srt->srt; - for (i = 0; i < size; ) { - if ((bitmap & 1) != 0) { - evacuate(*p); - } - i++; - p++; - if (i % BITS_IN(W_) == 0) { - b++; - bitmap = large_srt->l.bitmap[b]; - } else { - bitmap = bitmap >> 1; - } - } -} - -/* evacuate the SRT. If srt_bitmap is zero, then there isn't an - * srt field in the info table. That's ok, because we'll - * never dereference it. - */ -STATIC_INLINE void -scavenge_srt (StgClosure **srt, nat srt_bitmap) -{ - nat bitmap; - StgClosure **p; - - bitmap = srt_bitmap; - p = srt; - - if (bitmap == (StgHalfWord)(-1)) { - scavenge_large_srt_bitmap( (StgLargeSRT *)srt ); - return; - } - - while (bitmap != 0) { - if ((bitmap & 1) != 0) { -#ifdef ENABLE_WIN32_DLL_SUPPORT - // Special-case to handle references to closures hiding out in DLLs, since - // double indirections required to get at those. The code generator knows - // which is which when generating the SRT, so it stores the (indirect) - // reference to the DLL closure in the table by first adding one to it. - // We check for this here, and undo the addition before evacuating it. - // - // If the SRT entry hasn't got bit 0 set, the SRT entry points to a - // closure that's fixed at link-time, and no extra magic is required. - if ( (unsigned long)(*srt) & 0x1 ) { - evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1))); - } else { - evacuate(*p); - } -#else - evacuate(*p); +#if defined(THREADED_RTS) && !defined(PARALLEL_GC) +# define evacuate(a) evacuate1(a) +# define scavenge_loop(a) scavenge_loop1(a) +# define scavenge_block(a) scavenge_block1(a) +# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g) +# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap) #endif - } - p++; - bitmap = bitmap >> 1; - } -} +/* ----------------------------------------------------------------------------- + Scavenge a TSO. 
+ -------------------------------------------------------------------------- */ STATIC_INLINE void -scavenge_thunk_srt(const StgInfoTable *info) +scavenge_TSO_link (StgTSO *tso) { - StgThunkInfoTable *thunk_info; - - if (!major_gc) return; - - thunk_info = itbl_to_thunk_itbl(info); - scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap); + // We don't always chase the link field: TSOs on the blackhole + // queue are not automatically alive, so the link field is a + // "weak" pointer in that case. + if (tso->why_blocked != BlockedOnBlackHole) { + evacuate((StgClosure **)&tso->_link); + } } -STATIC_INLINE void -scavenge_fun_srt(const StgInfoTable *info) +static void +scavengeTSO (StgTSO *tso) { - StgFunInfoTable *fun_info; + rtsBool saved_eager; + + if (tso->what_next == ThreadRelocated) { + // the only way this can happen is if the old TSO was on the + // mutable list. We might have other links to this defunct + // TSO, so we must update its link field. + evacuate((StgClosure**)&tso->_link); + return; + } - if (!major_gc) return; - - fun_info = itbl_to_fun_itbl(info); - scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap); -} + debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id); -/* ----------------------------------------------------------------------------- - Scavenge a TSO. - -------------------------------------------------------------------------- */ + // update the pointer from the Task. + if (tso->bound != NULL) { + tso->bound->tso = tso; + } + + saved_eager = gct->eager_promotion; + gct->eager_promotion = rtsFalse; -static void -scavengeTSO (StgTSO *tso) -{ if ( tso->why_blocked == BlockedOnMVar || tso->why_blocked == BlockedOnBlackHole || tso->why_blocked == BlockedOnException ) { - tso->block_info.closure = evacuate(tso->block_info.closure); + evacuate(&tso->block_info.closure); } - tso->blocked_exceptions = - (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions); + evacuate((StgClosure **)&tso->blocked_exceptions); - // We don't always chase the link field: TSOs on the blackhole - // queue are not automatically alive, so the link field is a - // "weak" pointer in that case. 
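
The comment just above states the invariant that the new scavenge_TSO_link
helper enforces. A minimal, self-contained sketch of that rule, using
stand-in types (the real StgTSO and BlockedOnBlackHole live in the RTS
headers; tso_link_is_strong is a hypothetical name, not an RTS function):

    enum why_blocked_sketch { NotBlocked_s, BlockedOnBlackHole_s /* ... */ };

    struct tso_sketch {                    /* simplified stand-in for StgTSO */
        enum why_blocked_sketch why_blocked;
        struct tso_sketch      *link;      /* run/blocked queue link         */
    };

    /* The link field keeps its target alive only when this thread is not
     * parked on a black hole; otherwise a permanently blocked thread
     * would be retained by its queue entry alone. */
    static int tso_link_is_strong (const struct tso_sketch *tso)
    {
        return tso->why_blocked != BlockedOnBlackHole_s;
    }
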
- if (tso->why_blocked != BlockedOnBlackHole) { - tso->link = (StgTSO *)evacuate((StgClosure *)tso->link); - } - // scavange current transaction record - tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec); + evacuate((StgClosure **)&tso->trec); // scavenge this thread's stack scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); + + if (gct->failed_to_evac) { + tso->dirty = 1; + scavenge_TSO_link(tso); + } else { + tso->dirty = 0; + scavenge_TSO_link(tso); + if (gct->failed_to_evac) { + tso->flags |= TSO_LINK_DIRTY; + } else { + tso->flags &= ~TSO_LINK_DIRTY; + } + } + + gct->eager_promotion = saved_eager; +} + +/* ----------------------------------------------------------------------------- + Mutable arrays of pointers + -------------------------------------------------------------------------- */ + +static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a) +{ + lnat m; + rtsBool any_failed; + StgPtr p, q; + + any_failed = rtsFalse; + p = (StgPtr)&a->payload[0]; + for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++) + { + q = p + (1 << MUT_ARR_PTRS_CARD_BITS); + for (; p < q; p++) { + evacuate((StgClosure**)p); + } + if (gct->failed_to_evac) { + any_failed = rtsTrue; + *mutArrPtrsCard(a,m) = 1; + gct->failed_to_evac = rtsFalse; + } else { + *mutArrPtrsCard(a,m) = 0; + } + } + + q = (StgPtr)&a->payload[a->ptrs]; + if (p < q) { + for (; p < q; p++) { + evacuate((StgClosure**)p); + } + if (gct->failed_to_evac) { + any_failed = rtsTrue; + *mutArrPtrsCard(a,m) = 1; + gct->failed_to_evac = rtsFalse; + } else { + *mutArrPtrsCard(a,m) = 0; + } + } + + gct->failed_to_evac = any_failed; + return (StgPtr)a + mut_arr_ptrs_sizeW(a); +} + +// scavenge only the marked areas of a MUT_ARR_PTRS +static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a) +{ + lnat m; + StgPtr p, q; + rtsBool any_failed; + + any_failed = rtsFalse; + for (m = 0; m < mutArrPtrsCards(a->ptrs); m++) + { + if (*mutArrPtrsCard(a,m) != 0) { + p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS]; + q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS), + (StgPtr)&a->payload[a->ptrs]); + for (; p < q; p++) { + evacuate((StgClosure**)p); + } + if (gct->failed_to_evac) { + any_failed = rtsTrue; + gct->failed_to_evac = rtsFalse; + } else { + *mutArrPtrsCard(a,m) = 0; + } + } + } + + gct->failed_to_evac = any_failed; + return (StgPtr)a + mut_arr_ptrs_sizeW(a); } /* ----------------------------------------------------------------------------- @@ -182,7 +214,7 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) small_bitmap: while (size > 0) { if ((bitmap & 1) == 0) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + evacuate((StgClosure **)p); } p++; bitmap = bitmap >> 1; @@ -193,14 +225,14 @@ scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) return p; } -STATIC_INLINE StgPtr +STATIC_INLINE GNUC_ATTR_HOT StgPtr scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size) { StgPtr p; StgWord bitmap; StgFunInfoTable *fun_info; - fun_info = get_fun_itbl(fun); + fun_info = get_fun_itbl(UNTAG_CLOSURE(fun)); ASSERT(fun_info->i.type != PAP); p = (StgPtr)payload; @@ -221,7 +253,7 @@ scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size) small_bitmap: while (size > 0) { if ((bitmap & 1) == 0) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + evacuate((StgClosure **)p); } p++; bitmap = bitmap >> 1; @@ -232,110 +264,216 @@ scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size) return p; } -STATIC_INLINE StgPtr +STATIC_INLINE GNUC_ATTR_HOT 
StgPtr scavenge_PAP (StgPAP *pap) { - pap->fun = evacuate(pap->fun); + evacuate(&pap->fun); return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args); } STATIC_INLINE StgPtr scavenge_AP (StgAP *ap) { - ap->fun = evacuate(ap->fun); + evacuate(&ap->fun); return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args); } /* ----------------------------------------------------------------------------- - Scavenge a given step until there are no more objects in this step - to scavenge. + Scavenge SRTs + -------------------------------------------------------------------------- */ + +/* Similar to scavenge_large_bitmap(), but we don't write back the + * pointers we get back from evacuate(). + */ +static void +scavenge_large_srt_bitmap( StgLargeSRT *large_srt ) +{ + nat i, b, size; + StgWord bitmap; + StgClosure **p; + + b = 0; + bitmap = large_srt->l.bitmap[b]; + size = (nat)large_srt->l.size; + p = (StgClosure **)large_srt->srt; + for (i = 0; i < size; ) { + if ((bitmap & 1) != 0) { + evacuate(p); + } + i++; + p++; + if (i % BITS_IN(W_) == 0) { + b++; + bitmap = large_srt->l.bitmap[b]; + } else { + bitmap = bitmap >> 1; + } + } +} + +/* evacuate the SRT. If srt_bitmap is zero, then there isn't an + * srt field in the info table. That's ok, because we'll + * never dereference it. + */ +STATIC_INLINE GNUC_ATTR_HOT void +scavenge_srt (StgClosure **srt, nat srt_bitmap) +{ + nat bitmap; + StgClosure **p; + + bitmap = srt_bitmap; + p = srt; + + if (bitmap == (StgHalfWord)(-1)) { + scavenge_large_srt_bitmap( (StgLargeSRT *)srt ); + return; + } + + while (bitmap != 0) { + if ((bitmap & 1) != 0) { +#if defined(__PIC__) && defined(mingw32_TARGET_OS) + // Special-case to handle references to closures hiding out in DLLs, since + // double indirections required to get at those. The code generator knows + // which is which when generating the SRT, so it stores the (indirect) + // reference to the DLL closure in the table by first adding one to it. + // We check for this here, and undo the addition before evacuating it. + // + // If the SRT entry hasn't got bit 0 set, the SRT entry points to a + // closure that's fixed at link-time, and no extra magic is required. + if ( (unsigned long)(*srt) & 0x1 ) { + evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1)); + } else { + evacuate(p); + } +#else + evacuate(p); +#endif + } + p++; + bitmap = bitmap >> 1; + } +} + + +STATIC_INLINE GNUC_ATTR_HOT void +scavenge_thunk_srt(const StgInfoTable *info) +{ + StgThunkInfoTable *thunk_info; + + if (!major_gc) return; + + thunk_info = itbl_to_thunk_itbl(info); + scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap); +} + +STATIC_INLINE GNUC_ATTR_HOT void +scavenge_fun_srt(const StgInfoTable *info) +{ + StgFunInfoTable *fun_info; + + if (!major_gc) return; + + fun_info = itbl_to_fun_itbl(info); + scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap); +} + +/* ----------------------------------------------------------------------------- + Scavenge a block from the given scan pointer up to bd->free. evac_gen is set by the caller to be either zero (for a step in a generation < N) or G where G is the generation of the step being scavenged. We sometimes temporarily change evac_gen back to zero if we're - scavenging a mutable object where early promotion isn't such a good + scavenging a mutable object where eager promotion isn't such a good idea. 
-------------------------------------------------------------------------- */ -void -scavenge(step *stp) +static GNUC_ATTR_HOT void +scavenge_block (bdescr *bd) { StgPtr p, q; StgInfoTable *info; - bdescr *bd; - nat saved_evac_gen = evac_gen; - - p = stp->scan; - bd = stp->scan_bd; + generation *saved_evac_gen; + rtsBool saved_eager_promotion; + gen_workspace *ws; - failed_to_evac = rtsFalse; + debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p", + bd->start, bd->gen_no, bd->u.scan); - /* scavenge phase - standard breadth-first scavenging of the - * evacuated objects - */ + gct->scan_bd = bd; + gct->evac_gen = bd->gen; + saved_evac_gen = gct->evac_gen; + saved_eager_promotion = gct->eager_promotion; + gct->failed_to_evac = rtsFalse; - while (bd != stp->hp_bd || p < stp->hp) { + ws = &gct->gens[bd->gen->no]; - // If we're at the end of this block, move on to the next block - if (bd != stp->hp_bd && p == bd->free) { - bd = bd->link; - p = bd->start; - continue; - } + p = bd->u.scan; + + // we might be evacuating into the very object that we're + // scavenging, so we have to check the real bd->free pointer each + // time around the loop. + while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) { + ASSERT(bd->link == NULL); ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); info = get_itbl((StgClosure *)p); - ASSERT(thunk_selector_depth == 0); + ASSERT(gct->thunk_selector_depth == 0); q = p; switch (info->type) { - case MVAR: + case MVAR_CLEAN: + case MVAR_DIRTY: { StgMVar *mvar = ((StgMVar *)p); - evac_gen = 0; - mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); - mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); - mvar->value = evacuate((StgClosure *)mvar->value); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable. + gct->eager_promotion = rtsFalse; + evacuate((StgClosure **)&mvar->head); + evacuate((StgClosure **)&mvar->tail); + evacuate((StgClosure **)&mvar->value); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + mvar->header.info = &stg_MVAR_DIRTY_info; + } else { + mvar->header.info = &stg_MVAR_CLEAN_info; + } p += sizeofW(StgMVar); break; } case FUN_2_0: scavenge_fun_srt(info); - ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + evacuate(&((StgClosure *)p)->payload[1]); + evacuate(&((StgClosure *)p)->payload[0]); p += sizeofW(StgHeader) + 2; break; case THUNK_2_0: scavenge_thunk_srt(info); - ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]); - ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + evacuate(&((StgThunk *)p)->payload[1]); + evacuate(&((StgThunk *)p)->payload[0]); p += sizeofW(StgThunk) + 2; break; case CONSTR_2_0: - ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + evacuate(&((StgClosure *)p)->payload[1]); + evacuate(&((StgClosure *)p)->payload[0]); p += sizeofW(StgHeader) + 2; break; case THUNK_1_0: scavenge_thunk_srt(info); - ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + evacuate(&((StgThunk *)p)->payload[0]); p += sizeofW(StgThunk) + 1; break; case FUN_1_0: scavenge_fun_srt(info); case CONSTR_1_0: - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + evacuate(&((StgClosure *)p)->payload[0]); p += sizeofW(StgHeader) + 1; break; @@ -363,14 +501,14 @@ scavenge(step *stp) case THUNK_1_1: scavenge_thunk_srt(info); - ((StgThunk 
*)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + evacuate(&((StgThunk *)p)->payload[0]); p += sizeofW(StgThunk) + 2; break; case FUN_1_1: scavenge_fun_srt(info); case CONSTR_1_1: - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + evacuate(&((StgClosure *)p)->payload[0]); p += sizeofW(StgHeader) + 2; break; @@ -385,7 +523,7 @@ scavenge(step *stp) scavenge_thunk_srt(info); end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs; for (p = (P_)((StgThunk *)p)->payload; p < end; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + evacuate((StgClosure **)p); } p += info->layout.payload.nptrs; break; @@ -400,7 +538,7 @@ scavenge(step *stp) end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + evacuate((StgClosure **)p); } p += info->layout.payload.nptrs; break; @@ -408,16 +546,15 @@ scavenge(step *stp) case BCO: { StgBCO *bco = (StgBCO *)p; - bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs); - bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals); - bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs); - bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls); + evacuate((StgClosure **)&bco->instrs); + evacuate((StgClosure **)&bco->literals); + evacuate((StgClosure **)&bco->ptrs); p += bco_sizeW(bco); break; } case IND_PERM: - if (stp->gen->no != 0) { + if (bd->gen_no != 0) { #ifdef PROFILING // @LDV profiling // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an @@ -434,30 +571,25 @@ scavenge(step *stp) } // fall through case IND_OLDGEN_PERM: - ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee); + evacuate(&((StgInd *)p)->indirectee); p += sizeofW(StgInd); break; case MUT_VAR_CLEAN: - case MUT_VAR_DIRTY: { - rtsBool saved_eager_promotion = eager_promotion; + case MUT_VAR_DIRTY: + gct->eager_promotion = rtsFalse; + evacuate(&((StgMutVar *)p)->var); + gct->eager_promotion = saved_eager_promotion; - eager_promotion = rtsFalse; - ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); - eager_promotion = saved_eager_promotion; - - if (failed_to_evac) { + if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info; } else { ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info; } p += sizeofW(StgMutVar); break; - } case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: case BLACKHOLE: p += BLACKHOLE_sizeW(); break; @@ -465,7 +597,7 @@ scavenge(step *stp) case THUNK_SELECTOR: { StgSelector *s = (StgSelector *)p; - s->selectee = evacuate(s->selectee); + evacuate(&s->selectee); p += THUNK_SELECTOR_sizeW(); break; } @@ -475,7 +607,7 @@ scavenge(step *stp) { StgAP_STACK *ap = (StgAP_STACK *)p; - ap->fun = evacuate(ap->fun); + evacuate(&ap->fun); scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size); p = (StgPtr)ap->payload + ap->size; break; @@ -496,30 +628,23 @@ scavenge(step *stp) case MUT_ARR_PTRS_CLEAN: case MUT_ARR_PTRS_DIRTY: - // follow everything { - StgPtr next; - rtsBool saved_eager; + // We don't eagerly promote objects pointed to by a mutable + // array, but if we find the array only points to objects in + // the same or an older generation, we mark it "clean" and + // avoid traversing it during minor GCs. 
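
scavenge_mut_arr_ptrs, defined earlier in this patch, is what makes marking
an array "clean" cheap: the payload is divided into cards, and one byte per
card records whether that card may still point into a younger generation.
A self-contained sketch of the index arithmetic behind the
mutArrPtrsCards/mutArrPtrsCard macros; the card size of 2^7 elements is an
assumption about MUT_ARR_PTRS_CARD_BITS, and the helper names here are
illustrative, not the RTS macros themselves:

    #define CARD_BITS 7                   /* assumed MUT_ARR_PTRS_CARD_BITS */
    #define CARD_SIZE (1UL << CARD_BITS)  /* payload elements per card      */

    /* number of card bytes for an array of n pointers (rounded up) */
    static unsigned long cards_for (unsigned long n)
    {
        return (n + CARD_SIZE - 1) >> CARD_BITS;
    }

    /* card that payload element i belongs to; the card bytes are laid
     * out directly after the payload, which is why mut_arr_ptrs_sizeW
     * is roughly header + n + cards_for(n) words */
    static unsigned long card_of (unsigned long i)
    {
        return i >> CARD_BITS;
    }
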
+ gct->eager_promotion = rtsFalse; - // We don't eagerly promote objects pointed to by a mutable - // array, but if we find the array only points to objects in - // the same or an older generation, we mark it "clean" and - // avoid traversing it during minor GCs. - saved_eager = eager_promotion; - eager_promotion = rtsFalse; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); - } - eager_promotion = saved_eager; + p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p); - if (failed_to_evac) { + if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; } else { ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; } - failed_to_evac = rtsTrue; // always put it on the mutable list. + gct->eager_promotion = saved_eager_promotion; + gct->failed_to_evac = rtsTrue; // always put it on the mutable list. break; } @@ -527,17 +652,12 @@ scavenge(step *stp) case MUT_ARR_PTRS_FROZEN0: // follow everything { - StgPtr next; - - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); - } + p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p); // If we're going to put this object on the mutable list, then // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that. - if (failed_to_evac) { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info; + if (gct->failed_to_evac) { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info; } else { ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info; } @@ -547,19 +667,7 @@ scavenge(step *stp) case TSO: { StgTSO *tso = (StgTSO *)p; - rtsBool saved_eager = eager_promotion; - - eager_promotion = rtsFalse; - scavengeTSO(tso); - eager_promotion = saved_eager; - - if (failed_to_evac) { - tso->flags |= TSO_DIRTY; - } else { - tso->flags &= ~TSO_DIRTY; - } - - failed_to_evac = rtsTrue; // always on the mutable list + scavengeTSO(tso); p += tso_sizeW(tso); break; } @@ -567,12 +675,12 @@ scavenge(step *stp) case TVAR_WATCH_QUEUE: { StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p); - evac_gen = 0; - wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure); - wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry); - wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&wq->closure); + evacuate((StgClosure **)&wq->next_queue_entry); + evacuate((StgClosure **)&wq->prev_queue_entry); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable p += sizeofW(StgTVarWatchQueue); break; } @@ -580,11 +688,11 @@ scavenge(step *stp) case TVAR: { StgTVar *tvar = ((StgTVar *) p); - evac_gen = 0; - tvar->current_value = evacuate((StgClosure*)tvar->current_value); - tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&tvar->current_value); + evacuate((StgClosure **)&tvar->first_watch_queue_entry); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable p += sizeofW(StgTVar); break; } @@ -592,12 +700,12 @@ scavenge(step *stp) case TREC_HEADER: { StgTRecHeader *trec = ((StgTRecHeader *) p); - evac_gen = 0; - trec->enclosing_trec 
= (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); - trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); - trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&trec->enclosing_trec); + evacuate((StgClosure **)&trec->current_chunk); + evacuate((StgClosure **)&trec->invariants_to_check); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable p += sizeofW(StgTRecHeader); break; } @@ -607,15 +715,15 @@ scavenge(step *stp) StgWord i; StgTRecChunk *tc = ((StgTRecChunk *) p); TRecEntry *e = &(tc -> entries[0]); - evac_gen = 0; - tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + gct->evac_gen = 0; + evacuate((StgClosure **)&tc->prev_chunk); for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { - e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); - e->expected_value = evacuate((StgClosure*)e->expected_value); - e->new_value = evacuate((StgClosure*)e->new_value); + evacuate((StgClosure **)&e->tvar); + evacuate((StgClosure **)&e->expected_value); + evacuate((StgClosure **)&e->new_value); } - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable p += sizeofW(StgTRecChunk); break; } @@ -623,11 +731,11 @@ scavenge(step *stp) case ATOMIC_INVARIANT: { StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p); - evac_gen = 0; - invariant->code = (StgClosure *)evacuate(invariant->code); - invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate(&invariant->code); + evacuate((StgClosure **)&invariant->last_execution); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable p += sizeofW(StgAtomicInvariant); break; } @@ -635,12 +743,12 @@ scavenge(step *stp) case INVARIANT_CHECK_QUEUE: { StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p); - evac_gen = 0; - queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant); - queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution); - queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&queue->invariant); + evacuate((StgClosure **)&queue->my_execution); + evacuate((StgClosure **)&queue->next_queue_entry); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable p += sizeofW(StgInvariantCheckQueue); break; } @@ -657,18 +765,34 @@ scavenge(step *stp) * Case (b) arises if we didn't manage to promote everything that * the current object points to into the current generation. 
*/ - if (failed_to_evac) { - failed_to_evac = rtsFalse; - if (stp->gen_no > 0) { - recordMutableGen((StgClosure *)q, stp->gen); + if (gct->failed_to_evac) { + gct->failed_to_evac = rtsFalse; + if (bd->gen_no > 0) { + recordMutableGen_GC((StgClosure *)q, bd->gen_no); } } } - stp->scan_bd = bd; - stp->scan = p; -} + if (p > bd->free) { + gct->copied += ws->todo_free - bd->free; + bd->free = p; + } + + debugTrace(DEBUG_gc, " scavenged %ld bytes", + (unsigned long)((bd->free - bd->u.scan) * sizeof(W_))); + + // update stats: this is a block that has been scavenged + gct->scanned += bd->free - bd->u.scan; + bd->u.scan = bd->free; + + if (bd != ws->todo_bd) { + // we're not going to evac any more objects into + // this block, so push it now. + push_scanned_block(bd, ws); + } + gct->scan_bd = NULL; +} /* ----------------------------------------------------------------------------- Scavenge everything on the mark stack. @@ -677,70 +801,76 @@ scavenge(step *stp) doesn't need to advance the pointer on to the next object. -------------------------------------------------------------------------- */ -void +static void scavenge_mark_stack(void) { StgPtr p, q; StgInfoTable *info; - nat saved_evac_gen; + generation *saved_evac_gen; - evac_gen = oldest_gen->no; - saved_evac_gen = evac_gen; + gct->evac_gen = oldest_gen; + saved_evac_gen = gct->evac_gen; -linear_scan: - while (!mark_stack_empty()) { - p = pop_mark_stack(); + while ((p = pop_mark_stack())) { ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); info = get_itbl((StgClosure *)p); q = p; - switch (info->type) { + switch (info->type) { - case MVAR: - { - StgMVar *mvar = ((StgMVar *)p); - evac_gen = 0; - mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); - mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); - mvar->value = evacuate((StgClosure *)mvar->value); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable. 
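
The replacement code below, like every other mutable case in this patch,
follows one protocol: scavenge the fields with eager promotion switched
off, then choose between the _CLEAN and _DIRTY info tables according to
failed_to_evac. Distilled into one hypothetical helper (set_clean_or_dirty
is not an RTS function; the typedefs are stand-ins so the sketch stands
alone, the real types are in the RTS headers):

    typedef int rtsBool_s;
    typedef struct itbl_s StgInfoTable_s;
    typedef struct { const StgInfoTable_s *info; } hdr_s;

    static void set_clean_or_dirty (hdr_s *c,
                                    const StgInfoTable_s *clean_info,
                                    const StgInfoTable_s *dirty_info,
                                    rtsBool_s failed_to_evac)
    {
        /* an object that may still point into a younger generation must
         * keep the DIRTY info table, and with it its place on a mutable
         * list, so the old->new pointers are found at the next minor GC */
        c->info = failed_to_evac ? dirty_info : clean_info;
    }
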
- break; - } + case MVAR_CLEAN: + case MVAR_DIRTY: + { + rtsBool saved_eager_promotion = gct->eager_promotion; + + StgMVar *mvar = ((StgMVar *)p); + gct->eager_promotion = rtsFalse; + evacuate((StgClosure **)&mvar->head); + evacuate((StgClosure **)&mvar->tail); + evacuate((StgClosure **)&mvar->value); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + mvar->header.info = &stg_MVAR_DIRTY_info; + } else { + mvar->header.info = &stg_MVAR_CLEAN_info; + } + break; + } case FUN_2_0: scavenge_fun_srt(info); - ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + evacuate(&((StgClosure *)p)->payload[1]); + evacuate(&((StgClosure *)p)->payload[0]); break; case THUNK_2_0: scavenge_thunk_srt(info); - ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]); - ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + evacuate(&((StgThunk *)p)->payload[1]); + evacuate(&((StgThunk *)p)->payload[0]); break; case CONSTR_2_0: - ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]); - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + evacuate(&((StgClosure *)p)->payload[1]); + evacuate(&((StgClosure *)p)->payload[0]); break; case FUN_1_0: case FUN_1_1: scavenge_fun_srt(info); - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + evacuate(&((StgClosure *)p)->payload[0]); break; case THUNK_1_0: case THUNK_1_1: scavenge_thunk_srt(info); - ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]); + evacuate(&((StgThunk *)p)->payload[0]); break; case CONSTR_1_0: case CONSTR_1_1: - ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]); + evacuate(&((StgClosure *)p)->payload[0]); break; case FUN_0_1: @@ -768,7 +898,7 @@ linear_scan: scavenge_thunk_srt(info); end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs; for (p = (P_)((StgThunk *)p)->payload; p < end; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + evacuate((StgClosure **)p); } break; } @@ -782,17 +912,16 @@ linear_scan: end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + evacuate((StgClosure **)p); } break; } case BCO: { StgBCO *bco = (StgBCO *)p; - bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs); - bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals); - bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs); - bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls); + evacuate((StgClosure **)&bco->instrs); + evacuate((StgClosure **)&bco->literals); + evacuate((StgClosure **)&bco->ptrs); break; } @@ -804,19 +933,18 @@ linear_scan: case IND_OLDGEN: case IND_OLDGEN_PERM: - ((StgInd *)p)->indirectee = - evacuate(((StgInd *)p)->indirectee); + evacuate(&((StgInd *)p)->indirectee); break; case MUT_VAR_CLEAN: case MUT_VAR_DIRTY: { - rtsBool saved_eager_promotion = eager_promotion; + rtsBool saved_eager_promotion = gct->eager_promotion; - eager_promotion = rtsFalse; - ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); - eager_promotion = saved_eager_promotion; + gct->eager_promotion = rtsFalse; + evacuate(&((StgMutVar *)p)->var); + gct->eager_promotion = saved_eager_promotion; - if (failed_to_evac) { + if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info; } else { ((StgClosure *)q)->header.info 
= &stg_MUT_VAR_CLEAN_info; @@ -825,8 +953,6 @@ linear_scan: } case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: case BLACKHOLE: case ARR_WORDS: break; @@ -834,7 +960,7 @@ linear_scan: case THUNK_SELECTOR: { StgSelector *s = (StgSelector *)p; - s->selectee = evacuate(s->selectee); + evacuate(&s->selectee); break; } @@ -843,7 +969,7 @@ linear_scan: { StgAP_STACK *ap = (StgAP_STACK *)p; - ap->fun = evacuate(ap->fun); + evacuate(&ap->fun); scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size); break; } @@ -860,28 +986,25 @@ linear_scan: case MUT_ARR_PTRS_DIRTY: // follow everything { - StgPtr next; rtsBool saved_eager; // We don't eagerly promote objects pointed to by a mutable // array, but if we find the array only points to objects in // the same or an older generation, we mark it "clean" and // avoid traversing it during minor GCs. - saved_eager = eager_promotion; - eager_promotion = rtsFalse; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); - } - eager_promotion = saved_eager; + saved_eager = gct->eager_promotion; + gct->eager_promotion = rtsFalse; - if (failed_to_evac) { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; - } else { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; - } + scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); + + if (gct->failed_to_evac) { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; + } else { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; + } - failed_to_evac = rtsTrue; // mutable anyhow. + gct->eager_promotion = saved_eager; + gct->failed_to_evac = rtsTrue; // mutable anyhow. break; } @@ -889,16 +1012,13 @@ linear_scan: case MUT_ARR_PTRS_FROZEN0: // follow everything { - StgPtr next, q = p; + StgPtr q = p; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); - } + scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); // If we're going to put this object on the mutable list, then // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that. 
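
For frozen arrays the same bookkeeping uses a second pair of info tables.
A summary of the four MUT_ARR_PTRS states as this file uses them
(descriptive gloss, not text from the patch):

    /* MUT_ARR_PTRS_CLEAN    still mutable, but known not to point into a
     *                       younger generation; minor GC may skip it
     * MUT_ARR_PTRS_DIRTY    mutable and possibly pointing old->new; kept
     *                       on a mutable list, rescanned card by card
     * MUT_ARR_PTRS_FROZEN   immutable from now on; needs no further care
     *                       once it holds no old->new pointers
     * MUT_ARR_PTRS_FROZEN0  immutable, but still holding old->new
     *                       pointers, so it must visit the mutable list
     *                       once more                                     */
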
- if (failed_to_evac) { + if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info; } else { ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info; @@ -908,43 +1028,30 @@ linear_scan: case TSO: { - StgTSO *tso = (StgTSO *)p; - rtsBool saved_eager = eager_promotion; - - eager_promotion = rtsFalse; - scavengeTSO(tso); - eager_promotion = saved_eager; - - if (failed_to_evac) { - tso->flags |= TSO_DIRTY; - } else { - tso->flags &= ~TSO_DIRTY; - } - - failed_to_evac = rtsTrue; // always on the mutable list + scavengeTSO((StgTSO*)p); break; } case TVAR_WATCH_QUEUE: { StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p); - evac_gen = 0; - wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure); - wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry); - wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&wq->closure); + evacuate((StgClosure **)&wq->next_queue_entry); + evacuate((StgClosure **)&wq->prev_queue_entry); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } case TVAR: { StgTVar *tvar = ((StgTVar *) p); - evac_gen = 0; - tvar->current_value = evacuate((StgClosure*)tvar->current_value); - tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&tvar->current_value); + evacuate((StgClosure **)&tvar->first_watch_queue_entry); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } @@ -953,50 +1060,50 @@ linear_scan: StgWord i; StgTRecChunk *tc = ((StgTRecChunk *) p); TRecEntry *e = &(tc -> entries[0]); - evac_gen = 0; - tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + gct->evac_gen = 0; + evacuate((StgClosure **)&tc->prev_chunk); for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { - e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); - e->expected_value = evacuate((StgClosure*)e->expected_value); - e->new_value = evacuate((StgClosure*)e->new_value); + evacuate((StgClosure **)&e->tvar); + evacuate((StgClosure **)&e->expected_value); + evacuate((StgClosure **)&e->new_value); } - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } case TREC_HEADER: { StgTRecHeader *trec = ((StgTRecHeader *) p); - evac_gen = 0; - trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); - trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); - trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&trec->enclosing_trec); + evacuate((StgClosure **)&trec->current_chunk); + evacuate((StgClosure **)&trec->invariants_to_check); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } case ATOMIC_INVARIANT: { StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p); - evac_gen = 0; - invariant->code = (StgClosure *)evacuate(invariant->code); - invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution); - evac_gen = saved_evac_gen; - failed_to_evac = 
rtsTrue; // mutable + gct->evac_gen = 0; + evacuate(&invariant->code); + evacuate((StgClosure **)&invariant->last_execution); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } case INVARIANT_CHECK_QUEUE: { StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p); - evac_gen = 0; - queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant); - queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution); - queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&queue->invariant); + evacuate((StgClosure **)&queue->my_execution); + evacuate((StgClosure **)&queue->next_queue_entry); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } @@ -1005,55 +1112,13 @@ linear_scan: info->type, p); } - if (failed_to_evac) { - failed_to_evac = rtsFalse; - if (evac_gen > 0) { - recordMutableGen((StgClosure *)q, &generations[evac_gen]); + if (gct->failed_to_evac) { + gct->failed_to_evac = rtsFalse; + if (gct->evac_gen) { + recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no); } } - - // mark the next bit to indicate "scavenged" - mark(q+1, Bdescr(q)); - - } // while (!mark_stack_empty()) - - // start a new linear scan if the mark stack overflowed at some point - if (mark_stack_overflowed && oldgen_scan_bd == NULL) { - debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan"); - mark_stack_overflowed = rtsFalse; - oldgen_scan_bd = oldest_gen->steps[0].old_blocks; - oldgen_scan = oldgen_scan_bd->start; - } - - if (oldgen_scan_bd) { - // push a new thing on the mark stack - loop: - // find a closure that is marked but not scavenged, and start - // from there. - while (oldgen_scan < oldgen_scan_bd->free - && !is_marked(oldgen_scan,oldgen_scan_bd)) { - oldgen_scan++; - } - - if (oldgen_scan < oldgen_scan_bd->free) { - - // already scavenged? - if (is_marked(oldgen_scan+1,oldgen_scan_bd)) { - oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE; - goto loop; - } - push_mark_stack(oldgen_scan); - // ToDo: bump the linear scan by the actual size of the object - oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE; - goto linear_scan; - } - - oldgen_scan_bd = oldgen_scan_bd->link; - if (oldgen_scan_bd != NULL) { - oldgen_scan = oldgen_scan_bd->start; - goto loop; - } - } + } // while (p = pop_mark_stack()) } /* ----------------------------------------------------------------------------- @@ -1068,7 +1133,7 @@ static rtsBool scavenge_one(StgPtr p) { const StgInfoTable *info; - nat saved_evac_gen = evac_gen; + generation *saved_evac_gen = gct->evac_gen; rtsBool no_luck; ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); @@ -1076,15 +1141,23 @@ scavenge_one(StgPtr p) switch (info->type) { - case MVAR: + case MVAR_CLEAN: + case MVAR_DIRTY: { + rtsBool saved_eager_promotion = gct->eager_promotion; + StgMVar *mvar = ((StgMVar *)p); - evac_gen = 0; - mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head); - mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail); - mvar->value = evacuate((StgClosure *)mvar->value); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable. 
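
scavenge_one reports, via its rtsBool result, whether anything reachable
from the object failed to reach the desired generation; callers decide
from that whether the object must (re)join a mutable list. The pattern,
essentially as it appears in scavenge_mutable_list and scavenge_large
later in this file (gen_no stands for the relevant generation number):

    if (scavenge_one(p)) {
        /* didn't manage to promote everything the object points to,
         * so keep the object on the mutable list for the next GC */
        recordMutableGen_GC((StgClosure *)p, gen_no);
    }
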
+ gct->eager_promotion = rtsFalse; + evacuate((StgClosure **)&mvar->head); + evacuate((StgClosure **)&mvar->tail); + evacuate((StgClosure **)&mvar->value); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + mvar->header.info = &stg_MVAR_DIRTY_info; + } else { + mvar->header.info = &stg_MVAR_CLEAN_info; + } break; } @@ -1099,7 +1172,7 @@ scavenge_one(StgPtr p) end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs; for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) { - *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); + evacuate((StgClosure **)q); } break; } @@ -1123,7 +1196,7 @@ scavenge_one(StgPtr p) end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs; for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) { - *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); + evacuate((StgClosure **)q); } break; } @@ -1131,13 +1204,13 @@ scavenge_one(StgPtr p) case MUT_VAR_CLEAN: case MUT_VAR_DIRTY: { StgPtr q = p; - rtsBool saved_eager_promotion = eager_promotion; + rtsBool saved_eager_promotion = gct->eager_promotion; - eager_promotion = rtsFalse; - ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var); - eager_promotion = saved_eager_promotion; + gct->eager_promotion = rtsFalse; + evacuate(&((StgMutVar *)p)->var); + gct->eager_promotion = saved_eager_promotion; - if (failed_to_evac) { + if (gct->failed_to_evac) { ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info; } else { ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info; @@ -1146,15 +1219,13 @@ scavenge_one(StgPtr p) } case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: case BLACKHOLE: break; case THUNK_SELECTOR: { StgSelector *s = (StgSelector *)p; - s->selectee = evacuate(s->selectee); + evacuate(&s->selectee); break; } @@ -1162,7 +1233,7 @@ scavenge_one(StgPtr p) { StgAP_STACK *ap = (StgAP_STACK *)p; - ap->fun = evacuate(ap->fun); + evacuate(&ap->fun); scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size); p = (StgPtr)ap->payload + ap->size; break; @@ -1183,29 +1254,25 @@ scavenge_one(StgPtr p) case MUT_ARR_PTRS_CLEAN: case MUT_ARR_PTRS_DIRTY: { - StgPtr next, q; rtsBool saved_eager; // We don't eagerly promote objects pointed to by a mutable // array, but if we find the array only points to objects in // the same or an older generation, we mark it "clean" and // avoid traversing it during minor GCs. 
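
Eager promotion, which this case switches off, means copying a reached
object directly into the generation of the object that refers to it. For
fields of mutable objects that is usually a losing bet: the field may be
overwritten before the next GC, stranding the promoted copy in an old
generation. A schematic of the choice evacuate makes when it reaches an
object in a generation younger than evac_gen (descriptive pseudocode, not
the real evacuate):

    /* if (eager_promotion) {
     *     copy the object up to evac_gen;        promote immediately
     * } else {
     *     copy it within its own generation;     leave it young
     *     failed_to_evac = rtsTrue;              the referrer is dirty
     * }
     */
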
- saved_eager = eager_promotion; - eager_promotion = rtsFalse; - q = p; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); - } - eager_promotion = saved_eager; + saved_eager = gct->eager_promotion; + gct->eager_promotion = rtsFalse; - if (failed_to_evac) { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; + scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); + + if (gct->failed_to_evac) { + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; } else { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; } - failed_to_evac = rtsTrue; + gct->eager_promotion = saved_eager; + gct->failed_to_evac = rtsTrue; break; } @@ -1213,74 +1280,56 @@ scavenge_one(StgPtr p) case MUT_ARR_PTRS_FROZEN0: { // follow everything - StgPtr next, q=p; - - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); - } - + scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); + // If we're going to put this object on the mutable list, then // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that. - if (failed_to_evac) { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info; + if (gct->failed_to_evac) { + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info; } else { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info; + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info; } break; } case TSO: { - StgTSO *tso = (StgTSO *)p; - rtsBool saved_eager = eager_promotion; - - eager_promotion = rtsFalse; - scavengeTSO(tso); - eager_promotion = saved_eager; - - if (failed_to_evac) { - tso->flags |= TSO_DIRTY; - } else { - tso->flags &= ~TSO_DIRTY; - } - - failed_to_evac = rtsTrue; // always on the mutable list + scavengeTSO((StgTSO*)p); break; } case TVAR_WATCH_QUEUE: { StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p); - evac_gen = 0; - wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure); - wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry); - wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&wq->closure); + evacuate((StgClosure **)&wq->next_queue_entry); + evacuate((StgClosure **)&wq->prev_queue_entry); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } case TVAR: { StgTVar *tvar = ((StgTVar *) p); - evac_gen = 0; - tvar->current_value = evacuate((StgClosure*)tvar->current_value); - tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&tvar->current_value); + evacuate((StgClosure **)&tvar->first_watch_queue_entry); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } case TREC_HEADER: { StgTRecHeader *trec = ((StgTRecHeader *) p); - evac_gen = 0; - trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec); - trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk); - trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check); - evac_gen = 
saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&trec->enclosing_trec); + evacuate((StgClosure **)&trec->current_chunk); + evacuate((StgClosure **)&trec->invariants_to_check); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } @@ -1289,58 +1338,49 @@ scavenge_one(StgPtr p) StgWord i; StgTRecChunk *tc = ((StgTRecChunk *) p); TRecEntry *e = &(tc -> entries[0]); - evac_gen = 0; - tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk); + gct->evac_gen = 0; + evacuate((StgClosure **)&tc->prev_chunk); for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { - e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar); - e->expected_value = evacuate((StgClosure*)e->expected_value); - e->new_value = evacuate((StgClosure*)e->new_value); + evacuate((StgClosure **)&e->tvar); + evacuate((StgClosure **)&e->expected_value); + evacuate((StgClosure **)&e->new_value); } - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } case ATOMIC_INVARIANT: { StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p); - evac_gen = 0; - invariant->code = (StgClosure *)evacuate(invariant->code); - invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate(&invariant->code); + evacuate((StgClosure **)&invariant->last_execution); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } case INVARIANT_CHECK_QUEUE: { StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p); - evac_gen = 0; - queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant); - queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution); - queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry); - evac_gen = saved_evac_gen; - failed_to_evac = rtsTrue; // mutable + gct->evac_gen = 0; + evacuate((StgClosure **)&queue->invariant); + evacuate((StgClosure **)&queue->my_execution); + evacuate((StgClosure **)&queue->next_queue_entry); + gct->evac_gen = saved_evac_gen; + gct->failed_to_evac = rtsTrue; // mutable break; } + case IND: + // IND can happen, for example, when the interpreter allocates + // a gigantic AP closure (more than one block), which ends up + // on the large-object list and then gets updated. See #3424. case IND_OLDGEN: case IND_OLDGEN_PERM: case IND_STATIC: - { - /* Careful here: a THUNK can be on the mutable list because - * it contains pointers to young gen objects. If such a thunk - * is updated, the IND_OLDGEN will be added to the mutable - * list again, and we'll scavenge it twice. evacuate() - * doesn't check whether the object has already been - * evacuated, so we perform that check here. 
- */ - StgClosure *q = ((StgInd *)p)->indirectee; - if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) { - break; - } - ((StgInd *)p)->indirectee = evacuate(q); - } + evacuate(&((StgInd *)p)->indirectee); #if 0 && defined(DEBUG) if (RtsFlags.DebugFlags.gc) @@ -1348,21 +1388,21 @@ scavenge_one(StgPtr p) * promoted */ { - StgPtr start = gen->steps[0].scan; - bdescr *start_bd = gen->steps[0].scan_bd; + StgPtr start = gen->scan; + bdescr *start_bd = gen->scan_bd; nat size = 0; - scavenge(&gen->steps[0]); - if (start_bd != gen->steps[0].scan_bd) { + scavenge(&gen); + if (start_bd != gen->scan_bd) { size += (P_)BLOCK_ROUND_UP(start) - start; start_bd = start_bd->link; - while (start_bd != gen->steps[0].scan_bd) { + while (start_bd != gen->scan_bd) { size += BLOCK_SIZE_W; start_bd = start_bd->link; } - size += gen->steps[0].scan - - (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan); + size += gen->scan - + (P_)BLOCK_ROUND_DOWN(gen->scan); } else { - size = gen->steps[0].scan - start; + size = gen->scan - start; } debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_)); } @@ -1373,8 +1413,8 @@ scavenge_one(StgPtr p) barf("scavenge_one: strange object %d", (int)(info->type)); } - no_luck = failed_to_evac; - failed_to_evac = rtsFalse; + no_luck = gct->failed_to_evac; + gct->failed_to_evac = rtsFalse; return (no_luck); } @@ -1387,14 +1427,11 @@ scavenge_one(StgPtr p) -------------------------------------------------------------------------- */ void -scavenge_mutable_list(generation *gen) +scavenge_mutable_list(bdescr *bd, generation *gen) { - bdescr *bd; StgPtr p, q; - bd = gen->saved_mut_list; - - evac_gen = gen->no; + gct->evac_gen = gen; for (; bd != NULL; bd = bd->link) { for (q = bd->start; q < bd->free; q++) { p = (StgPtr)*q; @@ -1411,6 +1448,10 @@ scavenge_mutable_list(generation *gen) case MUT_ARR_PTRS_FROZEN: case MUT_ARR_PTRS_FROZEN0: mutlist_MUTARRS++; break; + case MVAR_CLEAN: + barf("MVAR_CLEAN on mutable list"); + case MVAR_DIRTY: + mutlist_MVARS++; break; default: mutlist_OTHERS++; break; } @@ -1420,24 +1461,48 @@ scavenge_mutable_list(generation *gen) // definitely doesn't point into a young generation. // Clean objects don't need to be scavenged. Some clean // objects (MUT_VAR_CLEAN) are not kept on the mutable - // list at all; others, such as MUT_ARR_PTRS_CLEAN and - // TSO, are always on the mutable list. + // list at all; others, such as TSO + // are always on the mutable list. // switch (get_itbl((StgClosure *)p)->type) { case MUT_ARR_PTRS_CLEAN: - recordMutableGen((StgClosure *)p,gen); + recordMutableGen_GC((StgClosure *)p,gen->no); + continue; + case MUT_ARR_PTRS_DIRTY: + { + rtsBool saved_eager; + saved_eager = gct->eager_promotion; + gct->eager_promotion = rtsFalse; + + scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p); + + if (gct->failed_to_evac) { + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; + } else { + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; + } + + gct->eager_promotion = saved_eager; + gct->failed_to_evac = rtsFalse; + recordMutableGen_GC((StgClosure *)p,gen->no); continue; + } case TSO: { StgTSO *tso = (StgTSO *)p; - if ((tso->flags & TSO_DIRTY) == 0) { - // A clean TSO: we don't have to traverse its - // stack. However, we *do* follow the link field: - // we don't want to have to mark a TSO dirty just - // because we put it on a different queue. 
- if (tso->why_blocked != BlockedOnBlackHole) { - tso->link = (StgTSO *)evacuate((StgClosure *)tso->link); - } - recordMutableGen((StgClosure *)p,gen); + if (tso->dirty == 0) { + // Should be on the mutable list because its link + // field is dirty. However, in parallel GC we may + // have a thread on multiple mutable lists, so + // this assertion would be invalid: + // ASSERT(tso->flags & TSO_LINK_DIRTY); + + scavenge_TSO_link(tso); + if (gct->failed_to_evac) { + recordMutableGen_GC((StgClosure *)p,gen->no); + gct->failed_to_evac = rtsFalse; + } else { + tso->flags &= ~TSO_LINK_DIRTY; + } continue; } } @@ -1448,14 +1513,28 @@ scavenge_mutable_list(generation *gen) if (scavenge_one(p)) { // didn't manage to promote everything, so put the // object back on the list. - recordMutableGen((StgClosure *)p,gen); + recordMutableGen_GC((StgClosure *)p,gen->no); } } } +} + +void +scavenge_capability_mut_lists (Capability *cap) +{ + nat g; - // free the old mut_list - freeChain(gen->saved_mut_list); - gen->saved_mut_list = NULL; + /* Mutable lists from each generation > N + * we want to *scavenge* these roots, not evacuate them: they're not + * going to move in this GC. + * Also do them in reverse generation order, for the usual reason: + * namely to reduce the likelihood of spurious old->new pointers. + */ + for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { + scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]); + freeChain_sync(cap->saved_mut_lists[g]); + cap->saved_mut_lists[g] = NULL; + } } /* ----------------------------------------------------------------------------- @@ -1466,50 +1545,62 @@ scavenge_mutable_list(generation *gen) remove non-mutable objects from the mutable list at this point. -------------------------------------------------------------------------- */ -void +static void scavenge_static(void) { - StgClosure* p = static_objects; + StgClosure* p; const StgInfoTable *info; + debugTrace(DEBUG_gc, "scavenging static objects"); + /* Always evacuate straight to the oldest generation for static * objects */ - evac_gen = oldest_gen->no; + gct->evac_gen = oldest_gen; /* keep going until we've scavenged all the objects on the linked list... */ - while (p != END_OF_STATIC_LIST) { + while (1) { + + /* get the next static object from the list. Remember, there might + * be more stuff on this list after each evacuation... + * (static_objects is a global) + */ + p = gct->static_objects; + if (p == END_OF_STATIC_LIST) { + break; + } + ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); info = get_itbl(p); /* - if (info->type==RBH) - info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure + if (info->type==RBH) + info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure */ // make sure the info pointer is into text space /* Take this object *off* the static_objects list, * and put it on the scavenged_static_objects list. */ - static_objects = *STATIC_LINK(info,p); - *STATIC_LINK(info,p) = scavenged_static_objects; - scavenged_static_objects = p; + gct->static_objects = *STATIC_LINK(info,p); + *STATIC_LINK(info,p) = gct->scavenged_static_objects; + gct->scavenged_static_objects = p; switch (info -> type) { case IND_STATIC: { StgInd *ind = (StgInd *)p; - ind->indirectee = evacuate(ind->indirectee); + evacuate(&ind->indirectee); /* might fail to evacuate it, in which case we have to pop it * back on the mutable list of the oldest generation. We * leave it *on* the scavenged_static_objects list, though, * in case we visit this object again. 
*/ - if (failed_to_evac) { - failed_to_evac = rtsFalse; - recordMutableGen((StgClosure *)p,oldest_gen); + if (gct->failed_to_evac) { + gct->failed_to_evac = rtsFalse; + recordMutableGen_GC((StgClosure *)p,oldest_gen->no); } break; } @@ -1529,7 +1620,7 @@ scavenge_static(void) next = (P_)p->payload + info->layout.payload.ptrs; // evacuate the pointers for (q = (P_)p->payload; q < next; q++) { - *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q); + evacuate((StgClosure **)q); } break; } @@ -1538,13 +1629,7 @@ scavenge_static(void) barf("scavenge_static: strange closure %d", (int)(info->type)); } - ASSERT(failed_to_evac == rtsFalse); - - /* get the next static object from the list. Remember, there might - * be more stuff on this list now that we've done some evacuating! - * (static_objects is a global) - */ - p = static_objects; + ASSERT(gct->failed_to_evac == rtsFalse); } } @@ -1562,7 +1647,7 @@ scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size ) bitmap = large_bitmap->bitmap[b]; for (i = 0; i < size; ) { if ((bitmap & 1) == 0) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + evacuate((StgClosure **)p); } i++; p++; @@ -1580,7 +1665,7 @@ scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap) { while (size > 0) { if ((bitmap & 1) == 0) { - *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p); + evacuate((StgClosure **)p); } p++; bitmap = bitmap >> 1; @@ -1635,14 +1720,35 @@ scavenge_stack(StgPtr p, StgPtr stack_end) // the indirection into an IND_PERM, so that evacuate will // copy the indirection into the old generation instead of // discarding it. - if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) { - ((StgUpdateFrame *)p)->updatee->header.info = - (StgInfoTable *)&stg_IND_PERM_info; - } - ((StgUpdateFrame *)p)->updatee - = evacuate(((StgUpdateFrame *)p)->updatee); - p += sizeofW(StgUpdateFrame); - continue; + // + // Note [upd-black-hole] + // One slight hiccup is that the THUNK_SELECTOR machinery can + // overwrite the updatee with an IND. In parallel GC, this + // could even be happening concurrently, so we can't check for + // the IND. Fortunately if we assume that blackholing is + // happening (either lazy or eager), then we can be sure that + // the updatee is never a THUNK_SELECTOR and we're ok. + // NB. this is a new invariant: blackholing is not optional. 
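
For reference, the frame being scavenged here is only a header plus the
updatee pointer (shape as declared in the RTS's Closures.h):

    /* typedef struct {
     *     StgHeader   header;      info pointer: stg_upd_frame_info
     *     StgClosure *updatee;     the thunk this frame will update
     * } StgUpdateFrame;                                               */
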
+      {
+          nat type;
+          const StgInfoTable *i;
+          StgClosure *updatee;
+
+          updatee = ((StgUpdateFrame *)p)->updatee;
+          i = updatee->header.info;
+          if (!IS_FORWARDING_PTR(i)) {
+              type = get_itbl(updatee)->type;
+              if (type == IND) {
+                  updatee->header.info = &stg_IND_PERM_info;
+              } else if (type == IND_OLDGEN) {
+                  updatee->header.info = &stg_IND_OLDGEN_PERM_info;
+              }
+          }
+          evacuate(&((StgUpdateFrame *)p)->updatee);
+          ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
+          p += sizeofW(StgUpdateFrame);
+          continue;
+      }

      // small bitmap (< 32 entries, or 64 on a 64-bit machine)
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case RET_SMALL:
-    case RET_VEC_SMALL:
	bitmap = BITMAP_BITS(info->i.layout.bitmap);
	size   = BITMAP_SIZE(info->i.layout.bitmap);
	// NOTE: the payload starts immediately after the info-ptr, we
@@ -1669,7 +1774,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
	    nat size;

	    p++;
-	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+	    evacuate((StgClosure **)p);
	    bco = (StgBCO *)*p;
	    p++;
	    size = BCO_BITMAP_SIZE(bco);
@@ -1680,7 +1785,6 @@ scavenge_stack(StgPtr p, StgPtr stack_end)

      // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
    case RET_BIG:
-    case RET_VEC_BIG:
    {
	nat size;
@@ -1712,7 +1816,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)

	// follow the ptr words
	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
-	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+	    evacuate((StgClosure **)p);
	    p++;
	}
	continue;
@@ -1723,8 +1827,8 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
	StgRetFun *ret_fun = (StgRetFun *)p;
	StgFunInfoTable *fun_info;

-	ret_fun->fun = evacuate(ret_fun->fun);
-	fun_info = get_fun_itbl(ret_fun->fun);
+	evacuate(&ret_fun->fun);
+	fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
	p = scavenge_arg_block(fun_info, ret_fun->payload);
	goto follow_srt;
    }
@@ -1744,33 +1848,161 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
   be zero.
  --------------------------------------------------------------------------- */

-void
-scavenge_large(step *stp)
+static void
+scavenge_large (gen_workspace *ws)
{
-  bdescr *bd;
-  StgPtr p;
+    bdescr *bd;
+    StgPtr p;

-  bd = stp->new_large_objects;
+    gct->evac_gen = ws->gen;

-  for (; bd != NULL; bd = stp->new_large_objects) {
+    bd = ws->todo_large_objects;
+
+    for (; bd != NULL; bd = ws->todo_large_objects) {
+
+        // take this object *off* the large objects list and put it on
+        // the scavenged large objects list.  This is so that we can
+        // treat todo_large_objects as a stack and push new objects on
+        // the front when evacuating.
+        ws->todo_large_objects = bd->link;
+
+        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
+        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
+        ws->gen->n_scavenged_large_blocks += bd->blocks;
+        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);
+
+        p = bd->start;
+        if (scavenge_one(p)) {
+            if (ws->gen->no > 0) {
+                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
+            }
+        }

-    /* take this object *off* the large objects list and put it on
-     * the scavenged large objects list.  This is so that we can
-     * treat new_large_objects as a stack and push new objects on
-     * the front when evacuating.
-     */
-    stp->new_large_objects = bd->link;
-    dbl_link_onto(bd, &stp->scavenged_large_objects);

-    // update the block count in this step.
-    stp->n_scavenged_large_blocks += bd->blocks;
+        // stats: account for the words scanned in this object
+        gct->scanned += closure_sizeW((StgClosure*)p);
+    }
+}

-    p = bd->start;
-    if (scavenge_one(p)) {
-	if (stp->gen_no > 0) {
-	    recordMutableGen((StgClosure *)p, stp->gen);
-	}
+/* ----------------------------------------------------------------------------
+   Look for work to do.
- p = bd->start; - if (scavenge_one(p)) { - if (stp->gen_no > 0) { - recordMutableGen((StgClosure *)p, stp->gen); - } + We look for the oldest gen that has either a todo block that can + be scanned, or a block of work on the global queue that we can + scan. + + It is important to take work from the *oldest* generation that we + has work available, because that minimizes the likelihood of + evacuating objects into a young generation when they should have + been eagerly promoted. This really does make a difference (the + cacheprof benchmark is one that is affected). + + We also want to scan the todo block if possible before grabbing + work from the global queue, the reason being that we don't want to + steal work from the global queue and starve other threads if there + is other work we can usefully be doing. + ------------------------------------------------------------------------- */ + +static rtsBool +scavenge_find_work (void) +{ + int g; + gen_workspace *ws; + rtsBool did_something, did_anything; + bdescr *bd; + + gct->scav_find_work++; + + did_anything = rtsFalse; + +loop: + did_something = rtsFalse; + for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) { + ws = &gct->gens[g]; + + gct->scan_bd = NULL; + + // If we have a scan block with some work to do, + // scavenge everything up to the free pointer. + if (ws->todo_bd->u.scan < ws->todo_free) + { + scavenge_block(ws->todo_bd); + did_something = rtsTrue; + break; + } + + // If we have any large objects to scavenge, do them now. + if (ws->todo_large_objects) { + scavenge_large(ws); + did_something = rtsTrue; + break; + } + + if ((bd = grab_local_todo_block(ws)) != NULL) { + scavenge_block(bd); + did_something = rtsTrue; + break; + } } - } + + if (did_something) { + did_anything = rtsTrue; + goto loop; + } + +#if defined(THREADED_RTS) + if (work_stealing) { + // look for work to steal + for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) { + if ((bd = steal_todo_block(g)) != NULL) { + scavenge_block(bd); + did_something = rtsTrue; + break; + } + } + + if (did_something) { + did_anything = rtsTrue; + goto loop; + } + } +#endif + + // only return when there is no more work to do + + return did_anything; +} + +/* ---------------------------------------------------------------------------- + Scavenge until we can't find anything more to scavenge. + ------------------------------------------------------------------------- */ + +void +scavenge_loop(void) +{ + rtsBool work_to_do; + +loop: + work_to_do = rtsFalse; + + // scavenge static objects + if (major_gc && gct->static_objects != END_OF_STATIC_LIST) { + IF_DEBUG(sanity, checkStaticObjects(gct->static_objects)); + scavenge_static(); + } + + // scavenge objects in compacted generation + if (mark_stack_bd != NULL && !mark_stack_empty()) { + scavenge_mark_stack(); + work_to_do = rtsTrue; + } + + // Order is important here: we want to deal in full blocks as + // much as possible, so go for global work in preference to + // local work. Only if all the global work has been exhausted + // do we start scavenging the fragments of blocks in the local + // workspaces. + if (scavenge_find_work()) goto loop; + + if (work_to_do) goto loop; }