X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FScav.c;h=d01442b34ed8101f31be5bac883d3cf19e33b7e7;hb=4a05e6139d756c0473df7a6dcb257074201f843d;hp=b22f2442a175f8eb2f20f1811c034fafdec7a859;hpb=2aa877f8588da099351ef51efca3605fd87ea768;p=ghc-hetmet.git diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c index b22f244..d01442b 100644 --- a/rts/sm/Scav.c +++ b/rts/sm/Scav.c @@ -1,6 +1,6 @@ /* ----------------------------------------------------------------------------- * - * (c) The GHC Team 1998-2006 + * (c) The GHC Team 1998-2008 * * Generational garbage collector: scavenging functions * @@ -11,19 +11,22 @@ * * ---------------------------------------------------------------------------*/ +#include "PosixSource.h" #include "Rts.h" -#include "RtsFlags.h" + #include "Storage.h" -#include "MBlock.h" #include "GC.h" +#include "GCThread.h" #include "GCUtils.h" #include "Compact.h" +#include "MarkStack.h" #include "Evac.h" #include "Scav.h" #include "Apply.h" #include "Trace.h" -#include "LdvProfile.h" #include "Sanity.h" +#include "Capability.h" +#include "LdvProfile.h" static void scavenge_stack (StgPtr p, StgPtr stack_end); @@ -31,6 +34,251 @@ static void scavenge_large_bitmap (StgPtr p, StgLargeBitmap *large_bitmap, nat size ); +#if defined(THREADED_RTS) && !defined(PARALLEL_GC) +# define evacuate(a) evacuate1(a) +# define scavenge_loop(a) scavenge_loop1(a) +# define scavenge_block(a) scavenge_block1(a) +# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g) +# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap) +#endif + +/* ----------------------------------------------------------------------------- + Scavenge a TSO. + -------------------------------------------------------------------------- */ + +static void +scavengeTSO (StgTSO *tso) +{ + rtsBool saved_eager; + + if (tso->what_next == ThreadRelocated) { + // the only way this can happen is if the old TSO was on the + // mutable list. We might have other links to this defunct + // TSO, so we must update its link field. + evacuate((StgClosure**)&tso->_link); + return; + } + + debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id); + + // update the pointer from the Task. + if (tso->bound != NULL) { + tso->bound->tso = tso; + } + + saved_eager = gct->eager_promotion; + gct->eager_promotion = rtsFalse; + + + evacuate((StgClosure **)&tso->blocked_exceptions); + evacuate((StgClosure **)&tso->bq); + + // scavange current transaction record + evacuate((StgClosure **)&tso->trec); + + // scavenge this thread's stack + scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); + + tso->dirty = gct->failed_to_evac; + + evacuate((StgClosure **)&tso->_link); + if ( tso->why_blocked == BlockedOnMVar + || tso->why_blocked == BlockedOnBlackHole + || tso->why_blocked == BlockedOnMsgThrowTo + || tso->why_blocked == NotBlocked + ) { + evacuate(&tso->block_info.closure); + } +#ifdef THREADED_RTS + // in the THREADED_RTS, block_info.closure must always point to a + // valid closure, because we assume this in throwTo(). 
In the + // non-threaded RTS it might be a FD (for + // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay) + else { + tso->block_info.closure = (StgClosure *)END_TSO_QUEUE; + } +#endif + + if (tso->dirty == 0 && gct->failed_to_evac) { + tso->flags |= TSO_LINK_DIRTY; + } else { + tso->flags &= ~TSO_LINK_DIRTY; + } + + gct->eager_promotion = saved_eager; +} + +/* ----------------------------------------------------------------------------- + Mutable arrays of pointers + -------------------------------------------------------------------------- */ + +static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a) +{ + lnat m; + rtsBool any_failed; + StgPtr p, q; + + any_failed = rtsFalse; + p = (StgPtr)&a->payload[0]; + for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++) + { + q = p + (1 << MUT_ARR_PTRS_CARD_BITS); + for (; p < q; p++) { + evacuate((StgClosure**)p); + } + if (gct->failed_to_evac) { + any_failed = rtsTrue; + *mutArrPtrsCard(a,m) = 1; + gct->failed_to_evac = rtsFalse; + } else { + *mutArrPtrsCard(a,m) = 0; + } + } + + q = (StgPtr)&a->payload[a->ptrs]; + if (p < q) { + for (; p < q; p++) { + evacuate((StgClosure**)p); + } + if (gct->failed_to_evac) { + any_failed = rtsTrue; + *mutArrPtrsCard(a,m) = 1; + gct->failed_to_evac = rtsFalse; + } else { + *mutArrPtrsCard(a,m) = 0; + } + } + + gct->failed_to_evac = any_failed; + return (StgPtr)a + mut_arr_ptrs_sizeW(a); +} + +// scavenge only the marked areas of a MUT_ARR_PTRS +static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a) +{ + lnat m; + StgPtr p, q; + rtsBool any_failed; + + any_failed = rtsFalse; + for (m = 0; m < mutArrPtrsCards(a->ptrs); m++) + { + if (*mutArrPtrsCard(a,m) != 0) { + p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS]; + q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS), + (StgPtr)&a->payload[a->ptrs]); + for (; p < q; p++) { + evacuate((StgClosure**)p); + } + if (gct->failed_to_evac) { + any_failed = rtsTrue; + gct->failed_to_evac = rtsFalse; + } else { + *mutArrPtrsCard(a,m) = 0; + } + } + } + + gct->failed_to_evac = any_failed; + return (StgPtr)a + mut_arr_ptrs_sizeW(a); +} + +/* ----------------------------------------------------------------------------- + Blocks of function args occur on the stack (at the top) and + in PAPs. 
+ -------------------------------------------------------------------------- */ + +STATIC_INLINE StgPtr +scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) +{ + StgPtr p; + StgWord bitmap; + nat size; + + p = (StgPtr)args; + switch (fun_info->f.fun_type) { + case ARG_GEN: + bitmap = BITMAP_BITS(fun_info->f.b.bitmap); + size = BITMAP_SIZE(fun_info->f.b.bitmap); + goto small_bitmap; + case ARG_GEN_BIG: + size = GET_FUN_LARGE_BITMAP(fun_info)->size; + scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); + p += size; + break; + default: + bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); + size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]); + small_bitmap: + while (size > 0) { + if ((bitmap & 1) == 0) { + evacuate((StgClosure **)p); + } + p++; + bitmap = bitmap >> 1; + size--; + } + break; + } + return p; +} + +STATIC_INLINE GNUC_ATTR_HOT StgPtr +scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size) +{ + StgPtr p; + StgWord bitmap; + StgFunInfoTable *fun_info; + + fun_info = get_fun_itbl(UNTAG_CLOSURE(fun)); + ASSERT(fun_info->i.type != PAP); + p = (StgPtr)payload; + + switch (fun_info->f.fun_type) { + case ARG_GEN: + bitmap = BITMAP_BITS(fun_info->f.b.bitmap); + goto small_bitmap; + case ARG_GEN_BIG: + scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); + p += size; + break; + case ARG_BCO: + scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size); + p += size; + break; + default: + bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); + small_bitmap: + while (size > 0) { + if ((bitmap & 1) == 0) { + evacuate((StgClosure **)p); + } + p++; + bitmap = bitmap >> 1; + size--; + } + break; + } + return p; +} + +STATIC_INLINE GNUC_ATTR_HOT StgPtr +scavenge_PAP (StgPAP *pap) +{ + evacuate(&pap->fun); + return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args); +} + +STATIC_INLINE StgPtr +scavenge_AP (StgAP *ap) +{ + evacuate(&ap->fun); + return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args); +} + +/* ----------------------------------------------------------------------------- + Scavenge SRTs + -------------------------------------------------------------------------- */ /* Similar to scavenge_large_bitmap(), but we don't write back the * pointers we get back from evacuate(). @@ -65,7 +313,7 @@ scavenge_large_srt_bitmap( StgLargeSRT *large_srt ) * srt field in the info table. That's ok, because we'll * never dereference it. */ -STATIC_INLINE void +STATIC_INLINE GNUC_ATTR_HOT void scavenge_srt (StgClosure **srt, nat srt_bitmap) { nat bitmap; @@ -81,7 +329,7 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap) while (bitmap != 0) { if ((bitmap & 1) != 0) { -#if defined(__PIC__) && defined(mingw32_TARGET_OS) +#if defined(__PIC__) && defined(mingw32_HOST_OS) // Special-case to handle references to closures hiding out in DLLs, since // double indirections required to get at those. The code generator knows // which is which when generating the SRT, so it stores the (indirect) @@ -91,7 +339,7 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap) // If the SRT entry hasn't got bit 0 set, the SRT entry points to a // closure that's fixed at link-time, and no extra magic is required. 
if ( (unsigned long)(*srt) & 0x1 ) { - evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1))); + evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1)); } else { evacuate(p); } @@ -105,7 +353,7 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap) } -STATIC_INLINE void +STATIC_INLINE GNUC_ATTR_HOT void scavenge_thunk_srt(const StgInfoTable *info) { StgThunkInfoTable *thunk_info; @@ -116,7 +364,7 @@ scavenge_thunk_srt(const StgInfoTable *info) scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap); } -STATIC_INLINE void +STATIC_INLINE GNUC_ATTR_HOT void scavenge_fun_srt(const StgInfoTable *info) { StgFunInfoTable *fun_info; @@ -128,148 +376,372 @@ scavenge_fun_srt(const StgInfoTable *info) } /* ----------------------------------------------------------------------------- - Scavenge a TSO. + Scavenge a block from the given scan pointer up to bd->free. + + evac_gen is set by the caller to be either zero (for a step in a + generation < N) or G where G is the generation of the step being + scavenged. + + We sometimes temporarily change evac_gen back to zero if we're + scavenging a mutable object where eager promotion isn't such a good + idea. -------------------------------------------------------------------------- */ -static void -scavengeTSO (StgTSO *tso) +static GNUC_ATTR_HOT void +scavenge_block (bdescr *bd) { - rtsBool saved_eager; + StgPtr p, q; + StgInfoTable *info; + rtsBool saved_eager_promotion; + gen_workspace *ws; - if (tso->what_next == ThreadRelocated) { - // the only way this can happen is if the old TSO was on the - // mutable list. We might have other links to this defunct - // TSO, so we must update its link field. - evacuate((StgClosure**)&tso->_link); - return; + debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p", + bd->start, bd->gen_no, bd->u.scan); + + gct->scan_bd = bd; + gct->evac_gen = bd->gen; + saved_eager_promotion = gct->eager_promotion; + gct->failed_to_evac = rtsFalse; + + ws = &gct->gens[bd->gen->no]; + + p = bd->u.scan; + + // we might be evacuating into the very object that we're + // scavenging, so we have to check the real bd->free pointer each + // time around the loop. 
+ while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) { + + ASSERT(bd->link == NULL); + ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); + info = get_itbl((StgClosure *)p); + + ASSERT(gct->thunk_selector_depth == 0); + + q = p; + switch (info->type) { + + case MVAR_CLEAN: + case MVAR_DIRTY: + { + StgMVar *mvar = ((StgMVar *)p); + gct->eager_promotion = rtsFalse; + evacuate((StgClosure **)&mvar->head); + evacuate((StgClosure **)&mvar->tail); + evacuate((StgClosure **)&mvar->value); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + mvar->header.info = &stg_MVAR_DIRTY_info; + } else { + mvar->header.info = &stg_MVAR_CLEAN_info; + } + p += sizeofW(StgMVar); + break; } - saved_eager = gct->eager_promotion; - gct->eager_promotion = rtsFalse; + case FUN_2_0: + scavenge_fun_srt(info); + evacuate(&((StgClosure *)p)->payload[1]); + evacuate(&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; - if ( tso->why_blocked == BlockedOnMVar - || tso->why_blocked == BlockedOnBlackHole - || tso->why_blocked == BlockedOnException - ) { - evacuate(&tso->block_info.closure); + case THUNK_2_0: + scavenge_thunk_srt(info); + evacuate(&((StgThunk *)p)->payload[1]); + evacuate(&((StgThunk *)p)->payload[0]); + p += sizeofW(StgThunk) + 2; + break; + + case CONSTR_2_0: + evacuate(&((StgClosure *)p)->payload[1]); + evacuate(&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_1_0: + scavenge_thunk_srt(info); + evacuate(&((StgThunk *)p)->payload[0]); + p += sizeofW(StgThunk) + 1; + break; + + case FUN_1_0: + scavenge_fun_srt(info); + case CONSTR_1_0: + evacuate(&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 1; + break; + + case THUNK_0_1: + scavenge_thunk_srt(info); + p += sizeofW(StgThunk) + 1; + break; + + case FUN_0_1: + scavenge_fun_srt(info); + case CONSTR_0_1: + p += sizeofW(StgHeader) + 1; + break; + + case THUNK_0_2: + scavenge_thunk_srt(info); + p += sizeofW(StgThunk) + 2; + break; + + case FUN_0_2: + scavenge_fun_srt(info); + case CONSTR_0_2: + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_1_1: + scavenge_thunk_srt(info); + evacuate(&((StgThunk *)p)->payload[0]); + p += sizeofW(StgThunk) + 2; + break; + + case FUN_1_1: + scavenge_fun_srt(info); + case CONSTR_1_1: + evacuate(&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + + case FUN: + scavenge_fun_srt(info); + goto gen_obj; + + case THUNK: + { + StgPtr end; + + scavenge_thunk_srt(info); + end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgThunk *)p)->payload; p < end; p++) { + evacuate((StgClosure **)p); + } + p += info->layout.payload.nptrs; + break; } - evacuate((StgClosure **)&tso->blocked_exceptions); - - // We don't always chase the link field: TSOs on the blackhole - // queue are not automatically alive, so the link field is a - // "weak" pointer in that case. 
- if (tso->why_blocked != BlockedOnBlackHole) { - evacuate((StgClosure **)&tso->link); + + gen_obj: + case CONSTR: + case WEAK: + case PRIM: + { + StgPtr end; + + end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { + evacuate((StgClosure **)p); + } + p += info->layout.payload.nptrs; + break; } - // scavange current transaction record - evacuate((StgClosure **)&tso->trec); - - // scavenge this thread's stack - scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); + case BCO: { + StgBCO *bco = (StgBCO *)p; + evacuate((StgClosure **)&bco->instrs); + evacuate((StgClosure **)&bco->literals); + evacuate((StgClosure **)&bco->ptrs); + p += bco_sizeW(bco); + break; + } - if (gct->failed_to_evac) { - tso->flags |= TSO_DIRTY; - } else { - tso->flags &= ~TSO_DIRTY; + case IND_PERM: + case BLACKHOLE: + evacuate(&((StgInd *)p)->indirectee); + p += sizeofW(StgInd); + break; + + case MUT_VAR_CLEAN: + case MUT_VAR_DIRTY: + gct->eager_promotion = rtsFalse; + evacuate(&((StgMutVar *)p)->var); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info; + } else { + ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info; + } + p += sizeofW(StgMutVar); + break; + + case BLOCKING_QUEUE: + { + StgBlockingQueue *bq = (StgBlockingQueue *)p; + + gct->eager_promotion = rtsFalse; + evacuate(&bq->bh); + evacuate((StgClosure**)&bq->owner); + evacuate((StgClosure**)&bq->queue); + evacuate((StgClosure**)&bq->link); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info; + } else { + bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info; + } + p += sizeofW(StgBlockingQueue); + break; } - gct->eager_promotion = saved_eager; -} + case THUNK_SELECTOR: + { + StgSelector *s = (StgSelector *)p; + evacuate(&s->selectee); + p += THUNK_SELECTOR_sizeW(); + break; + } -/* ----------------------------------------------------------------------------- - Blocks of function args occur on the stack (at the top) and - in PAPs. 
- -------------------------------------------------------------------------- */ + // A chunk of stack saved in a heap object + case AP_STACK: + { + StgAP_STACK *ap = (StgAP_STACK *)p; -STATIC_INLINE StgPtr -scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) -{ - StgPtr p; - StgWord bitmap; - nat size; + evacuate(&ap->fun); + scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size); + p = (StgPtr)ap->payload + ap->size; + break; + } - p = (StgPtr)args; - switch (fun_info->f.fun_type) { - case ARG_GEN: - bitmap = BITMAP_BITS(fun_info->f.b.bitmap); - size = BITMAP_SIZE(fun_info->f.b.bitmap); - goto small_bitmap; - case ARG_GEN_BIG: - size = GET_FUN_LARGE_BITMAP(fun_info)->size; - scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); - p += size; + case PAP: + p = scavenge_PAP((StgPAP *)p); break; - default: - bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); - size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]); - small_bitmap: - while (size > 0) { - if ((bitmap & 1) == 0) { - evacuate((StgClosure **)p); - } - p++; - bitmap = bitmap >> 1; - size--; + + case AP: + p = scavenge_AP((StgAP *)p); + break; + + case ARR_WORDS: + // nothing to follow + p += arr_words_sizeW((StgArrWords *)p); + break; + + case MUT_ARR_PTRS_CLEAN: + case MUT_ARR_PTRS_DIRTY: + { + // We don't eagerly promote objects pointed to by a mutable + // array, but if we find the array only points to objects in + // the same or an older generation, we mark it "clean" and + // avoid traversing it during minor GCs. + gct->eager_promotion = rtsFalse; + + p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p); + + if (gct->failed_to_evac) { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; + } else { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; } + + gct->eager_promotion = saved_eager_promotion; + gct->failed_to_evac = rtsTrue; // always put it on the mutable list. break; } - return p; -} -STATIC_INLINE StgPtr -scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size) -{ - StgPtr p; - StgWord bitmap; - StgFunInfoTable *fun_info; - - fun_info = get_fun_itbl(UNTAG_CLOSURE(fun)); - ASSERT(fun_info->i.type != PAP); - p = (StgPtr)payload; + case MUT_ARR_PTRS_FROZEN: + case MUT_ARR_PTRS_FROZEN0: + // follow everything + { + p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p); - switch (fun_info->f.fun_type) { - case ARG_GEN: - bitmap = BITMAP_BITS(fun_info->f.b.bitmap); - goto small_bitmap; - case ARG_GEN_BIG: - scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); - p += size; + // If we're going to put this object on the mutable list, then + // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that. 
+ if (gct->failed_to_evac) { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info; + } else { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info; + } break; - case ARG_BCO: - scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size); - p += size; + } + + case TSO: + { + StgTSO *tso = (StgTSO *)p; + scavengeTSO(tso); + p += tso_sizeW(tso); + break; + } + + case MUT_PRIM: + { + StgPtr end; + + gct->eager_promotion = rtsFalse; + + end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { + evacuate((StgClosure **)p); + } + p += info->layout.payload.nptrs; + + gct->eager_promotion = saved_eager_promotion; + gct->failed_to_evac = rtsTrue; // mutable + break; + } + + case TREC_CHUNK: + { + StgWord i; + StgTRecChunk *tc = ((StgTRecChunk *) p); + TRecEntry *e = &(tc -> entries[0]); + gct->eager_promotion = rtsFalse; + evacuate((StgClosure **)&tc->prev_chunk); + for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { + evacuate((StgClosure **)&e->tvar); + evacuate((StgClosure **)&e->expected_value); + evacuate((StgClosure **)&e->new_value); + } + gct->eager_promotion = saved_eager_promotion; + gct->failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgTRecChunk); break; + } + default: - bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); - small_bitmap: - while (size > 0) { - if ((bitmap & 1) == 0) { - evacuate((StgClosure **)p); - } - p++; - bitmap = bitmap >> 1; - size--; + barf("scavenge: unimplemented/strange closure type %d @ %p", + info->type, p); + } + + /* + * We need to record the current object on the mutable list if + * (a) It is actually mutable, or + * (b) It contains pointers to a younger generation. + * Case (b) arises if we didn't manage to promote everything that + * the current object points to into the current generation. + */ + if (gct->failed_to_evac) { + gct->failed_to_evac = rtsFalse; + if (bd->gen_no > 0) { + recordMutableGen_GC((StgClosure *)q, bd->gen_no); } - break; } - return p; -} + } -STATIC_INLINE StgPtr -scavenge_PAP (StgPAP *pap) -{ - evacuate(&pap->fun); - return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args); -} + if (p > bd->free) { + gct->copied += ws->todo_free - bd->free; + bd->free = p; + } -STATIC_INLINE StgPtr -scavenge_AP (StgAP *ap) -{ - evacuate(&ap->fun); - return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args); -} + debugTrace(DEBUG_gc, " scavenged %ld bytes", + (unsigned long)((bd->free - bd->u.scan) * sizeof(W_))); + + // update stats: this is a block that has been scavenged + gct->scanned += bd->free - bd->u.scan; + bd->u.scan = bd->free; + + if (bd != ws->todo_bd) { + // we're not going to evac any more objects into + // this block, so push it now. + push_scanned_block(bd, ws); + } + gct->scan_bd = NULL; +} /* ----------------------------------------------------------------------------- Scavenge everything on the mark stack. 
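A minimal standalone sketch of the card-table scheme implemented by the new scavenge_mut_arr_ptrs()/scavenge_mut_arr_ptrs_marked() above. The PtrArray type, the 7-bit card size and the points_young callback are illustrative assumptions standing in for StgMutArrPtrs, MUT_ARR_PTRS_CARD_BITS and the evacuate()/failed_to_evac machinery; only the shape of the two loops is meant to match.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define CARD_BITS 7                         /* assumed: 128 slots per card  */
    #define CARD_SIZE ((size_t)1 << CARD_BITS)

    typedef struct {
        size_t    nelems;                       /* number of pointer slots      */
        void    **elems;                        /* the pointers themselves      */
        uint8_t  *cards;                        /* one byte per card, 0 = clean */
    } PtrArray;

    static size_t n_cards(const PtrArray *a) {
        return (a->nelems + CARD_SIZE - 1) >> CARD_BITS;
    }

    /* Scan the array card by card.  If only_dirty is set, cards already
     * known to be clean are skipped (the minor-GC fast path); otherwise
     * every card is visited.  points_young models evacuate() reporting
     * failed_to_evac: true means the slot still refers to a younger
     * generation after evacuation. */
    static void scan_cards(PtrArray *a, bool only_dirty,
                           bool (*points_young)(void **slot)) {
        for (size_t c = 0; c < n_cards(a); c++) {
            if (only_dirty && a->cards[c] == 0)
                continue;                       /* provably no young pointers   */
            size_t lo = c << CARD_BITS;
            size_t hi = lo + CARD_SIZE;
            if (hi > a->nelems) hi = a->nelems;
            uint8_t dirty = 0;
            for (size_t i = lo; i < hi; i++)
                dirty |= (uint8_t)points_young(&a->elems[i]);
            a->cards[c] = dirty;                /* record per-card cleanliness  */
        }
    }

Under these assumptions, scan_cards(a, false, f) plays the role of the full walk in scavenge_mut_arr_ptrs(), while scan_cards(a, true, f) plays the role of scavenge_mut_arr_ptrs_marked(), which the mutable-list code later in this patch uses to revisit only the dirty cards of a MUT_ARR_PTRS_DIRTY array.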
@@ -283,26 +755,22 @@ scavenge_mark_stack(void) { StgPtr p, q; StgInfoTable *info; - step *saved_evac_step; + rtsBool saved_eager_promotion; - gct->evac_step = &oldest_gen->steps[0]; - saved_evac_step = gct->evac_step; + gct->evac_gen = oldest_gen; + saved_eager_promotion = gct->eager_promotion; -linear_scan: - while (!mark_stack_empty()) { - p = pop_mark_stack(); + while ((p = pop_mark_stack())) { ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); info = get_itbl((StgClosure *)p); q = p; - switch (((volatile StgWord *)info)[1] & 0xffff) { + switch (info->type) { case MVAR_CLEAN: case MVAR_DIRTY: { - rtsBool saved_eager_promotion = gct->eager_promotion; - StgMVar *mvar = ((StgMVar *)p); gct->eager_promotion = rtsFalse; evacuate((StgClosure **)&mvar->head); @@ -385,7 +853,7 @@ linear_scan: gen_obj: case CONSTR: case WEAK: - case STABLE_NAME: + case PRIM: { StgPtr end; @@ -410,15 +878,13 @@ linear_scan: // no "old" generation. break; - case IND_OLDGEN: - case IND_OLDGEN_PERM: + case IND: + case BLACKHOLE: evacuate(&((StgInd *)p)->indirectee); break; case MUT_VAR_CLEAN: case MUT_VAR_DIRTY: { - rtsBool saved_eager_promotion = gct->eager_promotion; - gct->eager_promotion = rtsFalse; evacuate(&((StgMutVar *)p)->var); gct->eager_promotion = saved_eager_promotion; @@ -431,10 +897,25 @@ linear_scan: break; } - case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: - case BLACKHOLE: + case BLOCKING_QUEUE: + { + StgBlockingQueue *bq = (StgBlockingQueue *)p; + + gct->eager_promotion = rtsFalse; + evacuate(&bq->bh); + evacuate((StgClosure**)&bq->owner); + evacuate((StgClosure**)&bq->queue); + evacuate((StgClosure**)&bq->link); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info; + } else { + bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info; + } + break; + } + case ARR_WORDS: break; @@ -467,27 +948,21 @@ linear_scan: case MUT_ARR_PTRS_DIRTY: // follow everything { - StgPtr next; - rtsBool saved_eager; - // We don't eagerly promote objects pointed to by a mutable // array, but if we find the array only points to objects in // the same or an older generation, we mark it "clean" and // avoid traversing it during minor GCs. - saved_eager = gct->eager_promotion; gct->eager_promotion = rtsFalse; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - evacuate((StgClosure **)p); - } - gct->eager_promotion = saved_eager; - if (gct->failed_to_evac) { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; - } else { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; - } + scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); + if (gct->failed_to_evac) { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; + } else { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; + } + + gct->eager_promotion = saved_eager_promotion; gct->failed_to_evac = rtsTrue; // mutable anyhow. break; } @@ -496,12 +971,9 @@ linear_scan: case MUT_ARR_PTRS_FROZEN0: // follow everything { - StgPtr next, q = p; + StgPtr q = p; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - evacuate((StgClosure **)p); - } + scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); // If we're going to put this object on the mutable list, then // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that. 
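A minimal standalone sketch of the clean/dirty header flip that recurs throughout the hunk above (stg_MUT_VAR_CLEAN_info/stg_MUT_VAR_DIRTY_info and the corresponding MUT_ARR_PTRS and BLOCKING_QUEUE variants): after scavenging a mutable object, the collector rewrites the object's info pointer to the CLEAN variant only when nothing it refers to was left behind in a younger generation, so that later minor collections can skip it. The InfoTable and MutVar types and the evac_failed callback are simplified assumptions, not RTS definitions.

    #include <stdbool.h>

    typedef struct { const char *name; } InfoTable;

    static const InfoTable MUT_VAR_CLEAN = { "MUT_VAR_CLEAN" };
    static const InfoTable MUT_VAR_DIRTY = { "MUT_VAR_DIRTY" };

    typedef struct {
        const InfoTable *info;   /* header word: identifies the closure type */
        void            *var;    /* the single mutable field                 */
    } MutVar;

    /* evac_failed models evacuate() plus gct->failed_to_evac: it evacuates
     * the slot and returns true if the target could not be promoted out of
     * the young generation. */
    static void scavenge_mut_var_sketch(MutVar *mv,
                                        bool (*evac_failed)(void **slot)) {
        bool failed = evac_failed(&mv->var);
        /* Clean objects provably contain no young-generation pointers and
         * may be skipped by later minor GCs; dirty ones stay on the
         * generation's mutable list and are revisited. */
        mv->info = failed ? &MUT_VAR_DIRTY : &MUT_VAR_CLEAN;
    }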
@@ -516,85 +988,42 @@ linear_scan: case TSO: { scavengeTSO((StgTSO*)p); - gct->failed_to_evac = rtsTrue; // always on the mutable list break; } - case TVAR_WATCH_QUEUE: - { - StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p); - gct->evac_step = 0; - evacuate((StgClosure **)&wq->closure); - evacuate((StgClosure **)&wq->next_queue_entry); - evacuate((StgClosure **)&wq->prev_queue_entry); - gct->evac_step = saved_evac_step; - gct->failed_to_evac = rtsTrue; // mutable - break; - } - - case TVAR: - { - StgTVar *tvar = ((StgTVar *) p); - gct->evac_step = 0; - evacuate((StgClosure **)&tvar->current_value); - evacuate((StgClosure **)&tvar->first_watch_queue_entry); - gct->evac_step = saved_evac_step; - gct->failed_to_evac = rtsTrue; // mutable - break; - } - + case MUT_PRIM: + { + StgPtr end; + + gct->eager_promotion = rtsFalse; + + end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { + evacuate((StgClosure **)p); + } + + gct->eager_promotion = saved_eager_promotion; + gct->failed_to_evac = rtsTrue; // mutable + break; + } + case TREC_CHUNK: { StgWord i; StgTRecChunk *tc = ((StgTRecChunk *) p); TRecEntry *e = &(tc -> entries[0]); - gct->evac_step = 0; + gct->eager_promotion = rtsFalse; evacuate((StgClosure **)&tc->prev_chunk); for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { evacuate((StgClosure **)&e->tvar); evacuate((StgClosure **)&e->expected_value); evacuate((StgClosure **)&e->new_value); } - gct->evac_step = saved_evac_step; - gct->failed_to_evac = rtsTrue; // mutable - break; - } - - case TREC_HEADER: - { - StgTRecHeader *trec = ((StgTRecHeader *) p); - gct->evac_step = 0; - evacuate((StgClosure **)&trec->enclosing_trec); - evacuate((StgClosure **)&trec->current_chunk); - evacuate((StgClosure **)&trec->invariants_to_check); - gct->evac_step = saved_evac_step; + gct->eager_promotion = saved_eager_promotion; gct->failed_to_evac = rtsTrue; // mutable break; } - case ATOMIC_INVARIANT: - { - StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p); - gct->evac_step = 0; - evacuate(&invariant->code); - evacuate((StgClosure **)&invariant->last_execution); - gct->evac_step = saved_evac_step; - gct->failed_to_evac = rtsTrue; // mutable - break; - } - - case INVARIANT_CHECK_QUEUE: - { - StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p); - gct->evac_step = 0; - evacuate((StgClosure **)&queue->invariant); - evacuate((StgClosure **)&queue->my_execution); - evacuate((StgClosure **)&queue->next_queue_entry); - gct->evac_step = saved_evac_step; - gct->failed_to_evac = rtsTrue; // mutable - break; - } - default: barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p", info->type, p); @@ -602,53 +1031,11 @@ linear_scan: if (gct->failed_to_evac) { gct->failed_to_evac = rtsFalse; - if (gct->evac_step) { - recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen); - } - } - - // mark the next bit to indicate "scavenged" - mark(q+1, Bdescr(q)); - - } // while (!mark_stack_empty()) - - // start a new linear scan if the mark stack overflowed at some point - if (mark_stack_overflowed && oldgen_scan_bd == NULL) { - debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan"); - mark_stack_overflowed = rtsFalse; - oldgen_scan_bd = oldest_gen->steps[0].old_blocks; - oldgen_scan = oldgen_scan_bd->start; - } - - if (oldgen_scan_bd) { - // push a new thing on the mark stack - loop: - // find a closure that is marked but not scavenged, and start - // from there. 
- while (oldgen_scan < oldgen_scan_bd->free - && !is_marked(oldgen_scan,oldgen_scan_bd)) { - oldgen_scan++; - } - - if (oldgen_scan < oldgen_scan_bd->free) { - - // already scavenged? - if (is_marked(oldgen_scan+1,oldgen_scan_bd)) { - oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE; - goto loop; + if (gct->evac_gen) { + recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no); } - push_mark_stack(oldgen_scan); - // ToDo: bump the linear scan by the actual size of the object - oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE; - goto linear_scan; - } - - oldgen_scan_bd = oldgen_scan_bd->link; - if (oldgen_scan_bd != NULL) { - oldgen_scan = oldgen_scan_bd->start; - goto loop; } - } + } // while (p = pop_mark_stack()) } /* ----------------------------------------------------------------------------- @@ -663,9 +1050,11 @@ static rtsBool scavenge_one(StgPtr p) { const StgInfoTable *info; - step *saved_evac_step = gct->evac_step; rtsBool no_luck; + rtsBool saved_eager_promotion; + saved_eager_promotion = gct->eager_promotion; + ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); info = get_itbl((StgClosure *)p); @@ -674,8 +1063,6 @@ scavenge_one(StgPtr p) case MVAR_CLEAN: case MVAR_DIRTY: { - rtsBool saved_eager_promotion = gct->eager_promotion; - StgMVar *mvar = ((StgMVar *)p); gct->eager_promotion = rtsFalse; evacuate((StgClosure **)&mvar->head); @@ -720,6 +1107,7 @@ scavenge_one(StgPtr p) case CONSTR_0_2: case CONSTR_2_0: case WEAK: + case PRIM: case IND_PERM: { StgPtr q, end; @@ -734,7 +1122,6 @@ scavenge_one(StgPtr p) case MUT_VAR_CLEAN: case MUT_VAR_DIRTY: { StgPtr q = p; - rtsBool saved_eager_promotion = gct->eager_promotion; gct->eager_promotion = rtsFalse; evacuate(&((StgMutVar *)p)->var); @@ -748,12 +1135,25 @@ scavenge_one(StgPtr p) break; } - case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: - case BLACKHOLE: - break; - + case BLOCKING_QUEUE: + { + StgBlockingQueue *bq = (StgBlockingQueue *)p; + + gct->eager_promotion = rtsFalse; + evacuate(&bq->bh); + evacuate((StgClosure**)&bq->owner); + evacuate((StgClosure**)&bq->queue); + evacuate((StgClosure**)&bq->link); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info; + } else { + bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info; + } + break; + } + case THUNK_SELECTOR: { StgSelector *s = (StgSelector *)p; @@ -786,28 +1186,21 @@ scavenge_one(StgPtr p) case MUT_ARR_PTRS_CLEAN: case MUT_ARR_PTRS_DIRTY: { - StgPtr next, q; - rtsBool saved_eager; - // We don't eagerly promote objects pointed to by a mutable // array, but if we find the array only points to objects in // the same or an older generation, we mark it "clean" and // avoid traversing it during minor GCs. 
- saved_eager = gct->eager_promotion; gct->eager_promotion = rtsFalse; - q = p; - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - evacuate((StgClosure **)p); - } - gct->eager_promotion = saved_eager; + + scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); if (gct->failed_to_evac) { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; } else { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; } + gct->eager_promotion = saved_eager_promotion; gct->failed_to_evac = rtsTrue; break; } @@ -816,19 +1209,14 @@ scavenge_one(StgPtr p) case MUT_ARR_PTRS_FROZEN0: { // follow everything - StgPtr next, q=p; - - next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); - for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { - evacuate((StgClosure **)p); - } - + scavenge_mut_arr_ptrs((StgMutArrPtrs *)p); + // If we're going to put this object on the mutable list, then // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that. if (gct->failed_to_evac) { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info; + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info; } else { - ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info; + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info; } break; } @@ -836,102 +1224,50 @@ scavenge_one(StgPtr p) case TSO: { scavengeTSO((StgTSO*)p); - gct->failed_to_evac = rtsTrue; // always on the mutable list break; } - case TVAR_WATCH_QUEUE: - { - StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p); - gct->evac_step = 0; - evacuate((StgClosure **)&wq->closure); - evacuate((StgClosure **)&wq->next_queue_entry); - evacuate((StgClosure **)&wq->prev_queue_entry); - gct->evac_step = saved_evac_step; - gct->failed_to_evac = rtsTrue; // mutable - break; - } + case MUT_PRIM: + { + StgPtr end; + + gct->eager_promotion = rtsFalse; + + end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { + evacuate((StgClosure **)p); + } - case TVAR: - { - StgTVar *tvar = ((StgTVar *) p); - gct->evac_step = 0; - evacuate((StgClosure **)&tvar->current_value); - evacuate((StgClosure **)&tvar->first_watch_queue_entry); - gct->evac_step = saved_evac_step; + gct->eager_promotion = saved_eager_promotion; gct->failed_to_evac = rtsTrue; // mutable break; - } - case TREC_HEADER: - { - StgTRecHeader *trec = ((StgTRecHeader *) p); - gct->evac_step = 0; - evacuate((StgClosure **)&trec->enclosing_trec); - evacuate((StgClosure **)&trec->current_chunk); - evacuate((StgClosure **)&trec->invariants_to_check); - gct->evac_step = saved_evac_step; - gct->failed_to_evac = rtsTrue; // mutable - break; - } + } case TREC_CHUNK: { StgWord i; StgTRecChunk *tc = ((StgTRecChunk *) p); TRecEntry *e = &(tc -> entries[0]); - gct->evac_step = 0; + gct->eager_promotion = rtsFalse; evacuate((StgClosure **)&tc->prev_chunk); for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { evacuate((StgClosure **)&e->tvar); evacuate((StgClosure **)&e->expected_value); evacuate((StgClosure **)&e->new_value); } - gct->evac_step = saved_evac_step; + gct->eager_promotion = saved_eager_promotion; gct->failed_to_evac = rtsTrue; // mutable break; } - case ATOMIC_INVARIANT: - { - StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p); - gct->evac_step = 0; - evacuate(&invariant->code); - evacuate((StgClosure 
**)&invariant->last_execution); - gct->evac_step = saved_evac_step; - gct->failed_to_evac = rtsTrue; // mutable - break; - } - - case INVARIANT_CHECK_QUEUE: - { - StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p); - gct->evac_step = 0; - evacuate((StgClosure **)&queue->invariant); - evacuate((StgClosure **)&queue->my_execution); - evacuate((StgClosure **)&queue->next_queue_entry); - gct->evac_step = saved_evac_step; - gct->failed_to_evac = rtsTrue; // mutable - break; - } - - case IND_OLDGEN: - case IND_OLDGEN_PERM: + case IND: + // IND can happen, for example, when the interpreter allocates + // a gigantic AP closure (more than one block), which ends up + // on the large-object list and then gets updated. See #3424. + case BLACKHOLE: case IND_STATIC: - { - /* Careful here: a THUNK can be on the mutable list because - * it contains pointers to young gen objects. If such a thunk - * is updated, the IND_OLDGEN will be added to the mutable - * list again, and we'll scavenge it twice. evacuate() - * doesn't check whether the object has already been - * evacuated, so we perform that check here. - */ - StgClosure *q = ((StgInd *)p)->indirectee; - if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) { - break; - } evacuate(&((StgInd *)p)->indirectee); - } #if 0 && defined(DEBUG) if (RtsFlags.DebugFlags.gc) @@ -939,21 +1275,21 @@ scavenge_one(StgPtr p) * promoted */ { - StgPtr start = gen->steps[0].scan; - bdescr *start_bd = gen->steps[0].scan_bd; + StgPtr start = gen->scan; + bdescr *start_bd = gen->scan_bd; nat size = 0; - scavenge(&gen->steps[0]); - if (start_bd != gen->steps[0].scan_bd) { + scavenge(&gen); + if (start_bd != gen->scan_bd) { size += (P_)BLOCK_ROUND_UP(start) - start; start_bd = start_bd->link; - while (start_bd != gen->steps[0].scan_bd) { + while (start_bd != gen->scan_bd) { size += BLOCK_SIZE_W; start_bd = start_bd->link; } - size += gen->steps[0].scan - - (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan); + size += gen->scan - + (P_)BLOCK_ROUND_DOWN(gen->scan); } else { - size = gen->steps[0].scan - start; + size = gen->scan - start; } debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_)); } @@ -978,14 +1314,11 @@ scavenge_one(StgPtr p) -------------------------------------------------------------------------- */ void -scavenge_mutable_list(generation *gen) +scavenge_mutable_list(bdescr *bd, generation *gen) { - bdescr *bd; StgPtr p, q; - bd = gen->saved_mut_list; - - gct->evac_step = &gen->steps[0]; + gct->evac_gen = gen; for (; bd != NULL; bd = bd->link) { for (q = bd->start; q < bd->free; q++) { p = (StgPtr)*q; @@ -994,7 +1327,7 @@ scavenge_mutable_list(generation *gen) #ifdef DEBUG switch (get_itbl((StgClosure *)p)->type) { case MUT_VAR_CLEAN: - barf("MUT_VAR_CLEAN on mutable list"); + // can happen due to concurrent writeMutVars case MUT_VAR_DIRTY: mutlist_MUTVARS++; break; case MUT_ARR_PTRS_CLEAN: @@ -1015,24 +1348,55 @@ scavenge_mutable_list(generation *gen) // definitely doesn't point into a young generation. // Clean objects don't need to be scavenged. Some clean // objects (MUT_VAR_CLEAN) are not kept on the mutable - // list at all; others, such as MUT_ARR_PTRS_CLEAN and - // TSO, are always on the mutable list. + // list at all; others, such as TSO + // are always on the mutable list. 
// switch (get_itbl((StgClosure *)p)->type) { case MUT_ARR_PTRS_CLEAN: - recordMutableGen_GC((StgClosure *)p,gen); + recordMutableGen_GC((StgClosure *)p,gen->no); continue; + case MUT_ARR_PTRS_DIRTY: + { + rtsBool saved_eager_promotion; + saved_eager_promotion = gct->eager_promotion; + gct->eager_promotion = rtsFalse; + + scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p); + + if (gct->failed_to_evac) { + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; + } else { + ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; + } + + gct->eager_promotion = saved_eager_promotion; + gct->failed_to_evac = rtsFalse; + recordMutableGen_GC((StgClosure *)p,gen->no); + continue; + } case TSO: { StgTSO *tso = (StgTSO *)p; - if ((tso->flags & TSO_DIRTY) == 0) { - // A clean TSO: we don't have to traverse its - // stack. However, we *do* follow the link field: - // we don't want to have to mark a TSO dirty just - // because we put it on a different queue. - if (tso->why_blocked != BlockedOnBlackHole) { - evacuate((StgClosure **)&tso->link); - } - recordMutableGen_GC((StgClosure *)p,gen); + if (tso->dirty == 0) { + // Should be on the mutable list because its link + // field is dirty. However, in parallel GC we may + // have a thread on multiple mutable lists, so + // this assertion would be invalid: + // ASSERT(tso->flags & TSO_LINK_DIRTY); + + evacuate((StgClosure **)&tso->_link); + if ( tso->why_blocked == BlockedOnMVar + || tso->why_blocked == BlockedOnBlackHole + || tso->why_blocked == BlockedOnMsgThrowTo + || tso->why_blocked == NotBlocked + ) { + evacuate((StgClosure **)&tso->block_info.prev); + } + if (gct->failed_to_evac) { + recordMutableGen_GC((StgClosure *)p,gen->no); + gct->failed_to_evac = rtsFalse; + } else { + tso->flags &= ~TSO_LINK_DIRTY; + } continue; } } @@ -1043,14 +1407,28 @@ scavenge_mutable_list(generation *gen) if (scavenge_one(p)) { // didn't manage to promote everything, so put the // object back on the list. - recordMutableGen_GC((StgClosure *)p,gen); + recordMutableGen_GC((StgClosure *)p,gen->no); } } } +} - // free the old mut_list - freeChain_sync(gen->saved_mut_list); - gen->saved_mut_list = NULL; +void +scavenge_capability_mut_lists (Capability *cap) +{ + nat g; + + /* Mutable lists from each generation > N + * we want to *scavenge* these roots, not evacuate them: they're not + * going to move in this GC. + * Also do them in reverse generation order, for the usual reason: + * namely to reduce the likelihood of spurious old->new pointers. + */ + for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { + scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]); + freeChain_sync(cap->saved_mut_lists[g]); + cap->saved_mut_lists[g] = NULL; + } } /* ----------------------------------------------------------------------------- @@ -1067,24 +1445,23 @@ scavenge_static(void) StgClosure* p; const StgInfoTable *info; + debugTrace(DEBUG_gc, "scavenging static objects"); + /* Always evacuate straight to the oldest generation for static * objects */ - gct->evac_step = &oldest_gen->steps[0]; + gct->evac_gen = oldest_gen; /* keep going until we've scavenged all the objects on the linked list... */ while (1) { - ACQUIRE_SPIN_LOCK(&static_objects_sync); - /* get the next static object from the list. Remember, there might * be more stuff on this list after each evacuation... 
* (static_objects is a global) */ - p = static_objects; + p = gct->static_objects; if (p == END_OF_STATIC_LIST) { - RELEASE_SPIN_LOCK(&static_objects_sync); break; } @@ -1099,11 +1476,9 @@ scavenge_static(void) /* Take this object *off* the static_objects list, * and put it on the scavenged_static_objects list. */ - static_objects = *STATIC_LINK(info,p); - *STATIC_LINK(info,p) = scavenged_static_objects; - scavenged_static_objects = p; - - RELEASE_SPIN_LOCK(&static_objects_sync); + gct->static_objects = *STATIC_LINK(info,p); + *STATIC_LINK(info,p) = gct->scavenged_static_objects; + gct->scavenged_static_objects = p; switch (info -> type) { @@ -1119,7 +1494,7 @@ scavenge_static(void) */ if (gct->failed_to_evac) { gct->failed_to_evac = rtsFalse; - recordMutableGen_GC((StgClosure *)p,oldest_gen); + recordMutableGen_GC((StgClosure *)p,oldest_gen->no); } break; } @@ -1159,23 +1534,21 @@ scavenge_static(void) static void scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size ) { - nat i, b; + nat i, j, b; StgWord bitmap; b = 0; - bitmap = large_bitmap->bitmap[b]; - for (i = 0; i < size; ) { - if ((bitmap & 1) == 0) { - evacuate((StgClosure **)p); - } - i++; - p++; - if (i % BITS_IN(W_) == 0) { - b++; - bitmap = large_bitmap->bitmap[b]; - } else { + + for (i = 0; i < size; b++) { + bitmap = large_bitmap->bitmap[b]; + j = stg_min(size-i, BITS_IN(W_)); + i += j; + for (; j > 0; j--, p++) { + if ((bitmap & 1) == 0) { + evacuate((StgClosure **)p); + } bitmap = bitmap >> 1; - } + } } } @@ -1235,23 +1608,35 @@ scavenge_stack(StgPtr p, StgPtr stack_end) // before GC, but that seems like overkill. // // Scavenging this update frame as normal would be disastrous; - // the updatee would end up pointing to the value. So we turn - // the indirection into an IND_PERM, so that evacuate will - // copy the indirection into the old generation instead of - // discarding it. + // the updatee would end up pointing to the value. So we + // check whether the value after evacuation is a BLACKHOLE, + // and if not, we change the update frame to an stg_enter + // frame that simply returns the value. Hence, blackholing is + // compulsory (otherwise we would have to check for thunks + // too). + // + // Note [upd-black-hole] + // One slight hiccup is that the THUNK_SELECTOR machinery can + // overwrite the updatee with an IND. In parallel GC, this + // could even be happening concurrently, so we can't check for + // the IND. Fortunately if we assume that blackholing is + // happening (either lazy or eager), then we can be sure that + // the updatee is never a THUNK_SELECTOR and we're ok. + // NB. this is a new invariant: blackholing is not optional. { - nat type; - type = get_itbl(((StgUpdateFrame *)p)->updatee)->type; - if (type == IND) { - ((StgUpdateFrame *)p)->updatee->header.info = - (StgInfoTable *)&stg_IND_PERM_info; - } else if (type == IND_OLDGEN) { - ((StgUpdateFrame *)p)->updatee->header.info = - (StgInfoTable *)&stg_IND_OLDGEN_PERM_info; - } - evacuate(&((StgUpdateFrame *)p)->updatee); - p += sizeofW(StgUpdateFrame); - continue; + StgUpdateFrame *frame = (StgUpdateFrame *)p; + StgClosure *v; + + evacuate(&frame->updatee); + v = frame->updatee; + if (GET_CLOSURE_TAG(v) != 0 || + (get_itbl(v)->type != BLACKHOLE)) { + // blackholing is compulsory, see above. 
+ frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info; + } + ASSERT(v->header.info != &stg_TSO_info); + p += sizeofW(StgUpdateFrame); + continue; } // small bitmap (< 32 entries, or 64 on a 64-bit machine) @@ -1346,19 +1731,19 @@ scavenge_stack(StgPtr p, StgPtr stack_end) /*----------------------------------------------------------------------------- scavenge the large object list. - evac_step set by caller; similar games played with evac_step as with + evac_gen set by caller; similar games played with evac_gen as with scavenge() - see comment at the top of scavenge(). Most large - objects are (repeatedly) mutable, so most of the time evac_step will + objects are (repeatedly) mutable, so most of the time evac_gen will be zero. --------------------------------------------------------------------------- */ static void -scavenge_large (step_workspace *ws) +scavenge_large (gen_workspace *ws) { bdescr *bd; StgPtr p; - gct->evac_step = ws->stp; + gct->evac_gen = ws->gen; bd = ws->todo_large_objects; @@ -1370,143 +1755,110 @@ scavenge_large (step_workspace *ws) // the front when evacuating. ws->todo_large_objects = bd->link; - ACQUIRE_SPIN_LOCK(&ws->stp->sync_large_objects); - dbl_link_onto(bd, &ws->stp->scavenged_large_objects); - ws->stp->n_scavenged_large_blocks += bd->blocks; - RELEASE_SPIN_LOCK(&ws->stp->sync_large_objects); + ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects); + dbl_link_onto(bd, &ws->gen->scavenged_large_objects); + ws->gen->n_scavenged_large_blocks += bd->blocks; + RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects); p = bd->start; if (scavenge_one(p)) { - if (ws->stp->gen_no > 0) { - recordMutableGen_GC((StgClosure *)p, ws->stp->gen); + if (ws->gen->no > 0) { + recordMutableGen_GC((StgClosure *)p, ws->gen->no); } } + + // stats + gct->scanned += closure_sizeW((StgClosure*)p); } } /* ---------------------------------------------------------------------------- - Scavenge a block - ------------------------------------------------------------------------- */ - -#define MINOR_GC -#include "Scav.c-inc" -#undef MINOR_GC -#include "Scav.c-inc" - -/* ---------------------------------------------------------------------------- - Find the oldest full block to scavenge, and scavenge it. + Look for work to do. + + We look for the oldest gen that has either a todo block that can + be scanned, or a block of work on the global queue that we can + scan. + + It is important to take work from the *oldest* generation that we + has work available, because that minimizes the likelihood of + evacuating objects into a young generation when they should have + been eagerly promoted. This really does make a difference (the + cacheprof benchmark is one that is affected). + + We also want to scan the todo block if possible before grabbing + work from the global queue, the reason being that we don't want to + steal work from the global queue and starve other threads if there + is other work we can usefully be doing. 
------------------------------------------------------------------------- */ static rtsBool -scavenge_find_global_work (void) +scavenge_find_work (void) { + int g; + gen_workspace *ws; + rtsBool did_something, did_anything; bdescr *bd; - int s; - rtsBool flag; - step_workspace *ws; - flag = rtsFalse; - for (s = total_steps-1; s>=0; s--) - { - if (s == 0 && RtsFlags.GcFlags.generations > 1) { - continue; + gct->scav_find_work++; + + did_anything = rtsFalse; + +loop: + did_something = rtsFalse; + for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) { + ws = &gct->gens[g]; + + gct->scan_bd = NULL; + + // If we have a scan block with some work to do, + // scavenge everything up to the free pointer. + if (ws->todo_bd->u.scan < ws->todo_free) + { + scavenge_block(ws->todo_bd); + did_something = rtsTrue; + break; } - ws = &gct->steps[s]; // If we have any large objects to scavenge, do them now. if (ws->todo_large_objects) { scavenge_large(ws); - flag = rtsTrue; + did_something = rtsTrue; + break; } - if ((bd = grab_todo_block(ws)) != NULL) { - // no need to assign this to ws->scan_bd, we're going - // to scavenge the whole thing and then push it on - // our scavd list. This saves pushing out the - // scan_bd block, which might be partial. - if (N == 0) { - scavenge_block0(bd, bd->start); - } else { - scavenge_block(bd, bd->start); - } - push_scan_block(bd, ws); - return rtsTrue; + if ((bd = grab_local_todo_block(ws)) != NULL) { + scavenge_block(bd); + did_something = rtsTrue; + break; } - - if (flag) return rtsTrue; } - return rtsFalse; -} - -/* ---------------------------------------------------------------------------- - Look for local work to do. - - We can have outstanding scavenging to do if, for any of the workspaces, - - the scan block is the same as the todo block, and new objects - have been evacuated to the todo block. - - - the scan block *was* the same as the todo block, but the todo - block filled up and a new one has been allocated. - ------------------------------------------------------------------------- */ - -static rtsBool -scavenge_find_local_work (void) -{ - int s; - step_workspace *ws; - rtsBool flag; - - flag = rtsFalse; - for (s = total_steps-1; s >= 0; s--) { - if (s == 0 && RtsFlags.GcFlags.generations > 1) { - continue; - } - ws = &gct->steps[s]; + if (did_something) { + did_anything = rtsTrue; + goto loop; + } - if (ws->todo_bd != NULL) - { - ws->todo_bd->free = ws->todo_free; - } - - // If we have a todo block and no scan block, start - // scanning the todo block. - if (ws->scan_bd == NULL && ws->todo_bd != NULL) - { - ws->scan_bd = ws->todo_bd; - ws->scan = ws->scan_bd->start; - } - - // If we have a scan block with some work to do, - // scavenge everything up to the free pointer. - if (ws->scan != NULL && ws->scan < ws->scan_bd->free) - { - if (N == 0) { - scavenge_block0(ws->scan_bd, ws->scan); - } else { - scavenge_block(ws->scan_bd, ws->scan); +#if defined(THREADED_RTS) + if (work_stealing) { + // look for work to steal + for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) { + if ((bd = steal_todo_block(g)) != NULL) { + scavenge_block(bd); + did_something = rtsTrue; + break; } - ws->scan = ws->scan_bd->free; - flag = rtsTrue; } - - if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free - && ws->scan_bd != ws->todo_bd) - { - // we're not going to evac any more objects into - // this block, so push it now. - push_scan_block(ws->scan_bd, ws); - ws->scan_bd = NULL; - ws->scan = NULL; - // we might be able to scan the todo block now. 
But - // don't do it right away: there might be full blocks - // waiting to be scanned as a result of scavenge_block above. - flag = rtsTrue; + + if (did_something) { + did_anything = rtsTrue; + goto loop; } - - if (flag) return rtsTrue; } - return rtsFalse; +#endif + + // only return when there is no more work to do + + return did_anything; } /* ---------------------------------------------------------------------------- @@ -1522,14 +1874,13 @@ loop: work_to_do = rtsFalse; // scavenge static objects - if (major_gc && static_objects != END_OF_STATIC_LIST) { - IF_DEBUG(sanity, checkStaticObjects(static_objects)); + if (major_gc && gct->static_objects != END_OF_STATIC_LIST) { + IF_DEBUG(sanity, checkStaticObjects(gct->static_objects)); scavenge_static(); } // scavenge objects in compacted generation - if (mark_stack_overflowed || oldgen_scan_bd != NULL || - (mark_stack_bdescr != NULL && !mark_stack_empty())) { + if (mark_stack_bd != NULL && !mark_stack_empty()) { scavenge_mark_stack(); work_to_do = rtsTrue; } @@ -1539,42 +1890,8 @@ loop: // local work. Only if all the global work has been exhausted // do we start scavenging the fragments of blocks in the local // workspaces. - if (scavenge_find_global_work()) goto loop; - if (scavenge_find_local_work()) goto loop; + if (scavenge_find_work()) goto loop; if (work_to_do) goto loop; } -rtsBool -any_work (void) -{ - int s; - step_workspace *ws; - - write_barrier(); - - // scavenge static objects - if (major_gc && static_objects != END_OF_STATIC_LIST) { - return rtsTrue; - } - - // scavenge objects in compacted generation - if (mark_stack_overflowed || oldgen_scan_bd != NULL || - (mark_stack_bdescr != NULL && !mark_stack_empty())) { - return rtsTrue; - } - - // Check for global work in any step. We don't need to check for - // local work, because we have already exited scavenge_loop(), - // which means there is no local work for this thread. - for (s = total_steps-1; s >= 0; s--) { - if (s == 0 && RtsFlags.GcFlags.generations > 1) { - continue; - } - ws = &gct->steps[s]; - if (ws->todo_large_objects) return rtsTrue; - if (ws->stp->todos) return rtsTrue; - } - - return rtsFalse; -}