diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 1640edd..9ebd4c5 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -1,6 +1,6 @@
 /* -----------------------------------------------------------------------------
  *
- * (c) The GHC Team 1998-2006
+ * (c) The GHC Team 1998-2008
  *
  * Generational garbage collector: scavenging functions
  *
@@ -11,19 +11,20 @@
  *
  * ---------------------------------------------------------------------------*/
 
+#include "PosixSource.h"
 #include "Rts.h"
-#include "RtsFlags.h"
+
 #include "Storage.h"
-#include "MBlock.h"
 #include "GC.h"
+#include "GCThread.h"
 #include "GCUtils.h"
 #include "Compact.h"
 #include "Evac.h"
 #include "Scav.h"
 #include "Apply.h"
 #include "Trace.h"
-#include "LdvProfile.h"
 #include "Sanity.h"
+#include "Capability.h"
 
 static void scavenge_stack (StgPtr p, StgPtr stack_end);
 
@@ -31,6 +32,173 @@ static void scavenge_large_bitmap (StgPtr p,
                                    StgLargeBitmap *large_bitmap,
                                    nat size );
 
+#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
+# define evacuate(a) evacuate1(a)
+# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
+# define scavenge_loop(a) scavenge_loop1(a)
+# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
+# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
+#endif
+
+/* -----------------------------------------------------------------------------
+   Scavenge a TSO.
+   -------------------------------------------------------------------------- */
+
+STATIC_INLINE void
+scavenge_TSO_link (StgTSO *tso)
+{
+    // We don't always chase the link field: TSOs on the blackhole
+    // queue are not automatically alive, so the link field is a
+    // "weak" pointer in that case.
+    if (tso->why_blocked != BlockedOnBlackHole) {
+        evacuate((StgClosure **)&tso->_link);
+    }
+}
+
+static void
+scavengeTSO (StgTSO *tso)
+{
+    rtsBool saved_eager;
+
+    if (tso->what_next == ThreadRelocated) {
+        // the only way this can happen is if the old TSO was on the
+        // mutable list.  We might have other links to this defunct
+        // TSO, so we must update its link field.
+        evacuate((StgClosure**)&tso->_link);
+        return;
+    }
+
+    debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
+
+    saved_eager = gct->eager_promotion;
+    gct->eager_promotion = rtsFalse;
+
+    if ( tso->why_blocked == BlockedOnMVar
+         || tso->why_blocked == BlockedOnBlackHole
+         || tso->why_blocked == BlockedOnException
+         ) {
+        evacuate(&tso->block_info.closure);
+    }
+    evacuate((StgClosure **)&tso->blocked_exceptions);
+
+    // scavenge current transaction record
+    evacuate((StgClosure **)&tso->trec);
+
+    // scavenge this thread's stack
+    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
+
+    if (gct->failed_to_evac) {
+        tso->flags |= TSO_DIRTY;
+        scavenge_TSO_link(tso);
+    } else {
+        tso->flags &= ~TSO_DIRTY;
+        scavenge_TSO_link(tso);
+        if (gct->failed_to_evac) {
+            tso->flags |= TSO_LINK_DIRTY;
+        } else {
+            tso->flags &= ~TSO_LINK_DIRTY;
+        }
+    }
+
+    gct->eager_promotion = saved_eager;
+}
+
+/* -----------------------------------------------------------------------------
+   Blocks of function args occur on the stack (at the top) and
+   in PAPs.
+ -------------------------------------------------------------------------- */ + +STATIC_INLINE StgPtr +scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) +{ + StgPtr p; + StgWord bitmap; + nat size; + + p = (StgPtr)args; + switch (fun_info->f.fun_type) { + case ARG_GEN: + bitmap = BITMAP_BITS(fun_info->f.b.bitmap); + size = BITMAP_SIZE(fun_info->f.b.bitmap); + goto small_bitmap; + case ARG_GEN_BIG: + size = GET_FUN_LARGE_BITMAP(fun_info)->size; + scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); + p += size; + break; + default: + bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); + size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]); + small_bitmap: + while (size > 0) { + if ((bitmap & 1) == 0) { + evacuate((StgClosure **)p); + } + p++; + bitmap = bitmap >> 1; + size--; + } + break; + } + return p; +} + +STATIC_INLINE GNUC_ATTR_HOT StgPtr +scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size) +{ + StgPtr p; + StgWord bitmap; + StgFunInfoTable *fun_info; + + fun_info = get_fun_itbl(UNTAG_CLOSURE(fun)); + ASSERT(fun_info->i.type != PAP); + p = (StgPtr)payload; + + switch (fun_info->f.fun_type) { + case ARG_GEN: + bitmap = BITMAP_BITS(fun_info->f.b.bitmap); + goto small_bitmap; + case ARG_GEN_BIG: + scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); + p += size; + break; + case ARG_BCO: + scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size); + p += size; + break; + default: + bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); + small_bitmap: + while (size > 0) { + if ((bitmap & 1) == 0) { + evacuate((StgClosure **)p); + } + p++; + bitmap = bitmap >> 1; + size--; + } + break; + } + return p; +} + +STATIC_INLINE GNUC_ATTR_HOT StgPtr +scavenge_PAP (StgPAP *pap) +{ + evacuate(&pap->fun); + return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args); +} + +STATIC_INLINE StgPtr +scavenge_AP (StgAP *ap) +{ + evacuate(&ap->fun); + return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args); +} + +/* ----------------------------------------------------------------------------- + Scavenge SRTs + -------------------------------------------------------------------------- */ /* Similar to scavenge_large_bitmap(), but we don't write back the * pointers we get back from evacuate(). @@ -65,7 +233,7 @@ scavenge_large_srt_bitmap( StgLargeSRT *large_srt ) * srt field in the info table. That's ok, because we'll * never dereference it. */ -STATIC_INLINE void +STATIC_INLINE GNUC_ATTR_HOT void scavenge_srt (StgClosure **srt, nat srt_bitmap) { nat bitmap; @@ -105,7 +273,7 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap) } -STATIC_INLINE void +STATIC_INLINE GNUC_ATTR_HOT void scavenge_thunk_srt(const StgInfoTable *info) { StgThunkInfoTable *thunk_info; @@ -116,7 +284,7 @@ scavenge_thunk_srt(const StgInfoTable *info) scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap); } -STATIC_INLINE void +STATIC_INLINE GNUC_ATTR_HOT void scavenge_fun_srt(const StgInfoTable *info) { StgFunInfoTable *fun_info; @@ -128,139 +296,431 @@ scavenge_fun_srt(const StgInfoTable *info) } /* ----------------------------------------------------------------------------- - Scavenge a TSO. + Scavenge a block from the given scan pointer up to bd->free. + + evac_step is set by the caller to be either zero (for a step in a + generation < N) or G where G is the generation of the step being + scavenged. 
+ + We sometimes temporarily change evac_step back to zero if we're + scavenging a mutable object where eager promotion isn't such a good + idea. -------------------------------------------------------------------------- */ -static void -scavengeTSO (StgTSO *tso) +static GNUC_ATTR_HOT void +scavenge_block (bdescr *bd) { - rtsBool saved_eager = gct->eager_promotion; + StgPtr p, q; + StgInfoTable *info; + step *saved_evac_step; + rtsBool saved_eager_promotion; + step_workspace *ws; - gct->eager_promotion = rtsFalse; + debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p", + bd->start, bd->gen_no, bd->step->no, bd->u.scan); - if ( tso->why_blocked == BlockedOnMVar - || tso->why_blocked == BlockedOnBlackHole - || tso->why_blocked == BlockedOnException - ) { - evacuate(&tso->block_info.closure); - } - evacuate((StgClosure **)&tso->blocked_exceptions); + gct->scan_bd = bd; + gct->evac_step = bd->step; + saved_evac_step = gct->evac_step; + saved_eager_promotion = gct->eager_promotion; + gct->failed_to_evac = rtsFalse; + + ws = &gct->steps[bd->step->abs_no]; + + p = bd->u.scan; + + // we might be evacuating into the very object that we're + // scavenging, so we have to check the real bd->free pointer each + // time around the loop. + while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) { + + ASSERT(bd->link == NULL); + ASSERT(LOOKS_LIKE_CLOSURE_PTR(p)); + info = get_itbl((StgClosure *)p); - // We don't always chase the link field: TSOs on the blackhole - // queue are not automatically alive, so the link field is a - // "weak" pointer in that case. - if (tso->why_blocked != BlockedOnBlackHole) { - evacuate((StgClosure **)&tso->link); + ASSERT(gct->thunk_selector_depth == 0); + + q = p; + switch (info->type) { + + case MVAR_CLEAN: + case MVAR_DIRTY: + { + StgMVar *mvar = ((StgMVar *)p); + gct->eager_promotion = rtsFalse; + evacuate((StgClosure **)&mvar->head); + evacuate((StgClosure **)&mvar->tail); + evacuate((StgClosure **)&mvar->value); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + mvar->header.info = &stg_MVAR_DIRTY_info; + } else { + mvar->header.info = &stg_MVAR_CLEAN_info; + } + p += sizeofW(StgMVar); + break; } - // scavange current transaction record - evacuate((StgClosure **)&tso->trec); - - // scavenge this thread's stack - scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); + case FUN_2_0: + scavenge_fun_srt(info); + evacuate(&((StgClosure *)p)->payload[1]); + evacuate(&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_2_0: + scavenge_thunk_srt(info); + evacuate(&((StgThunk *)p)->payload[1]); + evacuate(&((StgThunk *)p)->payload[0]); + p += sizeofW(StgThunk) + 2; + break; + + case CONSTR_2_0: + evacuate(&((StgClosure *)p)->payload[1]); + evacuate(&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_1_0: + scavenge_thunk_srt(info); + evacuate(&((StgThunk *)p)->payload[0]); + p += sizeofW(StgThunk) + 1; + break; + + case FUN_1_0: + scavenge_fun_srt(info); + case CONSTR_1_0: + evacuate(&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 1; + break; + + case THUNK_0_1: + scavenge_thunk_srt(info); + p += sizeofW(StgThunk) + 1; + break; + + case FUN_0_1: + scavenge_fun_srt(info); + case CONSTR_0_1: + p += sizeofW(StgHeader) + 1; + break; + + case THUNK_0_2: + scavenge_thunk_srt(info); + p += sizeofW(StgThunk) + 2; + break; + + case FUN_0_2: + scavenge_fun_srt(info); + case CONSTR_0_2: + p += sizeofW(StgHeader) + 2; + break; + + case THUNK_1_1: + 
scavenge_thunk_srt(info); + evacuate(&((StgThunk *)p)->payload[0]); + p += sizeofW(StgThunk) + 2; + break; + + case FUN_1_1: + scavenge_fun_srt(info); + case CONSTR_1_1: + evacuate(&((StgClosure *)p)->payload[0]); + p += sizeofW(StgHeader) + 2; + break; + + case FUN: + scavenge_fun_srt(info); + goto gen_obj; + + case THUNK: + { + StgPtr end; + + scavenge_thunk_srt(info); + end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgThunk *)p)->payload; p < end; p++) { + evacuate((StgClosure **)p); + } + p += info->layout.payload.nptrs; + break; + } + + gen_obj: + case CONSTR: + case WEAK: + case STABLE_NAME: + { + StgPtr end; + + end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs; + for (p = (P_)((StgClosure *)p)->payload; p < end; p++) { + evacuate((StgClosure **)p); + } + p += info->layout.payload.nptrs; + break; + } + + case BCO: { + StgBCO *bco = (StgBCO *)p; + evacuate((StgClosure **)&bco->instrs); + evacuate((StgClosure **)&bco->literals); + evacuate((StgClosure **)&bco->ptrs); + p += bco_sizeW(bco); + break; + } + + case IND_PERM: + if (bd->gen_no != 0) { +#ifdef PROFILING + // @LDV profiling + // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an + // IND_OLDGEN_PERM closure is larger than an IND_PERM closure. + LDV_recordDead((StgClosure *)p, sizeofW(StgInd)); +#endif + // + // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()? + // + SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); + + // We pretend that p has just been created. + LDV_RECORD_CREATE((StgClosure *)p); + } + // fall through + case IND_OLDGEN_PERM: + evacuate(&((StgInd *)p)->indirectee); + p += sizeofW(StgInd); + break; + + case MUT_VAR_CLEAN: + case MUT_VAR_DIRTY: + gct->eager_promotion = rtsFalse; + evacuate(&((StgMutVar *)p)->var); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info; + } else { + ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info; + } + p += sizeofW(StgMutVar); + break; + + case CAF_BLACKHOLE: + case BLACKHOLE: + p += BLACKHOLE_sizeW(); + break; + + case THUNK_SELECTOR: + { + StgSelector *s = (StgSelector *)p; + evacuate(&s->selectee); + p += THUNK_SELECTOR_sizeW(); + break; + } + + // A chunk of stack saved in a heap object + case AP_STACK: + { + StgAP_STACK *ap = (StgAP_STACK *)p; + + evacuate(&ap->fun); + scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size); + p = (StgPtr)ap->payload + ap->size; + break; + } + + case PAP: + p = scavenge_PAP((StgPAP *)p); + break; + + case AP: + p = scavenge_AP((StgAP *)p); + break; + + case ARR_WORDS: + // nothing to follow + p += arr_words_sizeW((StgArrWords *)p); + break; + + case MUT_ARR_PTRS_CLEAN: + case MUT_ARR_PTRS_DIRTY: + // follow everything + { + StgPtr next; + + // We don't eagerly promote objects pointed to by a mutable + // array, but if we find the array only points to objects in + // the same or an older generation, we mark it "clean" and + // avoid traversing it during minor GCs. 
+ gct->eager_promotion = rtsFalse; + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + evacuate((StgClosure **)p); + } + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info; + } else { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info; + } - if (gct->failed_to_evac) { - tso->flags |= TSO_DIRTY; - } else { - tso->flags &= ~TSO_DIRTY; + gct->failed_to_evac = rtsTrue; // always put it on the mutable list. + break; } - gct->eager_promotion = saved_eager; -} - -/* ----------------------------------------------------------------------------- - Blocks of function args occur on the stack (at the top) and - in PAPs. - -------------------------------------------------------------------------- */ + case MUT_ARR_PTRS_FROZEN: + case MUT_ARR_PTRS_FROZEN0: + // follow everything + { + StgPtr next; -STATIC_INLINE StgPtr -scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args) -{ - StgPtr p; - StgWord bitmap; - nat size; + next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p); + for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) { + evacuate((StgClosure **)p); + } - p = (StgPtr)args; - switch (fun_info->f.fun_type) { - case ARG_GEN: - bitmap = BITMAP_BITS(fun_info->f.b.bitmap); - size = BITMAP_SIZE(fun_info->f.b.bitmap); - goto small_bitmap; - case ARG_GEN_BIG: - size = GET_FUN_LARGE_BITMAP(fun_info)->size; - scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); - p += size; - break; - default: - bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); - size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]); - small_bitmap: - while (size > 0) { - if ((bitmap & 1) == 0) { - evacuate((StgClosure **)p); - } - p++; - bitmap = bitmap >> 1; - size--; + // If we're going to put this object on the mutable list, then + // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that. 
+ if (gct->failed_to_evac) { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info; + } else { + ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info; } break; } - return p; -} -STATIC_INLINE StgPtr -scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size) -{ - StgPtr p; - StgWord bitmap; - StgFunInfoTable *fun_info; - - fun_info = get_fun_itbl(UNTAG_CLOSURE(fun)); - ASSERT(fun_info->i.type != PAP); - p = (StgPtr)payload; + case TSO: + { + StgTSO *tso = (StgTSO *)p; + scavengeTSO(tso); + p += tso_sizeW(tso); + break; + } - switch (fun_info->f.fun_type) { - case ARG_GEN: - bitmap = BITMAP_BITS(fun_info->f.b.bitmap); - goto small_bitmap; - case ARG_GEN_BIG: - scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size); - p += size; + case TVAR_WATCH_QUEUE: + { + StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p); + gct->evac_step = 0; + evacuate((StgClosure **)&wq->closure); + evacuate((StgClosure **)&wq->next_queue_entry); + evacuate((StgClosure **)&wq->prev_queue_entry); + gct->evac_step = saved_evac_step; + gct->failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgTVarWatchQueue); break; - case ARG_BCO: - scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size); - p += size; + } + + case TVAR: + { + StgTVar *tvar = ((StgTVar *) p); + gct->evac_step = 0; + evacuate((StgClosure **)&tvar->current_value); + evacuate((StgClosure **)&tvar->first_watch_queue_entry); + gct->evac_step = saved_evac_step; + gct->failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgTVar); break; - default: - bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]); - small_bitmap: - while (size > 0) { - if ((bitmap & 1) == 0) { - evacuate((StgClosure **)p); - } - p++; - bitmap = bitmap >> 1; - size--; + } + + case TREC_HEADER: + { + StgTRecHeader *trec = ((StgTRecHeader *) p); + gct->evac_step = 0; + evacuate((StgClosure **)&trec->enclosing_trec); + evacuate((StgClosure **)&trec->current_chunk); + evacuate((StgClosure **)&trec->invariants_to_check); + gct->evac_step = saved_evac_step; + gct->failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgTRecHeader); + break; + } + + case TREC_CHUNK: + { + StgWord i; + StgTRecChunk *tc = ((StgTRecChunk *) p); + TRecEntry *e = &(tc -> entries[0]); + gct->evac_step = 0; + evacuate((StgClosure **)&tc->prev_chunk); + for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) { + evacuate((StgClosure **)&e->tvar); + evacuate((StgClosure **)&e->expected_value); + evacuate((StgClosure **)&e->new_value); } + gct->evac_step = saved_evac_step; + gct->failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgTRecChunk); break; + } + + case ATOMIC_INVARIANT: + { + StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p); + gct->evac_step = 0; + evacuate(&invariant->code); + evacuate((StgClosure **)&invariant->last_execution); + gct->evac_step = saved_evac_step; + gct->failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgAtomicInvariant); + break; + } + + case INVARIANT_CHECK_QUEUE: + { + StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p); + gct->evac_step = 0; + evacuate((StgClosure **)&queue->invariant); + evacuate((StgClosure **)&queue->my_execution); + evacuate((StgClosure **)&queue->next_queue_entry); + gct->evac_step = saved_evac_step; + gct->failed_to_evac = rtsTrue; // mutable + p += sizeofW(StgInvariantCheckQueue); + break; + } + + default: + barf("scavenge: unimplemented/strange closure type %d @ %p", + info->type, p); } - return p; -} -STATIC_INLINE StgPtr -scavenge_PAP (StgPAP *pap) -{ - evacuate(&pap->fun); - return 
scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args); -} + /* + * We need to record the current object on the mutable list if + * (a) It is actually mutable, or + * (b) It contains pointers to a younger generation. + * Case (b) arises if we didn't manage to promote everything that + * the current object points to into the current generation. + */ + if (gct->failed_to_evac) { + gct->failed_to_evac = rtsFalse; + if (bd->gen_no > 0) { + recordMutableGen_GC((StgClosure *)q, bd->gen_no); + } + } + } -STATIC_INLINE StgPtr -scavenge_AP (StgAP *ap) -{ - evacuate(&ap->fun); - return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args); -} + if (p > bd->free) { + gct->copied += ws->todo_free - bd->free; + bd->free = p; + } + + debugTrace(DEBUG_gc, " scavenged %ld bytes", + (unsigned long)((bd->free - bd->u.scan) * sizeof(W_))); + // update stats: this is a block that has been scavenged + gct->scanned += bd->free - bd->u.scan; + bd->u.scan = bd->free; + + if (bd != ws->todo_bd) { + // we're not going to evac any more objects into + // this block, so push it now. + push_scanned_block(bd, ws); + } + + gct->scan_bd = NULL; +} /* ----------------------------------------------------------------------------- Scavenge everything on the mark stack. @@ -287,7 +747,7 @@ linear_scan: info = get_itbl((StgClosure *)p); q = p; - switch (((volatile StgWord *)info)[1] & 0xffff) { + switch (info->type) { case MVAR_CLEAN: case MVAR_DIRTY: @@ -423,8 +883,6 @@ linear_scan: } case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: case BLACKHOLE: case ARR_WORDS: break; @@ -507,7 +965,6 @@ linear_scan: case TSO: { scavengeTSO((StgTSO*)p); - gct->failed_to_evac = rtsTrue; // always on the mutable list break; } @@ -594,7 +1051,7 @@ linear_scan: if (gct->failed_to_evac) { gct->failed_to_evac = rtsFalse; if (gct->evac_step) { - recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen); + recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no); } } @@ -740,8 +1197,6 @@ scavenge_one(StgPtr p) } case CAF_BLACKHOLE: - case SE_CAF_BLACKHOLE: - case SE_BLACKHOLE: case BLACKHOLE: break; @@ -827,7 +1282,6 @@ scavenge_one(StgPtr p) case TSO: { scavengeTSO((StgTSO*)p); - gct->failed_to_evac = rtsTrue; // always on the mutable list break; } @@ -909,20 +1363,7 @@ scavenge_one(StgPtr p) case IND_OLDGEN: case IND_OLDGEN_PERM: case IND_STATIC: - { - /* Careful here: a THUNK can be on the mutable list because - * it contains pointers to young gen objects. If such a thunk - * is updated, the IND_OLDGEN will be added to the mutable - * list again, and we'll scavenge it twice. evacuate() - * doesn't check whether the object has already been - * evacuated, so we perform that check here. - */ - StgClosure *q = ((StgInd *)p)->indirectee; - if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) { - break; - } evacuate(&((StgInd *)p)->indirectee); - } #if 0 && defined(DEBUG) if (RtsFlags.DebugFlags.gc) @@ -969,13 +1410,10 @@ scavenge_one(StgPtr p) -------------------------------------------------------------------------- */ void -scavenge_mutable_list(generation *gen) +scavenge_mutable_list(bdescr *bd, generation *gen) { - bdescr *bd; StgPtr p, q; - bd = gen->saved_mut_list; - gct->evac_step = &gen->steps[0]; for (; bd != NULL; bd = bd->link) { for (q = bd->start; q < bd->free; q++) { @@ -1006,24 +1444,27 @@ scavenge_mutable_list(generation *gen) // definitely doesn't point into a young generation. // Clean objects don't need to be scavenged. 
Some clean // objects (MUT_VAR_CLEAN) are not kept on the mutable - // list at all; others, such as MUT_ARR_PTRS_CLEAN and - // TSO, are always on the mutable list. + // list at all; others, such as MUT_ARR_PTRS_CLEAN + // are always on the mutable list. // switch (get_itbl((StgClosure *)p)->type) { case MUT_ARR_PTRS_CLEAN: - recordMutableGen_GC((StgClosure *)p,gen); + recordMutableGen_GC((StgClosure *)p,gen->no); continue; case TSO: { StgTSO *tso = (StgTSO *)p; if ((tso->flags & TSO_DIRTY) == 0) { - // A clean TSO: we don't have to traverse its - // stack. However, we *do* follow the link field: - // we don't want to have to mark a TSO dirty just - // because we put it on a different queue. - if (tso->why_blocked != BlockedOnBlackHole) { - evacuate((StgClosure **)&tso->link); - } - recordMutableGen_GC((StgClosure *)p,gen); + // Must be on the mutable list because its link + // field is dirty. + ASSERT(tso->flags & TSO_LINK_DIRTY); + + scavenge_TSO_link(tso); + if (gct->failed_to_evac) { + recordMutableGen_GC((StgClosure *)p,gen->no); + gct->failed_to_evac = rtsFalse; + } else { + tso->flags &= ~TSO_LINK_DIRTY; + } continue; } } @@ -1034,14 +1475,28 @@ scavenge_mutable_list(generation *gen) if (scavenge_one(p)) { // didn't manage to promote everything, so put the // object back on the list. - recordMutableGen_GC((StgClosure *)p,gen); + recordMutableGen_GC((StgClosure *)p,gen->no); } } } +} - // free the old mut_list - freeChain_sync(gen->saved_mut_list); - gen->saved_mut_list = NULL; +void +scavenge_capability_mut_lists (Capability *cap) +{ + nat g; + + /* Mutable lists from each generation > N + * we want to *scavenge* these roots, not evacuate them: they're not + * going to move in this GC. + * Also do them in reverse generation order, for the usual reason: + * namely to reduce the likelihood of spurious old->new pointers. + */ + for (g = RtsFlags.GcFlags.generations-1; g > N; g--) { + scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]); + freeChain_sync(cap->saved_mut_lists[g]); + cap->saved_mut_lists[g] = NULL; + } } /* ----------------------------------------------------------------------------- @@ -1058,6 +1513,8 @@ scavenge_static(void) StgClosure* p; const StgInfoTable *info; + debugTrace(DEBUG_gc, "scavenging static objects"); + /* Always evacuate straight to the oldest generation for static * objects */ gct->evac_step = &oldest_gen->steps[0]; @@ -1067,15 +1524,12 @@ scavenge_static(void) while (1) { - ACQUIRE_SPIN_LOCK(&static_objects_sync); - /* get the next static object from the list. Remember, there might * be more stuff on this list after each evacuation... * (static_objects is a global) */ - p = static_objects; + p = gct->static_objects; if (p == END_OF_STATIC_LIST) { - RELEASE_SPIN_LOCK(&static_objects_sync); break; } @@ -1090,11 +1544,9 @@ scavenge_static(void) /* Take this object *off* the static_objects list, * and put it on the scavenged_static_objects list. 
*/ - static_objects = *STATIC_LINK(info,p); - *STATIC_LINK(info,p) = scavenged_static_objects; - scavenged_static_objects = p; - - RELEASE_SPIN_LOCK(&static_objects_sync); + gct->static_objects = *STATIC_LINK(info,p); + *STATIC_LINK(info,p) = gct->scavenged_static_objects; + gct->scavenged_static_objects = p; switch (info -> type) { @@ -1110,7 +1562,7 @@ scavenge_static(void) */ if (gct->failed_to_evac) { gct->failed_to_evac = rtsFalse; - recordMutableGen_GC((StgClosure *)p,oldest_gen); + recordMutableGen_GC((StgClosure *)p,oldest_gen->no); } break; } @@ -1230,19 +1682,34 @@ scavenge_stack(StgPtr p, StgPtr stack_end) // the indirection into an IND_PERM, so that evacuate will // copy the indirection into the old generation instead of // discarding it. + // + // Note [upd-black-hole] + // One slight hiccup is that the THUNK_SELECTOR machinery can + // overwrite the updatee with an IND. In parallel GC, this + // could even be happening concurrently, so we can't check for + // the IND. Fortunately if we assume that blackholing is + // happening (either lazy or eager), then we can be sure that + // the updatee is never a THUNK_SELECTOR and we're ok. + // NB. this is a new invariant: blackholing is not optional. { nat type; - type = get_itbl(((StgUpdateFrame *)p)->updatee)->type; - if (type == IND) { - ((StgUpdateFrame *)p)->updatee->header.info = - (StgInfoTable *)&stg_IND_PERM_info; - } else if (type == IND_OLDGEN) { - ((StgUpdateFrame *)p)->updatee->header.info = - (StgInfoTable *)&stg_IND_OLDGEN_PERM_info; - } - evacuate(&((StgUpdateFrame *)p)->updatee); - p += sizeofW(StgUpdateFrame); - continue; + const StgInfoTable *i; + StgClosure *updatee; + + updatee = ((StgUpdateFrame *)p)->updatee; + i = updatee->header.info; + if (!IS_FORWARDING_PTR(i)) { + type = get_itbl(updatee)->type; + if (type == IND) { + updatee->header.info = &stg_IND_PERM_info; + } else if (type == IND_OLDGEN) { + updatee->header.info = &stg_IND_OLDGEN_PERM_info; + } + } + evacuate(&((StgUpdateFrame *)p)->updatee); + ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0); + p += sizeofW(StgUpdateFrame); + continue; } // small bitmap (< 32 entries, or 64 on a 64-bit machine) @@ -1349,7 +1816,7 @@ scavenge_large (step_workspace *ws) bdescr *bd; StgPtr p; - gct->evac_step = ws->stp; + gct->evac_step = ws->step; bd = ws->todo_large_objects; @@ -1361,146 +1828,116 @@ scavenge_large (step_workspace *ws) // the front when evacuating. 
 	ws->todo_large_objects = bd->link;
 
-	ACQUIRE_SPIN_LOCK(&ws->stp->sync_large_objects);
-	dbl_link_onto(bd, &ws->stp->scavenged_large_objects);
-	ws->stp->n_scavenged_large_blocks += bd->blocks;
-	RELEASE_SPIN_LOCK(&ws->stp->sync_large_objects);
+	ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
+	dbl_link_onto(bd, &ws->step->scavenged_large_objects);
+	ws->step->n_scavenged_large_blocks += bd->blocks;
+	RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
 
 	p = bd->start;
 	if (scavenge_one(p)) {
-	    if (ws->stp->gen_no > 0) {
-		recordMutableGen_GC((StgClosure *)p, ws->stp->gen);
+	    if (ws->step->gen_no > 0) {
+		recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
 	    }
 	}
+
+        // stats
+        gct->scanned += closure_sizeW((StgClosure*)p);
     }
 }
 
 /* ----------------------------------------------------------------------------
-   Scavenge a block
-   ------------------------------------------------------------------------- */
-
-#define MINOR_GC
-#include "Scav.c-inc"
-#undef MINOR_GC
-#include "Scav.c-inc"
-
-/* ----------------------------------------------------------------------------
-   Find the oldest full block to scavenge, and scavenge it.
+   Look for work to do.
+
+   We look for the oldest step that has either a todo block that can
+   be scanned, or a block of work on the global queue that we can
+   scan.
+
+   It is important to take work from the *oldest* generation that
+   has work available, because that minimizes the likelihood of
+   evacuating objects into a young generation when they should have
+   been eagerly promoted.  This really does make a difference (the
+   cacheprof benchmark is one that is affected).
+
+   We also want to scan the todo block if possible before grabbing
+   work from the global queue, the reason being that we don't want to
+   steal work from the global queue and starve other threads if there
+   is other work we can usefully be doing.
    ------------------------------------------------------------------------- */
 
 static rtsBool
-scavenge_find_global_work (void)
+scavenge_find_work (void)
 {
-    bdescr *bd;
-    int g, s;
-    rtsBool flag;
+    int s;
     step_workspace *ws;
+    rtsBool did_something, did_anything;
+    bdescr *bd;
 
-    flag = rtsFalse;
-    for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
-	for (s = generations[g].n_steps-1; s >= 0; s--) {
-	    if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
-		continue;
-	    }
-	    ws = &gct->steps[g][s];
-
-	    // If we have any large objects to scavenge, do them now.
-	    if (ws->todo_large_objects) {
-		scavenge_large(ws);
-		flag = rtsTrue;
-	    }
-
-	    if ((bd = grab_todo_block(ws)) != NULL) {
-		// no need to assign this to ws->scan_bd, we're going
-		// to scavenge the whole thing and then push it on
-		// our scavd list.  This saves pushing out the
-		// scan_bd block, which might be partial.
-		if (N == 0) {
-		    scavenge_block0(bd, bd->start);
-		} else {
-		    scavenge_block(bd, bd->start);
-		}
-		push_scan_block(bd, ws);
-		return rtsTrue;
-	    }
-
-	    if (flag) return rtsTrue;
-	}
-    }
-    return rtsFalse;
-}
-
-/* ----------------------------------------------------------------------------
-   Look for local work to do.
+    gct->scav_find_work++;
 
-   We can have outstanding scavenging to do if, for any of the workspaces,
+    did_anything = rtsFalse;
 
-   - the scan block is the same as the todo block, and new objects
-     have been evacuated to the todo block.
+loop: + did_something = rtsFalse; + for (s = total_steps-1; s >= 0; s--) { + if (s == 0 && RtsFlags.GcFlags.generations > 1) { + continue; + } + ws = &gct->steps[s]; + + gct->scan_bd = NULL; + + // If we have a scan block with some work to do, + // scavenge everything up to the free pointer. + if (ws->todo_bd->u.scan < ws->todo_free) + { + scavenge_block(ws->todo_bd); + did_something = rtsTrue; + break; + } - - the scan block *was* the same as the todo block, but the todo - block filled up and a new one has been allocated. - ------------------------------------------------------------------------- */ + // If we have any large objects to scavenge, do them now. + if (ws->todo_large_objects) { + scavenge_large(ws); + did_something = rtsTrue; + break; + } -static rtsBool -scavenge_find_local_work (void) -{ - int g, s; - step_workspace *ws; - rtsBool flag; + if ((bd = grab_local_todo_block(ws)) != NULL) { + scavenge_block(bd); + did_something = rtsTrue; + break; + } + } - flag = rtsFalse; - for (g = RtsFlags.GcFlags.generations; --g >= 0; ) { - for (s = generations[g].n_steps; --s >= 0; ) { - if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) { - continue; - } - ws = &gct->steps[g][s]; + if (did_something) { + did_anything = rtsTrue; + goto loop; + } - if (ws->todo_bd != NULL) - { - ws->todo_bd->free = ws->todo_free; +#if defined(THREADED_RTS) + if (work_stealing) { + // look for work to steal + for (s = total_steps-1; s >= 0; s--) { + if (s == 0 && RtsFlags.GcFlags.generations > 1) { + continue; } + if ((bd = steal_todo_block(s)) != NULL) { + scavenge_block(bd); + did_something = rtsTrue; + break; + } + } - // If we have a todo block and no scan block, start - // scanning the todo block. - if (ws->scan_bd == NULL && ws->todo_bd != NULL) - { - ws->scan_bd = ws->todo_bd; - ws->scan = ws->scan_bd->start; - } - - // If we have a scan block with some work to do, - // scavenge everything up to the free pointer. - if (ws->scan != NULL && ws->scan < ws->scan_bd->free) - { - if (N == 0) { - scavenge_block0(ws->scan_bd, ws->scan); - } else { - scavenge_block(ws->scan_bd, ws->scan); - } - ws->scan = ws->scan_bd->free; - flag = rtsTrue; - } + if (did_something) { + did_anything = rtsTrue; + goto loop; + } + } +#endif - if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free - && ws->scan_bd != ws->todo_bd) - { - // we're not going to evac any more objects into - // this block, so push it now. - push_scan_block(ws->scan_bd, ws); - ws->scan_bd = NULL; - ws->scan = NULL; - // we might be able to scan the todo block now. But - // don't do it right away: there might be full blocks - // waiting to be scanned as a result of scavenge_block above. - flag = rtsTrue; - } + // only return when there is no more work to do - if (flag) return rtsTrue; - } - } - return rtsFalse; + return did_anything; } /* ---------------------------------------------------------------------------- @@ -1516,8 +1953,8 @@ loop: work_to_do = rtsFalse; // scavenge static objects - if (major_gc && static_objects != END_OF_STATIC_LIST) { - IF_DEBUG(sanity, checkStaticObjects(static_objects)); + if (major_gc && gct->static_objects != END_OF_STATIC_LIST) { + IF_DEBUG(sanity, checkStaticObjects(gct->static_objects)); scavenge_static(); } @@ -1533,44 +1970,8 @@ loop: // local work. Only if all the global work has been exhausted // do we start scavenging the fragments of blocks in the local // workspaces. 
- if (scavenge_find_global_work()) goto loop; - if (scavenge_find_local_work()) goto loop; + if (scavenge_find_work()) goto loop; if (work_to_do) goto loop; } -rtsBool -any_work (void) -{ - int g, s; - step_workspace *ws; - - write_barrier(); - - // scavenge static objects - if (major_gc && static_objects != END_OF_STATIC_LIST) { - return rtsTrue; - } - - // scavenge objects in compacted generation - if (mark_stack_overflowed || oldgen_scan_bd != NULL || - (mark_stack_bdescr != NULL && !mark_stack_empty())) { - return rtsTrue; - } - - // Check for global work in any step. We don't need to check for - // local work, because we have already exited scavenge_loop(), - // which means there is no local work for this thread. - for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) { - for (s = generations[g].n_steps-1; s >= 0; s--) { - if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) { - continue; - } - ws = &gct->steps[g][s]; - if (ws->todo_large_objects) return rtsTrue; - if (ws->stp->todos) return rtsTrue; - } - } - - return rtsFalse; -}
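
The same small-bitmap walk appears three times in this patch: scavenge_arg_block,
scavenge_PAP_payload and scavenge_srt each contain a copy. The sketch below
distills the shared pattern. It is illustrative only and not part of the patch:
walk_small_bitmap and its evac callback parameter are invented names, and the
real functions inline evacuate() directly and fall back to scavenge_large_bitmap
for the ARG_GEN_BIG and ARG_BCO layouts.

/* Illustrative sketch, not from the patch: scan `size' words starting
 * at `p', consulting one bitmap bit per word.  In the RTS convention,
 * a 0 bit marks a pointer word (follow it) and a 1 bit marks a
 * non-pointer word, e.g. an unboxed value (skip it).
 */
static StgPtr
walk_small_bitmap (StgPtr p, StgWord bitmap, nat size,
                   void (*evac)(StgClosure **))
{
    while (size > 0) {
        if ((bitmap & 1) == 0) {
            evac((StgClosure **)p);   // pointer word: evacuate it
        }
        p++;                          // advance to the next word...
        bitmap = bitmap >> 1;         // ...and to the next bitmap bit
        size--;
    }
    return p;                         // first word past the scanned block
}

A "small" bitmap is one that fits in a single StgWord, which is why the
ARG_GEN_BIG and ARG_BCO cases use scavenge_large_bitmap instead: those layouts
can describe more words than one machine word of bitmap bits can cover.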
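Similarly, the MVAR_CLEAN/MVAR_DIRTY, MUT_VAR_* and MUT_ARR_PTRS_* cases in the
new scavenge_block all follow one clean/dirty discipline. A condensed sketch,
again illustrative rather than from the patch (scavenge_mutable_fields,
clean_info and dirty_info are invented stand-ins for the closure-specific code
and info tables such as stg_MUT_VAR_CLEAN_info/stg_MUT_VAR_DIRTY_info):

/* Illustrative sketch, not from the patch.  Mutable closures are not
 * eagerly promoted (their fields may be overwritten again soon), and a
 * closure whose fields all point at the same or an older generation is
 * marked clean so that later collections can avoid re-traversing it.
 */
static void
scavenge_mutable_fields (StgClosure *q, StgClosure **fields, nat n_fields,
                         const StgInfoTable *clean_info,
                         const StgInfoTable *dirty_info)
{
    nat i;
    rtsBool saved_eager_promotion = gct->eager_promotion;

    gct->eager_promotion = rtsFalse;
    for (i = 0; i < n_fields; i++) {
        evacuate(&fields[i]);
    }
    gct->eager_promotion = saved_eager_promotion;

    if (gct->failed_to_evac) {
        // some field still points into a younger generation, so the
        // closure stays dirty and goes back on the mutable list
        q->header.info = dirty_info;
    } else {
        q->header.info = clean_info;
    }
}

One nuance the sketch omits: for MUT_ARR_PTRS the patch also forces
failed_to_evac to rtsTrue after updating the header, so arrays always return to
the mutable list; the CLEAN header only lets scavenge_mutable_list skip
re-traversing the payload on the next collection.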