X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FEvac.c;h=f537e2b9a3c46f559b00fbbdcfcd30fbe97196a5;hb=3e7ebef1f18e2718dd37f47613694de9ebf80ae2;hp=78f0f315d2c44de4893258d7bda2c6251740aeef;hpb=b339c8b1d0f239031802555b454062e9430ec8bb;p=ghc-hetmet.git

diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 78f0f31..f537e2b 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -21,6 +21,7 @@
 #include "Compact.h"
 #include "Prelude.h"
 #include "LdvProfile.h"
+#include "Trace.h"
 
 #if defined(PROF_SPIN) && defined(THREADED_RTS) && defined(PARALLEL_GC)
 StgWord64 whitehole_spin = 0;
@@ -85,7 +86,7 @@ alloc_for_copy (nat size, step *stp)
    The evacuate() code
    -------------------------------------------------------------------------- */
 
-STATIC_INLINE void
+STATIC_INLINE GNUC_ATTR_HOT void
 copy_tag(StgClosure **p, const StgInfoTable *info,
          StgClosure *src, nat size, step *stp, StgWord tag)
 {
@@ -164,7 +165,7 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
  * pointer of an object, but reserve some padding after it. This is
  * used to optimise evacuation of BLACKHOLEs.
  */
-static void
+static rtsBool
 copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
 {
     StgPtr to, from;
@@ -183,7 +184,7 @@ spin:
     if (IS_FORWARDING_PTR(info)) {
         src->header.info = (const StgInfoTable *)info;
         evacuate(p); // does the failed_to_evac stuff
-        return ;
+        return rtsFalse;
     }
 #else
     info = (W_)src->header.info;
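With the two hunks above, copyPart now reports whether the calling thread actually performed the copy: under THREADED_RTS another GC thread may already have claimed the object, in which case the header word read by the spin loop is a forwarding pointer and copyPart returns rtsFalse. A minimal, self-contained sketch of the low-bit tagging that IS_FORWARDING_PTR relies on, using illustrative stand-in names rather than the RTS headers themselves:

    #include <stdint.h>

    typedef uintptr_t W_;

    /* Info tables are word-aligned, so the bottom bit of a genuine
     * info pointer is always clear; setting it turns the header word
     * into a recognisable forwarding pointer to the to-space copy. */
    #define MK_FORWARDING_PTR(p)  ((W_)(p) | 1)
    #define IS_FORWARDING_PTR(q)  (((W_)(q) & 1) != 0)
    #define UN_FORWARDING_PTR(q)  ((void *)((W_)(q) & ~(W_)1))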
@@ -213,17 +214,88 @@ spin:
     if (size_to_reserve - size_to_copy > 0)
         LDV_FILL_SLOP(to + size_to_copy - 1, (int)(size_to_reserve - size_to_copy));
 #endif
+
+    return rtsTrue;
 }
 
 
 /* Copy wrappers that don't tag the closure after copying */
-STATIC_INLINE void
+STATIC_INLINE GNUC_ATTR_HOT void
 copy(StgClosure **p, const StgInfoTable *info,
      StgClosure *src, nat size, step *stp)
 {
     copy_tag(p,info,src,size,stp,0);
 }
 
+/* -----------------------------------------------------------------------------
+   Evacuate a large object
+
+   This just consists of removing the object from the (doubly-linked)
+   step->large_objects list, and linking it on to the (singly-linked)
+   step->new_large_objects list, from where it will be scavenged later.
+
+   Convention: bd->flags has BF_EVACUATED set for a large object
+   that has been evacuated, or unset otherwise.
+   -------------------------------------------------------------------------- */
+
+STATIC_INLINE void
+evacuate_large(StgPtr p)
+{
+    bdescr *bd = Bdescr(p);
+    step *stp, *new_stp;
+    step_workspace *ws;
+
+    stp = bd->step;
+    ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);
+
+    // object must be at the beginning of the block (or be a ByteArray)
+    ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
+           (((W_)p & BLOCK_MASK) == 0));
+
+    // already evacuated?
+    if (bd->flags & BF_EVACUATED) {
+        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
+         * the desired destination (see comments in evacuate()).
+         */
+        if (stp < gct->evac_step) {
+            gct->failed_to_evac = rtsTrue;
+            TICK_GC_FAILED_PROMOTION();
+        }
+        RELEASE_SPIN_LOCK(&stp->sync_large_objects);
+        return;
+    }
+
+    // remove from large_object list
+    if (bd->u.back) {
+        bd->u.back->link = bd->link;
+    } else { // first object in the list
+        stp->large_objects = bd->link;
+    }
+    if (bd->link) {
+        bd->link->u.back = bd->u.back;
+    }
+
+    /* link it on to the evacuated large object list of the destination step
+     */
+    new_stp = stp->to;
+    if (new_stp < gct->evac_step) {
+        if (gct->eager_promotion) {
+            new_stp = gct->evac_step;
+        } else {
+            gct->failed_to_evac = rtsTrue;
+        }
+    }
+
+    ws = &gct->steps[new_stp->abs_no];
+    bd->flags |= BF_EVACUATED;
+    bd->step = new_stp;
+    bd->gen_no = new_stp->gen_no;
+    bd->link = ws->todo_large_objects;
+    ws->todo_large_objects = bd;
+
+    RELEASE_SPIN_LOCK(&stp->sync_large_objects);
+}
+
 /* ----------------------------------------------------------------------------
    Evacuate
 
@@ -266,7 +338,7 @@ copy(StgClosure **p, const StgInfoTable *info,
    extra reads/writes than we save.
    ------------------------------------------------------------------------- */
 
-REGPARM1 void
+REGPARM1 GNUC_ATTR_HOT void
 evacuate(StgClosure **p)
 {
   bdescr *bd = NULL;
@@ -383,7 +455,7 @@ loop:
 
   bd = Bdescr((P_)q);
 
-  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {
+  if ((bd->flags & (BF_LARGE | BF_MARKED | BF_EVACUATED)) != 0) {
 
       // pointer into to-space: just return it. It might be a pointer
       // into a generation that we aren't collecting (> N), or it
@@ -419,17 +491,16 @@ loop:
       /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
-      if (bd->flags & BF_COMPACTED) {
-          if (!is_marked((P_)q,bd)) {
-              mark((P_)q,bd);
-              if (mark_stack_full()) {
-                  mark_stack_overflowed = rtsTrue;
-                  reset_mark_stack();
-              }
-              push_mark_stack((P_)q);
-          }
-          return;
+      if (!is_marked((P_)q,bd)) {
+          mark((P_)q,bd);
+          if (mark_stack_full()) {
+              debugTrace(DEBUG_gc,"mark stack overflowed");
+              mark_stack_overflowed = rtsTrue;
+              reset_mark_stack();
+          }
+          push_mark_stack((P_)q);
       }
+      return;
   }
 
   stp = bd->step->to;
@@ -628,14 +699,18 @@ loop:
   {
      StgTSO *new_tso;
      StgPtr r, s;
-
-     copyPart(p,(StgClosure *)tso, tso_sizeW(tso), sizeofW(StgTSO), stp);
-     new_tso = (StgTSO *)*p;
-     move_TSO(tso, new_tso);
-     for (r = tso->sp, s = new_tso->sp;
-          r < tso->stack+tso->stack_size;) {
-         *s++ = *r++;
-     }
+     rtsBool mine;
+
+     mine = copyPart(p,(StgClosure *)tso, tso_sizeW(tso),
+                     sizeofW(StgTSO), stp);
+     if (mine) {
+         new_tso = (StgTSO *)*p;
+         move_TSO(tso, new_tso);
+         for (r = tso->sp, s = new_tso->sp;
+              r < tso->stack+tso->stack_size;) {
+             *s++ = *r++;
+         }
+     }
      return;
   }
 }
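The TSO case above uses the new rtsBool result so that only the thread that won the copy race moves the stack, and evacuate_large unlinks the block descriptor from the doubly-linked large_objects list, where a NULL back pointer marks the head. A self-contained sketch of that unlink step, with simplified stand-in types in place of the RTS's bdescr and step:

    typedef struct bd_ {
        struct bd_ *link;   /* next block in the list */
        struct bd_ *back;   /* previous block; NULL at the list head */
    } bd;

    static void unlink_large_object(bd **large_objects, bd *b)
    {
        if (b->back) {
            b->back->link = b->link;    /* splice around b */
        } else {                        /* b was first in the list */
            *large_objects = b->link;
        }
        if (b->link) {
            b->link->back = b->back;
        }
    }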
@@ -672,75 +747,6 @@ loop:
 }
 
 /* -----------------------------------------------------------------------------
-   Evacuate a large object
-
-   This just consists of removing the object from the (doubly-linked)
-   step->large_objects list, and linking it on to the (singly-linked)
-   step->new_large_objects list, from where it will be scavenged later.
-
-   Convention: bd->flags has BF_EVACUATED set for a large object
-   that has been evacuated, or unset otherwise.
-   -------------------------------------------------------------------------- */
-
-STATIC_INLINE void
-evacuate_large(StgPtr p)
-{
-    bdescr *bd = Bdescr(p);
-    step *stp, *new_stp;
-    step_workspace *ws;
-
-    stp = bd->step;
-    ACQUIRE_SPIN_LOCK(&stp->sync_large_objects);
-
-    // object must be at the beginning of the block (or be a ByteArray)
-    ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
-           (((W_)p & BLOCK_MASK) == 0));
-
-    // already evacuated?
-    if (bd->flags & BF_EVACUATED) {
-        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
-         * the desired destination (see comments in evacuate()).
-         */
-        if (stp < gct->evac_step) {
-            gct->failed_to_evac = rtsTrue;
-            TICK_GC_FAILED_PROMOTION();
-        }
-        RELEASE_SPIN_LOCK(&stp->sync_large_objects);
-        return;
-    }
-
-    // remove from large_object list
-    if (bd->u.back) {
-        bd->u.back->link = bd->link;
-    } else { // first object in the list
-        stp->large_objects = bd->link;
-    }
-    if (bd->link) {
-        bd->link->u.back = bd->u.back;
-    }
-
-    /* link it on to the evacuated large object list of the destination step
-     */
-    new_stp = stp->to;
-    if (new_stp < gct->evac_step) {
-        if (gct->eager_promotion) {
-            new_stp = gct->evac_step;
-        } else {
-            gct->failed_to_evac = rtsTrue;
-        }
-    }
-
-    ws = &gct->steps[new_stp->abs_no];
-    bd->flags |= BF_EVACUATED;
-    bd->step = new_stp;
-    bd->gen_no = new_stp->gen_no;
-    bd->link = ws->todo_large_objects;
-    ws->todo_large_objects = bd;
-
-    RELEASE_SPIN_LOCK(&stp->sync_large_objects);
-}
-
-/* -----------------------------------------------------------------------------
    Evaluate a THUNK_SELECTOR if possible.
 
    p points to a THUNK_SELECTOR that we want to evaluate. The
@@ -773,14 +779,25 @@ unchain_thunk_selectors(StgSelector *p, StgClosure *val)
         prev = (StgSelector*)((StgClosure *)p)->payload[0];
 
         // Update the THUNK_SELECTOR with an indirection to the
-        // EVACUATED closure now at p. Why do this rather than
-        // upd_evacuee(q,p)? Because we have an invariant that an
-        // EVACUATED closure always points to an object in the
-        // same or an older generation (required by the short-cut
-        // test in the EVACUATED case, below).
-        ((StgInd *)p)->indirectee = val;
-        write_barrier();
-        SET_INFO(p, &stg_IND_info);
+        // value. The value is still in from-space at this stage.
+        //
+        // (old note: Why not do upd_evacuee(q,p)? Because we have an
+        // invariant that an EVACUATED closure always points to an
+        // object in the same or an older generation (required by
+        // the short-cut test in the EVACUATED case, below).
+        if ((StgClosure *)p == val) {
+            // must be a loop; just leave a BLACKHOLE in place. This
+            // can happen when we have a chain of selectors that
+            // eventually loops back on itself. We can't leave an
+            // indirection pointing to itself, and we want the program
+            // to deadlock if it ever enters this closure, so
+            // BLACKHOLE is correct.
+            SET_INFO(p, &stg_BLACKHOLE_info);
+        } else {
+            ((StgInd *)p)->indirectee = val;
+            write_barrier();
+            SET_INFO(p, &stg_IND_info);
+        }
 
         // For the purposes of LDV profiling, we have created an
         // indirection.
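In the non-looping branch above, the order of the three statements matters: the indirectee must be written before the info pointer is published, or a concurrent GC thread could observe stg_IND_info with a garbage payload. A self-contained sketch of that publication pattern, using C11 release semantics in place of the RTS's write_barrier() and stand-in types for the closure layout:

    #include <stdatomic.h>

    typedef struct {
        _Atomic(const void *) info;  /* plays the role of the info pointer */
        void *indirectee;            /* plays the role of the IND payload  */
    } Ind;

    static void publish_ind(Ind *p, void *val, const void *ind_info)
    {
        p->indirectee = val;              /* 1: write the payload first */
        atomic_store_explicit(&p->info, ind_info,
                              memory_order_release);
                                          /* 2: publish the new info,
                                           * making the payload visible */
    }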
@@ -828,7 +845,7 @@ selector_chain:
       // (scavenge_mark_stack doesn't deal with IND). BEWARE! This
       // bit is very tricky to get right. If you make changes
       // around here, test by compiling stage 3 with +RTS -c -RTS.
-      if (bd->flags & BF_COMPACTED) {
+      if (bd->flags & BF_MARKED) {
          // must call evacuate() to mark this closure if evac==rtsTrue
          *q = (StgClosure *)p;
          if (evac) evacuate(q);
@@ -957,12 +974,18 @@ selector_loop:
 
              prev_thunk_selector = p;
 
              *q = val;
-             if (evac) evacuate(q);
-             val = *q;
+
+             // update the other selectors in the chain *before*
+             // evacuating the value. This is necessary in the case
+             // where the value turns out to be one of the selectors
+             // in the chain (i.e. we have a loop), and evacuating it
+             // would corrupt the chain.
+             unchain_thunk_selectors(prev_thunk_selector, val);
+
              // evacuate() cannot recurse through
              // eval_thunk_selector(), because we know val is not
              // a THUNK_SELECTOR.
-             unchain_thunk_selectors(prev_thunk_selector, val);
+             if (evac) evacuate(q);
              return;
          }
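The final hunk runs unchain_thunk_selectors before evacuating val: if val turns out to be one of the selectors in the chain, evacuating it first would leave the chain being rewritten through a stale pointer. A sketch of the chain walk under simplified stand-in types, where the next field plays the role the RTS gives to payload[0]:

    typedef struct Sel_ {
        struct Sel_ *next;   /* next visited selector (payload[0] in the RTS) */
        void        *val;    /* becomes the indirectee */
    } Sel;

    /* Walk back down the chain of visited selectors, pointing each one
     * at the final value (cf. unchain_thunk_selectors above). */
    static void unchain(Sel *p, void *final_val)
    {
        while (p != NULL) {
            Sel *prev = p->next;   /* save before overwriting */
            p->val = final_val;
            p = prev;
        }
    }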