diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c
index 8d37f27..b711914 100644
--- a/rts/sm/Evac.c
+++ b/rts/sm/Evac.c
@@ -11,17 +11,18 @@
  *
  * ---------------------------------------------------------------------------*/
 
+#include "PosixSource.h"
 #include "Rts.h"
-#include "Storage.h"
-#include "MBlock.h"
+
 #include "Evac.h"
+#include "Storage.h"
 #include "GC.h"
 #include "GCThread.h"
 #include "GCUtils.h"
 #include "Compact.h"
 #include "Prelude.h"
-#include "LdvProfile.h"
 #include "Trace.h"
+#include "LdvProfile.h"
 
 #if defined(PROF_SPIN) && defined(THREADED_RTS) && defined(PARALLEL_GC)
 StgWord64 whitehole_spin = 0;
@@ -29,6 +30,7 @@ StgWord64 whitehole_spin = 0;
 
 #if defined(THREADED_RTS) && !defined(PARALLEL_GC)
 #define evacuate(p) evacuate1(p)
+#define HEAP_ALLOCED_GC(p) HEAP_ALLOCED(p)
 #endif
 
 #if !defined(PARALLEL_GC)
@@ -73,10 +75,10 @@ alloc_for_copy (nat size, step *stp)
      * necessary.
      */
     to = ws->todo_free;
-    if (to + size > ws->todo_lim) {
+    ws->todo_free += size;
+    if (ws->todo_free > ws->todo_lim) {
         to = todo_block_full(size, ws);
     }
-    ws->todo_free = to + size;
 
     ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim);
     return to;
@@ -110,7 +112,8 @@ copy_tag(StgClosure **p, const StgInfoTable *info,
 #if defined(PARALLEL_GC)
     {
         const StgInfoTable *new_info;
-        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info, (W_)info, MK_FORWARDING_PTR(to));
+        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info,
+                                             (W_)info, MK_FORWARDING_PTR(to));
         if (new_info != info) {
             return evacuate(p); // does the failed_to_evac stuff
         } else {
@@ -166,46 +169,39 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info,
  * used to optimise evacuation of BLACKHOLEs.
  */
 static rtsBool
-copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
+copyPart(StgClosure **p, const StgInfoTable *info, StgClosure *src,
+         nat size_to_reserve, nat size_to_copy, step *stp)
 {
     StgPtr to, from;
     nat i;
-    StgWord info;
 
-#if defined(PARALLEL_GC)
-spin:
-    info = xchg((StgPtr)&src->header.info, (W_)&stg_WHITEHOLE_info);
-    if (info == (W_)&stg_WHITEHOLE_info) {
-#ifdef PROF_SPIN
-        whitehole_spin++;
-#endif
-        goto spin;
-    }
-    if (IS_FORWARDING_PTR(info)) {
-        src->header.info = (const StgInfoTable *)info;
-        evacuate(p); // does the failed_to_evac stuff
-        return rtsFalse;
-    }
-#else
-    info = (W_)src->header.info;
-#endif
-
     to = alloc_for_copy(size_to_reserve, stp);
-    *p = (StgClosure *)to;
 
     TICK_GC_WORDS_COPIED(size_to_copy);
 
     from = (StgPtr)src;
-    to[0] = info;
+    to[0] = (W_)info;
     for (i = 1; i < size_to_copy; i++) { // unroll for small i
         to[i] = from[i];
     }
 
 #if defined(PARALLEL_GC)
-    write_barrier();
+    {
+        const StgInfoTable *new_info;
+        new_info = (const StgInfoTable *)cas((StgPtr)&src->header.info,
+                                             (W_)info, MK_FORWARDING_PTR(to));
+        if (new_info != info) {
+            evacuate(p); // does the failed_to_evac stuff
+            return rtsFalse;
+        } else {
+            *p = (StgClosure*)to;
+        }
+    }
+#else
+    src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to);
+    *p = (StgClosure*)to;
 #endif
-    src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to);
-    
+    
 #ifdef PROFILING
     // We store the size of the just evacuated object in the LDV word so that
     // the profiler can guess the position of the next object later.
@@ -294,8 +290,10 @@ evacuate_large(StgPtr p)
       // them straight on the scavenged_large_objects list.
       if (bd->flags & BF_PINNED) {
           ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);
-          dbl_link_onto(bd, &ws->step->scavenged_large_objects);
-          ws->step->n_scavenged_large_blocks += bd->blocks;
+          if (new_stp != stp) { ACQUIRE_SPIN_LOCK(&new_stp->sync_large_objects); }
+          dbl_link_onto(bd, &new_stp->scavenged_large_objects);
+          new_stp->n_scavenged_large_blocks += bd->blocks;
+          if (new_stp != stp) { RELEASE_SPIN_LOCK(&new_stp->sync_large_objects); }
       } else {
           bd->link = ws->todo_large_objects;
           ws->todo_large_objects = bd;
@@ -364,7 +362,7 @@ loop:
 
   ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
 
-  if (!HEAP_ALLOCED(q)) {
+  if (!HEAP_ALLOCED_GC(q)) {
 
       if (!major_gc) return;
 
@@ -635,7 +633,7 @@ loop:
 
   case CAF_BLACKHOLE:
   case BLACKHOLE:
-      copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
+      copyPart(p,info,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
       return;
 
   case THUNK_SELECTOR:
@@ -707,7 +705,7 @@ loop:
         StgPtr r, s;
         rtsBool mine;
 
-        mine = copyPart(p,(StgClosure *)tso, tso_sizeW(tso),
+        mine = copyPart(p,info,(StgClosure *)tso, tso_sizeW(tso),
                         sizeofW(StgTSO), stp);
         if (mine) {
             new_tso = (StgTSO *)*p;
@@ -780,7 +778,7 @@ unchain_thunk_selectors(StgSelector *p, StgClosure *val)
       // invoke eval_thunk_selector(), the recursive calls will not
       // evacuate the value (because we want to select on the value,
      // not evacuate it), so in this case val is in from-space.
-      // ASSERT(!HEAP_ALLOCED(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));
+      // ASSERT(!HEAP_ALLOCED_GC(val) || Bdescr((P_)val)->gen_no > N || (Bdescr((P_)val)->flags & BF_EVACUATED));
 
       prev = (StgSelector*)((StgClosure *)p)->payload[0];
 
@@ -834,7 +832,7 @@ eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac)
 selector_chain:
 
     bd = Bdescr((StgPtr)p);
-    if (HEAP_ALLOCED(p)) {
+    if (HEAP_ALLOCED_GC(p)) {
         // If the THUNK_SELECTOR is in to-space or in a generation that we
         // are not collecting, then bale out early. We won't be able to
        // save any space in any case, and updating with an indirection is
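
Notes on the change above (commentary, not part of the diff).

1. Forwarding via CAS. copyPart previously claimed the source object with a
WHITEHOLE xchg/spin loop before copying; the patch switches it to the protocol
copy_tag already uses: copy first, then publish a forwarding pointer over
src->header.info with a single cas(). If the CAS fails, another GC thread
evacuated the object concurrently, so this thread abandons its copy and
re-enters evacuate(p) to pick up the winner's forwarding pointer. This is also
why write_barrier() disappears: the CAS now orders the copy before publication.
The alloc_for_copy hunk is the matching allocator tweak: bump ws->todo_free
unconditionally and call todo_block_full() only when the bump overruns
todo_lim. Below is a minimal, self-contained C11 sketch of the copy-then-CAS
scheme. It is a model written for this note, not RTS code: Closure,
evacuate_one, mk_forwarding_ptr and the rest are invented stand-ins for
StgClosure, evacuate, MK_FORWARDING_PTR and friends, assuming a one-bit
forwarding tag in the info word.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Closure {
    _Atomic uintptr_t info;        /* stands in for src->header.info     */
    uintptr_t payload[4];          /* fixed size to keep the model small */
} Closure;

#define FWD_BIT ((uintptr_t)1)     /* low tag bit marks a forwarding ptr */

static uintptr_t mk_forwarding_ptr(Closure *to) { return (uintptr_t)to | FWD_BIT; }
static int       is_forwarding_ptr(uintptr_t w) { return (w & FWD_BIT) != 0; }
static Closure  *un_forwarding_ptr(uintptr_t w) { return (Closure *)(w & ~FWD_BIT); }

/* Copy src into to-space, then publish the copy with one CAS. */
static Closure *evacuate_one(Closure *src, Closure *to_space, size_t *alloc)
{
    uintptr_t info = atomic_load(&src->info);
    if (is_forwarding_ptr(info))             /* already evacuated: follow it */
        return un_forwarding_ptr(info);

    Closure *to = &to_space[(*alloc)++];     /* bump allocation, cf. alloc_for_copy */
    atomic_init(&to->info, info);
    for (int i = 0; i < 4; i++)
        to->payload[i] = src->payload[i];

    /* Publish: exactly one thread's CAS succeeds.  A loser abandons its
     * copy and takes the winner's forwarding pointer instead, which is
     * the diff's "evacuate(p) // does the failed_to_evac stuff" retry. */
    uintptr_t expected = info;
    if (atomic_compare_exchange_strong(&src->info, &expected,
                                       mk_forwarding_ptr(to)))
        return to;
    return un_forwarding_ptr(expected);      /* expected = winner's fwd ptr */
}

int main(void)
{
    Closure from;
    atomic_init(&from.info, (uintptr_t)0xC0DE0);  /* fake info ptr, low bit clear */
    for (int i = 0; i < 4; i++) from.payload[i] = i + 1;

    Closure to_space[4];
    size_t alloc = 0;
    Closure *a = evacuate_one(&from, to_space, &alloc);
    Closure *b = evacuate_one(&from, to_space, &alloc);  /* fwd-ptr fast path */
    printf("single copy: %s\n", (a == b && alloc == 1) ? "ok" : "broken");
    return 0;
}

A losing thread's copy simply becomes dead to-space slop that nothing points
at, which is cheaper than serialising the copy itself under a lock.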
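2. Pinned large objects and cross-step locking. The BF_PINNED hunk links the
block onto new_stp->scavenged_large_objects, the list of the step the object
actually belongs to, instead of ws->step's list, and brackets the update with
new_stp->sync_large_objects. The lock is taken only when new_stp differs from
stp; when they coincide the hunk skips it, presumably because no other thread
races on that list in that case. A small standalone C11 model of the pattern,
where Block, Step, spin_lock and dbl_link_onto are simplified stand-ins for
bdescr, step and the RTS spin-lock macros:

#include <stdatomic.h>
#include <stddef.h>

typedef struct Block {                 /* stands in for bdescr            */
    struct Block *link, *back;
    int blocks;
} Block;

typedef struct Step {                  /* stands in for step              */
    Block *scavenged_large_objects;
    int n_scavenged_large_blocks;
    atomic_flag sync_large_objects;    /* stands in for the RTS spin lock */
} Step;

static void spin_lock(atomic_flag *l)   { while (atomic_flag_test_and_set(l)) { /* spin */ } }
static void spin_unlock(atomic_flag *l) { atomic_flag_clear(l); }

/* Prepend bd to a doubly-linked list, like the RTS's dbl_link_onto. */
static void dbl_link_onto(Block *bd, Block **list)
{
    bd->link = *list;
    bd->back = NULL;
    if (*list != NULL)
        (*list)->back = bd;
    *list = bd;
}

/* The pattern from the BF_PINNED hunk: always update the destination
 * step's list, but only take its lock when the destination is not the
 * step this thread is already working on. */
static void keep_pinned_block(Block *bd, Step *stp, Step *new_stp)
{
    if (new_stp != stp) spin_lock(&new_stp->sync_large_objects);
    dbl_link_onto(bd, &new_stp->scavenged_large_objects);
    new_stp->n_scavenged_large_blocks += bd->blocks;
    if (new_stp != stp) spin_unlock(&new_stp->sync_large_objects);
}

int main(void)
{
    Step stp     = { NULL, 0, ATOMIC_FLAG_INIT };
    Step new_stp = { NULL, 0, ATOMIC_FLAG_INIT };
    Block bd     = { NULL, NULL, 3 };

    keep_pinned_block(&bd, &stp, &new_stp);   /* cross-step: locks new_stp */
    return new_stp.n_scavenged_large_blocks == 3 ? 0 : 1;
}

The remaining hunks are mechanical: the copyPart call sites for BLACKHOLEs and
TSOs pass the new info argument, and evacuate, unchain_thunk_selectors and
eval_thunk_selector now use HEAP_ALLOCED_GC, which the first hunk aliases back
to plain HEAP_ALLOCED when THREADED_RTS is built without PARALLEL_GC.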