X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=blobdiff_plain;f=rts%2Fsm%2FEvac.c;h=fdb5477a3801d5996222256784f82e9fe9f405ac;hp=6325a12f9ce8f9707b7246abe8012d25d4daf993;hb=1fb38442d3a55ac92795aa6c5ed4df82011df724;hpb=30554cc82c2aeec64b03495048a650a3c0443d3c diff --git a/rts/sm/Evac.c b/rts/sm/Evac.c index 6325a12..fdb5477 100644 --- a/rts/sm/Evac.c +++ b/rts/sm/Evac.c @@ -11,17 +11,20 @@ * * ---------------------------------------------------------------------------*/ +#include "PosixSource.h" #include "Rts.h" -#include "Storage.h" -#include "MBlock.h" + #include "Evac.h" +#include "Storage.h" #include "GC.h" #include "GCThread.h" +#include "GCTDecl.h" #include "GCUtils.h" #include "Compact.h" +#include "MarkStack.h" #include "Prelude.h" -#include "LdvProfile.h" #include "Trace.h" +#include "LdvProfile.h" #if defined(PROF_SPIN) && defined(THREADED_RTS) && defined(PARALLEL_GC) StgWord64 whitehole_spin = 0; @@ -49,35 +52,34 @@ STATIC_INLINE void evacuate_large(StgPtr p); -------------------------------------------------------------------------- */ STATIC_INLINE StgPtr -alloc_for_copy (nat size, step *stp) +alloc_for_copy (nat size, nat gen_no) { StgPtr to; - step_workspace *ws; + gen_workspace *ws; /* Find out where we're going, using the handy "to" pointer in - * the step of the source object. If it turns out we need to + * the gen of the source object. If it turns out we need to * evacuate to an older generation, adjust it here (see comment * by evacuate()). */ - if (stp < gct->evac_step) { + if (gen_no < gct->evac_gen_no) { if (gct->eager_promotion) { - stp = gct->evac_step; + gen_no = gct->evac_gen_no; } else { gct->failed_to_evac = rtsTrue; } } - ws = &gct->steps[stp->abs_no]; - // this compiles to a single mem access to stp->abs_no only - - /* chain a new block onto the to-space for the destination step if + ws = &gct->gens[gen_no]; // zero memory references here + + /* chain a new block onto the to-space for the destination gen if * necessary. */ to = ws->todo_free; - if (to + size > ws->todo_lim) { + ws->todo_free += size; + if (ws->todo_free > ws->todo_lim) { to = todo_block_full(size, ws); } - ws->todo_free = to + size; ASSERT(ws->todo_free >= ws->todo_bd->free && ws->todo_free <= ws->todo_lim); return to; @@ -89,15 +91,13 @@ alloc_for_copy (nat size, step *stp) STATIC_INLINE GNUC_ATTR_HOT void copy_tag(StgClosure **p, const StgInfoTable *info, - StgClosure *src, nat size, step *stp, StgWord tag) + StgClosure *src, nat size, nat gen_no, StgWord tag) { StgPtr to, from; nat i; - to = alloc_for_copy(size,stp); + to = alloc_for_copy(size,gen_no); - TICK_GC_WORDS_COPIED(size); - from = (StgPtr)src; to[0] = (W_)info; for (i = 1; i < size; i++) { // unroll for small i @@ -133,16 +133,12 @@ copy_tag(StgClosure **p, const StgInfoTable *info, #if defined(PARALLEL_GC) STATIC_INLINE void copy_tag_nolock(StgClosure **p, const StgInfoTable *info, - StgClosure *src, nat size, step *stp, StgWord tag) + StgClosure *src, nat size, nat gen_no, StgWord tag) { StgPtr to, from; nat i; - to = alloc_for_copy(size,stp); - *p = TAG_CLOSURE(tag,(StgClosure*)to); - src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to); - - TICK_GC_WORDS_COPIED(size); + to = alloc_for_copy(size,gen_no); from = (StgPtr)src; to[0] = (W_)info; @@ -150,6 +146,12 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info, to[i] = from[i]; } + // if somebody else reads the forwarding pointer, we better make + // sure there's a closure at the end of it. 
+ write_barrier(); + *p = TAG_CLOSURE(tag,(StgClosure*)to); + src->header.info = (const StgInfoTable *)MK_FORWARDING_PTR(to); + // if (to+size+2 < bd->start + BLOCK_SIZE_W) { // __builtin_prefetch(to + size + 2, 1); // } @@ -164,10 +166,11 @@ copy_tag_nolock(StgClosure **p, const StgInfoTable *info, /* Special version of copy() for when we only want to copy the info * pointer of an object, but reserve some padding after it. This is - * used to optimise evacuation of BLACKHOLEs. + * used to optimise evacuation of TSOs. */ static rtsBool -copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp) +copyPart(StgClosure **p, StgClosure *src, nat size_to_reserve, + nat size_to_copy, nat gen_no) { StgPtr to, from; nat i; @@ -191,10 +194,7 @@ spin: info = (W_)src->header.info; #endif - to = alloc_for_copy(size_to_reserve, stp); - *p = (StgClosure *)to; - - TICK_GC_WORDS_COPIED(size_to_copy); + to = alloc_for_copy(size_to_reserve, gen_no); from = (StgPtr)src; to[0] = info; @@ -202,10 +202,9 @@ spin: to[i] = from[i]; } -#if defined(PARALLEL_GC) write_barrier(); -#endif src->header.info = (const StgInfoTable*)MK_FORWARDING_PTR(to); + *p = (StgClosure *)to; #ifdef PROFILING // We store the size of the just evacuated object in the LDV word so that @@ -223,17 +222,17 @@ spin: /* Copy wrappers that don't tag the closure after copying */ STATIC_INLINE GNUC_ATTR_HOT void copy(StgClosure **p, const StgInfoTable *info, - StgClosure *src, nat size, step *stp) + StgClosure *src, nat size, nat gen_no) { - copy_tag(p,info,src,size,stp,0); + copy_tag(p,info,src,size,gen_no,0); } /* ----------------------------------------------------------------------------- Evacuate a large object This just consists of removing the object from the (doubly-linked) - step->large_objects list, and linking it on to the (singly-linked) - step->new_large_objects list, from where it will be scavenged later. + gen->large_objects list, and linking it on to the (singly-linked) + gen->new_large_objects list, from where it will be scavenged later. Convention: bd->flags has BF_EVACUATED set for a large object that has been evacuated, or unset otherwise. @@ -242,23 +241,26 @@ copy(StgClosure **p, const StgInfoTable *info, STATIC_INLINE void evacuate_large(StgPtr p) { - bdescr *bd = Bdescr(p); - step *stp, *new_stp; - step_workspace *ws; + bdescr *bd; + generation *gen, *new_gen; + nat gen_no, new_gen_no; + gen_workspace *ws; - stp = bd->step; - ACQUIRE_SPIN_LOCK(&stp->sync_large_objects); + bd = Bdescr(p); + gen = bd->gen; + gen_no = bd->gen_no; + ACQUIRE_SPIN_LOCK(&gen->sync); // already evacuated? if (bd->flags & BF_EVACUATED) { /* Don't forget to set the gct->failed_to_evac flag if we didn't get * the desired destination (see comments in evacuate()). 
*/ - if (stp < gct->evac_step) { + if (gen_no < gct->evac_gen_no) { gct->failed_to_evac = rtsTrue; TICK_GC_FAILED_PROMOTION(); } - RELEASE_SPIN_LOCK(&stp->sync_large_objects); + RELEASE_SPIN_LOCK(&gen->sync); return; } @@ -266,28 +268,29 @@ evacuate_large(StgPtr p) if (bd->u.back) { bd->u.back->link = bd->link; } else { // first object in the list - stp->large_objects = bd->link; + gen->large_objects = bd->link; } if (bd->link) { bd->link->u.back = bd->u.back; } - /* link it on to the evacuated large object list of the destination step + /* link it on to the evacuated large object list of the destination gen */ - new_stp = stp->to; - if (new_stp < gct->evac_step) { + new_gen_no = bd->dest_no; + + if (new_gen_no < gct->evac_gen_no) { if (gct->eager_promotion) { - new_stp = gct->evac_step; + new_gen_no = gct->evac_gen_no; } else { gct->failed_to_evac = rtsTrue; } } - ws = &gct->steps[new_stp->abs_no]; + ws = &gct->gens[new_gen_no]; + new_gen = &generations[new_gen_no]; bd->flags |= BF_EVACUATED; - bd->step = new_stp; - bd->gen_no = new_stp->gen_no; + initBdescr(bd, new_gen, new_gen->to); // If this is a block of pinned objects, we don't have to scan // these objects, because they aren't allowed to contain any @@ -295,16 +298,16 @@ evacuate_large(StgPtr p) // them straight on the scavenged_large_objects list. if (bd->flags & BF_PINNED) { ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS); - if (new_stp != stp) { ACQUIRE_SPIN_LOCK(&new_stp->sync_large_objects); } - dbl_link_onto(bd, &new_stp->scavenged_large_objects); - new_stp->n_scavenged_large_blocks += bd->blocks; - if (new_stp != stp) { RELEASE_SPIN_LOCK(&new_stp->sync_large_objects); } + if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync); } + dbl_link_onto(bd, &new_gen->scavenged_large_objects); + new_gen->n_scavenged_large_blocks += bd->blocks; + if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync); } } else { bd->link = ws->todo_large_objects; ws->todo_large_objects = bd; } - RELEASE_SPIN_LOCK(&stp->sync_large_objects); + RELEASE_SPIN_LOCK(&gen->sync); } /* ---------------------------------------------------------------------------- @@ -313,22 +316,22 @@ evacuate_large(StgPtr p) This is called (eventually) for every live object in the system. The caller to evacuate specifies a desired generation in the - gct->evac_step thread-local variable. The following conditions apply to + gct->evac_gen thread-local variable. The following conditions apply to evacuating an object which resides in generation M when we're collecting up to generation N - if M >= gct->evac_step + if M >= gct->evac_gen if M > N do nothing - else evac to step->to + else evac to gen->to - if M < gct->evac_step evac to gct->evac_step, step 0 + if M < gct->evac_gen evac to gct->evac_gen, step 0 if the object is already evacuated, then we check which generation it now resides in. - if M >= gct->evac_step do nothing - if M < gct->evac_step set gct->failed_to_evac flag to indicate that we - didn't manage to evacuate this object into gct->evac_step. + if M >= gct->evac_gen do nothing + if M < gct->evac_gen set gct->failed_to_evac flag to indicate that we + didn't manage to evacuate this object into gct->evac_gen. 
OPTIMISATION NOTES: @@ -353,7 +356,7 @@ REGPARM1 GNUC_ATTR_HOT void evacuate(StgClosure **p) { bdescr *bd = NULL; - step *stp; + nat gen_no; StgClosure *q; const StgInfoTable *info; StgWord tag; @@ -365,7 +368,7 @@ loop: tag = GET_CLOSURE_TAG(q); q = UNTAG_CLOSURE(q); - ASSERT(LOOKS_LIKE_CLOSURE_PTR(q)); + ASSERTM(LOOKS_LIKE_CLOSURE_PTR(q), "invalid closure, info=%p", q->header.info); if (!HEAP_ALLOCED_GC(q)) { @@ -416,8 +419,7 @@ loop: * on the CAF list, so don't do anything with it here (we'll * scavenge it later). */ - if (((StgIndStatic *)q)->saved_info == NULL) { - if (*IND_STATIC_LINK((StgClosure *)q) == NULL) { + if (*IND_STATIC_LINK((StgClosure *)q) == NULL) { #ifndef THREADED_RTS *IND_STATIC_LINK((StgClosure *)q) = gct->static_objects; gct->static_objects = (StgClosure *)q; @@ -430,7 +432,6 @@ loop: gct->static_objects = (StgClosure *)q; } #endif - } } return; @@ -478,7 +479,7 @@ loop: // We aren't copying this object, so we have to check // whether it is already in the target generation. (this is // the write barrier). - if (bd->step < gct->evac_step) { + if (bd->gen_no < gct->evac_gen_no) { gct->failed_to_evac = rtsTrue; TICK_GC_FAILED_PROMOTION(); } @@ -488,39 +489,27 @@ loop: /* evacuate large objects by re-linking them onto a different list. */ if (bd->flags & BF_LARGE) { - info = get_itbl(q); - if (info->type == TSO && - ((StgTSO *)q)->what_next == ThreadRelocated) { - q = (StgClosure *)((StgTSO *)q)->_link; - *p = q; - goto loop; - } - evacuate_large((P_)q); + evacuate_large((P_)q); return; } - /* If the object is in a step that we're compacting, then we + /* If the object is in a gen that we're compacting, then we * need to use an alternative evacuate procedure. */ if (!is_marked((P_)q,bd)) { mark((P_)q,bd); - if (mark_stack_full()) { - debugTrace(DEBUG_gc,"mark stack overflowed"); - mark_stack_overflowed = rtsTrue; - reset_mark_stack(); - } push_mark_stack((P_)q); } return; } - stp = bd->step->to; + gen_no = bd->dest_no; info = q->header.info; if (IS_FORWARDING_PTR(info)) { /* Already evacuated, just return the forwarding address. - * HOWEVER: if the requested destination generation (gct->evac_step) is + * HOWEVER: if the requested destination generation (gct->evac_gen) is * older than the actual generation (because the object was * already evacuated to a younger generation) then we have to * set the gct->failed_to_evac flag to indicate that we couldn't @@ -531,14 +520,14 @@ loop: * shortcut it if either the required generation is 0, or the * current object (the EVACUATED) is in a high enough generation. * We know that an EVACUATED always points to an object in the - * same or an older generation. stp is the lowest step that the + * same or an older generation. gen is the lowest generation that the * current object would be evacuated to, so we only do the full - * check if stp is too low. + * check if gen is too low. 
*/ StgClosure *e = (StgClosure*)UN_FORWARDING_PTR(info); *p = TAG_CLOSURE(tag,e); - if (stp < gct->evac_step) { // optimisation - if (Bdescr((P_)e)->step < gct->evac_step) { + if (gen_no < gct->evac_gen_no) { // optimisation + if (Bdescr((P_)e)->gen_no < gct->evac_gen_no) { gct->failed_to_evac = rtsTrue; TICK_GC_FAILED_PROMOTION(); } @@ -555,11 +544,20 @@ loop: case MUT_VAR_DIRTY: case MVAR_CLEAN: case MVAR_DIRTY: - copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp); + copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no); return; + // For ints and chars of low value, save space by replacing references to + // these with closures with references to common, shared ones in the RTS. + // + // * Except when compiling into Windows DLLs which don't support cross-package + // data references very well. + // case CONSTR_0_1: - { + { +#if defined(__PIC__) && defined(mingw32_HOST_OS) + copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag); +#else StgWord w = (StgWord)q->payload[0]; if (info == Czh_con_info && // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE && @@ -575,33 +573,30 @@ loop: ); } else { - copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag); + copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag); } +#endif return; } case FUN_0_1: case FUN_1_0: case CONSTR_1_0: - copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,stp,tag); + copy_tag_nolock(p,info,q,sizeofW(StgHeader)+1,gen_no,tag); return; case THUNK_1_0: case THUNK_0_1: - copy(p,info,q,sizeofW(StgThunk)+1,stp); + copy(p,info,q,sizeofW(StgThunk)+1,gen_no); return; case THUNK_1_1: case THUNK_2_0: case THUNK_0_2: #ifdef NO_PROMOTE_THUNKS - if (bd->gen_no == 0 && - bd->step->no != 0 && - bd->step->no == generations[bd->gen_no].n_steps-1) { - stp = bd->step; - } +#error bitrotted #endif - copy(p,info,q,sizeofW(StgThunk)+2,stp); + copy(p,info,q,sizeofW(StgThunk)+2,gen_no); return; case FUN_1_1: @@ -609,36 +604,57 @@ loop: case FUN_0_2: case CONSTR_1_1: case CONSTR_2_0: - copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag); + copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag); return; case CONSTR_0_2: - copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,stp,tag); + copy_tag_nolock(p,info,q,sizeofW(StgHeader)+2,gen_no,tag); return; case THUNK: - copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp); + copy(p,info,q,thunk_sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no); return; case FUN: case IND_PERM: - case IND_OLDGEN_PERM: case CONSTR: - copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag); + copy_tag_nolock(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no,tag); return; + case BLACKHOLE: + { + StgClosure *r; + const StgInfoTable *i; + r = ((StgInd*)q)->indirectee; + if (GET_CLOSURE_TAG(r) == 0) { + i = r->header.info; + if (IS_FORWARDING_PTR(i)) { + r = (StgClosure *)UN_FORWARDING_PTR(i); + i = r->header.info; + } + if (i == &stg_TSO_info + || i == &stg_WHITEHOLE_info + || i == &stg_BLOCKING_QUEUE_CLEAN_info + || i == &stg_BLOCKING_QUEUE_DIRTY_info) { + copy(p,info,q,sizeofW(StgInd),gen_no); + return; + } + ASSERT(i != &stg_IND_info); + } + q = r; + *p = r; + goto loop; + } + + case BLOCKING_QUEUE: case WEAK: - case STABLE_NAME: - copy_tag(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),stp,tag); + case PRIM: + case MUT_PRIM: + copy(p,info,q,sizeW_fromITBL(INFO_PTR_TO_STRUCT(info)),gen_no); return; case BCO: - copy(p,info,q,bco_sizeW((StgBCO *)q),stp); - return; - - case CAF_BLACKHOLE: - case BLACKHOLE: - copyPart(p,q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp); + 
copy(p,info,q,bco_sizeW((StgBCO *)q),gen_no); return; case THUNK_SELECTOR: @@ -646,7 +662,6 @@ loop: return; case IND: - case IND_OLDGEN: // follow chains of indirections, don't evacuate them q = ((StgInd*)q)->indirectee; *p = q; @@ -657,6 +672,7 @@ loop: case RET_BIG: case RET_DYN: case UPDATE_FRAME: + case UNDERFLOW_FRAME: case STOP_FRAME: case CATCH_FRAME: case CATCH_STM_FRAME: @@ -666,20 +682,20 @@ loop: barf("evacuate: stack frame at %p\n", q); case PAP: - copy(p,info,q,pap_sizeW((StgPAP*)q),stp); + copy(p,info,q,pap_sizeW((StgPAP*)q),gen_no); return; case AP: - copy(p,info,q,ap_sizeW((StgAP*)q),stp); + copy(p,info,q,ap_sizeW((StgAP*)q),gen_no); return; case AP_STACK: - copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),stp); + copy(p,info,q,ap_stack_sizeW((StgAP_STACK*)q),gen_no); return; case ARR_WORDS: // just copy the block - copy(p,info,q,arr_words_sizeW((StgArrWords *)q),stp); + copy(p,info,q,arr_words_sizeW((StgArrWords *)q),gen_no); return; case MUT_ARR_PTRS_CLEAN: @@ -687,36 +703,31 @@ loop: case MUT_ARR_PTRS_FROZEN: case MUT_ARR_PTRS_FROZEN0: // just copy the block - copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp); + copy(p,info,q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),gen_no); return; case TSO: - { - StgTSO *tso = (StgTSO *)q; + copy(p,info,q,sizeofW(StgTSO),gen_no); + return; - /* Deal with redirected TSOs (a TSO that's had its stack enlarged). - */ - if (tso->what_next == ThreadRelocated) { - q = (StgClosure *)tso->_link; - *p = q; - goto loop; - } + case STACK: + { + StgStack *stack = (StgStack *)q; - /* To evacuate a small TSO, we need to relocate the update frame - * list it contains. + /* To evacuate a small STACK, we need to adjust the stack pointer */ { - StgTSO *new_tso; + StgStack *new_stack; StgPtr r, s; rtsBool mine; - mine = copyPart(p,(StgClosure *)tso, tso_sizeW(tso), - sizeofW(StgTSO), stp); + mine = copyPart(p,(StgClosure *)stack, stack_sizeW(stack), + sizeofW(StgStack), gen_no); if (mine) { - new_tso = (StgTSO *)*p; - move_TSO(tso, new_tso); - for (r = tso->sp, s = new_tso->sp; - r < tso->stack+tso->stack_size;) { + new_stack = (StgStack *)*p; + move_STACK(stack, new_stack); + for (r = stack->sp, s = new_stack->sp; + r < stack->stack + stack->stack_size;) { *s++ = *r++; } } @@ -724,28 +735,8 @@ loop: } } - case TREC_HEADER: - copy(p,info,q,sizeofW(StgTRecHeader),stp); - return; - - case TVAR_WATCH_QUEUE: - copy(p,info,q,sizeofW(StgTVarWatchQueue),stp); - return; - - case TVAR: - copy(p,info,q,sizeofW(StgTVar),stp); - return; - case TREC_CHUNK: - copy(p,info,q,sizeofW(StgTRecChunk),stp); - return; - - case ATOMIC_INVARIANT: - copy(p,info,q,sizeofW(StgAtomicInvariant),stp); - return; - - case INVARIANT_CHECK_QUEUE: - copy(p,info,q,sizeofW(StgInvariantCheckQueue),stp); + copy(p,info,q,sizeofW(StgTRecChunk),gen_no); return; default: @@ -774,11 +765,7 @@ unchain_thunk_selectors(StgSelector *p, StgClosure *val) prev = NULL; while (p) { -#ifdef THREADED_RTS ASSERT(p->header.info == &stg_WHITEHOLE_info); -#else - ASSERT(p->header.info == &stg_BLACKHOLE_info); -#endif // val must be in to-space. Not always: when we recursively // invoke eval_thunk_selector(), the recursive calls will not // evacuate the value (because we want to select on the value, @@ -801,7 +788,13 @@ unchain_thunk_selectors(StgSelector *p, StgClosure *val) // indirection pointing to itself, and we want the program // to deadlock if it ever enters this closure, so // BLACKHOLE is correct. 
- SET_INFO(p, &stg_BLACKHOLE_info); + + // XXX we do not have BLACKHOLEs any more; replace with + // a THUNK_SELECTOR again. This will go into a loop if it is + // entered, and should result in a NonTermination exception. + ((StgThunk *)p)->payload[0] = val; + write_barrier(); + SET_INFO(p, &stg_sel_0_upd_info); } else { ((StgInd *)p)->indirectee = val; write_barrier(); @@ -831,7 +824,7 @@ eval_thunk_selector (StgClosure **q, StgSelector * p, rtsBool evac) prev_thunk_selector = NULL; // this is a chain of THUNK_SELECTORs that we are going to update // to point to the value of the current THUNK_SELECTOR. Each - // closure on the chain is a BLACKHOLE, and points to the next in the + // closure on the chain is a WHITEHOLE, and points to the next in the // chain with payload[0]. selector_chain: @@ -847,7 +840,7 @@ selector_chain: unchain_thunk_selectors(prev_thunk_selector, (StgClosure *)p); *q = (StgClosure *)p; // shortcut, behave as for: if (evac) evacuate(q); - if (evac && bd->step < gct->evac_step) { + if (evac && bd->gen_no < gct->evac_gen_no) { gct->failed_to_evac = rtsTrue; TICK_GC_FAILED_PROMOTION(); } @@ -869,7 +862,7 @@ selector_chain: } - // BLACKHOLE the selector thunk, since it is now under evaluation. + // WHITEHOLE the selector thunk, since it is now under evaluation. // This is important to stop us going into an infinite loop if // this selector thunk eventually refers to itself. #if defined(THREADED_RTS) @@ -881,7 +874,7 @@ selector_chain: } while (info_ptr == (W_)&stg_WHITEHOLE_info); // make sure someone else didn't get here first... - if (IS_FORWARDING_PTR(p) || + if (IS_FORWARDING_PTR(info_ptr) || INFO_PTR_TO_STRUCT(info_ptr)->type != THUNK_SELECTOR) { // v. tricky now. The THUNK_SELECTOR has been evacuated // by another thread, and is now either a forwarding ptr or IND. @@ -902,7 +895,7 @@ selector_chain: #else // Save the real info pointer (NOTE: not the same as get_itbl()). info_ptr = (StgWord)p->header.info; - SET_INFO(p,&stg_BLACKHOLE_info); + SET_INFO(p,&stg_WHITEHOLE_info); #endif field = INFO_PTR_TO_STRUCT(info_ptr)->layout.selector_offset; @@ -953,12 +946,8 @@ selector_loop: // For the purposes of LDV profiling, we have destroyed // the original selector thunk, p. SET_INFO(p, (StgInfoTable *)info_ptr); - LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC((StgClosure *)p); -#if defined(THREADED_RTS) + OVERWRITING_CLOSURE((StgClosure*)p); SET_INFO(p, &stg_WHITEHOLE_info); -#else - SET_INFO(p, &stg_BLACKHOLE_info); -#endif #endif // the closure in val is now the "value" of the @@ -974,8 +963,6 @@ selector_loop: switch (info->type) { case IND: case IND_PERM: - case IND_OLDGEN: - case IND_OLDGEN_PERM: case IND_STATIC: val = ((StgInd *)val)->indirectee; goto val_loop; @@ -1009,13 +996,38 @@ selector_loop: case IND: case IND_PERM: - case IND_OLDGEN: - case IND_OLDGEN_PERM: case IND_STATIC: // Again, we might need to untag a constructor. selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee ); goto selector_loop; + case BLACKHOLE: + { + StgClosure *r; + const StgInfoTable *i; + r = ((StgInd*)selectee)->indirectee; + + // establish whether this BH has been updated, and is now an + // indirection, as in evacuate(). 
+ if (GET_CLOSURE_TAG(r) == 0) { + i = r->header.info; + if (IS_FORWARDING_PTR(i)) { + r = (StgClosure *)UN_FORWARDING_PTR(i); + i = r->header.info; + } + if (i == &stg_TSO_info + || i == &stg_WHITEHOLE_info + || i == &stg_BLOCKING_QUEUE_CLEAN_info + || i == &stg_BLOCKING_QUEUE_DIRTY_info) { + goto bale_out; + } + ASSERT(i != &stg_IND_info); + } + + selectee = UNTAG_CLOSURE( ((StgInd *)selectee)->indirectee ); + goto selector_loop; + } + case THUNK_SELECTOR: { StgClosure *val; @@ -1050,8 +1062,6 @@ selector_loop: case THUNK_1_1: case THUNK_0_2: case THUNK_STATIC: - case CAF_BLACKHOLE: - case BLACKHOLE: // not evaluated yet goto bale_out; @@ -1069,7 +1079,7 @@ bale_out: // check whether it was updated in the meantime. *q = (StgClosure *)p; if (evac) { - copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->step->to); + copy(q,(const StgInfoTable *)info_ptr,(StgClosure *)p,THUNK_SELECTOR_sizeW(),bd->dest_no); } unchain_thunk_selectors(prev_thunk_selector, *q); return;
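
Annotations (editorial notes on the patch above; not part of the commit):

1. alloc_for_copy(): the allocation fast path now bumps ws->todo_free
   before the limit check, instead of after it, so the common case
   compiles down to a load, an add, a store and a compare, and only the
   rare block-overflow case calls out of line to todo_block_full(). A
   minimal sketch of the same bump-then-check pattern, using hypothetical
   names (Workspace, grab_new_block) rather than the RTS types:

       #include <stdlib.h>

       /* Hypothetical stand-ins for gen_workspace and todo_block_full(). */
       typedef struct {
           size_t *free_p;      /* next free word in the current block */
           size_t *lim;         /* first word past the usable area     */
           size_t  block_words; /* capacity of a fresh block           */
       } Workspace;

       /* Slow path: chain a new block and carve the request off its front. */
       static size_t *grab_new_block(Workspace *ws, size_t size)
       {
           size_t *blk = malloc(ws->block_words * sizeof(size_t));
           ws->free_p = blk + size;
           ws->lim    = blk + ws->block_words;
           return blk;
       }

       /* Fast path, shaped like the patched alloc_for_copy():
        * bump first, test afterwards. */
       static size_t *bump_alloc(Workspace *ws, size_t size)
       {
           size_t *to = ws->free_p;
           ws->free_p += size;           /* optimistically claim the space */
           if (ws->free_p > ws->lim)     /* rare: block exhausted          */
               to = grab_new_block(ws, size);
           return to;
       }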
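2. copy_tag_nolock() and copyPart(): the store that publishes the
   forwarding pointer now happens only after the payload has been copied,
   with a write_barrier() in between ("if somebody else reads the
   forwarding pointer, we better make sure there's a closure at the end
   of it"). A sketch of the same publish-after-initialise pattern in
   portable C11 atomics; the RTS uses its own write_barrier() rather than
   <stdatomic.h>, and Obj/FWD_TAG here are illustrative:

       #include <stdatomic.h>
       #include <stdint.h>
       #include <stddef.h>

       #define FWD_TAG ((uintptr_t)1) /* low-bit tag, like MK_FORWARDING_PTR */

       typedef struct Obj {
           _Atomic(uintptr_t) header; /* info pointer or forwarding pointer */
           uintptr_t payload[4];
       } Obj;

       /* Build the to-space copy completely, then publish the forwarding
        * pointer with release semantics: any thread that reads the
        * forwarding pointer is guaranteed to find a finished closure. */
       static void forward(Obj *src, Obj *to, uintptr_t info)
       {
           atomic_store_explicit(&to->header, info, memory_order_relaxed);
           for (size_t i = 0; i < 4; i++)
               to->payload[i] = src->payload[i];
           atomic_store_explicit(&src->header, (uintptr_t)to | FWD_TAG,
                                 memory_order_release);
       }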
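3. The promotion rules described in the comment above evacuate() reduce
   to one destination-selection check, applied at the top of both
   alloc_for_copy() and evacuate_large(). A condensed sketch (generation
   numbers are plain nats now that steps are gone; the function name is
   illustrative):

       #include <stdbool.h>

       /* obj_dest ~ bd->dest_no, evac_gen_no ~ gct->evac_gen_no */
       static unsigned dest_gen(unsigned obj_dest, unsigned evac_gen_no,
                                bool eager_promotion, bool *failed_to_evac)
       {
           if (obj_dest < evac_gen_no) {
               if (eager_promotion)
                   return evac_gen_no;   /* promote to the requested gen */
               *failed_to_evac = true;   /* too young: the caller records
                                            the object on a mutable list */
           }
           return obj_dest;
       }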
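4. The new BLACKHOLE case in evacuate() (and its twin in
   eval_thunk_selector()) peeks at the indirectee to decide whether the
   hole is still under evaluation, in which case the BLACKHOLE itself is
   copied as a two-word StgInd, or has already been updated with a value,
   in which case the indirection is short-circuited. A sketch of that
   test, with stub info tables in place of stg_TSO_info and friends:

       #include <stdbool.h>
       #include <stdint.h>

       typedef struct Closure {
           const void *info;
           struct Closure *indirectee;
       } Closure;

       /* Stubs standing in for stg_TSO_info, stg_WHITEHOLE_info, ... */
       static const char tso_info, whitehole_info,
                         bq_clean_info, bq_dirty_info;

       #define TAGGED(p)        (((uintptr_t)(p) & 7) != 0) /* tag => value */
       #define IS_FORWARDING(i) (((uintptr_t)(i) & 1) != 0)
       #define UN_FORWARD(i)    ((Closure *)((uintptr_t)(i) & ~(uintptr_t)1))

       /* true  -> still owned by a thread: copy the BLACKHOLE whole;
        * false -> already updated: *val holds the value to follow. */
       static bool under_evaluation(Closure *bh, Closure **val)
       {
           Closure *r = bh->indirectee;
           if (!TAGGED(r)) {
               const void *i = r->info;
               if (IS_FORWARDING(i)) {    /* indirectee already evacuated */
                   r = UN_FORWARD(i);
                   i = r->info;
               }
               if (i == &tso_info || i == &whitehole_info ||
                   i == &bq_clean_info || i == &bq_dirty_info)
                   return true;
           }
           *val = r;
           return false;
       }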
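5. In the THREADED_RTS build, eval_thunk_selector() claims the selector
   thunk by atomically swapping its info pointer for stg_WHITEHOLE_info,
   spinning while another thread holds it (the do/while loop above
   selector_chain), which is what stops two GC threads evaluating the
   same selector at once. The same claim-by-atomic-swap idiom in C11
   atomics; the RTS uses its own atomic primitive on the info word, and
   whitehole_info here is a stand-in:

       #include <stdatomic.h>
       #include <stdint.h>

       static const char whitehole_info; /* stand-in for stg_WHITEHOLE_info */

       /* Spin until we own the closure; the caller keeps the returned
        * original header so it can restore it (or install a forwarding
        * pointer) when evaluation of the selector is done. */
       static uintptr_t claim(_Atomic(uintptr_t) *header)
       {
           uintptr_t old;
           do {
               old = atomic_exchange_explicit(header,
                                              (uintptr_t)&whitehole_info,
                                              memory_order_acquire);
           } while (old == (uintptr_t)&whitehole_info);
           return old;
       }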