X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FScav.c;h=d01442b34ed8101f31be5bac883d3cf19e33b7e7;hb=4a05e6139d756c0473df7a6dcb257074201f843d;hp=a2ee1ced2756b118091f9d0422a6563203cec3fc;hpb=f2c58035f765712341399be2dcec871757f529e6;p=ghc-hetmet.git

diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index a2ee1ce..d01442b 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -46,17 +46,6 @@ static void scavenge_large_bitmap (StgPtr p,
    Scavenge a TSO.
    -------------------------------------------------------------------------- */
 
-STATIC_INLINE void
-scavenge_TSO_link (StgTSO *tso)
-{
-    // We don't always chase the link field: TSOs on the blackhole
-    // queue are not automatically alive, so the link field is a
-    // "weak" pointer in that case.
-    if (tso->why_blocked != BlockedOnBlackHole) {
-        evacuate((StgClosure **)&tso->_link);
-    }
-}
-
 static void
 scavengeTSO (StgTSO *tso)
 {
@@ -80,13 +69,9 @@ scavengeTSO (StgTSO *tso)
     saved_eager = gct->eager_promotion;
     gct->eager_promotion = rtsFalse;
 
-    if (   tso->why_blocked == BlockedOnMVar
-        || tso->why_blocked == BlockedOnBlackHole
-        || tso->why_blocked == BlockedOnException
-        ) {
-        evacuate(&tso->block_info.closure);
-    }
+    evacuate((StgClosure **)&tso->blocked_exceptions);
+    evacuate((StgClosure **)&tso->bq);
 
     // scavange current transaction record
     evacuate((StgClosure **)&tso->trec);
@@ -94,17 +79,30 @@ scavengeTSO (StgTSO *tso)
     // scavenge this thread's stack
     scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
 
-    if (gct->failed_to_evac) {
-        tso->dirty = 1;
-        scavenge_TSO_link(tso);
+    tso->dirty = gct->failed_to_evac;
+
+    evacuate((StgClosure **)&tso->_link);
+    if (   tso->why_blocked == BlockedOnMVar
+        || tso->why_blocked == BlockedOnBlackHole
+        || tso->why_blocked == BlockedOnMsgThrowTo
+        || tso->why_blocked == NotBlocked
+        ) {
+        evacuate(&tso->block_info.closure);
+    }
+#ifdef THREADED_RTS
+    // in the THREADED_RTS, block_info.closure must always point to a
+    // valid closure, because we assume this in throwTo().  In the
+    // non-threaded RTS it might be a FD (for
+    // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
+    else {
+        tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
+    }
+#endif
+
+    if (tso->dirty == 0 && gct->failed_to_evac) {
+        tso->flags |= TSO_LINK_DIRTY;
     } else {
-        tso->dirty = 0;
-        scavenge_TSO_link(tso);
-        if (gct->failed_to_evac) {
-            tso->flags |= TSO_LINK_DIRTY;
-        } else {
-            tso->flags &= ~TSO_LINK_DIRTY;
-        }
+        tso->flags &= ~TSO_LINK_DIRTY;
     }
 
     gct->eager_promotion = saved_eager;
@@ -331,7 +329,7 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap)
 
     while (bitmap != 0) {
         if ((bitmap & 1) != 0) {
-#if defined(__PIC__) && defined(mingw32_TARGET_OS)
+#if defined(__PIC__) && defined(mingw32_HOST_OS)
             // Special-case to handle references to closures hiding out in DLLs, since
            // double indirections required to get at those. The code generator knows
            // which is which when generating the SRT, so it stores the (indirect)
@@ -394,7 +392,6 @@ scavenge_block (bdescr *bd)
 {
   StgPtr p, q;
   StgInfoTable *info;
-  generation *saved_evac_gen;
   rtsBool saved_eager_promotion;
   gen_workspace *ws;
 
@@ -403,7 +400,6 @@ scavenge_block (bdescr *bd)
 
   gct->scan_bd = bd;
   gct->evac_gen = bd->gen;
-  saved_evac_gen = gct->evac_gen;
   saved_eager_promotion = gct->eager_promotion;
   gct->failed_to_evac = rtsFalse;
 
@@ -532,7 +528,7 @@ scavenge_block (bdescr *bd)
     gen_obj:
     case CONSTR:
     case WEAK:
-    case STABLE_NAME:
+    case PRIM:
     {
         StgPtr end;
 
@@ -554,23 +550,7 @@ scavenge_block (bdescr *bd)
     }
 
     case IND_PERM:
-      if (bd->gen_no != 0) {
-#ifdef PROFILING
-        // @LDV profiling
-        // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
-        // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
-        LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
-#endif
-        //
-        // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
-        //
-        SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
-
-        // We pretend that p has just been created.
-        LDV_RECORD_CREATE((StgClosure *)p);
-      }
-        // fall through
-    case IND_OLDGEN_PERM:
+    case BLACKHOLE:
         evacuate(&((StgInd *)p)->indirectee);
         p += sizeofW(StgInd);
         break;
@@ -589,10 +569,25 @@ scavenge_block (bdescr *bd)
         p += sizeofW(StgMutVar);
         break;
 
-    case CAF_BLACKHOLE:
-    case BLACKHOLE:
-        p += BLACKHOLE_sizeW();
-        break;
+    case BLOCKING_QUEUE:
+    {
+        StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+        gct->eager_promotion = rtsFalse;
+        evacuate(&bq->bh);
+        evacuate((StgClosure**)&bq->owner);
+        evacuate((StgClosure**)&bq->queue);
+        evacuate((StgClosure**)&bq->link);
+        gct->eager_promotion = saved_eager_promotion;
+
+        if (gct->failed_to_evac) {
+            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+        } else {
+            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+        }
+        p += sizeofW(StgBlockingQueue);
+        break;
+    }
 
     case THUNK_SELECTOR:
     {
@@ -672,42 +667,21 @@ scavenge_block (bdescr *bd)
         break;
     }
 
-    case TVAR_WATCH_QUEUE:
+    case MUT_PRIM:
       {
-        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
-        gct->evac_gen = 0;
-        evacuate((StgClosure **)&wq->closure);
-        evacuate((StgClosure **)&wq->next_queue_entry);
-        evacuate((StgClosure **)&wq->prev_queue_entry);
-        gct->evac_gen = saved_evac_gen;
-        gct->failed_to_evac = rtsTrue; // mutable
-        p += sizeofW(StgTVarWatchQueue);
-        break;
-      }
+        StgPtr end;
 
-    case TVAR:
-      {
-        StgTVar *tvar = ((StgTVar *) p);
-        gct->evac_gen = 0;
-        evacuate((StgClosure **)&tvar->current_value);
-        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
-        gct->evac_gen = saved_evac_gen;
-        gct->failed_to_evac = rtsTrue; // mutable
-        p += sizeofW(StgTVar);
-        break;
-      }
+        gct->eager_promotion = rtsFalse;
 
-    case TREC_HEADER:
-      {
-        StgTRecHeader *trec = ((StgTRecHeader *) p);
-        gct->evac_gen = 0;
-        evacuate((StgClosure **)&trec->enclosing_trec);
-        evacuate((StgClosure **)&trec->current_chunk);
-        evacuate((StgClosure **)&trec->invariants_to_check);
-        gct->evac_gen = saved_evac_gen;
+        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+            evacuate((StgClosure **)p);
+        }
+        p += info->layout.payload.nptrs;
+
+        gct->eager_promotion = saved_eager_promotion;
         gct->failed_to_evac = rtsTrue; // mutable
-        p += sizeofW(StgTRecHeader);
-        break;
+        break;
       }
 
     case TREC_CHUNK:
@@ -715,44 +689,19 @@
       {
        StgWord i;
        StgTRecChunk *tc = ((StgTRecChunk *) p);
        TRecEntry *e = &(tc -> entries[0]);
-       gct->evac_gen = 0;
+       gct->eager_promotion = rtsFalse;
       evacuate((StgClosure **)&tc->prev_chunk);
       for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
         evacuate((StgClosure **)&e->tvar);
         evacuate((StgClosure **)&e->expected_value);
         evacuate((StgClosure **)&e->new_value);
       }
-       gct->evac_gen = saved_evac_gen;
+       gct->eager_promotion = saved_eager_promotion;
       gct->failed_to_evac = rtsTrue; // mutable
       p += sizeofW(StgTRecChunk);
       break;
      }
 
-    case ATOMIC_INVARIANT:
-      {
-        StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
-        gct->evac_gen = 0;
-        evacuate(&invariant->code);
-        evacuate((StgClosure **)&invariant->last_execution);
-        gct->evac_gen = saved_evac_gen;
-        gct->failed_to_evac = rtsTrue; // mutable
-        p += sizeofW(StgAtomicInvariant);
-        break;
-      }
-
-    case INVARIANT_CHECK_QUEUE:
-      {
-        StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
-        gct->evac_gen = 0;
-        evacuate((StgClosure **)&queue->invariant);
-        evacuate((StgClosure **)&queue->my_execution);
-        evacuate((StgClosure **)&queue->next_queue_entry);
-        gct->evac_gen = saved_evac_gen;
-        gct->failed_to_evac = rtsTrue; // mutable
-        p += sizeofW(StgInvariantCheckQueue);
-        break;
-      }
-
     default:
         barf("scavenge: unimplemented/strange closure type %d @ %p",
              info->type, p);
@@ -806,10 +755,10 @@
 scavenge_mark_stack(void)
 {
     StgPtr p, q;
     StgInfoTable *info;
-    generation *saved_evac_gen;
+    rtsBool saved_eager_promotion;
 
     gct->evac_gen = oldest_gen;
-    saved_evac_gen = gct->evac_gen;
+    saved_eager_promotion = gct->eager_promotion;
 
     while ((p = pop_mark_stack())) {
 
@@ -822,8 +771,6 @@ scavenge_mark_stack(void)
        case MVAR_CLEAN:
        case MVAR_DIRTY:
        {
-           rtsBool saved_eager_promotion = gct->eager_promotion;
-
            StgMVar *mvar = ((StgMVar *)p);
            gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&mvar->head);
@@ -906,7 +853,7 @@ scavenge_mark_stack(void)
        gen_obj:
        case CONSTR:
        case WEAK:
-       case STABLE_NAME:
+       case PRIM:
        {
            StgPtr end;
 
@@ -931,15 +878,13 @@ scavenge_mark_stack(void)
            // no "old" generation.
            break;
 
-       case IND_OLDGEN:
-       case IND_OLDGEN_PERM:
+       case IND:
+       case BLACKHOLE:
            evacuate(&((StgInd *)p)->indirectee);
            break;
 
        case MUT_VAR_CLEAN:
        case MUT_VAR_DIRTY: {
-           rtsBool saved_eager_promotion = gct->eager_promotion;
-
            gct->eager_promotion = rtsFalse;
            evacuate(&((StgMutVar *)p)->var);
            gct->eager_promotion = saved_eager_promotion;
@@ -952,8 +897,25 @@ scavenge_mark_stack(void)
            break;
        }
 
-       case CAF_BLACKHOLE:
-       case BLACKHOLE:
+       case BLOCKING_QUEUE:
+       {
+           StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+           gct->eager_promotion = rtsFalse;
+           evacuate(&bq->bh);
+           evacuate((StgClosure**)&bq->owner);
+           evacuate((StgClosure**)&bq->queue);
+           evacuate((StgClosure**)&bq->link);
+           gct->eager_promotion = saved_eager_promotion;
+
+           if (gct->failed_to_evac) {
+               bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+           } else {
+               bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+           }
+           break;
+       }
+
        case ARR_WORDS:
            break;
 
@@ -986,13 +948,10 @@ scavenge_mark_stack(void)
        case MUT_ARR_PTRS_DIRTY:
            // follow everything
        {
-           rtsBool saved_eager;
-
            // We don't eagerly promote objects pointed to by a mutable
            // array, but if we find the array only points to objects in
            // the same or an older generation, we mark it "clean" and
            // avoid traversing it during minor GCs.
-           saved_eager = gct->eager_promotion;
            gct->eager_promotion = rtsFalse;
 
            scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
@@ -1003,7 +962,7 @@ scavenge_mark_stack(void)
                ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
            }
 
-           gct->eager_promotion = saved_eager;
+           gct->eager_promotion = saved_eager_promotion;
            gct->failed_to_evac = rtsTrue; // mutable anyhow.
            break;
        }
 
@@ -1032,81 +991,39 @@ scavenge_mark_stack(void)
            break;
        }
 
-       case TVAR_WATCH_QUEUE:
-         {
-           StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
-           gct->evac_gen = 0;
-           evacuate((StgClosure **)&wq->closure);
-           evacuate((StgClosure **)&wq->next_queue_entry);
-           evacuate((StgClosure **)&wq->prev_queue_entry);
-           gct->evac_gen = saved_evac_gen;
-           gct->failed_to_evac = rtsTrue; // mutable
-           break;
-         }
-
-       case TVAR:
-         {
-           StgTVar *tvar = ((StgTVar *) p);
-           gct->evac_gen = 0;
-           evacuate((StgClosure **)&tvar->current_value);
-           evacuate((StgClosure **)&tvar->first_watch_queue_entry);
-           gct->evac_gen = saved_evac_gen;
-           gct->failed_to_evac = rtsTrue; // mutable
-           break;
-         }
-
+       case MUT_PRIM:
+       {
+           StgPtr end;
+
+           gct->eager_promotion = rtsFalse;
+
+           end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+           for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+               evacuate((StgClosure **)p);
+           }
+
+           gct->eager_promotion = saved_eager_promotion;
+           gct->failed_to_evac = rtsTrue; // mutable
+           break;
+       }
+
        case TREC_CHUNK:
          {
            StgWord i;
            StgTRecChunk *tc = ((StgTRecChunk *) p);
            TRecEntry *e = &(tc -> entries[0]);
-           gct->evac_gen = 0;
+           gct->eager_promotion = rtsFalse;
            evacuate((StgClosure **)&tc->prev_chunk);
            for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
              evacuate((StgClosure **)&e->tvar);
             evacuate((StgClosure **)&e->expected_value);
             evacuate((StgClosure **)&e->new_value);
           }
-           gct->evac_gen = saved_evac_gen;
+           gct->eager_promotion = saved_eager_promotion;
           gct->failed_to_evac = rtsTrue; // mutable
           break;
         }
 
-       case TREC_HEADER:
-         {
-           StgTRecHeader *trec = ((StgTRecHeader *) p);
-           gct->evac_gen = 0;
-           evacuate((StgClosure **)&trec->enclosing_trec);
-           evacuate((StgClosure **)&trec->current_chunk);
-           evacuate((StgClosure **)&trec->invariants_to_check);
-           gct->evac_gen = saved_evac_gen;
-           gct->failed_to_evac = rtsTrue; // mutable
-           break;
-         }
-
-       case ATOMIC_INVARIANT:
-         {
-           StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
-           gct->evac_gen = 0;
-           evacuate(&invariant->code);
-           evacuate((StgClosure **)&invariant->last_execution);
-           gct->evac_gen = saved_evac_gen;
-           gct->failed_to_evac = rtsTrue; // mutable
-           break;
-         }
-
-       case INVARIANT_CHECK_QUEUE:
-         {
-           StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
-           gct->evac_gen = 0;
-           evacuate((StgClosure **)&queue->invariant);
-           evacuate((StgClosure **)&queue->my_execution);
-           evacuate((StgClosure **)&queue->next_queue_entry);
-           gct->evac_gen = saved_evac_gen;
-           gct->failed_to_evac = rtsTrue; // mutable
-           break;
-         }
-
        default:
            barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
                 info->type, p);
@@ -1133,9 +1050,11 @@
 static rtsBool
 scavenge_one(StgPtr p)
 {
     const StgInfoTable *info;
-    generation *saved_evac_gen = gct->evac_gen;
     rtsBool no_luck;
+    rtsBool saved_eager_promotion;
+
+    saved_eager_promotion = gct->eager_promotion;
 
     ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
     info = get_itbl((StgClosure *)p);
@@ -1144,8 +1063,6 @@ scavenge_one(StgPtr p)
     case MVAR_CLEAN:
     case MVAR_DIRTY:
     {
-        rtsBool saved_eager_promotion = gct->eager_promotion;
-
         StgMVar *mvar = ((StgMVar *)p);
         gct->eager_promotion = rtsFalse;
         evacuate((StgClosure **)&mvar->head);
@@ -1190,6 +1107,7 @@ scavenge_one(StgPtr p)
     case CONSTR_0_2:
     case CONSTR_2_0:
     case WEAK:
+    case PRIM:
     case IND_PERM:
     {
         StgPtr q, end;
@@ -1204,7 +1122,6 @@ scavenge_one(StgPtr p)
     case MUT_VAR_CLEAN:
     case MUT_VAR_DIRTY: {
         StgPtr q = p;
-        rtsBool saved_eager_promotion = gct->eager_promotion;
 
         gct->eager_promotion = rtsFalse;
         evacuate(&((StgMutVar *)p)->var);
@@ -1218,10 +1135,25 @@ scavenge_one(StgPtr p)
         break;
     }
 
-    case CAF_BLACKHOLE:
-    case BLACKHOLE:
-        break;
-
+    case BLOCKING_QUEUE:
+    {
+        StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+        gct->eager_promotion = rtsFalse;
+        evacuate(&bq->bh);
+        evacuate((StgClosure**)&bq->owner);
+        evacuate((StgClosure**)&bq->queue);
+        evacuate((StgClosure**)&bq->link);
+        gct->eager_promotion = saved_eager_promotion;
+
+        if (gct->failed_to_evac) {
+            bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+        } else {
+            bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+        }
+        break;
+    }
+
     case THUNK_SELECTOR:
     {
         StgSelector *s = (StgSelector *)p;
@@ -1254,13 +1186,10 @@ scavenge_one(StgPtr p)
     case MUT_ARR_PTRS_CLEAN:
     case MUT_ARR_PTRS_DIRTY:
    {
-        rtsBool saved_eager;
-
         // We don't eagerly promote objects pointed to by a mutable
         // array, but if we find the array only points to objects in
         // the same or an older generation, we mark it "clean" and
         // avoid traversing it during minor GCs.
-        saved_eager = gct->eager_promotion;
         gct->eager_promotion = rtsFalse;
 
         scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
@@ -1271,7 +1200,7 @@ scavenge_one(StgPtr p)
            ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
        }
 
-        gct->eager_promotion = saved_eager;
+        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue;
        break;
    }
@@ -1298,87 +1227,45 @@ scavenge_one(StgPtr p)
        break;
    }
 
-    case TVAR_WATCH_QUEUE:
-      {
-        StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
-        gct->evac_gen = 0;
-        evacuate((StgClosure **)&wq->closure);
-        evacuate((StgClosure **)&wq->next_queue_entry);
-        evacuate((StgClosure **)&wq->prev_queue_entry);
-        gct->evac_gen = saved_evac_gen;
-        gct->failed_to_evac = rtsTrue; // mutable
-        break;
-      }
+    case MUT_PRIM:
+    {
+        StgPtr end;
+
+        gct->eager_promotion = rtsFalse;
+
+        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+            evacuate((StgClosure **)p);
+        }
 
-    case TVAR:
-      {
-        StgTVar *tvar = ((StgTVar *) p);
-        gct->evac_gen = 0;
-        evacuate((StgClosure **)&tvar->current_value);
-        evacuate((StgClosure **)&tvar->first_watch_queue_entry);
-        gct->evac_gen = saved_evac_gen;
+        gct->eager_promotion = saved_eager_promotion;
        gct->failed_to_evac = rtsTrue; // mutable
        break;
-      }
 
-    case TREC_HEADER:
-      {
-        StgTRecHeader *trec = ((StgTRecHeader *) p);
-        gct->evac_gen = 0;
-        evacuate((StgClosure **)&trec->enclosing_trec);
-        evacuate((StgClosure **)&trec->current_chunk);
-        evacuate((StgClosure **)&trec->invariants_to_check);
-        gct->evac_gen = saved_evac_gen;
-        gct->failed_to_evac = rtsTrue; // mutable
-        break;
-      }
+    }
 
     case TREC_CHUNK:
      {
       StgWord i;
       StgTRecChunk *tc = ((StgTRecChunk *) p);
       TRecEntry *e = &(tc -> entries[0]);
-       gct->evac_gen = 0;
+       gct->eager_promotion = rtsFalse;
       evacuate((StgClosure **)&tc->prev_chunk);
       for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
         evacuate((StgClosure **)&e->tvar);
         evacuate((StgClosure **)&e->expected_value);
         evacuate((StgClosure **)&e->new_value);
       }
-       gct->evac_gen = saved_evac_gen;
+       gct->eager_promotion = saved_eager_promotion;
       gct->failed_to_evac = rtsTrue; // mutable
       break;
      }
 
-    case ATOMIC_INVARIANT:
-    {
-      StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
-      gct->evac_gen = 0;
-      evacuate(&invariant->code);
-      evacuate((StgClosure **)&invariant->last_execution);
-      gct->evac_gen = saved_evac_gen;
-      gct->failed_to_evac = rtsTrue; // mutable
-      break;
-    }
-
-    case INVARIANT_CHECK_QUEUE:
-    {
-      StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
-      gct->evac_gen = 0;
-      evacuate((StgClosure **)&queue->invariant);
-      evacuate((StgClosure **)&queue->my_execution);
-      evacuate((StgClosure **)&queue->next_queue_entry);
-      gct->evac_gen = saved_evac_gen;
-      gct->failed_to_evac = rtsTrue; // mutable
-      break;
-    }
-
     case IND:
        // IND can happen, for example, when the interpreter allocates
        // a gigantic AP closure (more than one block), which ends up
        // on the large-object list and then gets updated.  See #3424.
-    case IND_OLDGEN:
-    case IND_OLDGEN_PERM:
+    case BLACKHOLE:
     case IND_STATIC:
        evacuate(&((StgInd *)p)->indirectee);
@@ -1440,7 +1327,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
 #ifdef DEBUG
            switch (get_itbl((StgClosure *)p)->type) {
            case MUT_VAR_CLEAN:
-               barf("MUT_VAR_CLEAN on mutable list");
+                // can happen due to concurrent writeMutVars
            case MUT_VAR_DIRTY:
                mutlist_MUTVARS++; break;
            case MUT_ARR_PTRS_CLEAN:
@@ -1470,8 +1357,8 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
                continue;
            case MUT_ARR_PTRS_DIRTY:
            {
-               rtsBool saved_eager;
-               saved_eager = gct->eager_promotion;
+               rtsBool saved_eager_promotion;
+               saved_eager_promotion = gct->eager_promotion;
                gct->eager_promotion = rtsFalse;
 
                scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
@@ -1482,7 +1369,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
                    ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
                }
 
-               gct->eager_promotion = saved_eager;
+               gct->eager_promotion = saved_eager_promotion;
                gct->failed_to_evac = rtsFalse;
                recordMutableGen_GC((StgClosure *)p,gen->no);
                continue;
@@ -1490,11 +1377,20 @@
            case TSO: {
                StgTSO *tso = (StgTSO *)p;
                if (tso->dirty == 0) {
-                   // Must be on the mutable list because its link
-                   // field is dirty.
-                   ASSERT(tso->flags & TSO_LINK_DIRTY);
-
-                   scavenge_TSO_link(tso);
+                   // Should be on the mutable list because its link
+                   // field is dirty.  However, in parallel GC we may
+                   // have a thread on multiple mutable lists, so
+                   // this assertion would be invalid:
+                   // ASSERT(tso->flags & TSO_LINK_DIRTY);
+
+                   evacuate((StgClosure **)&tso->_link);
+                   if (   tso->why_blocked == BlockedOnMVar
+                       || tso->why_blocked == BlockedOnBlackHole
+                       || tso->why_blocked == BlockedOnMsgThrowTo
+                       || tso->why_blocked == NotBlocked
+                       ) {
+                       evacuate((StgClosure **)&tso->block_info.prev);
+                   }
                    if (gct->failed_to_evac) {
                        recordMutableGen_GC((StgClosure *)p,gen->no);
                        gct->failed_to_evac = rtsFalse;
@@ -1638,23 +1534,21 @@ scavenge_static(void)
 static void
 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
 {
-    nat i, b;
+    nat i, j, b;
     StgWord bitmap;
 
     b = 0;
-    bitmap = large_bitmap->bitmap[b];
-    for (i = 0; i < size; ) {
-        if ((bitmap & 1) == 0) {
-            evacuate((StgClosure **)p);
-        }
-        i++;
-        p++;
-        if (i % BITS_IN(W_) == 0) {
-            b++;
-            bitmap = large_bitmap->bitmap[b];
-        } else {
+
+    for (i = 0; i < size; b++) {
+        bitmap = large_bitmap->bitmap[b];
+        j = stg_min(size-i, BITS_IN(W_));
+        i += j;
+        for (; j > 0; j--, p++) {
+            if ((bitmap & 1) == 0) {
+                evacuate((StgClosure **)p);
+            }
            bitmap = bitmap >> 1;
-        }
+        }
    }
 }
@@ -1714,10 +1608,12 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
            // before GC, but that seems like overkill.
            //
            // Scavenging this update frame as normal would be disastrous;
-           // the updatee would end up pointing to the value.  So we turn
-           // the indirection into an IND_PERM, so that evacuate will
-           // copy the indirection into the old generation instead of
-           // discarding it.
+           // the updatee would end up pointing to the value.  So we
+           // check whether the value after evacuation is a BLACKHOLE,
+           // and if not, we change the update frame to an stg_enter
+           // frame that simply returns the value.  Hence, blackholing is
+           // compulsory (otherwise we would have to check for thunks
+           // too).
            //
            // Note [upd-black-hole]
            // One slight hiccup is that the THUNK_SELECTOR machinery can
@@ -1728,22 +1624,17 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
            // overwrite the updatee with an IND.  In parallel GC, this
            // could even be happening concurrently, so we can't check for
            // the IND.  Fortunately if we assume that blackholing is
            // happening (either lazy or eager), then we can be sure that
            // the updatee is never a THUNK_SELECTOR and we're ok.
            // NB. this is a new invariant: blackholing is not optional.
            {
-               nat type;
-               const StgInfoTable *i;
-               StgClosure *updatee;
-
-               updatee = ((StgUpdateFrame *)p)->updatee;
-               i = updatee->header.info;
-               if (!IS_FORWARDING_PTR(i)) {
-                   type = get_itbl(updatee)->type;
-                   if (type == IND) {
-                       updatee->header.info = &stg_IND_PERM_info;
-                   } else if (type == IND_OLDGEN) {
-                       updatee->header.info = &stg_IND_OLDGEN_PERM_info;
-                   }
+               StgUpdateFrame *frame = (StgUpdateFrame *)p;
+               StgClosure *v;
+
+               evacuate(&frame->updatee);
+               v = frame->updatee;
+               if (GET_CLOSURE_TAG(v) != 0 ||
+                   (get_itbl(v)->type != BLACKHOLE)) {
+                   // blackholing is compulsory, see above.
+                   frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
                }
-               evacuate(&((StgUpdateFrame *)p)->updatee);
-               ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
+               ASSERT(v->header.info != &stg_TSO_info);
                p += sizeofW(StgUpdateFrame);
               continue;
           }
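
Editor's note (not part of the patch): the rewritten scavenge_large_bitmap above processes the bitmap one word at a time. It loads each bitmap word once, takes min(size - i, BITS_IN(W_)) slots from it, and evacuates every slot whose bit is clear, instead of reloading the word and testing i % BITS_IN(W_) on every slot as the old loop did. Below is a minimal standalone C sketch of that loop shape, with evacuate() replaced by a counter; walk_bitmap, BITS_IN_WORD, and the sample bitmap are made up for illustration and are not RTS names.

/* bitmap_walk_sketch.c -- standalone illustration, NOT GHC code.
 * walk_bitmap and BITS_IN_WORD are hypothetical stand-ins for
 * scavenge_large_bitmap and BITS_IN(W_) in the RTS. */
#include <stdio.h>
#include <limits.h>

#define BITS_IN_WORD ((unsigned)(sizeof(unsigned long) * CHAR_BIT))

static unsigned walk_bitmap(const unsigned long *bitmap, unsigned size)
{
    unsigned i, j, b, ptrs = 0;

    for (i = 0, b = 0; i < size; b++) {
        unsigned long w = bitmap[b];            /* load each word once */
        /* process up to one word's worth of slots, stopping at size */
        j = (size - i < BITS_IN_WORD) ? size - i : BITS_IN_WORD;
        i += j;
        for (; j > 0; j--) {
            if ((w & 1) == 0) {
                ptrs++;   /* clear bit = pointer slot; the GC would
                             call evacuate() on this slot here */
            }
            w >>= 1;
        }
    }
    return ptrs;
}

int main(void)
{
    /* one full word plus 6 extra slots; set bits mark non-pointers */
    unsigned long bm[2] = { 0x5UL, 0x3UL };
    unsigned size = BITS_IN_WORD + 6;

    printf("pointer slots: %u\n", walk_bitmap(bm, size));
    return 0;
}

For the sample bitmap this prints BITS_IN(W_) + 2, i.e. 66 on a 64-bit platform: 62 pointer slots in the first word (two bits set) plus 4 in the 6-slot tail of the second word.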