X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FScav.c;h=d7e16eaf2a81e91f4dc624705fc49fc00f56860b;hb=890f22ef8eff8dbb5b31fa221dfce65a7b84c202;hp=1b671a097c4a1f7eac6ed0dc1efec3aaf79949cb;hpb=78f5cf9c5421ede69133e48823302375871e52c4;p=ghc-hetmet.git diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c index 1b671a0..d7e16ea 100644 --- a/rts/sm/Scav.c +++ b/rts/sm/Scav.c @@ -46,30 +46,11 @@ static void scavenge_large_bitmap (StgPtr p, Scavenge a TSO. -------------------------------------------------------------------------- */ -STATIC_INLINE void -scavenge_TSO_link (StgTSO *tso) -{ - // We don't always chase the link field: TSOs on the blackhole - // queue are not automatically alive, so the link field is a - // "weak" pointer in that case. - if (tso->why_blocked != BlockedOnBlackHole) { - evacuate((StgClosure **)&tso->_link); - } -} - static void scavengeTSO (StgTSO *tso) { rtsBool saved_eager; - if (tso->what_next == ThreadRelocated) { - // the only way this can happen is if the old TSO was on the - // mutable list. We might have other links to this defunct - // TSO, so we must update its link field. - evacuate((StgClosure**)&tso->_link); - return; - } - debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id); // update the pointer from the Task. @@ -80,33 +61,33 @@ scavengeTSO (StgTSO *tso) saved_eager = gct->eager_promotion; gct->eager_promotion = rtsFalse; + evacuate((StgClosure **)&tso->blocked_exceptions); + evacuate((StgClosure **)&tso->bq); + + // scavange current transaction record + evacuate((StgClosure **)&tso->trec); + + evacuate((StgClosure **)&tso->stackobj); + + evacuate((StgClosure **)&tso->_link); if ( tso->why_blocked == BlockedOnMVar || tso->why_blocked == BlockedOnBlackHole - || tso->why_blocked == BlockedOnMsgWakeup || tso->why_blocked == BlockedOnMsgThrowTo + || tso->why_blocked == NotBlocked ) { evacuate(&tso->block_info.closure); } - evacuate((StgClosure **)&tso->blocked_exceptions); - - // scavange current transaction record - evacuate((StgClosure **)&tso->trec); - - // scavenge this thread's stack - scavenge_stack(tso->sp, &(tso->stack[tso->stack_size])); - - if (gct->failed_to_evac) { - tso->dirty = 1; - scavenge_TSO_link(tso); - } else { - tso->dirty = 0; - scavenge_TSO_link(tso); - if (gct->failed_to_evac) { - tso->flags |= TSO_LINK_DIRTY; - } else { - tso->flags &= ~TSO_LINK_DIRTY; - } +#ifdef THREADED_RTS + // in the THREADED_RTS, block_info.closure must always point to a + // valid closure, because we assume this in throwTo(). In the + // non-threaded RTS it might be a FD (for + // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay) + else { + tso->block_info.closure = (StgClosure *)END_TSO_QUEUE; } +#endif + + tso->dirty = gct->failed_to_evac; gct->eager_promotion = saved_eager; } @@ -332,7 +313,7 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap) while (bitmap != 0) { if ((bitmap & 1) != 0) { -#if defined(__PIC__) && defined(mingw32_TARGET_OS) +#if defined(__PIC__) && defined(mingw32_HOST_OS) // Special-case to handle references to closures hiding out in DLLs, since // double indirections required to get at those. The code generator knows // which is which when generating the SRT, so it stores the (indirect) @@ -553,23 +534,7 @@ scavenge_block (bdescr *bd) } case IND_PERM: - if (bd->gen_no != 0) { -#ifdef PROFILING - // @LDV profiling - // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an - // IND_OLDGEN_PERM closure is larger than an IND_PERM closure. 
- LDV_recordDead((StgClosure *)p, sizeofW(StgInd)); -#endif - // - // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()? - // - SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info); - - // We pretend that p has just been created. - LDV_RECORD_CREATE((StgClosure *)p); - } - // fall through - case IND_OLDGEN_PERM: + case BLACKHOLE: evacuate(&((StgInd *)p)->indirectee); p += sizeofW(StgInd); break; @@ -588,10 +553,25 @@ scavenge_block (bdescr *bd) p += sizeofW(StgMutVar); break; - case CAF_BLACKHOLE: - case BLACKHOLE: - p += BLACKHOLE_sizeW(); - break; + case BLOCKING_QUEUE: + { + StgBlockingQueue *bq = (StgBlockingQueue *)p; + + gct->eager_promotion = rtsFalse; + evacuate(&bq->bh); + evacuate((StgClosure**)&bq->owner); + evacuate((StgClosure**)&bq->queue); + evacuate((StgClosure**)&bq->link); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info; + } else { + bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info; + } + p += sizeofW(StgBlockingQueue); + break; + } case THUNK_SELECTOR: { @@ -665,12 +645,25 @@ scavenge_block (bdescr *bd) case TSO: { - StgTSO *tso = (StgTSO *)p; - scavengeTSO(tso); - p += tso_sizeW(tso); + scavengeTSO((StgTSO *)p); + p += sizeofW(StgTSO); break; } + case STACK: + { + StgStack *stack = (StgStack*)p; + + gct->eager_promotion = rtsFalse; + + scavenge_stack(stack->sp, stack->stack + stack->stack_size); + stack->dirty = gct->failed_to_evac; + p += stack_sizeW(stack); + + gct->eager_promotion = saved_eager_promotion; + break; + } + case MUT_PRIM: { StgPtr end; @@ -882,8 +875,8 @@ scavenge_mark_stack(void) // no "old" generation. break; - case IND_OLDGEN: - case IND_OLDGEN_PERM: + case IND: + case BLACKHOLE: evacuate(&((StgInd *)p)->indirectee); break; @@ -901,8 +894,25 @@ scavenge_mark_stack(void) break; } - case CAF_BLACKHOLE: - case BLACKHOLE: + case BLOCKING_QUEUE: + { + StgBlockingQueue *bq = (StgBlockingQueue *)p; + + gct->eager_promotion = rtsFalse; + evacuate(&bq->bh); + evacuate((StgClosure**)&bq->owner); + evacuate((StgClosure**)&bq->queue); + evacuate((StgClosure**)&bq->link); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info; + } else { + bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info; + } + break; + } + case ARR_WORDS: break; @@ -978,6 +988,19 @@ scavenge_mark_stack(void) break; } + case STACK: + { + StgStack *stack = (StgStack*)p; + + gct->eager_promotion = rtsFalse; + + scavenge_stack(stack->sp, stack->stack + stack->stack_size); + stack->dirty = gct->failed_to_evac; + + gct->eager_promotion = saved_eager_promotion; + break; + } + case MUT_PRIM: { StgPtr end; @@ -1122,10 +1145,25 @@ scavenge_one(StgPtr p) break; } - case CAF_BLACKHOLE: - case BLACKHOLE: - break; - + case BLOCKING_QUEUE: + { + StgBlockingQueue *bq = (StgBlockingQueue *)p; + + gct->eager_promotion = rtsFalse; + evacuate(&bq->bh); + evacuate((StgClosure**)&bq->owner); + evacuate((StgClosure**)&bq->queue); + evacuate((StgClosure**)&bq->link); + gct->eager_promotion = saved_eager_promotion; + + if (gct->failed_to_evac) { + bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info; + } else { + bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info; + } + break; + } + case THUNK_SELECTOR: { StgSelector *s = (StgSelector *)p; @@ -1199,6 +1237,19 @@ scavenge_one(StgPtr p) break; } + case STACK: + { + StgStack *stack = (StgStack*)p; + + gct->eager_promotion = rtsFalse; + + scavenge_stack(stack->sp, stack->stack + stack->stack_size); + stack->dirty = 
gct->failed_to_evac; + + gct->eager_promotion = saved_eager_promotion; + break; + } + case MUT_PRIM: { StgPtr end; @@ -1237,8 +1288,7 @@ scavenge_one(StgPtr p) // IND can happen, for example, when the interpreter allocates // a gigantic AP closure (more than one block), which ends up // on the large-object list and then gets updated. See #3424. - case IND_OLDGEN: - case IND_OLDGEN_PERM: + case BLACKHOLE: case IND_STATIC: evacuate(&((StgInd *)p)->indirectee); @@ -1300,7 +1350,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen) #ifdef DEBUG switch (get_itbl((StgClosure *)p)->type) { case MUT_VAR_CLEAN: - barf("MUT_VAR_CLEAN on mutable list"); + // can happen due to concurrent writeMutVars case MUT_VAR_DIRTY: mutlist_MUTVARS++; break; case MUT_ARR_PTRS_CLEAN: @@ -1347,26 +1397,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen) recordMutableGen_GC((StgClosure *)p,gen->no); continue; } - case TSO: { - StgTSO *tso = (StgTSO *)p; - if (tso->dirty == 0) { - // Should be on the mutable list because its link - // field is dirty. However, in parallel GC we may - // have a thread on multiple mutable lists, so - // this assertion would be invalid: - // ASSERT(tso->flags & TSO_LINK_DIRTY); - - scavenge_TSO_link(tso); - if (gct->failed_to_evac) { - recordMutableGen_GC((StgClosure *)p,gen->no); - gct->failed_to_evac = rtsFalse; - } else { - tso->flags &= ~TSO_LINK_DIRTY; - } - continue; - } - } - default: + default: ; } @@ -1500,23 +1531,21 @@ scavenge_static(void) static void scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size ) { - nat i, b; + nat i, j, b; StgWord bitmap; b = 0; - bitmap = large_bitmap->bitmap[b]; - for (i = 0; i < size; ) { - if ((bitmap & 1) == 0) { - evacuate((StgClosure **)p); - } - i++; - p++; - if (i % BITS_IN(W_) == 0) { - b++; - bitmap = large_bitmap->bitmap[b]; - } else { + + for (i = 0; i < size; b++) { + bitmap = large_bitmap->bitmap[b]; + j = stg_min(size-i, BITS_IN(W_)); + i += j; + for (; j > 0; j--, p++) { + if ((bitmap & 1) == 0) { + evacuate((StgClosure **)p); + } bitmap = bitmap >> 1; - } + } } } @@ -1576,10 +1605,12 @@ scavenge_stack(StgPtr p, StgPtr stack_end) // before GC, but that seems like overkill. // // Scavenging this update frame as normal would be disastrous; - // the updatee would end up pointing to the value. So we turn - // the indirection into an IND_PERM, so that evacuate will - // copy the indirection into the old generation instead of - // discarding it. + // the updatee would end up pointing to the value. So we + // check whether the value after evacuation is a BLACKHOLE, + // and if not, we change the update frame to an stg_enter + // frame that simply returns the value. Hence, blackholing is + // compulsory (otherwise we would have to check for thunks + // too). // // Note [upd-black-hole] // One slight hiccup is that the THUNK_SELECTOR machinery can @@ -1590,22 +1621,17 @@ scavenge_stack(StgPtr p, StgPtr stack_end) // the updatee is never a THUNK_SELECTOR and we're ok. // NB. this is a new invariant: blackholing is not optional. 
{ - nat type; - const StgInfoTable *i; - StgClosure *updatee; - - updatee = ((StgUpdateFrame *)p)->updatee; - i = updatee->header.info; - if (!IS_FORWARDING_PTR(i)) { - type = get_itbl(updatee)->type; - if (type == IND) { - updatee->header.info = &stg_IND_PERM_info; - } else if (type == IND_OLDGEN) { - updatee->header.info = &stg_IND_OLDGEN_PERM_info; - } + StgUpdateFrame *frame = (StgUpdateFrame *)p; + StgClosure *v; + + evacuate(&frame->updatee); + v = frame->updatee; + if (GET_CLOSURE_TAG(v) != 0 || + (get_itbl(v)->type != BLACKHOLE)) { + // blackholing is compulsory, see above. + frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info; } - evacuate(&((StgUpdateFrame *)p)->updatee); - ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0); + ASSERT(v->header.info != &stg_TSO_info); p += sizeofW(StgUpdateFrame); continue; } @@ -1614,6 +1640,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end) case CATCH_STM_FRAME: case CATCH_RETRY_FRAME: case ATOMICALLY_FRAME: + case UNDERFLOW_FRAME: case STOP_FRAME: case CATCH_FRAME: case RET_SMALL:
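
Note on the rewritten scavenge_large_bitmap hunk above: the new loop fetches each bitmap word once, clamps the number of entries it covers with stg_min(size-i, BITS_IN(W_)), and then shifts that word bit by bit, so the per-entry modulo test disappears and the loop never reads a bitmap word beyond the last one it needs. The following is a minimal standalone sketch of that loop shape, not GHC code: walk_large_bitmap, visit_entry and WORD_BITS are made-up names, and it assumes the same convention as the RTS stack bitmaps, where a clear bit marks a pointer slot.

/* Standalone sketch (not GHC code): mirrors the word-at-a-time structure of
 * the new scavenge_large_bitmap.  walk_large_bitmap, visit_entry and
 * WORD_BITS are illustrative names, not RTS identifiers.  Assumed
 * convention, as in RTS stack bitmaps: a CLEAR bit marks a pointer slot. */
#include <stddef.h>
#include <stdio.h>

#define WORD_BITS (8 * sizeof(size_t))   /* bits per bitmap word */

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Stand-in for evacuate(): just report which slot would be scavenged. */
static void visit_entry(size_t idx) { printf("pointer slot at %zu\n", idx); }

/* size = number of entries described by the bitmap.  Each bitmap word is
 * fetched once and then shifted; the inner bound j is clamped so the last,
 * partially used word is never overrun. */
static void walk_large_bitmap(const size_t *bitmap_words, size_t size)
{
    size_t i = 0, b = 0;
    while (i < size) {
        size_t bitmap = bitmap_words[b++];
        size_t j = min_sz(size - i, WORD_BITS);   /* entries in this word */
        for (size_t k = 0; k < j; k++, i++) {
            if ((bitmap & 1) == 0) {
                visit_entry(i);
            }
            bitmap >>= 1;
        }
    }
}

int main(void)
{
    /* Two bitmap words; only entries 0, 2 and WORD_BITS have clear bits,
     * so only those slots are treated as pointers. */
    size_t bm[2] = { ~(size_t)0x5, ~(size_t)0x1 };
    walk_large_bitmap(bm, WORD_BITS + 6);
    return 0;
}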