X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FScav.c;h=d77734fd0506c9c458fbbc01ebac3452860151fc;hb=c5b178be60a5a44abd2f4ddf8c399857678326e2;hp=e6234f6c0f79f909e9252d118b298340d9443f8f;hpb=2726a2f10256710cc6ed80b1098cb32e121e1be7;p=ghc-hetmet.git

diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index e6234f6..d77734f 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -51,14 +51,6 @@ scavengeTSO (StgTSO *tso)
 {
     rtsBool saved_eager;
 
-    if (tso->what_next == ThreadRelocated) {
-        // the only way this can happen is if the old TSO was on the
-        // mutable list. We might have other links to this defunct
-        // TSO, so we must update its link field.
-        evacuate((StgClosure**)&tso->_link);
-        return;
-    }
-
     debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
 
     // update the pointer from the Task.
@@ -69,22 +61,17 @@ scavengeTSO (StgTSO *tso)
     saved_eager = gct->eager_promotion;
     gct->eager_promotion = rtsFalse;
 
-
     evacuate((StgClosure **)&tso->blocked_exceptions);
     evacuate((StgClosure **)&tso->bq);
 
     // scavange current transaction record
     evacuate((StgClosure **)&tso->trec);
-
-    // scavenge this thread's stack
-    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
 
-    tso->dirty = gct->failed_to_evac;
+    evacuate((StgClosure **)&tso->stackobj);
 
     evacuate((StgClosure **)&tso->_link);
     if (   tso->why_blocked == BlockedOnMVar
         || tso->why_blocked == BlockedOnBlackHole
-        || tso->why_blocked == BlockedOnMsgWakeup
         || tso->why_blocked == BlockedOnMsgThrowTo
         || tso->why_blocked == NotBlocked
         ) {
@@ -100,11 +87,7 @@ scavengeTSO (StgTSO *tso)
     }
 #endif
 
-    if (tso->dirty == 0 && gct->failed_to_evac) {
-        tso->flags |= TSO_LINK_DIRTY;
-    } else {
-        tso->flags &= ~TSO_LINK_DIRTY;
-    }
+    tso->dirty = gct->failed_to_evac;
 
     gct->eager_promotion = saved_eager;
 }
@@ -330,7 +313,7 @@ scavenge_srt (StgClosure **srt, nat srt_bitmap)
 
   while (bitmap != 0) {
       if ((bitmap & 1) != 0) {
-#if defined(__PIC__) && defined(mingw32_TARGET_OS)
+#if defined(__PIC__) && defined(mingw32_HOST_OS)
          // Special-case to handle references to closures hiding out in DLLs, since
         // double indirections required to get at those. The code generator knows
         // which is which when generating the SRT, so it stores the (indirect)
@@ -379,11 +362,11 @@ scavenge_fun_srt(const StgInfoTable *info)
 /* -----------------------------------------------------------------------------
    Scavenge a block from the given scan pointer up to bd->free.
 
-   evac_gen is set by the caller to be either zero (for a step in a
+   evac_gen_no is set by the caller to be either zero (for a step in a
    generation < N) or G where G is the generation of the step being
    scavenged.
 
-   We sometimes temporarily change evac_gen back to zero if we're
+   We sometimes temporarily change evac_gen_no back to zero if we're
    scavenging a mutable object where eager promotion isn't such a good
    idea.
   -------------------------------------------------------------------------- */
@@ -400,7 +383,7 @@ scavenge_block (bdescr *bd)
           bd->start, bd->gen_no, bd->u.scan);
 
   gct->scan_bd = bd;
-  gct->evac_gen = bd->gen;
+  gct->evac_gen_no = bd->gen_no;
   saved_eager_promotion = gct->eager_promotion;
   gct->failed_to_evac = rtsFalse;
 
@@ -551,23 +534,6 @@ scavenge_block (bdescr *bd)
     }
 
     case IND_PERM:
-      if (bd->gen_no != 0) {
-#ifdef PROFILING
-        // @LDV profiling
-        // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
-        // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
-        LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
-#endif
-        //
-        // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
-        //
-        SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
-
-        // We pretend that p has just been created.
-        LDV_RECORD_CREATE((StgClosure *)p);
-      }
-        // fall through
-    case IND_OLDGEN_PERM:
     case BLACKHOLE:
         evacuate(&((StgInd *)p)->indirectee);
         p += sizeofW(StgInd);
@@ -679,12 +645,25 @@ scavenge_block (bdescr *bd)
 
     case TSO:
     {
-        StgTSO *tso = (StgTSO *)p;
-        scavengeTSO(tso);
-        p += tso_sizeW(tso);
+        scavengeTSO((StgTSO *)p);
+        p += sizeofW(StgTSO);
         break;
     }
 
+    case STACK:
+    {
+        StgStack *stack = (StgStack*)p;
+
+        gct->eager_promotion = rtsFalse;
+
+        scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+        stack->dirty = gct->failed_to_evac;
+        p += stack_sizeW(stack);
+
+        gct->eager_promotion = saved_eager_promotion;
+        break;
+    }
+
     case MUT_PRIM:
     {
         StgPtr end;
@@ -775,7 +754,7 @@ scavenge_mark_stack(void)
     StgInfoTable *info;
     rtsBool saved_eager_promotion;
 
-    gct->evac_gen = oldest_gen;
+    gct->evac_gen_no = oldest_gen->no;
     saved_eager_promotion = gct->eager_promotion;
 
     while ((p = pop_mark_stack())) {
@@ -896,8 +875,7 @@ scavenge_mark_stack(void)
             // no "old" generation.
             break;
 
-        case IND_OLDGEN:
-        case IND_OLDGEN_PERM:
+        case IND:
         case BLACKHOLE:
             evacuate(&((StgInd *)p)->indirectee);
             break;
@@ -1010,6 +988,19 @@ scavenge_mark_stack(void)
             break;
         }
 
+        case STACK:
+        {
+            StgStack *stack = (StgStack*)p;
+
+            gct->eager_promotion = rtsFalse;
+
+            scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+            stack->dirty = gct->failed_to_evac;
+
+            gct->eager_promotion = saved_eager_promotion;
+            break;
+        }
+
         case MUT_PRIM:
         {
             StgPtr end;
@@ -1050,8 +1041,8 @@ scavenge_mark_stack(void)
 
         if (gct->failed_to_evac) {
             gct->failed_to_evac = rtsFalse;
-            if (gct->evac_gen) {
-                recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
+            if (gct->evac_gen_no) {
+                recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
             }
         }
     } // while (p = pop_mark_stack())
@@ -1246,6 +1237,19 @@ scavenge_one(StgPtr p)
         break;
     }
 
+    case STACK:
+    {
+        StgStack *stack = (StgStack*)p;
+
+        gct->eager_promotion = rtsFalse;
+
+        scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+        stack->dirty = gct->failed_to_evac;
+
+        gct->eager_promotion = saved_eager_promotion;
+        break;
+    }
+
     case MUT_PRIM:
     {
         StgPtr end;
@@ -1284,8 +1288,6 @@ scavenge_one(StgPtr p)
       // IND can happen, for example, when the interpreter allocates
       // a gigantic AP closure (more than one block), which ends up
       // on the large-object list and then gets updated. See #3424.
-    case IND_OLDGEN:
-    case IND_OLDGEN_PERM:
     case BLACKHOLE:
     case IND_STATIC:
         evacuate(&((StgInd *)p)->indirectee);
@@ -1338,8 +1340,10 @@ void
 scavenge_mutable_list(bdescr *bd, generation *gen)
 {
     StgPtr p, q;
+    nat gen_no;
 
-    gct->evac_gen = gen;
+    gen_no = gen->no;
+    gct->evac_gen_no = gen_no;
     for (; bd != NULL; bd = bd->link) {
         for (q = bd->start; q < bd->free; q++) {
             p = (StgPtr)*q;
@@ -1374,7 +1378,7 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
             //
             switch (get_itbl((StgClosure *)p)->type) {
             case MUT_ARR_PTRS_CLEAN:
-                recordMutableGen_GC((StgClosure *)p,gen->no);
+                recordMutableGen_GC((StgClosure *)p,gen_no);
                 continue;
             case MUT_ARR_PTRS_DIRTY:
             {
@@ -1392,44 +1396,17 @@ scavenge_mutable_list(bdescr *bd, generation *gen)
                 gct->eager_promotion = saved_eager_promotion;
                 gct->failed_to_evac = rtsFalse;
 
-                recordMutableGen_GC((StgClosure *)p,gen->no);
+                recordMutableGen_GC((StgClosure *)p,gen_no);
                 continue;
             }
 
-            case TSO: {
-                StgTSO *tso = (StgTSO *)p;
-                if (tso->dirty == 0) {
-                    // Should be on the mutable list because its link
-                    // field is dirty. However, in parallel GC we may
-                    // have a thread on multiple mutable lists, so
-                    // this assertion would be invalid:
-                    // ASSERT(tso->flags & TSO_LINK_DIRTY);
-
-                    evacuate((StgClosure **)&tso->_link);
-                    if (   tso->why_blocked == BlockedOnMVar
-                        || tso->why_blocked == BlockedOnBlackHole
-                        || tso->why_blocked == BlockedOnMsgWakeup
-                        || tso->why_blocked == BlockedOnMsgThrowTo
-                        || tso->why_blocked == NotBlocked
-                        ) {
-                        evacuate((StgClosure **)&tso->block_info.prev);
-                    }
-                    if (gct->failed_to_evac) {
-                        recordMutableGen_GC((StgClosure *)p,gen->no);
-                        gct->failed_to_evac = rtsFalse;
-                    } else {
-                        tso->flags &= ~TSO_LINK_DIRTY;
-                    }
-                    continue;
-                }
-            }
-            default:
+            default: ;
             }
 
             if (scavenge_one(p)) {
                 // didn't manage to promote everything, so put the
                 // object back on the list.
-                recordMutableGen_GC((StgClosure *)p,gen->no);
+                recordMutableGen_GC((StgClosure *)p,gen_no);
             }
         }
     }
@@ -1471,7 +1448,7 @@ scavenge_static(void)
 
   /* Always evacuate straight to the oldest generation for static
    * objects */
-  gct->evac_gen = oldest_gen;
+  gct->evac_gen_no = oldest_gen->no;
 
   /* keep going until we've scavenged all the objects on the linked
      list... */
@@ -1556,23 +1533,21 @@ scavenge_static(void)
 
 static void
 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
 {
-    nat i, b;
+    nat i, j, b;
     StgWord bitmap;
 
     b = 0;
-    bitmap = large_bitmap->bitmap[b];
-    for (i = 0; i < size; ) {
-        if ((bitmap & 1) == 0) {
-            evacuate((StgClosure **)p);
-        }
-        i++;
-        p++;
-        if (i % BITS_IN(W_) == 0) {
-            b++;
-            bitmap = large_bitmap->bitmap[b];
-        } else {
+
+    for (i = 0; i < size; b++) {
+        bitmap = large_bitmap->bitmap[b];
+        j = stg_min(size-i, BITS_IN(W_));
+        i += j;
+        for (; j > 0; j--, p++) {
+            if ((bitmap & 1) == 0) {
+                evacuate((StgClosure **)p);
+            }
             bitmap = bitmap >> 1;
-        }
+        }
     }
 }
@@ -1667,6 +1642,7 @@ scavenge_stack(StgPtr p, StgPtr stack_end)
     case CATCH_STM_FRAME:
     case CATCH_RETRY_FRAME:
     case ATOMICALLY_FRAME:
+    case UNDERFLOW_FRAME:
     case STOP_FRAME:
     case CATCH_FRAME:
     case RET_SMALL:
@@ -1767,7 +1743,7 @@ scavenge_large (gen_workspace *ws)
     bdescr *bd;
     StgPtr p;
 
-    gct->evac_gen = ws->gen;
+    gct->evac_gen_no = ws->gen->no;
 
     bd = ws->todo_large_objects;
 
@@ -1779,10 +1755,10 @@ scavenge_large (gen_workspace *ws)
         // the front when evacuating.
         ws->todo_large_objects = bd->link;
 
-        ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
+        ACQUIRE_SPIN_LOCK(&ws->gen->sync);
         dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
         ws->gen->n_scavenged_large_blocks += bd->blocks;
-        RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);
+        RELEASE_SPIN_LOCK(&ws->gen->sync);
 
         p = bd->start;
         if (scavenge_one(p)) {
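
As a rough, standalone illustration of the word-at-a-time bitmap walk that the rewritten scavenge_large_bitmap above uses (fetch one bitmap word, take j = min(remaining fields, bits per word), then visit every payload word whose bit is clear), here is a hedged sketch in plain C. The names walk_large_bitmap, visit, min_sz and BITS_PER_WORD are invented for this example and are not RTS identifiers; the real code operates on StgPtr/StgLargeBitmap and calls evacuate on the selected fields.

#include <stddef.h>
#include <stdint.h>

#define BITS_PER_WORD (8 * sizeof(uintptr_t))

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

/* Visit every payload word whose bitmap bit is clear, consuming one
 * bitmap word per outer iteration (mirrors the rewritten loop above,
 * where (bitmap & 1) == 0 marks a pointer field to be evacuated). */
static void walk_large_bitmap(uintptr_t *payload,
                              const uintptr_t *bitmap_words,
                              size_t size,
                              void (*visit)(uintptr_t *field))
{
    size_t i = 0, b = 0;

    while (i < size) {
        uintptr_t bitmap = bitmap_words[b++];        /* one bitmap word */
        size_t j = min_sz(size - i, BITS_PER_WORD);  /* fields covered by it */
        i += j;
        for (; j > 0; j--, payload++) {
            if ((bitmap & 1) == 0) {
                visit(payload);                      /* e.g. evacuate this field */
            }
            bitmap >>= 1;
        }
    }
}

Compared with the old per-element i % BITS_IN(W_) test, this outer/inner split loads each bitmap word once and only shifts inside the inner loop, which is the point of the rewrite.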