X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FScav.c;h=674078f558cd419287f942d0b03b61bba0e11ff4;hb=f732e7862bb1fcc65fcfbcfb6eaaf6dde39fdd5f;hp=17e519db2a745e16bdc002d86db77395089b5c79;hpb=d13df738cbbe8017ae19ae2702f4e10805ee521b;p=ghc-hetmet.git

diff --git a/rts/sm/Scav.c b/rts/sm/Scav.c
index 17e519d..674078f 100644
--- a/rts/sm/Scav.c
+++ b/rts/sm/Scav.c
@@ -134,6 +134,19 @@ scavenge_fun_srt(const StgInfoTable *info)
 static void
 scavengeTSO (StgTSO *tso)
 {
+    rtsBool saved_eager;
+
+    if (tso->what_next == ThreadRelocated) {
+        // the only way this can happen is if the old TSO was on the
+        // mutable list.  We might have other links to this defunct
+        // TSO, so we must update its link field.
+        evacuate((StgClosure**)&tso->_link);
+        return;
+    }
+
+    saved_eager = gct->eager_promotion;
+    gct->eager_promotion = rtsFalse;
+
     if (   tso->why_blocked == BlockedOnMVar
         || tso->why_blocked == BlockedOnBlackHole
         || tso->why_blocked == BlockedOnException
@@ -154,6 +167,14 @@ scavengeTSO (StgTSO *tso)
 
     // scavenge this thread's stack
     scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
+
+    if (gct->failed_to_evac) {
+        tso->flags |= TSO_DIRTY;
+    } else {
+        tso->flags &= ~TSO_DIRTY;
+    }
+
+    gct->eager_promotion = saved_eager;
 }
 
 /* -----------------------------------------------------------------------------
@@ -275,7 +296,7 @@ linear_scan:
         info = get_itbl((StgClosure *)p);
 
         q = p;
-        switch (info->type) {
+        switch (((volatile StgWord *)info)[1] & 0xffff) {
 
         case MVAR_CLEAN:
         case MVAR_DIRTY:
@@ -494,19 +515,7 @@ linear_scan:
 
         case TSO:
         {
-            StgTSO *tso = (StgTSO *)p;
-            rtsBool saved_eager = gct->eager_promotion;
-
-            gct->eager_promotion = rtsFalse;
-            scavengeTSO(tso);
-            gct->eager_promotion = saved_eager;
-
-            if (gct->failed_to_evac) {
-                tso->flags |= TSO_DIRTY;
-            } else {
-                tso->flags &= ~TSO_DIRTY;
-            }
-
+            scavengeTSO((StgTSO*)p);
             gct->failed_to_evac = rtsTrue; // always on the mutable list
             break;
         }
@@ -826,19 +835,7 @@ scavenge_one(StgPtr p)
 
     case TSO:
     {
-        StgTSO *tso = (StgTSO *)p;
-        rtsBool saved_eager = gct->eager_promotion;
-
-        gct->eager_promotion = rtsFalse;
-        scavengeTSO(tso);
-        gct->eager_promotion = saved_eager;
-
-        if (gct->failed_to_evac) {
-            tso->flags |= TSO_DIRTY;
-        } else {
-            tso->flags &= ~TSO_DIRTY;
-        }
-
+        scavengeTSO((StgTSO*)p);
         gct->failed_to_evac = rtsTrue; // always on the mutable list
         break;
     }
@@ -1052,7 +1049,7 @@ scavenge_mutable_list(generation *gen)
     }
 
     // free the old mut_list
-    freeChain(gen->saved_mut_list);
+    freeChain_sync(gen->saved_mut_list);
     gen->saved_mut_list = NULL;
 }
 
@@ -1404,40 +1401,41 @@
 static rtsBool
 scavenge_find_global_work (void)
 {
     bdescr *bd;
-    int g, s;
+    int s;
     rtsBool flag;
     step_workspace *ws;
 
+    gct->scav_global_work++;
+
     flag = rtsFalse;
-    for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
-        for (s = generations[g].n_steps; --s >= 0; ) {
-            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
-                continue;
-            }
-            ws = &gct->steps[g][s];
+    for (s = total_steps-1; s>=0; s--)
+    {
+        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+            continue;
+        }
+        ws = &gct->steps[s];
 
-            // If we have any large objects to scavenge, do them now.
-            if (ws->todo_large_objects) {
-                scavenge_large(ws);
-                flag = rtsTrue;
-            }
+        // If we have any large objects to scavenge, do them now.
+        if (ws->todo_large_objects) {
+            scavenge_large(ws);
+            flag = rtsTrue;
+        }
 
-            if ((bd = grab_todo_block(ws)) != NULL) {
-                // no need to assign this to ws->scan_bd, we're going
-                // to scavenge the whole thing and then push it on
-                // our scavd list.  This saves pushing out the
-                // scan_bd block, which might be partial.
-                if (N == 0) {
-                    scavenge_block0(bd, bd->start);
-                } else {
-                    scavenge_block(bd, bd->start);
-                }
-                push_scan_block(bd, ws);
-                return rtsTrue;
-            }
+        if ((bd = grab_todo_block(ws)) != NULL) {
+            // no need to assign this to ws->scan_bd, we're going
+            // to scavenge the whole thing and then push it on
+            // our scavd list.  This saves pushing out the
+            // scan_bd block, which might be partial.
+            if (N == 0) {
+                scavenge_block0(bd, bd->start);
+            } else {
+                scavenge_block(bd, bd->start);
+            }
+            push_scan_block(bd, ws);
+            return rtsTrue;
+        }
-            if (flag) return rtsTrue;
-        }
+        if (flag) return rtsTrue;
     }
     return rtsFalse;
 }
@@ -1457,55 +1455,60 @@ scavenge_find_global_work (void)
 static rtsBool
 scavenge_find_local_work (void)
 {
-    int g, s;
+    int s;
     step_workspace *ws;
     rtsBool flag;
 
-    flag = rtsFalse;
-    for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
-        for (s = generations[g].n_steps; --s >= 0; ) {
-            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
-                continue;
-            }
-            ws = &gct->steps[g][s];
-
-            // If we have a todo block and no scan block, start
-            // scanning the todo block.
-            if (ws->scan_bd == NULL && ws->todo_bd != NULL)
-            {
-                ws->scan_bd = ws->todo_bd;
-                ws->scan = ws->scan_bd->start;
-            }
+    gct->scav_local_work++;
 
-            // If we have a scan block with some work to do,
-            // scavenge everything up to the free pointer.
-            if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
-            {
-                if (N == 0) {
-                    scavenge_block0(ws->scan_bd, ws->scan);
-                } else {
-                    scavenge_block(ws->scan_bd, ws->scan);
-                }
-                ws->scan = ws->scan_bd->free;
-                flag = rtsTrue;
-            }
-
-            if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free
-                && ws->scan_bd != ws->todo_bd)
-            {
-                // we're not going to evac any more objects into
-                // this block, so push it now.
-                push_scan_block(ws->scan_bd, ws);
-                ws->scan_bd = NULL;
-                ws->scan = NULL;
-                // we might be able to scan the todo block now. But
-                // don't do it right away: there might be full blocks
-                // waiting to be scanned as a result of scavenge_block above.
-                flag = rtsTrue;
-            }
+    flag = rtsFalse;
+    for (s = total_steps-1; s >= 0; s--) {
+        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+            continue;
+        }
+        ws = &gct->steps[s];
 
-            if (flag) return rtsTrue;
-        }
+        if (ws->todo_bd != NULL)
+        {
+            ws->todo_bd->free = ws->todo_free;
+        }
+
+        // If we have a todo block and no scan block, start
+        // scanning the todo block.
+        if (ws->scan_bd == NULL && ws->todo_bd != NULL)
+        {
+            ws->scan_bd = ws->todo_bd;
+            ws->scan = ws->scan_bd->start;
+        }
+
+        // If we have a scan block with some work to do,
+        // scavenge everything up to the free pointer.
+        if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
+        {
+            if (N == 0) {
+                scavenge_block0(ws->scan_bd, ws->scan);
+            } else {
+                scavenge_block(ws->scan_bd, ws->scan);
+            }
+            ws->scan = ws->scan_bd->free;
+            flag = rtsTrue;
+        }
+
+        if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free
+            && ws->scan_bd != ws->todo_bd)
+        {
+            // we're not going to evac any more objects into
+            // this block, so push it now.
+            push_scan_block(ws->scan_bd, ws);
+            ws->scan_bd = NULL;
+            ws->scan = NULL;
+            // we might be able to scan the todo block now. But
+            // don't do it right away: there might be full blocks
+            // waiting to be scanned as a result of scavenge_block above.
+            flag = rtsTrue;
+        }
+
+        if (flag) return rtsTrue;
     }
     return rtsFalse;
 }
@@ -1549,9 +1552,11 @@ loop:
 rtsBool
 any_work (void)
 {
-    int g, s;
+    int s;
     step_workspace *ws;
 
+    gct->any_work++;
+
     write_barrier();
 
     // scavenge static objects
@@ -1568,15 +1573,13 @@ any_work (void)
     // Check for global work in any step. We don't need to check for
     // local work, because we have already exited scavenge_loop(),
     // which means there is no local work for this thread.
-    for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
-        for (s = generations[g].n_steps; --s >= 0; ) {
-            if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
-                continue;
-            }
-            ws = &gct->steps[g][s];
-            if (ws->todo_large_objects) return rtsTrue;
-            if (ws->stp->todos) return rtsTrue;
-        }
+    for (s = total_steps-1; s >= 0; s--) {
+        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+            continue;
+        }
+        ws = &gct->steps[s];
+        if (ws->todo_large_objects) return rtsTrue;
+        if (ws->stp->todos) return rtsTrue;
     }
 
     return rtsFalse;
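Note (not part of the patch): a minimal standalone sketch of the bookkeeping that this change moves from the TSO cases of scavenge_block/scavenge_one into scavengeTSO itself. Eager promotion is switched off around the TSO traversal, the TSO is marked dirty only if something failed to evacuate, and the saved eager_promotion flag is restored afterwards. The types and names below (gc_state, scavenge_tso_payload, etc.) are simplified stand-ins, not the real RTS definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the RTS types; only the fields used here. */
struct gc_state { bool eager_promotion; bool failed_to_evac; };
struct tso      { unsigned flags; };

#define TSO_DIRTY 1u

static struct gc_state gct_state, *gct = &gct_state;

/* Placeholder for scavenging the TSO's pointers and stack; here it just
 * pretends that one object could not be evacuated to the desired step. */
static void scavenge_tso_payload(struct tso *t)
{
    (void)t;
    gct->failed_to_evac = true;
}

/* Mirrors the pattern now inside scavengeTSO: save and clear
 * eager_promotion, do the work, record dirtiness, restore the flag. */
static void scavenge_tso(struct tso *t)
{
    bool saved_eager = gct->eager_promotion;
    gct->eager_promotion = false;

    scavenge_tso_payload(t);

    if (gct->failed_to_evac) {
        t->flags |= TSO_DIRTY;      /* thread must stay on the mutable list */
    } else {
        t->flags &= ~TSO_DIRTY;
    }

    gct->eager_promotion = saved_eager;
}

int main(void)
{
    struct tso t = { 0 };
    gct->eager_promotion = true;
    scavenge_tso(&t);
    printf("dirty=%d, eager_promotion restored=%d\n",
           (t.flags & TSO_DIRTY) != 0, (int)gct->eager_promotion);
    return 0;
}

With the bookkeeping folded into scavengeTSO, each caller in the patch shrinks to the two-line form seen in the TSO hunks above: call scavengeTSO and then set gct->failed_to_evac, since a TSO always stays on the mutable list.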