X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FSanity.c;h=65a70fa05c9bc3259ab27cd55e2adcb4e9884053;hb=329077220af83860d5dd6891649cb1058b5bbaa6;hp=44af5926d20bba38e7801c0e0cf8c1aad70f3eda;hpb=2baf448a8bb8138c736c74ae40cafa4b09491fda;p=ghc-hetmet.git

diff --git a/rts/sm/Sanity.c b/rts/sm/Sanity.c
index 44af592..65a70fa 100644
--- a/rts/sm/Sanity.c
+++ b/rts/sm/Sanity.c
@@ -26,6 +26,7 @@
 #include "Apply.h"
 #include "Printer.h"
 #include "Arena.h"
+#include "RetainerProfile.h"
 
 /* -----------------------------------------------------------------------------
    Forward decls.
@@ -34,6 +35,7 @@
 static void      checkSmallBitmap    ( StgPtr payload, StgWord bitmap, nat );
 static void      checkLargeBitmap    ( StgPtr payload, StgLargeBitmap*, nat );
 static void      checkClosureShallow ( StgClosure * );
+static void      checkSTACK          (StgStack *stack);
 
 /* -----------------------------------------------------------------------------
    Check stack sanity
@@ -138,6 +140,7 @@ checkStackFrame( StgPtr c )
     case CATCH_STM_FRAME:
     case CATCH_FRAME:
       // small bitmap cases (<= 32 entries)
+    case UNDERFLOW_FRAME:
     case STOP_FRAME:
     case RET_SMALL:
         size = BITMAP_SIZE(info->i.layout.bitmap);
@@ -330,7 +333,7 @@ checkClosure( StgClosure* p )
 
         ASSERT(get_itbl(bq->owner)->type == TSO);
         ASSERT(bq->queue == (MessageBlackHole*)END_TSO_QUEUE
-               || get_itbl(bq->queue)->type == TSO);
+               || bq->queue->header.info == &stg_MSG_BLACKHOLE_info);
         ASSERT(bq->link == (StgBlockingQueue*)END_TSO_QUEUE ||
                get_itbl(bq->link)->type == IND ||
                get_itbl(bq->link)->type == BLOCKING_QUEUE);
@@ -383,6 +386,7 @@ checkClosure( StgClosure* p )
     case RET_BIG:
     case RET_DYN:
     case UPDATE_FRAME:
+    case UNDERFLOW_FRAME:
     case STOP_FRAME:
     case CATCH_FRAME:
     case ATOMICALLY_FRAME:
@@ -430,7 +434,11 @@ checkClosure( StgClosure* p )
 
     case TSO:
         checkTSO((StgTSO *)p);
-        return tso_sizeW((StgTSO *)p);
+        return sizeofW(StgTSO);
+
+    case STACK:
+        checkSTACK((StgStack*)p);
+        return stack_sizeW((StgStack*)p);
 
     case TREC_CHUNK:
       {
@@ -472,16 +480,18 @@ checkHeap(bdescr *bd)
 #endif
 
     for (; bd != NULL; bd = bd->link) {
-        p = bd->start;
-        while (p < bd->free) {
-            nat size = checkClosure((StgClosure *)p);
-            /* This is the smallest size of closure that can live in the heap */
-            ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
-            p += size;
+        if(!(bd->flags & BF_SWEPT)) {
+            p = bd->start;
+            while (p < bd->free) {
+                nat size = checkClosure((StgClosure *)p);
+                /* This is the smallest size of closure that can live in the heap */
+                ASSERT( size >= MIN_PAYLOAD_SIZE + sizeofW(StgHeader) );
+                p += size;
 
-            /* skip over slop */
-            while (p < bd->free &&
-                   (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
+                /* skip over slop */
+                while (p < bd->free &&
+                       (*p < 0x1000 || !LOOKS_LIKE_INFO_PTR(*p))) { p++; }
+            }
         }
     }
 }
@@ -511,19 +521,21 @@ checkLargeObjects(bdescr *bd)
     }
 }
 
-void
-checkTSO(StgTSO *tso)
+static void
+checkSTACK (StgStack *stack)
 {
-    StgPtr sp = tso->sp;
-    StgPtr stack = tso->stack;
-    StgOffset stack_size = tso->stack_size;
-    StgPtr stack_end = stack + stack_size;
+    StgPtr sp = stack->sp;
+    StgOffset stack_size = stack->stack_size;
+    StgPtr stack_end = stack->stack + stack_size;
 
-    if (tso->what_next == ThreadRelocated) {
-        checkTSO(tso->_link);
-        return;
-    }
+    ASSERT(stack->stack <= sp && sp <= stack_end);
+
+    checkStackChunk(sp, stack_end);
+}
 
+void
+checkTSO(StgTSO *tso)
+{
     if (tso->what_next == ThreadKilled) {
       /* The garbage collector doesn't bother following any pointers
        * from dead threads, so don't check sanity here.
@@ -534,16 +546,24 @@ checkTSO(StgTSO *tso)
     ASSERT(tso->_link == END_TSO_QUEUE ||
            tso->_link->header.info == &stg_MVAR_TSO_QUEUE_info ||
            tso->_link->header.info == &stg_TSO_info);
-    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
+
+    if (   tso->why_blocked == BlockedOnMVar
+        || tso->why_blocked == BlockedOnBlackHole
+        || tso->why_blocked == BlockedOnMsgThrowTo
+        || tso->why_blocked == NotBlocked
+        ) {
+        ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->block_info.closure));
+    }
+
+    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->bq));
     ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->blocked_exceptions));
+    ASSERT(LOOKS_LIKE_CLOSURE_PTR(tso->stackobj));
 
-    ASSERT(stack <= sp && sp < stack_end);
-
-    checkStackChunk(sp, stack_end);
+    // XXX are we checking the stack twice?
+    checkSTACK(tso->stackobj);
 }
 
-/* 
+/*
    Check that all TSOs have been evacuated.
    Optionally also check the sanity of the TSOs.
 */
@@ -561,11 +581,9 @@ checkGlobalTSOList (rtsBool checkTSOs)
             if (checkTSOs)
                 checkTSO(tso);
 
-            tso = deRefTSO(tso);
-
             // If this TSO is dirty and in an old generation, it better
             // be on the mutable list.
-            if (tso->dirty || (tso->flags & TSO_LINK_DIRTY)) {
+            if (tso->dirty) {
                 ASSERT(Bdescr((P_)tso)->gen_no == 0 || (tso->flags & TSO_MARKED));
                 tso->flags &= ~TSO_MARKED;
             }
@@ -601,7 +619,6 @@ checkMutableLists (rtsBool checkTSOs)
     nat g, i;
 
     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-        checkMutableList(generations[g].mut_list, g);
         for (i = 0; i < n_capabilities; i++) {
             checkMutableList(capabilities[i].mut_lists[g], g);
         }
@@ -720,7 +737,6 @@ findMemoryLeak (void)
       for (i = 0; i < n_capabilities; i++) {
           markBlocks(capabilities[i].mut_lists[g]);
       }
-      markBlocks(generations[g].mut_list);
       markBlocks(generations[g].blocks);
       markBlocks(generations[g].large_objects);
   }
@@ -808,7 +824,6 @@ memInventory (rtsBool show)
       for (i = 0; i < n_capabilities; i++) {
          gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
      }
-      gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
      gen_blocks[g] += genBlocks(&generations[g]);
   }
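Note on the checkSTACK()/checkTSO() split above: the new checkSTACK() enforces one invariant per stack object, namely that the saved stack pointer lies inside its own chunk (stack <= sp <= stack + stack_size) before the frames are walked with checkStackChunk(). The standalone C sketch below illustrates only that bounds check; it is not GHC RTS code, and SketchStack is a simplified, hypothetical stand-in for StgStack that mirrors just the fields the patch reads.

/* Standalone sketch -- not GHC RTS code.  SketchStack is a simplified
 * stand-in for StgStack, mirroring only the fields used by the new
 * checkSTACK(): stack_size, sp, and the stack[] payload. */
#include <assert.h>

typedef unsigned long W_;   /* stand-in for StgWord */
typedef W_ *StgPtr;         /* stand-in for StgPtr  */

typedef struct {
    W_     stack_size;      /* size of this chunk, in words */
    StgPtr sp;              /* current stack pointer        */
    W_     stack[8];        /* payload; fixed size here only to keep
                               the sketch self-contained          */
} SketchStack;

/* The same bounds check that checkSTACK() asserts before walking frames:
 * the stack pointer must lie within [stack, stack + stack_size]. */
static void sketch_checkSTACK(SketchStack *s)
{
    StgPtr stack_end = s->stack + s->stack_size;
    assert(s->stack <= s->sp && s->sp <= stack_end);
    /* the real function then calls checkStackChunk(sp, stack_end) */
}

int main(void)
{
    SketchStack s = { .stack_size = 8 };
    s.sp = s.stack + s.stack_size;   /* an empty stack: sp == stack_end */
    sketch_checkSTACK(&s);
    return 0;
}

Note also that the patch loosens the old assertion from sp < stack_end to sp <= stack_end, which matches the case where a chunk's stack is empty and sp sits exactly at stack_end.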