X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FStorage.c;h=94f9e10b9f64713b1805ec04e7af25f6635321ae;hb=8bc3f02844342419a9c6fe29afe6ab2f07acfb1e;hp=f0506cd77cd8037a8774b8c723e2153f770f98f6;hpb=ce9a12321f228ab68934e3031c32ab7f9a2173fc;p=ghc-hetmet.git

diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index f0506cd..94f9e10 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -40,24 +40,21 @@ StgClosure *caf_list = NULL;
 StgClosure *revertible_caf_list = NULL;
 rtsBool keepCAFs;
 
-bdescr *pinned_object_block;    /* allocate pinned objects into this block */
-nat alloc_blocks;               /* number of allocate()d blocks since GC */
-nat alloc_blocks_lim;           /* approximate limit on alloc_blocks */
+nat alloc_blocks_lim;    /* GC if n_large_blocks in any nursery
+                          * reaches this. */
 
-static bdescr *exec_block;
+bdescr *exec_block;
 
 generation *generations = NULL; /* all the generations */
 generation *g0          = NULL; /* generation 0, for convenience */
 generation *oldest_gen  = NULL; /* oldest generation, for convenience */
-step *g0s0              = NULL; /* generation 0, step 0, for convenience */
 
 nat total_steps         = 0;
 step *all_steps         = NULL; /* single array of steps */
 
 ullong total_allocated = 0;     /* total memory allocated during run */
 
-nat n_nurseries         = 0;    /* == RtsFlags.ParFlags.nNodes, convenience */
-step *nurseries         = NULL; /* array of nurseries, >1 only if THREADED_RTS */
+step *nurseries         = NULL; /* array of nurseries, size == n_capabilities */
 
 #ifdef THREADED_RTS
 /*
@@ -143,14 +140,6 @@ initStorage( void )
 					     * sizeof(struct generation_), "initStorage: gens");
 
-  /* allocate all the steps into an array.  It is important that we do
-     it this way, because we need the invariant that two step pointers
-     can be directly compared to see which is the oldest.
-     Remember that the last generation has only one step. */
-  total_steps = 1 + (RtsFlags.GcFlags.generations - 1) * RtsFlags.GcFlags.steps;
-  all_steps   = stgMallocBytes(total_steps * sizeof(struct step_),
-                               "initStorage: steps");
-
   /* Initialise all generations */
   for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
     gen = &generations[g];
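A note on the invariant this patch preserves when it moves the all_steps allocation (removed above, re-added in the next hunk): because every step lives in one contiguous array, ordered from generation 0 upward, "which step is older" reduces to a pointer comparison. The helper below is a hypothetical illustration of that property, not part of the patch or the RTS:

/* Illustrative only: valid because all_steps is a single array,
 * youngest step first, the oldest generation's step last. */
static rtsBool is_older_step (step *s1, step *s2)
{
    return (s1 > s2) ? rtsTrue : rtsFalse;
}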
@@ -166,6 +155,14 @@ initStorage( void )
   g0 = &generations[0];
   oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
 
+  /* allocate all the steps into an array.  It is important that we do
+     it this way, because we need the invariant that two step pointers
+     can be directly compared to see which is the oldest.
+     Remember that the last generation has only one step. */
+  total_steps = 1 + (RtsFlags.GcFlags.generations - 1) * RtsFlags.GcFlags.steps;
+  all_steps   = stgMallocBytes(total_steps * sizeof(struct step_),
+                               "initStorage: steps");
+
   /* Allocate step structures in each generation */
   if (RtsFlags.GcFlags.generations > 1) {
     /* Only for multiple-generations */
@@ -187,12 +184,7 @@ initStorage( void )
 	g0->steps = all_steps;
   }
 
-#ifdef THREADED_RTS
-  n_nurseries = n_capabilities;
-#else
-  n_nurseries = 1;
-#endif
-  nurseries = stgMallocBytes (n_nurseries * sizeof(struct step_),
+  nurseries = stgMallocBytes (n_capabilities * sizeof(struct step_),
 			      "initStorage: nurseries");
 
   /* Initialise all steps */
@@ -202,7 +194,7 @@ initStorage( void )
     }
   }
 
-  for (s = 0; s < n_nurseries; s++) {
+  for (s = 0; s < n_capabilities; s++) {
       initStep(&nurseries[s], 0, s);
   }
 
@@ -215,7 +207,7 @@ initStorage( void )
   }
   oldest_gen->steps[0].to = &oldest_gen->steps[0];
 
-  for (s = 0; s < n_nurseries; s++) {
+  for (s = 0; s < n_capabilities; s++) {
      nurseries[s].to = generations[0].steps[0].to;
   }
 
@@ -231,7 +223,6 @@ initStorage( void )
     }
   }
   generations[0].max_blocks = 0;
-  g0s0 = &generations[0].steps[0];
 
   /* The allocation area.  Policy: keep the allocation area
   * small to begin with, even if we have a large suggested heap
@@ -246,7 +237,6 @@ initStorage( void )
   revertible_caf_list = NULL;
 
   /* initialise the allocate() interface */
-  alloc_blocks = 0;
   alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
 
   exec_block = NULL;
@@ -274,7 +264,7 @@ exitStorage (void)
 void
 freeStorage (void)
 {
-    stgFree(g0s0); // frees all the steps
+    stgFree(all_steps); // frees all the steps
     stgFree(generations);
    freeAllMBlocks();
 #if defined(THREADED_RTS)
@@ -411,8 +401,7 @@ allocNursery (step *stp, bdescr *tail, nat blocks)
	if (tail != NULL) {
	    tail->u.back = bd;
	}
-	bd->step = stp;
-	bd->gen_no = 0;
+	initBdescr(bd, stp);
	bd->flags = 0;
	bd->free = bd->start;
	tail = bd;
@@ -424,19 +413,13 @@ allocNursery (step *stp, bdescr *tail, nat blocks)
 static void
 assignNurseriesToCapabilities (void)
 {
-#ifdef THREADED_RTS
    nat i;
 
-    for (i = 0; i < n_nurseries; i++) {
+    for (i = 0; i < n_capabilities; i++) {
	capabilities[i].r.rNursery        = &nurseries[i];
	capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
	capabilities[i].r.rCurrentAlloc   = NULL;
    }
-#else /* THREADED_RTS */
-    MainCapability.r.rNursery        = &nurseries[0];
-    MainCapability.r.rCurrentNursery = nurseries[0].blocks;
-    MainCapability.r.rCurrentAlloc   = NULL;
-#endif
 }
 
 static void
@@ -444,7 +427,7 @@ allocNurseries( void )
 {
    nat i;
 
-    for (i = 0; i < n_nurseries; i++) {
+    for (i = 0; i < n_capabilities; i++) {
	nurseries[i].blocks =
	    allocNursery(&nurseries[i], NULL,
			 RtsFlags.GcFlags.minAllocAreaSize);
@@ -462,7 +445,7 @@ resetNurseries( void )
    bdescr *bd;
    step *stp;
 
-    for (i = 0; i < n_nurseries; i++) {
+    for (i = 0; i < n_capabilities; i++) {
	stp = &nurseries[i];
	for (bd = stp->blocks; bd; bd = bd->link) {
	    bd->free = bd->start;
@@ -470,6 +453,10 @@ resetNurseries( void )
	    ASSERT(bd->step == stp);
	    IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
	}
+	// these large objects are dead, since we have just GC'd
+	freeChain(stp->large_objects);
+	stp->large_objects = NULL;
+	stp->n_large_blocks = 0;
    }
    assignNurseriesToCapabilities();
 }
@@ -480,8 +467,9 @@ countNurseryBlocks (void)
    nat i;
    lnat blocks = 0;
 
-    for (i = 0; i < n_nurseries; i++) {
+    for (i = 0; i < n_capabilities; i++) {
	blocks += nurseries[i].n_blocks;
+	blocks += nurseries[i].n_large_blocks;
    }
    return blocks;
 }
@@ -533,7 +521,7 @@ void
 resizeNurseriesFixed (nat blocks)
 {
    nat i;
 
-    for (i = 0; i < n_nurseries; i++) {
+    for (i = 0; i < n_capabilities; i++) {
	resizeNursery(&nurseries[i], blocks);
    }
 }
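The hunks above retire the n_nurseries count in favour of n_capabilities (the two were already equal in the threaded RTS, and both are 1 otherwise) and make each nursery track its own large-object blocks. That is what the reworded alloc_blocks_lim comment in the first hunk refers to: GC once n_large_blocks in any nursery reaches the limit. A hedged sketch of that trigger; the helper is hypothetical, and the real check is made elsewhere in the RTS via MAYBE_GC():

/* Hypothetical sketch of the alloc_blocks_lim heuristic described in
 * the comment at the top of this diff; not a function in the patch. */
static rtsBool nursery_exceeds_alloc_lim (nat cap_no)
{
    return (nurseries[cap_no].n_large_blocks >= alloc_blocks_lim)
        ? rtsTrue : rtsFalse;
}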
@@ -546,7 +534,7 @@ resizeNurseries (nat blocks)
 {
    // If there are multiple nurseries, then we just divide the number
    // of available blocks between them.
-    resizeNurseriesFixed(blocks / n_nurseries);
+    resizeNurseriesFixed(blocks / n_capabilities);
 }
 
 
@@ -566,132 +554,46 @@ move_TSO (StgTSO *src, StgTSO *dest)
 }
 
 /* -----------------------------------------------------------------------------
-   The allocate() interface
-
-   allocateInGen() function allocates memory directly into a specific
-   generation.  It always succeeds, and returns a chunk of memory n
-   words long.  n can be larger than the size of a block if necessary,
-   in which case a contiguous block group will be allocated.
-
-   allocate(n) is equivalent to allocateInGen(g0).
+   split N blocks off the front of the given bdescr, returning the
+   new block group.  We add the remainder to the large_blocks list
+   in the same step as the original block.
   -------------------------------------------------------------------------- */
 
-StgPtr
-allocateInGen (generation *g, lnat n)
-{
-    step *stp;
-    bdescr *bd;
-    StgPtr ret;
-
-    ACQUIRE_SM_LOCK;
-
-    TICK_ALLOC_HEAP_NOCTR(n);
-    CCS_ALLOC(CCCS,n);
-
-    stp = &g->steps[0];
-
-    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_))
-    {
-        lnat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
-
-        // Attempting to allocate an object larger than maxHeapSize
-        // should definitely be disallowed.  (bug #1791)
-        if (RtsFlags.GcFlags.maxHeapSize > 0 &&
-            req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
-            heapOverflow();
-            // heapOverflow() doesn't exit (see #2592), but we aren't
-            // in a position to do a clean shutdown here: we
-            // either have to allocate the memory or exit now.
-            // Allocating the memory would be bad, because the user
-            // has requested that we not exceed maxHeapSize, so we
-            // just exit.
-            stg_exit(EXIT_HEAPOVERFLOW);
-        }
-
-        bd = allocGroup(req_blocks);
-        dbl_link_onto(bd, &stp->large_objects);
-        stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
-        alloc_blocks += bd->blocks;
-        bd->gen_no = g->no;
-        bd->step = stp;
-        bd->flags = BF_LARGE;
-        bd->free = bd->start + n;
-        ret = bd->start;
-    }
-    else
-    {
-        // small allocation (<LARGE_OBJECT_THRESHOLD)
-        bd = stp->blocks;
-        if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
-            bd = allocBlock();
-            bd->gen_no = g->no;
-            bd->step = stp;
-            bd->flags = 0;
-            bd->link = stp->blocks;
-            stp->blocks = bd;
-            stp->n_blocks++;
-            alloc_blocks++;
-        }
-        ret = bd->free;
-        bd->free += n;
-    }
-
-    RELEASE_SM_LOCK;
-
-    return ret;
-}
-
-StgPtr
-allocate (lnat n)
-{
-    return allocateInGen(g0,n);
-}
-
-lnat
-allocatedBytes( void )
-{
-    lnat allocated;
-
-    allocated = alloc_blocks * BLOCK_SIZE_W;
-    if (pinned_object_block != NULL) {
-        allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
-            pinned_object_block->free;
-    }
-
-    return allocated;
-}
-
-// split N blocks off the front of the given bdescr, returning the
-// new block group.  We treat the remainder as if it
-// had been freshly allocated in generation 0.
 bdescr *
 splitLargeBlock (bdescr *bd, nat blocks)
 {
    bdescr *new_bd;
 
+    ACQUIRE_SM_LOCK;
+
+    ASSERT(countBlocks(bd->step->large_objects) == bd->step->n_large_blocks);
+
    // subtract the original number of blocks from the counter first
    bd->step->n_large_blocks -= bd->blocks;
 
    new_bd = splitBlockGroup (bd, blocks);
-
-    dbl_link_onto(new_bd, &g0s0->large_objects);
-    g0s0->n_large_blocks += new_bd->blocks;
-    new_bd->gen_no = g0s0->no;
-    new_bd->step   = g0s0;
-    new_bd->flags  = BF_LARGE;
+    initBdescr(new_bd, bd->step);
+    new_bd->flags = BF_LARGE | (bd->flags & BF_EVACUATED);
+    // if new_bd is in an old generation, we have to set BF_EVACUATED
    new_bd->free = bd->free;
+    dbl_link_onto(new_bd, &bd->step->large_objects);
+
    ASSERT(new_bd->free <= new_bd->start + new_bd->blocks * BLOCK_SIZE_W);
 
    // add the new number of blocks to the counter.  Due to the gaps
-    // for block descriptor, new_bd->blocks + bd->blocks might not be
+    // for block descriptors, new_bd->blocks + bd->blocks might not be
    // equal to the original bd->blocks, which is why we do it this way.
-    bd->step->n_large_blocks += bd->blocks;
+    bd->step->n_large_blocks += bd->blocks + new_bd->blocks;
+
+    ASSERT(countBlocks(bd->step->large_objects) == bd->step->n_large_blocks);
+
+    RELEASE_SM_LOCK;
 
    return new_bd;
 }
 
 /* -----------------------------------------------------------------------------
-   allocateLocal()
+   allocate()
 
    This allocates memory in the current thread - it is intended for
   use primarily from STG-land where we have a Capability.  It is
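The splitLargeBlock() rewrite above is careful about n_large_blocks because block-descriptor reservations make the counts non-additive. A worked example with placeholder names (B and N are illustrative, not values from the patch):

/* Accounting for splitLargeBlock(bd, N) when bd->blocks == B:
 *
 *   n_large_blocks -= B;                          // retire old count
 *   new_bd = splitBlockGroup(bd, N);              // bd->blocks == N now
 *   n_large_blocks += bd->blocks + new_bd->blocks;
 *
 * new_bd->blocks may be less than B - N: if the tail group begins in a
 * fresh megablock, that megablock reserves a few blocks for its own
 * block descriptors.  Summing the two real sizes, rather than assuming
 * the split preserves B, keeps n_large_blocks consistent with
 * countBlocks(), which the surrounding ASSERTs check. */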
@@ -704,13 +606,40 @@ splitLargeBlock (bdescr *bd, nat blocks)
   -------------------------------------------------------------------------- */
 
 StgPtr
-allocateLocal (Capability *cap, lnat n)
+allocate (Capability *cap, lnat n)
 {
    bdescr *bd;
    StgPtr p;
+    step *stp;
 
    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-        return allocateInGen(g0,n);
+        lnat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
+
+        // Attempting to allocate an object larger than maxHeapSize
+        // should definitely be disallowed.  (bug #1791)
+        if (RtsFlags.GcFlags.maxHeapSize > 0 &&
+            req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
+            heapOverflow();
+            // heapOverflow() doesn't exit (see #2592), but we aren't
+            // in a position to do a clean shutdown here: we
+            // either have to allocate the memory or exit now.
+            // Allocating the memory would be bad, because the user
+            // has requested that we not exceed maxHeapSize, so we
+            // just exit.
+            stg_exit(EXIT_HEAPOVERFLOW);
+        }
+
+        stp = &nurseries[cap->no];
+
+        ACQUIRE_SM_LOCK
+        bd = allocGroup(req_blocks);
+        RELEASE_SM_LOCK;
+        dbl_link_onto(bd, &stp->large_objects);
+        stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
+        initBdescr(bd, stp);
+        bd->flags = BF_LARGE;
+        bd->free = bd->start + n;
+        return bd->start;
    }
 
    /* small allocation (<LARGE_OBJECT_THRESHOLD) */
@@ ... @@ allocateLocal (Capability *cap, lnat n)
	    bd = allocBlock();
	    cap->r.rNursery->n_blocks++;
	    RELEASE_SM_LOCK;
-	    bd->gen_no = 0;
-	    bd->step = cap->r.rNursery;
+	    initBdescr(bd, cap->r.rNursery);
	    bd->flags = 0;
-	    // NO: alloc_blocks++;
-	    // calcAllocated() uses the size of the nursery, and we've
-	    // already bumpted nursery->n_blocks above.  We'll GC
-	    // pretty quickly now anyway, because MAYBE_GC() will
+	    // If we had to allocate a new block, then we'll GC
+	    // pretty quickly now, because MAYBE_GC() will
	    // notice that CurrentNursery->link is NULL.
    } else {
        // we have a block in the nursery: take it and put
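To make the large-object path of the new allocate() concrete: n is a count of words, so it is scaled to bytes and rounded up to whole blocks before the maxHeapSize check. The standalone function below merely restates the patch's own expression for illustration, using the same RTS macros:

/* Illustrative: blocks consumed by a large allocation of n words. */
static lnat words_to_blocks (lnat n)
{
    return (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
}

Note that allocGroup() may hand back a slightly larger group than requested, which is why the code above bumps n_large_blocks by bd->blocks rather than by req_blocks.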
@@ -783,40 +709,41 @@ allocateLocal (Capability *cap, lnat n)
   ------------------------------------------------------------------------- */
 
 StgPtr
-allocatePinned( lnat n )
+allocatePinned (Capability *cap, lnat n)
 {
    StgPtr p;
-    bdescr *bd = pinned_object_block;
+    bdescr *bd;
+    step *stp;
 
    // If the request is for a large object, then allocate()
    // will give us a pinned object anyway.
    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-	p = allocate(n);
+	p = allocate(cap, n);
        Bdescr(p)->flags |= BF_PINNED;
        return p;
    }
 
-    ACQUIRE_SM_LOCK;
-
    TICK_ALLOC_HEAP_NOCTR(n);
    CCS_ALLOC(CCCS,n);
 
+    bd = cap->pinned_object_block;
+
    // If we don't have a block of pinned objects yet, or the current
    // one isn't large enough to hold the new object, allocate a new one.
    if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
-	pinned_object_block = bd = allocBlock();
-	dbl_link_onto(bd, &g0s0->large_objects);
-	g0s0->n_large_blocks++;
-	bd->gen_no = 0;
-	bd->step   = g0s0;
+	ACQUIRE_SM_LOCK
+	cap->pinned_object_block = bd = allocBlock();
+	RELEASE_SM_LOCK
+        stp = &nurseries[cap->no];
+	dbl_link_onto(bd, &stp->large_objects);
+	stp->n_large_blocks++;
+	initBdescr(bd, stp);
	bd->flags  = BF_PINNED | BF_LARGE;
	bd->free   = bd->start;
-	alloc_blocks++;
    }
 
    p = bd->free;
    bd->free += n;
-    RELEASE_SM_LOCK;
    return p;
 }
 
@@ -897,8 +824,7 @@ dirty_MVAR(StgRegTable *reg, StgClosure *p)
 *
 * Approximate how much we've allocated: number of blocks in the
 * nursery + blocks allocated via allocate() - unused nursery blocks.
- * This leaves a little slop at the end of each block, and doesn't
- * take into account large objects (ToDo).
+ * This leaves a little slop at the end of each block.
 * -------------------------------------------------------------------------- */
 
 lnat
@@ -906,14 +832,11 @@ calcAllocated( void )
 {
  nat allocated;
  bdescr *bd;
+  nat i;
 
-  allocated = allocatedBytes();
-  allocated += countNurseryBlocks() * BLOCK_SIZE_W;
+  allocated = countNurseryBlocks() * BLOCK_SIZE_W;
 
-  {
-#ifdef THREADED_RTS
-  nat i;
-  for (i = 0; i < n_nurseries; i++) {
+  for (i = 0; i < n_capabilities; i++) {
      Capability *cap;
      for ( bd = capabilities[i].r.rCurrentNursery->link;
	    bd != NULL; bd = bd->link ) {
@@ -925,18 +848,10 @@ calcAllocated( void )
	  allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
	      - cap->r.rCurrentNursery->free;
      }
-  }
-#else
-  bdescr *current_nursery = MainCapability.r.rCurrentNursery;
-
-  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
-      allocated -= BLOCK_SIZE_W;
-  }
-  if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
-      allocated -= (current_nursery->start + BLOCK_SIZE_W)
-	  - current_nursery->free;
-  }
-#endif
+      if (cap->pinned_object_block != NULL) {
+          allocated -= (cap->pinned_object_block->start + BLOCK_SIZE_W) -
+              cap->pinned_object_block->free;
+      }
  }
 
  total_allocated += allocated;
@@ -953,16 +868,12 @@ calcLiveBlocks(void)
  lnat live = 0;
  step *stp;
 
-  if (RtsFlags.GcFlags.generations == 1) {
-      return g0s0->n_large_blocks + g0s0->n_blocks;
-  }
-
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  /* approximate amount of live data (doesn't take into account slop
	   * at end of each block).
*/ - if (g == 0 && s == 0) { + if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) { continue; } stp = &generations[g].steps[s]; @@ -994,14 +905,10 @@ calcLiveWords(void) lnat live; step *stp; - if (RtsFlags.GcFlags.generations == 1) { - return g0s0->n_words + countOccupied(g0s0->large_objects); - } - live = 0; for (g = 0; g < RtsFlags.GcFlags.generations; g++) { for (s = 0; s < generations[g].n_steps; s++) { - if (g == 0 && s == 0) continue; + if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) continue; stp = &generations[g].steps[s]; live += stp->n_words + countOccupied(stp->large_objects); } @@ -1180,268 +1087,8 @@ void freeExec (void *addr) #endif /* mingw32_HOST_OS */ -/* ----------------------------------------------------------------------------- - Debugging - - memInventory() checks for memory leaks by counting up all the - blocks we know about and comparing that to the number of blocks - allegedly floating around in the system. - -------------------------------------------------------------------------- */ - #ifdef DEBUG -// Useful for finding partially full blocks in gdb -void findSlop(bdescr *bd); -void findSlop(bdescr *bd) -{ - lnat slop; - - for (; bd != NULL; bd = bd->link) { - slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start); - if (slop > (1024/sizeof(W_))) { - debugBelch("block at %p (bdescr %p) has %ldKB slop\n", - bd->start, bd, slop / (1024/sizeof(W_))); - } - } -} - -nat -countBlocks(bdescr *bd) -{ - nat n; - for (n=0; bd != NULL; bd=bd->link) { - n += bd->blocks; - } - return n; -} - -// (*1) Just like countBlocks, except that we adjust the count for a -// megablock group so that it doesn't include the extra few blocks -// that would be taken up by block descriptors in the second and -// subsequent megablock. This is so we can tally the count with the -// number of blocks allocated in the system, for memInventory(). -static nat -countAllocdBlocks(bdescr *bd) -{ - nat n; - for (n=0; bd != NULL; bd=bd->link) { - n += bd->blocks; - // hack for megablock groups: see (*1) above - if (bd->blocks > BLOCKS_PER_MBLOCK) { - n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK) - * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE)); - } - } - return n; -} - -static lnat -stepBlocks (step *stp) -{ - ASSERT(countBlocks(stp->blocks) == stp->n_blocks); - ASSERT(countBlocks(stp->large_objects) == stp->n_large_blocks); - return stp->n_blocks + stp->n_old_blocks + - countAllocdBlocks(stp->large_objects); -} - -// If memInventory() calculates that we have a memory leak, this -// function will try to find the block(s) that are leaking by marking -// all the ones that we know about, and search through memory to find -// blocks that are not marked. In the debugger this can help to give -// us a clue about what kind of block leaked. In the future we might -// annotate blocks with their allocation site to give more helpful -// info. 
-static void
-findMemoryLeak (void)
-{
-  nat g, s, i;
-  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-      for (i = 0; i < n_capabilities; i++) {
-          markBlocks(capabilities[i].mut_lists[g]);
-      }
-      markBlocks(generations[g].mut_list);
-      for (s = 0; s < generations[g].n_steps; s++) {
-          markBlocks(generations[g].steps[s].blocks);
-          markBlocks(generations[g].steps[s].large_objects);
-      }
-  }
-
-  for (i = 0; i < n_nurseries; i++) {
-      markBlocks(nurseries[i].blocks);
-      markBlocks(nurseries[i].large_objects);
-  }
-
-#ifdef PROFILING
-  // TODO:
-  // if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
-  //    markRetainerBlocks();
-  // }
-#endif
-
-  // count the blocks allocated by the arena allocator
-  // TODO:
-  // markArenaBlocks();
-
-  // count the blocks containing executable memory
-  markBlocks(exec_block);
-
-  reportUnmarkedBlocks();
-}
-
-
-void
-memInventory (rtsBool show)
-{
-  nat g, s, i;
-  step *stp;
-  lnat gen_blocks[RtsFlags.GcFlags.generations];
-  lnat nursery_blocks, retainer_blocks,
-       arena_blocks, exec_blocks;
-  lnat live_blocks = 0, free_blocks = 0;
-  rtsBool leak;
-
-  // count the blocks we current have
-
-  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-      gen_blocks[g] = 0;
-      for (i = 0; i < n_capabilities; i++) {
-          gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
-      }
-      gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
-      for (s = 0; s < generations[g].n_steps; s++) {
-          stp = &generations[g].steps[s];
-          gen_blocks[g] += stepBlocks(stp);
-      }
-  }
-
-  nursery_blocks = 0;
-  for (i = 0; i < n_nurseries; i++) {
-      nursery_blocks += stepBlocks(&nurseries[i]);
-  }
-
-  retainer_blocks = 0;
-#ifdef PROFILING
-  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
-      retainer_blocks = retainerStackBlocks();
-  }
-#endif
-
-  // count the blocks allocated by the arena allocator
-  arena_blocks = arenaBlocks();
-
-  // count the blocks containing executable memory
-  exec_blocks = countAllocdBlocks(exec_block);
-
-  /* count the blocks on the free list */
-  free_blocks = countFreeList();
-
-  live_blocks = 0;
-  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-      live_blocks += gen_blocks[g];
-  }
-  live_blocks += nursery_blocks +
-               + retainer_blocks + arena_blocks + exec_blocks;
-
-#define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
-
-  leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
-
-  if (show || leak)
-  {
-      if (leak) {
-          debugBelch("Memory leak detected:\n");
-      } else {
-          debugBelch("Memory inventory:\n");
-      }
-      for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-          debugBelch("  gen %d blocks : %5lu blocks (%lu MB)\n", g,
-                     gen_blocks[g], MB(gen_blocks[g]));
-      }
-      debugBelch("  nursery      : %5lu blocks (%lu MB)\n",
-                 nursery_blocks, MB(nursery_blocks));
-      debugBelch("  retainer     : %5lu blocks (%lu MB)\n",
-                 retainer_blocks, MB(retainer_blocks));
-      debugBelch("  arena blocks : %5lu blocks (%lu MB)\n",
-                 arena_blocks, MB(arena_blocks));
-      debugBelch("  exec         : %5lu blocks (%lu MB)\n",
-                 exec_blocks, MB(exec_blocks));
-      debugBelch("  free         : %5lu blocks (%lu MB)\n",
-                 free_blocks, MB(free_blocks));
-      debugBelch("  total        : %5lu blocks (%lu MB)\n",
-                 live_blocks + free_blocks, MB(live_blocks+free_blocks));
-      if (leak) {
-          debugBelch("\n  in system    : %5lu blocks (%lu MB)\n",
-                     mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
-      }
-  }
-
-  if (leak) {
-      debugBelch("\n");
-      findMemoryLeak();
-  }
-  ASSERT(n_alloc_blocks == live_blocks);
-  ASSERT(!leak);
-}
-
-
-/* Full heap sanity check. */
-void
-checkSanity( void )
-{
-    nat g, s;
-
-    if (RtsFlags.GcFlags.generations == 1) {
-        checkHeap(g0s0->blocks);
-        checkLargeObjects(g0s0->large_objects);
-    } else {
-
-        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-            for (s = 0; s < generations[g].n_steps; s++) {
-                if (g == 0 && s == 0) { continue; }
-                ASSERT(countBlocks(generations[g].steps[s].blocks)
-                       == generations[g].steps[s].n_blocks);
-                ASSERT(countBlocks(generations[g].steps[s].large_objects)
-                       == generations[g].steps[s].n_large_blocks);
-                checkHeap(generations[g].steps[s].blocks);
-                checkLargeObjects(generations[g].steps[s].large_objects);
-            }
-        }
-
-        for (s = 0; s < n_nurseries; s++) {
-            ASSERT(countBlocks(nurseries[s].blocks)
-                   == nurseries[s].n_blocks);
-            ASSERT(countBlocks(nurseries[s].large_objects)
-                   == nurseries[s].n_large_blocks);
-        }
-
-        checkFreeListSanity();
-    }
-
-#if defined(THREADED_RTS)
-    // check the stacks too in threaded mode, because we don't do a
-    // full heap sanity check in this case (see checkHeap())
-    checkMutableLists(rtsTrue);
-#else
-    checkMutableLists(rtsFalse);
-#endif
-}
-
-/* Nursery sanity check */
-void
-checkNurserySanity( step *stp )
-{
-    bdescr *bd, *prev;
-    nat blocks = 0;
-
-    prev = NULL;
-    for (bd = stp->blocks; bd != NULL; bd = bd->link) {
-        ASSERT(bd->u.back == prev);
-        prev = bd;
-        blocks += bd->blocks;
-    }
-    ASSERT(blocks == stp->n_blocks);
-}
-
 // handy function for use in gdb, because Bdescr() is inlined.
 extern bdescr *_bdescr( StgPtr p );
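A closing note on the accounting this patch leaves behind: calcAllocated() now derives its figure entirely from per-capability nursery state, including the new per-capability pinned_object_block. A sketch of the arithmetic for one capability, mirroring the code in the calcAllocated() hunk above (approximate by design -- slop at the end of each handed-out block still counts as allocated):

/* Per capability, conceptually:
 *
 *   allocated  = nursery blocks (incl. large blocks) * BLOCK_SIZE_W;
 *   allocated -= BLOCK_SIZE_W for every wholly unused block after
 *                rCurrentNursery;
 *   allocated -= (rCurrentNursery->start + BLOCK_SIZE_W)
 *                    - rCurrentNursery->free;        // unused tail
 *   allocated -= (pinned_object_block->start + BLOCK_SIZE_W)
 *                    - pinned_object_block->free;    // unused tail
 *
 * i.e. count every nursery block in full, then subtract whatever was
 * never handed out. */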