From: Simon Marlow
Date: Sun, 8 Jun 2008 07:37:54 +0000 (+0000)
Subject: fix allocated blocks calculation, and add more sanity checks
X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=commitdiff_plain;h=a7f2a897bab20f05d4cf5fc8cdae328698c7fc82

fix allocated blocks calculation, and add more sanity checks
---

diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index 3c12e19..0bffa82 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -306,23 +306,30 @@ allocGroup (nat n)
     bdescr *bd, *rem;
     nat ln;
 
-    // Todo: not true in multithreaded GC, where we use allocBlock_sync().
-    // ASSERT_SM_LOCK();
-
     if (n == 0) barf("allocGroup: requested zero blocks");
 
-    n_alloc_blocks += n;
-    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;
-
     if (n >= BLOCKS_PER_MBLOCK)
     {
-        bd = alloc_mega_group(BLOCKS_TO_MBLOCKS(n));
+        nat mblocks;
+
+        mblocks = BLOCKS_TO_MBLOCKS(n);
+
+        // n_alloc_blocks doesn't count the extra blocks we get in a
+        // megablock group.
+        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
+        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;
+
+        bd = alloc_mega_group(mblocks);
         // only the bdescrs of the first MB are required to be initialised
         initGroup(bd);
+        IF_DEBUG(sanity, checkFreeListSanity());
         return bd;
     }
 
+    n_alloc_blocks += n;
+    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;
+
     ln = log_2_ceil(n);
 
     while (free_list[ln] == NULL && ln < MAX_FREE_LIST) {
@@ -461,8 +468,6 @@ freeGroup(bdescr *p)
 
     ASSERT(p->free != (P_)-1);
 
-    n_alloc_blocks -= p->blocks;
-
     p->free = (void *)-1;  /* indicates that this block is free */
     p->step = NULL;
     p->gen_no = 0;
@@ -473,12 +478,21 @@ freeGroup(bdescr *p)
 
     if (p->blocks >= BLOCKS_PER_MBLOCK)
     {
+        nat mblocks;
+
+        mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
         // If this is an mgroup, make sure it has the right number of blocks
-        ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks)));
+        ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));
+
+        n_alloc_blocks -= mblocks * BLOCKS_PER_MBLOCK;
+
         free_mega_group(p);
         return;
     }
 
+    ASSERT(n_alloc_blocks >= p->blocks);
+    n_alloc_blocks -= p->blocks;
+
     // coalesce forwards
     {
         bdescr *next;
diff --git a/rts/sm/BlockAlloc.h b/rts/sm/BlockAlloc.h
index 1472ac6..2d777f7 100644
--- a/rts/sm/BlockAlloc.h
+++ b/rts/sm/BlockAlloc.h
@@ -16,4 +16,7 @@ extern void checkFreeListSanity(void);
 nat countFreeList(void);
 #endif
 
+lnat n_alloc_blocks;   // currently allocated blocks
+lnat hw_alloc_blocks;  // high-water allocated blocks
+
 #endif /* BLOCK_ALLOC_H */
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index d37a076..b76fcf4 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -1308,6 +1308,9 @@ memInventory (rtsBool show)
 #define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))
 
   leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
+
+  ASSERT(n_alloc_blocks == live_blocks);
+
   if (show || leak)
   {
       if (leak) {