X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=blobdiff_plain;f=rts%2Fsm%2FBlockAlloc.c;h=bf7a55e7a96d9d52a418d6495168b7a35c5481e5;hp=a2ccaeb4de0d3b8ab446dc6f58903c07b0581db1;hb=a2a67cd520b9841114d69a87a423dabcb3b4368e;hpb=ae267d04df855051b99218e3712b3f56b8016d56

diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index a2ccaeb..bf7a55e 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -1,6 +1,6 @@
 /* -----------------------------------------------------------------------------
  *
- * (c) The GHC Team 1998-2006
+ * (c) The GHC Team 1998-2008
  *
  * The block allocator and free list manager.
  *
@@ -17,11 +17,10 @@
 #include "PosixSource.h"
 #include "Rts.h"
-#include "RtsFlags.h"
+
+#include "Storage.h"
 #include "RtsUtils.h"
 #include "BlockAlloc.h"
-#include "MBlock.h"
-#include "Storage.h"
 
 #include <string.h>
 
 
@@ -155,19 +154,18 @@ void initBlockAllocator(void)
    -------------------------------------------------------------------------- */
 
 STATIC_INLINE void
-initGroup(nat n, bdescr *head)
+initGroup(bdescr *head)
 {
   bdescr *bd;
-  nat i;
+  nat i, n;
 
-  if (n != 0) {
-      head->free = head->start;
-      head->link = NULL;
-      for (i=1, bd = head+1; i < n; i++, bd++) {
+  n = head->blocks;
+  head->free = head->start;
+  head->link = NULL;
+  for (i=1, bd = head+1; i < n; i++, bd++) {
      bd->free = 0;
      bd->blocks = 0;
      bd->link = head;
-      }
  }
 }
 
@@ -269,7 +267,7 @@ alloc_mega_group (nat mblocks)
             } else {
                 free_mblock_list = bd->link;
             }
-            initGroup(n, bd);
+            initGroup(bd);
             return bd;
         }
         else if (bd->blocks > n)
@@ -285,7 +283,7 @@ alloc_mega_group (nat mblocks)
     {
         // we take our chunk off the end here.
         nat best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks);
-        bd = FIRST_BDESCR(MBLOCK_ROUND_DOWN(best) +
+        bd = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best) +
                           (best_mblocks-mblocks)*MBLOCK_SIZE);
 
         best->blocks = MBLOCK_GROUP_BLOCKS(best_mblocks - mblocks);
@@ -307,36 +305,51 @@ allocGroup (nat n)
     bdescr *bd, *rem;
     nat ln;
 
-    // Todo: not true in multithreaded GC, where we use allocBlock_sync().
-    // ASSERT_SM_LOCK();
-
     if (n == 0) barf("allocGroup: requested zero blocks");
 
-    n_alloc_blocks += n;
-    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;
-
     if (n >= BLOCKS_PER_MBLOCK)
     {
-        bd = alloc_mega_group(BLOCKS_TO_MBLOCKS(n));
+        nat mblocks;
+
+        mblocks = BLOCKS_TO_MBLOCKS(n);
+
+        // n_alloc_blocks doesn't count the extra blocks we get in a
+        // megablock group.
+        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
+        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;
+
+        bd = alloc_mega_group(mblocks);
         // only the bdescrs of the first MB are required to be initialised
-        initGroup(BLOCKS_PER_MBLOCK, bd);
+        initGroup(bd);
+
+        IF_DEBUG(sanity, checkFreeListSanity());
         return bd;
     }
 
+    n_alloc_blocks += n;
+    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;
+
     ln = log_2_ceil(n);
 
-    while (free_list[ln] == NULL && ln < MAX_FREE_LIST) {
+    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
         ln++;
     }
 
     if (ln == MAX_FREE_LIST) {
-
+#if 0
+        if ((mblocks_allocated * MBLOCK_SIZE_W - n_alloc_blocks * BLOCK_SIZE_W) > (1024*1024)/sizeof(W_)) {
+            debugBelch("Fragmentation, wanted %d blocks:", n);
+            RtsFlags.DebugFlags.block_alloc = 1;
+            checkFreeListSanity();
+        }
+#endif
+
         bd = alloc_mega_group(1);
         bd->blocks = n;
-        initGroup(n,bd);                     // we know the group will fit
+        initGroup(bd);                       // we know the group will fit
         rem = bd + n;
         rem->blocks = BLOCKS_PER_MBLOCK-n;
-        initGroup(BLOCKS_PER_MBLOCK-n, rem); // init the slop
+        initGroup(rem);                      // init the slop
         n_alloc_blocks += rem->blocks;
         freeGroup(rem);                      // add the slop on to the free list
         IF_DEBUG(sanity, checkFreeListSanity());
@@ -357,7 +370,7 @@
     {
         barf("allocGroup: free list corrupted");
     }
-    initGroup(n, bd);        // initialise it
+    initGroup(bd);           // initialise it
     IF_DEBUG(sanity, checkFreeListSanity());
     ASSERT(bd->blocks == n);
     return bd;
@@ -401,7 +414,8 @@ coalesce_mblocks (bdescr *p)
     q = p->link;
     if (q != NULL &&
         MBLOCK_ROUND_DOWN(q) ==
-        MBLOCK_ROUND_DOWN(p) + BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) {
+        (StgWord8*)MBLOCK_ROUND_DOWN(p) +
+        BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) {
         // can coalesce
         p->blocks = MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks) +
                                         BLOCKS_TO_MBLOCKS(q->blocks));
@@ -454,8 +468,6 @@ freeGroup(bdescr *p)
 
   ASSERT(p->free != (P_)-1);
 
-  n_alloc_blocks -= p->blocks;
-
  p->free = (void *)-1;  /* indicates that this block is free */
  p->step = NULL;
  p->gen_no = 0;
@@ -466,12 +478,21 @@ freeGroup(bdescr *p)
 
  if (p->blocks >= BLOCKS_PER_MBLOCK)
  {
+      nat mblocks;
+
+      mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
      // If this is an mgroup, make sure it has the right number of blocks
-      ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks)));
+      ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));
+
+      n_alloc_blocks -= mblocks * BLOCKS_PER_MBLOCK;
+
      free_mega_group(p);
      return;
  }
 
+  ASSERT(n_alloc_blocks >= p->blocks);
+  n_alloc_blocks -= p->blocks;
+
  // coalesce forwards
  {
      bdescr *next;
@@ -544,6 +565,9 @@ freeChain_lock(bdescr *bd)
     RELEASE_SM_LOCK;
 }
 
+// splitBlockGroup(bd,B) splits bd in two. Afterward, bd will have B
+// blocks, and a new block descriptor pointing to the remainder is
+// returned.
 bdescr *
 splitBlockGroup (bdescr *bd, nat blocks)
 {
@@ -554,16 +578,21 @@ splitBlockGroup (bdescr *bd, nat blocks)
     }
 
     if (bd->blocks > BLOCKS_PER_MBLOCK) {
-        nat mblocks;
+        nat low_mblocks, high_mblocks;
         void *new_mblock;
         if ((blocks - BLOCKS_PER_MBLOCK) % (MBLOCK_SIZE / BLOCK_SIZE) != 0) {
             barf("splitLargeBlock: not a multiple of a megablock");
         }
-        mblocks = 1 + (blocks - BLOCKS_PER_MBLOCK) / (MBLOCK_SIZE / BLOCK_SIZE);
-        new_mblock = (void *) ((P_)MBLOCK_ROUND_DOWN(bd) + mblocks * MBLOCK_SIZE_W);
+        low_mblocks = 1 + (blocks - BLOCKS_PER_MBLOCK) / (MBLOCK_SIZE / BLOCK_SIZE);
+        high_mblocks = (bd->blocks - blocks) / (MBLOCK_SIZE / BLOCK_SIZE);
+
+        new_mblock = (void *) ((P_)MBLOCK_ROUND_DOWN(bd) + low_mblocks * MBLOCK_SIZE_W);
         initMBlock(new_mblock);
         new_bd = FIRST_BDESCR(new_mblock);
-        new_bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
+        new_bd->blocks = MBLOCK_GROUP_BLOCKS(high_mblocks);
+
+        ASSERT(blocks + new_bd->blocks ==
+               bd->blocks + BLOCKS_PER_MBLOCK - MBLOCK_SIZE/BLOCK_SIZE);
     }
     else
     {
@@ -581,20 +610,21 @@
 static void
 initMBlock(void *mblock)
 {
-  bdescr *bd;
-  void *block;
-
-  /* the first few Bdescr's in a block are unused, so we don't want to
-   * put them all on the free list.
-   */
-  block = FIRST_BLOCK(mblock);
-  bd = FIRST_BDESCR(mblock);
-
-  /* Initialise the start field of each block descriptor
-   */
-  for (; block <= LAST_BLOCK(mblock); bd += 1, block += BLOCK_SIZE) {
-      bd->start = block;
-  }
+    bdescr *bd;
+    StgWord8 *block;
+
+    /* the first few Bdescr's in a block are unused, so we don't want to
+     * put them all on the free list.
+     */
+    block = FIRST_BLOCK(mblock);
+    bd = FIRST_BDESCR(mblock);
+
+    /* Initialise the start field of each block descriptor
+     */
+    for (; block <= (StgWord8*)LAST_BLOCK(mblock); bd += 1,
+         block += BLOCK_SIZE) {
+        bd->start = (void*)block;
+    }
 }
 
 /* -----------------------------------------------------------------------------
@@ -679,7 +709,7 @@ checkFreeListSanity(void)
            if (bd->link != NULL)
            {
                ASSERT (MBLOCK_ROUND_DOWN(bd->link) !=
-                        MBLOCK_ROUND_DOWN(bd) +
+                        (StgWord8*)MBLOCK_ROUND_DOWN(bd) +
                        BLOCKS_TO_MBLOCKS(bd->blocks) * MBLOCK_SIZE);
            }
        }
@@ -706,4 +736,37 @@
     }
     return total_blocks;
 }
+
+void
+markBlocks (bdescr *bd)
+{
+    for (; bd != NULL; bd = bd->link) {
+        bd->flags |= BF_KNOWN;
+    }
+}
+
+void
+reportUnmarkedBlocks (void)
+{
+    void *mblock;
+    bdescr *bd;
+
+    debugBelch("Unreachable blocks:\n");
+    for (mblock = getFirstMBlock(); mblock != NULL;
+         mblock = getNextMBlock(mblock)) {
+        for (bd = FIRST_BDESCR(mblock); bd <= LAST_BDESCR(mblock); ) {
+            if (!(bd->flags & BF_KNOWN) && bd->free != (P_)-1) {
+                debugBelch("  %p\n",bd);
+            }
+            if (bd->blocks >= BLOCKS_PER_MBLOCK) {
+                mblock = (StgWord8*)mblock +
+                    (BLOCKS_TO_MBLOCKS(bd->blocks) - 1) * MBLOCK_SIZE;
+                break;
+            } else {
+                bd += bd->blocks;
+            }
+        }
+    }
+}
+
 #endif
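
The n_alloc_blocks change above only balances because BLOCKS_TO_MBLOCKS and MBLOCK_GROUP_BLOCKS invert each other: allocGroup now charges n_alloc_blocks with mblocks * BLOCKS_PER_MBLOCK when it hands out a megablock group, and freeGroup credits the same amount back after recomputing the megablock count from p->blocks (which the new ASSERT checks equals MBLOCK_GROUP_BLOCKS(mblocks)). The program below is a minimal standalone sketch of that round trip, not part of the patch: the block and megablock constants are assumed illustrative 64-bit values rather than the real ones from Block.h, the two helpers are re-derivations of the macros as understood from their definitions, and the lower-case names are made up for the example.

/* Standalone sketch (assumed constants, hypothetical helper names): checks
 * that converting a megablock count to a block count and back is lossless,
 * which is what makes the n_alloc_blocks debit in allocGroup match the
 * credit in freeGroup. */
#include <assert.h>
#include <stdio.h>

#define BLOCK_SIZE        4096u                       /* assumed */
#define MBLOCK_SIZE       (1024u * 1024u)             /* assumed */
#define BDESCR_PER_MBLOCK (MBLOCK_SIZE / BLOCK_SIZE)  /* blocks spanned by one whole megablock */
#define BLOCKS_PER_MBLOCK 252u                        /* assumed: a few blocks lost to bdescrs */

/* usable blocks in a group spanning m megablocks (cf. MBLOCK_GROUP_BLOCKS) */
static unsigned mblock_group_blocks(unsigned m)
{
    return BLOCKS_PER_MBLOCK + (m - 1) * BDESCR_PER_MBLOCK;
}

/* megablocks needed to hold n blocks, for n >= BLOCKS_PER_MBLOCK
 * (cf. BLOCKS_TO_MBLOCKS, which rounds the overflow up to whole megablocks) */
static unsigned blocks_to_mblocks(unsigned n)
{
    unsigned extra = n - BLOCKS_PER_MBLOCK;
    return 1 + (extra + BDESCR_PER_MBLOCK - 1) / BDESCR_PER_MBLOCK;
}

int main(void)
{
    unsigned m;
    for (m = 1; m < 1000; m++) {
        /* freeGroup sees p->blocks == mblock_group_blocks(m) and must recover
         * exactly m, so the debit and the credit to n_alloc_blocks agree */
        assert(blocks_to_mblocks(mblock_group_blocks(m)) == m);
    }
    printf("accounting identity holds for 1..999 megablocks\n");
    return 0;
}

If the two conversions ever disagreed for some group size, every allocGroup/freeGroup pair taking the megablock path would leave n_alloc_blocks permanently skewed.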