X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FBlockAlloc.c;h=0b8955c8fbb38702dcb264a605767f5024d1cd03;hb=069cd16951bd45a55c94b5adce048c4ec9aad2c8;hp=0bffa82993652afb140743197aa160cefb491a41;hpb=a7f2a897bab20f05d4cf5fc8cdae328698c7fc82;p=ghc-hetmet.git

diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index 0bffa82..0b8955c 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -17,19 +17,16 @@
 #include "PosixSource.h"
 #include "Rts.h"
-#include "RtsFlags.h"
+
+#include "Storage.h"
 #include "RtsUtils.h"
 #include "BlockAlloc.h"
-#include "MBlock.h"
-#include "Storage.h"
+#include "OSMem.h"
 
 #include <string.h>
 
 static void initMBlock(void *mblock);
 
-// The free_list is kept sorted by size, smallest first.
-// In THREADED_RTS mode, the free list is protected by sm_mutex.
-
 /* -----------------------------------------------------------------------------
 
    Implementation notes
@@ -59,7 +56,8 @@ static void initMBlock(void *mblock);
    The following fields are not used by the allocator:
      bd->flags
      bd->gen_no
-     bd->step
+     bd->gen
+     bd->dest
 
    Exceptions: we don't maintain invariants for all the blocks within a
    group on the free list, because it is expensive to modify every
@@ -71,7 +69,7 @@ static void initMBlock(void *mblock);
    ~~~~~~~~~~
 
    Preliminaries:
-     - most allocations are for small blocks
+     - most allocations are for a small number of blocks
      - sometimes the OS gives us new memory backwards in the address
       space, sometimes forwards, so we should not be biased towards
       any particular layout in the address space
@@ -121,8 +119,20 @@ static void initMBlock(void *mblock);
 
    --------------------------------------------------------------------------- */
 
+/* ---------------------------------------------------------------------------
+   WATCH OUT FOR OVERFLOW
+
+   Be very careful with integer overflow here.  If you have an
+   expression like (n_blocks * BLOCK_SIZE), and n_blocks is an int or
+   a nat, then it will very likely overflow on a 64-bit platform.
+   Always cast to StgWord (or W_ for short) first: ((W_)n_blocks * BLOCK_SIZE).
+
+   --------------------------------------------------------------------------- */
+
 #define MAX_FREE_LIST 9
 
+// In THREADED_RTS mode, the free list is protected by sm_mutex.
+
 static bdescr *free_list[MAX_FREE_LIST];
 static bdescr *free_mblock_list;
 
@@ -283,8 +293,8 @@ alloc_mega_group (nat mblocks)
 
     if (best)
     {
         // we take our chunk off the end here.
-        nat best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks);
-        bd = FIRST_BDESCR(MBLOCK_ROUND_DOWN(best) +
+        StgWord best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks);
+        bd = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best) +
                           (best_mblocks-mblocks)*MBLOCK_SIZE);
 
         best->blocks = MBLOCK_GROUP_BLOCKS(best_mblocks - mblocks);
@@ -322,9 +332,7 @@ allocGroup (nat n)
         bd = alloc_mega_group(mblocks);
         // only the bdescrs of the first MB are required to be initialised
         initGroup(bd);
-
-        IF_DEBUG(sanity, checkFreeListSanity());
-        return bd;
+        goto finish;
     }
 
     n_alloc_blocks += n;
@@ -332,13 +340,14 @@ allocGroup (nat n)
 
     ln = log_2_ceil(n);
 
-    while (free_list[ln] == NULL && ln < MAX_FREE_LIST) {
+    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
         ln++;
     }
 
     if (ln == MAX_FREE_LIST) {
-#if 0
-        if ((mblocks_allocated * MBLOCK_SIZE_W - n_alloc_blocks * BLOCK_SIZE_W) > (1024*1024)/sizeof(W_)) {
+#if 0  /* useful for debugging fragmentation */
+        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
+             - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
             debugBelch("Fragmentation, wanted %d blocks:", n);
             RtsFlags.DebugFlags.block_alloc = 1;
             checkFreeListSanity();
@@ -353,8 +362,7 @@ allocGroup (nat n)
         initGroup(rem); // init the slop
         n_alloc_blocks += rem->blocks;
         freeGroup(rem); // add the slop on to the free list
-        IF_DEBUG(sanity, checkFreeListSanity());
-        return bd;
+        goto finish;
     }
 
     bd = free_list[ln];
@@ -362,18 +370,22 @@
     if (bd->blocks == n)            // exactly the right size!
     {
         dbl_link_remove(bd, &free_list[ln]);
+        initGroup(bd);
     }
     else if (bd->blocks >  n)            // block too big...
     {
         bd = split_free_block(bd, n, ln);
+        ASSERT(bd->blocks == n);
+        initGroup(bd);
     }
     else
     {
         barf("allocGroup: free list corrupted");
     }
-    initGroup(bd);              // initialise it
+
+finish:
+    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
     IF_DEBUG(sanity, checkFreeListSanity());
-    ASSERT(bd->blocks == n);
     return bd;
 }
 
@@ -415,7 +427,8 @@ coalesce_mblocks (bdescr *p)
     q = p->link;
     if (q != NULL &&
         MBLOCK_ROUND_DOWN(q) ==
-        MBLOCK_ROUND_DOWN(p) + BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) {
+        (StgWord8*)MBLOCK_ROUND_DOWN(p) +
+        BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) {
         // can coalesce
         p->blocks  = MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks) +
                                          BLOCKS_TO_MBLOCKS(q->blocks));
@@ -469,10 +482,10 @@ freeGroup(bdescr *p)
   ASSERT(p->free != (P_)-1);
 
   p->free = (void *)-1;  /* indicates that this block is free */
-  p->step = NULL;
+  p->gen = NULL;
   p->gen_no = 0;
   /* fill the block group with garbage if sanity checking is on */
-  IF_DEBUG(sanity,memset(p->start, 0xaa, p->blocks * BLOCK_SIZE));
+  IF_DEBUG(sanity,memset(p->start, 0xaa, (W_)p->blocks * BLOCK_SIZE));
 
   if (p->blocks == 0) barf("freeGroup: block size is zero");
 
@@ -565,57 +578,93 @@ freeChain_lock(bdescr *bd)
     RELEASE_SM_LOCK;
 }
 
-bdescr *
-splitBlockGroup (bdescr *bd, nat blocks)
+static void
+initMBlock(void *mblock)
 {
-    bdescr *new_bd;
+    bdescr *bd;
+    StgWord8 *block;
 
-    if (bd->blocks <= blocks) {
-        barf("splitLargeBlock: too small");
+    /* the first few Bdescr's in a block are unused, so we don't want to
+     * put them all on the free list.
+     */
+    block = FIRST_BLOCK(mblock);
+    bd    = FIRST_BDESCR(mblock);
+
+    /* Initialise the start field of each block descriptor
+     */
+    for (; block <= (StgWord8*)LAST_BLOCK(mblock); bd += 1,
+             block += BLOCK_SIZE) {
+        bd->start = (void*)block;
     }
+}
 
-    if (bd->blocks > BLOCKS_PER_MBLOCK) {
-        nat mblocks;
-        void *new_mblock;
-        if ((blocks - BLOCKS_PER_MBLOCK) % (MBLOCK_SIZE / BLOCK_SIZE) != 0) {
-            barf("splitLargeBlock: not a multiple of a megablock");
-        }
-        mblocks = 1 + (blocks - BLOCKS_PER_MBLOCK) / (MBLOCK_SIZE / BLOCK_SIZE);
-        new_mblock = (void *) ((P_)MBLOCK_ROUND_DOWN(bd) + mblocks * MBLOCK_SIZE_W);
-        initMBlock(new_mblock);
-        new_bd = FIRST_BDESCR(new_mblock);
-        new_bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
-    }
-    else
-    {
-        // NB. we're not updating all the bdescrs in the split groups to
-        // point to the new heads, so this can only be used for large
-        // objects which do not start in the non-head block.
-        new_bd = bd + blocks;
-        new_bd->blocks = bd->blocks - blocks;
+/* -----------------------------------------------------------------------------
+   Stats / metrics
+   -------------------------------------------------------------------------- */
+
+nat
+countBlocks(bdescr *bd)
+{
+    nat n;
+    for (n=0; bd != NULL; bd=bd->link) {
+        n += bd->blocks;
     }
-    bd->blocks = blocks;
+    return n;
+}
 
-    return new_bd;
+// (*1) Just like countBlocks, except that we adjust the count for a
+// megablock group so that it doesn't include the extra few blocks
+// that would be taken up by block descriptors in the second and
+// subsequent megablock.  This is so we can tally the count with the
+// number of blocks allocated in the system, for memInventory().
+nat
+countAllocdBlocks(bdescr *bd)
+{
+    nat n;
+    for (n=0; bd != NULL; bd=bd->link) {
+        n += bd->blocks;
+        // hack for megablock groups: see (*1) above
+        if (bd->blocks > BLOCKS_PER_MBLOCK) {
+            n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
+                * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
+        }
+    }
+    return n;
 }
 
-static void
-initMBlock(void *mblock)
+void returnMemoryToOS(nat n /* megablocks */)
 {
-    bdescr *bd;
-    void *block;
-
-    /* the first few Bdescr's in a block are unused, so we don't want to
-     * put them all on the free list.
-     */
-    block = FIRST_BLOCK(mblock);
-    bd = FIRST_BDESCR(mblock);
-
-    /* Initialise the start field of each block descriptor
-     */
-    for (; block <= LAST_BLOCK(mblock); bd += 1, block += BLOCK_SIZE) {
-        bd->start = block;
-    }
+    static bdescr *bd;
+    nat size;
+
+    bd = free_mblock_list;
+    while ((n > 0) && (bd != NULL)) {
+        size = BLOCKS_TO_MBLOCKS(bd->blocks);
+        if (size > n) {
+            nat newSize = size - n;
+            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
+            freeAddr += newSize * MBLOCK_SIZE;
+            bd->blocks = MBLOCK_GROUP_BLOCKS(newSize);
+            freeMBlocks(freeAddr, n);
+            n = 0;
+        }
+        else {
+            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
+            n -= size;
+            bd = bd->link;
+            freeMBlocks(freeAddr, size);
+        }
+    }
+    free_mblock_list = bd;
+
+    osReleaseFreeMemory();
+
+    IF_DEBUG(gc,
+        if (n != 0) {
+            debugBelch("Wanted to free %d more MBlocks than are freeable\n",
+                       n);
+        }
+    );
 }
 
 /* -----------------------------------------------------------------------------
@@ -700,7 +749,7 @@ checkFreeListSanity(void)
 
             if (bd->link != NULL)
             {
                 ASSERT (MBLOCK_ROUND_DOWN(bd->link) !=
-                        MBLOCK_ROUND_DOWN(bd) +
+                        (StgWord8*)MBLOCK_ROUND_DOWN(bd) +
                         BLOCKS_TO_MBLOCKS(bd->blocks) * MBLOCK_SIZE);
             }
         }
@@ -727,4 +776,37 @@ countFreeList(void)
   }
   return total_blocks;
 }
+
+void
+markBlocks (bdescr *bd)
+{
+    for (; bd != NULL; bd = bd->link) {
+        bd->flags |= BF_KNOWN;
+    }
+}
+
+void
+reportUnmarkedBlocks (void)
+{
+    void *mblock;
+    bdescr *bd;
+
+    debugBelch("Unreachable blocks:\n");
+    for (mblock = getFirstMBlock(); mblock != NULL;
+         mblock = getNextMBlock(mblock)) {
+        for (bd = FIRST_BDESCR(mblock); bd <= LAST_BDESCR(mblock); ) {
+            if (!(bd->flags & BF_KNOWN) && bd->free != (P_)-1) {
+                debugBelch("  %p\n",bd);
+            }
+            if (bd->blocks >= BLOCKS_PER_MBLOCK) {
+                mblock = (StgWord8*)mblock +
+                    (BLOCKS_TO_MBLOCKS(bd->blocks) - 1) * MBLOCK_SIZE;
+                break;
+            } else {
+                bd += bd->blocks;
+            }
+        }
+    }
+}
+
 #endif
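
Two standalone sketches may help when reading the allocGroup changes above.

First, the free-list search. free_list[] keeps one doubly-linked list per
power-of-two size class, and allocGroup starts its search at log_2_ceil(n),
the first bucket whose groups are guaranteed to be large enough. The code
below is illustrative C, not code from this patch; log2_ceil here is only a
stand-in for the RTS's log_2_ceil:

#include <stdio.h>

#define MAX_FREE_LIST 9   /* matches the #define in this file */

/* smallest ln such that 2^ln >= n, capped at MAX_FREE_LIST */
static unsigned log2_ceil(unsigned n)
{
    unsigned ln, x;
    for (ln = 0, x = 1; ln < MAX_FREE_LIST; ln++, x <<= 1) {
        if (x >= n) return ln;
    }
    return MAX_FREE_LIST;  /* too big for any bucket */
}

int main(void)
{
    /* A group on free_list[ln] holds at least 2^ln blocks, so any group
     * found in bucket log2_ceil(n) or above can be split to satisfy a
     * request for n blocks.  When the search runs off the end instead
     * (every candidate bucket empty, ln == MAX_FREE_LIST), allocGroup
     * takes a fresh megablock and frees the slop back onto the lists. */
    printf("%u %u %u\n", log2_ceil(1), log2_ceil(5), log2_ceil(200));
    /* prints "0 3 8" */
    return 0;
}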
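Second, the WATCH OUT FOR OVERFLOW warning can be reproduced in a few
lines. This is again a standalone sketch: the typedefs and the 4KB
BLOCK_SIZE are stand-ins for the RTS definitions, chosen only to make the
arithmetic visible. C performs the multiplication at the width of its
operands, so a 32-bit count times BLOCK_SIZE wraps before the result ever
reaches the 64-bit destination; widening one operand first, as the patch
does with (W_)n_blocks * BLOCK_SIZE, gives the intended value:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t nat;   /* stand-in for the RTS "nat" (32 bits) */
typedef uint64_t W_;    /* stand-in for StgWord on a 64-bit platform */

#define BLOCK_SIZE 4096 /* illustrative 4KB block */

int main(void)
{
    nat n_blocks = 2 * 1024 * 1024;       /* 2M blocks = 8GB of heap */

    W_ bad  = n_blocks * BLOCK_SIZE;      /* 32-bit multiply wraps to 0 */
    W_ good = (W_)n_blocks * BLOCK_SIZE;  /* widen first: 8589934592 */

    printf("bad = %llu, good = %llu\n",
           (unsigned long long)bad, (unsigned long long)good);
    return 0;
}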