X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FBlockAlloc.c;h=8eaba72b723275564501f6395021363a2fd6bc1f;hb=3fb074b5fcfd91fe0d37af83f221450ac4734908;hp=0bffa82993652afb140743197aa160cefb491a41;hpb=a7f2a897bab20f05d4cf5fc8cdae328698c7fc82;p=ghc-hetmet.git

diff --git a/rts/sm/BlockAlloc.c b/rts/sm/BlockAlloc.c
index 0bffa82..8eaba72 100644
--- a/rts/sm/BlockAlloc.c
+++ b/rts/sm/BlockAlloc.c
@@ -17,19 +17,16 @@
 
 #include "PosixSource.h"
 #include "Rts.h"
-#include "RtsFlags.h"
+
+#include "Storage.h"
 #include "RtsUtils.h"
 #include "BlockAlloc.h"
-#include "MBlock.h"
-#include "Storage.h"
+#include "OSMem.h"
 
 #include <string.h>
 
 static void initMBlock(void *mblock);
 
-// The free_list is kept sorted by size, smallest first.
-// In THREADED_RTS mode, the free list is protected by sm_mutex.
-
 /* -----------------------------------------------------------------------------
 
   Implementation notes
@@ -59,7 +56,8 @@ static void initMBlock(void *mblock);
   The following fields are not used by the allocator:
     bd->flags
     bd->gen_no
-    bd->step
+    bd->gen
+    bd->dest
 
   Exceptions: we don't maintain invariants for all the blocks within a
   group on the free list, because it is expensive to modify every
@@ -71,7 +69,7 @@ static void initMBlock(void *mblock);
   ~~~~~~~~~~
   Preliminaries:
-    - most allocations are for small blocks
+    - most allocations are for a small number of blocks
     - sometimes the OS gives us new memory backwards in the address
       space, sometimes forwards, so we should not be biased towards
      any particular layout in the address space
@@ -121,8 +119,20 @@ static void initMBlock(void *mblock);
   --------------------------------------------------------------------------- */
 
+/* ---------------------------------------------------------------------------
+   WATCH OUT FOR OVERFLOW
+
+   Be very careful with integer overflow here. If you have an
+   expression like (n_blocks * BLOCK_SIZE), and n_blocks is an int or
+   a nat, then it will very likely overflow on a 64-bit platform.
+   Always cast to StgWord (or W_ for short) first: ((W_)n_blocks * BLOCK_SIZE).
+
+   --------------------------------------------------------------------------- */
+
 #define MAX_FREE_LIST 9
 
+// In THREADED_RTS mode, the free list is protected by sm_mutex.
+
 static bdescr *free_list[MAX_FREE_LIST];
 static bdescr *free_mblock_list;
 
@@ -283,8 +293,8 @@ alloc_mega_group (nat mblocks)
 
     if (best)
     {
        // we take our chunk off the end here.
- nat best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks); - bd = FIRST_BDESCR(MBLOCK_ROUND_DOWN(best) + + StgWord best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks); + bd = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best) + (best_mblocks-mblocks)*MBLOCK_SIZE); best->blocks = MBLOCK_GROUP_BLOCKS(best_mblocks - mblocks); @@ -332,13 +342,13 @@ allocGroup (nat n) ln = log_2_ceil(n); - while (free_list[ln] == NULL && ln < MAX_FREE_LIST) { + while (ln < MAX_FREE_LIST && free_list[ln] == NULL) { ln++; } if (ln == MAX_FREE_LIST) { #if 0 - if ((mblocks_allocated * MBLOCK_SIZE_W - n_alloc_blocks * BLOCK_SIZE_W) > (1024*1024)/sizeof(W_)) { + if (((W_)mblocks_allocated * MBLOCK_SIZE_W - (W_)n_alloc_blocks * BLOCK_SIZE_W) > (1024*1024)/sizeof(W_)) { debugBelch("Fragmentation, wanted %d blocks:", n); RtsFlags.DebugFlags.block_alloc = 1; checkFreeListSanity(); @@ -415,7 +425,8 @@ coalesce_mblocks (bdescr *p) q = p->link; if (q != NULL && MBLOCK_ROUND_DOWN(q) == - MBLOCK_ROUND_DOWN(p) + BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) { + (StgWord8*)MBLOCK_ROUND_DOWN(p) + + BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) { // can coalesce p->blocks = MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks) + BLOCKS_TO_MBLOCKS(q->blocks)); @@ -469,10 +480,10 @@ freeGroup(bdescr *p) ASSERT(p->free != (P_)-1); p->free = (void *)-1; /* indicates that this block is free */ - p->step = NULL; + p->gen = NULL; p->gen_no = 0; /* fill the block group with garbage if sanity checking is on */ - IF_DEBUG(sanity,memset(p->start, 0xaa, p->blocks * BLOCK_SIZE)); + IF_DEBUG(sanity,memset(p->start, 0xaa, (W_)p->blocks * BLOCK_SIZE)); if (p->blocks == 0) barf("freeGroup: block size is zero"); @@ -565,6 +576,9 @@ freeChain_lock(bdescr *bd) RELEASE_SM_LOCK; } +// splitBlockGroup(bd,B) splits bd in two. Afterward, bd will have B +// blocks, and a new block descriptor pointing to the remainder is +// returned. bdescr * splitBlockGroup (bdescr *bd, nat blocks) { @@ -575,16 +589,21 @@ splitBlockGroup (bdescr *bd, nat blocks) } if (bd->blocks > BLOCKS_PER_MBLOCK) { - nat mblocks; + nat low_mblocks, high_mblocks; void *new_mblock; if ((blocks - BLOCKS_PER_MBLOCK) % (MBLOCK_SIZE / BLOCK_SIZE) != 0) { barf("splitLargeBlock: not a multiple of a megablock"); } - mblocks = 1 + (blocks - BLOCKS_PER_MBLOCK) / (MBLOCK_SIZE / BLOCK_SIZE); - new_mblock = (void *) ((P_)MBLOCK_ROUND_DOWN(bd) + mblocks * MBLOCK_SIZE_W); + low_mblocks = 1 + (blocks - BLOCKS_PER_MBLOCK) / (MBLOCK_SIZE / BLOCK_SIZE); + high_mblocks = (bd->blocks - blocks) / (MBLOCK_SIZE / BLOCK_SIZE); + + new_mblock = (void *) ((P_)MBLOCK_ROUND_DOWN(bd) + (W_)low_mblocks * MBLOCK_SIZE_W); initMBlock(new_mblock); new_bd = FIRST_BDESCR(new_mblock); - new_bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks); + new_bd->blocks = MBLOCK_GROUP_BLOCKS(high_mblocks); + + ASSERT(blocks + new_bd->blocks == + bd->blocks + BLOCKS_PER_MBLOCK - MBLOCK_SIZE/BLOCK_SIZE); } else { @@ -602,20 +621,88 @@ splitBlockGroup (bdescr *bd, nat blocks) static void initMBlock(void *mblock) { - bdescr *bd; - void *block; - - /* the first few Bdescr's in a block are unused, so we don't want to - * put them all on the free list. - */ - block = FIRST_BLOCK(mblock); - bd = FIRST_BDESCR(mblock); - - /* Initialise the start field of each block descriptor - */ - for (; block <= LAST_BLOCK(mblock); bd += 1, block += BLOCK_SIZE) { - bd->start = block; - } + bdescr *bd; + StgWord8 *block; + + /* the first few Bdescr's in a block are unused, so we don't want to + * put them all on the free list. 
+ */ + block = FIRST_BLOCK(mblock); + bd = FIRST_BDESCR(mblock); + + /* Initialise the start field of each block descriptor + */ + for (; block <= (StgWord8*)LAST_BLOCK(mblock); bd += 1, + block += BLOCK_SIZE) { + bd->start = (void*)block; + } +} + +/* ----------------------------------------------------------------------------- + Stats / metrics + -------------------------------------------------------------------------- */ + +nat +countBlocks(bdescr *bd) +{ + nat n; + for (n=0; bd != NULL; bd=bd->link) { + n += bd->blocks; + } + return n; +} + +// (*1) Just like countBlocks, except that we adjust the count for a +// megablock group so that it doesn't include the extra few blocks +// that would be taken up by block descriptors in the second and +// subsequent megablock. This is so we can tally the count with the +// number of blocks allocated in the system, for memInventory(). +nat +countAllocdBlocks(bdescr *bd) +{ + nat n; + for (n=0; bd != NULL; bd=bd->link) { + n += bd->blocks; + // hack for megablock groups: see (*1) above + if (bd->blocks > BLOCKS_PER_MBLOCK) { + n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK) + * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE)); + } + } + return n; +} + +void returnMemoryToOS(nat n /* megablocks */) +{ + static bdescr *bd; + nat size; + + bd = free_mblock_list; + while ((n > 0) && (bd != NULL)) { + size = BLOCKS_TO_MBLOCKS(bd->blocks); + if (size > n) { + nat newSize = size - n; + char *freeAddr = MBLOCK_ROUND_DOWN(bd->start); + freeAddr += newSize * MBLOCK_SIZE; + bd->blocks = MBLOCK_GROUP_BLOCKS(newSize); + freeMBlocks(freeAddr, n); + n = 0; + } + else { + char *freeAddr = MBLOCK_ROUND_DOWN(bd->start); + n -= size; + bd = bd->link; + freeMBlocks(freeAddr, size); + } + } + free_mblock_list = bd; + + IF_DEBUG(gc, + if (n != 0) { + debugBelch("Wanted to free %d more MBlocks than are freeable\n", + n); + } + ); } /* ----------------------------------------------------------------------------- @@ -700,7 +787,7 @@ checkFreeListSanity(void) if (bd->link != NULL) { ASSERT (MBLOCK_ROUND_DOWN(bd->link) != - MBLOCK_ROUND_DOWN(bd) + + (StgWord8*)MBLOCK_ROUND_DOWN(bd) + BLOCKS_TO_MBLOCKS(bd->blocks) * MBLOCK_SIZE); } } @@ -727,4 +814,37 @@ countFreeList(void) } return total_blocks; } + +void +markBlocks (bdescr *bd) +{ + for (; bd != NULL; bd = bd->link) { + bd->flags |= BF_KNOWN; + } +} + +void +reportUnmarkedBlocks (void) +{ + void *mblock; + bdescr *bd; + + debugBelch("Unreachable blocks:\n"); + for (mblock = getFirstMBlock(); mblock != NULL; + mblock = getNextMBlock(mblock)) { + for (bd = FIRST_BDESCR(mblock); bd <= LAST_BDESCR(mblock); ) { + if (!(bd->flags & BF_KNOWN) && bd->free != (P_)-1) { + debugBelch(" %p\n",bd); + } + if (bd->blocks >= BLOCKS_PER_MBLOCK) { + mblock = (StgWord8*)mblock + + (BLOCKS_TO_MBLOCKS(bd->blocks) - 1) * MBLOCK_SIZE; + break; + } else { + bd += bd->blocks; + } + } + } +} + #endif
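
A note on the "WATCH OUT FOR OVERFLOW" comment that this patch adds: the standalone sketch below is not part of the patch or of the RTS; the 4096-byte block size, the sample block count, and all names in it are assumptions made purely for illustration. It shows the wrap-around the comment warns about and the widening cast that the patch itself applies in expressions such as ((W_)mblocks_allocated * MBLOCK_SIZE_W).

/* Hypothetical, self-contained example (plain C, not RTS code): a 32-bit
 * block counter multiplied by a byte size wraps at 2^32 unless the
 * counter is widened to 64 bits first. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_BLOCK_SIZE 4096u          /* assumed block size in bytes */

int main(void)
{
    uint32_t n_blocks = 2000000;          /* roughly 7.6 GiB worth of blocks */

    /* 32-bit multiply: 2000000 * 4096 = 8192000000, which wraps modulo
     * 2^32 and silently becomes 3897032704. */
    uint32_t wrong = n_blocks * EXAMPLE_BLOCK_SIZE;

    /* Widen before multiplying, as the RTS comment recommends with
     * ((W_)n_blocks * BLOCK_SIZE): the arithmetic is then done at
     * 64-bit width and the result is correct. */
    uint64_t right = (uint64_t)n_blocks * EXAMPLE_BLOCK_SIZE;

    printf("wrong: %" PRIu32 " bytes\n", wrong);
    printf("right: %" PRIu64 " bytes\n", right);
    return 0;
}

The same hazard is why the patch widens best_mblocks from nat to StgWord in alloc_mega_group and adds (W_) casts in freeGroup and splitBlockGroup.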