X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FMBlock.h;h=d3214c8311a022778950ec688d7ed42fed984895;hb=98ae15bfb073289e2e65aaf288a83fac71549a9d;hp=de5d0b4373184669fe775601143c40c7dc3b4e6a;hpb=59940493f000b3542a93d71e925d37a3f61b2f1a;p=ghc-hetmet.git

diff --git a/ghc/rts/MBlock.h b/ghc/rts/MBlock.h
index de5d0b4..d3214c8 100644
--- a/ghc/rts/MBlock.h
+++ b/ghc/rts/MBlock.h
@@ -1,20 +1,20 @@
 /* -----------------------------------------------------------------------------
- * $Id: MBlock.h,v 1.17 2003/07/30 10:38:42 simonmar Exp $
  *
- * (c) The GHC Team, 1998-1999
+ * (c) The GHC Team, 1998-2005
  *
  * MegaBlock Allocator interface.
  *
  * ---------------------------------------------------------------------------*/
 
-#ifndef __MBLOCK_H__
-#define __MBLOCK_H__
-extern lnat mblocks_allocated;
+#ifndef MBLOCK_H
+#define MBLOCK_H
+
+extern lnat RTS_VAR(mblocks_allocated);
 
 extern void * getMBlock(void);
 extern void * getMBlocks(nat n);
 
-#if osf3_TARGET_OS
+#if osf3_HOST_OS
 /* ToDo: Perhaps by adjusting this value we can make linking without
  * -static work (i.e., not generate a core-dumping executable)?
  */
 #if SIZEOF_VOID_P == 8
@@ -47,31 +47,43 @@ extern void * getMBlocks(nat n);
    will be quickly cached (indeed, performance measurements showed no
    measurable difference between doing the table lookup and using a
    constant comparison).
+
+   On 64-bit machines, we cache one 12-bit block map that describes
+   4096 megablocks or 4GB of memory.  If HEAP_ALLOCED is called for
+   an address that is not in the cache, it calls slowIsHeapAlloced
+   (see MBlock.c) which will find the block map for the 4GB block in
+   question.
    -------------------------------------------------------------------------- */
 
+#if SIZEOF_VOID_P == 4
 extern StgWord8 mblock_map[];
 
-#if SIZEOF_VOID_P == 4
 /* On a 32-bit machine a 4KB table is always sufficient */
 # define MBLOCK_MAP_SIZE      4096
 # define MBLOCK_MAP_ENTRY(p)  ((StgWord)(p) >> MBLOCK_SHIFT)
 # define HEAP_ALLOCED(p)      mblock_map[MBLOCK_MAP_ENTRY(p)]
-# define MARK_HEAP_ALLOCED(p) (mblock_map[MBLOCK_MAP_ENTRY(p)] = 1)
 
-#elif defined(ia64_TARGET_ARCH)
-/* Instead of trying to cover the whole 64-bit address space (which would
- * require a better data structure), we assume that mmap allocates mappings
- * from the bottom of region 1, and track some portion of address space from
- * there upwards (currently 4GB). */
+#elif SIZEOF_VOID_P == 8
+
 # define MBLOCK_MAP_SIZE      4096
-# define MBLOCK_MAP_ENTRY(p)  (((StgWord)(p) - (1UL << 61)) >> MBLOCK_SHIFT)
-# define HEAP_ALLOCED(p)      ((MBLOCK_MAP_ENTRY(p) < MBLOCK_MAP_SIZE) \
-                               && mblock_map[MBLOCK_MAP_ENTRY(p)])
-# define MARK_HEAP_ALLOCED(p) ((MBLOCK_MAP_ENTRY(p) < MBLOCK_MAP_SIZE) \
-                               && (mblock_map[MBLOCK_MAP_ENTRY(p)] = 1))
+# define MBLOCK_MAP_ENTRY(p)  (((StgWord)(p) & 0xffffffff) >> MBLOCK_SHIFT)
+
+typedef struct {
+    StgWord32 addrHigh32;
+    StgWord8  mblocks[MBLOCK_MAP_SIZE];
+} MBlockMap;
+
+extern MBlockMap *mblock_cache;
+
+StgBool slowIsHeapAlloced(void *p);
+
+# define HEAP_ALLOCED(p)                                        \
+        ( ((((StgWord)(p)) >> 32) == mblock_cache->addrHigh32)  \
+        ? mblock_cache->mblocks[MBLOCK_MAP_ENTRY(p)]            \
+        : slowIsHeapAlloced(p) )
 
 #else
 # error HEAP_ALLOCED not defined
 #endif
 
-#endif // __MBLOCK_H__
+#endif /* MBLOCK_H */
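
Note: the patch above replaces the ia64-specific single-table HEAP_ALLOCED scheme with a
cached per-4GB block map on 64-bit machines.  The standalone C sketch below (not part of
the patch) illustrates the shape of that lookup: one comparison of the address's high 32
bits against the cached map, then a byte-table index, with a fall-back slow path on a
cache miss.  It assumes 64-bit pointers and MBLOCK_SHIFT == 20 (1MB megablocks); the
names map_entry, heap_alloced and slow_is_heap_alloced and the main() driver are
illustrative stand-ins, not the RTS implementation (the real slowIsHeapAlloced lives in
MBlock.c).

/* Illustrative sketch only; compile as C99 on a 64-bit platform. */

#include <stdint.h>
#include <stdio.h>

#define MBLOCK_SHIFT    20                 /* assumed megablock size: 1MB        */
#define MBLOCK_MAP_SIZE 4096               /* 4GB / 1MB = 4096 map entries       */

typedef struct {
    uint32_t addrHigh32;                   /* which 4GB region this map covers   */
    uint8_t  mblocks[MBLOCK_MAP_SIZE];     /* 1 = megablock belongs to the heap  */
} MBlockMap;

static MBlockMap cache;                    /* stands in for mblock_cache         */

/* Index of p's megablock within its 4GB region, as in MBLOCK_MAP_ENTRY. */
static size_t map_entry(const void *p)
{
    return ((uint64_t)(uintptr_t)p & 0xffffffffu) >> MBLOCK_SHIFT;
}

/* Hypothetical stand-in for slowIsHeapAlloced: the real one (MBlock.c) finds,
 * and switches the cache to, the block map for p's 4GB region. */
static int slow_is_heap_alloced(const void *p)
{
    (void)p;
    return 0;
}

/* Same shape as the 64-bit HEAP_ALLOCED macro in the patch: compare the high
 * 32 bits against the cache, then do a byte-table lookup on the fast path. */
static int heap_alloced(const void *p)
{
    if (((uint64_t)(uintptr_t)p >> 32) == cache.addrHigh32)
        return cache.mblocks[map_entry(p)];
    return slow_is_heap_alloced(p);
}

int main(void)
{
    char *mb = (char *)(uintptr_t)0x140000000ull;   /* made-up megablock address */

    /* Pretend the allocator just handed this megablock to the heap. */
    cache.addrHigh32 = (uint32_t)((uint64_t)(uintptr_t)mb >> 32);
    cache.mblocks[map_entry(mb)] = 1;

    printf("%d\n", heap_alloced(mb));                /* 1: cached hit            */
    printf("%d\n", heap_alloced(mb + (2 << 20)));    /* 0: megablock not marked  */
    return 0;
}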