X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FMBlock.c;h=ceb8856c38e58f84c3f315e0b740b7c03bf70ab5;hb=25efe5a45ff5579172312ca14cfd89443ba931dc;hp=7bb39d799f308efb2aff4f4aeb7026dff463f546;hpb=b38dcbd46c8cdd7d074e54c86fc36e3405aa5260;p=ghc-hetmet.git

diff --git a/ghc/rts/MBlock.c b/ghc/rts/MBlock.c
index 7bb39d7..ceb8856 100644
--- a/ghc/rts/MBlock.c
+++ b/ghc/rts/MBlock.c
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: MBlock.c,v 1.28 2002/04/12 04:00:35 sof Exp $
+ * $Id: MBlock.c,v 1.43 2003/03/20 15:43:31 simonmar Exp $
  *
  * (c) The GHC Team 1998-1999
  *
@@ -18,80 +18,203 @@
 #include "MBlock.h"
 #include "BlockAlloc.h"
 
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
 #ifdef HAVE_UNISTD_H
 #include <unistd.h>
 #endif
-
 #ifdef HAVE_SYS_TYPES_H
 #include <sys/types.h>
 #endif
-
 #ifndef mingw32_TARGET_OS
 # ifdef HAVE_SYS_MMAN_H
 # include <sys/mman.h>
 # endif
 #endif
-
 #ifdef HAVE_FCNTL_H
 #include <fcntl.h>
 #endif
-
 #if HAVE_WINDOWS_H
 #include <windows.h>
 #endif
+#if darwin_TARGET_OS
+#include <mach/mach.h>
+#endif
+
+#include <errno.h>
 
 lnat mblocks_allocated = 0;
 
+/* -----------------------------------------------------------------------------
+   The MBlock Map: provides our implementation of HEAP_ALLOCED()
+   -------------------------------------------------------------------------- */
+
+#ifdef MBLOCK_MAP_SIZE
+StgWord8 mblock_map[MBLOCK_MAP_SIZE]; // initially all zeros
+#endif
+
+/* -----------------------------------------------------------------------------
+   Allocate new mblock(s)
+   -------------------------------------------------------------------------- */
+
 void *
 getMBlock(void)
 {
   return getMBlocks(1);
 }
 
+/* -----------------------------------------------------------------------------
+   The mmap() method
+
+   On Unix-like systems, we use mmap() to allocate our memory.  We
+   want memory in chunks of MBLOCK_SIZE, and aligned on an MBLOCK_SIZE
+   boundary.  The mmap() interface doesn't give us this level of
+   control, so we have to use some heuristics.
+
+   In the general case, if we want a block of n megablocks, then we
+   allocate n+1 and trim off the slop from either side (using
+   munmap()) to get an aligned chunk of size n.  However, the next
+   time we'll try to allocate directly after the previously allocated
+   chunk, on the grounds that this is aligned and likely to be free.
+   If it turns out that we were wrong, we have to munmap() and try
+   again using the general method.
+   -------------------------------------------------------------------------- */
+
 #if !defined(mingw32_TARGET_OS) && !defined(cygwin32_TARGET_OS)
+
+// A wrapper around mmap(), to abstract away from OS differences in
+// the mmap() interface.
+
+static void *
+my_mmap (void *addr, int size)
+{
+    void *ret;
+
+#ifdef solaris2_TARGET_OS
+    {
+        int fd = open("/dev/zero",O_RDONLY);
+        ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+        close(fd);
+    }
+#elif hpux_TARGET_OS
+    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
+               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+#elif darwin_TARGET_OS
+    // Without MAP_FIXED, Apple's mmap ignores addr.
+    // With MAP_FIXED, it overwrites already mapped regions, which is dangerous;
+    // mmap(0, ... MAP_FIXED ...) is worst of all: It unmaps the program text
+    // and replaces it with zeroes, causing instant death.
+    // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
+    // Let's just use the underlying Mach Microkernel calls directly,
+    // they're much nicer.
+
+    kern_return_t err;
+    ret = addr;
+    if(addr)            // try to allocate at address
+        err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, FALSE);
+    if(!addr || err)    // try to allocate anywhere
+        err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, TRUE);
+
+    if(err)             // don't know what the error codes mean exactly
+        barf("memory allocation failed (requested %d bytes)", size);
+    else
+        vm_protect(mach_task_self(),ret,size,FALSE,VM_PROT_READ|VM_PROT_WRITE);
+#else
+    ret = mmap(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
+               MAP_ANON | MAP_PRIVATE, -1, 0);
+#endif
+
+    if (ret == (void *)-1) {
+        if (errno == ENOMEM) {
+            prog_belch("out of memory (requested %d bytes)", size);
+            stg_exit(EXIT_FAILURE);
+        } else {
+            barf("getMBlock: mmap failed");
+        }
+    }
+
+    return ret;
+}
+
+// Implements the general case: allocate a chunk of memory of 'size'
+// mblocks.
+
+static void *
+gen_map_mblocks (int size)
+{
+    int slop;
+    void *ret;
+
+    // Try to map a larger block, and take the aligned portion from
+    // it (unmap the rest).
+    size += MBLOCK_SIZE;
+    ret = my_mmap(0, size);
+
+    // unmap the slop bits around the chunk we allocated
+    slop = (W_)ret & MBLOCK_MASK;
+
+    if (munmap(ret, MBLOCK_SIZE - slop) == -1) {
+        barf("gen_map_mblocks: munmap failed");
+    }
+    if (slop > 0 && munmap(ret+size-slop, slop) == -1) {
+        barf("gen_map_mblocks: munmap failed");
+    }
+
+    // ToDo: if we happened to get an aligned block, then don't
+    // unmap the excess, just use it.
+
+    // next time, try after the block we just got.
+    ret += MBLOCK_SIZE - slop;
+    return ret;
+}
+
+
+// The external interface: allocate 'n' mblocks, and return the
+// address.
+
 void *
 getMBlocks(nat n)
 {
   static caddr_t next_request = (caddr_t)HEAP_BASE;
   caddr_t ret;
   lnat size = MBLOCK_SIZE * n;
+  nat i;
 
-#ifdef solaris2_TARGET_OS
-  {
-      int fd = open("/dev/zero",O_RDONLY);
-      ret = mmap(next_request, size, PROT_READ | PROT_WRITE,
-                 MAP_FIXED | MAP_PRIVATE, fd, 0);
-      close(fd);
-  }
-#elif hpux_TARGET_OS
-  ret = mmap(next_request, size, PROT_READ | PROT_WRITE,
-             MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-#elif darwin_TARGET_OS
-  ret = mmap(next_request, size, PROT_READ | PROT_WRITE,
-             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
-#else
-  ret = mmap(next_request, size, PROT_READ | PROT_WRITE,
-             MAP_ANON | MAP_PRIVATE, -1, 0);
+  if (next_request == 0) {
+      // use gen_map_mblocks the first time.
+      ret = gen_map_mblocks(size);
+  } else {
+      ret = my_mmap(next_request, size);
+
+      if (((W_)ret & MBLOCK_MASK) != 0) {
+          // misaligned block!
+#if 0 // defined(DEBUG)
+          belch("warning: getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
 #endif
-
-  if (ret == (void *)-1) {
-    if (errno == ENOMEM) {
-      barf("getMBlock: out of memory (blocks requested: %d)", n);
-    } else {
-      barf("GetMBlock: mmap failed");
-    }
-  }
-
-  if (((W_)ret & MBLOCK_MASK) != 0) {
-    barf("GetMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
+          // unmap this block...
+          if (munmap(ret, size) == -1) {
+              barf("getMBlock: munmap failed");
+          }
+          // and do it the hard way
+          ret = gen_map_mblocks(size);
+      }
   }
 
+  // Next time, we'll try to allocate right after the block we just got.
+  // ToDo: check that we haven't already grabbed the memory at next_request
+  next_request = ret + size;
+
   IF_DEBUG(gc,fprintf(stderr,"Allocated %d megablock(s) at %p\n",n,ret));
 
-  next_request += size;
+  // fill in the table
+  for (i = 0; i < n; i++) {
+      MARK_HEAP_ALLOCED( ret + i * MBLOCK_SIZE );
+  }
 
   mblocks_allocated += n;
-
+
   return ret;
 }
 
@@ -121,21 +244,13 @@ char* end_non_committed = (char*)0;
 /* Number of bytes reserved */
 static unsigned long size_reserved_pool = SIZE_RESERVED_POOL;
 
-/* This predicate should be inlined, really. */
-/* TODO: this only works for a single chunk */
-int
-is_heap_alloced(const void* x)
-{
-  return (((char*)(x) >= base_non_committed) &&
-          ((char*)(x) <= end_non_committed));
-}
-
 void *
 getMBlocks(nat n)
 {
   static char* base_mblocks       = (char*)0;
   static char* next_request       = (char*)0;
   void* ret                       = (void*)0;
+  int i;
 
   lnat size = MBLOCK_SIZE * n;
 
@@ -160,7 +275,7 @@ getMBlocks(nat n)
   } else {
     end_non_committed = (char*)base_non_committed + (unsigned long)size_reserved_pool;
     /* The returned pointer is not aligned on a mega-block boundary. Make it. */
-    base_mblocks = (char*)((unsigned long)base_non_committed & (unsigned long)0xfff00000) + MBLOCK_SIZE;
+    base_mblocks = (char*)((unsigned long)base_non_committed & (unsigned long)~MBLOCK_MASK) + MBLOCK_SIZE;
 #  if 0
     fprintf(stderr, "getMBlocks: Dropping %d bytes off of 256M chunk\n",
             (unsigned)base_mblocks - (unsigned)base_non_committed);
@@ -196,6 +311,11 @@ getMBlocks(nat n)
 
   mblocks_allocated += n;
 
+  // fill in the table
+  for (i = 0; i < n; i++) {
+      MARK_HEAP_ALLOCED ( ret + i * MBLOCK_SIZE );
+  }
+
   return ret;
 }
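
The "mmap() method" comment in the new code above describes the trick that gen_map_mblocks() implements: map one megablock more than requested, then munmap() the misaligned slop at the front and back so that what remains starts on an MBLOCK_SIZE boundary. The following is a minimal standalone sketch of the same idea, not the RTS code: MBLOCK_SIZE is hard-wired to 1 MB (the RTS megablock size), the helper name aligned_map is made up for illustration, and error handling is reduced to returning NULL.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #ifndef MAP_ANON
    #define MAP_ANON MAP_ANONYMOUS
    #endif

    #define MBLOCK_SIZE  (1024 * 1024)                 /* 1 MB megablocks */
    #define MBLOCK_MASK  ((uintptr_t)MBLOCK_SIZE - 1)

    /* Return `size` bytes aligned to MBLOCK_SIZE by over-allocating one
     * extra megablock and unmapping the misaligned slop on either side. */
    static void *
    aligned_map(size_t size)
    {
        size_t mapped = size + MBLOCK_SIZE;
        char *p = mmap(NULL, mapped, PROT_READ | PROT_WRITE,
                       MAP_ANON | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED) return NULL;

        size_t slop = (uintptr_t)p & MBLOCK_MASK;      /* distance past the previous boundary */
        size_t lead = slop ? MBLOCK_SIZE - slop : 0;   /* bytes before the next aligned boundary */

        if (lead > 0)
            munmap(p, lead);                           /* trim the front */
        if (mapped - lead - size > 0)
            munmap(p + lead + size, mapped - lead - size);  /* trim the tail */

        return p + lead;                               /* aligned chunk of exactly `size` bytes */
    }

    int main(void)
    {
        void *blk = aligned_map(2 * MBLOCK_SIZE);
        printf("got %p (aligned: %s)\n", blk,
               ((uintptr_t)blk & MBLOCK_MASK) == 0 ? "yes" : "no");
        return 0;
    }

The RTS version adds one refinement on top of this: it remembers where the last mapping ended (next_request) and first tries a plain my_mmap() there, falling back to the over-allocate-and-trim path only when the result comes back misaligned.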
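
The mblock_map / MARK_HEAP_ALLOCED additions in the diff implement HEAP_ALLOCED() as a one-byte-per-megablock lookup table. A rough illustration of the idea, assuming a 32-bit address space and 1 MB megablocks; the real constants and macros live in MBlock.h, and the names below are illustrative only:

    #include <stdio.h>
    #include <stdint.h>

    #define MBLOCK_SHIFT    20                              /* 1 MB megablocks */
    #define MBLOCK_SIZE     (1UL << MBLOCK_SHIFT)
    #define MBLOCK_MAP_SIZE (1UL << (32 - MBLOCK_SHIFT))    /* one byte per megablock
                                                               of a 32-bit address space */

    static uint8_t mblock_map[MBLOCK_MAP_SIZE];             /* initially all zeros */

    /* Mark the megablock containing `p` as heap memory. */
    static void mark_heap_alloced(void *p) {
        mblock_map[(uintptr_t)p >> MBLOCK_SHIFT] = 1;
    }

    /* Constant-time test: does `p` point into an allocated megablock? */
    static int heap_alloced(void *p) {
        return mblock_map[(uintptr_t)p >> MBLOCK_SHIFT];
    }

    int main(void)
    {
        /* synthetic addresses inside megablock number 5 */
        mark_heap_alloced((void *)(5 * MBLOCK_SIZE));
        printf("%d\n", heap_alloced((void *)(5 * MBLOCK_SIZE + 123)));   /* 1 */
        printf("%d\n", heap_alloced((void *)(7 * MBLOCK_SIZE)));         /* 0 */
        return 0;
    }

A flat byte map like this only covers a 32-bit address space, which is presumably why the table in the diff is guarded by #ifdef MBLOCK_MAP_SIZE rather than being defined unconditionally.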