X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FMBlock.c;h=d0ead3fc7d8574d5b92d3b5460c595d5afb1b344;hb=272a418428beede04a9c4ae027474878c59d6ca1;hp=7ada8e9496cdfef97f0cbfcee6f18ef56739beea;hpb=24889e6abe3ef8a84809fc55ad494933df767d3d;p=ghc-hetmet.git

diff --git a/ghc/rts/MBlock.c b/ghc/rts/MBlock.c
index 7ada8e9..d0ead3f 100644
--- a/ghc/rts/MBlock.c
+++ b/ghc/rts/MBlock.c
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: MBlock.c,v 1.33 2002/10/25 12:56:34 simonmar Exp $
+ * $Id: MBlock.c,v 1.50 2003/10/31 16:21:27 sof Exp $
  *
  * (c) The GHC Team 1998-1999
  *
@@ -21,6 +21,9 @@
 #ifdef HAVE_STDLIB_H
 #include <stdlib.h>
 #endif
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
 #ifdef HAVE_UNISTD_H
 #include <unistd.h>
 #endif
@@ -38,6 +41,9 @@
 #if HAVE_WINDOWS_H
 #include <windows.h>
 #endif
+#if darwin_TARGET_OS
+#include <mach/vm_map.h>
+#endif
 
 #include <errno.h>
 
@@ -47,13 +53,9 @@ lnat mblocks_allocated = 0;
    The MBlock Map: provides our implementation of HEAP_ALLOCED()
    -------------------------------------------------------------------------- */
 
-StgWord8 mblock_map[4096]; // initially all zeros
-
-static void
-mblockIsHeap (void *p)
-{
-    mblock_map[((StgWord)p & ~MBLOCK_MASK) >> MBLOCK_SHIFT] = 1;
-}
+#ifdef MBLOCK_MAP_SIZE
+StgWord8 mblock_map[MBLOCK_MAP_SIZE]; // initially all zeros
+#endif
 
 /* -----------------------------------------------------------------------------
    Allocate new mblock(s)
@@ -88,11 +90,11 @@ getMBlock(void)
 // the mmap() interface.
 
 static void *
-my_mmap (void *addr, int size)
+my_mmap (void *addr, lnat size)
 {
     void *ret;
 
-#ifdef solaris2_TARGET_OS
+#if defined(solaris2_TARGET_OS) || defined(irix_TARGET_OS)
     {
        int fd = open("/dev/zero",O_RDONLY);
        ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
@@ -102,21 +104,50 @@ my_mmap (void *addr, int size)
     ret = mmap(addr, size, PROT_READ | PROT_WRITE,
               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
 #elif darwin_TARGET_OS
-    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
-              MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+    // Without MAP_FIXED, Apple's mmap ignores addr.
+    // With MAP_FIXED, it overwrites already mapped regions, which is not what we want.
+    // mmap(0, ... MAP_FIXED ...) is worst of all: It unmaps the program text
+    // and replaces it with zeroes, causing instant death.
+    // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
+    // Let's just use the underlying Mach Microkernel calls directly,
+    // they're much nicer.
+
+    kern_return_t err;
+    ret = addr;
+    if(addr)    // try to allocate at address
+       err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, FALSE);
+    if(!addr || err)    // try to allocate anywhere
+       err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, TRUE);
+
+    if(err)    // don't know what the error codes mean exactly
+       barf("memory allocation failed (requested %d bytes)", size);
+    else
+       vm_protect(mach_task_self(),ret,size,FALSE,VM_PROT_READ|VM_PROT_WRITE);
 #else
-    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
+    ret = mmap(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
               MAP_ANON | MAP_PRIVATE, -1, 0);
 #endif
 
+    if (ret == (void *)-1) {
+       if (errno == ENOMEM ||
+           (errno == EINVAL && sizeof(void*)==4 && size >= 0xc0000000)) {
+           // If we request more than 3Gig, then we get EINVAL
+           // instead of ENOMEM (at least on Linux).
+           prog_belch("out of memory (requested %d bytes)", size);
+           stg_exit(EXIT_FAILURE);
+       } else {
+           barf("getMBlock: mmap: %s", strerror(errno));
+       }
+    }
+
     return ret;
-}
+}
 
 // Implements the general case: allocate a chunk of memory of 'size'
 // mblocks.
 
 static void *
-gen_map_mblocks (int size)
+gen_map_mblocks (lnat size)
 {
     int slop;
     void *ret;
@@ -125,20 +156,29 @@ gen_map_mblocks (int size)
     // it (unmap the rest).
     size += MBLOCK_SIZE;
     ret = my_mmap(0, size);
-    if (ret == (void *)-1) {
-       barf("gen_map_mblocks: mmap failed");
-    }
 
     // unmap the slop bits around the chunk we allocated
     slop = (W_)ret & MBLOCK_MASK;
-    
+
     if (munmap(ret, MBLOCK_SIZE - slop) == -1) {
-      barf("gen_map_mblocks: munmap failed");
+       barf("gen_map_mblocks: munmap failed");
     }
     if (slop > 0 && munmap(ret+size-slop, slop) == -1) {
-      barf("gen_map_mblocks: munmap failed");
+       barf("gen_map_mblocks: munmap failed");
     }
-    
+
+    // ToDo: if we happened to get an aligned block, then don't
+    // unmap the excess, just use it.  For this to work, you
+    // need to keep in mind the following:
+    //     * Calling my_mmap() with an 'addr' arg pointing to
+    //       already my_mmap()ed space is OK and won't fail.
+    //     * If my_mmap() can't satisfy the request at the
+    //       given 'next_request' address in getMBlocks(), make sure
+    //       you unmap the extra mblock mmap()ed here (or simply
+    //       satisfy yourself that the slop introduced isn't worth
+    //       salvaging.)
+    //
+    // next time, try after the block we just got.
     ret += MBLOCK_SIZE - slop;
 
     return ret;
@@ -161,22 +201,13 @@ getMBlocks(nat n)
        ret = gen_map_mblocks(size);
     } else {
        ret = my_mmap(next_request, size);
-
-       if (ret == (void *)-1) {
-           if (errno == ENOMEM) {
-               belch("out of memory (requested %d bytes)", n * BLOCK_SIZE);
-               stg_exit(EXIT_FAILURE);
-           } else {
-               barf("getMBlock: mmap failed");
-           }
-       }
 
        if (((W_)ret & MBLOCK_MASK) != 0) {
           // misaligned block!
-#ifdef DEBUG
-          belch("getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
+#if 0 // defined(DEBUG)
+          belch("warning: getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
 #endif
-          
+
           // unmap this block...
           if (munmap(ret, size) == -1) {
              barf("getMBlock: munmap failed");
@@ -187,13 +218,14 @@ getMBlocks(nat n)
     }
 
     // Next time, we'll try to allocate right after the block we just got.
+    // ToDo: check that we haven't already grabbed the memory at next_request
     next_request = ret + size;
 
     IF_DEBUG(gc,fprintf(stderr,"Allocated %d megablock(s) at %p\n",n,ret));
 
     // fill in the table
     for (i = 0; i < n; i++) {
-       mblockIsHeap( ret + i * MBLOCK_SIZE );
+       MARK_HEAP_ALLOCED( ret + i * MBLOCK_SIZE );
     }
 
     mblocks_allocated += n;
@@ -233,13 +265,19 @@ getMBlocks(nat n)
   static char* base_mblocks = (char*)0;
   static char* next_request = (char*)0;
   void* ret = (void*)0;
-  int i;
+  nat i;
 
   lnat size = MBLOCK_SIZE * n;
 
   if ( (base_non_committed == 0) || (next_request + size > end_non_committed) ) {
     if (base_non_committed) {
-      barf("RTS exhausted max heap size (%d bytes)\n", size_reserved_pool);
+      /* Tacky, but if no user-provided -M option is in effect,
+       * set it to the default (==256M) in time for the heap overflow PSA.
+       */
+      if (RtsFlags.GcFlags.maxHeapSize == 0) {
+        RtsFlags.GcFlags.maxHeapSize = size_reserved_pool / BLOCK_SIZE;
+      }
+      heapOverflow();
     }
     if (RtsFlags.GcFlags.maxHeapSize != 0) {
       size_reserved_pool = BLOCK_SIZE * RtsFlags.GcFlags.maxHeapSize;
@@ -296,7 +334,7 @@ getMBlocks(nat n)
 
   // fill in the table
   for (i = 0; i < n; i++) {
-    mblockIsHeap( ret + i * MBLOCK_SIZE );
+    MARK_HEAP_ALLOCED ( ret + i * MBLOCK_SIZE );
   }
 
   return ret;
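
The two ideas this patch leans on -- my_mmap()'s explicit error reporting and gen_map_mblocks()'s over-allocate-and-trim approach to megablock alignment -- can be seen in isolation in the stand-alone C sketch below. It is an illustration, not RTS code: it assumes POSIX mmap()/munmap() and 1 MB megablocks, and the demo_* and DEMO_* names are invented for this example only.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON            /* older BSD/macOS spelling */
#endif

#define DEMO_MBLOCK_SIZE  (1UL << 20)     /* 1 MB megablocks (assumption) */
#define DEMO_MBLOCK_MASK  (DEMO_MBLOCK_SIZE - 1)

/* Anonymous, private mapping with the kind of error reporting the patch
 * adds to my_mmap(): out-of-memory is reported separately from other
 * mmap() failures. */
static void *demo_mmap(void *addr, size_t size)
{
    void *ret = mmap(addr, size, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (ret == MAP_FAILED) {
        if (errno == ENOMEM) {
            fprintf(stderr, "out of memory (requested %zu bytes)\n", size);
            exit(EXIT_FAILURE);
        }
        fprintf(stderr, "demo_mmap: mmap: %s\n", strerror(errno));
        exit(EXIT_FAILURE);
    }
    return ret;
}

/* gen_map_mblocks()-style allocation: ask for one extra megablock, then
 * unmap the misaligned slop at both ends so the block handed back starts
 * on a megablock boundary. */
static void *demo_get_aligned(size_t size)
{
    size_t slop;
    char  *ret;

    size += DEMO_MBLOCK_SIZE;                  /* over-allocate by one mblock */
    ret   = demo_mmap(NULL, size);
    slop  = (uintptr_t)ret & DEMO_MBLOCK_MASK; /* distance past the boundary  */

    /* unmap the unaligned prefix ... */
    if (munmap(ret, DEMO_MBLOCK_SIZE - slop) == -1)
        fprintf(stderr, "demo_get_aligned: munmap: %s\n", strerror(errno));
    /* ... and the leftover suffix, if the block was not already aligned */
    if (slop > 0 && munmap(ret + size - slop, slop) == -1)
        fprintf(stderr, "demo_get_aligned: munmap: %s\n", strerror(errno));

    return ret + DEMO_MBLOCK_SIZE - slop;
}

int main(void)
{
    char *blk = demo_get_aligned(2 * DEMO_MBLOCK_SIZE);
    printf("2 MB block at %p, megablock-aligned: %s\n", (void *)blk,
           ((uintptr_t)blk & DEMO_MBLOCK_MASK) == 0 ? "yes" : "no");
    blk[0] = 1;                                /* the mapping is usable */
    return 0;
}

Over-allocating by one megablock guarantees that a megablock boundary falls inside the mapping, so alignment never needs a retry loop; the patch's ToDo comment about keeping an already-aligned block instead of trimming it is the natural refinement of the same trick.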