/* -----------------------------------------------------------------------------
- * $Id: MBlock.c,v 1.32 2002/10/23 12:26:11 mthomas Exp $
+ * $Id: MBlock.c,v 1.51 2004/09/03 15:28:33 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
+#ifdef HAVE_STRING_H
+#include <string.h>
+#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#if HAVE_WINDOWS_H
#include <windows.h>
#endif
+#if darwin_TARGET_OS
+#include <mach/vm_map.h>
+#endif
#include <errno.h>
The MBlock Map: provides our implementation of HEAP_ALLOCED()
-------------------------------------------------------------------------- */
-StgWord8 mblock_map[4096]; // initially all zeros
-
-static void
-mblockIsHeap (void *p)
-{
- mblock_map[((StgWord)p & ~MBLOCK_MASK) >> MBLOCK_SHIFT] = 1;
-}
+#ifdef MBLOCK_MAP_SIZE
+StgWord8 mblock_map[MBLOCK_MAP_SIZE]; // initially all zeros
+#endif
/* -----------------------------------------------------------------------------
Allocate new mblock(s)
// the mmap() interface.
static void *
-my_mmap (void *addr, int size)
+my_mmap (void *addr, lnat size)
{
void *ret;
-#ifdef solaris2_TARGET_OS
+#if defined(solaris2_TARGET_OS) || defined(irix_TARGET_OS)
{
int fd = open("/dev/zero",O_RDONLY);
- ret = mmap(addr, size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_PRIVATE, fd, 0);
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
close(fd);
}
#elif hpux_TARGET_OS
ret = mmap(addr, size, PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#elif darwin_TARGET_OS
- ret = mmap(addr, size, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
+ // Without MAP_FIXED, Apple's mmap ignores addr.
+ // With MAP_FIXED, it overwrites already mapped regions, which can be dangerous.
+ // mmap(0, ... MAP_FIXED ...) is worst of all: It unmaps the program text
+ // and replaces it with zeroes, causing instant death.
+ // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
+ // Let's just use the underlying Mach Microkernel calls directly,
+ // they're much nicer.
+
+ kern_return_t err;
+ ret = addr;
+ if(addr) // try to allocate at address
+ err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, FALSE);
+ if(!addr || err) // try to allocate anywhere
+ err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, TRUE);
+
+ if(err) // don't know what the error codes mean exactly
+ barf("memory allocation failed (requested %d bytes)", size);
+ else
+ vm_protect(mach_task_self(),ret,size,FALSE,VM_PROT_READ|VM_PROT_WRITE);
#else
- ret = mmap(addr, size, PROT_READ | PROT_WRITE,
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANON | MAP_PRIVATE, -1, 0);
#endif
+ if (ret == (void *)-1) {
+ if (errno == ENOMEM ||
+ (errno == EINVAL && sizeof(void*)==4 && size >= 0xc0000000)) {
+ // If we request more than 3Gig, then we get EINVAL
+ // instead of ENOMEM (at least on Linux).
+ errorBelch("out of memory (requested %d bytes)", size);
+ stg_exit(EXIT_FAILURE);
+ } else {
+ barf("getMBlock: mmap: %s", strerror(errno));
+ }
+ }
+
return ret;
-}
+}
// Implements the general case: allocate a chunk of memory of 'size'
// mblocks.
static void *
-gen_map_mblocks (int size)
+gen_map_mblocks (lnat size)
{
int slop;
void *ret;
// it (unmap the rest).
size += MBLOCK_SIZE;
ret = my_mmap(0, size);
- if (ret == (void *)-1) {
- barf("gen_map_mblocks: mmap failed");
- }
// unmap the slop bits around the chunk we allocated
slop = (W_)ret & MBLOCK_MASK;
-
+
if (munmap(ret, MBLOCK_SIZE - slop) == -1) {
- barf("gen_map_mblocks: munmap failed");
+ barf("gen_map_mblocks: munmap failed");
}
if (slop > 0 && munmap(ret+size-slop, slop) == -1) {
- barf("gen_map_mblocks: munmap failed");
+ barf("gen_map_mblocks: munmap failed");
}
-
+
+ // ToDo: if we happened to get an aligned block, then don't
+ // unmap the excess, just use it. For this to work, you
+ // need to keep in mind the following:
+ // * Calling my_mmap() with an 'addr' arg pointing to
+ // already my_mmap()ed space is OK and won't fail.
+ // * If my_mmap() can't satisfy the request at the
+ // given 'next_request' address in getMBlocks(), that
+ // you unmap the extra mblock mmap()ed here (or simply
+ // satisfy yourself that the slop introduced isn't worth
+ // salvaging.)
+ //
+
// next time, try after the block we just got.
ret += MBLOCK_SIZE - slop;
return ret;
ret = gen_map_mblocks(size);
} else {
ret = my_mmap(next_request, size);
-
- if (ret == (void *)-1) {
- if (errno == ENOMEM) {
- belch("out of memory (requested %d bytes)", n * BLOCK_SIZE);
- stg_exit(EXIT_FAILURE);
- } else {
- barf("getMBlock: mmap failed");
- }
- }
if (((W_)ret & MBLOCK_MASK) != 0) {
// misaligned block!
-#ifdef DEBUG
- belch("getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
+#if 0 // defined(DEBUG)
+ errorBelch("warning: getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
#endif
-
+
// unmap this block...
if (munmap(ret, size) == -1) {
barf("getMBlock: munmap failed");
}
// Next time, we'll try to allocate right after the block we just got.
+ // ToDo: check that we haven't already grabbed the memory at next_request
next_request = ret + size;
- IF_DEBUG(gc,fprintf(stderr,"Allocated %d megablock(s) at %p\n",n,ret));
+ IF_DEBUG(gc,debugBelch("Allocated %d megablock(s) at %p\n",n,ret));
// fill in the table
for (i = 0; i < n; i++) {
- mblockIsHeap( ret + i * MBLOCK_SIZE );
+ MARK_HEAP_ALLOCED( ret + i * MBLOCK_SIZE );
}
mblocks_allocated += n;
static char* base_mblocks = (char*)0;
static char* next_request = (char*)0;
void* ret = (void*)0;
- int i;
+ nat i;
lnat size = MBLOCK_SIZE * n;
if ( (base_non_committed == 0) || (next_request + size > end_non_committed) ) {
if (base_non_committed) {
- barf("RTS exhausted max heap size (%d bytes)\n", size_reserved_pool);
+ /* Tacky, but if no user-provided -M option is in effect,
+ * set it to the default (==256M) in time for the heap overflow PSA.
+ */
+ if (RtsFlags.GcFlags.maxHeapSize == 0) {
+ RtsFlags.GcFlags.maxHeapSize = size_reserved_pool / BLOCK_SIZE;
+ }
+ heapOverflow();
}
if (RtsFlags.GcFlags.maxHeapSize != 0) {
size_reserved_pool = BLOCK_SIZE * RtsFlags.GcFlags.maxHeapSize;
, PAGE_READWRITE
);
if ( base_non_committed == 0 ) {
- fprintf(stderr, "getMBlocks: VirtualAlloc failed with: %ld\n", GetLastError());
+ errorBelch("getMBlocks: VirtualAlloc failed with: %ld\n", GetLastError());
ret=(void*)-1;
} else {
end_non_committed = (char*)base_non_committed + (unsigned long)size_reserved_pool;
/* The returned pointer is not aligned on a mega-block boundary. Make it. */
base_mblocks = (char*)((unsigned long)base_non_committed & (unsigned long)~MBLOCK_MASK) + MBLOCK_SIZE;
# if 0
- fprintf(stderr, "getMBlocks: Dropping %d bytes off of 256M chunk\n",
- (unsigned)base_mblocks - (unsigned)base_non_committed);
+ debugBelch("getMBlocks: Dropping %d bytes off of 256M chunk\n",
+ (unsigned)base_mblocks - (unsigned)base_non_committed);
# endif
if ( ((char*)base_mblocks + size) > end_non_committed ) {
- fprintf(stderr, "getMBlocks: oops, committed too small a region to start with.");
+ debugBelch("getMBlocks: oops, committed too small a region to start with.");
ret=(void*)-1;
} else {
next_request = base_mblocks;
if ( ret != (void*)-1 ) {
ret = VirtualAlloc(next_request, size, MEM_COMMIT, PAGE_READWRITE);
if (ret == NULL) {
- fprintf(stderr, "getMBlocks: VirtualAlloc failed with: %ld\n", GetLastError());
+ debugBelch("getMBlocks: VirtualAlloc failed with: %ld\n", GetLastError());
ret=(void*)-1;
}
}
barf("getMBlocks: unknown memory allocation failure on Win32.");
}
- IF_DEBUG(gc,fprintf(stderr,"Allocated %d megablock(s) at 0x%x\n",n,(nat)ret));
+ IF_DEBUG(gc,debugBelch("Allocated %d megablock(s) at 0x%x\n",n,(nat)ret));
next_request = (char*)next_request + size;
mblocks_allocated += n;
// fill in the table
for (i = 0; i < n; i++) {
- mblockIsHeap( ret + i * MBLOCK_SIZE );
+ MARK_HEAP_ALLOCED ( ret + i * MBLOCK_SIZE );
}
return ret;
if (rc == FALSE) {
# ifdef DEBUG
- fprintf(stderr, "freeMBlocks: VirtualFree failed with: %d\n", GetLastError());
+ debugBelch("freeMBlocks: VirtualFree failed with: %d\n", GetLastError());
# endif
}