+/* -----------------------------------------------------------------------------
+ The mmap() method
+
+ On Unix-like systems, we use mmap() to allocate our memory. We
+ want memory in chunks of MBLOCK_SIZE, and aligned on an MBLOCK_SIZE
+ boundary. The mmap() interface doesn't give us this level of
+ control, so we have to use some heuristics.
+
+ In the general case, if we want a block of n megablocks, then we
+ allocate n+1 and trim off the slop from either side (using
+ munmap()) to get an aligned chunk of size n. The next time we
+ allocate, however, we first try the address directly after the
+ previously allocated chunk, on the grounds that it is aligned and
+ likely to be free. If that guess turns out to be wrong, we have to
+ munmap() and try again using the general method.
+ -------------------------------------------------------------------------- */
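+
+/* A worked example (illustrative numbers only; this sketch assumes
+ MBLOCK_SIZE is 0x100000 and MBLOCK_MASK is 0xfffff): suppose we want
+ n = 2 mblocks. We mmap() 3 mblocks and the kernel hands back
+ ret = 0x2345678. Then
+
+ slop = ret & MBLOCK_MASK = 0x45678
+ aligned start = ret + (MBLOCK_SIZE - slop) = 0x2400000
+
+ so we munmap() the 0xba988 bytes in front of 0x2400000 and the 0x45678
+ bytes starting at 0x2600000, leaving exactly two aligned mblocks at
+ 0x2400000. gen_map_mblocks() below performs this trimming. */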
+
+#if !defined(mingw32_TARGET_OS) && !defined(cygwin32_TARGET_OS)
+
+// A wrapper around mmap(), to abstract away from OS differences in
+// the mmap() interface.
+
+static void *
+my_mmap (void *addr, lnat size)
+{
+    void *ret;
+
+#if defined(solaris2_TARGET_OS) || defined(irix_TARGET_OS)
+    {
+        int fd = open("/dev/zero",O_RDONLY);
+        ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+        close(fd);
+    }
+#elif defined(hpux_TARGET_OS)
+    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
+               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+#elif defined(darwin_TARGET_OS)
+    // Without MAP_FIXED, Apple's mmap ignores addr.
+    // With MAP_FIXED, it overwrites already mapped regions, which is a
+    // dangerous thing to do.
+    // mmap(0, ... MAP_FIXED ...) is worst of all: it unmaps the program text
+    // and replaces it with zeroes, causing instant death.
+    // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
+    // Let's just use the underlying Mach microkernel calls directly;
+    // they're much nicer.
+
+    kern_return_t err;
+    ret = addr;
+    if(addr) // try to allocate at the given address
+        err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, FALSE);
+    if(!addr || err) // try to allocate anywhere
+        err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, TRUE);
+
+    if(err) // don't know what the error codes mean exactly
+        barf("memory allocation failed (requested %lu bytes)", size);
+    else
+        vm_protect(mach_task_self(),(vm_address_t)ret,size,FALSE,VM_PROT_READ|VM_PROT_WRITE);
+#else
+    ret = mmap(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
+               MAP_ANON | MAP_PRIVATE, -1, 0);
+#endif
+
+    if (ret == (void *)-1) {
+        if (errno == ENOMEM ||
+            (errno == EINVAL && sizeof(void*)==4 && size >= 0xc0000000)) {
+            // If we request more than 3Gig, then we get EINVAL
+            // instead of ENOMEM (at least on Linux).
+            errorBelch("out of memory (requested %lu bytes)", size);
+            stg_exit(EXIT_FAILURE);
+        } else {
+            barf("getMBlock: mmap: %s", strerror(errno));
+        }
+    }
+
+    return ret;
+}
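+
+// Usage sketch (illustration only): my_mmap() either returns a page-aligned
+// mapping of 'size' bytes or aborts (errorBelch()/stg_exit() or barf()), so
+// callers never see MAP_FAILED. The 'addr' argument is only a hint on most
+// platforms, so a caller that needs MBLOCK_SIZE alignment must still check
+// what it got back, e.g.
+//
+//     void *p = my_mmap(0, MBLOCK_SIZE);       // let the OS pick an address
+//     if (((W_)p & MBLOCK_MASK) != 0) {
+//         // not mblock-aligned: fall back to gen_map_mblocks() below
+//     }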
+
+// Implements the general case: allocate a chunk of 'size' bytes (a whole
+// number of mblocks), aligned on an MBLOCK_SIZE boundary.
+
+static void *
+gen_map_mblocks (lnat size)
+{
+    int slop;
+    void *ret;
+
+    // Try to map a larger block, and take the aligned portion from
+    // it (unmap the rest).
+    size += MBLOCK_SIZE;
+    ret = my_mmap(0, size);
+
+    // unmap the slop bits around the chunk we allocated
+    slop = (W_)ret & MBLOCK_MASK;
+
+    if (munmap(ret, MBLOCK_SIZE - slop) == -1) {
+        barf("gen_map_mblocks: munmap failed");
+    }
+    if (slop > 0 && munmap(ret+size-slop, slop) == -1) {
+        barf("gen_map_mblocks: munmap failed");
+    }
+
+    // ToDo: if we happened to get an aligned block, then don't
+    // unmap the excess, just use it. For this to work, you
+    // need to keep in mind the following:
+    //     * Calling my_mmap() with an 'addr' arg pointing to
+    //       already my_mmap()ed space is OK and won't fail.
+    //     * If my_mmap() can't satisfy the request at the
+    //       given 'next_request' address in getMBlocks(), make
+    //       sure you unmap the extra mblock mmap()ed here (or
+    //       simply satisfy yourself that the slop introduced
+    //       isn't worth salvaging).
+
+    // next time, try after the block we just got.
+    ret += MBLOCK_SIZE - slop;
+    return ret;
+}
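+
+/* A minimal sketch (illustration only, kept out of the build with #if 0) of
+ the heuristic described in the comment at the top of this section: first
+ try to mmap() at 'next_request', the address just past the last chunk we
+ handed out (which is therefore mblock-aligned), and fall back to
+ gen_map_mblocks() if the kernel gave us something unaligned. The names
+ 'next_request' and 'alloc_sketch' are hypothetical; the real external
+ interface, getMBlocks(), follows below. */
+#if 0
+static void *next_request = NULL;  // no hint yet; a real allocator would
+                                   // start from some fixed heap base address
+
+static void *
+alloc_sketch (lnat n)
+{
+    lnat size = MBLOCK_SIZE * n;
+    void *ret = my_mmap(next_request, size);  // ask for the aligned hint
+
+    if (((W_)ret & MBLOCK_MASK) != 0) {
+        // hint ignored or unavailable: give the unaligned mapping back and
+        // use the general over-allocate-and-trim method instead.
+        munmap(ret, size);
+        ret = gen_map_mblocks(size);
+    }
+
+    next_request = (char *)ret + size;  // try just after this chunk next time
+    return ret;
+}
+#endif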
+
+
+// The external interface: allocate 'n' mblocks, and return the
+// address.