1 /* -----------------------------------------------------------------------------
3 * (c) The University of Glasgow 2006-2007
5 * OS-specific memory management
7 * ---------------------------------------------------------------------------*/
9 /* This is non-posix compliant. */
10 /* #include "PosixSource.h" */
19 #ifdef HAVE_SYS_TYPES_H
20 #include <sys/types.h>
22 #ifdef HAVE_SYS_MMAN_H
35 #include <mach/mach.h>
36 #include <mach/vm_map.h>
39 static caddr_t next_request = 0;
43 next_request = (caddr_t)RtsFlags.GcFlags.heapBase;
46 /* -----------------------------------------------------------------------------
49 On Unix-like systems, we use mmap() to allocate our memory. We
50 want memory in chunks of MBLOCK_SIZE, and aligned on an MBLOCK_SIZE
51 boundary. The mmap() interface doesn't give us this level of
52 control, so we have to use some heuristics.
54 In the general case, if we want a block of n megablocks, then we
55 allocate n+1 and trim off the slop from either side (using
56 munmap()) to get an aligned chunk of size n. However, the next
57 time we'll try to allocate directly after the previously allocated
58 chunk, on the grounds that this is aligned and likely to be free.
59 If it turns out that we were wrong, we have to munmap() and try
60 again using the general method.
62 Note on posix_memalign(): this interface is available on recent
63 systems and appears to provide exactly what we want. However, it
64 turns out not to be as good as our mmap() implementation, because
65 it wastes extra space (using double the address space, in a test on
66 x86_64/Linux). The problem seems to be that posix_memalign()
67 returns memory that can be free()'d, so the library must store
68 extra information along with the allocated block, thus messing up
69 the alignment. Hence, we don't use posix_memalign() for now.
71 -------------------------------------------------------------------------- */
73 // A wrapper around mmap(), to abstract away from OS differences in
74 // the mmap() interface.
77 my_mmap (void *addr, lnat size)
81 #if defined(solaris2_HOST_OS) || defined(irix_HOST_OS)
83 int fd = open("/dev/zero",O_RDONLY);
84 ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
88 ret = mmap(addr, size, PROT_READ | PROT_WRITE,
89 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
91 // Without MAP_FIXED, Apple's mmap ignores addr.
92 // With MAP_FIXED, it overwrites already mapped regions, whic
93 // mmap(0, ... MAP_FIXED ...) is worst of all: It unmaps the program text
94 // and replaces it with zeroes, causing instant death.
95 // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
96 // Let's just use the underlying Mach Microkernel calls directly,
97 // they're much nicer.
99 kern_return_t err = 0;
101 if(addr) // try to allocate at adress
102 err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, FALSE);
103 if(!addr || err) // try to allocate anywhere
104 err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, TRUE);
107 // don't know what the error codes mean exactly, assume it's
108 // not our problem though.
109 errorBelch("memory allocation failed (requested %lu bytes)", size);
110 stg_exit(EXIT_FAILURE);
112 vm_protect(mach_task_self(),(vm_address_t)ret,size,FALSE,VM_PROT_READ|VM_PROT_WRITE);
115 ret = mmap(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
116 MAP_ANON | MAP_PRIVATE, -1, 0);
119 if (ret == (void *)-1) {
120 if (errno == ENOMEM ||
121 (errno == EINVAL && sizeof(void*)==4 && size >= 0xc0000000)) {
122 // If we request more than 3Gig, then we get EINVAL
123 // instead of ENOMEM (at least on Linux).
124 errorBelch("out of memory (requested %lu bytes)", size);
125 stg_exit(EXIT_FAILURE);
127 barf("getMBlock: mmap: %s", strerror(errno));
134 // Implements the general case: allocate a chunk of memory of 'size'
138 gen_map_mblocks (lnat size)
143 // Try to map a larger block, and take the aligned portion from
144 // it (unmap the rest).
146 ret = my_mmap(0, size);
148 // unmap the slop bits around the chunk we allocated
149 slop = (W_)ret & MBLOCK_MASK;
151 if (munmap(ret, MBLOCK_SIZE - slop) == -1) {
152 barf("gen_map_mblocks: munmap failed");
154 if (slop > 0 && munmap(ret+size-slop, slop) == -1) {
155 barf("gen_map_mblocks: munmap failed");
158 // ToDo: if we happened to get an aligned block, then don't
159 // unmap the excess, just use it. For this to work, you
160 // need to keep in mind the following:
161 // * Calling my_mmap() with an 'addr' arg pointing to
162 // already my_mmap()ed space is OK and won't fail.
163 // * If my_mmap() can't satisfy the request at the
164 // given 'next_request' address in getMBlocks(), that
165 // you unmap the extra mblock mmap()ed here (or simply
166 // satisfy yourself that the slop introduced isn't worth
170 // next time, try after the block we just got.
171 ret += MBLOCK_SIZE - slop;
179 lnat size = MBLOCK_SIZE * n;
181 if (next_request == 0) {
182 // use gen_map_mblocks the first time.
183 ret = gen_map_mblocks(size);
185 ret = my_mmap(next_request, size);
187 if (((W_)ret & MBLOCK_MASK) != 0) {
189 #if 0 // defined(DEBUG)
190 errorBelch("warning: getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
193 // unmap this block...
194 if (munmap(ret, size) == -1) {
195 barf("getMBlock: munmap failed");
197 // and do it the hard way
198 ret = gen_map_mblocks(size);
202 // Next time, we'll try to allocate right after the block we just got.
203 // ToDo: check that we haven't already grabbed the memory at next_request
204 next_request = ret + size;
209 void osFreeAllMBlocks(void)
211 /* XXX Do something here (bug #711) */
214 lnat getPageSize (void)
216 static lnat pageSize = 0;
221 ret = sysconf(_SC_PAGESIZE);
223 barf("getPageSize: cannot get page size");
229 void setExecutable (void *p, lnat len, rtsBool exec)
231 StgWord pageSize = getPageSize();
233 /* malloced memory isn't executable by default on OpenBSD */
234 StgWord mask = ~(pageSize - 1);
235 StgWord startOfFirstPage = ((StgWord)p ) & mask;
236 StgWord startOfLastPage = ((StgWord)p + len - 1) & mask;
237 StgWord size = startOfLastPage - startOfFirstPage + pageSize;
238 if (mprotect((void*)startOfFirstPage, (size_t)size,
239 (exec ? PROT_EXEC : 0) | PROT_READ | PROT_WRITE) != 0) {
240 barf("makeExecutable: failed to protect 0x%p\n", p);