1 /* -----------------------------------------------------------------------------
3 * (c) The University of Glasgow 2006-2007
5 * OS-specific memory management
7 * ---------------------------------------------------------------------------*/
9 // This is non-posix compliant.
10 // #include "PosixSource.h"
20 #ifdef HAVE_SYS_TYPES_H
21 #include <sys/types.h>
23 #ifdef HAVE_SYS_MMAN_H
36 #include <mach/mach.h>
37 #include <mach/vm_map.h>
40 /* keep track of maps returned by my_mmap */
41 typedef struct _map_rec {
42 char* base; /* base addr */
43 int size; /* map size */
/* NOTE(review): 'int' looks narrow for a mapping size on 64-bit
 * platforms — confirm callers never record maps >= 2GB. */
44 struct _map_rec* next; /* next pointer */
/* (closing "} map_rec;" of the typedef is elided in this view) */
/* Address at which the next allocation will be attempted; 0 means
 * "no preference yet" — see the heuristic described in the big
 * comment below and its use in osGetMBlocks(). */
48 static caddr_t next_request = 0;
/* Head of the singly-linked list of every mapping handed out, kept
 * so osFreeAllMBlocks() can munmap() them all at shutdown. */
49 static map_rec* mmap_rec = NULL;
53 next_request = (caddr_t)RtsFlags.GcFlags.heapBase;
56 /* -----------------------------------------------------------------------------
59 On Unix-like systems, we use mmap() to allocate our memory. We
60 want memory in chunks of MBLOCK_SIZE, and aligned on an MBLOCK_SIZE
61 boundary. The mmap() interface doesn't give us this level of
62 control, so we have to use some heuristics.
64 In the general case, if we want a block of n megablocks, then we
65 allocate n+1 and trim off the slop from either side (using
66 munmap()) to get an aligned chunk of size n. However, the next
67 time we'll try to allocate directly after the previously allocated
68 chunk, on the grounds that this is aligned and likely to be free.
69 If it turns out that we were wrong, we have to munmap() and try
70 again using the general method.
72 Note on posix_memalign(): this interface is available on recent
73 systems and appears to provide exactly what we want. However, it
74 turns out not to be as good as our mmap() implementation, because
75 it wastes extra space (using double the address space, in a test on
76 x86_64/Linux). The problem seems to be that posix_memalign()
77 returns memory that can be free()'d, so the library must store
78 extra information along with the allocated block, thus messing up
79 the alignment. Hence, we don't use posix_memalign() for now.
81 -------------------------------------------------------------------------- */
83 // A wrapper around mmap(), to abstract away from OS differences in
84 // the mmap() interface.
87 my_mmap (void *addr, lnat size)
/* Allocate 'size' bytes of anonymous read/write memory, preferably at
 * hint address 'addr' (0 = anywhere).  Abstracts over per-OS mmap()
 * quirks.  On failure it terminates the process via stg_exit()/barf()
 * rather than returning an error.  (Return-type line and enclosing
 * braces are elided in this view.) */
91 #if defined(solaris2_HOST_OS) || defined(irix_HOST_OS)
/* Solaris/IRIX have no MAP_ANONYMOUS: map /dev/zero instead.
 * NOTE(review): the open() result is not checked and the fd does not
 * appear to be closed in the visible lines — confirm in full source. */
93 int fd = open("/dev/zero",O_RDONLY);
94 ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
/* Linux and friends: plain anonymous private mapping. */
98 ret = mmap(addr, size, PROT_READ | PROT_WRITE,
99 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
101 // Without MAP_FIXED, Apple's mmap ignores addr.
102 // With MAP_FIXED, it overwrites already mapped regions, which is dangerous.
103 // mmap(0, ... MAP_FIXED ...) is worst of all: It unmaps the program text
104 // and replaces it with zeroes, causing instant death.
105 // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
106 // Let's just use the underlying Mach Microkernel calls directly,
107 // they're much nicer.
109 kern_return_t err = 0;
111 if(addr) // try to allocate at address
112 err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, FALSE);
113 if(!addr || err) // try to allocate anywhere
114 err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, TRUE);
/* (error-check branch on 'err' is partially elided here) */
117 // don't know what the error codes mean exactly, assume it's
118 // not our problem though.
119 errorBelch("memory allocation failed (requested %lu bytes)", size);
120 stg_exit(EXIT_FAILURE);
/* vm_allocate gives no-access pages by default (?) — make them R/W.
 * NOTE(review): vm_protect's return value is ignored — confirm intended. */
122 vm_protect(mach_task_self(),(vm_address_t)ret,size,FALSE,VM_PROT_READ|VM_PROT_WRITE);
/* Fallback for remaining BSD-style systems: MAP_ANON spelling. */
125 ret = mmap(addr, size, PROT_READ | PROT_WRITE,
126 MAP_ANON | MAP_PRIVATE, -1, 0);
/* Common failure handling: mmap returns MAP_FAILED, i.e. (void*)-1. */
129 if (ret == (void *)-1) {
130 if (errno == ENOMEM ||
131 (errno == EINVAL && sizeof(void*)==4 && size >= 0xc0000000)) {
132 // If we request more than 3Gig, then we get EINVAL
133 // instead of ENOMEM (at least on Linux).
134 errorBelch("out of memory (requested %lu bytes)", size);
135 stg_exit(EXIT_FAILURE);
/* Any other errno is an internal error, not resource exhaustion. */
137 barf("getMBlock: mmap: %s", strerror(errno));
144 // Implements the general case: allocate a chunk of memory of 'size'
// bytes, MBLOCK_SIZE-aligned, by over-allocating and trimming the slop
// (see the strategy comment near the top of the file).
148 gen_map_mblocks (lnat size)
/* NOTE(review): the line that enlarges 'size' by one extra MBLOCK_SIZE
 * before mapping appears to be elided from this view — the slop
 * arithmetic below only works if the mapping is one mblock larger
 * than requested; confirm against the full source. */
153 // Try to map a larger block, and take the aligned portion from
154 // it (unmap the rest).
156 ret = my_mmap(0, size);
158 // unmap the slop bits around the chunk we allocated
/* 'slop' = how far 'ret' sits past the previous mblock boundary. */
159 slop = (W_)ret & MBLOCK_MASK;
/* Trim the leading (MBLOCK_SIZE - slop) bytes so the kept region
 * starts exactly on an mblock boundary... */
161 if (munmap((void*)ret, MBLOCK_SIZE - slop) == -1) {
162 barf("gen_map_mblocks: munmap failed");
/* ...and the trailing 'slop' bytes at the far end (only if nonzero —
 * munmap with length 0 would fail on some systems). */
164 if (slop > 0 && munmap((void*)(ret+size-slop), slop) == -1) {
165 barf("gen_map_mblocks: munmap failed");
168 // ToDo: if we happened to get an aligned block, then don't
169 // unmap the excess, just use it. For this to work, you
170 // need to keep in mind the following:
171 // * Calling my_mmap() with an 'addr' arg pointing to
172 // already my_mmap()ed space is OK and won't fail.
173 // * If my_mmap() can't satisfy the request at the
174 // given 'next_request' address in getMBlocks(), that
175 // you unmap the extra mblock mmap()ed here (or simply
176 // satisfy yourself that the slop introduced isn't worth
180 // next time, try after the block we just got.
/* Advance 'ret' to the aligned start of the kept region; the caller
 * records 'ret + size' as the next_request hint. */
181 ret += MBLOCK_SIZE - slop;
/* Body of the mblock allocator (its function header, presumably
 * osGetMBlocks(nat n), is elided in this view).  Strategy: try the
 * cheap path of mapping directly at 'next_request' (which is aligned
 * by construction); fall back to gen_map_mblocks() when that fails
 * to produce an aligned block. */
189 lnat size = MBLOCK_SIZE * (lnat)n;
192 if (next_request == 0) {
193 // use gen_map_mblocks the first time.
194 ret = gen_map_mblocks(size);
/* (else-branch brace elided) — fast path: ask for memory right after
 * the previously allocated chunk. */
196 ret = my_mmap(next_request, size);
/* The kernel was free to ignore our hint; detect misalignment. */
198 if (((W_)ret & MBLOCK_MASK) != 0) {
200 #if 0 // defined(DEBUG)
201 errorBelch("warning: getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
204 // unmap this block...
205 if (munmap(ret, size) == -1) {
206 barf("getMBlock: munmap failed");
208 // and do it the hard way
209 ret = gen_map_mblocks(size);
/* Record the mapping so osFreeAllMBlocks() can release it later.
 * (The rec->base / rec->size assignment lines are elided here.) */
212 rec = (map_rec*)stgMallocBytes(sizeof(map_rec),"OSMem: osGetMBlocks");
215 rec->next = mmap_rec;
217 // Next time, we'll try to allocate right after the block we just got.
218 // ToDo: check that we haven't already grabbed the memory at next_request
219 next_request = ret + size;
/* Unmap every region recorded in the mmap_rec list (called at RTS
 * shutdown).  The traversal loop and the freeing of each map_rec node
 * are elided in this view; only the per-node munmap is visible. */
224 void osFreeAllMBlocks(void)
226 map_rec* tmp = mmap_rec;
227 map_rec* next = NULL;
/* munmap returns 0 on success, so any nonzero result is fatal. */
230 if(munmap(tmp->base,tmp->size))
231 barf("osFreeAllMBlocks: munmap failed!");
/* Return the VM page size in bytes, querying the OS once and caching
 * the result in a function-local static.  (The cache check, the
 * ret < 0 test, and the return statements are elided in this view.) */
239 lnat getPageSize (void)
241 static lnat pageSize = 0;
/* sysconf returns -1 on failure — presumably checked on the elided
 * line guarding the barf below. */
246 ret = sysconf(_SC_PAGESIZE);
248 barf("getPageSize: cannot get page size");
/* Mark the memory region [p, p+len) executable (exec=rtsTrue) or
 * non-executable (exec=rtsFalse).  mprotect operates on whole pages,
 * so the range is widened to page boundaries; the pages keep
 * read/write permission either way. */
254 void setExecutable (void *p, lnat len, rtsBool exec)
256 StgWord pageSize = getPageSize();
258 /* malloced memory isn't executable by default on OpenBSD */
/* pageSize is a power of two, so ~(pageSize-1) masks an address down
 * to the start of its page. */
259 StgWord mask = ~(pageSize - 1);
260 StgWord startOfFirstPage = ((StgWord)p ) & mask;
/* 'len - 1' so that a range ending exactly on a page boundary does
 * not drag in one extra page. */
261 StgWord startOfLastPage = ((StgWord)p + len - 1) & mask;
262 StgWord size = startOfLastPage - startOfFirstPage + pageSize;
263 if (mprotect((void*)startOfFirstPage, (size_t)size,
264 (exec ? PROT_EXEC : 0) | PROT_READ | PROT_WRITE) != 0) {
265 barf("setExecutable: failed to protect 0x%p\n", p);