/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-1999
 *
 * MegaBlock Allocator Interface.  This file contains all the dirty
 * architecture-dependent hackery required to get a chunk of aligned
 * memory from the operating system.
 *
 * ---------------------------------------------------------------------------*/

/* This is non-posix compliant. */
/* #include "PosixSource.h" */

#include "Rts.h"
#include "RtsUtils.h"
#include "MBlock.h"
#include "BlockAlloc.h"

#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif

#ifndef mingw32_HOST_OS
# ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
# endif
#endif

#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif

#if HAVE_WINDOWS_H
#include <windows.h>
#endif

#if darwin_HOST_OS
#include <mach/vm_map.h>
#endif

#include <errno.h>
#include <string.h>

lnat mblocks_allocated = 0;

/* -----------------------------------------------------------------------------
   The MBlock Map: provides our implementation of HEAP_ALLOCED()
   -------------------------------------------------------------------------- */
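
/* For reference, the consumer of this map is the HEAP_ALLOCED() test
   (defined in MBlock.h).  A minimal sketch of the lookup it performs,
   assuming the MBLOCK_MAP_ENTRY() indexing used below -- the real macro
   may differ in detail:

       // 32-bit: one flat byte map, indexed by megablock number.
       #define HEAP_ALLOCED(p)  (mblock_map[MBLOCK_MAP_ENTRY(p)])

       // 64-bit: check the one-entry cache first; on a miss, fall
       // back to the linear search in slowIsHeapAlloced() below.
       #define HEAP_ALLOCED(p)                                        \
           (((((StgWord)(p)) >> 32) == mblock_cache->addrHigh32)      \
              ? mblock_cache->mblocks[MBLOCK_MAP_ENTRY(p)]            \
              : slowIsHeapAlloced(p))
*/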

#if SIZEOF_VOID_P == 4
StgWord8 mblock_map[MBLOCK_MAP_SIZE]; // initially all zeros
#elif SIZEOF_VOID_P == 8
static MBlockMap dummy_mblock_map;
MBlockMap *mblock_cache = &dummy_mblock_map;
int mblock_map_count = 0;
MBlockMap **mblock_maps = NULL;

static MBlockMap *
findMBlockMap(void *p)
{
    int i;
    StgWord32 hi = (StgWord32) (((StgWord)p) >> 32);
    for( i = 0; i < mblock_map_count; i++ )
    {
        if(mblock_maps[i]->addrHigh32 == hi)
        {
            return mblock_maps[i];
        }
    }
    return NULL;
}

StgBool
slowIsHeapAlloced(void *p)
{
    MBlockMap *map = findMBlockMap(p);
    if(map)
    {
        mblock_cache = map;     // cache the hit for the fast path
        return map->mblocks[MBLOCK_MAP_ENTRY(p)];
    }
    else
        return 0;
}
#endif

static void
markHeapAlloced(void *p)
{
#if SIZEOF_VOID_P == 4
    mblock_map[MBLOCK_MAP_ENTRY(p)] = 1;
#elif SIZEOF_VOID_P == 8
    MBlockMap *map = findMBlockMap(p);
    if(map == NULL)
    {
        // first allocation in this 4Gb region: make a new map
        mblock_map_count++;
        mblock_maps = realloc(mblock_maps,
                              sizeof(MBlockMap*) * mblock_map_count);
        map = mblock_maps[mblock_map_count-1] = calloc(1,sizeof(MBlockMap));
        map->addrHigh32 = (StgWord32) (((StgWord)p) >> 32);
    }
    map->mblocks[MBLOCK_MAP_ENTRY(p)] = 1;
#endif
}

/* -----------------------------------------------------------------------------
   Allocate new mblock(s)
   -------------------------------------------------------------------------- */

void *
getMBlock(void)
{
    return getMBlocks(1);
}

/* -----------------------------------------------------------------------------
   The mmap() method

   On Unix-like systems, we use mmap() to allocate our memory.  We
   want memory in chunks of MBLOCK_SIZE, and aligned on an MBLOCK_SIZE
   boundary.  The mmap() interface doesn't give us this level of
   control, so we have to use some heuristics.

   In the general case, if we want a block of n megablocks, then we
   allocate n+1 and trim off the slop from either side (using
   munmap()) to get an aligned chunk of size n.  However, the next
   time we'll try to allocate directly after the previously allocated
   chunk, on the grounds that this is aligned and likely to be free.
   If it turns out that we were wrong, we have to munmap() and try
   again using the general method.

   Note on posix_memalign(): this interface is available on recent
   systems and appears to provide exactly what we want.  However, it
   turns out not to be as good as our mmap() implementation, because
   it wastes extra space (using double the address space, in a test on
   x86_64/Linux).  The problem seems to be that posix_memalign()
   returns memory that can be free()'d, so the library must store
   extra information along with the allocated block, thus messing up
   the alignment.  Hence, we don't use posix_memalign() for now.
   -------------------------------------------------------------------------- */
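
/* For comparison, the posix_memalign() approach rejected above would
   look roughly like this -- a minimal sketch, assuming a system that
   provides posix_memalign(); getMBlocksViaMemalign is a hypothetical
   name, and this code is not built: */
#if 0
#include <stdlib.h>

static void *
getMBlocksViaMemalign (nat n)
{
    void *ret;
    // posix_memalign(&p, alignment, size): alignment must be a power
    // of two and a multiple of sizeof(void *), which MBLOCK_SIZE is.
    if (posix_memalign(&ret, MBLOCK_SIZE, MBLOCK_SIZE * (lnat)n) != 0) {
        barf("getMBlocksViaMemalign: posix_memalign failed");
    }
    // ret is MBLOCK_SIZE-aligned and free()-able, but the allocator
    // burns extra address space to achieve this (see note above).
    return ret;
}
#endif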

#if !defined(mingw32_HOST_OS) && !defined(cygwin32_HOST_OS)

// A wrapper around mmap(), to abstract away from OS differences in
// the mmap() interface.

static void *
my_mmap (void *addr, lnat size)
{
    void *ret;

#if defined(solaris2_HOST_OS) || defined(irix_HOST_OS)
    {
        // no anonymous mappings here: map /dev/zero instead
        int fd = open("/dev/zero",O_RDONLY);
        ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
        close(fd);
    }
#elif hpux_HOST_OS
    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#elif darwin_HOST_OS
    // Without MAP_FIXED, Apple's mmap ignores addr.
    // With MAP_FIXED, it overwrites already mapped regions, which is
    // dangerous, and mmap(0, ... MAP_FIXED ...) is worst of all: it
    // unmaps the program text and replaces it with zeroes, causing
    // instant death.
    // This behaviour seems to be conformant with IEEE Std 1003.1-2001.
    // Let's just use the underlying Mach microkernel calls directly;
    // they're much nicer.

    kern_return_t err;
    ret = addr;
    if(addr)    // try to allocate at the given address
        err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, FALSE);
    if(!addr || err)    // try to allocate anywhere
        err = vm_allocate(mach_task_self(),(vm_address_t*) &ret, size, TRUE);

    if(err) {
        // we don't know what the error codes mean exactly; assume it's
        // not our problem, though.
        errorBelch("memory allocation failed (requested %lu bytes)", size);
        stg_exit(EXIT_FAILURE);
    } else {
        vm_protect(mach_task_self(),ret,size,FALSE,VM_PROT_READ|VM_PROT_WRITE);
    }
#else
    ret = mmap(addr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
               MAP_ANON | MAP_PRIVATE, -1, 0);
#endif

    if (ret == (void *)-1) {
        if (errno == ENOMEM ||
            (errno == EINVAL && sizeof(void*)==4 && size >= 0xc0000000)) {
            // If we request more than 3Gig, then we get EINVAL
            // instead of ENOMEM (at least on Linux).
            errorBelch("out of memory (requested %lu bytes)", size);
            stg_exit(EXIT_FAILURE);
        } else {
            barf("getMBlock: mmap: %s", strerror(errno));
        }
    }

    return ret;
}

// Implements the general case: allocate a chunk of memory of 'size'
// mblocks.

static void *
gen_map_mblocks (lnat size)
{
    int slop;
    void *ret;

    // Try to map a larger block, and take the aligned portion from
    // it (unmap the rest).
    size += MBLOCK_SIZE;
    ret = my_mmap(0, size);

    // unmap the slop bits around the chunk we allocated
    slop = (W_)ret & MBLOCK_MASK;

    // the slop on the low side
    if (munmap(ret, MBLOCK_SIZE - slop) == -1) {
        barf("gen_map_mblocks: munmap failed");
    }
    // the slop on the high side
    if (slop > 0 && munmap(ret+size-slop, slop) == -1) {
        barf("gen_map_mblocks: munmap failed");
    }

    // ToDo: if we happened to get an aligned block, then don't
    // unmap the excess, just use it.  For this to work, you
    // need to keep in mind the following:
    //     * Calling my_mmap() with an 'addr' arg pointing to
    //       already my_mmap()ed space is OK and won't fail.
    //     * If my_mmap() can't satisfy the request at the
    //       given 'next_request' address in getMBlocks(), then
    //       you unmap the extra mblock mmap()ed here (or simply
    //       satisfy yourself that the slop introduced isn't worth
    //       salvaging).

    // next time, try after the block we just got.
    ret += MBLOCK_SIZE - slop;
    return ret;
}
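
/* A worked example of the trimming above, assuming MBLOCK_SIZE =
   0x100000 (1Mb, so MBLOCK_MASK = 0xfffff) and a 2-megablock request
   bumped to size = 0x300000, where my_mmap() happens to return
   ret = 0x500c0000:

       slop = 0x500c0000 & 0xfffff   =  0xc0000
       munmap(0x500c0000, 0x40000)      -- low slop: MBLOCK_SIZE - slop
       munmap(0x50300000, 0xc0000)      -- high slop: at ret + size - slop
       return 0x500c0000 + 0x40000   =  0x50100000

   leaving exactly [0x50100000, 0x50300000) mapped and aligned. */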

// The external interface: allocate 'n' mblocks, and return the
// address.

void *
getMBlocks(nat n)
{
    static caddr_t next_request = (caddr_t)HEAP_BASE;
    caddr_t ret;
    lnat size = MBLOCK_SIZE * n;
    nat i;

    if (next_request == 0) {
        // use gen_map_mblocks the first time.
        ret = gen_map_mblocks(size);
    } else {
        ret = my_mmap(next_request, size);

        if (((W_)ret & MBLOCK_MASK) != 0) {
            // misaligned block!
#if 0 // defined(DEBUG)
            errorBelch("warning: getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
#endif

            // unmap this block...
            if (munmap(ret, size) == -1) {
                barf("getMBlock: munmap failed");
            }
            // and do it the hard way
            ret = gen_map_mblocks(size);
        }
    }

    // Next time, we'll try to allocate right after the block we just got.
    // ToDo: check that we haven't already grabbed the memory at next_request
    next_request = ret + size;

    debugTrace(DEBUG_gc, "allocated %d megablock(s) at %p", n, ret);

    // fill in the megablock map for these blocks
    for (i = 0; i < n; i++) {
        markHeapAlloced( ret + i * MBLOCK_SIZE );
    }

    mblocks_allocated += n;

    return ret;
}

void
freeAllMBlocks(void)
{
    /* XXX Do something here */
}

#else /* defined(mingw32_HOST_OS) || defined(cygwin32_HOST_OS) */

/* alloc_rec keeps the info we need to have matching VirtualAlloc and
   VirtualFree calls. */
typedef struct alloc_rec_ {
    char* base;     /* non-aligned base address, directly from VirtualAlloc */
    int size;       /* size in bytes */
    struct alloc_rec_* next;
} alloc_rec;

typedef struct block_rec_ {
    char* base;     /* base address, non-MBLOCK-aligned */
    int size;       /* size in bytes */
    struct block_rec_* next;
} block_rec;

static alloc_rec* allocs = 0;
static block_rec* free_blocks = 0;
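
/* The Win32 path is two-phase: address space is reserved in large
   chunks (allocNew below), and pages are committed only when
   megablocks are actually handed out (commitBlocks below).  A minimal
   free-standing sketch of the underlying API pattern -- illustrative
   only, not part of the build:

       // Phase 1: reserve address space; no physical storage yet.
       char *base = VirtualAlloc(NULL, 16 * MBLOCK_SIZE,
                                 MEM_RESERVE, PAGE_READWRITE);
       // Phase 2: commit a subrange, making it usable memory.
       VirtualAlloc(base, MBLOCK_SIZE, MEM_COMMIT, PAGE_READWRITE);
       // One MEM_RELEASE call frees both the commit and the reserve.
       VirtualFree(base, 0, MEM_RELEASE);
*/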

static alloc_rec*
allocNew(nat n) {
    alloc_rec* rec;
    rec = (alloc_rec*)stgMallocBytes(sizeof(alloc_rec),"getMBlocks: allocNew");
    /* reserve n+1 mblocks so an aligned run of n can always be carved out */
    rec->size = (n+1)*MBLOCK_SIZE;
    rec->base =
        VirtualAlloc(NULL, rec->size, MEM_RESERVE, PAGE_READWRITE);
    if(rec->base==0) {
        stgFree((void*)rec);
        rec=0;
        sysErrorBelch(
            "getMBlocks: VirtualAlloc MEM_RESERVE %d blocks failed", n);
    } else {
        /* insert into the address-ordered list of allocations */
        alloc_rec temp;
        alloc_rec* it;
        temp.base=0; temp.size=0; temp.next=allocs;
        it=&temp;
        for(; it->next!=0 && it->next->base<rec->base; it=it->next) ;
        rec->next=it->next;
        it->next=rec;
        allocs=temp.next;
        debugTrace(DEBUG_gc, "allocated %d megablock(s) at 0x%x",n,(nat)rec->base);
    }
    return rec;
}

static void
insertFree(char* alloc_base, int alloc_size) {
    block_rec temp;
    block_rec* it;
    block_rec* prev;

    temp.base=0; temp.size=0; temp.next=free_blocks;
    it = free_blocks;
    prev = &temp;
    /* find the insertion point in the address-ordered free list */
    for( ; it!=0 && it->base<alloc_base; prev=it, it=it->next) {}

    if(it!=0 && alloc_base+alloc_size == it->base) {
        if(prev->base + prev->size == alloc_base) {    /* Merge it, alloc, prev */
            prev->size += alloc_size + it->size;
            prev->next = it->next;
            stgFree(it);
        } else {                                       /* Merge it, alloc */
            it->base = alloc_base;
            it->size += alloc_size;
        }
    } else if(prev->base + prev->size == alloc_base) { /* Merge alloc, prev */
        prev->size += alloc_size;
    } else {                                           /* Merge none */
        block_rec* rec;
        rec = (block_rec*)stgMallocBytes(sizeof(block_rec),"getMBlocks: insertFree");
        rec->base=alloc_base;
        rec->size=alloc_size;
        rec->next = it;
        prev->next = rec;
    }
    free_blocks=temp.next;
}

static void*
findFreeBlocks(nat n) {
    void* ret=0;
    block_rec* it;
    block_rec temp;
    block_rec* prev;
    int required_size;

    required_size = n*MBLOCK_SIZE;
    temp.next=free_blocks; temp.base=0; temp.size=0;
    it = free_blocks;
    prev = &temp;
    /* TODO: Don't just take the first block, find the smallest sufficient block */
    for( ; it!=0 && it->size<required_size; prev=it, it=it->next ) {}
    if(it!=0) {
        if( (((unsigned long)it->base) & MBLOCK_MASK) == 0) { /* MBlock aligned */
            ret = (void*)it->base;
            if(it->size==required_size) {
                prev->next=it->next;
                stgFree(it);
            } else {
                it->base += required_size;
                it->size -= required_size;
            }
        } else {
            /* round up to the next MBLOCK boundary and split the block */
            char* need_base;
            block_rec* next;
            int new_size;
            need_base = (char*)(((unsigned long)it->base) & ((unsigned long)~MBLOCK_MASK)) + MBLOCK_SIZE;
            next = (block_rec*)stgMallocBytes(
                     sizeof(block_rec)
                     , "getMBlocks: findFreeBlocks: splitting");
            new_size = need_base - it->base;
            next->base = need_base + required_size;
            next->size = it->size - (new_size+required_size);
            it->size = new_size;
            next->next = it->next;
            it->next = next;
            ret=(void*)need_base;
        }
    }
    free_blocks=temp.next;
    return ret;
}

/* VirtualAlloc MEM_COMMIT can't cross boundaries of VirtualAlloc MEM_RESERVE,
   so we might need to do many VirtualAlloc MEM_COMMITs.  We simply walk the
   (ordered) allocated blocks. */
static void
commitBlocks(char* base, int size) {
    alloc_rec* it;
    it=allocs;
    /* skip reservations that end before 'base' */
    for( ; it!=0 && (it->base+it->size)<=base; it=it->next ) {}
    for( ; it!=0 && size>0; it=it->next ) {
        int size_delta;
        void* temp;
        size_delta = it->size - (base-it->base);
        if(size_delta>size) size_delta=size;
        temp = VirtualAlloc(base, size_delta, MEM_COMMIT, PAGE_READWRITE);
        if(temp==0) {
            sysErrorBelch("getMBlocks: VirtualAlloc MEM_COMMIT failed");
            stg_exit(EXIT_FAILURE);
        }
        size -= size_delta;
        base += size_delta;
    }
}

void *
getMBlocks(nat n) {
    void* ret;
    int i;

    ret = findFreeBlocks(n);
    if(ret==0) {
        alloc_rec* alloc;
        alloc = allocNew(n);
        /* We already belch in allocNew if it fails */
        if (alloc == 0) {
            stg_exit(EXIT_FAILURE);
        } else {
            insertFree(alloc->base, alloc->size);
            ret = findFreeBlocks(n);
        }
    }

    if(ret!=0) {
        /* (In)sanity tests */
        if (((W_)ret & MBLOCK_MASK) != 0) {
            barf("getMBlocks: misaligned block returned");
        }

        commitBlocks(ret, MBLOCK_SIZE*n);

        /* Global bookkeeping */
        mblocks_allocated += n;
        for(i=0; i<(int)n; ++i) {
            markHeapAlloced( ret + i * MBLOCK_SIZE );
        }
    }

    return ret;
}

void
freeAllMBlocks(void)
{
    block_rec* bit;
    block_rec* bnext;
    alloc_rec* it;
    alloc_rec* next;

    /* free the free-list bookkeeping records */
    for(bit=free_blocks; bit!=0; bit=bnext) {
        bnext = bit->next;
        stgFree(bit);
    }
    free_blocks = 0;

    /* release every VirtualAlloc reservation we made */
    for(it=allocs; it!=0; it=next) {
        next = it->next;
        if(!VirtualFree((void*)it->base, 0, MEM_RELEASE)) {
            sysErrorBelch("freeAllMBlocks: VirtualFree MEM_RELEASE failed");
            stg_exit(EXIT_FAILURE);
        }
        stgFree(it);
    }
    allocs = 0;
}

#endif /* defined(mingw32_HOST_OS) || defined(cygwin32_HOST_OS) */