1 /* -----------------------------------------------------------------------------
2 * $Id: MBlock.c,v 1.33 2002/10/25 12:56:34 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * MegaBlock Allocator Interface. This file contains all the dirty
7 * architecture-dependent hackery required to get a chunk of aligned
8 * memory from the operating system.
10 * ---------------------------------------------------------------------------*/
12 /* This is non-posix compliant. */
13 /* #include "PosixSource.h" */
19 #include "BlockAlloc.h"
27 #ifdef HAVE_SYS_TYPES_H
28 #include <sys/types.h>
30 #ifndef mingw32_TARGET_OS
31 # ifdef HAVE_SYS_MMAN_H
32 # include <sys/mman.h>
/* Running total of megablocks handed out by getMBlocks().
   NOTE(review): freeMBlock() does not appear to decrement this in the
   code visible here -- confirm it is a high-water / cumulative count. */
lnat mblocks_allocated = 0;
46 /* -----------------------------------------------------------------------------
47 The MBlock Map: provides our implementation of HEAP_ALLOCED()
48 -------------------------------------------------------------------------- */
// One byte per possible megablock: mblock_map[i] != 0 iff the megablock
// whose number is i belongs to the GC heap (backs HEAP_ALLOCED()).
// NOTE(review): 4096 entries presumes a 32-bit address space with the
// current MBLOCK_SHIFT -- confirm for this target.
StgWord8 mblock_map[4096]; // initially all zeros
// Mark the megablock containing address p as heap memory, so that
// HEAP_ALLOCED(p) will subsequently answer true for any address in it.
mblockIsHeap (void *p)
    // Index the map by megablock number: round p down to its megablock
    // base, then shift away the offset bits.
    mblock_map[((StgWord)p & ~MBLOCK_MASK) >> MBLOCK_SHIFT] = 1;
58 /* -----------------------------------------------------------------------------
59 Allocate new mblock(s)
60 -------------------------------------------------------------------------- */
68 /* -----------------------------------------------------------------------------
71 On Unix-like systems, we use mmap() to allocate our memory. We
72 want memory in chunks of MBLOCK_SIZE, and aligned on an MBLOCK_SIZE
73 boundary. The mmap() interface doesn't give us this level of
74 control, so we have to use some heuristics.
76 In the general case, if we want a block of n megablocks, then we
77 allocate n+1 and trim off the slop from either side (using
78 munmap()) to get an aligned chunk of size n. However, the next
79 time we'll try to allocate directly after the previously allocated
80 chunk, on the grounds that this is aligned and likely to be free.
81 If it turns out that we were wrong, we have to munmap() and try
82 again using the general method.
83 -------------------------------------------------------------------------- */
85 #if !defined(mingw32_TARGET_OS) && !defined(cygwin32_TARGET_OS)
// A wrapper around mmap(), to abstract away from OS differences in
// the mmap() interface.
//
// 'addr' is the preferred address (a hint on most OSes); 'size' is the
// number of bytes to map read/write.  Callers check the mmap() error
// value ((void *)-1) themselves.
my_mmap (void *addr, int size)

#ifdef solaris2_TARGET_OS
    // Solaris lacks anonymous mappings here: map /dev/zero instead.
    // NOTE(review): close(fd) is not visible in this fragment -- confirm
    // the full function closes it, otherwise each call leaks a descriptor.
    int fd = open("/dev/zero",O_RDONLY);
    ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
    // Anonymous private mapping (Linux-style spelling).
    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
               MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#elif darwin_TARGET_OS
    // Darwin: MAP_FIXED forces the mapping exactly at 'addr'.
    // NOTE(review): MAP_FIXED silently replaces any existing mapping in
    // that range -- confirm this is intended here.
    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
               MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    // Default case: anonymous private mapping, BSD-style spelling.
    ret = mmap(addr, size, PROT_READ | PROT_WRITE,
               MAP_ANON | MAP_PRIVATE, -1, 0);
// Implements the general case: allocate a chunk of memory of 'size'
// bytes, aligned on an MBLOCK_SIZE boundary, by over-allocating and
// trimming the misaligned "slop" off each end with munmap().
gen_map_mblocks (int size)

    // Try to map a larger block, and take the aligned portion from
    // it (unmap the rest).
    ret = my_mmap(0, size);
    if (ret == (void *)-1) {
        barf("gen_map_mblocks: mmap failed");

    // unmap the slop bits around the chunk we allocated
    slop = (W_)ret & MBLOCK_MASK;  // how far 'ret' is past an mblock boundary

    // NOTE(review): trimming (MBLOCK_SIZE - slop) at the front assumes
    // 'size' was grown by one extra MBLOCK before mapping (not visible in
    // this fragment) -- confirm against the full function.
    if (munmap(ret, MBLOCK_SIZE - slop) == -1) {
        barf("gen_map_mblocks: munmap failed");
    // trim the tail slop, if any, so exactly the aligned middle remains
    if (slop > 0 && munmap(ret+size-slop, slop) == -1) {
        barf("gen_map_mblocks: munmap failed");

    // next time, try after the block we just got.
    ret += MBLOCK_SIZE - slop;
148 // The external interface: allocate 'n' mblocks, and return the
154 static caddr_t next_request = (caddr_t)HEAP_BASE;
156 lnat size = MBLOCK_SIZE * n;
159 if (next_request == 0) {
160 // use gen_map_mblocks the first time.
161 ret = gen_map_mblocks(size);
163 ret = my_mmap(next_request, size);
165 if (ret == (void *)-1) {
166 if (errno == ENOMEM) {
167 belch("out of memory (requested %d bytes)", n * BLOCK_SIZE);
168 stg_exit(EXIT_FAILURE);
170 barf("getMBlock: mmap failed");
174 if (((W_)ret & MBLOCK_MASK) != 0) {
177 belch("getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
180 // unmap this block...
181 if (munmap(ret, size) == -1) {
182 barf("getMBlock: munmap failed");
184 // and do it the hard way
185 ret = gen_map_mblocks(size);
189 // Next time, we'll try to allocate right after the block we just got.
190 next_request = ret + size;
192 IF_DEBUG(gc,fprintf(stderr,"Allocated %d megablock(s) at %p\n",n,ret));
195 for (i = 0; i < n; i++) {
196 mblockIsHeap( ret + i * MBLOCK_SIZE );
199 mblocks_allocated += n;
204 #else /* defined(mingw32_TARGET_OS) || defined(cygwin32_TARGET_OS) */
207 On Win32 platforms we make use of the two-phased virtual memory API
208 to allocate mega blocks. We proceed as follows:
   Reserve a large chunk of VM (256M at a time, or what the user asked
211 for via the -M option), but don't supply a base address that's aligned on
212 a MB boundary. Instead we round up to the nearest mblock from the chunk of
213 VM we're handed back from the OS (at the moment we just leave the 'slop' at
   the beginning of the reserved chunk unused - ToDo: reuse it.)
216 Reserving memory doesn't allocate physical storage (not even in the
217 page file), this is done later on by committing pages (or mega-blocks in
/* Bounds of the VM region reserved (but not necessarily committed)
   via VirtualAlloc(MEM_RESERVE) on Win32. */
char* base_non_committed = (char*)0;
char* end_non_committed = (char*)0;

/* Default is to reserve 256M of VM to minimise the slop cost. */
#define SIZE_RESERVED_POOL ( 256 * 1024 * 1024 )

/* Number of bytes reserved */
static unsigned long size_reserved_pool = SIZE_RESERVED_POOL;
// Win32 getMBlocks: two-phase allocation.  On first use (or when a
// request would run off the end of the pool) reserve a large region;
// each request then commits 'n' megablocks from the front of it.
static char* base_mblocks = (char*)0;   // first mblock-aligned address in the pool
static char* next_request = (char*)0;   // next address to commit and hand out
void* ret = (void*)0;

lnat size = MBLOCK_SIZE * n;

if ( (base_non_committed == 0) || (next_request + size > end_non_committed) ) {
    if (base_non_committed) {
        // The reserved pool is never grown: hitting its end is fatal.
        barf("RTS exhausted max heap size (%d bytes)\n", size_reserved_pool);
    // Size the pool from -M (maxHeapSize is in BLOCK_SIZE units),
    // with a floor of two megablocks.
    if (RtsFlags.GcFlags.maxHeapSize != 0) {
        size_reserved_pool = BLOCK_SIZE * RtsFlags.GcFlags.maxHeapSize;
        if (size_reserved_pool < MBLOCK_SIZE) {
            size_reserved_pool = 2*MBLOCK_SIZE;
    // Phase 1: reserve address space only (no physical backing yet).
    base_non_committed = VirtualAlloc ( NULL
    if ( base_non_committed == 0 ) {
        fprintf(stderr, "getMBlocks: VirtualAlloc failed with: %ld\n", GetLastError());
    end_non_committed = (char*)base_non_committed + (unsigned long)size_reserved_pool;
    /* The returned pointer is not aligned on a mega-block boundary. Make it. */
    base_mblocks = (char*)((unsigned long)base_non_committed & (unsigned long)~MBLOCK_MASK) + MBLOCK_SIZE;
    fprintf(stderr, "getMBlocks: Dropping %d bytes off of 256M chunk\n",
            (unsigned)base_mblocks - (unsigned)base_non_committed);

    if ( ((char*)base_mblocks + size) > end_non_committed ) {
        fprintf(stderr, "getMBlocks: oops, committed too small a region to start with.");
    next_request = base_mblocks;

/* Commit the mega block(s) to phys mem */
// NOTE(review): 'ret' is initialised to 0 above; the paths that set it
// to (void*)-1 are not visible in this fragment -- confirm the error
// branches assign it before these checks.
if ( ret != (void*)-1 ) {
    ret = VirtualAlloc(next_request, size, MEM_COMMIT, PAGE_READWRITE);
        fprintf(stderr, "getMBlocks: VirtualAlloc failed with: %ld\n", GetLastError());

// The pool base was rounded up to an mblock boundary, so each commit
// from it should stay aligned; anything else is a logic error.
if (((W_)ret & MBLOCK_MASK) != 0) {
    barf("getMBlocks: misaligned block returned");

if (ret == (void*)-1) {
    barf("getMBlocks: unknown memory allocation failure on Win32.");

IF_DEBUG(gc,fprintf(stderr,"Allocated %d megablock(s) at 0x%x\n",n,(nat)ret));
next_request = (char*)next_request + size;

mblocks_allocated += n;

// record each new megablock in the map, for HEAP_ALLOCED()
for (i = 0; i < n; i++) {
    mblockIsHeap( ret + i * MBLOCK_SIZE );
/* Hand back the physical memory that is allocated to a mega-block.
   ToDo: chain the released mega block onto some list so that
   getMBlocks() can get at it.
freeMBlock(void* p, nat n)
    // Decommit (but keep reserved) the pages backing the n megablocks
    // starting at p, so the address range can be re-committed later.
    rc = VirtualFree(p, n * MBLOCK_SIZE , MEM_DECOMMIT );
        // BUGFIX: GetLastError() returns a DWORD (unsigned long); use
        // %ld to match the other GetLastError() reports in this file
        // instead of the mismatched %d.
        fprintf(stderr, "freeMBlocks: VirtualFree failed with: %ld\n", GetLastError());