1 /* -----------------------------------------------------------------------------
2 * $Id: MBlock.c,v 1.32 2002/10/23 12:26:11 mthomas Exp $
4 * (c) The GHC Team 1998-1999
6 * MegaBlock Allocator Interface. This file contains all the dirty
7 * architecture-dependent hackery required to get a chunk of aligned
8 * memory from the operating system.
10 * ---------------------------------------------------------------------------*/
12 /* This is non-posix compliant. */
13 /* #include "PosixSource.h" */
19 #include "BlockAlloc.h"
27 #ifdef HAVE_SYS_TYPES_H
28 #include <sys/types.h>
30 #ifndef mingw32_TARGET_OS
31 # ifdef HAVE_SYS_MMAN_H
32 # include <sys/mman.h>
// Running total of megablocks handed out by getMBlocks(); only ever
// incremented in this file (freed mblocks are recycled, not returned
// to the OS -- see the ToDo on freeMBlock below).
44 lnat mblocks_allocated = 0;
46 /* -----------------------------------------------------------------------------
47 The MBlock Map: provides our implementation of HEAP_ALLOCED()
48 -------------------------------------------------------------------------- */
// One byte per megablock of address space: nonzero => that megablock
// belongs to the GHC heap (set by mblockIsHeap below).  4096 entries
// presumably covers a 32-bit address space at one megabyte per
// megablock -- TODO confirm against MBLOCK_SHIFT/MBLOCK_SIZE.
50 StgWord8 mblock_map[4096]; // initially all zeros
// Record that the megablock containing 'p' is heap memory: index the
// map by p's megablock number.  Despite the predicate-sounding name,
// this is the *setter* called from getMBlocks(); HEAP_ALLOCED()
// presumably reads the same map -- its definition is not in this
// excerpt.  (Fragment: return type and braces fall on elided lines.)
53 mblockIsHeap (void *p)
55 mblock_map[((StgWord)p & ~MBLOCK_MASK) >> MBLOCK_SHIFT] = 1;
58 /* -----------------------------------------------------------------------------
59 Allocate new mblock(s)
60 -------------------------------------------------------------------------- */
68 /* -----------------------------------------------------------------------------
71 On Unix-like systems, we use mmap() to allocate our memory. We
72 want memory in chunks of MBLOCK_SIZE, and aligned on an MBLOCK_SIZE
73 boundary. The mmap() interface doesn't give us this level of
74 control, so we have to use some heuristics.
76 In the general case, if we want a block of n megablocks, then we
77 allocate n+1 and trim off the slop from either side (using
78 munmap()) to get an aligned chunk of size n. However, the next
79 time we'll try to allocate directly after the previously allocated
80 chunk, on the grounds that this is aligned and likely to be free.
81 If it turns out that we were wrong, we have to munmap() and try
82 again using the general method.
83 -------------------------------------------------------------------------- */
85 #if !defined(mingw32_TARGET_OS) && !defined(cygwin32_TARGET_OS)
87 // A wrapper around mmap(), to abstract away from OS differences in
88 // the mmap() interface.
// OS-portability wrapper: one mmap() call site per platform family,
// chosen at preprocess time.  All arms request a private, read/write,
// zero-filled mapping of 'size' bytes at hint 'addr'.  (Fragment: the
// declaration of 'ret' and at least one #elif arm fall on elided
// lines.)
91 my_mmap (void *addr, int size)
95 #ifdef solaris2_TARGET_OS
// Solaris has no MAP_ANON: map /dev/zero instead.  O_RDONLY is fine
// for a MAP_PRIVATE writable mapping.
// NOTE(review): no close(fd) is visible in this excerpt -- if it is
// genuinely absent, every call leaks a descriptor; confirm upstream.
97 int fd = open("/dev/zero",O_RDONLY);
98 ret = mmap(addr, size, PROT_READ | PROT_WRITE,
99 MAP_FIXED | MAP_PRIVATE, fd, 0);
// (elided #elif arm) -- anonymous mapping; 'addr' is only a hint here.
103 ret = mmap(addr, size, PROT_READ | PROT_WRITE,
104 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
// NOTE(review): '#elif darwin_TARGET_OS' relies on undefined macros
// evaluating to 0; '#elif defined(darwin_TARGET_OS)' would match the
// #ifdef style used above.
105 #elif darwin_TARGET_OS
// Darwin: MAP_FIXED forces placement at 'addr' -- this replaces any
// existing mapping there, so callers must pass a safe address.
106 ret = mmap(addr, size, PROT_READ | PROT_WRITE,
107 MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
// Default arm: plain anonymous mapping, 'addr' is only a hint.
109 ret = mmap(addr, size, PROT_READ | PROT_WRITE,
110 MAP_ANON | MAP_PRIVATE, -1, 0);
116 // Implements the general case: allocate a chunk of memory of 'size'
// General-case allocator: over-allocate, then munmap() the misaligned
// "slop" from both ends so an MBLOCK_SIZE-aligned chunk of the
// requested size remains.
// NOTE(review): the arithmetic below only balances if an elided line
// first grew 'size' by one MBLOCK_SIZE (the "allocate n+1" scheme from
// the header comment) -- verify against the full file.
120 gen_map_mblocks (int size)
125 // Try to map a larger block, and take the aligned portion from
126 // it (unmap the rest).
128 ret = my_mmap(0, size);
// mmap() failure sentinel: MAP_FAILED, i.e. (void *)-1.
129 if (ret == (void *)-1) {
130 barf("gen_map_mblocks: mmap failed");
133 // unmap the slop bits around the chunk we allocated
// 'slop' = offset of 'ret' above the previous megablock boundary.
134 slop = (W_)ret & MBLOCK_MASK;
// Front slop: from 'ret' up to the next aligned boundary (a full
// megablock when slop == 0, paid for by the extra mblock mapped).
136 if (munmap(ret, MBLOCK_SIZE - slop) == -1) {
137 barf("gen_map_mblocks: munmap failed");
// Tail slop: the leftover 'slop' bytes at the far end, if any.
139 if (slop > 0 && munmap(ret+size-slop, slop) == -1) {
140 barf("gen_map_mblocks: munmap failed");
143 // next time, try after the block we just got.
// NOTE(review): arithmetic on a void* ('ret') is a GCC extension.
144 ret += MBLOCK_SIZE - slop;
149 // The external interface: allocate 'n' mblocks, and return the
// Allocate 'n' contiguous megablocks, MBLOCK_SIZE-aligned (Unix path).
// Fast path: mmap() directly after the previous allocation
// ('next_request', seeded with HEAP_BASE), which is aligned and likely
// free.  If the kernel ignores the hint and returns something
// misaligned, the block is unmapped and gen_map_mblocks() aligns by
// over-allocation instead.  (Fragment: the function signature and some
// closing braces fall on elided lines.)
155 static caddr_t next_request = (caddr_t)HEAP_BASE;
157 lnat size = MBLOCK_SIZE * n;
// A zero HEAP_BASE means "no placement hint": take the general path.
160 if (next_request == 0) {
161 // use gen_map_mblocks the first time.
162 ret = gen_map_mblocks(size);
164 ret = my_mmap(next_request, size);
166 if (ret == (void *)-1) {
167 if (errno == ENOMEM) {
// FIX: report the bytes actually requested.  This previously printed
// n * BLOCK_SIZE, understating the request by a factor of
// MBLOCK_SIZE/BLOCK_SIZE (the request size is MBLOCK_SIZE * n above).
168 belch("out of memory (requested %d bytes)", n * MBLOCK_SIZE);
169 stg_exit(EXIT_FAILURE);
171 barf("getMBlock: mmap failed");
// Slow path: the hint was ignored and the mapping is misaligned.
175 if (((W_)ret & MBLOCK_MASK) != 0) {
178 belch("getMBlock: misaligned block %p returned when allocating %d megablock(s) at %p", ret, n, next_request);
181 // unmap this block...
182 if (munmap(ret, size) == -1) {
183 barf("getMBlock: munmap failed");
185 // and do it the hard way
186 ret = gen_map_mblocks(size);
190 // Next time, we'll try to allocate right after the block we just got.
191 next_request = ret + size;
193 IF_DEBUG(gc,fprintf(stderr,"Allocated %d megablock(s) at %p\n",n,ret));
// Record every new megablock in the map backing HEAP_ALLOCED().
196 for (i = 0; i < n; i++) {
197 mblockIsHeap( ret + i * MBLOCK_SIZE );
200 mblocks_allocated += n;
205 #else /* defined(mingw32_TARGET_OS) || defined(cygwin32_TARGET_OS) */
208 On Win32 platforms we make use of the two-phased virtual memory API
209 to allocate mega blocks. We proceed as follows:
211 Reserve a large chunk of VM (currently 256M, or what the user asked
212 for via the -M option), but don't supply a base address that's aligned on
213 a MB boundary. Instead we round up to the nearest mblock from the chunk of
214 VM we're handed back from the OS (at the moment we just leave the 'slop' at
215 the beginning of the reserved chunk unused - ToDo: reuse it.)
217 Reserving memory doesn't allocate physical storage (not even in the
218 page file), this is done later on by committing pages (or mega-blocks in
/* Bounds of the big reserved-but-not-yet-committed VM region that the
   Win32 allocator carves megablocks out of. */
/* NOTE(review): these two have external linkage while
   size_reserved_pool below is static -- consider making them static
   too unless another module needs them. */
222 char* base_non_committed = (char*)0;
223 char* end_non_committed = (char*)0;
225 /* Default is to reserve 256M of VM to minimise the slop cost. */
226 #define SIZE_RESERVED_POOL ( 256 * 1024 * 1024 )
/* Overridden below from RtsFlags.GcFlags.maxHeapSize when -M is
   given. */
228 /* Number of bytes reserved */
229 static unsigned long size_reserved_pool = SIZE_RESERVED_POOL;
// Win32 getMBlocks: two-phase allocation.  On first call (or pool
// exhaustion) a large region is VirtualAlloc-reserved; each request
// then MEM_COMMITs the next 'size' bytes at 'next_request'.
// (Fragment: the signature, else-branches and several closing braces
// fall on elided lines.)
// Function-static cursor state persists across calls.
234 static char* base_mblocks = (char*)0;
235 static char* next_request = (char*)0;
// NOTE(review): 'ret' is initialised to 0 but tested against
// (void*)-1 below -- the elided error paths presumably set
// ret = (void*)-1; verify against the full file.
236 void* ret = (void*)0;
239 lnat size = MBLOCK_SIZE * n;
// (Re)initialise the reserved pool if absent or too small to satisfy
// this request.
241 if ( (base_non_committed == 0) || (next_request + size > end_non_committed) ) {
// A pool already exists => it is exhausted; we never grow it.
242 if (base_non_committed) {
// NOTE(review): size_reserved_pool is unsigned long but printed with
// %d -- format mismatch.
243 barf("RTS exhausted max heap size (%d bytes)\n", size_reserved_pool);
// -M given: reserve exactly the user's max heap instead of 256M.
245 if (RtsFlags.GcFlags.maxHeapSize != 0) {
246 size_reserved_pool = BLOCK_SIZE * RtsFlags.GcFlags.maxHeapSize;
// Need at least one spare mblock for the alignment slop below.
247 if (size_reserved_pool < MBLOCK_SIZE) {
248 size_reserved_pool = 2*MBLOCK_SIZE;
// Phase 1: reserve address space only (arguments partly elided;
// presumably MEM_RESERVE and size_reserved_pool -- confirm).
251 base_non_committed = VirtualAlloc ( NULL
256 if ( base_non_committed == 0 ) {
257 fprintf(stderr, "getMBlocks: VirtualAlloc failed with: %ld\n", GetLastError());
260 end_non_committed = (char*)base_non_committed + (unsigned long)size_reserved_pool;
261 /* The returned pointer is not aligned on a mega-block boundary. Make it. */
// Round up to the next mblock boundary; the slop at the front of the
// reservation is simply wasted (see the ToDo above).
// NOTE(review): pointer<->unsigned long/unsigned casts here and below
// truncate on 64-bit Windows (LLP64); fine for 32-bit-era code.
262 base_mblocks = (char*)((unsigned long)base_non_committed & (unsigned long)~MBLOCK_MASK) + MBLOCK_SIZE;
264 fprintf(stderr, "getMBlocks: Dropping %d bytes off of 256M chunk\n",
265 (unsigned)base_mblocks - (unsigned)base_non_committed);
// After alignment the pool may no longer fit even this first request.
268 if ( ((char*)base_mblocks + size) > end_non_committed ) {
269 fprintf(stderr, "getMBlocks: oops, committed too small a region to start with.");
272 next_request = base_mblocks;
276 /* Commit the mega block(s) to phys mem */
// Skip the commit if an earlier step already flagged failure.
277 if ( ret != (void*)-1 ) {
278 ret = VirtualAlloc(next_request, size, MEM_COMMIT, PAGE_READWRITE);
280 fprintf(stderr, "getMBlocks: VirtualAlloc failed with: %ld\n", GetLastError());
// next_request is always mblock-aligned, so this should never fire.
285 if (((W_)ret & MBLOCK_MASK) != 0) {
286 barf("getMBlocks: misaligned block returned");
289 if (ret == (void*)-1) {
290 barf("getMBlocks: unknown memory allocation failure on Win32.");
// NOTE(review): (nat)ret with %x truncates pointers on 64-bit targets.
293 IF_DEBUG(gc,fprintf(stderr,"Allocated %d megablock(s) at 0x%x\n",n,(nat)ret));
294 next_request = (char*)next_request + size;
296 mblocks_allocated += n;
// Record every new megablock in the map backing HEAP_ALLOCED().
299 for (i = 0; i < n; i++) {
300 mblockIsHeap( ret + i * MBLOCK_SIZE );
306 /* Hand back the physical memory that is allocated to a mega-block.
307 ToDo: chain the released mega block onto some list so that
308 getMBlocks() can get at it.
/* Decommit (but keep reserved) the physical storage behind 'n'
   megablocks starting at 'p'.  The address range stays reserved in
   the pool, so VirtualAlloc cannot hand it out again -- see the ToDo
   above about chaining released mblocks for reuse.  (Fragment: the
   declaration of 'rc' and the surrounding braces fall on elided
   lines.) */
314 freeMBlock(void* p, nat n)
318 rc = VirtualFree(p, n * MBLOCK_SIZE , MEM_DECOMMIT );
/* FIX: GetLastError() returns a DWORD (unsigned long on Win32); use
   %ld to match the other GetLastError() reports in this file instead
   of the mismatched %d.
   NOTE(review): the message says "freeMBlocks" but the function is
   freeMBlock -- harmless, flagged for consistency. */
322 fprintf(stderr, "freeMBlocks: VirtualFree failed with: %ld\n", GetLastError());