X-Git-Url: http://git.megacz.com/?p=ghc-hetmet.git;a=blobdiff_plain;f=rts%2FMBlock.c;h=85fe02da6e12a411b255e9194a8ce8e440f0a8b4;hp=fa8fd49d886daa62d1f8e71d7d8c31b7cc32959e;hb=9da4639011348fb6c318e3cba4b08622f811d9c4;hpb=0065d5ab628975892cea1ec7303f968c3338cbe1

diff --git a/rts/MBlock.c b/rts/MBlock.c
index fa8fd49..85fe02d 100644
--- a/rts/MBlock.c
+++ b/rts/MBlock.c
@@ -16,6 +16,7 @@
 #include "RtsFlags.h"
 #include "MBlock.h"
 #include "BlockAlloc.h"
+#include "Trace.h"
 
 #ifdef HAVE_STDLIB_H
 #include <stdlib.h>
@@ -287,7 +288,7 @@ getMBlocks(nat n)
     // ToDo: check that we haven't already grabbed the memory at next_request
     next_request = ret + size;
 
-    IF_DEBUG(gc,debugBelch("Allocated %d megablock(s) at %p\n",n,ret));
+    debugTrace(DEBUG_gc, "allocated %d megablock(s) at %p",n,ret);
 
     // fill in the table
     for (i = 0; i < n; i++) {
@@ -307,147 +308,220 @@ freeAllMBlocks(void)
 
 #else /* defined(mingw32_HOST_OS) || defined(cygwin32_HOST_OS) */
 
-/*
-  On Win32 platforms we make use of the two-phased virtual memory API
-  to allocate mega blocks. We proceed as follows:
-
-  Reserve a large chunk of VM (256M at the time, or what the user asked
-  for via the -M option), but don't supply a base address that's aligned on
-  a MB boundary. Instead we round up to the nearest mblock from the chunk of
-  VM we're handed back from the OS (at the moment we just leave the 'slop' at
-  the beginning of the reserved chunk unused - ToDo: reuse it .)
-
-  Reserving memory doesn't allocate physical storage (not even in the
-  page file), this is done later on by committing pages (or mega-blocks in
-  our case).
+/* alloc_rec keeps the info we need to have matching VirtualAlloc and
+   VirtualFree calls.
 */
+typedef struct alloc_rec_ {
+    char* base;     /* non-aligned base address, directly from VirtualAlloc */
+    int size;       /* Size in bytes */
+    struct alloc_rec_* next;
+} alloc_rec;
+
+typedef struct block_rec_ {
+    char* base;     /* base address, non-MBLOCK-aligned */
+    int size;       /* size in bytes */
+    struct block_rec_* next;
+} block_rec;
+
+static alloc_rec* allocs = 0;
+static block_rec* free_blocks = 0;
+
+static
+alloc_rec*
+allocNew(nat n) {
+    alloc_rec* rec;
+    rec = (alloc_rec*)stgMallocBytes(sizeof(alloc_rec),"getMBlocks: allocNew");
+    rec->size = (n+1)*MBLOCK_SIZE;
+    rec->base =
+        VirtualAlloc(NULL, rec->size, MEM_RESERVE, PAGE_READWRITE);
+    if(rec->base==0) {
+        stgFree((void*)rec);
+        rec=0;
+        sysErrorBelch(
+            "getMBlocks: VirtualAlloc MEM_RESERVE %d blocks failed", n);
+    } else {
+        alloc_rec temp;
+        temp.base=0; temp.size=0; temp.next=allocs;
 
-static char* base_non_committed = (char*)0;
-static char* end_non_committed = (char*)0;
-
-static void *membase;
+        alloc_rec* it;
+        it=&temp;
+        for(; it->next!=0 && it->next->base<rec->base; it=it->next) ;
+        rec->next=it->next;
+        it->next=rec;
 
-/* Default is to reserve 256M of VM to minimise the slop cost. */
-#define SIZE_RESERVED_POOL  ( 256 * 1024 * 1024 )
+        allocs=temp.next;
+        debugTrace(DEBUG_gc, "allocated %d megablock(s) at 0x%x",n,(nat)rec->base);
+    }
+    return rec;
+}
 
-/* Number of bytes reserved */
-static unsigned long size_reserved_pool = SIZE_RESERVED_POOL;
+static
+void
+insertFree(char* alloc_base, int alloc_size) {
+    block_rec temp;
+    block_rec* it;
+    block_rec* prev;
+
+    temp.base=0; temp.size=0; temp.next=free_blocks;
+    it = free_blocks;
+    prev = &temp;
+    for( ; it!=0 && it->base<alloc_base; prev=it, it=it->next) {}
+
+    if(it!=0 && alloc_base+alloc_size == it->base) {
+        if(prev->base + prev->size == alloc_base) { /* Merge it, alloc, prev */
+            prev->size += alloc_size + it->size;
+            prev->next = it->next;
+            stgFree(it);
+        } else { /* Merge it, alloc */
+            it->base = alloc_base;
+            it->size += alloc_size;
+        }
+    } else if(prev->base + prev->size == alloc_base) { /* Merge alloc, prev */
+        prev->size += alloc_size;
+    } else { /* Merge none */
+        block_rec* rec;
+        rec = (block_rec*)stgMallocBytes(sizeof(block_rec),"getMBlocks: insertFree");
+        rec->base=alloc_base;
+        rec->size=alloc_size;
+        rec->next = it;
+        prev->next=rec;
+    }
+    free_blocks=temp.next;
+}
 
-void *
-getMBlocks(nat n)
-{
-  static char* base_mblocks = (char*)0;
-  static char* next_request = (char*)0;
-  void* ret = (void*)0;
-  nat i;
+static
+void*
+findFreeBlocks(nat n) {
+    void* ret=0;
+    block_rec* it;
+    block_rec temp;
+    block_rec* prev;
+
+    int required_size;
+    it=free_blocks;
+    required_size = n*MBLOCK_SIZE;
+    temp.next=free_blocks; temp.base=0; temp.size=0;
+    prev=&temp;
+    /* TODO: Don't just take first block, find smallest sufficient block */
+    for( ; it!=0 && it->size<required_size; prev=it, it=it->next ) {}
+    if(it!=0) {
+        if( (((unsigned long)it->base) & MBLOCK_MASK) == 0) { /* MBlock aligned */
+            ret = (void*)it->base;
+            if(it->size==required_size) {
+                prev->next=it->next;
+                stgFree(it);
+            } else {
+                it->base += required_size;
+                it->size -=required_size;
+            }
+        } else {
+            char* need_base;
+            block_rec* next;
+            int new_size;
+            need_base = (char*)(((unsigned long)it->base) & ((unsigned long)~MBLOCK_MASK)) + MBLOCK_SIZE;
+            next = (block_rec*)stgMallocBytes(
+                sizeof(block_rec)
+                , "getMBlocks: findFreeBlocks: splitting");
+            new_size = need_base - it->base;
+            next->base = need_base +required_size;
+            next->size = it->size - (new_size+required_size);
+            it->size = new_size;
+            next->next = it->next;
+            it->next = next;
+            ret=(void*)need_base;
+        }
+    }
+    free_blocks=temp.next;
+    return ret;
+}
 
-  lnat size = MBLOCK_SIZE * n;
-
-  if ( (base_non_committed == 0) || (next_request + size > end_non_committed) ) {
-    if (base_non_committed) {
-      /* Tacky, but if no user-provided -M option is in effect,
-       * set it to the default (==256M) in time for the heap overflow PSA.
-       */
-      if (RtsFlags.GcFlags.maxHeapSize == 0) {
-          RtsFlags.GcFlags.maxHeapSize = size_reserved_pool / BLOCK_SIZE;
+/* VirtualAlloc MEM_COMMIT can't cross boundaries of VirtualAlloc MEM_RESERVE,
+   so we might need to do many VirtualAlloc MEM_COMMITs.  We simply walk the
+   (ordered) allocated blocks. */
+static void
+commitBlocks(char* base, int size) {
+    alloc_rec* it;
+    it=allocs;
+    for( ; it!=0 && (it->base+it->size)<=base; it=it->next ) {}
+    for( ; it!=0 && size>0; it=it->next ) {
+        int size_delta;
+        void* temp;
+        size_delta = it->size - (base-it->base);
+        if(size_delta>size) size_delta=size;
+        temp = VirtualAlloc(base, size_delta, MEM_COMMIT, PAGE_READWRITE);
+        if(temp==0) {
+            sysErrorBelch("getMBlocks: VirtualAlloc MEM_COMMIT failed");
+            stg_exit(EXIT_FAILURE);
        }
-      heapOverflow();
-    }
-    if (RtsFlags.GcFlags.maxHeapSize != 0) {
-      size_reserved_pool = BLOCK_SIZE * RtsFlags.GcFlags.maxHeapSize;
-      if (size_reserved_pool < MBLOCK_SIZE) {
-        size_reserved_pool = 2*MBLOCK_SIZE;
-      }
+        size-=size_delta;
+        base+=size_delta;
     }
-    base_non_committed = VirtualAlloc ( NULL
-                                      , size_reserved_pool
-                                      , MEM_RESERVE
-                                      , PAGE_READWRITE
-                                      );
-    membase = base_non_committed;
-    if ( base_non_committed == 0 ) {
-         errorBelch("getMBlocks: VirtualAlloc MEM_RESERVE %lu failed with: %ld\n", size_reserved_pool, GetLastError());
-         ret=(void*)-1;
-    } else {
-      end_non_committed = (char*)base_non_committed + (unsigned long)size_reserved_pool;
-      /* The returned pointer is not aligned on a mega-block boundary. Make it. */
-      base_mblocks = (char*)((unsigned long)base_non_committed & (unsigned long)~MBLOCK_MASK) + MBLOCK_SIZE;
-#     if 0
-      debugBelch("getMBlocks: Dropping %d bytes off of 256M chunk\n",
-                 (unsigned)base_mblocks - (unsigned)base_non_committed);
-#     endif
-
-      if ( ((char*)base_mblocks + size) > end_non_committed ) {
-        debugBelch("getMBlocks: oops, committed too small a region to start with.");
-        ret=(void*)-1;
-      } else {
-        next_request = base_mblocks;
-      }
-    }
-  }
-
-  /* Commit the mega block(s) to phys mem */
-  if ( ret != (void*)-1 ) {
-     ret = VirtualAlloc(next_request, size, MEM_COMMIT, PAGE_READWRITE);
-     if (ret == NULL) {
-        debugBelch("getMBlocks: VirtualAlloc MEM_COMMIT %lu failed with: %ld\n", size, GetLastError());
-        ret=(void*)-1;
-     }
-  }
+}
 
-  if (((W_)ret & MBLOCK_MASK) != 0) {
-      barf("getMBlocks: misaligned block returned");
-  }
+void *
+getMBlocks(nat n) {
+    void* ret;
+    ret = findFreeBlocks(n);
+    if(ret==0) {
+        alloc_rec* alloc;
+        alloc = allocNew(n);
+        /* We already belch in allocNew if it fails */
+        if (alloc == 0) {
+            stg_exit(EXIT_FAILURE);
+        } else {
+            insertFree(alloc->base, alloc->size);
+            ret = findFreeBlocks(n);
+        }
+    }
 
-  if (ret == (void*)-1) {
-     barf("getMBlocks: unknown memory allocation failure on Win32.");
-  }
+    if(ret!=0) {
+        /* (In)sanity tests */
+        if (((W_)ret & MBLOCK_MASK) != 0) {
+            barf("getMBlocks: misaligned block returned");
+        }
 
-  IF_DEBUG(gc,debugBelch("Allocated %d megablock(s) at 0x%x\n",n,(nat)ret));
-  next_request = (char*)next_request + size;
+        commitBlocks(ret, MBLOCK_SIZE*n);
 
-  mblocks_allocated += n;
-
-  // fill in the table
-  for (i = 0; i < n; i++) {
-      markHeapAlloced( ret + i * MBLOCK_SIZE );
-  }
+        /* Global bookkeeping */
+        mblocks_allocated += n;
+        int i;
+        for(i=0; i<(int)n; ++i) {
+            markHeapAlloced( ret + i * MBLOCK_SIZE );
+        }
+    }
 
-  return ret;
+    return ret;
 }
 
 void
 freeAllMBlocks(void)
 {
-  BOOL rc;
-
-  rc = VirtualFree(membase, 0, MEM_RELEASE);
-
-  if (rc == FALSE) {
-      debugBelch("freeAllMBlocks: VirtualFree failed with: %ld\n", GetLastError());
-  }
-}
-
-/* Hand back the physical memory that is allocated to a mega-block.
-   ToDo: chain the released mega block onto some list so that
-         getMBlocks() can get at it.
-
-   Currently unused.
-*/
-#if 0
-void
-freeMBlock(void* p, nat n)
-{
-  BOOL rc;
-
-  rc = VirtualFree(p, n * MBLOCK_SIZE , MEM_DECOMMIT );
-
-  if (rc == FALSE) {
-# ifdef DEBUG
-    debugBelch("freeMBlocks: VirtualFree failed with: %d\n", GetLastError());
-# endif
-  }
-
+    {
+        block_rec* next;
+        block_rec* it;
+        next=0;
+        it = free_blocks;
+        for(; it!=0; ) {
+            next = it->next;
+            stgFree(it);
+            it=next;
+        }
+    }
+    {
+        alloc_rec* next;
+        alloc_rec* it;
+        next=0;
+        it=allocs;
+        for(; it!=0; ) {
+            if(!VirtualFree((void*)it->base, 0, MEM_RELEASE)) {
+                sysErrorBelch("freeAllMBlocks: VirtualFree MEM_RELEASE failed");
+                stg_exit(EXIT_FAILURE);
+            }
+            next = it->next;
+            stgFree(it);
+            it=next;
+        }
+    }
 }
-#endif
 
 #endif
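
Appended note (not part of the commit): the sketch below shows the bare Win32 reserve/commit/release sequence that allocNew, commitBlocks and freeAllMBlocks in the patch are built on. The constants MB, ALIGN and ALIGN_MASK stand in for MBLOCK_SIZE and MBLOCK_MASK, and the main() driver is purely illustrative; only the VirtualAlloc/VirtualFree calls and their flags are the real API.

/* Minimal sketch of the two-phase Win32 VM API, under the assumptions above. */
#include <windows.h>
#include <stdio.h>

#define MB          (1024 * 1024)
#define ALIGN       MB                        /* stand-in for MBLOCK_SIZE */
#define ALIGN_MASK  ((ULONG_PTR)(ALIGN - 1))  /* stand-in for MBLOCK_MASK */

int main(void)
{
    /* Phase 1: reserve address space only.  Nothing is committed yet, so
     * this consumes neither physical memory nor page-file space. */
    SIZE_T reserve_size = 8 * MB;
    char *reserved = VirtualAlloc(NULL, reserve_size, MEM_RESERVE, PAGE_READWRITE);
    if (reserved == NULL) {
        fprintf(stderr, "MEM_RESERVE failed: %lu\n", GetLastError());
        return 1;
    }

    /* The reservation is not ALIGN-aligned in general; round up to the next
     * boundary, the same trick findFreeBlocks uses on an unaligned block. */
    char *aligned = (char*)(((ULONG_PTR)reserved + ALIGN_MASK) & ~ALIGN_MASK);

    /* Phase 2: commit one aligned chunk inside the reservation.  A single
     * MEM_COMMIT may cover any range inside ONE reservation, but it may not
     * span two different reservations; that is why commitBlocks in the patch
     * walks the ordered list of reservations and commits piecewise. */
    if (VirtualAlloc(aligned, ALIGN, MEM_COMMIT, PAGE_READWRITE) == NULL) {
        fprintf(stderr, "MEM_COMMIT failed: %lu\n", GetLastError());
        return 1;
    }

    aligned[0] = 42;   /* committed memory is now usable */

    /* Phase 3: release the whole reservation.  With MEM_RELEASE the size
     * must be 0 and the pointer must be the original reservation base,
     * which is exactly what alloc_rec records for freeAllMBlocks. */
    if (!VirtualFree(reserved, 0, MEM_RELEASE)) {
        fprintf(stderr, "MEM_RELEASE failed: %lu\n", GetLastError());
        return 1;
    }
    return 0;
}

The alignment step is also why allocNew reserves (n+1)*MBLOCK_SIZE rather than n*MBLOCK_SIZE: the extra megablock guarantees that an MBLOCK-aligned run of n megablocks fits inside the unaligned reservation, and findFreeBlocks later carves the free block at that boundary.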