diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c
index caadd6f..f13f186 100644
--- a/ghc/rts/Storage.c
+++ b/ghc/rts/Storage.c
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.60 2002/03/21 11:23:59 sebc Exp $
+ * $Id: Storage.c,v 1.83 2004/07/21 10:47:28 simonmar Exp $
  *
  * (c) The GHC Team, 1998-1999
  *
@@ -26,47 +26,12 @@
 #include "RetainerProfile.h"	// for counting memory blocks (memInventory)
 
-#ifdef darwin_TARGET_OS
-#include
-#include
-#include
-#include
-#include
-#include
-unsigned long macho_etext = 0;
-unsigned long macho_edata = 0;
-#define IN_RANGE(base,size,x) (((P_)base) <= ((P_)x) && ((P_)x) < ((P_)((unsigned long)base + size)))
-static void macosx_get_memory_layout(void)
-{
-    vm_address_t address;
-    vm_size_t size;
-    struct vm_region_basic_info info;
-    mach_msg_type_number_t info_count;
-    mach_port_t object_name;
-    task_t task = mach_task_self();
-    P_ in_text = ((P_*)(&stg_BLACKHOLE_info))[0];
-    P_ in_data = (P_)&stg_dummy_ret_closure;
-
-    address = 0; /* VM_MIN_ADDRESS */
-    while (1) {
-        info_count = VM_REGION_BASIC_INFO_COUNT;
-        if (vm_region(task, &address, &size, VM_REGION_BASIC_INFO,
-                      (vm_region_info_t)&info, &info_count, &object_name)
-            != KERN_SUCCESS)
-            break;
-        if (IN_RANGE(address, size, in_text))
-            macho_etext = address + size;
-        if (IN_RANGE(address, size, in_data))
-            macho_edata = address + size;
-        address += size;
-    }
-}
-#endif
+#include
+#include
 
 StgClosure    *caf_list = NULL;
 
 bdescr *small_alloc_list;	/* allocate()d small objects */
-bdescr *large_alloc_list;	/* allocate()d large objects */
 bdescr *pinned_object_block;    /* allocate pinned objects into this block */
 nat alloc_blocks;		/* number of allocate()d blocks since GC */
 nat alloc_blocks_lim;		/* approximate limit on alloc_blocks */
@@ -74,12 +39,12 @@ nat alloc_blocks_lim;		/* approximate limit on alloc_blocks */
 StgPtr alloc_Hp    = NULL;	/* next free byte in small_alloc_list */
 StgPtr alloc_HpLim = NULL;	/* end of block at small_alloc_list */
 
-generation *generations;	/* all the generations */
-generation *g0;			/* generation 0, for convenience */
-generation *oldest_gen;		/* oldest generation, for convenience */
-step *g0s0;			/* generation 0, step 0, for convenience */
+generation *generations = NULL;	/* all the generations */
+generation *g0 = NULL;		/* generation 0, for convenience */
+generation *oldest_gen = NULL;	/* oldest generation, for convenience */
+step *g0s0 = NULL;		/* generation 0, step 0, for convenience */
 
-lnat total_allocated = 0;	/* total memory allocated during run */
+ullong total_allocated = 0;	/* total memory allocated during run */
 
 /*
  * Storage manager mutex: protects all the above state from
@@ -103,30 +68,18 @@ initStorage( void )
   step *stp;
   generation *gen;
 
-#if defined(darwin_TARGET_OS)
-  macosx_get_memory_layout();
-#endif
-
-  /* Sanity check to make sure we are able to make the distinction
-   * between closures and infotables
-   */
-  if (!LOOKS_LIKE_GHC_INFO(&stg_BLACKHOLE_info)) {
-    barf("LOOKS_LIKE_GHC_INFO+ is incorrectly defined");
-    exit(0);
-  }
-  if (LOOKS_LIKE_GHC_INFO(&stg_dummy_ret_closure)) {
-    barf("LOOKS_LIKE_GHC_INFO- is incorrectly defined");
-    exit(0);
-  }
-  if (LOOKS_LIKE_STATIC_CLOSURE(&stg_BLACKHOLE_info)) {
barf("LOOKS_LIKE_STATIC_CLOSURE- is incorrectly defined"); - exit(0); - } - if (!LOOKS_LIKE_STATIC_CLOSURE(&stg_dummy_ret_closure)) { - barf("LOOKS_LIKE_STATIC_CLOSURE+ is incorrectly defined"); - exit(0); + if (generations != NULL) { + // multi-init protection + return; } + /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be + * doing something reasonable. + */ + ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info)); + ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure)); + ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure)); + if (RtsFlags.GcFlags.maxHeapSize != 0 && RtsFlags.GcFlags.heapSizeSuggestion > RtsFlags.GcFlags.maxHeapSize) { @@ -143,7 +96,7 @@ initStorage( void ) initBlockAllocator(); #if defined(SMP) - initCondition(&sm_mutex); + initMutex(&sm_mutex); #endif /* allocate generation info array */ @@ -195,6 +148,7 @@ initStorage( void ) stp = &generations[g].steps[s]; stp->no = s; stp->blocks = NULL; + stp->n_to_blocks = 0; stp->n_blocks = 0; stp->gen = &generations[g]; stp->gen_no = g; @@ -249,17 +203,12 @@ initStorage( void ) /* initialise the allocate() interface */ small_alloc_list = NULL; - large_alloc_list = NULL; alloc_blocks = 0; alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize; /* Tell GNU multi-precision pkg about our custom alloc functions */ mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP); -#if defined(SMP) - initMutex(&sm_mutex); -#endif - IF_DEBUG(gc, statDescribeGens()); } @@ -322,15 +271,9 @@ newCAF(StgClosure* caf) */ ACQUIRE_SM_LOCK; - if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) { - ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info; - ((StgIndStatic *)caf)->static_link = caf_list; - caf_list = caf; - } else { - ((StgIndStatic *)caf)->saved_info = NULL; - ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list; - oldest_gen->mut_once_list = (StgMutClosure *)caf; - } + ((StgIndStatic *)caf)->saved_info = NULL; + ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list; + oldest_gen->mut_once_list = (StgMutClosure *)caf; RELEASE_SM_LOCK; @@ -344,6 +287,25 @@ newCAF(StgClosure* caf) #endif /* PAR */ } +// An alternate version of newCaf which is used for dynamically loaded +// object code in GHCi. In this case we want to retain *all* CAFs in +// the object code, because they might be demanded at any time from an +// expression evaluated on the command line. +// +// The linker hackily arranges that references to newCaf from dynamic +// code end up pointing to newDynCAF. +void +newDynCAF(StgClosure *caf) +{ + ACQUIRE_SM_LOCK; + + ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info; + ((StgIndStatic *)caf)->static_link = caf_list; + caf_list = caf; + + RELEASE_SM_LOCK; +} + /* ----------------------------------------------------------------------------- Nursery management. 
   -------------------------------------------------------------------------- */
 
@@ -352,22 +314,20 @@ void
 allocNurseries( void )
 {
 #ifdef SMP
-  {
-    Capability *cap;
-    bdescr *bd;
-
-    g0s0->blocks = NULL;
-    g0s0->n_blocks = 0;
-    for (cap = free_capabilities; cap != NULL; cap = cap->link) {
-      cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
-      cap->r.rCurrentNursery = cap->r.rNursery;
-      for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
-        bd->u.back = (bdescr *)cap;
-      }
-    }
+  Capability *cap;
+  bdescr *bd;
+
+  g0s0->blocks = NULL;
+  g0s0->n_blocks = 0;
+  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+    cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+    cap->r.rCurrentNursery = cap->r.rNursery;
     /* Set the back links to be equal to the Capability,
      * so we can do slightly better informed locking.
      */
+    for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
+      bd->u.back = (bdescr *)cap;
+    }
   }
 #else /* SMP */
   g0s0->blocks      = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
@@ -515,15 +475,11 @@ allocate( nat n )
     nat req_blocks =  (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
     bd = allocGroup(req_blocks);
     dbl_link_onto(bd, &g0s0->large_objects);
+    g0s0->n_large_blocks += req_blocks;
     bd->gen_no  = 0;
     bd->step = g0s0;
     bd->flags = BF_LARGE;
-    bd->free = bd->start;
-    /* don't add these blocks to alloc_blocks, since we're assuming
-     * that large objects are likely to remain live for quite a while
-     * (eg. running threads), so garbage collecting early won't make
-     * much difference.
-     */
+    bd->free = bd->start + n;
     alloc_blocks += req_blocks;
     RELEASE_SM_LOCK;
     return bd->start;
@@ -553,7 +509,25 @@ allocate( nat n )
 lnat
 allocated_bytes( void )
 {
-  return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
+    lnat allocated;
+
+    allocated = alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp);
+    if (pinned_object_block != NULL) {
+        allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
+            pinned_object_block->free;
+    }
+
+    return allocated;
+}
+
+void
+tidyAllocateLists (void)
+{
+    if (small_alloc_list != NULL) {
+        ASSERT(alloc_Hp >= small_alloc_list->start &&
+               alloc_Hp <= small_alloc_list->start + BLOCK_SIZE);
+        small_alloc_list->free = alloc_Hp;
+    }
 }
 
 /* ---------------------------------------------------------------------------
@@ -585,16 +559,22 @@ allocatePinned( nat n )
     StgPtr p;
     bdescr *bd = pinned_object_block;
 
+    // If the request is for a large object, then allocate()
+    // will give us a pinned object anyway.
+    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
+        return allocate(n);
+    }
+
     ACQUIRE_SM_LOCK;
 
     TICK_ALLOC_HEAP_NOCTR(n);
     CCS_ALLOC(CCCS,n);
 
-    // If the request is for a large object, then allocate()
-    // will give us a pinned object anyway.
-    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-        RELEASE_SM_LOCK;
-        return allocate(n);
+    // we always return 8-byte aligned memory.  bd->free must be
+    // 8-byte aligned to begin with, so we just round up n to
+    // the nearest multiple of 8 bytes.
+    if (sizeof(StgWord) == 4) {
+        n = (n+1) & ~1;
     }
 
     // If we don't have a block of pinned objects yet, or the current
@@ -604,7 +584,7 @@ allocatePinned( nat n )
         dbl_link_onto(bd, &g0s0->large_objects);
         bd->gen_no = 0;
         bd->step   = g0s0;
-        bd->flags  = BF_LARGE;
+        bd->flags  = BF_PINNED | BF_LARGE;
         bd->free   = bd->start;
         alloc_blocks++;
     }
@@ -632,10 +612,8 @@ stgAllocForGMP (size_t size_in_bytes)
   StgArrWords* arr;
   nat data_size_in_words, total_size_in_words;
 
-  /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
-  ASSERT(size_in_bytes % sizeof(W_) == 0);
-
-  data_size_in_words  = size_in_bytes / sizeof(W_);
+  /* round up to a whole number of words */
+  data_size_in_words  = (size_in_bytes + sizeof(W_) + 1) / sizeof(W_);
   total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
 
   /* allocate and fill it in. */
@@ -840,14 +818,10 @@ memInventory(void)
   for (bd = small_alloc_list; bd; bd = bd->link) {
    total_blocks += bd->blocks;
  }
-  for (bd = large_alloc_list; bd; bd = bd->link) {
-    total_blocks += bd->blocks;
-  }
 
 #ifdef PROFILING
   if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
-      for (bd = firstStack; bd != NULL; bd = bd->link)
-          total_blocks += bd->blocks;
+      total_blocks += retainerStackBlocks();
   }
 #endif
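
The newCAF/newDynCAF split above gives two retention policies: ordinary CAFs are linked onto the oldest generation's mut_once_list so the GC can treat them like any other mutable-once object, while CAFs in dynamically loaded GHCi code are threaded onto caf_list and retained for the life of the session. Both are intrusive list pushes through a field inside the closure itself. A minimal sketch of that pattern, with a stand-in struct rather than the real StgIndStatic:

    #include <assert.h>
    #include <stddef.h>

    /* Stand-in for a static closure with an intrusive link field. */
    typedef struct closure {
        struct closure *static_link;
    } closure;

    static closure *caf_list = NULL;

    /* Thread a CAF onto the retention list: an O(1) push with no
     * allocation, because the link lives inside the closure itself. */
    static void retain_caf(closure *caf)
    {
        caf->static_link = caf_list;
        caf_list = caf;
    }

    int main(void)
    {
        closure a = { NULL }, b = { NULL };
        retain_caf(&a);
        retain_caf(&b);
        assert(caf_list == &b && b.static_link == &a && a.static_link == NULL);
        return 0;
    }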
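
allocated_bytes now corrects for the current pinned block: alloc_blocks counts the whole block the moment allocatePinned grabs it, so the slop between pinned_object_block->free and the end of the block must be subtracted back out. A sketch of just that bookkeeping, using an illustrative descriptor struct in place of bdescr (and ignoring the small_alloc_list term):

    #include <assert.h>
    #include <stddef.h>

    #define BLOCK_SIZE_W 1024   /* illustrative block size, in words */

    typedef unsigned long word_t;

    /* Stand-in for the parts of bdescr that allocated_bytes touches. */
    typedef struct {
        word_t *start;   /* first word of the block */
        word_t *free;    /* next unallocated word   */
    } block_desc;

    /* alloc_blocks * BLOCK_SIZE_W counts whole blocks, so subtract the
     * tail of the pinned block that has not been handed out yet. */
    static size_t allocated_words(size_t alloc_blocks, const block_desc *pinned)
    {
        size_t allocated = alloc_blocks * BLOCK_SIZE_W;
        if (pinned != NULL) {
            allocated -= (size_t)((pinned->start + BLOCK_SIZE_W) - pinned->free);
        }
        return allocated;
    }

    int main(void)
    {
        word_t block[BLOCK_SIZE_W];
        block_desc bd = { block, block + 100 };   /* 100 words handed out */

        assert(allocated_words(1, &bd) == 100);
        assert(allocated_words(1, NULL) == BLOCK_SIZE_W);
        return 0;
    }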
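
The allocatePinned hunk rounds each request up so that bd->free always stays 8-byte aligned: on a 32-bit platform (4-byte StgWord) the word count is rounded up to an even number, while on 64-bit platforms every word boundary is already 8-byte aligned and n is left alone. A self-contained sketch of that arithmetic (the function and test harness are illustrative, not RTS API):

    #include <assert.h>
    #include <stddef.h>

    /* Round a request of n_words up so successive allocations stay
     * 8-byte aligned.  With 4-byte words this means rounding up to an
     * even word count; with 8-byte words no adjustment is needed. */
    static size_t round_for_8_byte_alignment(size_t n_words, size_t word_size)
    {
        if (word_size == 4) {
            n_words = (n_words + 1) & ~(size_t)1;   /* round up to even */
        }
        return n_words;
    }

    int main(void)
    {
        assert(round_for_8_byte_alignment(3, 4) == 4);  /* 12 bytes -> 16 */
        assert(round_for_8_byte_alignment(4, 4) == 4);  /* already even   */
        assert(round_for_8_byte_alignment(3, 8) == 3);  /* 64-bit: as-is  */
        return 0;
    }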
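
Finally, stgAllocForGMP no longer asserts that GMP passes a whole number of limbs; it rounds size_in_bytes up to words itself. The expression it uses, (size_in_bytes + sizeof(W_) + 1) / sizeof(W_), always yields at least the usual ceiling idiom (bytes + w - 1) / w, and at most one word more, so it can slightly over-allocate but never under-allocates. A standalone sketch checking that relationship (assuming 4-byte words for the test):

    #include <assert.h>
    #include <stddef.h>

    /* The usual round-up-to-whole-words idiom. */
    static size_t words_ceil(size_t bytes, size_t w)
    {
        return (bytes + w - 1) / w;
    }

    /* The expression the patch introduces: never fewer words than
     * words_ceil, occasionally one more (e.g. bytes == 7, w == 4),
     * which is harmless for an allocator. */
    static size_t words_patch(size_t bytes, size_t w)
    {
        return (bytes + w + 1) / w;
    }

    int main(void)
    {
        size_t b;
        for (b = 0; b < 64; b++) {
            assert(words_patch(b, 4) >= words_ceil(b, 4));
            assert(words_patch(b, 4) <= words_ceil(b, 4) + 1);
        }
        return 0;
    }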