X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FStorage.c;h=f13f186ec41151c9b5c171361bffb64f280c0c55;hb=e1c4a20eb3545e0ac5c67099e487d1f26d4a655c;hp=ee8cfd85c6e6e436586e56d2f147c9a6cbfd6436;hpb=db61851c5472bf565cd1da900b33d6e033fd743d;p=ghc-hetmet.git
diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c
index ee8cfd8..f13f186 100644
--- a/ghc/rts/Storage.c
+++ b/ghc/rts/Storage.c
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.54 2001/11/22 14:25:12 simonmar Exp $
+ * $Id: Storage.c,v 1.83 2004/07/21 10:47:28 simonmar Exp $
  *
  * (c) The GHC Team, 1998-1999
  *
@@ -21,14 +21,17 @@
 #include "Storage.h"
 #include "Schedule.h"
+#include "OSThreads.h"
 #include "StoragePriv.h"
 
 #include "RetainerProfile.h"	// for counting memory blocks (memInventory)
 
+#include <stdlib.h>
+#include <string.h>
+
 StgClosure    *caf_list = NULL;
 
 bdescr *small_alloc_list;	/* allocate()d small objects */
-bdescr *large_alloc_list;	/* allocate()d large objects */
 bdescr *pinned_object_block;    /* allocate pinned objects into this block */
 nat alloc_blocks;		/* number of allocate()d blocks since GC */
 nat alloc_blocks_lim;		/* approximate limit on alloc_blocks */
@@ -36,19 +39,19 @@ nat alloc_blocks_lim;	/* approximate limit on alloc_blocks */
 StgPtr alloc_Hp    = NULL;	/* next free byte in small_alloc_list */
 StgPtr alloc_HpLim = NULL;	/* end of block at small_alloc_list   */
 
-generation *generations;	/* all the generations */
-generation *g0;			/* generation 0, for convenience */
-generation *oldest_gen;		/* oldest generation, for convenience */
-step *g0s0;			/* generation 0, step 0, for convenience */
+generation *generations = NULL;	/* all the generations */
+generation *g0 = NULL;		/* generation 0, for convenience */
+generation *oldest_gen = NULL;	/* oldest generation, for convenience */
+step *g0s0 = NULL;		/* generation 0, step 0, for convenience */
 
-lnat total_allocated = 0;	/* total memory allocated during run */
+ullong total_allocated = 0;	/* total memory allocated during run */
 
 /*
  * Storage manager mutex:  protects all the above state from
  * simultaneous access by two STG threads.
  */
 #ifdef SMP
-pthread_mutex_t sm_mutex = PTHREAD_MUTEX_INITIALIZER;
+Mutex sm_mutex = INIT_MUTEX_VAR;
 #endif
 
 /*
@@ -65,6 +68,18 @@ initStorage( void )
   step *stp;
   generation *gen;
 
+  if (generations != NULL) {
+      // multi-init protection
+      return;
+  }
+
+  /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
+   * doing something reasonable.
+   */
+  ASSERT(LOOKS_LIKE_INFO_PTR(&stg_BLACKHOLE_info));
+  ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
+  ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
+
   if (RtsFlags.GcFlags.maxHeapSize != 0 &&
       RtsFlags.GcFlags.heapSizeSuggestion >
       RtsFlags.GcFlags.maxHeapSize) {
@@ -80,6 +95,10 @@ initStorage( void )
 
   initBlockAllocator();
 
+#if defined(SMP)
+  initMutex(&sm_mutex);
+#endif
+
   /* allocate generation info array */
   generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
 					     * sizeof(struct _generation),
@@ -129,6 +148,7 @@ initStorage( void )
       stp = &generations[g].steps[s];
       stp->no = s;
       stp->blocks = NULL;
+      stp->n_to_blocks = 0;
       stp->n_blocks = 0;
       stp->gen = &generations[g];
       stp->gen_no = g;
@@ -183,17 +203,12 @@ initStorage( void )
 
   /* initialise the allocate() interface */
   small_alloc_list = NULL;
-  large_alloc_list = NULL;
   alloc_blocks = 0;
   alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
 
   /* Tell GNU multi-precision pkg about our custom alloc functions */
   mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
 
-#ifdef SMP
-  pthread_mutex_init(&sm_mutex, NULL);
-#endif
-
   IF_DEBUG(gc, statDescribeGens());
 }
 
@@ -254,19 +269,13 @@ newCAF(StgClosure* caf)
    * come to do a major GC we won't need the mut_link field
    * any more and can use it as a STATIC_LINK.
    */
-  ACQUIRE_LOCK(&sm_mutex);
+  ACQUIRE_SM_LOCK;
 
-  if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
-      ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
-      ((StgIndStatic *)caf)->static_link = caf_list;
-      caf_list = caf;
-  } else {
-      ((StgIndStatic *)caf)->saved_info = NULL;
-      ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
-      oldest_gen->mut_once_list = (StgMutClosure *)caf;
-  }
+  ((StgIndStatic *)caf)->saved_info = NULL;
+  ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
+  oldest_gen->mut_once_list = (StgMutClosure *)caf;
 
-  RELEASE_LOCK(&sm_mutex);
+  RELEASE_SM_LOCK;
 
 #ifdef PAR
   /* If we are PAR or DIST then we never forget a CAF */
@@ -278,6 +287,25 @@ newCAF(StgClosure* caf)
 #endif /* PAR */
 }
 
+// An alternate version of newCAF which is used for dynamically loaded
+// object code in GHCi.  In this case we want to retain *all* CAFs in
+// the object code, because they might be demanded at any time from an
+// expression evaluated on the command line.
+//
+// The linker hackily arranges that references to newCAF from dynamic
+// code end up pointing to newDynCAF.
+void
+newDynCAF(StgClosure *caf)
+{
+    ACQUIRE_SM_LOCK;
+
+    ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
+    ((StgIndStatic *)caf)->static_link = caf_list;
+    caf_list = caf;
+
+    RELEASE_SM_LOCK;
+}
+
 /* -----------------------------------------------------------------------------
    Nursery management.
   -------------------------------------------------------------------------- */
 
@@ -286,22 +314,20 @@ void
 allocNurseries( void )
 {
 #ifdef SMP
-  {
-    Capability *cap;
-    bdescr *bd;
-
-    g0s0->blocks = NULL;
-    g0s0->n_blocks = 0;
-    for (cap = free_capabilities; cap != NULL; cap = cap->link) {
-      cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
-      cap->rCurrentNursery = cap->rNursery;
-      for (bd = cap->rNursery; bd != NULL; bd = bd->link) {
-	bd->u.back = (bdescr *)cap;
-      }
-    }
+  Capability *cap;
+  bdescr *bd;
+
+  g0s0->blocks = NULL;
+  g0s0->n_blocks = 0;
+  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+      cap->r.rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+      cap->r.rCurrentNursery = cap->r.rNursery;
       /* Set the back links to be equal to the Capability,
        * so we can do slightly better informed locking.
        */
+      for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
+	  bd->u.back = (bdescr *)cap;
+      }
   }
 #else /* SMP */
   g0s0->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
@@ -325,30 +351,16 @@ resetNurseries( void )
   ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);
 
   for (cap = free_capabilities; cap != NULL; cap = cap->link) {
-    for (bd = cap->rNursery; bd; bd = bd->link) {
+    for (bd = cap->r.rNursery; bd; bd = bd->link) {
       bd->free = bd->start;
       ASSERT(bd->gen_no == 0);
       ASSERT(bd->step == g0s0);
       IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
     }
-    cap->rCurrentNursery = cap->rNursery;
+    cap->r.rCurrentNursery = cap->r.rNursery;
   }
 #else
   for (bd = g0s0->blocks; bd; bd = bd->link) {
-#ifdef PROFILING
-    // @LDV profiling
-    // Reset every word in the nursery to zero when doing LDV profiling.
-    // This relieves the mutator of the burden of zeroing every new closure,
-    // which is stored in the nursery.
-    //
-    // Todo: make it more efficient, e.g. memcpy()
-    //
-    StgPtr p;
-    if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
-      for (p = bd->start; p < bd->start + BLOCK_SIZE_W; p++)
-	*p = 0;
-    }
-#endif
     bd->free = bd->start;
     ASSERT(bd->gen_no == 0);
     ASSERT(bd->step == g0s0);
@@ -452,7 +464,7 @@ allocate( nat n )
   bdescr *bd;
   StgPtr p;
 
-  ACQUIRE_LOCK(&sm_mutex);
+  ACQUIRE_SM_LOCK;
 
   TICK_ALLOC_HEAP_NOCTR(n);
   CCS_ALLOC(CCCS,n);
 
@@ -463,17 +475,13 @@ allocate( nat n )
     nat req_blocks =  (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
     bd = allocGroup(req_blocks);
     dbl_link_onto(bd, &g0s0->large_objects);
+    g0s0->n_large_blocks += req_blocks;
     bd->gen_no  = 0;
     bd->step = g0s0;
     bd->flags = BF_LARGE;
-    bd->free = bd->start;
-    /* don't add these blocks to alloc_blocks, since we're assuming
-     * that large objects are likely to remain live for quite a while
-     * (eg. running threads), so garbage collecting early won't make
-     * much difference.
-     */
+    bd->free = bd->start + n;
     alloc_blocks += req_blocks;
-    RELEASE_LOCK(&sm_mutex);
+    RELEASE_SM_LOCK;
     return bd->start;
 
   /* small allocation (<LARGE_OBJECT_THRESHOLD) */
@@ -494,14 +502,32 @@ allocate( nat n )
 
   p = alloc_Hp;
   alloc_Hp += n;
-  RELEASE_LOCK(&sm_mutex);
+  RELEASE_SM_LOCK;
   return p;
 }
 
 lnat
 allocated_bytes( void )
 {
-  return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
+    lnat allocated;
+
+    allocated = alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp);
+    if (pinned_object_block != NULL) {
+	allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
+	    pinned_object_block->free;
+    }
+
+    return allocated;
+}
+
+void
+tidyAllocateLists (void)
+{
+    if (small_alloc_list != NULL) {
+	ASSERT(alloc_Hp >= small_alloc_list->start &&
+	       alloc_Hp <= small_alloc_list->start + BLOCK_SIZE);
+	small_alloc_list->free = alloc_Hp;
+    }
 }
 
 /* ---------------------------------------------------------------------------
@@ -533,18 +559,24 @@ allocatePinned( nat n )
 {
     StgPtr p;
     bdescr *bd = pinned_object_block;
 
-    ACQUIRE_LOCK(&sm_mutex);
-
-    TICK_ALLOC_HEAP_NOCTR(n);
-    CCS_ALLOC(CCCS,n);
-
     // If the request is for a large object, then allocate()
    // will give us a pinned object anyway.
     if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-	RELEASE_LOCK(&sm_mutex);
 	return allocate(n);
     }
 
+    ACQUIRE_SM_LOCK;
+
+    TICK_ALLOC_HEAP_NOCTR(n);
+    CCS_ALLOC(CCCS,n);
+
+    // we always return 8-byte aligned memory.  bd->free must be
+    // 8-byte aligned to begin with, so we just round up n to
+    // the nearest multiple of 8 bytes.
+    if (sizeof(StgWord) == 4) {
+	n = (n+1) & ~1;
+    }
+
     // If we don't have a block of pinned objects yet, or the current
     // one isn't large enough to hold the new object, allocate a new one.
@@ -552,14 +584,14 @@ allocatePinned( nat n )
 	pinned_object_block = bd = allocBlock();
 	dbl_link_onto(bd, &g0s0->large_objects);
 	bd->gen_no = 0;
 	bd->step   = g0s0;
-	bd->flags  = BF_LARGE;
+	bd->flags  = BF_PINNED | BF_LARGE;
 	bd->free   = bd->start;
 	alloc_blocks++;
     }
 
     p = bd->free;
     bd->free += n;
-    RELEASE_LOCK(&sm_mutex);
+    RELEASE_SM_LOCK;
     return p;
 }
 
@@ -580,10 +612,8 @@ stgAllocForGMP (size_t size_in_bytes)
     StgArrWords* arr;
     nat data_size_in_words, total_size_in_words;
 
-    /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
-    ASSERT(size_in_bytes % sizeof(W_) == 0);
-
-    data_size_in_words  = size_in_bytes / sizeof(W_);
+    /* round up to a whole number of words */
+    data_size_in_words  = (size_in_bytes + sizeof(W_) + 1) / sizeof(W_);
     total_size_in_words  = sizeofW(StgArrWords) + data_size_in_words;
 
     /* allocate and fill it in. */
@@ -649,13 +679,13 @@ calcAllocated( void )
 	       + allocated_bytes();
 
   for (cap = free_capabilities; cap != NULL; cap = cap->link) {
-      for ( bd = cap->rCurrentNursery->link; bd != NULL; bd = bd->link ) {
+      for ( bd = cap->r.rCurrentNursery->link; bd != NULL; bd = bd->link ) {
 	  allocated -= BLOCK_SIZE_W;
       }
-      if (cap->rCurrentNursery->free < cap->rCurrentNursery->start
+      if (cap->r.rCurrentNursery->free < cap->r.rCurrentNursery->start
 	  + BLOCK_SIZE_W) {
-	  allocated -= (cap->rCurrentNursery->start + BLOCK_SIZE_W)
-	      - cap->rCurrentNursery->free;
+	  allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
+	      - cap->r.rCurrentNursery->free;
       }
   }
 
@@ -788,14 +818,10 @@ memInventory(void)
   for (bd = small_alloc_list; bd; bd = bd->link) {
     total_blocks += bd->blocks;
   }
-  for (bd = large_alloc_list; bd; bd = bd->link) {
-    total_blocks += bd->blocks;
-  }
 
 #ifdef PROFILING
   if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
-      for (bd = firstStack; bd != NULL; bd = bd->link)
-	  total_blocks += bd->blocks;
+      total_blocks += retainerStackBlocks();
   }
 #endif
 
@@ -856,4 +882,13 @@ checkSanity( void )
     }
 }
 
+// handy function for use in gdb, because Bdescr() is inlined.
+extern bdescr *_bdescr( StgPtr p );
+
+bdescr *
+_bdescr( StgPtr p )
+{
+    return Bdescr(p);
+}
+
 #endif
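A few of the changes in this patch lend themselves to tiny standalone illustrations. First, the new guard at the top of initStorage() ("if (generations != NULL) return;") makes initialisation idempotent: a second call becomes a no-op instead of re-allocating the generation array. A minimal sketch of the same pattern, using hypothetical names (init_table, table) that are not part of Storage.c:

#include <stdlib.h>

static int *table = NULL;   /* stays NULL until the first init succeeds */

/* Initialise once; later calls return immediately, mirroring the
 * "multi-init protection" guard added to initStorage() above. */
static void init_table(void)
{
    if (table != NULL) {
        return;             /* already initialised */
    }
    table = calloc(256, sizeof *table);
}

Note that the guard alone is not thread-safe; like initStorage(), it assumes the first call completes before any concurrent callers exist.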
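Second, the 8-byte-alignment rounding added to allocatePinned() works because bd->free is always word-aligned: with 4-byte words (sizeof(StgWord) == 4), rounding the word count n up to an even number keeps every returned pointer a multiple of 8 bytes from the block start. A standalone sketch of the same arithmetic (round_to_even_words is an illustrative name, not an RTS function):

#include <assert.h>
#include <stdio.h>

/* Round a 4-byte-word count up to an even number of words, as
 * allocatePinned() does: an even count of 4-byte words is a whole
 * number of 8-byte units, so successive bump allocations stay
 * 8-byte aligned.  (n+1) & ~1 rounds up, then clears the low bit. */
static unsigned round_to_even_words(unsigned n)
{
    return (n + 1) & ~1u;
}

int main(void)
{
    unsigned n, r;
    for (n = 0; n < 8; n++) {
        r = round_to_even_words(n);
        assert(r % 2 == 0 && r >= n && r - n <= 1);
        printf("%u words -> %u words (%u bytes)\n", n, r, r * 4u);
    }
    return 0;
}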
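Finally, the _bdescr() definition added at the end of the file exists because Bdescr() is inlined, so the debugger has no symbol to call; wrapping it in an out-of-line function with external linkage makes "call _bdescr(p)" work from a gdb prompt. The same trick in miniature, again with hypothetical names (PAYLOAD, _payload):

/* A Bdescr()-style macro: cheap in compiled code, but it leaves no
 * symbol for a debugger to call. */
#define PAYLOAD(p) ((p) + 1)

/* Out-of-line wrapper with external linkage: "call _payload(ptr)"
 * now works from gdb, just as _bdescr() wraps Bdescr(). */
extern int *_payload(int *p);

int *
_payload(int *p)
{
    return PAYLOAD(p);
}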