X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FStorage.c;h=f7a321d9d514ca9a3f16b8a372cb1a522cb29ca1;hb=7457757f193b28b5fe8fee01edbed012c2fda897;hp=a2c111bf8b23ca3a873b882d64383b6f80263061;hpb=bc5c802181b513216bc88f0d1ec9574157ee05fe;p=ghc-hetmet.git

diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c
index a2c111b..f7a321d 100644
--- a/ghc/rts/Storage.c
+++ b/ghc/rts/Storage.c
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.49 2001/08/14 13:40:09 sewardj Exp $
+ * $Id: Storage.c,v 1.67 2002/07/17 09:21:51 simonmar Exp $
  *
  * (c) The GHC Team, 1998-1999
  *
@@ -17,13 +17,32 @@
 #include "MBlock.h"
 #include "Weak.h"
 #include "Sanity.h"
+#include "Arena.h"
 #include "Storage.h"
 #include "Schedule.h"
+#include "OSThreads.h"
 #include "StoragePriv.h"
 
-#ifndef SMP
-nat nursery_blocks;		/* number of blocks in the nursery */
+#include "RetainerProfile.h"	// for counting memory blocks (memInventory)
+
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef darwin_TARGET_OS
+#include <mach-o/getsect.h>
+unsigned long macho_etext = 0;
+unsigned long macho_edata = 0;
+
+static void macosx_get_memory_layout(void)
+{
+  struct segment_command *seg;
+
+  seg = getsegbyname("__TEXT");
+  macho_etext = seg->vmaddr + seg->vmsize;
+  seg = getsegbyname("__DATA");
+  macho_edata = seg->vmaddr + seg->vmsize;
+}
 #endif
 
 StgClosure    *caf_list = NULL;
@@ -49,7 +68,7 @@ lnat total_allocated = 0;	/* total memory allocated during run */
  * simultaneous access by two STG threads.
  */
 #ifdef SMP
-pthread_mutex_t sm_mutex = PTHREAD_MUTEX_INITIALIZER;
+Mutex sm_mutex = INIT_MUTEX_VAR;
 #endif
 
 /*
@@ -66,22 +85,29 @@ initStorage( void )
   step *stp;
   generation *gen;
 
-  /* If we're doing heap profiling, we want a two-space heap with a
-   * fixed-size allocation area so that we get roughly even-spaced
-   * samples.
-   */
-
-  /* As an experiment, try a 2 generation collector
-   */
+#if defined(darwin_TARGET_OS)
+    macosx_get_memory_layout();
+#endif
 
-#if defined(PROFILING) || defined(DEBUG)
-  if (RtsFlags.ProfFlags.doHeapProfile) {
-    RtsFlags.GcFlags.generations = 1;
-    RtsFlags.GcFlags.steps = 1;
-    RtsFlags.GcFlags.oldGenFactor = 0;
-    RtsFlags.GcFlags.heapSizeSuggestion = 0;
+  /* Sanity check to make sure we are able to make the distinction
+   * between closures and infotables
+   */
+  if (!LOOKS_LIKE_GHC_INFO(&stg_BLACKHOLE_info)) {
+    barf("LOOKS_LIKE_GHC_INFO+ is incorrectly defined");
+    exit(0);
+  }
+  if (LOOKS_LIKE_GHC_INFO(&stg_dummy_ret_closure)) {
+    barf("LOOKS_LIKE_GHC_INFO- is incorrectly defined");
+    exit(0);
+  }
+  if (LOOKS_LIKE_STATIC_CLOSURE(&stg_BLACKHOLE_info)) {
+    barf("LOOKS_LIKE_STATIC_CLOSURE- is incorrectly defined");
+    exit(0);
+  }
+  if (!LOOKS_LIKE_STATIC_CLOSURE(&stg_dummy_ret_closure)) {
+    barf("LOOKS_LIKE_STATIC_CLOSURE+ is incorrectly defined");
+    exit(0);
   }
-#endif
 
   if (RtsFlags.GcFlags.maxHeapSize != 0 &&
       RtsFlags.GcFlags.heapSizeSuggestion > 
@@ -89,8 +115,19 @@ initStorage( void )
     RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
   }
 
+  if (RtsFlags.GcFlags.maxHeapSize != 0 &&
+      RtsFlags.GcFlags.minAllocAreaSize > 
+      RtsFlags.GcFlags.maxHeapSize) {
+    prog_belch("maximum heap size (-M) is smaller than minimum alloc area size (-A)");
+    exit(1);
+  }
+
   initBlockAllocator();
 
+#if defined(SMP)
+  initCondition(&sm_mutex);
+#endif
+
   /* allocate generation info array */
   generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations 
					      * sizeof(struct _generation),
@@ -168,7 +205,11 @@ initStorage( void )
   /* The oldest generation has one step and it is compacted. 
    */
   if (RtsFlags.GcFlags.compact) {
-      oldest_gen->steps[0].is_compacted = 1;
+      if (RtsFlags.GcFlags.generations == 1) {
+	  belch("WARNING: compaction is incompatible with -G1; disabled");
+      } else {
+	  oldest_gen->steps[0].is_compacted = 1;
+      }
   }
 
   oldest_gen->steps[0].to = &oldest_gen->steps[0];
@@ -197,8 +238,8 @@ initStorage( void )
   /* Tell GNU multi-precision pkg about our custom alloc functions */
   mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
 
-#ifdef SMP
-  pthread_mutex_init(&sm_mutex, NULL);
+#if defined(SMP)
+  initMutex(&sm_mutex);
 #endif
 
   IF_DEBUG(gc, statDescribeGens());
@@ -261,7 +302,7 @@ newCAF(StgClosure* caf)
    * come to do a major GC we won't need the mut_link field
    * any more and can use it as a STATIC_LINK.
    */
-  ACQUIRE_LOCK(&sm_mutex);
+  ACQUIRE_SM_LOCK;
   if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
       ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
@@ -273,7 +314,7 @@ newCAF(StgClosure* caf)
       oldest_gen->mut_once_list = (StgMutClosure *)caf;
   }
 
-  RELEASE_LOCK(&sm_mutex);
+  RELEASE_SM_LOCK;
 
 #ifdef PAR
   /* If we are PAR or DIST then we never forget a CAF */
@@ -300,9 +341,9 @@ allocNurseries( void )
   g0s0->blocks = NULL;
   g0s0->n_blocks = 0;
   for (cap = free_capabilities; cap != NULL; cap = cap->link) {
-    cap->rNursery        = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
-    cap->rCurrentNursery = cap->rNursery;
-    for (bd = cap->rNursery; bd != NULL; bd = bd->link) {
+    cap->r.rNursery        = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+    cap->r.rCurrentNursery = cap->r.rNursery;
+    for (bd = cap->r.rNursery; bd != NULL; bd = bd->link) {
       bd->u.back = (bdescr *)cap;
     }
   }
@@ -311,13 +352,12 @@
    */
   }
 #else /* SMP */
-  nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
-  g0s0->blocks = allocNursery(NULL, nursery_blocks);
-  g0s0->n_blocks = nursery_blocks;
+  g0s0->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+  g0s0->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
   g0s0->to_blocks = NULL;
   g0s0->n_to_blocks = 0;
-  MainRegTable.rNursery        = g0s0->blocks;
-  MainRegTable.rCurrentNursery = g0s0->blocks;
+  MainCapability.r.rNursery        = g0s0->blocks;
+  MainCapability.r.rCurrentNursery = g0s0->blocks;
   /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
 #endif
 }
@@ -333,13 +373,13 @@ resetNurseries( void )
   ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);
   for (cap = free_capabilities; cap != NULL; cap = cap->link) {
-    for (bd = cap->rNursery; bd; bd = bd->link) {
+    for (bd = cap->r.rNursery; bd; bd = bd->link) {
       bd->free = bd->start;
       ASSERT(bd->gen_no == 0);
       ASSERT(bd->step == g0s0);
       IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
     }
-    cap->rCurrentNursery = cap->rNursery;
+    cap->r.rCurrentNursery = cap->r.rNursery;
   }
 #else
   for (bd = g0s0->blocks; bd; bd = bd->link) {
@@ -348,41 +388,55 @@ resetNurseries( void )
     ASSERT(bd->step == g0s0);
     IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
   }
-  MainRegTable.rNursery        = g0s0->blocks;
-  MainRegTable.rCurrentNursery = g0s0->blocks;
+  MainCapability.r.rNursery        = g0s0->blocks;
+  MainCapability.r.rCurrentNursery = g0s0->blocks;
 #endif
 }
 
 bdescr *
-allocNursery (bdescr *last_bd, nat blocks)
+allocNursery (bdescr *tail, nat blocks)
 {
   bdescr *bd;
   nat i;
 
-  /* Allocate a nursery */
+  // Allocate a nursery: we allocate fresh blocks one at a time and
+  // cons them on to the front of the list, not forgetting to update
+  // the back pointer on the tail of the list to point to the new block.
   for (i=0; i < blocks; i++) {
+    // @LDV profiling
+    /*
+      processNursery() in LdvProfile.c assumes that every block group in
+      the nursery contains only a single block. So, if a block group is
+      given multiple blocks, change processNursery() accordingly.
+     */
     bd = allocBlock();
-    bd->link = last_bd;
+    bd->link = tail;
+    // double-link the nursery: we might need to insert blocks
+    if (tail != NULL) {
+	tail->u.back = bd;
+    }
     bd->step = g0s0;
     bd->gen_no = 0;
     bd->flags = 0;
     bd->free = bd->start;
-    last_bd = bd;
+    tail = bd;
   }
-  return last_bd;
+  tail->u.back = NULL;
+  return tail;
 }
 
 void
 resizeNursery ( nat blocks )
 {
   bdescr *bd;
+  nat nursery_blocks;
 
 #ifdef SMP
   barf("resizeNursery: can't resize in SMP mode");
 #endif
 
+  nursery_blocks = g0s0->n_blocks;
   if (nursery_blocks == blocks) {
-    ASSERT(g0s0->n_blocks == blocks);
     return;
   }
@@ -397,15 +451,25 @@ resizeNursery ( nat blocks )
     IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n", blocks));
-    for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
-      next_bd = bd->link;
-      freeGroup(bd);
-      bd = next_bd;
+
+    bd = g0s0->blocks;
+    while (nursery_blocks > blocks) {
+	next_bd = bd->link;
+	next_bd->u.back = NULL;
+	nursery_blocks -= bd->blocks; // might be a large block
+	freeGroup(bd);
+	bd = next_bd;
     }
     g0s0->blocks = bd;
+    // might have gone just under, by freeing a large block, so make
+    // up the difference.
+    if (nursery_blocks < blocks) {
+	g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
+    }
   }
 
-  g0s0->n_blocks = nursery_blocks = blocks;
+  g0s0->n_blocks = blocks;
+  ASSERT(countBlocks(g0s0->blocks) == g0s0->n_blocks);
 }
 
 /* -----------------------------------------------------------------------------
@@ -422,7 +486,7 @@ allocate( nat n )
   bdescr *bd;
   StgPtr p;
 
-  ACQUIRE_LOCK(&sm_mutex);
+  ACQUIRE_SM_LOCK;
 
   TICK_ALLOC_HEAP_NOCTR(n);
   CCS_ALLOC(CCCS,n);
@@ -443,7 +507,7 @@ allocate( nat n )
      * much difference.
      */
     alloc_blocks += req_blocks;
-    RELEASE_LOCK(&sm_mutex);
+    RELEASE_SM_LOCK;
     return bd->start;
   }
 
   /* small allocation (<LARGE_OBJECT_THRESHOLD/sizeof(W_)) */
@@ ... @@ allocatePinned( nat n )
     // If the request is for a large object, then allocate()
     // will give us a pinned object anyway.
     if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
-	RELEASE_LOCK(&sm_mutex);
+	RELEASE_SM_LOCK;
	return allocate(n);
     }
 
+    // we always return 8-byte aligned memory.  bd->free must be
+    // 8-byte aligned to begin with, so we just round up n to
+    // the nearest multiple of 8 bytes.
+    if (sizeof(StgWord) == 4) {
+	n = (n+1) & ~1;
+    }
+
     // If we don't have a block of pinned objects yet, or the current
     // one isn't large enough to hold the new object, allocate a new one.
     if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
@@ -529,7 +600,7 @@ allocatePinned( nat n )
     p = bd->free;
     bd->free += n;
 
-    RELEASE_LOCK(&sm_mutex);
+    RELEASE_SM_LOCK;
     return p;
 }
@@ -550,10 +621,8 @@ stgAllocForGMP (size_t size_in_bytes)
   StgArrWords* arr;
   nat data_size_in_words, total_size_in_words;
 
-  /* should be a multiple of sizeof(StgWord) (whole no. of limbs) */
-  ASSERT(size_in_bytes % sizeof(W_) == 0);
-
-  data_size_in_words  = size_in_bytes / sizeof(W_);
+  /* round up to a whole number of words */
+  data_size_in_words  = (size_in_bytes + sizeof(W_) + 1) / sizeof(W_);
   total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
 
   /* allocate and fill it in. 
    */
@@ -619,20 +688,20 @@ calcAllocated( void )
 	      + allocated_bytes();
 
   for (cap = free_capabilities; cap != NULL; cap = cap->link) {
-    for ( bd = cap->rCurrentNursery->link; bd != NULL; bd = bd->link ) {
+    for ( bd = cap->r.rCurrentNursery->link; bd != NULL; bd = bd->link ) {
      allocated -= BLOCK_SIZE_W;
     }
-    if (cap->rCurrentNursery->free < cap->rCurrentNursery->start
+    if (cap->r.rCurrentNursery->free < cap->r.rCurrentNursery->start 
	+ BLOCK_SIZE_W) {
-      allocated -= (cap->rCurrentNursery->start + BLOCK_SIZE_W)
-	- cap->rCurrentNursery->free;
+      allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
+	- cap->r.rCurrentNursery->free;
     }
   }
 
 #else /* !SMP */
-  bdescr *current_nursery = MainRegTable.rCurrentNursery;
+  bdescr *current_nursery = MainCapability.r.rCurrentNursery;
 
-  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
+  allocated = (g0s0->n_blocks * BLOCK_SIZE_W) + allocated_bytes();
   for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
     allocated -= BLOCK_SIZE_W;
   }
@@ -761,7 +830,17 @@ memInventory(void)
   for (bd = large_alloc_list; bd; bd = bd->link) {
     total_blocks += bd->blocks;
   }
-  
+
+#ifdef PROFILING
+  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
+      for (bd = firstStack; bd != NULL; bd = bd->link) 
+	  total_blocks += bd->blocks;
+  }
+#endif
+
+  // count the blocks allocated by the arena allocator
+  total_blocks += arenaBlocks();
+
   /* count the blocks on the free list */
   free_blocks = countFreeList();
@@ -775,7 +854,8 @@ memInventory(void)
   ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
 }
 
-static nat
+
+nat
 countBlocks(bdescr *bd)
 {
     nat n;
@@ -798,13 +878,13 @@ checkSanity( void )
 
     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
       for (s = 0; s < generations[g].n_steps; s++) {
-	if (g == 0 && s == 0) { continue; }
-	checkHeap(generations[g].steps[s].blocks);
-	checkChain(generations[g].steps[s].large_objects);
	ASSERT(countBlocks(generations[g].steps[s].blocks)
	       == generations[g].steps[s].n_blocks);
	ASSERT(countBlocks(generations[g].steps[s].large_objects)
	       == generations[g].steps[s].n_large_blocks);
+	if (g == 0 && s == 0) { continue; }
+	checkHeap(generations[g].steps[s].blocks);
+	checkChain(generations[g].steps[s].large_objects);
	if (g > 0) {
	  checkMutableList(generations[g].mut_list, g);
	  checkMutOnceList(generations[g].mut_once_list, g);
@@ -815,4 +895,13 @@ checkSanity( void )
     }
 }
 
+// handy function for use in gdb, because Bdescr() is inlined.
+extern bdescr *_bdescr( StgPtr p );
+
+bdescr *
+_bdescr( StgPtr p )
+{
+  return Bdescr(p);
+}
+
 #endif
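
A note on the nursery changes above: the patch turns the nursery into a
doubly-linked block list.  allocNursery() conses fresh blocks onto the
front via bd->link and now also maintains bd->u.back, which is what lets
resizeNursery() unlink and free blocks from the front (a freed group may
span several blocks, hence the bd->blocks accounting) and call
allocNursery() again to make up the difference if it overshoots.  The
sketch below is illustrative C only; the block type and cons_blocks()
helper are stand-ins, not the RTS definitions, and like the patched code
it assumes it never returns an empty list (blocks >= 1 or tail != NULL).

    #include <stdlib.h>

    /* Stand-ins for the RTS block descriptor fields used here. */
    typedef struct block_ {
        struct block_ *link;    /* forward pointer, bd->link in the RTS  */
        struct block_ *back;    /* back pointer,   bd->u.back in the RTS */
    } block;

    /* Cons n fresh blocks onto the front of tail, double-linking as we
     * go, in the same shape as the patched allocNursery(). */
    static block *cons_blocks(block *tail, unsigned n)
    {
        unsigned i;
        for (i = 0; i < n; i++) {
            block *bd = calloc(1, sizeof(block));
            if (bd == NULL) abort();
            bd->link = tail;
            if (tail != NULL) {
                tail->back = bd;    /* double-link the chain */
            }
            tail = bd;
        }
        tail->back = NULL;          /* tail is now the head of the list */
        return tail;
    }

    int main(void)
    {
        /* Build a 4-block "nursery" and walk it forwards. */
        unsigned count = 0;
        block *bd, *nursery = cons_blocks(NULL, 4);
        for (bd = nursery; bd != NULL; bd = bd->link) count++;
        return count == 4 ? 0 : 1;
    }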
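And a note on the new rounding step in allocatePinned(): it keeps
bd->free 8-byte aligned.  On a 32-bit target a word is 4 bytes, so an
odd word count is rounded up to the next even one; on a 64-bit target
every word is already 8-byte aligned and the request passes through
unchanged.  A minimal standalone sketch of just that step follows;
round_pinned_words() is a made-up name, and W_ here is a stand-in for
the RTS word type StgWord.

    #include <assert.h>
    #include <stddef.h>

    typedef size_t W_;   /* stand-in for StgWord */

    /* Round a word count up so n * sizeof(W_) is a multiple of 8 bytes,
     * mirroring the 'n = (n+1) & ~1' step in the patched allocatePinned(). */
    static size_t round_pinned_words(size_t n)
    {
        if (sizeof(W_) == 4) {          /* 32-bit: 2 words per 8 bytes */
            n = (n + 1) & ~(size_t)1;
        }
        return n;                       /* 64-bit: already aligned */
    }

    int main(void)
    {
        if (sizeof(W_) == 4) {
            assert(round_pinned_words(3) == 4);  /* odd rounds up  */
            assert(round_pinned_words(4) == 4);  /* even unchanged */
        } else {
            assert(round_pinned_words(3) == 3);  /* no-op on 64-bit */
        }
        return 0;
    }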