X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FStorage.c;h=b2f95931de5ddbf4cd5af44edd989132594f4912;hb=c95b2db6b50df8d9a3eba87d8c0dfa779503eb83;hp=e093888b4beba9ddddf3f668ef09f8a737e46643;hpb=bbab3c15e58912433d5f2b5dcd2344f5a176848c;p=ghc-hetmet.git diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c index e093888..b2f9593 100644 --- a/ghc/rts/Storage.c +++ b/ghc/rts/Storage.c @@ -1,5 +1,7 @@ /* ----------------------------------------------------------------------------- - * $Id: Storage.c,v 1.5 1999/01/19 17:06:05 simonm Exp $ + * $Id: Storage.c,v 1.39 2001/07/19 07:28:00 andy Exp $ + * + * (c) The GHC Team, 1998-1999 * * Storage manager front end * @@ -12,14 +14,16 @@ #include "Hooks.h" #include "BlockAlloc.h" #include "MBlock.h" -#include "gmp.h" #include "Weak.h" +#include "Sanity.h" #include "Storage.h" +#include "Schedule.h" #include "StoragePriv.h" -bdescr *current_nursery; /* next available nursery block, or NULL */ +#ifndef SMP nat nursery_blocks; /* number of blocks in the nursery */ +#endif StgClosure *caf_list = NULL; @@ -36,6 +40,16 @@ generation *g0; /* generation 0, for convenience */ generation *oldest_gen; /* oldest generation, for convenience */ step *g0s0; /* generation 0, step 0, for convenience */ +lnat total_allocated = 0; /* total memory allocated during run */ + +/* + * Storage manager mutex: protects all the above state from + * simultaneous access by two STG threads. + */ +#ifdef SMP +pthread_mutex_t sm_mutex = PTHREAD_MUTEX_INITIALIZER; +#endif + /* * Forward references */ @@ -47,9 +61,31 @@ void initStorage (void) { nat g, s; - step *step; + step *stp; generation *gen; + /* If we're doing heap profiling, we want a two-space heap with a + * fixed-size allocation area so that we get roughly even-spaced + * samples. 
+ */ + + /* As an experiment, try a 2 generation collector + */ + +#if defined(PROFILING) || defined(DEBUG) + if (RtsFlags.ProfFlags.doHeapProfile) { + RtsFlags.GcFlags.generations = 1; + RtsFlags.GcFlags.steps = 1; + RtsFlags.GcFlags.oldGenFactor = 0; + RtsFlags.GcFlags.heapSizeSuggestion = 0; + } +#endif + + if (RtsFlags.GcFlags.heapSizeSuggestion > + RtsFlags.GcFlags.maxHeapSize) { + RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion; + } + initBlockAllocator(); /* allocate generation info array */ @@ -62,9 +98,10 @@ initStorage (void) gen = &generations[g]; gen->no = g; gen->mut_list = END_MUT_LIST; + gen->mut_once_list = END_MUT_LIST; gen->collections = 0; gen->failed_promotions = 0; - gen->max_blocks = RtsFlags.GcFlags.minOldGenSize; + gen->max_blocks = 0; } /* A couple of convenience pointers */ @@ -82,9 +119,10 @@ initStorage (void) /* set up all except the oldest generation with 2 steps */ for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) { - generations[g].n_steps = 2; - generations[g].steps = stgMallocBytes (2 * sizeof(struct _step), - "initStorage: steps"); + generations[g].n_steps = RtsFlags.GcFlags.steps; + generations[g].steps = + stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step), + "initStorage: steps"); } } else { @@ -96,30 +134,28 @@ initStorage (void) /* Initialise all steps */ for (g = 0; g < RtsFlags.GcFlags.generations; g++) { for (s = 0; s < generations[g].n_steps; s++) { - step = &generations[g].steps[s]; - step->no = s; - step->blocks = NULL; - step->n_blocks = 0; - step->gen = &generations[g]; - step->hp = NULL; - step->hpLim = NULL; - step->hp_bd = NULL; - step->large_objects = NULL; - step->new_large_objects = NULL; - step->scavenged_large_objects = NULL; + stp = &generations[g].steps[s]; + stp->no = s; + stp->blocks = NULL; + stp->n_blocks = 0; + stp->gen = &generations[g]; + stp->hp = NULL; + stp->hpLim = NULL; + stp->hp_bd = NULL; + stp->scan = NULL; + stp->scan_bd = NULL; + stp->large_objects = NULL; + stp->new_large_objects = NULL; + stp->scavenged_large_objects = NULL; } } /* Set up the destination pointers in each younger gen. step */ for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - step = &generations[g].steps[s]; - if ( s == 1 ) { - step->to = &generations[g+1].steps[0]; - } else { - step->to = &generations[g].steps[s+1]; - } + for (s = 0; s < generations[g].n_steps-1; s++) { + generations[g].steps[s].to = &generations[g].steps[s+1]; } + generations[g].steps[s].to = &generations[g+1].steps[0]; } /* The oldest generation has one step and its destination is the @@ -129,14 +165,15 @@ initStorage (void) /* generation 0 is special: that's the nursery */ generations[0].max_blocks = 0; - /* G0S0: the allocation area */ - step = &generations[0].steps[0]; - g0s0 = step; - step->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize); - step->n_blocks = RtsFlags.GcFlags.minAllocAreaSize; - nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize; - current_nursery = step->blocks; - /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */ + /* G0S0: the allocation area. Policy: keep the allocation area + * small to begin with, even if we have a large suggested heap + * size. Reason: we're going to do a major collection first, and we + * don't want it to be a big one. This vague idea is borne out by + * rigorous experimental evidence. 
+ */ + g0s0 = &generations[0].steps[0]; + + allocNurseries(); weak_ptr_list = NULL; caf_list = NULL; @@ -147,15 +184,165 @@ initStorage (void) alloc_blocks = 0; alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize; -#ifdef COMPILER /* Tell GNU multi-precision pkg about our custom alloc functions */ mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP); + +#ifdef SMP + pthread_mutex_init(&sm_mutex, NULL); #endif IF_DEBUG(gc, stat_describe_gens()); } -extern bdescr * +void +exitStorage (void) +{ + stat_exit(calcAllocated()); +} + +/* ----------------------------------------------------------------------------- + CAF management. + + The entry code for every CAF does the following: + + - builds a CAF_BLACKHOLE in the heap + - pushes an update frame pointing to the CAF_BLACKHOLE + - invokes UPD_CAF(), which: + - calls newCaf, below + - updates the CAF with a static indirection to the CAF_BLACKHOLE + + Why do we build a BLACKHOLE in the heap rather than just updating + the thunk directly? It's so that we only need one kind of update + frame - otherwise we'd need a static version of the update frame too. + + newCaf() does the following: + + - it puts the CAF on the oldest generation's mut-once list. + This is so that we can treat the CAF as a root when collecting + younger generations. + + For GHCI, we have additional requirements when dealing with CAFs: + + - we must *retain* all dynamically-loaded CAFs ever entered, + just in case we need them again. + - we must be able to *revert* CAFs that have been evaluated, to + their pre-evaluated form. + + To do this, we use an additional CAF list. When newCaf() is + called on a dynamically-loaded CAF, we add it to the CAF list + instead of the old-generation mutable list, and save away its + old info pointer (in caf->saved_info) for later reversion. + + To revert all the CAFs, we traverse the CAF list and reset the + info pointer to caf->saved_info, then throw away the CAF list. + (see GC.c:revertCAFs()). + + -- SDM 29/1/01 + + -------------------------------------------------------------------------- */ + +void +newCAF(StgClosure* caf) +{ + /* Put this CAF on the mutable list for the old generation. + * This is a HACK - the IND_STATIC closure doesn't really have + * a mut_link field, but we pretend it has - in fact we re-use + * the STATIC_LINK field for the time being, because when we + * come to do a major GC we won't need the mut_link field + * any more and can use it as a STATIC_LINK. + */ + ACQUIRE_LOCK(&sm_mutex); + + if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) { + ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info; + ((StgIndStatic *)caf)->static_link = caf_list; + caf_list = caf; + } else { + ((StgIndStatic *)caf)->saved_info = NULL; + ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list; + oldest_gen->mut_once_list = (StgMutClosure *)caf; + } + + RELEASE_LOCK(&sm_mutex); + +#ifdef PAR + /* If we are PAR or DIST then we never forget a CAF */ + { globalAddr *newGA; + //belch("<##> Globalising CAF %08x %s",caf,info_type(caf)); + newGA=makeGlobal(caf,rtsTrue); /*given full weight*/ + ASSERT(newGA); + } +#endif /* PAR */ +} + +/* ----------------------------------------------------------------------------- + Nursery management. 
+ -------------------------------------------------------------------------- */ + +void +allocNurseries( void ) +{ +#ifdef SMP + { + Capability *cap; + bdescr *bd; + + g0s0->blocks = NULL; + g0s0->n_blocks = 0; + for (cap = free_capabilities; cap != NULL; cap = cap->link) { + cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize); + cap->rCurrentNursery = cap->rNursery; + for (bd = cap->rNursery; bd != NULL; bd = bd->link) { + bd->back = (bdescr *)cap; + } + } + /* Set the back links to be equal to the Capability, + * so we can do slightly better informed locking. + */ + } +#else /* SMP */ + nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize; + g0s0->blocks = allocNursery(NULL, nursery_blocks); + g0s0->n_blocks = nursery_blocks; + g0s0->to_space = NULL; + MainRegTable.rNursery = g0s0->blocks; + MainRegTable.rCurrentNursery = g0s0->blocks; + /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */ +#endif +} + +void +resetNurseries( void ) +{ + bdescr *bd; +#ifdef SMP + Capability *cap; + + /* All tasks must be stopped */ + ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes); + + for (cap = free_capabilities; cap != NULL; cap = cap->link) { + for (bd = cap->rNursery; bd; bd = bd->link) { + bd->free = bd->start; + ASSERT(bd->gen == g0); + ASSERT(bd->step == g0s0); + IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE)); + } + cap->rCurrentNursery = cap->rNursery; + } +#else + for (bd = g0s0->blocks; bd; bd = bd->link) { + bd->free = bd->start; + ASSERT(bd->gen == g0); + ASSERT(bd->step == g0s0); + IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE)); + } + MainRegTable.rNursery = g0s0->blocks; + MainRegTable.rCurrentNursery = g0s0->blocks; +#endif +} + +bdescr * allocNursery (bdescr *last_bd, nat blocks) { bdescr *bd; @@ -175,63 +362,39 @@ allocNursery (bdescr *last_bd, nat blocks) } void -exitStorage (void) +resizeNursery ( nat blocks ) { - lnat allocated; bdescr *bd; - /* Return code ignored for now */ - /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */ - allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes(); - for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) { - allocated -= BLOCK_SIZE_W; - } - stat_exit(allocated); -} - -void -recordMutable(StgMutClosure *p) -{ - bdescr *bd; - - ASSERT(closure_MUTABLE(p)); - - bd = Bdescr((P_)p); - - /* no need to bother in generation 0 */ - if (bd->gen == g0) { - return; - } +#ifdef SMP + barf("resizeNursery: can't resize in SMP mode"); +#endif - if (p->mut_link == NULL) { - p->mut_link = bd->gen->mut_list; - bd->gen->mut_list = p; + if (nursery_blocks == blocks) { + ASSERT(g0s0->n_blocks == blocks); + return; } -} -void -newCAF(StgClosure* caf) -{ - /* Put this CAF on the mutable list for the old generation. - * This is a HACK - the IND_STATIC closure doesn't really have - * a mut_link field, but we pretend it has - in fact we re-use - * the STATIC_LINK field for the time being, because when we - * come to do a major GC we won't need the mut_link field - * any more and can use it as a STATIC_LINK. 
- */ - ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_list; - oldest_gen->mut_list = (StgMutClosure *)caf; + else if (nursery_blocks < blocks) { + IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n", + blocks)); + g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks); + } -#ifdef DEBUG - { - const StgInfoTable *info; + else { + bdescr *next_bd; - info = get_itbl(caf); - ASSERT(info->type == IND_STATIC); - STATIC_LINK2(info,caf) = caf_list; - caf_list = caf; + IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n", + blocks)); + for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) { + next_bd = bd->link; + freeGroup(bd); + bd = next_bd; + } + g0s0->blocks = bd; } -#endif + + g0s0->n_blocks = nursery_blocks = blocks; } /* ----------------------------------------------------------------------------- @@ -248,7 +411,9 @@ allocate(nat n) bdescr *bd; StgPtr p; - TICK_ALLOC_PRIM(n,wibble,wibble,wibble) + ACQUIRE_LOCK(&sm_mutex); + + TICK_ALLOC_HEAP_NOCTR(n); CCS_ALLOC(CCCS,n); /* big allocation (>LARGE_OBJECT_THRESHOLD) */ @@ -266,6 +431,8 @@ allocate(nat n) * (eg. running threads), so garbage collecting early won't make * much difference. */ + alloc_blocks += req_blocks; + RELEASE_LOCK(&sm_mutex); return bd->start; /* small allocation (link) { + for ( bd = cap->rCurrentNursery->link; bd != NULL; bd = bd->link ) { + allocated -= BLOCK_SIZE_W; + } + if (cap->rCurrentNursery->free < cap->rCurrentNursery->start + + BLOCK_SIZE_W) { + allocated -= (cap->rCurrentNursery->start + BLOCK_SIZE_W) + - cap->rCurrentNursery->free; + } + } + +#else /* !SMP */ + bdescr *current_nursery = MainRegTable.rCurrentNursery; + + allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes(); + for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) { + allocated -= BLOCK_SIZE_W; + } + if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) { + allocated -= (current_nursery->start + BLOCK_SIZE_W) + - current_nursery->free; + } +#endif + + total_allocated += allocated; + return allocated; +} + +/* Approximate the amount of live data in the heap. To be called just + * after garbage collection (see GarbageCollect()). + */ +extern lnat +calcLive(void) +{ + nat g, s; + lnat live = 0; + step *stp; + + if (RtsFlags.GcFlags.generations == 1) { + live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W + + ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_); + return live; + } + + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + /* approximate amount of live data (doesn't take into account slop + * at end of each block). + */ + if (g == 0 && s == 0) { + continue; + } + stp = &generations[g].steps[s]; + live += (stp->n_blocks - 1) * BLOCK_SIZE_W + + ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start) / sizeof(W_); + } + } + return live; +} + +/* Approximate the number of blocks that will be needed at the next + * garbage collection. + * + * Assume: all data currently live will remain live. Steps that will + * be collected next time will therefore need twice as many blocks + * since all the data will be copied. 
+ */ +extern lnat +calcNeeded(void) +{ + lnat needed = 0; + nat g, s; + step *stp; + + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + if (g == 0 && s == 0) { continue; } + stp = &generations[g].steps[s]; + if (generations[g].steps[0].n_blocks > generations[g].max_blocks) { + needed += 2 * stp->n_blocks; + } else { + needed += stp->n_blocks; + } + } + } + return needed; +} + +/* ----------------------------------------------------------------------------- Debugging memInventory() checks for memory leaks by counting up all the @@ -361,7 +649,7 @@ extern void memInventory(void) { nat g, s; - step *step; + step *stp; bdescr *bd; lnat total_blocks = 0, free_blocks = 0; @@ -369,13 +657,13 @@ memInventory(void) for (g = 0; g < RtsFlags.GcFlags.generations; g++) { for (s = 0; s < generations[g].n_steps; s++) { - step = &generations[g].steps[s]; - total_blocks += step->n_blocks; + stp = &generations[g].steps[s]; + total_blocks += stp->n_blocks; if (RtsFlags.GcFlags.generations == 1) { /* two-space collector has a to-space too :-) */ total_blocks += g0s0->to_blocks; } - for (bd = step->large_objects; bd; bd = bd->link) { + for (bd = stp->large_objects; bd; bd = bd->link) { total_blocks += bd->blocks; /* hack for megablock groups: they have an extra block or two in the second and subsequent megablocks where the block @@ -383,7 +671,7 @@ memInventory(void) */ if (bd->blocks > BLOCKS_PER_MBLOCK) { total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK) - * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE); + * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE)); } } } @@ -412,4 +700,33 @@ memInventory(void) #endif } +/* Full heap sanity check. */ + +extern void +checkSanity(nat N) +{ + nat g, s; + + if (RtsFlags.GcFlags.generations == 1) { + checkHeap(g0s0->to_space, NULL); + checkChain(g0s0->large_objects); + } else { + + for (g = 0; g <= N; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + if (g == 0 && s == 0) { continue; } + checkHeap(generations[g].steps[s].blocks, NULL); + } + } + for (g = N+1; g < RtsFlags.GcFlags.generations; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + checkHeap(generations[g].steps[s].blocks, + generations[g].steps[s].blocks->start); + checkChain(generations[g].steps[s].large_objects); + } + } + checkFreeListSanity(); + } +} + #endif
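
Note on the CAF-management comment added in this diff: it says that reverting CAFs is done by traversing the CAF list and resetting each info pointer to caf->saved_info (see GC.c:revertCAFs()), but that routine is not part of this file. Purely as an illustration of the mechanism the comment describes, a revertCAFs() might look like the sketch below. This is a sketch only, not the code from GC.c; the identifiers StgIndStatic, caf_list, saved_info, static_link and header.info are taken from the diff above, while the function body itself is an assumption based on the comment.

/* Sketch only -- the real implementation lives in GC.c and may differ.
 * Undo the evaluation of dynamically-loaded CAFs by restoring the info
 * pointer that newCaf() saved away, then discard the CAF list.
 * Assumes the usual RTS headers (Rts.h, Storage.h, StoragePriv.h).
 */
void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link) {
        c->header.info = c->saved_info;   /* back to the pre-evaluated entry code */
        c->saved_info  = NULL;
    }
    caf_list = NULL;                      /* throw away the CAF list */
}

Saving the original info pointer in newCAF() is what makes this reversal possible: once the pointer is restored, entering the CAF again re-evaluates it from its pre-evaluated form, which is exactly the GHCi requirement stated in the comment.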