X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FStorage.c;h=b2f95931de5ddbf4cd5af44edd989132594f4912;hb=6ea86573500e4a576d22feec71e36cf27c94aaa9;hp=1f080c58a75493d08b3d2293428d67e033fbf918;hpb=f939293c9e2b8eead06935196500734b1240aee8;p=ghc-hetmet.git

diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c
index 1f080c5..b2f9593 100644
--- a/ghc/rts/Storage.c
+++ b/ghc/rts/Storage.c
@@ -1,5 +1,7 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.8 1999/01/28 15:04:02 simonm Exp $
+ * $Id: Storage.c,v 1.39 2001/07/19 07:28:00 andy Exp $
+ *
+ * (c) The GHC Team, 1998-1999
  *
  * Storage manager front end
  *
@@ -12,15 +14,16 @@
 #include "Hooks.h"
 #include "BlockAlloc.h"
 #include "MBlock.h"
-#include "gmp.h"
 #include "Weak.h"
 #include "Sanity.h"
 #include "Storage.h"
+#include "Schedule.h"
 #include "StoragePriv.h"
 
-bdescr *current_nursery;  /* next available nursery block, or NULL */
+#ifndef SMP
 nat nursery_blocks;       /* number of blocks in the nursery */
+#endif
 
 StgClosure *caf_list = NULL;
 
@@ -37,6 +40,16 @@ generation *g0;  /* generation 0, for convenience */
 generation *oldest_gen;   /* oldest generation, for convenience */
 step *g0s0;               /* generation 0, step 0, for convenience */
 
+lnat total_allocated = 0; /* total memory allocated during run */
+
+/*
+ * Storage manager mutex: protects all the above state from
+ * simultaneous access by two STG threads.
+ */
+#ifdef SMP
+pthread_mutex_t sm_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
 /*
  * Forward references
  */
@@ -48,12 +61,29 @@ void
 initStorage (void)
 {
   nat g, s;
-  step *step;
+  step *stp;
   generation *gen;
 
+  /* If we're doing heap profiling, we want a two-space heap with a
+   * fixed-size allocation area so that we get roughly even-spaced
+   * samples.
+   */
+
+  /* As an experiment, try a 2 generation collector
+   */
+
+#if defined(PROFILING) || defined(DEBUG)
+  if (RtsFlags.ProfFlags.doHeapProfile) {
+    RtsFlags.GcFlags.generations = 1;
+    RtsFlags.GcFlags.steps = 1;
+    RtsFlags.GcFlags.oldGenFactor = 0;
+    RtsFlags.GcFlags.heapSizeSuggestion = 0;
+  }
+#endif
+
   if (RtsFlags.GcFlags.heapSizeSuggestion >
       RtsFlags.GcFlags.maxHeapSize) {
-    barf("Suggested heap size (-H) is larger than max. heap size (-M)\n");
+    RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
  }
 
  initBlockAllocator();
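The hunk above changes the -H/-M interaction: a suggested heap size larger than the configured maximum used to abort via barf(), and now silently raises the maximum instead. A minimal standalone sketch of the new policy in plain C; the variable names and sizes below are made up for illustration and are not the RTS's RtsFlags fields:

    #include <stdio.h>

    int main(void)
    {
        unsigned long heap_size_suggestion = 64UL << 20;  /* hypothetical -H: 64MB */
        unsigned long max_heap_size        = 32UL << 20;  /* hypothetical -M: 32MB */

        if (heap_size_suggestion > max_heap_size) {
            /* old behaviour: abort with "Suggested heap size (-H) is
             * larger than max. heap size (-M)".
             * new behaviour: grow the maximum to match the suggestion. */
            max_heap_size = heap_size_suggestion;
        }
        printf("effective max heap size: %lu bytes\n", max_heap_size);
        return 0;
    }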
@@ -68,9 +98,10 @@ initStorage (void)
     gen = &generations[g];
     gen->no = g;
     gen->mut_list = END_MUT_LIST;
+    gen->mut_once_list = END_MUT_LIST;
     gen->collections = 0;
     gen->failed_promotions = 0;
-    gen->max_blocks = RtsFlags.GcFlags.minOldGenSize;
+    gen->max_blocks = 0;
   }
 
   /* A couple of convenience pointers */
@@ -103,17 +134,19 @@ initStorage (void)
   /* Initialise all steps */
   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
     for (s = 0; s < generations[g].n_steps; s++) {
-      step = &generations[g].steps[s];
-      step->no = s;
-      step->blocks = NULL;
-      step->n_blocks = 0;
-      step->gen = &generations[g];
-      step->hp = NULL;
-      step->hpLim = NULL;
-      step->hp_bd = NULL;
-      step->large_objects = NULL;
-      step->new_large_objects = NULL;
-      step->scavenged_large_objects = NULL;
+      stp = &generations[g].steps[s];
+      stp->no = s;
+      stp->blocks = NULL;
+      stp->n_blocks = 0;
+      stp->gen = &generations[g];
+      stp->hp = NULL;
+      stp->hpLim = NULL;
+      stp->hp_bd = NULL;
+      stp->scan = NULL;
+      stp->scan_bd = NULL;
+      stp->large_objects = NULL;
+      stp->new_large_objects = NULL;
+      stp->scavenged_large_objects = NULL;
     }
   }
 
@@ -132,14 +165,15 @@ initStorage (void)
   /* generation 0 is special: that's the nursery */
   generations[0].max_blocks = 0;
 
-  /* G0S0: the allocation area */
-  step = &generations[0].steps[0];
-  g0s0 = step;
-  step->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
-  step->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
-  nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
-  current_nursery = step->blocks;
-  /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
+  /* G0S0: the allocation area.  Policy: keep the allocation area
+   * small to begin with, even if we have a large suggested heap
+   * size.  Reason: we're going to do a major collection first, and we
+   * don't want it to be a big one.  This vague idea is borne out by
+   * rigorous experimental evidence.
+   */
+  g0s0 = &generations[0].steps[0];
+
+  allocNurseries();
 
   weak_ptr_list = NULL;
   caf_list = NULL;
@@ -150,15 +184,165 @@ initStorage (void)
   alloc_blocks = 0;
   alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
 
-#ifdef COMPILER
   /* Tell GNU multi-precision pkg about our custom alloc functions */
   mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
+
+#ifdef SMP
+  pthread_mutex_init(&sm_mutex, NULL);
 #endif
 
   IF_DEBUG(gc, stat_describe_gens());
 }
 
-extern bdescr *
+void
+exitStorage (void)
+{
+  stat_exit(calcAllocated());
+}
+
+/* -----------------------------------------------------------------------------
+   CAF management.
+
+   The entry code for every CAF does the following:
+
+      - builds a CAF_BLACKHOLE in the heap
+      - pushes an update frame pointing to the CAF_BLACKHOLE
+      - invokes UPD_CAF(), which:
+          - calls newCaf, below
+          - updates the CAF with a static indirection to the CAF_BLACKHOLE
+
+   Why do we build a BLACKHOLE in the heap rather than just updating
+   the thunk directly?  It's so that we only need one kind of update
+   frame - otherwise we'd need a static version of the update frame too.
+
+   newCaf() does the following:
+
+      - it puts the CAF on the oldest generation's mut-once list.
+        This is so that we can treat the CAF as a root when collecting
+        younger generations.
+
+   For GHCI, we have additional requirements when dealing with CAFs:
+
+      - we must *retain* all dynamically-loaded CAFs ever entered,
+        just in case we need them again.
+      - we must be able to *revert* CAFs that have been evaluated, to
+        their pre-evaluated form.
+
+   To do this, we use an additional CAF list.  When newCaf() is
+   called on a dynamically-loaded CAF, we add it to the CAF list
+   instead of the old-generation mutable list, and save away its
+   old info pointer (in caf->saved_info) for later reversion.
+
+   To revert all the CAFs, we traverse the CAF list and reset the
+   info pointer to caf->saved_info, then throw away the CAF list.
+   (see GC.c:revertCAFs()).
+
+   -- SDM 29/1/01
+
+   -------------------------------------------------------------------------- */
+
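The comment above describes the retain-and-revert scheme for GHCI. Here is a compilable sketch of the two operations it names, using simplified stand-in types rather than the real StgIndStatic; only the saved_info/static_link idea is carried over:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct Caf_ {
        const char  *info;        /* stand-in for the closure's info pointer */
        const char  *saved_info;  /* pre-evaluation info pointer */
        struct Caf_ *static_link; /* next CAF on the list */
    } Caf;

    static Caf *caf_list_sketch = NULL;

    /* what newCaf() does for a dynamically-loaded CAF: save the old
     * info pointer and chain the CAF onto the list */
    static void remember_caf(Caf *caf)
    {
        caf->saved_info  = caf->info;
        caf->static_link = caf_list_sketch;
        caf_list_sketch  = caf;
    }

    /* what the reversion pass does: restore every saved info pointer,
     * then throw the list away */
    static void revert_cafs(void)
    {
        Caf *c;
        for (c = caf_list_sketch; c != NULL; c = c->static_link) {
            c->info = c->saved_info;
        }
        caf_list_sketch = NULL;
    }

    int main(void)
    {
        Caf x = { "THUNK", NULL, NULL };
        remember_caf(&x);
        x.info = "IND_STATIC";       /* "evaluate" the CAF */
        revert_cafs();
        printf("%s\n", x.info);      /* prints THUNK again */
        return 0;
    }

The real newCAF() below does the same chaining under sm_mutex, and GC.c:revertCAFs() performs the reversion walk.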
+void
+newCAF(StgClosure* caf)
+{
+  /* Put this CAF on the mutable list for the old generation.
+   * This is a HACK - the IND_STATIC closure doesn't really have
+   * a mut_link field, but we pretend it has - in fact we re-use
+   * the STATIC_LINK field for the time being, because when we
+   * come to do a major GC we won't need the mut_link field
+   * any more and can use it as a STATIC_LINK.
+   */
+  ACQUIRE_LOCK(&sm_mutex);
+
+  if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
+      ((StgIndStatic *)caf)->saved_info  = (StgInfoTable *)caf->header.info;
+      ((StgIndStatic *)caf)->static_link = caf_list;
+      caf_list = caf;
+  } else {
+      ((StgIndStatic *)caf)->saved_info = NULL;
+      ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
+      oldest_gen->mut_once_list = (StgMutClosure *)caf;
+  }
+
+  RELEASE_LOCK(&sm_mutex);
+
+#ifdef PAR
+  /* If we are PAR or DIST then we never forget a CAF */
+  { globalAddr *newGA;
+    //belch("<##> Globalising CAF %08x %s",caf,info_type(caf));
+    newGA=makeGlobal(caf,rtsTrue); /*given full weight*/
+    ASSERT(newGA);
+  }
+#endif /* PAR */
+}
+
+/* -----------------------------------------------------------------------------
+   Nursery management.
+   -------------------------------------------------------------------------- */
+
+void
+allocNurseries( void )
+{
+#ifdef SMP
+  {
+    Capability *cap;
+    bdescr *bd;
+
+    g0s0->blocks = NULL;
+    g0s0->n_blocks = 0;
+    /* Set the back links to be equal to the Capability,
+     * so we can do slightly better informed locking.
+     */
+    for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+      cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+      cap->rCurrentNursery = cap->rNursery;
+      for (bd = cap->rNursery; bd != NULL; bd = bd->link) {
+        bd->back = (bdescr *)cap;
+      }
+    }
+  }
+#else /* SMP */
+  nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
+  g0s0->blocks   = allocNursery(NULL, nursery_blocks);
+  g0s0->n_blocks = nursery_blocks;
+  g0s0->to_space = NULL;
+  MainRegTable.rNursery = g0s0->blocks;
+  MainRegTable.rCurrentNursery = g0s0->blocks;
+  /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
+#endif
+}
+
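allocNurseries() above builds one block chain per Capability (SMP) or a single chain hung off MainRegTable; resetNurseries() below rewinds every block for reuse. A toy, malloc-backed model of that chain handling; BLOCK_SIZE_W and the bdescr fields here are simplified stand-ins for the RTS block allocator:

    #include <stdlib.h>

    #define BLOCK_SIZE_W 1024                 /* words per block; made-up size */

    typedef struct bdescr_ {
        unsigned long  *start;                /* first word of the block */
        unsigned long  *free;                 /* next unallocated word */
        struct bdescr_ *link;                 /* next block in the chain */
    } bdescr;

    /* build a chain of `blocks` fresh blocks, prepending to last_bd,
     * the same shape as the RTS allocNursery() */
    static bdescr *alloc_nursery(bdescr *last_bd, unsigned blocks)
    {
        while (blocks-- > 0) {
            bdescr *bd = malloc(sizeof *bd);
            bd->start = malloc(BLOCK_SIZE_W * sizeof *bd->start);
            bd->free  = bd->start;
            bd->link  = last_bd;
            last_bd = bd;
        }
        return last_bd;
    }

    /* mirror of the loop in resetNurseries(): empty each block but
     * keep the memory for the next allocation cycle */
    static void reset_nursery(bdescr *chain)
    {
        bdescr *bd;
        for (bd = chain; bd != NULL; bd = bd->link) {
            bd->free = bd->start;
        }
    }

    int main(void)
    {
        reset_nursery(alloc_nursery(NULL, 4));
        return 0;
    }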
+void
+resetNurseries( void )
+{
+  bdescr *bd;
+#ifdef SMP
+  Capability *cap;
+
+  /* All tasks must be stopped */
+  ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);
+
+  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+    for (bd = cap->rNursery; bd; bd = bd->link) {
+      bd->free = bd->start;
+      ASSERT(bd->gen == g0);
+      ASSERT(bd->step == g0s0);
+      IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
+    }
+    cap->rCurrentNursery = cap->rNursery;
+  }
+#else
+  for (bd = g0s0->blocks; bd; bd = bd->link) {
+    bd->free = bd->start;
+    ASSERT(bd->gen == g0);
+    ASSERT(bd->step == g0s0);
+    IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
+  }
+  MainRegTable.rNursery = g0s0->blocks;
+  MainRegTable.rCurrentNursery = g0s0->blocks;
+#endif
+}
+
+bdescr *
 allocNursery (bdescr *last_bd, nat blocks)
 {
   bdescr *bd;
@@ -177,11 +361,15 @@ allocNursery (bdescr *last_bd, nat blocks)
   return last_bd;
 }
 
-extern void
+void
 resizeNursery ( nat blocks )
 {
   bdescr *bd;
 
+#ifdef SMP
+  barf("resizeNursery: can't resize in SMP mode");
+#endif
+
   if (nursery_blocks == blocks) {
     ASSERT(g0s0->n_blocks == blocks);
     return;
@@ -209,66 +397,6 @@ resizeNursery ( nat blocks )
   g0s0->n_blocks = nursery_blocks = blocks;
 }
 
-void
-exitStorage (void)
-{
-  lnat allocated;
-  bdescr *bd;
-
-  /* Return code ignored for now */
-  /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
-  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
-  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
-    allocated -= BLOCK_SIZE_W;
-  }
-  stat_exit(allocated);
-}
-
-void
-recordMutable(StgMutClosure *p)
-{
-  bdescr *bd;
-
-  ASSERT(closure_MUTABLE(p));
-
-  bd = Bdescr((P_)p);
-
-  /* no need to bother in generation 0 */
-  if (bd->gen == g0) {
-    return;
-  }
-
-  if (p->mut_link == NULL) {
-    p->mut_link = bd->gen->mut_list;
-    bd->gen->mut_list = p;
-  }
-}
-
-void
-newCAF(StgClosure* caf)
-{
-  /* Put this CAF on the mutable list for the old generation.
-   * This is a HACK - the IND_STATIC closure doesn't really have
-   * a mut_link field, but we pretend it has - in fact we re-use
-   * the STATIC_LINK field for the time being, because when we
-   * come to do a major GC we won't need the mut_link field
-   * any more and can use it as a STATIC_LINK.
-   */
-  ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_list;
-  oldest_gen->mut_list = (StgMutClosure *)caf;
-
-#ifdef DEBUG
-  {
-    const StgInfoTable *info;
-
-    info = get_itbl(caf);
-    ASSERT(info->type == IND_STATIC);
-    STATIC_LINK2(info,caf) = caf_list;
-    caf_list = caf;
-  }
-#endif
-}
-
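The removed recordMutable() above is the generational write barrier: a mutated object outside generation 0 is chained onto its generation's mutable list at most once. A sketch of the idea with stand-in types; the non-NULL sentinel (playing the role of the RTS's END_MUT_LIST) is what lets mut_link == NULL reliably mean "not yet recorded":

    #include <stddef.h>

    typedef struct MutClosure_ {
        struct MutClosure_ *mut_link;  /* next on the mutable list, or NULL */
        int                 gen_no;    /* generation the object lives in */
    } MutClosure;

    static MutClosure end_mut_list;    /* sentinel, like END_MUT_LIST */

    static MutClosure *mut_list[3] = { /* one list head per generation */
        &end_mut_list, &end_mut_list, &end_mut_list
    };

    static void record_mutable(MutClosure *p)
    {
        if (p->gen_no == 0) {
            return;                    /* generation 0 is scanned anyway */
        }
        if (p->mut_link == NULL) {     /* not already on its list */
            p->mut_link = mut_list[p->gen_no];
            mut_list[p->gen_no] = p;
        }
    }

    int main(void)
    {
        MutClosure obj = { NULL, 2 };
        record_mutable(&obj);          /* first call links it in */
        record_mutable(&obj);          /* second call is a no-op */
        return 0;
    }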
 
 /* -----------------------------------------------------------------------------
    The allocate() interface
 
@@ -283,7 +411,9 @@ allocate(nat n)
 {
   bdescr *bd;
   StgPtr p;
 
-  TICK_ALLOC_HEAP(n);
+  ACQUIRE_LOCK(&sm_mutex);
+
+  TICK_ALLOC_HEAP_NOCTR(n);
   CCS_ALLOC(CCCS,n);
 
   /* big allocation (>LARGE_OBJECT_THRESHOLD) */
@@ -301,6 +431,8 @@ allocate(nat n)
    * (eg. running threads), so garbage collecting early won't make
    * much difference.
    */
+  alloc_blocks += req_blocks;
+  RELEASE_LOCK(&sm_mutex);
   return bd->start;
 
   /* small allocation (<LARGE_OBJECT_THRESHOLD) */
 
+lnat
+calcAllocated( void )
+{
+  lnat allocated;
+  bdescr *bd;
+
+#ifdef SMP
+  Capability *cap;
+
+  allocated =
+    n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W
+    + allocated_bytes();
+
+  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+    for ( bd = cap->rCurrentNursery->link; bd != NULL; bd = bd->link ) {
+      allocated -= BLOCK_SIZE_W;
+    }
+    if (cap->rCurrentNursery->free < cap->rCurrentNursery->start
+        + BLOCK_SIZE_W) {
+      allocated -= (cap->rCurrentNursery->start + BLOCK_SIZE_W)
+        - cap->rCurrentNursery->free;
+    }
+  }
+
+#else /* !SMP */
+  bdescr *current_nursery = MainRegTable.rCurrentNursery;
+
+  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
+  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
+    allocated -= BLOCK_SIZE_W;
+  }
+  if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
+    allocated -= (current_nursery->start + BLOCK_SIZE_W)
+      - current_nursery->free;
+  }
+#endif
+
+  total_allocated += allocated;
+  return allocated;
+}
 
 /* Approximate the amount of live data in the heap.  To be called just
  * after garbage collection (see GarbageCollect()).
  */
@@ -394,24 +583,25 @@ calcLive(void)
 {
   nat g, s;
   lnat live = 0;
-  step *step;
+  step *stp;
 
   if (RtsFlags.GcFlags.generations == 1) {
-    live = g0s0->to_blocks * BLOCK_SIZE_W +
+    live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W +
       ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
+    return live;
   }
 
   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
     for (s = 0; s < generations[g].n_steps; s++) {
       /* approximate amount of live data (doesn't take into account slop
-	 * at end of each block).
-	 */
+       * at end of each block).
+       */
       if (g == 0 && s == 0) {
           continue;
       }
-      step = &generations[g].steps[s];
-      live += step->n_blocks * BLOCK_SIZE_W +
-        ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
+      stp = &generations[g].steps[s];
+      live += (stp->n_blocks - 1) * BLOCK_SIZE_W +
+        ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start) / sizeof(W_);
     }
   }
   return live;
@@ -429,16 +619,16 @@ calcNeeded(void)
 {
   lnat needed = 0;
   nat g, s;
-  step *step;
+  step *stp;
 
   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
     for (s = 0; s < generations[g].n_steps; s++) {
       if (g == 0 && s == 0) { continue; }
-      step = &generations[g].steps[s];
+      stp = &generations[g].steps[s];
       if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
-        needed += 2 * step->n_blocks;
+        needed += 2 * stp->n_blocks;
       } else {
-        needed += step->n_blocks;
+        needed += stp->n_blocks;
       }
     }
   }
@@ -459,7 +649,7 @@ extern void
 memInventory(void)
 {
   nat g, s;
-  step *step;
+  step *stp;
   bdescr *bd;
   lnat total_blocks = 0, free_blocks = 0;
 
@@ -467,13 +657,13 @@ memInventory(void)
   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
     for (s = 0; s < generations[g].n_steps; s++) {
-      step = &generations[g].steps[s];
-      total_blocks += step->n_blocks;
+      stp = &generations[g].steps[s];
+      total_blocks += stp->n_blocks;
       if (RtsFlags.GcFlags.generations == 1) {
         /* two-space collector has a to-space too :-) */
         total_blocks += g0s0->to_blocks;
       }
-      for (bd = step->large_objects; bd; bd = bd->link) {
+      for (bd = stp->large_objects; bd; bd = bd->link) {
         total_blocks += bd->blocks;
         /* hack for megablock groups: they have an extra block or two in
            the second and subsequent megablocks where the block
           descriptors would normally go.
@@ -481,7 +671,7 @@ memInventory(void)
         */
         if (bd->blocks > BLOCKS_PER_MBLOCK) {
          total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
-                          * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);
+                          * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
        }
      }
    }
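The final hunk is an arithmetic fix, not just style: without the added parentheses, C's left-to-right association multiplies before the integer division, which changes the result. With them, the correction is counted once per complete megablock in the group. A standalone check with hypothetical sizes (the real MBLOCK_SIZE, BLOCK_SIZE and BLOCKS_PER_MBLOCK values differ):

    #include <stdio.h>

    #define MBLOCK_SIZE       (1024 * 1024)  /* hypothetical: 1MB megablocks */
    #define BLOCK_SIZE        (4 * 1024)     /* hypothetical: 4KB blocks */
    #define BLOCKS_PER_MBLOCK 252            /* usable blocks per megablock */

    int main(void)
    {
        unsigned long group_blocks = 600;    /* a large-object block group */
        unsigned long raw = MBLOCK_SIZE / BLOCK_SIZE;            /* 256 */

        /* old expression associates as
         * ((raw - BLOCKS_PER_MBLOCK) * group_blocks) / raw */
        unsigned long old_corr = (raw - BLOCKS_PER_MBLOCK) * group_blocks / raw;

        /* new expression divides first, as intended */
        unsigned long new_corr = (raw - BLOCKS_PER_MBLOCK) * (group_blocks / raw);

        printf("old correction: %lu blocks, new correction: %lu blocks\n",
               old_corr, new_corr);          /* prints: old 9, new 8 */
        return 0;
    }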