X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FStorage.c;h=ec0728af70c6779a5b00cf6adc7424b8b026eb43;hb=30681e796f707fa109aaf756d4586049f595195d;hp=3d7a0b7ca570cc57a2632621edabacdfbd750812;hpb=4391e44f910ce579f269986faef9e5db8907a6c0;p=ghc-hetmet.git

diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c
index 3d7a0b7..ec0728a 100644
--- a/ghc/rts/Storage.c
+++ b/ghc/rts/Storage.c
@@ -1,5 +1,7 @@
 /* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.3 1999/01/13 17:25:47 simonm Exp $
+ * $Id: Storage.c,v 1.21 1999/11/09 15:46:59 simonmar Exp $
+ *
+ * (c) The GHC Team, 1998-1999
  *
  * Storage manager front end
  *
@@ -14,12 +16,15 @@
 #include "MBlock.h"
 #include "gmp.h"
 #include "Weak.h"
+#include "Sanity.h"
 
 #include "Storage.h"
+#include "Schedule.h"
 #include "StoragePriv.h"
 
-bdescr *current_nursery;	/* next available nursery block, or NULL */
+#ifndef SMP
 nat nursery_blocks;		/* number of blocks in the nursery */
+#endif
 
 StgClosure *caf_list = NULL;
 
@@ -37,9 +42,16 @@ generation *oldest_gen;		/* oldest generation, for convenience */
 step *g0s0;			/* generation 0, step 0, for convenience */
 
 /*
+ * Storage manager mutex: protects all the above state from
+ * simultaneous access by two STG threads.
+ */
+#ifdef SMP
+pthread_mutex_t sm_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+/*
  * Forward references
  */
-static bdescr *allocNursery (nat blocks);
 static void *stgAllocForGMP   (size_t size_in_bytes);
 static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
 static void  stgDeallocForGMP (void *ptr, size_t size);
@@ -49,6 +61,25 @@ initStorage (void)
 {
   nat g, s;
   step *step;
+  generation *gen;
+
+  /* If we're doing heap profiling, we want a two-space heap with a
+   * fixed-size allocation area so that we get roughly even-spaced
+   * samples.
+   */
+#if defined(PROFILING) || defined(DEBUG)
+  if (RtsFlags.ProfFlags.doHeapProfile) {
+    RtsFlags.GcFlags.generations = 1;
+    RtsFlags.GcFlags.steps = 1;
+    RtsFlags.GcFlags.oldGenFactor = 0;
+    RtsFlags.GcFlags.heapSizeSuggestion = 0;
+  }
+#endif
+
+  if (RtsFlags.GcFlags.heapSizeSuggestion >
+      RtsFlags.GcFlags.maxHeapSize) {
+    RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
+  }
 
   initBlockAllocator();
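
/* A minimal, self-contained model of the flag normalisation in the hunk
 * above.  The struct and the numbers below are made up for illustration
 * (the real fields live in RtsFlags); only the two rules mirror the
 * patch: heap profiling forces a single-generation, single-step
 * (two-space) heap, and a heap-size suggestion larger than the
 * configured maximum raises that maximum.
 */
#include <stdio.h>

struct gc_flags {
    unsigned generations, steps, heap_size_suggestion, max_heap_size;
};

static void normalise(struct gc_flags *f, int profiling) {
    if (profiling) {              /* even-spaced samples: two-space heap */
        f->generations = 1;
        f->steps = 1;
        f->heap_size_suggestion = 0;
    }
    if (f->heap_size_suggestion > f->max_heap_size) {
        f->max_heap_size = f->heap_size_suggestion;
    }
}

int main(void) {
    struct gc_flags f = { 2, 2, 6400, 4096 };  /* hypothetical values, in blocks */
    normalise(&f, 0);
    /* prints gens=2 steps=2 max=6400: the suggestion raised the maximum */
    printf("gens=%u steps=%u max=%u\n", f.generations, f.steps, f.max_heap_size);
    return 0;
}
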
@@ -57,73 +88,87 @@
 					     * sizeof(struct _generation),
 					     "initStorage: gens");
 
-  /* set up all generations */
+  /* Initialise all generations */
   for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
-    generations[g].no = g;
-    generations[g].mut_list = END_MUT_LIST;
-    generations[g].collections = 0;
-    generations[g].failed_promotions = 0;
+    gen = &generations[g];
+    gen->no = g;
+    gen->mut_list = END_MUT_LIST;
+    gen->mut_once_list = END_MUT_LIST;
+    gen->collections = 0;
+    gen->failed_promotions = 0;
+    gen->max_blocks = 0;
   }
 
-  /* Oldest generation: one step */
-  g = RtsFlags.GcFlags.generations-1;
-  generations[g].n_steps = 1;
-  generations[g].steps =
-    stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");
-  generations[g].max_blocks = RtsFlags.GcFlags.minAllocAreaSize * 4;
-  step = &generations[g].steps[0];
-  step->no = 0;
-  step->gen = &generations[g];
-  step->blocks = NULL;
-  step->n_blocks = 0;
-  step->to = step;			/* destination is this step */
-  step->hp = NULL;
-  step->hpLim = NULL;
-  step->hp_bd = NULL;
-
-  /* set up all except the oldest generation with 2 steps */
-  for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
-    generations[g].n_steps = 2;
-    generations[g].steps  = stgMallocBytes (2 * sizeof(struct _step),
-					    "initStorage: steps");
-    generations[g].max_blocks = RtsFlags.GcFlags.minAllocAreaSize * 4;
+  /* A couple of convenience pointers */
+  g0 = &generations[0];
+  oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
+
+  /* Allocate step structures in each generation */
+  if (RtsFlags.GcFlags.generations > 1) {
+    /* Only for multiple-generations */
+
+    /* Oldest generation: one step */
+    oldest_gen->n_steps = 1;
+    oldest_gen->steps =
+      stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");
+
+    /* set up all except the oldest generation with 2 steps */
+    for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
+      generations[g].n_steps = RtsFlags.GcFlags.steps;
+      generations[g].steps =
+	stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
+			"initStorage: steps");
+    }
+
+  } else {
+    /* single generation, i.e. a two-space collector */
+    g0->n_steps = 1;
+    g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");
   }
 
-  for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
+  /* Initialise all steps */
+  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
     for (s = 0; s < generations[g].n_steps; s++) {
       step = &generations[g].steps[s];
       step->no = s;
       step->blocks = NULL;
       step->n_blocks = 0;
       step->gen = &generations[g];
-      if ( s == 1 ) {
-	step->to = &generations[g+1].steps[0];
-      } else {
-	step->to = &generations[g].steps[s+1];
-      }
       step->hp = NULL;
       step->hpLim = NULL;
       step->hp_bd = NULL;
+      step->scan = NULL;
+      step->scan_bd = NULL;
      step->large_objects = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
 
-  oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
+  /* Set up the destination pointers in each younger gen. step */
+  for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
+    for (s = 0; s < generations[g].n_steps-1; s++) {
+      generations[g].steps[s].to = &generations[g].steps[s+1];
+    }
+    generations[g].steps[s].to = &generations[g+1].steps[0];
+  }
+
+  /* The oldest generation has one step and its destination is the
+   * same step. */
+  oldest_gen->steps[0].to = &oldest_gen->steps[0];
 
   /* generation 0 is special: that's the nursery */
-  g0 = &generations[0];
   generations[0].max_blocks = 0;
 
-  /* G0S0: the allocation area */
-  step = &generations[0].steps[0];
-  g0s0 = step;
-  step->blocks = allocNursery(RtsFlags.GcFlags.minAllocAreaSize);
-  step->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
-  nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
-  current_nursery = step->blocks;
-  /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
+  /* G0S0: the allocation area.  Policy: keep the allocation area
+   * small to begin with, even if we have a large suggested heap
+   * size.  Reason: we're going to do a major collection first, and we
+   * don't want it to be a big one.  This vague idea is borne out by
+   * rigorous experimental evidence.
+   */
+  g0s0 = &generations[0].steps[0];
+
+  allocNurseries();
 
   weak_ptr_list = NULL;
   caf_list = NULL;
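
/* A self-contained model of the destination-pointer wiring set up in
 * the hunk above.  "struct step_", GENS and STEPS are invented for the
 * example; the wiring loops are the ones from the patch.  Live data
 * ages along the printed chain: within a generation an object moves to
 * the next step, from the last step it moves to step 0 of the next
 * generation, and the oldest generation's single step collects into
 * itself.
 */
#include <stdio.h>

struct step_ { int gen, no; struct step_ *to; };

#define GENS  3
#define STEPS 2

int main(void) {
    /* shape mirrors the patch: younger gens get STEPS steps, the
     * oldest gen gets exactly one */
    struct step_ steps[GENS][STEPS];
    int n_steps[GENS], g, s;

    for (g = 0; g < GENS; g++) {
        n_steps[g] = (g == GENS-1) ? 1 : STEPS;
        for (s = 0; s < n_steps[g]; s++)
            steps[g][s] = (struct step_){ g, s, NULL };
    }

    /* the wiring loops from the patch */
    for (g = 0; g < GENS-1; g++) {
        for (s = 0; s < n_steps[g]-1; s++)
            steps[g][s].to = &steps[g][s+1];
        steps[g][s].to = &steps[g+1][0];
    }
    steps[GENS-1][0].to = &steps[GENS-1][0];  /* oldest: collects into itself */

    /* prints g0s0->g0s1, g0s1->g1s0, g1s0->g1s1, g1s1->g2s0, g2s0->g2s0 */
    for (g = 0; g < GENS; g++)
        for (s = 0; s < n_steps[g]; s++)
            printf("g%ds%d -> g%ds%d\n", g, s,
                   steps[g][s].to->gen, steps[g][s].to->no);
    return 0;
}
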
@@ -139,16 +184,121 @@ initStorage (void)
   mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
 #endif
 
+#ifdef SMP
+  pthread_mutex_init(&sm_mutex, NULL);
+#endif
+
   IF_DEBUG(gc, stat_describe_gens());
 }
 
-static bdescr *
-allocNursery (nat blocks)
+void
+exitStorage (void)
+{
+  stat_exit(calcAllocated());
+}
+
+void
+newCAF(StgClosure* caf)
 {
-  bdescr *last_bd, *bd;
+  /* Put this CAF on the mutable list for the old generation.
+   * This is a HACK - the IND_STATIC closure doesn't really have
+   * a mut_link field, but we pretend it has - in fact we re-use
+   * the STATIC_LINK field for the time being, because when we
+   * come to do a major GC we won't need the mut_link field
+   * any more and can use it as a STATIC_LINK.
+   */
+  ACQUIRE_LOCK(&sm_mutex);
+  ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
+  oldest_gen->mut_once_list = (StgMutClosure *)caf;
+
+#ifdef DEBUG
+  {
+    const StgInfoTable *info;
+
+    info = get_itbl(caf);
+    ASSERT(info->type == IND_STATIC);
+#if 0
+    STATIC_LINK2(info,caf) = caf_list;
+    caf_list = caf;
+#endif
+  }
+#endif
+  RELEASE_LOCK(&sm_mutex);
+}
+
+/* -----------------------------------------------------------------------------
+   Nursery management.
+   -------------------------------------------------------------------------- */
+
+void
+allocNurseries( void )
+{
+#ifdef SMP
+  {
+    Capability *cap;
+    bdescr *bd;
+
+    g0s0->blocks = NULL;
+    g0s0->n_blocks = 0;
+    for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+      cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+      cap->rCurrentNursery = cap->rNursery;
+      for (bd = cap->rNursery; bd != NULL; bd = bd->link) {
+	bd->back = (bdescr *)cap;
+      }
+    }
+    /* Set the back links to be equal to the Capability,
+     * so we can do slightly better informed locking.
+     */
+  }
+#else /* SMP */
+  nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
+  g0s0->blocks    = allocNursery(NULL, nursery_blocks);
+  g0s0->n_blocks  = nursery_blocks;
+  g0s0->to_space  = NULL;
+  MainRegTable.rNursery = g0s0->blocks;
+  MainRegTable.rCurrentNursery = g0s0->blocks;
+  /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
+#endif
+}
+
+void
+resetNurseries( void )
+{
+  bdescr *bd;
+#ifdef SMP
+  Capability *cap;
+
+  /* All tasks must be stopped */
+  ASSERT(n_free_capabilities == RtsFlags.ConcFlags.nNodes);
+
+  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+    for (bd = cap->rNursery; bd; bd = bd->link) {
+      bd->free = bd->start;
+      ASSERT(bd->gen == g0);
+      ASSERT(bd->step == g0s0);
+      IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
+    }
+    cap->rCurrentNursery = cap->rNursery;
+  }
+#else
+  for (bd = g0s0->blocks; bd; bd = bd->link) {
+    bd->free = bd->start;
+    ASSERT(bd->gen == g0);
+    ASSERT(bd->step == g0s0);
+    IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
+  }
+  MainRegTable.rNursery = g0s0->blocks;
+  MainRegTable.rCurrentNursery = g0s0->blocks;
+#endif
+}
+
+bdescr *
+allocNursery (bdescr *last_bd, nat blocks)
+{
+  bdescr *bd;
   nat i;
 
-  last_bd = NULL;
   /* Allocate a nursery */
   for (i=0; i < blocks; i++) {
     bd = allocBlock();
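
/* A toy model of the nursery block chain managed by allocNurseries /
 * resetNurseries / allocNursery above.  "blk" is an invented stand-in
 * for bdescr (real descriptors point into memory owned by the block
 * allocator).  Two things are illustrated: allocNursery's prepend
 * semantics, and the reset walk that reuses the same chain after a GC
 * by winding every free pointer back to the start of its block.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct blk {
    char        mem[64];    /* stands in for the block's words */
    char       *free;       /* next allocation position */
    struct blk *link;       /* next block in the nursery chain */
} blk;

/* prepend n fresh blocks onto chain -- shaped like allocNursery() */
static blk *alloc_chain(blk *chain, int n) {
    while (n-- > 0) {
        blk *bd = malloc(sizeof(blk));
        bd->free = bd->mem;
        bd->link = chain;
        chain = bd;
    }
    return chain;
}

int main(void) {
    blk *nursery = alloc_chain(NULL, 4), *bd;

    /* pretend the mutator filled the first two blocks */
    nursery->free += 64;
    nursery->link->free += 40;

    /* the reset walk from resetNurseries() */
    for (bd = nursery; bd != NULL; bd = bd->link)
        bd->free = bd->mem;

    /* every block reports 0 bytes used again */
    for (bd = nursery; bd != NULL; bd = bd->link)
        printf("block %p: %ld bytes used\n",
               (void *)bd, (long)(bd->free - bd->mem));
    return 0;
}
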
@@ -163,61 +313,39 @@ allocNursery (nat blocks)
 }
 
 void
-exitStorage (void)
-{
-  lnat allocated;
-  bdescr *bd;
-
-  /* Return code ignored for now */
-  /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
-  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
-  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
-    allocated -= BLOCK_SIZE_W;
-  }
-  stat_exit(allocated);
-}
-
-void
-recordMutable(StgMutClosure *p)
+resizeNursery ( nat blocks )
 {
   bdescr *bd;
 
-  ASSERT(closure_MUTABLE(p));
+#ifdef SMP
+  barf("resizeNursery: can't resize in SMP mode");
+#endif
 
-  bd = Bdescr((P_)p);
+  if (nursery_blocks == blocks) {
+    ASSERT(g0s0->n_blocks == blocks);
+    return;
+  }
 
-  /* no need to bother in generation 0 */
-  if (bd->gen == g0) {
-    return;
+  else if (nursery_blocks < blocks) {
+    IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
			 blocks));
+    g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
   }
 
-  if (p->mut_link == NULL) {
-    p->mut_link = bd->gen->mut_list;
-    bd->gen->mut_list = p;
+  else {
+    bdescr *next_bd;
+
+    IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
+			 blocks));
+    for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
+      next_bd = bd->link;
+      freeGroup(bd);
+      bd = next_bd;
+    }
+    g0s0->blocks = bd;
   }
-}
-
-void
-newCAF(StgClosure* caf)
-{
-  const StgInfoTable *info;
-
-  /* Put this CAF on the mutable list for the old generation.
-   * This is a HACK - the IND_STATIC closure doesn't really have
-   * a mut_link field, but we pretend it has - in fact we re-use
-   * the STATIC_LINK field for the time being, because when we
-   * come to do a major GC we won't need the mut_link field
-   * any more and can use it as a STATIC_LINK.
-   */
-  ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_list;
-  oldest_gen->mut_list = (StgMutClosure *)caf;
-
-#ifdef DEBUG
-  info = get_itbl(caf);
-  ASSERT(info->type == IND_STATIC);
-  STATIC_LINK2(info,caf) = caf_list;
-  caf_list = caf;
-#endif
+
+  g0s0->n_blocks = nursery_blocks = blocks;
 }
 
 /* -----------------------------------------------------------------------------
@@ -234,7 +362,9 @@ allocate(nat n)
   bdescr *bd;
   StgPtr p;
 
-  TICK_ALLOC_PRIM(n,wibble,wibble,wibble)
+  ACQUIRE_LOCK(&sm_mutex);
+
+  TICK_ALLOC_HEAP_NOCTR(n);
   CCS_ALLOC(CCCS,n);
 
   /* big allocation (>LARGE_OBJECT_THRESHOLD) */
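
/* A schematic model of the size policy in allocate() above.  The
 * constants are invented (the real threshold is LARGE_OBJECT_THRESHOLD
 * and the real rounding is done by the block allocator); what is
 * mirrored is the policy: requests at or over the threshold get their
 * own block group outside the nursery, smaller ones are bump-allocated
 * out of the current "small allocation" block.
 */
#include <stdio.h>

#define BLOCK_SIZE_W  1024                    /* hypothetical words per block */
#define LARGE_THRESH  (BLOCK_SIZE_W * 8 / 10) /* hypothetical threshold */

static void classify(unsigned n_words) {
    if (n_words >= LARGE_THRESH) {
        /* round the request up to whole blocks, as allocGroup() would */
        unsigned blocks = (n_words + BLOCK_SIZE_W - 1) / BLOCK_SIZE_W;
        printf("%u words: large object, %u-block group\n", n_words, blocks);
    } else {
        printf("%u words: small, bump-allocated in the alloc area\n", n_words);
    }
}

int main(void) {
    classify(16);      /* small */
    classify(900);     /* large: 1-block group */
    classify(5000);    /* large: 5-block group */
    return 0;
}
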
@@ -252,6 +382,7 @@
    * (eg. running threads), so garbage collecting early won't make
    * much difference.
    */
+  RELEASE_LOCK(&sm_mutex);
   return bd->start;
 
   /* small allocation (<LARGE_OBJECT_THRESHOLD) */

[...]

+  for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+    for ( bd = cap->rCurrentNursery->link; bd != NULL; bd = bd->link ) {
+      allocated -= BLOCK_SIZE_W;
+    }
+    if (cap->rCurrentNursery->free < cap->rCurrentNursery->start +
+	BLOCK_SIZE_W) {
+      allocated -= (cap->rCurrentNursery->start + BLOCK_SIZE_W)
+	- cap->rCurrentNursery->free;
+    }
+  }
+
+#else /* !SMP */
+  bdescr *current_nursery = MainRegTable.rCurrentNursery;
+
+  allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
+  for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
+    allocated -= BLOCK_SIZE_W;
+  }
+  if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
+    allocated -= (current_nursery->start + BLOCK_SIZE_W)
+      - current_nursery->free;
+  }
+#endif
+
+  return allocated;
+}
+
+/* Approximate the amount of live data in the heap.  To be called just
+ * after garbage collection (see GarbageCollect()).
+ */
+extern lnat
+calcLive(void)
+{
+  nat g, s;
+  lnat live = 0;
+  step *step;
+
+  if (RtsFlags.GcFlags.generations == 1) {
+    live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W +
+      ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
+    return live;
+  }
+
+  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+    for (s = 0; s < generations[g].n_steps; s++) {
+      /* approximate amount of live data (doesn't take into account slop
+       * at end of each block).
+       */
+      if (g == 0 && s == 0) {
+	  continue;
+      }
+      step = &generations[g].steps[s];
+      live += (step->n_blocks - 1) * BLOCK_SIZE_W +
+	((lnat)step->hp_bd->free - (lnat)step->hp_bd->start) / sizeof(W_);
+    }
+  }
+  return live;
+}
+
+/* Approximate the number of blocks that will be needed at the next
+ * garbage collection.
+ *
+ * Assume: all data currently live will remain live.  Steps that will
+ * be collected next time will therefore need twice as many blocks
+ * since all the data will be copied.
+ */
+extern lnat
+calcNeeded(void)
+{
+  lnat needed = 0;
+  nat g, s;
+  step *step;
+
+  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+    for (s = 0; s < generations[g].n_steps; s++) {
+      if (g == 0 && s == 0) { continue; }
+      step = &generations[g].steps[s];
+      if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
+	needed += 2 * step->n_blocks;
+      } else {
+	needed += step->n_blocks;
+      }
+    }
+  }
+  return needed;
+}
+
+/* -----------------------------------------------------------------------------
   Debugging
 
   memInventory() checks for memory leaks by counting up all the
@@ -352,10 +603,15 @@ memInventory(void)
   lnat total_blocks = 0, free_blocks = 0;
 
   /* count the blocks we current have */
+
   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
     for (s = 0; s < generations[g].n_steps; s++) {
       step = &generations[g].steps[s];
       total_blocks += step->n_blocks;
+      if (RtsFlags.GcFlags.generations == 1) {
+	/* two-space collector has a to-space too :-) */
+	total_blocks += g0s0->to_blocks;
+      }
       for (bd = step->large_objects; bd; bd = bd->link) {
 	total_blocks += bd->blocks;
 	/* hack for megablock groups: they have an extra block or two in
@@ -364,7 +620,7 @@ memInventory(void)
 	 */
 	if (bd->blocks > BLOCKS_PER_MBLOCK) {
 	  total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
-	                  * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);
+	                  * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
 	}
       }
     }
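
/* A worked example of why the one-character parenthesisation fix just
 * above matters.  The constants are invented stand-ins for
 * MBLOCK_SIZE/BLOCK_SIZE and BLOCKS_PER_MBLOCK; the point is integer
 * division order.  The patched form divides bd->blocks by the blocks
 * per megablock first, so the descriptor overhead is subtracted once
 * per whole megablock in the group, as the surrounding comment
 * describes; without the parentheses the multiplication happens first
 * and the result can differ.
 */
#include <stdio.h>

#define BLOCKS_PER_MB 252    /* usable blocks per megablock (hypothetical) */
#define MB_IN_BLOCKS  256    /* MBLOCK_SIZE / BLOCK_SIZE (hypothetical) */

int main(void) {
    unsigned overhead = MB_IN_BLOCKS - BLOCKS_PER_MB;  /* 4 blocks per megablock */
    unsigned group = 500;                              /* blocks in one group */

    unsigned unparenthesised = overhead * group / MB_IN_BLOCKS;   /* 2000/256 = 7 */
    unsigned patched         = overhead * (group / MB_IN_BLOCKS); /* 4*1     = 4 */

    printf("old: %u  new: %u\n", unparenthesised, patched);
    return 0;
}
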
@@ -393,4 +649,33 @@
 #endif
 }
 
+/* Full heap sanity check. */
+
+extern void
+checkSanity(nat N)
+{
+  nat g, s;
+
+  if (RtsFlags.GcFlags.generations == 1) {
+    checkHeap(g0s0->to_space, NULL);
+    checkChain(g0s0->large_objects);
+  } else {
+
+    for (g = 0; g <= N; g++) {
+      for (s = 0; s < generations[g].n_steps; s++) {
+	if (g == 0 && s == 0) { continue; }
+	checkHeap(generations[g].steps[s].blocks, NULL);
+      }
+    }
+    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
+      for (s = 0; s < generations[g].n_steps; s++) {
+	checkHeap(generations[g].steps[s].blocks,
+		  generations[g].steps[s].blocks->start);
+	checkChain(generations[g].steps[s].large_objects);
+      }
+    }
+    checkFreeListSanity();
+  }
+}
+
 #endif
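
/* A sketch (not from the patch) of how checkSanity() would be driven in
 * a DEBUG build.  "after_gc" is a hypothetical call site; IF_DEBUG and
 * the sanity flag are mechanisms this file already uses (see the
 * IF_DEBUG(sanity, ...) calls in resetNurseries), and Sanity.h is the
 * header the patch adds above.  After collecting generations 0..N, the
 * collector would check the whole heap with exactly the split that
 * checkSanity(N) encodes: freshly collected steps from their new block
 * chains, uncollected older steps from their existing contents.
 */
#ifdef DEBUG
#include "Sanity.h"

static void after_gc (nat N)          /* hypothetical caller in the collector */
{
    IF_DEBUG(sanity, checkSanity(N)); /* assumes the sanity debug flag is set */
}
#endif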