X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FStorage.c;h=320a83485732715082f0401db79d4aa219a08726;hb=dfd7d6d02a597949b08161ae3d49dc6dfc9e812d;hp=1119519e42a15ab4d08ae2f27c7a81f4537e3af0;hpb=43b212f520c00ee42d2d711f26183cdb14096158;p=ghc-hetmet.git diff --git a/ghc/rts/Storage.c b/ghc/rts/Storage.c index 1119519..320a834 100644 --- a/ghc/rts/Storage.c +++ b/ghc/rts/Storage.c @@ -1,5 +1,5 @@ /* ----------------------------------------------------------------------------- - * $Id: Storage.c,v 1.33 2001/01/24 15:46:19 simonmar Exp $ + * $Id: Storage.c,v 1.41 2001/07/23 17:23:20 simonmar Exp $ * * (c) The GHC Team, 1998-1999 * @@ -68,6 +68,10 @@ initStorage (void) * fixed-size allocation area so that we get roughly even-spaced * samples. */ + + /* As an experiment, try a 2 generation collector + */ + #if defined(PROFILING) || defined(DEBUG) if (RtsFlags.ProfFlags.doHeapProfile) { RtsFlags.GcFlags.generations = 1; @@ -135,6 +139,7 @@ initStorage (void) stp->blocks = NULL; stp->n_blocks = 0; stp->gen = &generations[g]; + stp->gen_no = g; stp->hp = NULL; stp->hpLim = NULL; stp->hp_bd = NULL; @@ -143,6 +148,7 @@ initStorage (void) stp->large_objects = NULL; stp->new_large_objects = NULL; stp->scavenged_large_objects = NULL; + stp->is_compacted = 0; } } @@ -154,8 +160,10 @@ initStorage (void) generations[g].steps[s].to = &generations[g+1].steps[0]; } - /* The oldest generation has one step and its destination is the - * same step. */ + /* The oldest generation has one step and it is compacted. */ + if (RtsFlags.GcFlags.compact) { + oldest_gen->steps[0].is_compacted = 1; + } oldest_gen->steps[0].to = &oldest_gen->steps[0]; /* generation 0 is special: that's the nursery */ @@ -187,7 +195,7 @@ initStorage (void) pthread_mutex_init(&sm_mutex, NULL); #endif - IF_DEBUG(gc, stat_describe_gens()); + IF_DEBUG(gc, statDescribeGens()); } void @@ -198,6 +206,43 @@ exitStorage (void) /* ----------------------------------------------------------------------------- CAF management. + + The entry code for every CAF does the following: + + - builds a CAF_BLACKHOLE in the heap + - pushes an update frame pointing to the CAF_BLACKHOLE + - invokes UPD_CAF(), which: + - calls newCaf, below + - updates the CAF with a static indirection to the CAF_BLACKHOLE + + Why do we build a BLACKHOLE in the heap rather than just updating + the thunk directly? It's so that we only need one kind of update + frame - otherwise we'd need a static version of the update frame too. + + newCaf() does the following: + + - it puts the CAF on the oldest generation's mut-once list. + This is so that we can treat the CAF as a root when collecting + younger generations. + + For GHCI, we have additional requirements when dealing with CAFs: + + - we must *retain* all dynamically-loaded CAFs ever entered, + just in case we need them again. + - we must be able to *revert* CAFs that have been evaluated, to + their pre-evaluated form. + + To do this, we use an additional CAF list. When newCaf() is + called on a dynamically-loaded CAF, we add it to the CAF list + instead of the old-generation mutable list, and save away its + old info pointer (in caf->saved_info) for later reversion. + + To revert all the CAFs, we traverse the CAF list and reset the + info pointer to caf->saved_info, then throw away the CAF list. + (see GC.c:revertCAFs()). 
+ + -- SDM 29/1/01 + -------------------------------------------------------------------------- */ void @@ -212,98 +257,27 @@ newCAF(StgClosure* caf) */ ACQUIRE_LOCK(&sm_mutex); - ASSERT( ((StgMutClosure*)caf)->mut_link == NULL ); - ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list; - oldest_gen->mut_once_list = (StgMutClosure *)caf; - -#ifdef GHCI - /* For dynamically-loaded code, we retain all the CAFs. There is no - * way of knowing which ones we'll need in the future. - */ if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) { - caf->payload[2] = caf_list; /* IND_STATIC_LINK2() */ + ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info; + ((StgIndStatic *)caf)->static_link = caf_list; caf_list = caf; + } else { + ((StgIndStatic *)caf)->saved_info = NULL; + ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list; + oldest_gen->mut_once_list = (StgMutClosure *)caf; } -#endif - -#ifdef INTERPRETER - /* If we're Hugs, we also have to put it in the CAF table, so that - the CAF can be reverted. When reverting, CAFs created by compiled - code are recorded in the CAF table, which lives outside the - heap, in mallocville. CAFs created by interpreted code are - chained together via the link fields in StgCAFs, and are not - recorded in the CAF table. - */ - ASSERT( get_itbl(caf)->type == THUNK_STATIC ); - addToECafTable ( caf, get_itbl(caf) ); -#endif RELEASE_LOCK(&sm_mutex); -} - -#ifdef GHCI -void -markCafs( void ) -{ - StgClosure *p; - - for (p = caf_list; p != NULL; p = STATIC_LINK2(get_itbl(p),p)) { - MarkRoot(p); - } -} -#endif /* GHCI */ - -#ifdef INTERPRETER -void -newCAF_made_by_Hugs(StgCAF* caf) -{ - ACQUIRE_LOCK(&sm_mutex); - - ASSERT( get_itbl(caf)->type == CAF_ENTERED ); - recordOldToNewPtrs((StgMutClosure*)caf); - caf->link = ecafList; - ecafList = caf->link; - - RELEASE_LOCK(&sm_mutex); -} -#endif - -#ifdef INTERPRETER -/* These initialisations are critical for correct operation - on the first call of addToECafTable. -*/ -StgCAF* ecafList = END_ECAF_LIST; -StgCAFTabEntry* ecafTable = NULL; -StgInt usedECafTable = 0; -StgInt sizeECafTable = 0; - -void clearECafTable ( void ) -{ - usedECafTable = 0; -} - -void addToECafTable ( StgClosure* closure, StgInfoTable* origItbl ) -{ - StgInt i; - StgCAFTabEntry* et2; - if (usedECafTable == sizeECafTable) { - /* Make the initial table size be 8 */ - sizeECafTable *= 2; - if (sizeECafTable == 0) sizeECafTable = 8; - et2 = stgMallocBytes ( - sizeECafTable * sizeof(StgCAFTabEntry), - "addToECafTable" ); - for (i = 0; i < usedECafTable; i++) - et2[i] = ecafTable[i]; - if (ecafTable) free(ecafTable); - ecafTable = et2; - } - ecafTable[usedECafTable].closure = closure; - ecafTable[usedECafTable].origItbl = origItbl; - usedECafTable++; +#ifdef PAR + /* If we are PAR or DIST then we never forget a CAF */ + { globalAddr *newGA; + //belch("<##> Globalising CAF %08x %s",caf,info_type(caf)); + newGA=makeGlobal(caf,rtsTrue); /*given full weight*/ + ASSERT(newGA); + } +#endif /* PAR */ } -#endif /* ----------------------------------------------------------------------------- Nursery management. 
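The reversion walk promised in the CAF-management comment above ("reset the info pointer to caf->saved_info, then throw away the CAF list") comes down to a single traversal of caf_list. The following is only a sketch built from that description, not the code from GC.c:revertCAFs(); it assumes caf_list is the StgClosure* list that newCAF() builds, and uses the saved_info/static_link fields this patch adds to StgIndStatic:

    #include "Rts.h"                 /* StgClosure, StgIndStatic, StgInfoTable */

    extern StgClosure *caf_list;     /* built up by newCAF() above */

    void
    revertCAFs( void )               /* sketch; the real one lives in GC.c */
    {
        StgIndStatic *c;

        for (c = (StgIndStatic *)caf_list; c != NULL;
             c = (StgIndStatic *)c->static_link) {
            /* put the saved info pointer back, so the CAF is an
             * un-entered THUNK_STATIC again */
            ((StgClosure *)c)->header.info = c->saved_info;
            c->saved_info = NULL;
        }
        caf_list = NULL;             /* throw the CAF list away */
    }

Nulling saved_info as the sketch goes mirrors the else-branch of newCAF() above, where a NULL saved_info marks a CAF that is not on the CAF list.
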
@@ -323,7 +297,7 @@ allocNurseries( void ) cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize); cap->rCurrentNursery = cap->rNursery; for (bd = cap->rNursery; bd != NULL; bd = bd->link) { - bd->back = (bdescr *)cap; + bd->u.back = (bdescr *)cap; } } /* Set the back links to be equal to the Capability, @@ -331,10 +305,11 @@ allocNurseries( void ) */ } #else /* SMP */ - nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize; - g0s0->blocks = allocNursery(NULL, nursery_blocks); - g0s0->n_blocks = nursery_blocks; - g0s0->to_space = NULL; + nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize; + g0s0->blocks = allocNursery(NULL, nursery_blocks); + g0s0->n_blocks = nursery_blocks; + g0s0->to_blocks = NULL; + g0s0->n_to_blocks = 0; MainRegTable.rNursery = g0s0->blocks; MainRegTable.rCurrentNursery = g0s0->blocks; /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */ @@ -354,7 +329,7 @@ resetNurseries( void ) for (cap = free_capabilities; cap != NULL; cap = cap->link) { for (bd = cap->rNursery; bd; bd = bd->link) { bd->free = bd->start; - ASSERT(bd->gen == g0); + ASSERT(bd->gen_no == 0); ASSERT(bd->step == g0s0); IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE)); } @@ -363,7 +338,7 @@ resetNurseries( void ) #else for (bd = g0s0->blocks; bd; bd = bd->link) { bd->free = bd->start; - ASSERT(bd->gen == g0); + ASSERT(bd->gen_no == 0); ASSERT(bd->step == g0s0); IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE)); } @@ -383,8 +358,8 @@ allocNursery (bdescr *last_bd, nat blocks) bd = allocBlock(); bd->link = last_bd; bd->step = g0s0; - bd->gen = g0; - bd->evacuated = 0; + bd->gen_no = 0; + bd->flags = 0; bd->free = bd->start; last_bd = bd; } @@ -452,9 +427,9 @@ allocate(nat n) nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE; bd = allocGroup(req_blocks); dbl_link_onto(bd, &g0s0->large_objects); - bd->gen = g0; + bd->gen_no = 0; bd->step = g0s0; - bd->evacuated = 0; + bd->flags = BF_LARGE; bd->free = bd->start; /* don't add these blocks to alloc_blocks, since we're assuming * that large objects are likely to remain live for quite a while @@ -473,14 +448,14 @@ allocate(nat n) bd = allocBlock(); bd->link = small_alloc_list; small_alloc_list = bd; - bd->gen = g0; + bd->gen_no = 0; bd->step = g0s0; - bd->evacuated = 0; + bd->flags = 0; alloc_Hp = bd->start; alloc_HpLim = bd->start + BLOCK_SIZE_W; alloc_blocks++; } - + p = alloc_Hp; alloc_Hp += n; RELEASE_LOCK(&sm_mutex); @@ -616,7 +591,7 @@ calcLive(void) step *stp; if (RtsFlags.GcFlags.generations == 1) { - live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W + + live = (g0s0->n_to_blocks - 1) * BLOCK_SIZE_W + ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_); return live; } @@ -630,8 +605,11 @@ calcLive(void) continue; } stp = &generations[g].steps[s]; - live += (stp->n_blocks - 1) * BLOCK_SIZE_W + - ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start) / sizeof(W_); + live += (stp->n_blocks - 1) * BLOCK_SIZE_W; + if (stp->hp_bd != NULL) { + live += ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start) + / sizeof(W_); + } } } return live; @@ -655,7 +633,8 @@ calcNeeded(void) for (s = 0; s < generations[g].n_steps; s++) { if (g == 0 && s == 0) { continue; } stp = &generations[g].steps[s]; - if (generations[g].steps[0].n_blocks > generations[g].max_blocks) { + if (generations[g].steps[0].n_blocks > generations[g].max_blocks + && stp->is_compacted == 0) { needed += 2 * stp->n_blocks; } else { needed += stp->n_blocks; @@ -675,7 +654,7 @@ calcNeeded(void) #ifdef DEBUG -extern void +void memInventory(void) { nat g, s; @@ -691,7 
+670,7 @@ memInventory(void) total_blocks += stp->n_blocks; if (RtsFlags.GcFlags.generations == 1) { /* two-space collector has a to-space too :-) */ - total_blocks += g0s0->to_blocks; + total_blocks += g0s0->n_to_blocks; } for (bd = stp->large_objects; bd; bd = bd->link) { total_blocks += bd->blocks; @@ -718,45 +697,52 @@ memInventory(void) /* count the blocks on the free list */ free_blocks = countFreeList(); - ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK); - -#if 0 if (total_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK) { fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n", total_blocks, free_blocks, total_blocks + free_blocks, mblocks_allocated * BLOCKS_PER_MBLOCK); } -#endif -} -/* Full heap sanity check. */ + ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK); +} -extern void -checkSanity(nat N) +static nat +countBlocks(bdescr *bd) { - nat g, s; - - if (RtsFlags.GcFlags.generations == 1) { - checkHeap(g0s0->to_space, NULL); - checkChain(g0s0->large_objects); - } else { - - for (g = 0; g <= N; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - if (g == 0 && s == 0) { continue; } - checkHeap(generations[g].steps[s].blocks, NULL); - } + nat n; + for (n=0; bd != NULL; bd=bd->link) { + n++; } - for (g = N+1; g < RtsFlags.GcFlags.generations; g++) { - for (s = 0; s < generations[g].n_steps; s++) { - checkHeap(generations[g].steps[s].blocks, - generations[g].steps[s].blocks->start); - checkChain(generations[g].steps[s].large_objects); - } + return n; +} + +/* Full heap sanity check. */ +void +checkSanity( void ) +{ + nat g, s; + + if (RtsFlags.GcFlags.generations == 1) { + checkHeap(g0s0->to_blocks); + checkChain(g0s0->large_objects); + } else { + + for (g = 0; g < RtsFlags.GcFlags.generations; g++) { + for (s = 0; s < generations[g].n_steps; s++) { + if (g == 0 && s == 0) { continue; } + checkHeap(generations[g].steps[s].blocks); + ASSERT(countBlocks(generations[g].steps[s].blocks) + == generations[g].steps[s].n_blocks); + checkChain(generations[g].steps[s].large_objects); + if (g > 0) { + checkMutableList(generations[g].mut_list, g); + checkMutOnceList(generations[g].mut_once_list, g); + } + } + } + checkFreeListSanity(); } - checkFreeListSanity(); - } } #endif
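
A note on the calcLive() change: in the one-generation (two-space) case every to-space block except the one currently being allocated into (hp_bd) is counted as full, and hp_bd contributes only the words used so far:

    live = (n_to_blocks - 1) * BLOCK_SIZE_W
         + ((lnat)hp_bd->free - (lnat)hp_bd->start) / sizeof(W_)

So with 4k blocks and 4-byte words (BLOCK_SIZE_W = 1024), n_to_blocks = 4 and the allocation pointer half way through the current block, live works out to 3*1024 + 512 = 3584 words. The generational branch uses the same formula per step (with n_blocks), now guarded by the hp_bd != NULL test, because a step that has not been allocated into yet has no current block.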
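
The extra "&& stp->is_compacted == 0" condition in calcNeeded() records the main space saving of the new compacting collector: a step that will be copied at the next collection of its generation needs a to-space roughly as large as its from-space, so twice its current block count is reserved, whereas a compacted step is slid together within its existing blocks and only n_blocks is counted. For example, an oldest generation of 1,000 blocks that has grown past max_blocks drops from a 2,000-block to a 1,000-block estimate once the RtsFlags.GcFlags.compact option (tested in initStorage() above) has marked it is_compacted.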