X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=ghc%2Frts%2FGC.c;h=42668e7db5129ec0cae5938f9dec65ca92f92dbf;hb=9efa71070da30418a29c6eef85c51d647d6cf43d;hp=2712dba0d30c25ecc9ffced3ac879aa7e025933f;hpb=433cdcade29d3a887e4db32b241b98a0eadb39d0;p=ghc-hetmet.git

diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c
index 2712dba..42668e7 100644
--- a/ghc/rts/GC.c
+++ b/ghc/rts/GC.c
@@ -1,5 +1,5 @@
 /* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.114 2001/08/07 09:20:52 simonmar Exp $
+ * $Id: GC.c,v 1.129 2001/11/28 15:42:05 simonmar Exp $
  *
  * (c) The GHC Team 1998-1999
  *
@@ -7,6 +7,7 @@
  *
  * ---------------------------------------------------------------------------*/
 
+#include "PosixSource.h"
 #include "Rts.h"
 #include "RtsFlags.h"
 #include "RtsUtils.h"
@@ -41,6 +42,9 @@
 #include "FrontPanel.h"
 #endif
 
+#include "RetainerProfile.h"
+#include "LdvProfile.h"
+
 /* STATIC OBJECT LIST.
  *
  * During GC:
@@ -141,7 +145,6 @@ static void scavenge_large ( step * );
 static void scavenge_static ( void );
 static void scavenge_mutable_list ( generation *g );
 static void scavenge_mut_once_list ( generation *g );
-static void scavengeCAFs ( void );
 
 #if 0 && defined(DEBUG)
 static void gcCAFs ( void );
@@ -461,7 +464,10 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
       }
     }
 
-  scavengeCAFs();
+  /* follow roots from the CAF list (used by GHCi)
+   */
+  evac_gen = 0;
+  markCAFs(mark_root);
 
   /* follow all the roots that the application knows about.
    */
@@ -599,6 +605,14 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
     }
   }
 
+#ifdef PROFILING
+  // We call processHeapClosureForDead() on every closure destroyed during
+  // the current garbage collection, so we invoke LdvCensusForDead().
+  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
+      || RtsFlags.ProfFlags.bioSelector != NULL)
+    LdvCensusForDead(N);
+#endif
+
   // NO MORE EVACUATION AFTER THIS POINT!
   // Finally: compaction of the oldest generation.
   if (major_gc && oldest_gen->steps[0].is_compacted) {
@@ -735,14 +749,37 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
             RtsFlags.GcFlags.minOldGenSize);
 
     // minimum size for generation zero
-    min_alloc = (RtsFlags.GcFlags.pcFreeHeap * max) / 200;
+    min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
+                        RtsFlags.GcFlags.minAllocAreaSize);
+
+    // Auto-enable compaction when the residency reaches a
+    // certain percentage of the maximum heap size (default: 30%).
+    if (RtsFlags.GcFlags.generations > 1 &&
+        (RtsFlags.GcFlags.compact ||
+         (max > 0 &&
+          oldest_gen->steps[0].n_blocks >
+          (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
+        oldest_gen->steps[0].is_compacted = 1;
+//      fprintf(stderr,"compaction: on\n", live);
+    } else {
+        oldest_gen->steps[0].is_compacted = 0;
+//      fprintf(stderr,"compaction: off\n", live);
+    }
 
     // if we're going to go over the maximum heap size, reduce the
     // size of the generations accordingly.  The calculation is
     // different if compaction is turned on, because we don't need
     // to double the space required to collect the old generation.
     if (max != 0) {
-        if (RtsFlags.GcFlags.compact) {
+
+        // this test is necessary to ensure that the calculations
+        // below don't have any negative results - we're working
+        // with unsigned values here.
+        if (max < min_alloc) {
+            heapOverflow();
+        }
+
+        if (oldest_gen->steps[0].is_compacted) {
             if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
             }
@@ -765,18 +802,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
     for (g = 0; g < gens; g++) {
         generations[g].max_blocks = size;
     }
-
-    // Auto-enable compaction when the residency reaches a
-    // certain percentage of the maximum heap size (default: 30%).
-    if (RtsFlags.GcFlags.compact &&
-        oldest_gen->steps[0].n_blocks >
-        (RtsFlags.GcFlags.compactThreshold * max) / 100) {
-        oldest_gen->steps[0].is_compacted = 1;
-//      fprintf(stderr,"compaction: on\n", live);
-    } else {
-        oldest_gen->steps[0].is_compacted = 0;
-//      fprintf(stderr,"compaction: off\n", live);
-    }
   }
 
   // Guess the amount of live data for stats.
@@ -794,6 +819,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   alloc_HpLim = NULL;
   alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
 
+  // Start a new pinned_object_block
+  pinned_object_block = NULL;
+
   /* Free the mark stack.
    */
   if (mark_stack_bdescr != NULL) {
@@ -842,8 +870,9 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
      */
     blocks = g0s0->n_to_blocks;
 
-    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
-         RtsFlags.GcFlags.maxHeapSize ) {
+    if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
+         blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
+         RtsFlags.GcFlags.maxHeapSize ) {
       long adjusted_blocks;  // signed on purpose
       int pc_free;
 
@@ -902,6 +931,11 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
       }
 
       resizeNursery((nat)blocks);
+
+    } else {
+      // we might have added extra large blocks to the nursery, so
+      // resize back to minAllocAreaSize again.
+      resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
     }
   }
 
@@ -910,6 +944,12 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
   if (major_gc) { gcCAFs(); }
 #endif
 
+#ifdef PROFILING
+  // resetStaticObjectForRetainerProfiling() must be called before
+  // zeroing below.
+  resetStaticObjectForRetainerProfiling();
+#endif
+
   // zero the scavenged static object list
   if (major_gc) {
     zero_static_object_list(scavenged_static_objects);
@@ -940,7 +980,6 @@ GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
 
   // restore enclosing cost centre
 #ifdef PROFILING
-  heapCensus();
   CCCS = prev_CCS;
 #endif
 
@@ -1248,6 +1287,10 @@ static __inline__ StgClosure *
 copy(StgClosure *src, nat size, step *stp)
 {
   P_ to, from, dest;
+#ifdef PROFILING
+  // @LDV profiling
+  nat size_org = size;
+#endif
 
   TICK_GC_WORDS_COPIED(size);
   /* Find out where we're going, using the handy "to" pointer in
@@ -1277,6 +1320,11 @@ copy(StgClosure *src, nat size, step *stp)
   dest = stp->hp;
   stp->hp = to;
   upd_evacuee(src,(StgClosure *)dest);
+#ifdef PROFILING
+  // We store the size of the just evacuated object in the LDV word so that
+  // the profiler can guess the position of the next object later.
+  SET_EVACUAEE_FOR_LDV(src, size_org);
+#endif
   return (StgClosure *)dest;
 }
 
@@ -1286,10 +1334,14 @@ copy(StgClosure *src, nat size, step *stp)
  */
 
-static __inline__ StgClosure *
+static StgClosure *
 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
 {
   P_ dest, to, from;
+#ifdef PROFILING
+  // @LDV profiling
+  nat size_to_copy_org = size_to_copy;
+#endif
 
   TICK_GC_WORDS_COPIED(size_to_copy);
   if (stp->gen_no < evac_gen) {
@@ -1311,6 +1363,16 @@ copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
   dest = stp->hp;
   stp->hp += size_to_reserve;
   upd_evacuee(src,(StgClosure *)dest);
+#ifdef PROFILING
+  // We store the size of the just evacuated object in the LDV word so that
+  // the profiler can guess the position of the next object later.
+  // size_to_copy_org is wrong because the closure already occupies size_to_reserve
+  // words.
+  SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
+  // fill the slop
+  if (size_to_reserve - size_to_copy_org > 0)
+    FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
+#endif
   return (StgClosure *)dest;
 }
 
@@ -1333,9 +1395,10 @@ evacuate_large(StgPtr p)
   bdescr *bd = Bdescr(p);
   step *stp;
 
-  // should point to the beginning of the block
-  ASSERT(((W_)p & BLOCK_MASK) == 0);
-
+  // object must be at the beginning of the block (or be a ByteArray)
+  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
+         (((W_)p & BLOCK_MASK) == 0));
+
   // already evacuated?
   if (bd->flags & BF_EVACUATED) {
     /* Don't forget to set the failed_to_evac flag if we didn't get
@@ -1448,6 +1511,9 @@ loop:
   if (HEAP_ALLOCED(q)) {
     bd = Bdescr((P_)q);
 
+    // not a group head: find the group head
+    if (bd->blocks == 0) { bd = bd->link; }
+
     if (bd->gen_no > N) {
       /* Can't evacuate this object, because it's in a generation
        * older than the ones we're collecting.  Let's hope that it's
@@ -1586,6 +1652,7 @@ loop:
     case CONSTR_1_1:
     case CONSTR_0_2:
     case CONSTR_STATIC:
+    case CONSTR_NOCAF_STATIC:
       {
         StgWord offset = info->layout.selector_offset;
 
@@ -2134,9 +2201,23 @@ scavenge(step *stp)
     }
 
     case IND_PERM:
-      if (stp->gen_no != 0) {
-        SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
-      }
+      if (stp->gen->no != 0) {
+#ifdef PROFILING
+        // @LDV profiling
+        // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
+        // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
+        LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
+#endif
+        //
+        // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+        //
+        SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
+#ifdef PROFILING
+        // @LDV profiling
+        // We pretend that p has just been created.
+        LDV_recordCreate((StgClosure *)p);
+#endif
+      }
       // fall through
     case IND_OLDGEN_PERM:
       ((StgIndOldGen *)p)->indirectee =
@@ -3455,15 +3536,14 @@ revertCAFs( void )
 }
 
 void
-scavengeCAFs( void )
+markCAFs( evac_fn evac )
 {
     StgIndStatic *c;
 
-    evac_gen = 0;
     for (c = (StgIndStatic *)caf_list; c != NULL;
         c = (StgIndStatic *)c->static_link)
     {
-       c->indirectee = evacuate(c->indirectee);
+       evac(&c->indirectee);
     }
 }
 
@@ -3563,7 +3643,17 @@ threadLazyBlackHole(StgTSO *tso)
 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
           belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
 #endif
+#ifdef PROFILING
+          // @LDV profiling
+          // We pretend that bh is now dead.
+          LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
+#endif
           SET_INFO(bh,&stg_BLACKHOLE_info);
+#ifdef PROFILING
+          // @LDV profiling
+          // We pretend that bh has just been created.
+          LDV_recordCreate(bh);
+#endif
        }
 
        update_frame = update_frame->link;
@@ -3805,7 +3895,20 @@ threadSqueezeStack(StgTSO *tso)
            }
          }
 #endif
+#ifdef PROFILING
+          // @LDV profiling
+          // We pretend that bh is now dead.
+          LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
+#endif
+          //
+          // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+          //
          SET_INFO(bh,&stg_BLACKHOLE_info);
+#ifdef PROFILING
+          // @LDV profiling
+          // We pretend that bh has just been created.
+          LDV_recordCreate(bh);
+#endif
        }
      }
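
A note on the relocated compaction test in the sizing hunk above: the heuristic now sits next to the min_alloc computation, and it enables compaction of the oldest generation either because the user asked for it (RtsFlags.GcFlags.compact) or because residency has crossed RtsFlags.GcFlags.compactThreshold percent of the maximum heap size. What follows is a minimal standalone sketch of just that decision; struct gc_flags and should_compact are illustrative stand-ins, not RTS identifiers.

#include <stdio.h>

typedef unsigned long nat;

struct gc_flags {            /* stand-in for the RtsFlags.GcFlags fields used */
    nat generations;         /* number of generations */
    int compact;             /* compaction forced on the command line? */
    nat compactThreshold;    /* residency percentage that triggers compaction */
};

/* max and n_oldest_blocks are in blocks; max == 0 means "no maximum". */
static int
should_compact(const struct gc_flags *f, nat max, nat n_oldest_blocks)
{
    return f->generations > 1 &&
           (f->compact ||
            (max > 0 &&
             n_oldest_blocks > (f->compactThreshold * max) / 100));
}

int main(void)
{
    struct gc_flags f = { 2, 0, 30 };   /* default 30% threshold, -c not given */
    printf("%d\n", should_compact(&f, 1000, 299));   /* 0: below 30% residency */
    printf("%d\n", should_compact(&f, 1000, 301));   /* 1: above 30% residency */
    return 0;
}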
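The new heapOverflow() guard in the same hunk ("if (max < min_alloc) ...") exists because the sizing arithmetic that follows computes (max - min_alloc) on unsigned block counts; a max smaller than min_alloc would wrap around to a huge positive value instead of going negative. A standalone illustration of the failure mode, reusing the hunk's variable names with plain C types:

#include <stdio.h>

int main(void)
{
    unsigned long max = 100, min_alloc = 200, gens = 2;

    /* the compacting-case formula from the hunk above, without the guard:
     * (max - min_alloc) wraps around rather than producing a negative size */
    unsigned long size = (max - min_alloc) / ((gens - 1) * 2 - 1);
    printf("unguarded size = %lu blocks\n", size);   /* absurdly large */

    /* the guard added by the diff turns this case into a heap overflow */
    if (max < min_alloc)
        printf("heapOverflow(): maximum heap size is too small\n");
    return 0;
}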
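Every site in this diff that overwrites a closure's info pointer under PROFILING follows the same LDV discipline: record the old closure as dead (filling any slop so the profiler can still walk the heap linearly), perform the overwrite, then record the closure at that address as newly created. A sketch of the pattern as a helper, assuming the RTS headers and a PROFILING build; blackhole_closure is a hypothetical name, since the diff inlines this sequence at each SET_INFO site rather than factoring it out:

#include "Rts.h"
#include "LdvProfile.h"

static void
blackhole_closure( StgClosure *bh )
{
#ifdef PROFILING
    // the closure at bh dies here; fill its slop for the LDV profiler
    LDV_recordDead_FILL_SLOP_DYNAMIC(bh);
#endif
    SET_INFO(bh, &stg_BLACKHOLE_info);    // the actual overwrite
#ifdef PROFILING
    // the BLACKHOLE now occupying the same address counts as freshly created
    LDV_recordCreate(bh);
#endif
}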