X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FStorage.c;h=69e441de5f6fb7e9378c04eebe79d46e47d805d4;hb=74ee9df9f9e79e7110e9d8541b84010f35c464c5;hp=db0299c5034c66f8bc2c095e79d551bc4b48cb94;hpb=200c73fdfea734765c48309cc8dcbcf44b69c8c5;p=ghc-hetmet.git

diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index db0299c..69e441d 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -87,6 +87,7 @@ initStep (step *stp, int g, int s)
     stp->blocks = NULL;
     stp->n_blocks = 0;
     stp->n_words = 0;
+    stp->live_estimate = 0;
     stp->old_blocks = NULL;
     stp->n_old_blocks = 0;
     stp->gen = &generations[g];
@@ -95,7 +96,8 @@ initStep (step *stp, int g, int s)
     stp->n_large_blocks = 0;
     stp->scavenged_large_objects = NULL;
     stp->n_scavenged_large_blocks = 0;
-    stp->is_compacted = 0;
+    stp->mark = 0;
+    stp->compact = 0;
     stp->bitmap = NULL;
 #ifdef THREADED_RTS
     initSpinLock(&stp->sync_todo);
@@ -230,11 +232,13 @@ initStorage( void )
     }

     /* The oldest generation has one step. */
-    if (RtsFlags.GcFlags.compact) {
+    if (RtsFlags.GcFlags.compact || RtsFlags.GcFlags.sweep) {
         if (RtsFlags.GcFlags.generations == 1) {
-            errorBelch("WARNING: compaction is incompatible with -G1; disabled");
+            errorBelch("WARNING: compact/sweep is incompatible with -G1; disabled");
         } else {
-            oldest_gen->steps[0].is_compacted = 1;
+            oldest_gen->steps[0].mark = 1;
+            if (RtsFlags.GcFlags.compact)
+                oldest_gen->steps[0].compact = 1;
         }
     }

@@ -553,6 +557,22 @@ resizeNurseries (nat blocks)
     resizeNurseriesFixed(blocks / n_nurseries);
 }

+
+/* -----------------------------------------------------------------------------
+   move_TSO is called to update the TSO structure after it has been
+   moved from one place to another.
+   -------------------------------------------------------------------------- */
+
+void
+move_TSO (StgTSO *src, StgTSO *dest)
+{
+    ptrdiff_t diff;
+
+    // relocate the stack pointer...
+    diff = (StgPtr)dest - (StgPtr)src; // In *words*
+    dest->sp = (StgPtr)dest->sp + diff;
+}
+
 /* -----------------------------------------------------------------------------
    The allocate() interface

@@ -846,11 +866,11 @@ void
 dirty_TSO (Capability *cap, StgTSO *tso)
 {
     bdescr *bd;
-    if ((tso->flags & TSO_DIRTY) == 0) {
-        tso->flags |= TSO_DIRTY;
+    if ((tso->flags & (TSO_DIRTY|TSO_LINK_DIRTY)) == 0) {
         bd = Bdescr((StgPtr)tso);
         if (bd->gen_no > 0) recordMutableCap((StgClosure*)tso,cap,bd->gen_no);
     }
+    tso->flags |= TSO_DIRTY;
 }

 /*
@@ -1016,6 +1036,7 @@ countOccupied(bdescr *bd)
     words = 0;
     for (; bd != NULL; bd = bd->link) {
+        ASSERT(bd->free <= bd->start + bd->blocks * BLOCK_SIZE_W);
         words += bd->free - bd->start;
     }
     return words;
@@ -1063,14 +1084,27 @@ calcNeeded(void)
     for (s = 0; s < generations[g].n_steps; s++) {
         if (g == 0 && s == 0) { continue; }
         stp = &generations[g].steps[s];
+
+        // we need at least this much space
+        needed += stp->n_blocks + stp->n_large_blocks;
+
+        // any additional space needed to collect this gen next time?
         if (g == 0 || // always collect gen 0
             (generations[g].steps[0].n_blocks +
              generations[g].steps[0].n_large_blocks
-             > generations[g].max_blocks
-             && stp->is_compacted == 0)) {
-            needed += 2 * stp->n_blocks + stp->n_large_blocks;
-        } else {
-            needed += stp->n_blocks + stp->n_large_blocks;
+             > generations[g].max_blocks)) {
+            // we will collect this gen next time
+            if (stp->mark) {
+                // bitmap:
+                needed += stp->n_blocks / BITS_IN(W_);
+                // mark stack:
+                needed += stp->n_blocks / 100;
+            }
+            if (stp->compact) {
+                continue; // no additional space needed for compaction
+            } else {
+                needed += stp->n_blocks;
+            }
         }
     }
 }
@@ -1292,6 +1326,9 @@ memInventory (rtsBool show)
 #define MB(n) (((n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

   leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;
+
+  ASSERT(n_alloc_blocks == live_blocks);
+
   if (show || leak) {
       if (leak) {
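For reference, the reworked calcNeeded() hunk above replaces the old "2 * n_blocks" heuristic with a per-step estimate: the blocks the step already occupies, plus a mark bitmap (roughly n_blocks / BITS_IN(W_)) and a mark stack (roughly n_blocks / 100) when the step will be marked, plus a full copy's worth of blocks only when the step will not be compacted in place. The standalone C sketch below mirrors that arithmetic outside the RTS; estimate_step_blocks and its parameters are illustrative names (not part of the patch), and BITS_IN(W_) is assumed to be the number of bits in a machine word.

/* Standalone sketch, not RTS code: models the per-step reservation that
 * the patched calcNeeded() computes. All quantities are in blocks. */
#include <limits.h>
#include <stdio.h>

static unsigned long
estimate_step_blocks (unsigned long n_blocks,
                      unsigned long n_large_blocks,
                      int will_collect, int mark, int compact)
{
    /* we always need at least the space the step occupies now */
    unsigned long needed = n_blocks + n_large_blocks;

    if (will_collect) {
        if (mark) {
            /* mark bitmap: about one bit per word of the step */
            needed += n_blocks / (CHAR_BIT * sizeof(unsigned long));
            /* mark stack: about 1% of the step */
            needed += n_blocks / 100;
        }
        if (!compact) {
            /* copying (or sweeping into fresh blocks) needs to-space */
            needed += n_blocks;
        }
        /* compacting in place needs no additional to-space */
    }
    return needed;
}

int main (void)
{
    /* e.g. a 10000-block oldest step that will be marked but not compacted */
    printf("%lu blocks\n", estimate_step_blocks(10000, 50, 1, 1, 0));
    return 0;
}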