X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FGC.c;h=e0792d970d9104b4b3ca763c6d878e6c6e44bf0b;hb=3e140bdf457f0c4c762ba344f396d84afef6d8f4;hp=2b6dbb7dd2caf422e985b43db4c7312e48a63784;hpb=e686d8dc6cd67c35132059d0718d1501f0af1e67;p=ghc-hetmet.git

diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 2b6dbb7..e0792d9 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -39,7 +39,6 @@
 #include "Trace.h"
 #include "RetainerProfile.h"
 #include "RaiseAsync.h"
-#include "Sparks.h"
 #include "Papi.h"
 
 #include "GC.h"
@@ -50,6 +49,7 @@
 #include "GCUtils.h"
 #include "MarkWeak.h"
 #include "Sparks.h"
+#include "Sweep.h"
 
 #include <string.h> // for memset()
 #include <unistd.h>
@@ -129,6 +129,8 @@ long copied;        // *words* copied & scavenged during this GC
 SpinLock recordMutableGen_sync;
 #endif
 
+DECLARE_GCT
+
 /* -----------------------------------------------------------------------------
    Static function declarations
    -------------------------------------------------------------------------- */
@@ -144,7 +146,7 @@ static void update_task_list (void);
 static void resize_generations (void);
 static void resize_nursery (void);
 static void start_gc_threads (void);
-static void gc_thread_work (void);
+static void scavenge_until_all_done (void);
 static nat inc_running (void);
 static nat dec_running (void);
 static void wakeup_gc_threads (nat n_threads);
@@ -183,7 +185,6 @@ GarbageCollect ( rtsBool force_major_gc )
   bdescr *bd;
   step *stp;
   lnat live, allocated, max_copied, avg_copied, slop;
-  lnat oldgen_saved_blocks = 0;
   gc_thread *saved_gct;
   nat g, s, t, n;
 
@@ -242,10 +243,10 @@ GarbageCollect ( rtsBool force_major_gc )
   start_gc_threads();
 
   /* How many threads will be participating in this GC?
-   * We don't try to parallelise minor GC.
+   * We don't try to parallelise minor GC, or mark/compact/sweep GC.
    */
 #if defined(THREADED_RTS)
-  if (n < (4*1024*1024 / BLOCK_SIZE)) {
+  if (n < (4*1024*1024 / BLOCK_SIZE) || oldest_gen->steps[0].mark) {
       n_gc_threads = 1;
   } else {
       n_gc_threads = RtsFlags.ParFlags.gcThreads;
@@ -253,8 +254,8 @@ GarbageCollect ( rtsBool force_major_gc )
 #else
   n_gc_threads = 1;
 #endif
-  trace(TRACE_gc|DEBUG_gc, "GC (gen %d): %dKB to collect, using %d thread(s)",
-        N, n * (BLOCK_SIZE / 1024), n_gc_threads);
+  trace(TRACE_gc|DEBUG_gc, "GC (gen %d): %d KB to collect, %ld MB in use, using %d thread(s)",
+        N, n * (BLOCK_SIZE / 1024), mblocks_allocated, n_gc_threads);
 
 #ifdef RTS_GTK_FRONTPANEL
   if (RtsFlags.GcFlags.frontpanel) {
@@ -288,10 +289,13 @@ GarbageCollect ( rtsBool force_major_gc )
   /* Allocate a mark stack if we're doing a major collection.
    */
   if (major_gc) {
-      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
+      nat mark_stack_blocks;
+      mark_stack_blocks = stg_max(MARK_STACK_BLOCKS,
+                                  oldest_gen->steps[0].n_old_blocks / 100);
+      mark_stack_bdescr = allocGroup(mark_stack_blocks);
       mark_stack = (StgPtr *)mark_stack_bdescr->start;
       mark_sp    = mark_stack;
-      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
+      mark_splim = mark_stack + (mark_stack_blocks * BLOCK_SIZE_W);
   } else {
       mark_stack_bdescr = NULL;
   }
@@ -350,7 +354,7 @@ GarbageCollect ( rtsBool force_major_gc )
    */
   for (;;) {
-      gc_thread_work();
+      scavenge_until_all_done();
      // The other threads are now stopped.  We might recurse back to
      // here, but from now on this is the only thread.
@@ -377,6 +381,9 @@ GarbageCollect ( rtsBool force_major_gc )
   // Update pointers from the Task list
   update_task_list();
 
+  // Update pointers from capabilities (probably just the spark queues)
+  updateCapabilitiesPostGC();
+
   // Now see which stable names are still alive.
   gcStablePtrTable();
 
@@ -389,14 +396,6 @@ GarbageCollect ( rtsBool force_major_gc )
 #endif
 
   // NO MORE EVACUATION AFTER THIS POINT!
-  // Finally: compaction of the oldest generation.
-  if (major_gc && oldest_gen->steps[0].is_compacted) {
-      // save number of blocks for stats
-      oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
-      compact(gct->scavenged_static_objects);
-  }
-
-  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
 
   // Two-space collector: free the old to-space.
   // g0s0->old_blocks is the old nursery
@@ -408,9 +407,7 @@ GarbageCollect ( rtsBool force_major_gc )
       }
   }
 
-  // For each workspace, in each thread:
-  //    * clear the BF_EVACUATED flag from each copied block
-  //    * move the copied blocks to the step
+  // For each workspace, in each thread, move the copied blocks to the step
   {
       gc_thread *thr;
       step_workspace *ws;
@@ -420,7 +417,12 @@ GarbageCollect ( rtsBool force_major_gc )
           thr = gc_threads[t];
 
           // not step 0
-          for (s = 1; s < total_steps; s++) {
+          if (RtsFlags.GcFlags.generations == 1) {
+              s = 0;
+          } else {
+              s = 1;
+          }
+          for (; s < total_steps; s++) {
              ws = &thr->steps[s];
 
              // Push the final block
@@ -433,7 +435,6 @@ GarbageCollect ( rtsBool force_major_gc )
              prev = NULL;
              for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
-                 bd->flags &= ~BF_EVACUATED;    // now from-space
                  ws->step->n_words += bd->free - bd->start;
                  prev = bd;
              }
@@ -442,6 +443,23 @@ GarbageCollect ( rtsBool force_major_gc )
                  ws->step->blocks = ws->scavd_list;
              }
              ws->step->n_blocks += ws->n_scavd_blocks;
+          }
+      }
+
+      // Add all the partial blocks *after* we've added all the full
+      // blocks.  This is so that we can grab the partial blocks back
+      // again and try to fill them up in the next GC.
+      for (t = 0; t < n_gc_threads; t++) {
+          thr = gc_threads[t];
+
+          // not step 0
+          if (RtsFlags.GcFlags.generations == 1) {
+              s = 0;
+          } else {
+              s = 1;
+          }
+          for (; s < total_steps; s++) {
+              ws = &thr->steps[s];
 
              prev = NULL;
              for (bd = ws->part_list; bd != NULL; bd = next) {
@@ -455,7 +473,6 @@ GarbageCollect ( rtsBool force_major_gc )
                      freeGroup(bd);
                      ws->n_part_blocks--;
                  } else {
-                     bd->flags &= ~BF_EVACUATED;    // now from-space
                      ws->step->n_words += bd->free - bd->start;
                      prev = bd;
                  }
@@ -472,19 +489,16 @@ GarbageCollect ( rtsBool force_major_gc )
      }
   }
 
-  // Two-space collector: swap the semi-spaces around.
-  // Currently: g0s0->old_blocks is the old nursery
-  //            g0s0->blocks     is to-space from this GC
-  // We want these the other way around.
-  if (RtsFlags.GcFlags.generations == 1) {
-      bdescr *nursery_blocks = g0s0->old_blocks;
-      nat n_nursery_blocks = g0s0->n_old_blocks;
-      g0s0->old_blocks = g0s0->blocks;
-      g0s0->n_old_blocks = g0s0->n_blocks;
-      g0s0->blocks = nursery_blocks;
-      g0s0->n_blocks = n_nursery_blocks;
+  // Finally: compact or sweep the oldest generation.
+  if (major_gc && oldest_gen->steps[0].mark) {
+      if (oldest_gen->steps[0].compact)
+          compact(gct->scavenged_static_objects);
+      else
+          sweep(&oldest_gen->steps[0]);
   }
+  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
+
   /* run through all the generations/steps and tidy up
    */
   copied = 0;
@@ -535,7 +549,7 @@ GarbageCollect ( rtsBool force_major_gc )
      }
 
      for (s = 0; s < generations[g].n_steps; s++) {
-         bdescr *next;
+         bdescr *next, *prev;
          stp = &generations[g].steps[s];
 
          // for generations we collected...
@@ -546,28 +560,48 @@ GarbageCollect ( rtsBool force_major_gc )
           * freed blocks will probably be quickly recycled.
           */
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
 
-             if (stp->is_compacted)
+             if (stp->mark)
              {
-                 // for a compacted step, just shift the new to-space
-                 // onto the front of the now-compacted existing blocks.
-                 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
-                     bd->flags &= ~BF_EVACUATED;  // now from-space
-                     stp->n_words += bd->free - bd->start;
-                 }
                  // tack the new blocks on the end of the existing blocks
                  if (stp->old_blocks != NULL) {
+
+                     prev = NULL;
                      for (bd = stp->old_blocks; bd != NULL; bd = next) {
-                         // NB. this step might not be compacted next
-                         // time, so reset the BF_COMPACTED flags.
-                         // They are set before GC if we're going to
-                         // compact.  (search for BF_COMPACTED above).
-                         bd->flags &= ~BF_COMPACTED;
-                         next = bd->link;
-                         if (next == NULL) {
-                             bd->link = stp->blocks;
-                         }
+
+                         next = bd->link;
+
+                         if (!(bd->flags & BF_MARKED))
+                         {
+                             if (prev == NULL) {
+                                 stp->old_blocks = next;
+                             } else {
+                                 prev->link = next;
+                             }
+                             freeGroup(bd);
+                             stp->n_old_blocks--;
+                         }
+                         else
+                         {
+                             stp->n_words += bd->free - bd->start;
+
+                             // NB. this step might not be compacted next
+                             // time, so reset the BF_MARKED flags.
+                             // They are set before GC if we're going to
+                             // compact.  (search for BF_MARKED above).
+                             bd->flags &= ~BF_MARKED;
+
+                             // between GCs, all blocks in the heap except
+                             // for the nursery have the BF_EVACUATED flag set.
+                             bd->flags |= BF_EVACUATED;
+
+                             prev = bd;
+                         }
                      }
-                     stp->blocks = stp->old_blocks;
+
+                     if (prev != NULL) {
+                         prev->link = stp->blocks;
+                         stp->blocks = stp->old_blocks;
+                     }
                  }
                  // add the new blocks to the block tally
                  stp->n_blocks += stp->n_old_blocks;
@@ -593,10 +627,6 @@ GarbageCollect ( rtsBool force_major_gc )
                  bd = next;
              }
 
-             // update the count of blocks used by large objects
-             for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
-                 bd->flags &= ~BF_EVACUATED;
-             }
              stp->large_objects = stp->scavenged_large_objects;
              stp->n_large_blocks = stp->n_scavenged_large_blocks;
@@ -609,7 +639,6 @@ GarbageCollect ( rtsBool force_major_gc )
           */
          for (bd = stp->scavenged_large_objects; bd; bd = next) {
              next = bd->link;
-             bd->flags &= ~BF_EVACUATED;
              dbl_link_onto(bd, &stp->large_objects);
          }
 
@@ -693,6 +722,7 @@ GarbageCollect ( rtsBool force_major_gc )
   // send exceptions to any threads which were about to die
   RELEASE_SM_LOCK;
   resurrectThreads(resurrected_threads);
+  performPendingThrowTos(exception_threads);
   ACQUIRE_SM_LOCK;
 
   // Update the stable pointer hash table.
@@ -891,28 +921,57 @@ dec_running (void)
     return n_running;
 }
 
-//
-// gc_thread_work(): Scavenge until there's no work left to do and all
-// the running threads are idle.
-//
+static rtsBool
+any_work (void)
+{
+    int s;
+    step_workspace *ws;
+
+    gct->any_work++;
+
+    write_barrier();
+
+    // scavenge objects in compacted generation
+    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
+        (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+        return rtsTrue;
+    }
+
+    // Check for global work in any step.  We don't need to check for
+    // local work, because we have already exited scavenge_loop(),
+    // which means there is no local work for this thread.
+    for (s = total_steps-1; s >= 0; s--) {
+        if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+            continue;
+        }
+        ws = &gct->steps[s];
+        if (ws->todo_large_objects) return rtsTrue;
+        if (ws->step->todos) return rtsTrue;
+    }
+
+    gct->no_work++;
+
+    return rtsFalse;
+}
+
 static void
-gc_thread_work (void)
+scavenge_until_all_done (void)
 {
     nat r;
 
     debugTrace(DEBUG_gc, "GC thread %d working", gct->thread_index);
 
-    // gc_running_threads has already been incremented for us; either
-    // this is the main thread and we incremented it inside
-    // GarbageCollect(), or this is a worker thread and the main
-    // thread bumped gc_running_threads before waking us up.
-
-    // Every thread evacuates some roots.
-    gct->evac_step = 0;
-    markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads);
-
 loop:
+#if defined(THREADED_RTS)
+    if (n_gc_threads > 1) {
+        scavenge_loop();
+    } else {
+        scavenge_loop1();
+    }
+#else
     scavenge_loop();
+#endif
+
     // scavenge_loop() only exits when there's no work to do
 
     r = dec_running();
 
@@ -920,7 +979,7 @@ loop:
               gct->thread_index, r);
 
     while (gc_running_threads != 0) {
-        usleep(1);
+        // usleep(1);
        if (any_work()) {
            inc_running();
            goto loop;
@@ -935,8 +994,26 @@ loop:
     debugTrace(DEBUG_gc, "GC thread %d finished.", gct->thread_index);
 }
 
-
 #if defined(THREADED_RTS)
+//
+// gc_thread_work(): Scavenge until there's no work left to do and all
+// the running threads are idle.
+//
+static void
+gc_thread_work (void)
+{
+    // gc_running_threads has already been incremented for us; this is
+    // a worker thread and the main thread bumped gc_running_threads
+    // before waking us up.
+
+    // Every thread evacuates some roots.
+    gct->evac_step = 0;
+    markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads);
+
+    scavenge_until_all_done();
+}
+
+
 static void
 gc_thread_mainloop (void)
 {
@@ -1074,20 +1151,26 @@ init_collected_gen (nat g, nat n_threads)
 
     for (s = 0; s < generations[g].n_steps; s++) {
 
+        stp = &generations[g].steps[s];
+        ASSERT(stp->gen_no == g);
+
+        // we'll construct a new list of threads in this step
+        // during GC, throw away the current list.
+        stp->old_threads = stp->threads;
+        stp->threads = END_TSO_QUEUE;
+
        // generation 0, step 0 doesn't need to-space
        if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
        }
 
-       stp = &generations[g].steps[s];
-       ASSERT(stp->gen_no == g);
-
        // deprecate the existing blocks
        stp->old_blocks   = stp->blocks;
        stp->n_old_blocks = stp->n_blocks;
        stp->blocks       = NULL;
        stp->n_blocks     = 0;
        stp->n_words      = 0;
+       stp->live_estimate = 0;
 
        // we don't have any to-be-scavenged blocks yet
        stp->todos = NULL;
@@ -1098,13 +1181,18 @@ init_collected_gen (nat g, nat n_threads)
        stp->scavenged_large_objects = NULL;
        stp->n_scavenged_large_blocks = 0;
 
-       // mark the large objects as not evacuated yet
+       // mark the small objects as from-space
+       for (bd = stp->old_blocks; bd; bd = bd->link) {
+           bd->flags &= ~BF_EVACUATED;
+       }
+
+       // mark the large objects as from-space
        for (bd = stp->large_objects; bd; bd = bd->link) {
            bd->flags &= ~BF_EVACUATED;
        }
 
        // for a compacted step, we need to allocate the bitmap
-       if (stp->is_compacted) {
+       if (stp->mark) {
            nat bitmap_size; // in bytes
            bdescr *bitmap_bdescr;
            StgWord *bitmap;
@@ -1129,12 +1217,14 @@ init_collected_gen (nat g, nat n_threads)
                    bd->u.bitmap = bitmap;
                    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
 
-                   // Also at this point we set the BF_COMPACTED flag
+                   // Also at this point we set the BF_MARKED flag
                    // for this block.  The invariant is that
-                   // BF_COMPACTED is always unset, except during GC
+                   // BF_MARKED is always unset, except during GC
                    // when it is set on those blocks which will be
                    // compacted.
-                   bd->flags |= BF_COMPACTED;
+                   if (!(bd->flags & BF_FRAGMENTED)) {
+                       bd->flags |= BF_MARKED;
+                   }
                }
            }
        }
@@ -1187,11 +1277,12 @@ init_uncollected_gen (nat g, nat threads)
        stp->n_scavenged_large_blocks = 0;
     }
 
-    for (t = 0; t < threads; t++) {
-       for (s = 0; s < generations[g].n_steps; s++) {
+    for (s = 0; s < generations[g].n_steps; s++) {
+       stp = &generations[g].steps[s];
+
+       for (t = 0; t < threads; t++) {
            ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
-           stp = ws->step;
 
            ws->buffer_todo_bd = NULL;
            ws->todo_large_objects = NULL;
@@ -1222,8 +1313,26 @@ init_uncollected_gen (nat g, nat threads)
                alloc_todo_block(ws,0);
            }
        }
+
+       // deal out any more partial blocks to the threads' part_lists
+       t = 0;
+       while (stp->blocks && isPartiallyFull(stp->blocks))
+       {
+           bd = stp->blocks;
+           stp->blocks = bd->link;
+           ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
+           bd->link = ws->part_list;
+           ws->part_list = bd;
+           ws->n_part_blocks += 1;
+           bd->u.scan = bd->free;
+           stp->n_blocks -= 1;
+           stp->n_words -= bd->free - bd->start;
+           t++;
+           if (t == n_gc_threads) t = 0;
+       }
     }
+
     // Move the private mutable lists from each capability onto the
     // main mutable list for the generation.
     for (i = 0; i < n_capabilities; i++) {
@@ -1345,13 +1454,18 @@ resize_generations (void)
    nat g;
 
    if (major_gc && RtsFlags.GcFlags.generations > 1) {
-       nat live, size, min_alloc;
+       nat live, size, min_alloc, words;
       nat max  = RtsFlags.GcFlags.maxHeapSize;
       nat gens = RtsFlags.GcFlags.generations;
 
       // live in the oldest generations
-       live = (oldest_gen->steps[0].n_words + BLOCK_SIZE_W - 1) / BLOCK_SIZE_W+
-           oldest_gen->steps[0].n_large_blocks;
+       if (oldest_gen->steps[0].live_estimate != 0) {
+           words = oldest_gen->steps[0].live_estimate;
+       } else {
+           words = oldest_gen->steps[0].n_words;
+       }
+       live = (words + BLOCK_SIZE_W - 1) / BLOCK_SIZE_W +
+           oldest_gen->steps[0].n_large_blocks;
 
       // default max size for all generations except zero
       size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
@@ -1368,13 +1482,19 @@ resize_generations (void)
           (max > 0 &&
            oldest_gen->steps[0].n_blocks >
            (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
-           oldest_gen->steps[0].is_compacted = 1;
+           oldest_gen->steps[0].mark = 1;
+           oldest_gen->steps[0].compact = 1;
 //         debugBelch("compaction: on\n", live);
       } else {
-           oldest_gen->steps[0].is_compacted = 0;
+           oldest_gen->steps[0].mark = 0;
+           oldest_gen->steps[0].compact = 0;
 //         debugBelch("compaction: off\n", live);
       }
 
+       if (RtsFlags.GcFlags.sweep) {
+           oldest_gen->steps[0].mark = 1;
+       }
+
       // if we're going to go over the maximum heap size, reduce the
       // size of the generations accordingly.  The calculation is
       // different if compaction is turned on, because we don't need
@@ -1388,7 +1508,7 @@ resize_generations (void)
           heapOverflow();
       }
 
-       if (oldest_gen->steps[0].is_compacted) {
+       if (oldest_gen->steps[0].compact) {
           if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
               size = (max - min_alloc) / ((gens - 1) * 2 - 1);
           }
@@ -1439,7 +1559,7 @@ resize_nursery (void)
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
-   blocks = g0s0->n_old_blocks;
+   blocks = g0s0->n_blocks;
 
   if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
        blocks * RtsFlags.GcFlags.oldGenFactor * 2 >