#include "Trace.h"
#include "RetainerProfile.h"
#include "RaiseAsync.h"
-#include "Sparks.h"
#include "Papi.h"
#include "GC.h"
#include "GCUtils.h"
#include "MarkWeak.h"
#include "Sparks.h"
+#include "Sweep.h"
#include <string.h> // for memset()
#include <unistd.h>
// For stats:
long copied; // *words* copied & scavenged during this GC
-#ifdef THREADED_RTS
-SpinLock recordMutableGen_sync;
-#endif
+DECLARE_GCT
/* -----------------------------------------------------------------------------
Static function declarations
static void mark_root (void *user, StgClosure **root);
static void zero_static_object_list (StgClosure* first_static);
static nat initialise_N (rtsBool force_major_gc);
-static void alloc_gc_threads (void);
static void init_collected_gen (nat g, nat threads);
static void init_uncollected_gen (nat g, nat threads);
static void init_gc_thread (gc_thread *t);
static void scavenge_until_all_done (void);
static nat inc_running (void);
static nat dec_running (void);
-static void wakeup_gc_threads (nat n_threads);
-static void shutdown_gc_threads (nat n_threads);
+static void wakeup_gc_threads (nat n_threads, nat me);
+static void shutdown_gc_threads (nat n_threads, nat me);
#if 0 && defined(DEBUG)
static void gcCAFs (void);
-------------------------------------------------------------------------- */
void
-GarbageCollect ( rtsBool force_major_gc )
+GarbageCollect (rtsBool force_major_gc,
+ nat gc_type USED_IF_THREADS,
+ Capability *cap USED_IF_THREADS)
{
bdescr *bd;
step *stp;
lnat live, allocated, max_copied, avg_copied, slop;
- lnat oldgen_saved_blocks = 0;
gc_thread *saved_gct;
nat g, s, t, n;
*/
n = initialise_N(force_major_gc);
- /* Allocate + initialise the gc_thread structures.
- */
- alloc_gc_threads();
-
/* Start threads, so they can be spinning up while we finish initialisation.
*/
start_gc_threads();
+#if defined(THREADED_RTS)
/* How many threads will be participating in this GC?
- * We don't try to parallelise minor GC.
+ * We don't try to parallelise minor GCs (unless the user asks for
+ * it with +RTS -gn0), or mark/compact/sweep GC.
*/
-#if defined(THREADED_RTS)
- if (n < (4*1024*1024 / BLOCK_SIZE)) {
- n_gc_threads = 1;
+ if (gc_type == PENDING_GC_PAR) {
+ n_gc_threads = RtsFlags.ParFlags.nNodes;
} else {
- n_gc_threads = RtsFlags.ParFlags.gcThreads;
+ n_gc_threads = 1;
}
#else
n_gc_threads = 1;
#endif
+
trace(TRACE_gc|DEBUG_gc, "GC (gen %d): %d KB to collect, %ld MB in use, using %d thread(s)",
N, n * (BLOCK_SIZE / 1024), mblocks_allocated, n_gc_threads);
memInventory(traceClass(DEBUG_gc));
#endif
- // check stack sanity *before* GC (ToDo: check all threads)
+ // check stack sanity *before* GC
IF_DEBUG(sanity, checkFreeListSanity());
+ IF_DEBUG(sanity, checkMutableLists(rtsTrue));
// Initialise all our gc_thread structures
for (t = 0; t < n_gc_threads; t++) {
/* Allocate a mark stack if we're doing a major collection.
*/
- if (major_gc) {
- mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
+ if (major_gc && oldest_gen->steps[0].mark) {
+ nat mark_stack_blocks;
+ mark_stack_blocks = stg_max(MARK_STACK_BLOCKS,
+ oldest_gen->steps[0].n_old_blocks / 100);
+ mark_stack_bdescr = allocGroup(mark_stack_blocks);
mark_stack = (StgPtr *)mark_stack_bdescr->start;
mark_sp = mark_stack;
- mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
+ mark_splim = mark_stack + (mark_stack_blocks * BLOCK_SIZE_W);
} else {
mark_stack_bdescr = NULL;
}
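   // A quick check on the sizing above, assuming the usual 4KB
   // blocks: scaling to n_old_blocks/100 gives the mark stack roughly
   // 1% of the old generation, e.g. a 400MB old generation (~100K
   // blocks) gets a ~1000-block (~4MB) mark stack, with
   // MARK_STACK_BLOCKS as the floor for small heaps.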
// this is the main thread
+#ifdef THREADED_RTS
+ if (n_gc_threads == 1) {
+ gct = gc_threads[0];
+ } else {
+ gct = gc_threads[cap->no];
+ }
+#else
gct = gc_threads[0];
+#endif
/* -----------------------------------------------------------------------
* follow all the roots that we know about:
- * - mutable lists from each generation > N
- * we want to *scavenge* these roots, not evacuate them: they're not
- * going to move in this GC.
- * Also do them in reverse generation order, for the usual reason:
- * namely to reduce the likelihood of spurious old->new pointers.
*/
- for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
- generations[g].saved_mut_list = generations[g].mut_list;
- generations[g].mut_list = allocBlock();
- // mut_list always has at least one block.
- }
// the main thread is running: this prevents any other threads from
// exiting prematurely, so we can start them now.
// NB. do this after the mutable lists have been saved above, otherwise
// the other GC threads will be writing into the old mutable lists.
inc_running();
- wakeup_gc_threads(n_gc_threads);
-
+ wakeup_gc_threads(n_gc_threads, gct->thread_index);
+
+ // Mutable lists from each generation > N
+ // we want to *scavenge* these roots, not evacuate them: they're not
+ // going to move in this GC.
+ // Also do them in reverse generation order, for the usual reason:
+ // namely to reduce the likelihood of spurious old->new pointers.
+ //
for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
- scavenge_mutable_list(&generations[g]);
+ scavenge_mutable_list(generations[g].saved_mut_list, &generations[g]);
+ freeChain_sync(generations[g].saved_mut_list);
+ generations[g].saved_mut_list = NULL;
+
+ }
+
+ // scavenge the capability-private mutable lists. This isn't part
+ // of markSomeCapabilities() because markSomeCapabilities() can only
+ // call back into the GC via mark_root() (due to the gct register
+ // variable).
+ if (n_gc_threads == 1) {
+ for (n = 0; n < n_capabilities; n++) {
+ scavenge_capability_mut_lists(&capabilities[n]);
+ }
+ } else {
+ scavenge_capability_mut_lists(&capabilities[gct->thread_index]);
}
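
  // The shape of a mutable-list walk, as a minimal sketch (field
  // names match bdescr; the real scavenge_mutable_list() also puts
  // an entry back on the list when scavenge_one() fails to promote
  // everything it points to):
  //
  //     static void scavenge_mut_list_sketch (bdescr *bd)
  //     {
  //         StgPtr q;
  //         for (; bd != NULL; bd = bd->link) {
  //             // each block's payload is a dense array of
  //             // pointers to closures in this generation
  //             for (q = bd->start; q < bd->free; q++) {
  //                 scavenge_one((StgPtr)*q);
  //             }
  //         }
  //     }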
// follow roots from the CAF list (used by GHCi)
// follow all the roots that the application knows about.
gct->evac_step = 0;
- markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads);
+ markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads,
+ rtsTrue/*prune sparks*/);
#if defined(RTS_USER_SIGNALS)
// mark the signal handlers (signals should be already blocked)
break;
}
- shutdown_gc_threads(n_gc_threads);
+ shutdown_gc_threads(n_gc_threads, gct->thread_index);
// Update pointers from the Task list
update_task_list();
#endif
// NO MORE EVACUATION AFTER THIS POINT!
- // Finally: compaction of the oldest generation.
- if (major_gc && oldest_gen->steps[0].is_compacted) {
- // save number of blocks for stats
- oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
- compact(gct->scavenged_static_objects);
- }
-
- IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
// Two-space collector: free the old to-space.
// g0s0->old_blocks is the old nursery
}
}
- // For each workspace, in each thread:
- // * clear the BF_EVACUATED flag from each copied block
- // * move the copied blocks to the step
+ // For each workspace, in each thread, move the copied blocks to the step
{
gc_thread *thr;
step_workspace *ws;
ws->step->blocks = ws->scavd_list;
}
ws->step->n_blocks += ws->n_scavd_blocks;
+ }
+ }
+
+ // Add all the partial blocks *after* we've added all the full
+ // blocks. This is so that we can grab the partial blocks back
+ // again and try to fill them up in the next GC.
+ for (t = 0; t < n_gc_threads; t++) {
+ thr = gc_threads[t];
+
+ // not step 0
+ if (RtsFlags.GcFlags.generations == 1) {
+ s = 0;
+ } else {
+ s = 1;
+ }
+ for (; s < total_steps; s++) {
+ ws = &thr->steps[s];
prev = NULL;
for (bd = ws->part_list; bd != NULL; bd = next) {
}
}
+ // Finally: compact or sweep the oldest generation.
+ if (major_gc && oldest_gen->steps[0].mark) {
+ if (oldest_gen->steps[0].compact)
+ compact(gct->scavenged_static_objects);
+ else
+ sweep(&oldest_gen->steps[0]);
+ }
+
/* run through all the generations/steps and tidy up
*/
copied = 0;
for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
mut_list_size += bd->free - bd->start;
}
+ for (n = 0; n < n_capabilities; n++) {
+ for (bd = capabilities[n].mut_lists[g];
+ bd != NULL; bd = bd->link) {
+ mut_list_size += bd->free - bd->start;
+ }
+ }
copied += mut_list_size;
debugTrace(DEBUG_gc,
}
for (s = 0; s < generations[g].n_steps; s++) {
- bdescr *next;
+ bdescr *next, *prev;
stp = &generations[g].steps[s];
// for generations we collected...
   * freed blocks will probably be quickly recycled.
*/
if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
- if (stp->is_compacted)
+ if (stp->mark)
{
- // for a compacted step, just shift the new to-space
- // onto the front of the now-compacted existing blocks.
- for (bd = stp->blocks; bd != NULL; bd = bd->link) {
- stp->n_words += bd->free - bd->start;
- }
// tack the new blocks on the end of the existing blocks
if (stp->old_blocks != NULL) {
+
+ prev = NULL;
for (bd = stp->old_blocks; bd != NULL; bd = next) {
- // NB. this step might not be compacted next
- // time, so reset the BF_COMPACTED flags.
- // They are set before GC if we're going to
- // compact. (search for BF_COMPACTED above).
- bd->flags &= ~BF_COMPACTED;
- next = bd->link;
- if (next == NULL) {
- bd->link = stp->blocks;
- }
+
+ next = bd->link;
+
+ if (!(bd->flags & BF_MARKED))
+ {
+ if (prev == NULL) {
+ stp->old_blocks = next;
+ } else {
+ prev->link = next;
+ }
+ freeGroup(bd);
+ stp->n_old_blocks--;
+ }
+ else
+ {
+ stp->n_words += bd->free - bd->start;
+
+ // NB. this step might not be compacted next
+ // time, so reset the BF_MARKED flags.
+ // They are set before GC if we're going to
+ // compact. (search for BF_MARKED above).
+ bd->flags &= ~BF_MARKED;
+
+ // between GCs, all blocks in the heap except
+ // for the nursery have the BF_EVACUATED flag set.
+ bd->flags |= BF_EVACUATED;
+
+ prev = bd;
+ }
}
- stp->blocks = stp->old_blocks;
+
+ if (prev != NULL) {
+ prev->link = stp->blocks;
+ stp->blocks = stp->old_blocks;
+ }
}
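	    // Corner case worth noting: if every old block was
	    // unmarked and freed, prev is still NULL here, the splice
	    // is skipped, and stp->blocks is left holding only the
	    // new to-space blocks.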
// add the new blocks to the block tally
stp->n_blocks += stp->n_old_blocks;
// send exceptions to any threads which were about to die
RELEASE_SM_LOCK;
resurrectThreads(resurrected_threads);
+ performPendingThrowTos(exception_threads);
ACQUIRE_SM_LOCK;
// Update the stable pointer hash table.
slop = calcLiveBlocks() * BLOCK_SIZE_W - live;
stat_endGC(allocated, live, copied, N, max_copied, avg_copied, slop);
+ // Guess which generation we'll collect *next* time
+ initialise_N(force_major_gc);
+
#if defined(RTS_USER_SIGNALS)
if (RtsFlags.MiscFlags.install_signal_handlers) {
// unblock signals again
Initialise the gc_thread structures.
-------------------------------------------------------------------------- */
+#define GC_THREAD_INACTIVE 0
+#define GC_THREAD_STANDING_BY 1
+#define GC_THREAD_RUNNING 2
+#define GC_THREAD_WAITING_TO_CONTINUE 3
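+
+// The intended lifecycle of a gc_thread, pieced together from the
+// code below (gcWorkerThread, waitForGcThreads, wakeup_gc_threads,
+// releaseGCThreads):
+//
+//   INACTIVE            --> STANDING_BY          gcWorkerThread(), at the rendezvous
+//   STANDING_BY         --> RUNNING              wakeup_gc_threads(), leader releases gc_spin
+//   RUNNING             --> WAITING_TO_CONTINUE  gcWorkerThread(), scavenging done
+//   WAITING_TO_CONTINUE --> INACTIVE             releaseGCThreads(), leader releases mut_spin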
+
static gc_thread *
alloc_gc_thread (int n)
{
#ifdef THREADED_RTS
t->id = 0;
- initCondition(&t->wake_cond);
- initMutex(&t->wake_mutex);
-    t->wakeup = rtsTrue; // starts true, so we can wait for the
+ initSpinLock(&t->gc_spin);
+ initSpinLock(&t->mut_spin);
+ ACQUIRE_SPIN_LOCK(&t->gc_spin);
+    t->wakeup = GC_THREAD_INACTIVE; // starts inactive; the worker
                // announces GC_THREAD_STANDING_BY when it is ready,
                // see gcWorkerThread
- t->exit = rtsFalse;
#endif
t->thread_index = n;
}
-static void
-alloc_gc_threads (void)
+void
+initGcThreads (void)
{
if (gc_threads == NULL) {
#if defined(THREADED_RTS)
nat i;
- gc_threads = stgMallocBytes (RtsFlags.ParFlags.gcThreads *
+ gc_threads = stgMallocBytes (RtsFlags.ParFlags.nNodes *
sizeof(gc_thread*),
"alloc_gc_threads");
- for (i = 0; i < RtsFlags.ParFlags.gcThreads; i++) {
+ for (i = 0; i < RtsFlags.ParFlags.nNodes; i++) {
gc_threads[i] = alloc_gc_thread(i);
}
#else
return n_running;
}
+static rtsBool
+any_work (void)
+{
+ int s;
+ step_workspace *ws;
+
+ gct->any_work++;
+
+ write_barrier();
+
+ // scavenge objects in compacted generation
+ if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
+ (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+ return rtsTrue;
+ }
+
+ // Check for global work in any step. We don't need to check for
+ // local work, because we have already exited scavenge_loop(),
+ // which means there is no local work for this thread.
+ for (s = total_steps-1; s >= 0; s--) {
+ if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+ ws = &gct->steps[s];
+ if (ws->todo_large_objects) return rtsTrue;
+ if (ws->step->todos) return rtsTrue;
+ }
+
+ gct->no_work++;
+
+ return rtsFalse;
+}
+
static void
scavenge_until_all_done (void)
{
debugTrace(DEBUG_gc, "GC thread %d working", gct->thread_index);
loop:
+#if defined(THREADED_RTS)
+ if (n_gc_threads > 1) {
+ scavenge_loop();
+ } else {
+ scavenge_loop1();
+ }
+#else
scavenge_loop();
+#endif
+
// scavenge_loop() only exits when there's no work to do
r = dec_running();
gct->thread_index, r);
while (gc_running_threads != 0) {
- usleep(1);
+ // usleep(1);
if (any_work()) {
inc_running();
goto loop;
}
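
// The termination protocol in brief: gc_running_threads counts the
// threads that believe they have work. A thread whose local work runs
// dry calls dec_running() and then spins; if any_work() spots new
// global work it re-registers with inc_running() and rejoins the loop.
// Only when gc_running_threads reaches zero with no work left does
// everyone fall out of scavenge_until_all_done().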
#if defined(THREADED_RTS)
-//
-// gc_thread_work(): Scavenge until there's no work left to do and all
-// the running threads are idle.
-//
-static void
-gc_thread_work (void)
+
+void
+gcWorkerThread (Capability *cap)
{
- // gc_running_threads has already been incremented for us; this is
- // a worker thread and the main thread bumped gc_running_threads
- // before waking us up.
+ cap->in_gc = rtsTrue;
+ gct = gc_threads[cap->no];
+ gct->id = osThreadId();
+
+ // Wait until we're told to wake up
+ RELEASE_SPIN_LOCK(&gct->mut_spin);
+ gct->wakeup = GC_THREAD_STANDING_BY;
+ debugTrace(DEBUG_gc, "GC thread %d standing by...", gct->thread_index);
+ ACQUIRE_SPIN_LOCK(&gct->gc_spin);
+
+#ifdef USE_PAPI
+ // start performance counters in this thread...
+ if (gct->papi_events == -1) {
+ papi_init_eventset(&gct->papi_events);
+ }
+ papi_thread_start_gc1_count(gct->papi_events);
+#endif
+
// Every thread evacuates some roots.
gct->evac_step = 0;
- markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads);
+ markSomeCapabilities(mark_root, gct, gct->thread_index, n_gc_threads,
+ rtsTrue/*prune sparks*/);
+ scavenge_capability_mut_lists(&capabilities[gct->thread_index]);
scavenge_until_all_done();
-}
-
-
-static void
-gc_thread_mainloop (void)
-{
- while (!gct->exit) {
-
- // Wait until we're told to wake up
- ACQUIRE_LOCK(&gct->wake_mutex);
- gct->wakeup = rtsFalse;
- while (!gct->wakeup) {
- debugTrace(DEBUG_gc, "GC thread %d standing by...",
- gct->thread_index);
- waitCondition(&gct->wake_cond, &gct->wake_mutex);
- }
- RELEASE_LOCK(&gct->wake_mutex);
- if (gct->exit) break;
-
+
#ifdef USE_PAPI
- // start performance counters in this thread...
- if (gct->papi_events == -1) {
- papi_init_eventset(&gct->papi_events);
- }
- papi_thread_start_gc1_count(gct->papi_events);
+ // count events in this thread towards the GC totals
+ papi_thread_stop_gc1_count(gct->papi_events);
#endif
- gc_thread_work();
+ // Wait until we're told to continue
+ RELEASE_SPIN_LOCK(&gct->gc_spin);
+ gct->wakeup = GC_THREAD_WAITING_TO_CONTINUE;
+ debugTrace(DEBUG_gc, "GC thread %d waiting to continue...",
+ gct->thread_index);
+ ACQUIRE_SPIN_LOCK(&gct->mut_spin);
+ debugTrace(DEBUG_gc, "GC thread %d on my way...", gct->thread_index);
+}
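+
+// A sketch of the interleaving the two spin locks are meant to give
+// us ("leader" below is the thread that called GarbageCollect()):
+//
+//   worker: RELEASE(mut_spin); wakeup = STANDING_BY;
+//           ACQUIRE(gc_spin)                                 // blocks
+//   leader: sees STANDING_BY; wakeup = RUNNING;
+//           ACQUIRE(mut_spin); RELEASE(gc_spin)              // worker starts GC
+//   worker: ...scavenging...; RELEASE(gc_spin);
+//           wakeup = WAITING_TO_CONTINUE; ACQUIRE(mut_spin)  // blocks
+//   leader: wakeup = INACTIVE; ACQUIRE(gc_spin);
+//           RELEASE(mut_spin)                                // worker resumes mutating
+//
+// So a worker holds mut_spin while it is mutating and gc_spin while
+// it is collecting, and the leader always holds whichever lock the
+// workers must not yet take.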
-#ifdef USE_PAPI
- // count events in this thread towards the GC totals
- papi_thread_stop_gc1_count(gct->papi_events);
-#endif
- }
-}
#endif
-#if defined(THREADED_RTS)
-static void
-gc_thread_entry (gc_thread *my_gct)
+void
+waitForGcThreads (Capability *cap USED_IF_THREADS)
{
- gct = my_gct;
- debugTrace(DEBUG_gc, "GC thread %d starting...", gct->thread_index);
- gct->id = osThreadId();
- gc_thread_mainloop();
-}
+#if defined(THREADED_RTS)
+ nat n_threads = RtsFlags.ParFlags.nNodes;
+ nat me = cap->no;
+ nat i, j;
+ rtsBool retry = rtsTrue;
+
+ while(retry) {
+ for (i=0; i < n_threads; i++) {
+ if (i == me) continue;
+ if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
+ prodCapability(&capabilities[i], cap->running_task);
+ }
+ }
+ for (j=0; j < 10000000; j++) {
+ retry = rtsFalse;
+ for (i=0; i < n_threads; i++) {
+ if (i == me) continue;
+ write_barrier();
+ setContextSwitches();
+ if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY) {
+ retry = rtsTrue;
+ }
+ }
+ if (!retry) break;
+ }
+ }
#endif
+}
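+
+// Note on waitForGcThreads() above: two nudges are needed to drive
+// every capability to the rendezvous. prodCapability() wakes an idle
+// capability, and setContextSwitches() asks the running mutator
+// threads to yield at their next safe point so their capabilities
+// can enter the GC.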
static void
start_gc_threads (void)
{
#if defined(THREADED_RTS)
- nat i;
- OSThreadId id;
- static rtsBool done = rtsFalse;
-
gc_running_threads = 0;
initMutex(&gc_running_mutex);
-
- if (!done) {
- // Start from 1: the main thread is 0
- for (i = 1; i < RtsFlags.ParFlags.gcThreads; i++) {
- createOSThread(&id, (OSThreadProc*)&gc_thread_entry,
- gc_threads[i]);
- }
- done = rtsTrue;
- }
#endif
}
static void
-wakeup_gc_threads (nat n_threads USED_IF_THREADS)
+wakeup_gc_threads (nat n_threads USED_IF_THREADS, nat me USED_IF_THREADS)
{
#if defined(THREADED_RTS)
nat i;
- for (i=1; i < n_threads; i++) {
+ for (i=0; i < n_threads; i++) {
+ if (i == me) continue;
inc_running();
debugTrace(DEBUG_gc, "waking up gc thread %d", i);
- do {
- ACQUIRE_LOCK(&gc_threads[i]->wake_mutex);
- if (gc_threads[i]->wakeup) {
- RELEASE_LOCK(&gc_threads[i]->wake_mutex);
- continue;
- } else {
- break;
- }
- } while (1);
- gc_threads[i]->wakeup = rtsTrue;
- signalCondition(&gc_threads[i]->wake_cond);
- RELEASE_LOCK(&gc_threads[i]->wake_mutex);
+	if (gc_threads[i]->wakeup != GC_THREAD_STANDING_BY)
+	    barf("wakeup_gc_threads");
+
+ gc_threads[i]->wakeup = GC_THREAD_RUNNING;
+ ACQUIRE_SPIN_LOCK(&gc_threads[i]->mut_spin);
+ RELEASE_SPIN_LOCK(&gc_threads[i]->gc_spin);
}
#endif
}
// standby state, otherwise they may still be executing inside
// any_work(), and may even remain awake until the next GC starts.
static void
-shutdown_gc_threads (nat n_threads USED_IF_THREADS)
+shutdown_gc_threads (nat n_threads USED_IF_THREADS, nat me USED_IF_THREADS)
+{
+#if defined(THREADED_RTS)
+ nat i;
+ for (i=0; i < n_threads; i++) {
+ if (i == me) continue;
+ while (gc_threads[i]->wakeup != GC_THREAD_WAITING_TO_CONTINUE) { write_barrier(); }
+ }
+#endif
+}
+
+void
+releaseGCThreads (Capability *cap USED_IF_THREADS)
{
#if defined(THREADED_RTS)
+ nat n_threads = RtsFlags.ParFlags.nNodes;
+ nat me = cap->no;
nat i;
- rtsBool wakeup;
- for (i=1; i < n_threads; i++) {
- do {
- ACQUIRE_LOCK(&gc_threads[i]->wake_mutex);
- wakeup = gc_threads[i]->wakeup;
- // wakeup is false while the thread is waiting
- RELEASE_LOCK(&gc_threads[i]->wake_mutex);
- } while (wakeup);
+ for (i=0; i < n_threads; i++) {
+ if (i == me) continue;
+ if (gc_threads[i]->wakeup != GC_THREAD_WAITING_TO_CONTINUE)
+ barf("releaseGCThreads");
+
+ gc_threads[i]->wakeup = GC_THREAD_INACTIVE;
+ ACQUIRE_SPIN_LOCK(&gc_threads[i]->gc_spin);
+ RELEASE_SPIN_LOCK(&gc_threads[i]->mut_spin);
}
#endif
}
for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+ ASSERT(stp->gen_no == g);
+
+ // we'll construct a new list of threads in this step
+ // during GC, throw away the current list.
+ stp->old_threads = stp->threads;
+ stp->threads = END_TSO_QUEUE;
+
// generation 0, step 0 doesn't need to-space
if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
continue;
}
- stp = &generations[g].steps[s];
- ASSERT(stp->gen_no == g);
-
// deprecate the existing blocks
stp->old_blocks = stp->blocks;
stp->n_old_blocks = stp->n_blocks;
stp->blocks = NULL;
stp->n_blocks = 0;
stp->n_words = 0;
+ stp->live_estimate = 0;
// we don't have any to-be-scavenged blocks yet
stp->todos = NULL;
}
// for a compacted step, we need to allocate the bitmap
- if (stp->is_compacted) {
+ if (stp->mark) {
nat bitmap_size; // in bytes
bdescr *bitmap_bdescr;
StgWord *bitmap;
bd->u.bitmap = bitmap;
bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
- // Also at this point we set the BF_COMPACTED flag
+ // Also at this point we set the BF_MARKED flag
// for this block. The invariant is that
- // BF_COMPACTED is always unset, except during GC
+ // BF_MARKED is always unset, except during GC
// when it is set on those blocks which will be
// compacted.
- bd->flags |= BF_COMPACTED;
+ if (!(bd->flags & BF_FRAGMENTED)) {
+ bd->flags |= BF_MARKED;
+ }
}
}
}
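
// For concreteness, the bitmap arithmetic above (assuming 4KB blocks
// on a 64-bit platform): each block holds BLOCK_SIZE_W = 512 words
// and needs one mark bit per word, i.e. 512 bits = 8 words of bitmap
// per block, an overhead of about 1.6% of the marked step.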
static void
init_uncollected_gen (nat g, nat threads)
{
- nat s, t, i;
+ nat s, t, n;
step_workspace *ws;
step *stp;
bdescr *bd;
+ // save the current mutable lists for this generation, and
+ // allocate a fresh block for each one. We'll traverse these
+ // mutable lists as roots early on in the GC.
+ generations[g].saved_mut_list = generations[g].mut_list;
+ generations[g].mut_list = allocBlock();
+ for (n = 0; n < n_capabilities; n++) {
+ capabilities[n].saved_mut_lists[g] = capabilities[n].mut_lists[g];
+ capabilities[n].mut_lists[g] = allocBlock();
+ }
+
for (s = 0; s < generations[g].n_steps; s++) {
stp = &generations[g].steps[s];
stp->scavenged_large_objects = NULL;
stp->n_scavenged_large_blocks = 0;
}
- for (t = 0; t < threads; t++) {
- for (s = 0; s < generations[g].n_steps; s++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+
+ for (t = 0; t < threads; t++) {
ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
- stp = ws->step;
ws->buffer_todo_bd = NULL;
ws->todo_large_objects = NULL;
alloc_todo_block(ws,0);
}
}
- }
- // Move the private mutable lists from each capability onto the
- // main mutable list for the generation.
- for (i = 0; i < n_capabilities; i++) {
- for (bd = capabilities[i].mut_lists[g];
- bd->link != NULL; bd = bd->link) {
- /* nothing */
- }
- bd->link = generations[g].mut_list;
- generations[g].mut_list = capabilities[i].mut_lists[g];
- capabilities[i].mut_lists[g] = allocBlock();
+ // deal out any more partial blocks to the threads' part_lists
+ t = 0;
+ while (stp->blocks && isPartiallyFull(stp->blocks))
+ {
+ bd = stp->blocks;
+ stp->blocks = bd->link;
+ ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
+ bd->link = ws->part_list;
+ ws->part_list = bd;
+ ws->n_part_blocks += 1;
+ bd->u.scan = bd->free;
+ stp->n_blocks -= 1;
+ stp->n_words -= bd->free - bd->start;
+ t++;
+ if (t == n_gc_threads) t = 0;
+ }
}
}
t->static_objects = END_OF_STATIC_LIST;
t->scavenged_static_objects = END_OF_STATIC_LIST;
t->scan_bd = NULL;
+ t->mut_lists = capabilities[t->thread_index].mut_lists;
t->evac_step = 0;
t->failed_to_evac = rtsFalse;
t->eager_promotion = rtsTrue;
nat g;
if (major_gc && RtsFlags.GcFlags.generations > 1) {
- nat live, size, min_alloc;
+ nat live, size, min_alloc, words;
nat max = RtsFlags.GcFlags.maxHeapSize;
nat gens = RtsFlags.GcFlags.generations;
// live in the oldest generations
- live = (oldest_gen->steps[0].n_words + BLOCK_SIZE_W - 1) / BLOCK_SIZE_W+
- oldest_gen->steps[0].n_large_blocks;
+ if (oldest_gen->steps[0].live_estimate != 0) {
+ words = oldest_gen->steps[0].live_estimate;
+ } else {
+ words = oldest_gen->steps[0].n_words;
+ }
+ live = (words + BLOCK_SIZE_W - 1) / BLOCK_SIZE_W +
+ oldest_gen->steps[0].n_large_blocks;
// default max size for all generations except zero
size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
(max > 0 &&
oldest_gen->steps[0].n_blocks >
(RtsFlags.GcFlags.compactThreshold * max) / 100))) {
- oldest_gen->steps[0].is_compacted = 1;
+ oldest_gen->steps[0].mark = 1;
+ oldest_gen->steps[0].compact = 1;
// debugBelch("compaction: on\n", live);
} else {
- oldest_gen->steps[0].is_compacted = 0;
+ oldest_gen->steps[0].mark = 0;
+ oldest_gen->steps[0].compact = 0;
// debugBelch("compaction: off\n", live);
}
+ if (RtsFlags.GcFlags.sweep) {
+ oldest_gen->steps[0].mark = 1;
+ }
+
// if we're going to go over the maximum heap size, reduce the
// size of the generations accordingly. The calculation is
// different if compaction is turned on, because we don't need
heapOverflow();
}
- if (oldest_gen->steps[0].is_compacted) {
+ if (oldest_gen->steps[0].compact) {
if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
size = (max - min_alloc) / ((gens - 1) * 2 - 1);
}
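	    // Where that formula comes from: the compacted oldest
	    // generation needs ~size blocks, while each of the other
	    // gens-2 old generations is copied and so can need up to
	    // twice that during GC, giving roughly
	    // size * (1 + 2*(gens-2)) + min_alloc <= max. Solving for
	    // size yields the divisor (gens - 1) * 2 - 1, which is
	    // just 2*(gens - 2) + 1.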