gct->evac_step = 0;
GetRoots(mark_root);
+#if defined(RTS_USER_SIGNALS)
+ // mark the signal handlers (signals should be already blocked)
+ markSignalHandlers(mark_root);
+#endif
+
// Mark the weak pointer list, and prepare to detect dead weak pointers.
markWeakPtrList();
initWeakForGC();
resize_generations();
// Guess the amount of live data for stats.
- live = calcLive();
+ live = calcLiveBlocks() * BLOCK_SIZE_W;
+ debugTrace(DEBUG_gc, "Slop: %ldKB",
+ (live - calcLiveWords()) / (1024/sizeof(W_)));
// Free the small objects allocated via allocate(), since this will
// all have been copied into G0S1 now.
gct = saved_gct;
}
+/* -----------------------------------------------------------------------------
+ * Mark all nodes pointed to by sparks in the spark queues (for GC). Does an
+ * implicit slide i.e. after marking all sparks are at the beginning of the
+ * spark pool and the spark pool only contains sparkable closures
+ * -------------------------------------------------------------------------- */
+
+#ifdef THREADED_RTS
+static void
+markSparkQueue (evac_fn evac, Capability *cap)
+{
+ StgClosure **sparkp, **to_sparkp;
+ nat n, pruned_sparks; // stats only
+ StgSparkPool *pool;
+
+ PAR_TICKY_MARK_SPARK_QUEUE_START();
+
+ n = 0;
+ pruned_sparks = 0;
+
+ // Only this capability's own spark pool is traversed here.
+ pool = &(cap->r.rSparks);
+
+ ASSERT_SPARK_POOL_INVARIANTS(pool);
+
+#if defined(PARALLEL_HASKELL)
+ // stats only
+ // NOTE(review): n and pruned_sparks were already zeroed unconditionally
+ // above, so this re-initialisation is redundant (but harmless).
+ n = 0;
+ pruned_sparks = 0;
+#endif
+
+ // Walk the circular buffer from hd to tl. Sparks still worth running
+ // (closure_SHOULD_SPARK) are evacuated and compacted towards hd — the
+ // "implicit slide" from the header comment; the rest are pruned by
+ // simply not being copied to to_sparkp.
+ sparkp = pool->hd;
+ to_sparkp = pool->hd;
+ while (sparkp != pool->tl) {
+ ASSERT(*sparkp!=NULL);
+ ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgClosure *)*sparkp)));
+ // ToDo?: statistics gathering here (also for GUM!)
+ if (closure_SHOULD_SPARK(*sparkp)) {
+ evac(sparkp);
+ *to_sparkp++ = *sparkp;
+ if (to_sparkp == pool->lim) {
+ to_sparkp = pool->base; // wrap around the circular pool
+ }
+ n++;
+ } else {
+ pruned_sparks++;
+ }
+ sparkp++;
+ if (sparkp == pool->lim) {
+ sparkp = pool->base; // wrap around the circular pool
+ }
+ }
+ // hd is left unchanged; the new tl marks the end of the surviving sparks.
+ pool->tl = to_sparkp;
+
+ PAR_TICKY_MARK_SPARK_QUEUE_END(n);
+
+#if defined(PARALLEL_HASKELL)
+ // NOTE(review): n and pruned_sparks are 'nat' but printed with %d —
+ // presumably nat is int-sized here; confirm against the Rts definition.
+ debugTrace(DEBUG_sched,
+ "marked %d sparks and pruned %d sparks on [%x]",
+ n, pruned_sparks, mytid);
+#else
+ debugTrace(DEBUG_sched,
+ "marked %d sparks and pruned %d sparks",
+ n, pruned_sparks);
+#endif
+
+ debugTrace(DEBUG_sched,
+ "new spark queue len=%d; (hd=%p; tl=%p)\n",
+ sparkPoolSize(pool), pool->hd, pool->tl);
+}
+#endif
+
/* ---------------------------------------------------------------------------
Where are the roots that we know about?
------------------------------------------------------------------------ */
-/* This has to be protected either by the scheduler monitor, or by the
- garbage collection monitor (probably the latter).
- KH @ 25/10/99
-*/
-
void
GetRoots( evac_fn evac )
{
Capability *cap;
Task *task;
- for (i = 0; i < n_capabilities; i++) {
+ // Each GC thread is responsible for following roots from the
+ // Capability of the same number. There will usually be the same
+ // or fewer Capabilities as GC threads, but just in case there
+ // are more, we mark every Capability whose number is the GC
+ // thread's index plus a multiple of the number of GC threads.
+ for (i = gct->thread_index; i < n_capabilities; i += n_gc_threads) {
cap = &capabilities[i];
evac((StgClosure **)(void *)&cap->run_queue_hd);
evac((StgClosure **)(void *)&cap->run_queue_tl);
evac((StgClosure **)(void *)&task->suspended_tso);
}
+#if defined(THREADED_RTS)
+ markSparkQueue(evac,cap);
+#endif
}
#if !defined(THREADED_RTS)
evac((StgClosure **)(void *)&blocked_queue_tl);
evac((StgClosure **)(void *)&sleeping_queue);
#endif
-
- // evac((StgClosure **)&blackhole_queue);
-
-#if defined(THREADED_RTS)
- markSparkQueue(evac);
-#endif
-
-#if defined(RTS_USER_SIGNALS)
- // mark the signal handlers (signals should be already blocked)
- markSignalHandlers(evac);
-#endif
}
/* -----------------------------------------------------------------------------
// GarbageCollect(), or this is a worker thread and the main
// thread bumped gc_running_threads before waking us up.
+ // Every thread evacuates some roots.
+ gct->evac_step = 0;
+ GetRoots(mark_root);
+
loop:
scavenge_loop();
// scavenge_loop() only exits when there's no work to do
if (gct->papi_events == -1) {
papi_init_eventset(&gct->papi_events);
}
- papi_thread_start_gc_count(gct->papi_events);
+ papi_thread_start_gc1_count(gct->papi_events);
#endif
gc_thread_work();
#ifdef USE_PAPI
// count events in this thread towards the GC totals
- papi_thread_stop_gc_count(gct->papi_events);
+ papi_thread_stop_gc1_count(gct->papi_events);
#endif
}
}
// If the block at the head of the list in this generation
// is less than 3/4 full, then use it as a todo block.
- if (isPartiallyFull(stp->blocks))
+ if (stp->blocks && isPartiallyFull(stp->blocks))
{
ws->todo_bd = stp->blocks;
+ ws->todo_free = ws->todo_bd->free;
+ ws->todo_lim = ws->todo_bd->start + BLOCK_SIZE_W;
stp->blocks = stp->blocks->link;
stp->n_blocks -= 1;
ws->todo_bd->link = NULL;