#include "RtsUtils.h"
#include "ParTicky.h"
#include "Trace.h"
+#include "Prelude.h"
#include "SMP.h" // for cas
}
StgClosure *
-tryStealSpark (SparkPool *pool)
+tryStealSpark (Capability *cap)
{
+ SparkPool *pool = cap->sparks;
StgClosure *stolen;
do {
* -------------------------------------------------------------------------- */
void
-createSparkThread (Capability *cap, StgClosure *p)
+createSparkThread (Capability *cap)
{
 StgTSO *tso;
+ // Instead of spawning one thread per spark (the old createGenThread
+ // path), run the Haskell-level spark batching loop: a single IO
+ // thread evaluates sparks drawn from the pool until it is empty.
- tso = createGenThread (cap, RtsFlags.GcFlags.initialStkSize, p);
+ tso = createIOThread (cap, RtsFlags.GcFlags.initialStkSize,
+ &base_GHCziConc_runSparks_closure);
 appendToRunQueue(cap,tso);
+ // NOTE(review): the sparks_converted counter bump is dropped here —
+ // presumably the accounting moves to where sparks are actually
+ // taken from the pool; confirm against the rest of the patch.
- cap->sparks_converted++;
}
/* -----------------------------------------------------------------------------
* the spark pool only contains sparkable closures.
* -------------------------------------------------------------------------- */
-static void
-pruneSparkQueue (Capability *cap)
+void
+pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
{
SparkPool *pool;
- StgClosurePtr spark, *elements;
+ StgClosurePtr spark, tmp, *elements;
nat n, pruned_sparks; // stats only
StgWord botInd,oldBotInd,currInd; // indices in array (always < size)
+ const StgInfoTable *info;
PAR_TICKY_MARK_SPARK_QUEUE_START();
pool = cap->sparks;
+ // it is possible that top > bottom, indicating an empty pool. We
+ // fix that here; this is only necessary because the loop below
+ // assumes it.
+ if (pool->top > pool->bottom)
+ pool->top = pool->bottom;
+
+ // Take this opportunity to reset top/bottom modulo the size of
+ // the array, to avoid overflow. This is only possible because no
+ // stealing is happening during GC.
+ pool->bottom -= pool->top & ~pool->moduloSize;
+ pool->top &= pool->moduloSize;
+ pool->topBound = pool->top;
+
debugTrace(DEBUG_sched,
"markSparkQueue: current spark queue len=%d; (hd=%ld; tl=%ld)",
sparkPoolSize(pool), pool->bottom, pool->top);
botInd, otherwise move on */
spark = elements[currInd];
- /* if valuable work: shift inside the pool */
- if ( closure_SHOULD_SPARK(spark) ) {
- elements[botInd] = spark; // keep entry (new address)
- botInd++;
- n++;
- } else {
- pruned_sparks++; // discard spark
- cap->sparks_pruned++;
+ // We have to be careful here: in the parallel GC, another
+ // thread might evacuate this closure while we're looking at it,
+ // so grab the info pointer just once.
+ info = spark->header.info;
+ if (IS_FORWARDING_PTR(info)) {
+ tmp = (StgClosure*)UN_FORWARDING_PTR(info);
+ /* if valuable work: shift inside the pool */
+ if (closure_SHOULD_SPARK(tmp)) {
+ elements[botInd] = tmp; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_pruned++;
+ }
+ } else {
+ if (!(closure_flags[INFO_PTR_TO_STRUCT(info)->type] & _NS)) {
+ elements[botInd] = spark; // keep entry (new address)
+ evac (user, &elements[botInd]);
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_pruned++;
+ }
}
currInd++;
ASSERT_SPARK_POOL_INVARIANTS(pool);
}
-void
-pruneSparkQueues (void)
-{
- nat i;
- for (i = 0; i < n_capabilities; i++) {
- pruneSparkQueue(&capabilities[i]);
- }
-}
-
/* GC for the spark pool, called inside Capability.c for all
capabilities in turn. Blindly "evac"s complete spark pool. */
void