X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2FSparks.c;h=9e4492ac1925c66b4fe942d8a6480671200d4aac;hb=2b398b2215fd5238e222bcb3013aa41d7b631cfa;hp=360ea41a052f54e8cea28bbdcc6711dd20161ca5;hpb=99df892cc9620fcc92747b79bba75dad8a1d295c;p=ghc-hetmet.git

diff --git a/rts/Sparks.c b/rts/Sparks.c
index 360ea41..9e4492a 100644
--- a/rts/Sparks.c
+++ b/rts/Sparks.c
@@ -44,6 +44,7 @@
 #include "RtsUtils.h"
 #include "ParTicky.h"
 #include "Trace.h"
+#include "Prelude.h"
 
 #include "SMP.h" // for cas
 
@@ -201,11 +202,15 @@ steal(SparkPool *deque)
   StgClosurePtr stolen;
   StgWord b,t;
 
-  ASSERT_SPARK_POOL_INVARIANTS(deque);
+// Can't do this on someone else's spark pool:
+//  ASSERT_SPARK_POOL_INVARIANTS(deque);
 
   b = deque->bottom;
   t = deque->top;
-  if (b - t <= 0 ) {
+
+  // NB. b and t are unsigned; we need a signed value for the test
+  // below.
+  if ((long)b - (long)t <= 0 ) {
     return NULL; /* already looks empty, abort */
   }
 
@@ -221,14 +226,17 @@ steal(SparkPool *deque)
     return NULL;
   }
   /* else: OK, top has been incremented by the cas call */
-  ASSERT_SPARK_POOL_INVARIANTS(deque);
+// Can't do this on someone else's spark pool:
+//  ASSERT_SPARK_POOL_INVARIANTS(deque);
+
   /* return stolen element */
   return stolen;
 }
 
 StgClosure *
-tryStealSpark (SparkPool *pool)
+tryStealSpark (Capability *cap)
 {
+  SparkPool *pool = cap->sparks;
   StgClosure *stolen;
 
   do {
@@ -253,7 +261,7 @@ looksEmpty(SparkPool* deque)
   StgWord t = deque->top;
   StgWord b = deque->bottom;
   /* try to prefer false negatives by reading top first */
-  return (b - t <= 0);
+  return ((long)b - (long)t <= 0);
   /* => array is *never* completely filled, always 1 place free! */
 }
 
@@ -264,13 +272,13 @@
  * -------------------------------------------------------------------------- */
 
 void
-createSparkThread (Capability *cap, StgClosure *p)
+createSparkThread (Capability *cap)
 {
   StgTSO *tso;
 
-  tso = createGenThread (cap, RtsFlags.GcFlags.initialStkSize, p);
+  tso = createIOThread (cap, RtsFlags.GcFlags.initialStkSize,
+                        &base_GHCziConc_runSparks_closure);
   appendToRunQueue(cap,tso);
-  cap->sparks_converted++;
 }
 
 /* -----------------------------------------------------------------------------
@@ -298,7 +306,10 @@ pushBottom (SparkPool* deque, StgClosurePtr elem)
      This is why we do not just call empty(deque) here.
   */
   t = deque->topBound;
-  if ( b - t >= sz ) { /* nota bene: sz == deque->size - 1, thus ">=" */
+  if ( (StgInt)b - (StgInt)t >= (StgInt)sz ) {
+    /* NB. 1. sz == deque->size - 1, thus ">="
+           2. signed comparison, it is possible that t > b
+    */
     /* could be full, check the real top value in this case */
     t = deque->top;
     deque->topBound = t;
@@ -371,13 +382,14 @@ newSpark (StgRegTable *reg, StgClosure *p)
  * the spark pool only contains sparkable closures.
  * -------------------------------------------------------------------------- */
 
-static void
-pruneSparkQueue (Capability *cap)
+void
+pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
 {
   SparkPool *pool;
-  StgClosurePtr spark, *elements;
+  StgClosurePtr spark, tmp, *elements;
   nat n, pruned_sparks; // stats only
   StgWord botInd,oldBotInd,currInd; // indices in array (always < size)
+  const StgInfoTable *info;
 
   PAR_TICKY_MARK_SPARK_QUEUE_START();
 
@@ -386,6 +398,19 @@
 
   pool = cap->sparks;
 
+  // it is possible that top > bottom, indicating an empty pool.  We
+  // fix that here; this is only necessary because the loop below
+  // assumes it.
+  if (pool->top > pool->bottom)
+      pool->top = pool->bottom;
+
+  // Take this opportunity to reset top/bottom modulo the size of
+  // the array, to avoid overflow.  This is only possible because no
+  // stealing is happening during GC.
+  pool->bottom  -= pool->top & ~pool->moduloSize;
+  pool->top     &= pool->moduloSize;
+  pool->topBound = pool->top;
+
   debugTrace(DEBUG_sched,
              "markSparkQueue: current spark queue len=%d; (hd=%ld; tl=%ld)",
              sparkPoolSize(pool), pool->bottom, pool->top);
@@ -440,14 +465,31 @@
              botInd, otherwise move on */
       spark = elements[currInd];
 
-      /* if valuable work: shift inside the pool */
-      if ( closure_SHOULD_SPARK(spark) ) {
-        elements[botInd] = spark; // keep entry (new address)
-        botInd++;
-        n++;
-      } else {
-        pruned_sparks++; // discard spark
-        cap->sparks_pruned++;
+      // We have to be careful here: in the parallel GC, another
+      // thread might evacuate this closure while we're looking at it,
+      // so grab the info pointer just once.
+      info = spark->header.info;
+      if (IS_FORWARDING_PTR(info)) {
+          tmp = (StgClosure*)UN_FORWARDING_PTR(info);
+          /* if valuable work: shift inside the pool */
+          if (closure_SHOULD_SPARK(tmp)) {
+              elements[botInd] = tmp; // keep entry (new address)
+              botInd++;
+              n++;
+          } else {
+              pruned_sparks++; // discard spark
+              cap->sparks_pruned++;
+          }
+      } else {
+          if (!(closure_flags[INFO_PTR_TO_STRUCT(info)->type] & _NS)) {
+              elements[botInd] = spark; // keep entry (new address)
+              evac (user, &elements[botInd]);
+              botInd++;
+              n++;
+          } else {
+              pruned_sparks++; // discard spark
+              cap->sparks_pruned++;
+          }
       }
       currInd++;
 
@@ -477,15 +519,6 @@
   ASSERT_SPARK_POOL_INVARIANTS(pool);
 }
 
-void
-pruneSparkQueues (void)
-{
-  nat i;
-  for (i = 0; i < n_capabilities; i++) {
-      pruneSparkQueue(&capabilities[i]);
-  }
-}
-
 /* GC for the spark pool, called inside Capability.c for all
    capabilities in turn. Blindly "evac"s complete spark pool. */
 void
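
The (long) and (StgInt) casts added throughout this patch all address the same
pitfall: bottom and top are unsigned words, and after a lost steal race or
during pruning it is possible that top > bottom, so a plain unsigned
subtraction wraps around and the "looks empty" test gives the wrong answer.
The following stand-alone sketch (not part of the patch; StgWord is stood in
for by a plain unsigned long) illustrates the difference:

/* Minimal sketch of the unsigned-wraparound problem fixed above.
   Assumes nothing from the RTS; StgWord is approximated by unsigned long. */
#include <stdio.h>

typedef unsigned long StgWord;   /* stand-in for the RTS word type */

int main(void)
{
    StgWord b = 3, t = 5;        /* top has overtaken bottom: the deque is empty */

    /* Unsigned subtraction wraps to a huge value, so the test is false (0): */
    printf("unsigned test: %d\n", (b - t) <= 0);

    /* Casting to a signed type first gives the intended answer (1): */
    printf("signed test:   %d\n", ((long)b - (long)t) <= 0);

    return 0;
}

The same reasoning applies to the size check in pushBottom, which is why the
patch compares (StgInt) values there rather than raw StgWords.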