StgTSO *tso;
tso = createIOThread (cap, RtsFlags.GcFlags.initialStkSize,
- &base_GHCziConc_runSparks_closure);
+ (StgClosure *)runSparks_closure);
- postEvent(cap, EVENT_CREATE_SPARK_THREAD, 0, tso->id);
+ traceEventCreateSparkThread(cap, tso->id);
appendToRunQueue(cap,tso);
}
if (closure_SHOULD_SPARK(p)) {
pushWSDeque(pool,p);
- }
-
- cap->sparks_created++;
-
- postEvent(cap, EVENT_CREATE_SPARK, cap->r.rCurrentTSO->id, 0);
+ cap->sparks_created++;
+ } else {
+ cap->sparks_dud++;
+ }
return 1;
}
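
/* For orientation, a minimal, hypothetical sketch of the per-capability
 * spark counters this patch reads and writes. The field names mirror
 * their uses above; the real Capability struct in rts/Capability.h
 * carries far more state, so treat this as illustrative only.
 */
#include <stdint.h>

typedef struct CapabilitySketch_ {
    uintptr_t sparks_created;   /* pushed into the pool by newSpark()         */
    uintptr_t sparks_dud;       /* rejected at creation: already evaluated    */
    uintptr_t sparks_fizzled;   /* pruned: became a value after being sparked */
    uintptr_t sparks_gcd;       /* pruned: unreachable, collected by the GC   */
} CapabilitySketch;
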
* -------------------------------------------------------------------------- */
void
-pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
+pruneSparkQueue (Capability *cap)
{
SparkPool *pool;
StgClosurePtr spark, tmp, *elements;
pool->top &= pool->moduloSize;
pool->topBound = pool->top;
- debugTrace(DEBUG_sched,
-            "markSparkQueue: current spark queue len=%ld; (hd=%ld; tl=%ld)",
+ debugTrace(DEBUG_sparks,
+            "pruneSparkQueue: current spark queue len=%ld; (hd=%ld; tl=%ld)",
sparkPoolSize(pool), pool->bottom, pool->top);
// We have to be careful here: in the parallel GC, another
// thread might evacuate this closure while we're looking at it,
// so grab the info pointer just once.
- info = spark->header.info;
- if (IS_FORWARDING_PTR(info)) {
- tmp = (StgClosure*)UN_FORWARDING_PTR(info);
- /* if valuable work: shift inside the pool */
- if (closure_SHOULD_SPARK(tmp)) {
- elements[botInd] = tmp; // keep entry (new address)
- botInd++;
- n++;
- } else {
- pruned_sparks++; // discard spark
- cap->sparks_pruned++;
- }
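+ // How each case below is counted:
+ //   tagged pointer              -> already a value: fizzled
+ //   forwarding pointer          -> evacuated: kept iff it still SHOULD_SPARK
+ //   heap closure, evacuated     -> kept iff it still SHOULD_SPARK
+ //   heap closure, not evacuated -> unreachable this GC: gcd
+ //   static THUNK, link set      -> still reachable: kept
+ //   static THUNK, link unset    -> unreachable: gcd
+ //   other static closure        -> no work left: fizzled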
+ if (GET_CLOSURE_TAG(spark) != 0) {
+ // Tagged pointer is a value, so the spark has fizzled. It
+ // probably never happens that we get a tagged pointer in
+ // the spark pool, because we would have pruned the spark
+ // during the previous GC cycle if it turned out to be
+ // evaluated, but it doesn't hurt to have this check for
+ // robustness.
+ pruned_sparks++;
+ cap->sparks_fizzled++;
} else {
- if (!(closure_flags[INFO_PTR_TO_STRUCT(info)->type] & _NS)) {
- elements[botInd] = spark; // keep entry (new address)
- evac (user, &elements[botInd]);
- botInd++;
- n++;
+ info = spark->header.info;
+ if (IS_FORWARDING_PTR(info)) {
+ tmp = (StgClosure*)UN_FORWARDING_PTR(info);
+ /* if valuable work: shift inside the pool */
+ if (closure_SHOULD_SPARK(tmp)) {
+ elements[botInd] = tmp; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_fizzled++;
+ }
+ } else if (HEAP_ALLOCED(spark)) {
+ if ((Bdescr((P_)spark)->flags & BF_EVACUATED)) {
+ if (closure_SHOULD_SPARK(spark)) {
+ elements[botInd] = spark; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_fizzled++;
+ }
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_gcd++;
+ }
} else {
- pruned_sparks++; // discard spark
- cap->sparks_pruned++;
+ if (INFO_PTR_TO_STRUCT(info)->type == THUNK_STATIC) {
+ if (*THUNK_STATIC_LINK(spark) != NULL) {
+ elements[botInd] = spark; // keep entry (new address)
+ botInd++;
+ n++;
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_gcd++;
+ }
+ } else {
+ pruned_sparks++; // discard spark
+ cap->sparks_fizzled++;
+ }
}
}
+
currInd++;
// in the loop, we may reach the bounds, and instantly wrap around
pool->bottom = (oldBotInd <= botInd) ? botInd : (botInd + pool->size);
// first free place we did not use (corrected by wraparound)
- debugTrace(DEBUG_sched, "pruned %d sparks", pruned_sparks);
+ debugTrace(DEBUG_sparks, "pruned %d sparks", pruned_sparks);
- debugTrace(DEBUG_sched,
+ debugTrace(DEBUG_sparks,
"new spark queue len=%ld; (hd=%ld; tl=%ld)",
sparkPoolSize(pool), pool->bottom, pool->top);
top++;
}
- debugTrace(DEBUG_sched,
+ debugTrace(DEBUG_sparks,
"traversed spark queue, len=%ld; (hd=%ld; tl=%ld)",
sparkPoolSize(pool), pool->bottom, pool->top);
}
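
/* The pruning logic above leans on two low-bit pointer encodings. What
 * follows is a rough, self-contained sketch of those macros, not GHC's
 * real definitions (those live in the RTS storage headers, and TAG_BITS
 * is platform-dependent: 2 on 32-bit targets, 3 on 64-bit targets).
 */
#include <stdint.h>

typedef uintptr_t SketchWord;

/* Pointer tagging: nonzero low bits mark a closure already known to be
 * evaluated, which is why a tagged spark counts as fizzled. */
#define SKETCH_TAG_BITS             3
#define SKETCH_TAG_MASK             ((1 << SKETCH_TAG_BITS) - 1)
#define SKETCH_GET_CLOSURE_TAG(p)   (((SketchWord)(p)) & SKETCH_TAG_MASK)

/* Forwarding pointers: when the GC evacuates a closure, it overwrites the
 * closure's info pointer with the new address plus a set low bit. Reading
 * header.info exactly once and testing it before dereferencing is what
 * makes the traversal above safe against a racing parallel GC thread. */
#define SKETCH_IS_FORWARDING_PTR(p) ((((SketchWord)(p)) & 1) != 0)
#define SKETCH_UN_FORWARDING_PTR(p) (((SketchWord)(p)) - 1)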