merge upstream HEAD
[ghc-hetmet.git] rts/Sparks.c
index e5e6d7e..a826190 100644
@@ -45,7 +45,7 @@ createSparkThread (Capability *cap)
     StgTSO *tso;
 
     tso = createIOThread (cap, RtsFlags.GcFlags.initialStkSize, 
-                          &base_GHCziConc_runSparks_closure);
+                          (StgClosure *)runSparks_closure);
 
     traceEventCreateSparkThread(cap, tso->id);
 
@@ -71,9 +71,10 @@ newSpark (StgRegTable *reg, StgClosure *p)
 
     if (closure_SHOULD_SPARK(p)) {
         pushWSDeque(pool,p);
-    }  
-
-    cap->sparks_created++;
+        cap->sparks_created++;
+    } else {
+        cap->sparks_dud++;
+    }
 
     return 1;
 }
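
The accounting above assumes per-capability counters for each spark outcome. Below is a minimal sketch of how those counters might sit on the Capability; the field names are the ones bumped in this diff, but the surrounding struct and the counter type are assumptions (the real definition lives in Capability.h):

/* Sketch only -- not the committed Capability.h.  Assumes the usual
 * Rts.h types (StgWord); field names follow the counters in this diff. */
struct Capability_ {
    /* ... existing fields ... */
    StgWord sparks_created;   /* pushed onto the spark pool             */
    StgWord sparks_dud;       /* already evaluated when sparked         */
    StgWord sparks_fizzled;   /* evaluated before the spark was run     */
    StgWord sparks_gcd;       /* unreachable at GC time, discarded      */
};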
@@ -112,7 +113,7 @@ tryStealSpark (Capability *cap)
  * -------------------------------------------------------------------------- */
 
 void
-pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
+pruneSparkQueue (Capability *cap)
 { 
     SparkPool *pool;
     StgClosurePtr spark, tmp, *elements;
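
With the evac_fn/user pair dropped from the signature, a caller only needs the capability whose pool is being pruned. A hypothetical post-change call site, for illustration only (everything here other than pruneSparkQueue itself is an assumption; the real callers are in the GC):

/* Hypothetical helper, not part of this patch: prune every capability's
 * spark pool through the new single-argument interface. */
static void
pruneAllSparkPools (Capability *caps[], unsigned int n_caps)
{
    unsigned int i;
    for (i = 0; i < n_caps; i++) {
        pruneSparkQueue(caps[i]);
    }
}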
@@ -196,29 +197,59 @@ pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
       // We have to be careful here: in the parallel GC, another
       // thread might evacuate this closure while we're looking at it,
       // so grab the info pointer just once.
-      info = spark->header.info;
-      if (IS_FORWARDING_PTR(info)) {
-          tmp = (StgClosure*)UN_FORWARDING_PTR(info);
-          /* if valuable work: shift inside the pool */
-          if (closure_SHOULD_SPARK(tmp)) {
-              elements[botInd] = tmp; // keep entry (new address)
-              botInd++;
-              n++;
-          } else {
-              pruned_sparks++; // discard spark
-              cap->sparks_pruned++;
-          }
+      if (GET_CLOSURE_TAG(spark) != 0) {
+          // Tagged pointer is a value, so the spark has fizzled.  It
+          // probably never happens that we get a tagged pointer in
+          // the spark pool, because we would have pruned the spark
+          // during the previous GC cycle if it turned out to be
+          // evaluated, but it doesn't hurt to have this check for
+          // robustness.
+          pruned_sparks++;
+          cap->sparks_fizzled++;
       } else {
-          if (!(closure_flags[INFO_PTR_TO_STRUCT(info)->type] & _NS)) {
-              elements[botInd] = spark; // keep entry (new address)
-              evac (user, &elements[botInd]);
-              botInd++;
-              n++;
+          info = spark->header.info;
+          if (IS_FORWARDING_PTR(info)) {
+              tmp = (StgClosure*)UN_FORWARDING_PTR(info);
+              /* if valuable work: shift inside the pool */
+              if (closure_SHOULD_SPARK(tmp)) {
+                  elements[botInd] = tmp; // keep entry (new address)
+                  botInd++;
+                  n++;
+              } else {
+                  pruned_sparks++; // discard spark
+                  cap->sparks_fizzled++;
+              }
+          } else if (HEAP_ALLOCED(spark)) {
+              if ((Bdescr((P_)spark)->flags & BF_EVACUATED)) {
+                  if (closure_SHOULD_SPARK(spark)) {
+                      elements[botInd] = spark; // keep entry (new address)
+                      botInd++;
+                      n++;
+                  } else {
+                      pruned_sparks++; // discard spark
+                      cap->sparks_fizzled++;
+                  }
+              } else {
+                  pruned_sparks++; // discard spark
+                  cap->sparks_gcd++;
+              }
           } else {
-              pruned_sparks++; // discard spark
-              cap->sparks_pruned++;
+              if (INFO_PTR_TO_STRUCT(info)->type == THUNK_STATIC) {
+                  if (*THUNK_STATIC_LINK(spark) != NULL) {
+                      elements[botInd] = spark; // keep entry (new address)
+                      botInd++;
+                      n++;
+                  } else {
+                      pruned_sparks++; // discard spark
+                      cap->sparks_gcd++;
+                  }
+              } else {
+                  pruned_sparks++; // discard spark
+                  cap->sparks_fizzled++;
+              }
           }
       }
+
       currInd++;
 
       // in the loop, we may reach the bounds, and instantly wrap around
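
For reference, the branching introduced in this hunk sorts every spark into one of three outcomes: keep it (possibly at its forwarded address), drop it as fizzled, or drop it as collected. The helper below is an illustrative restatement of that decision tree using the same RTS macros as the diff; it is not part of the patch, and the enum and function names are invented for clarity:

/* Illustrative only -- mirrors the classification above, assuming the
 * usual RTS headers (Rts.h and the GC's forwarding-pointer macros). */
typedef enum { SPARK_KEEP, SPARK_FIZZLED, SPARK_GCD } SparkVerdict;

static SparkVerdict
classifySpark (StgClosure **sparkp)
{
    StgClosure *spark = *sparkp;
    const StgInfoTable *info;

    if (GET_CLOSURE_TAG(spark) != 0) {
        return SPARK_FIZZLED;             /* tagged pointer: already a value */
    }
    info = spark->header.info;
    if (IS_FORWARDING_PTR(info)) {
        spark = (StgClosure*)UN_FORWARDING_PTR(info);
        *sparkp = spark;                  /* keep the post-GC address */
        return closure_SHOULD_SPARK(spark) ? SPARK_KEEP : SPARK_FIZZLED;
    }
    if (HEAP_ALLOCED(spark)) {
        if (Bdescr((P_)spark)->flags & BF_EVACUATED) {
            return closure_SHOULD_SPARK(spark) ? SPARK_KEEP : SPARK_FIZZLED;
        }
        return SPARK_GCD;                 /* not evacuated: dead after this GC */
    }
    if (INFO_PTR_TO_STRUCT(info)->type == THUNK_STATIC) {
        /* static thunk: live only if the GC set its static link field */
        return (*THUNK_STATIC_LINK(spark) != NULL) ? SPARK_KEEP : SPARK_GCD;
    }
    return SPARK_FIZZLED;                 /* any other static closure is a value */
}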