Fix sanity checking after fix to #2917
diff --git a/rts/Capability.c b/rts/Capability.c
index 27a2d51..bd6d56f 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -26,6 +26,7 @@
 #include "Schedule.h"
 #include "Sparks.h"
 #include "Trace.h"
+#include "GC.h"
 
 // one global capability, this is the Capability for non-threaded
 // builds, and for +RTS -N1
@@ -57,15 +58,18 @@ globalWorkToDo (void)
 StgClosure *
 findSpark (Capability *cap)
 {
-  /* use the normal Sparks.h interface (internally modified to enable
-     concurrent stealing) 
-     and immediately turn the spark into a thread when successful
-  */
   Capability *robbed;
   StgClosurePtr spark;
   rtsBool retry;
   nat i = 0;
 
+  if (!emptyRunQueue(cap)) {
+      // If there are other threads waiting to run, don't try to run any
+      // new sparks: sparks might be speculative, and we don't want to
+      // take resources away from the main computation.
+      return 0;
+  }
+
   // first try to get a spark from our own pool.
   // We should be using reclaimSpark(), because it works without
   // needing any atomic instructions:
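With the early return above, sparks only get a look-in when this Capability's run queue is empty.  A minimal sketch of the caller's side, assuming a createSparkThread-style helper (names are illustrative, not necessarily the real Schedule.c code):

    // Hypothetical caller: findSpark() now returns NULL whenever the run
    // queue is non-empty, so a spark is only turned into a thread when the
    // Capability would otherwise be idle.
    static void
    activateSparkIfIdle (Capability *cap)
    {
        StgClosure *spark;
        spark = findSpark(cap);
        if (spark != NULL) {
            createSparkThread(cap, spark);  // assumed helper that builds a TSO
        }
    }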
@@ -187,6 +191,7 @@ initCapability( Capability *cap, nat i )
 
     cap->no = i;
     cap->in_haskell        = rtsFalse;
+    cap->in_gc             = rtsFalse;
 
     cap->run_queue_hd      = END_TSO_QUEUE;
     cap->run_queue_tl      = END_TSO_QUEUE;
@@ -212,6 +217,9 @@ initCapability( Capability *cap, nat i )
     cap->mut_lists  = stgMallocBytes(sizeof(bdescr *) *
                                     RtsFlags.GcFlags.generations,
                                     "initCapability");
+    cap->saved_mut_lists = stgMallocBytes(sizeof(bdescr *) *
+                                          RtsFlags.GcFlags.generations,
+                                          "initCapability");
 
     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
        cap->mut_lists[g] = NULL;
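The new saved_mut_lists array mirrors mut_lists, presumably so that a Capability acting as a GC worker can park its mutable lists and have them restored afterwards.  A hedged sketch of such a swap, under that assumption (the real code would live in the GC, not in initCapability):

    // Illustrative only: stash the current mutable lists for the duration
    // of a collection; a matching restore would copy them back.
    static void
    saveMutLists (Capability *cap)
    {
        nat g;
        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
            cap->saved_mut_lists[g] = cap->mut_lists[g];
            cap->mut_lists[g]       = NULL;  // GC installs fresh blocks here
        }
    }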
@@ -286,10 +294,10 @@ initCapabilities( void )
 
 void setContextSwitches(void)
 {
-  nat i;
-  for (i=0; i < n_capabilities; i++) {
-    capabilities[i].context_switch = 1;
-  }
+    nat i;
+    for (i=0; i < n_capabilities; i++) {
+        contextSwitchCapability(&capabilities[i]);
+    }
 }
 
 /* ----------------------------------------------------------------------------
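setContextSwitches now goes through contextSwitchCapability() instead of poking the flag directly.  A plausible shape for that helper, assuming it also forces an early heap check so the running thread drops back into the scheduler promptly (the real definition lives in Capability.h, elsewhere in this patch):

    // Assumed definition, for illustration only.
    INLINE_HEADER void
    contextSwitchCapability (Capability *cap)
    {
        // a NULL HpLim makes the next heap check fail, returning control
        // to the scheduler
        cap->r.rHpLim = NULL;
        cap->context_switch = 1;
    }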
@@ -355,14 +363,7 @@ releaseCapability_ (Capability* cap,
        return;
     }
 
-    /* if waiting_for_gc was the reason to release the cap: thread
-       comes from yieldCap->releaseAndQueueWorker. Unconditionally set
-       cap. free and return (see default after the if-protected other
-       special cases). Thread will wait on cond.var and re-acquire the
-       same cap after GC (GC-triggering cap. calls releaseCap and
-       enters the spare_workers case)
-    */
-    if (waiting_for_gc) {
+    if (waiting_for_gc == PENDING_GC_SEQ) {
       last_free_capability = cap; // needed?
       trace(TRACE_sched | DEBUG_sched, 
            "GC pending, set capability %d free", cap->no);
@@ -481,14 +482,17 @@ waitForReturnCapability (Capability **pCap, Task *task)
        if (!cap->running_task) {
            nat i;
            // otherwise, search for a free capability
+            cap = NULL;
            for (i = 0; i < n_capabilities; i++) {
-               cap = &capabilities[i];
-               if (!cap->running_task) {
+               if (!capabilities[i].running_task) {
+                    cap = &capabilities[i];
                    break;
                }
            }
-           // Can't find a free one, use last_free_capability.
-           cap = last_free_capability;
+            if (cap == NULL) {
+                // Can't find a free one, use last_free_capability.
+                cap = last_free_capability;
+            }
        }
 
 	// record the Capability as the one this Task is now associated with.
@@ -554,6 +558,12 @@ yieldCapability (Capability** pCap, Task *task)
 {
     Capability *cap = *pCap;
 
+    if (waiting_for_gc == PENDING_GC_PAR) {
+       debugTrace(DEBUG_sched, "capability %d: becoming a GC thread", cap->no);
+        gcWorkerThread(cap);
+        return;
+    }
+
        debugTrace(DEBUG_sched, "giving up capability %d", cap->no);
 
        // We must now release the capability and wait to be woken up
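The important property of the new branch: when a parallel GC is pending, a yielding Task does not sleep at all; it does GC work on the spot and returns still holding the same Capability.  A hedged sketch of the call site's expectation (illustrative, not the real Schedule.c code):

    // The Task enters holding cap, helps collect if a parallel GC is
    // pending, and in every case comes back owning a Capability.
    yieldCapability(&cap, task);
    ASSERT(cap->running_task == task);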
@@ -652,58 +662,21 @@ wakeupThreadOnCapability (Capability *my_cap,
 }
 
 /* ----------------------------------------------------------------------------
- * prodCapabilities
- *
- * Used to indicate that the interrupted flag is now set, or some
- * other global condition that might require waking up a Task on each
- * Capability.
- * ------------------------------------------------------------------------- */
-
-static void
-prodCapabilities(rtsBool all)
-{
-    nat i;
-    Capability *cap;
-    Task *task;
-
-    for (i=0; i < n_capabilities; i++) {
-       cap = &capabilities[i];
-       ACQUIRE_LOCK(&cap->lock);
-       if (!cap->running_task) {
-           if (cap->spare_workers) {
-               trace(TRACE_sched, "resuming capability %d", cap->no);
-               task = cap->spare_workers;
-               ASSERT(!task->stopped);
-               giveCapabilityToTask(cap,task);
-               if (!all) {
-                   RELEASE_LOCK(&cap->lock);
-                   return;
-               }
-           }
-       }
-       RELEASE_LOCK(&cap->lock);
-    }
-    return;
-}
-
-void
-prodAllCapabilities (void)
-{
-    prodCapabilities(rtsTrue);
-}
-
-/* ----------------------------------------------------------------------------
- * prodOneCapability
+ * prodCapability
  *
- * Like prodAllCapabilities, but we only require a single Task to wake
- * up in order to service some global event, such as checking for
- * deadlock after some idle time has passed.
+ * If a Capability is currently idle, wake up a Task on it.  Used to 
+ * get every Capability into the GC.
  * ------------------------------------------------------------------------- */
 
 void
-prodOneCapability (void)
+prodCapability (Capability *cap, Task *task)
 {
-    prodCapabilities(rtsFalse);
+    ACQUIRE_LOCK(&cap->lock);
+    if (!cap->running_task) {
+        cap->running_task = task;
+        releaseCapability_(cap,rtsTrue);
+    }
+    RELEASE_LOCK(&cap->lock);
 }
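A hedged sketch of how the Task that initiates a parallel GC might use this, prodding every other Capability so that idle ones wake a worker to join the collection (the loop is illustrative, not the real scheduler code; cap and task here stand for the initiating Capability and its Task):

    nat i;
    for (i = 0; i < n_capabilities; i++) {
        if (&capabilities[i] != cap) {
            prodCapability(&capabilities[i], task);
        }
    }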
 
 /* ----------------------------------------------------------------------------