Move the freeing of Capabilities later in the shutdown sequence
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 8c254cc..8ab964d 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -137,20 +137,24 @@ static Capability *schedule (Capability *initialCapability, Task *task);
 // scheduler clearer.
 //
 static void schedulePreLoop (void);
+static void scheduleFindWork (Capability *cap);
 #if defined(THREADED_RTS)
-static void schedulePushWork(Capability *cap, Task *task);
+static void scheduleYield (Capability **pcap, Task *task);
 #endif
 static void scheduleStartSignalHandlers (Capability *cap);
 static void scheduleCheckBlockedThreads (Capability *cap);
 static void scheduleCheckWakeupThreads(Capability *cap USED_IF_NOT_THREADS);
 static void scheduleCheckBlackHoles (Capability *cap);
 static void scheduleDetectDeadlock (Capability *cap, Task *task);
+static void schedulePushWork(Capability *cap, Task *task);
 #if defined(PARALLEL_HASKELL)
 static rtsBool scheduleGetRemoteWork(Capability *cap);
 static void scheduleSendPendingMessages(void);
+#endif
+#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS)
 static void scheduleActivateSpark(Capability *cap);
 #endif
-static void schedulePostRunThread(StgTSO *t);
+static void schedulePostRunThread(Capability *cap, StgTSO *t);
 static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
 static void scheduleHandleStackOverflow( Capability *cap, Task *task, 
                                         StgTSO *t);
@@ -281,23 +285,6 @@ schedule (Capability *initialCapability, Task *task)
 
   while (TERMINATION_CONDITION) {
 
-#if defined(THREADED_RTS)
-      if (first) {
-         // don't yield the first time, we want a chance to run this
-         // thread for a bit, even if there are others banging at the
-         // door.
-         first = rtsFalse;
-         ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
-      } else {
-         // Yield the capability to higher-priority tasks if necessary.
-         yieldCapability(&cap, task);
-      }
-#endif
-      
-#if defined(THREADED_RTS)
-      schedulePushWork(cap,task);
-#endif
-
     // Check whether we have re-entered the RTS from Haskell without
     // going via suspendThread()/resumeThread (i.e. a 'safe' foreign
     // call).
@@ -365,59 +352,13 @@ schedule (Capability *initialCapability, Task *task)
        barf("sched_state: %d", sched_state);
     }
 
-#if defined(THREADED_RTS)
-    // If the run queue is empty, take a spark and turn it into a thread.
-    {
-       if (emptyRunQueue(cap)) {
-           StgClosure *spark;
-           spark = findSpark(cap);
-           if (spark != NULL) {
-               debugTrace(DEBUG_sched,
-                          "turning spark of closure %p into a thread",
-                          (StgClosure *)spark);
-               createSparkThread(cap,spark);     
-           }
-       }
-    }
-#endif // THREADED_RTS
-
-    scheduleStartSignalHandlers(cap);
+    scheduleFindWork(cap);
 
-    // Only check the black holes here if we've nothing else to do.
-    // During normal execution, the black hole list only gets checked
-    // at GC time, to avoid repeatedly traversing this possibly long
-    // list each time around the scheduler.
-    if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
-
-    scheduleCheckWakeupThreads(cap);
-
-    scheduleCheckBlockedThreads(cap);
+    /* work pushing, currently relevant only for THREADED_RTS:
+       (pushes threads, wakes up idle capabilities for stealing) */
+    schedulePushWork(cap,task);
 
 #if defined(PARALLEL_HASKELL)
-    /* message processing and work distribution goes here */ 
-
-    /* if messages have been buffered... a NOOP in THREADED_RTS */
-    scheduleSendPendingMessages();
-
-    /* If the run queue is empty,...*/
-    if (emptyRunQueue(cap)) {
-      /* ...take one of our own sparks and turn it into a thread */
-      scheduleActivateSpark(cap);
-
-       /* if this did not work, try to steal a spark from someone else */
-      if (emptyRunQueue(cap)) {
-       receivedFinish = scheduleGetRemoteWork(cap);
-       continue; //  a new round, (hopefully) with new work
-       /* 
-          in GUM, this a) sends out a FISH and returns IF no fish is
-                          out already
-                       b) (blocking) awaits and receives messages
-          
-          in Eden, this is only the blocking receive, as b) in GUM.
-       */
-      }
-    } 
-
     /* since we perform a blocking receive and continue otherwise,
        either we never reach here or we definitely have work! */
     // from here: non-empty run queue
@@ -430,9 +371,10 @@ schedule (Capability *initialCapability, Task *task)
                                above, waits for messages as well! */
       processMessages(cap, &receivedFinish);
     }
-#endif // PARALLEL_HASKELL
+#endif // PARALLEL_HASKELL: non-empty run queue!
 
     scheduleDetectDeadlock(cap,task);
+
 #if defined(THREADED_RTS)
     cap = task->cap;    // reload cap, it might have changed
 #endif
@@ -445,12 +387,28 @@ schedule (Capability *initialCapability, Task *task)
     //
     // win32: might be here due to awaitEvent() being abandoned
     // as a result of a console event having been delivered.
-    if ( emptyRunQueue(cap) ) {
+    
+#if defined(THREADED_RTS)
+    if (first) 
+    {
+    // XXX: ToDo
+    //     // don't yield the first time, we want a chance to run this
+    //     // thread for a bit, even if there are others banging at the
+    //     // door.
+    //     first = rtsFalse;
+    //     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
+    }
+
+  yield:
+    scheduleYield(&cap,task);
+    if (emptyRunQueue(cap)) continue; // look for work again
+#endif
+
 #if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
+    if ( emptyRunQueue(cap) ) {
        ASSERT(sched_state >= SCHED_INTERRUPTING);
-#endif
-       continue; // nothing to do
     }
+#endif
 
     // 
     // Get a thread to run
@@ -522,6 +480,7 @@ run_thread:
 
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     ASSERT(t->cap == cap);
+    ASSERT(t->bound ? t->bound->cap == cap : 1);
 
     prev_what_next = t->what_next;
 
@@ -607,7 +566,7 @@ run_thread:
        debugTrace(DEBUG_sched,
                   "--<< thread %lu (%s) stopped: blocked",
                   (unsigned long)t->id, whatNext_strs[t->what_next]);
-       continue;
+        goto yield;
     }
 #endif
 
@@ -622,7 +581,7 @@ run_thread:
     CCCS = CCS_SYSTEM;
 #endif
     
-    schedulePostRunThread(t);
+    schedulePostRunThread(cap,t);
 
     t = threadStackUnderflow(task,t);
 
@@ -674,16 +633,118 @@ schedulePreLoop(void)
 }
 
 /* -----------------------------------------------------------------------------
+ * scheduleFindWork()
+ *
+ * Search for work to do, and handle messages from elsewhere.
+ * -------------------------------------------------------------------------- */
+
+static void
+scheduleFindWork (Capability *cap)
+{
+    scheduleStartSignalHandlers(cap);
+
+    // Only check the black holes here if we've nothing else to do.
+    // During normal execution, the black hole list only gets checked
+    // at GC time, to avoid repeatedly traversing this possibly long
+    // list each time around the scheduler.
+    if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
+
+    scheduleCheckWakeupThreads(cap);
+
+    scheduleCheckBlockedThreads(cap);
+
+#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
+    // Try to activate one of our own sparks
+    if (emptyRunQueue(cap)) { scheduleActivateSpark(cap); }
+#endif
+
+#if defined(THREADED_RTS)
+    // Try to steal work if we don't have any
+    if (emptyRunQueue(cap)) { stealWork(cap); }
+#endif
+    
+#if defined(PARALLEL_HASKELL)
+    // if messages have been buffered...
+    scheduleSendPendingMessages();
+#endif
+
+#if defined(PARALLEL_HASKELL)
+    if (emptyRunQueue(cap)) {
+	scheduleGetRemoteWork(cap);
+	return; //  a new round, (hopefully) with new work
+       /* 
+          in GUM, this a) sends out a FISH and returns IF no fish is
+                          out already
+                       b) (blocking) awaits and receives messages
+          
+          in Eden, this is only the blocking receive, as b) in GUM.
+       */
+    }
+#endif
+}
+
+#if defined(THREADED_RTS)
+STATIC_INLINE rtsBool
+shouldYieldCapability (Capability *cap, Task *task)
+{
+    // we need to yield this capability to someone else if...
+    //   - another thread is initiating a GC
+    //   - another Task is returning from a foreign call
+    //   - the thread at the head of the run queue cannot be run
+    //     by this Task (it is bound to another Task, or it is unbound
+    //     and this Task is bound).
+    return (waiting_for_gc || 
+            cap->returning_tasks_hd != NULL ||
+            (!emptyRunQueue(cap) && (task->tso == NULL
+                                     ? cap->run_queue_hd->bound != NULL
+                                     : cap->run_queue_hd->bound != task)));
+}
+
+// This is the single place where a Task goes to sleep.  There are
+// two reasons it might need to sleep:
+//    - there are no threads to run
+//    - we need to yield this Capability to someone else 
+//      (see shouldYieldCapability())
+//
+// On return, *pcap points to the Capability we now hold, which may
+// differ from the one we started with.
+
+static void
+scheduleYield (Capability **pcap, Task *task)
+{
+    Capability *cap = *pcap;
+
+    // if we have work, and we don't need to give up the Capability, continue.
+    if (!emptyRunQueue(cap) && !shouldYieldCapability(cap,task))
+        return;
+
+    // otherwise yield (sleep), and keep yielding if necessary.
+    do {
+        yieldCapability(&cap,task);
+    } 
+    while (shouldYieldCapability(cap,task));
+
+    // note there may still be no threads on the run queue at this
+    // point, the caller has to check.
+
+    *pcap = cap;
+    return;
+}
+#endif
+    
+/* -----------------------------------------------------------------------------
  * schedulePushWork()
  *
  * Push work to other Capabilities if we have some.
  * -------------------------------------------------------------------------- */
 
-#if defined(THREADED_RTS)
 static void
 schedulePushWork(Capability *cap USED_IF_THREADS, 
                 Task *task      USED_IF_THREADS)
 {
+  /* The following code is not for PARALLEL_HASKELL. The call is kept
+     general, since future GUM versions might use pushing in a
+     distributed setup. */
+#if defined(THREADED_RTS)
+
     Capability *free_caps[n_capabilities], *cap0;
     nat i, n_free_caps;
 
@@ -726,7 +787,12 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
        StgTSO *prev, *t, *next;
        rtsBool pushed_to_all;
 
-       debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps);
+       debugTrace(DEBUG_sched, 
+                  "cap %d: %s and %d free capabilities, sharing...", 
+                  cap->no, 
+                  (!emptyRunQueue(cap) && cap->run_queue_hd->_link != END_TSO_QUEUE)?
+                  "excess threads on run queue":"sparks to share (>=2)",
+                  n_free_caps);
 
        i = 0;
        pushed_to_all = rtsFalse;
@@ -760,6 +826,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
            cap->run_queue_tl = prev;
        }
 
+#ifdef SPARK_PUSHING
+       /* JB: I left this code in place; it would work but is not necessary. */
+
        // If there are some free capabilities that we didn't push any
        // threads to, then try to push a spark to each one.
        if (!pushed_to_all) {
@@ -767,7 +836,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
            // i is the next free capability to push to
            for (; i < n_free_caps; i++) {
                if (emptySparkPoolCap(free_caps[i])) {
-                   spark = findSpark(cap);
+                   spark = tryStealSpark(cap->sparks);
                    if (spark != NULL) {
                        debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
                        newSpark(&(free_caps[i]->r), spark);
@@ -775,16 +844,19 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                }
            }
        }
+#endif /* SPARK_PUSHING */
 
        // release the capabilities
        for (i = 0; i < n_free_caps; i++) {
            task->cap = free_caps[i];
-           releaseCapability(free_caps[i]);
+           releaseAndWakeupCapability(free_caps[i]);
        }
     }
     task->cap = cap; // reset to point to our Capability.
+
+#endif /* THREADED_RTS */
+
 }
-#endif
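
For intuition about the distribution policy (largely unchanged context between these hunks): schedulePushWork keeps the head of the run queue plus any threads bound to this Task, and deals the remaining threads out to the free Capabilities in turn. A toy sketch of that round-robin split, with plain ints standing in for TSOs -- illustrative only, not the RTS code:

    /* Round-robin work pushing in miniature: negative ints mark
       "bound" threads that must stay on this Capability. */
    #include <stdio.h>

    #define N_FREE 3

    int main(void)
    {
        int run_queue[] = { 1, -2, 3, 4, 5, -6, 7 };
        int n = sizeof run_queue / sizeof run_queue[0];
        int pushed[N_FREE][8], count[N_FREE] = { 0 };
        int kept[8], n_kept = 0, i = 0;

        kept[n_kept++] = run_queue[0];            /* keep one thread to run */
        for (int j = 1; j < n; j++) {
            if (run_queue[j] < 0) {               /* bound: stays home */
                kept[n_kept++] = run_queue[j];
            } else {                              /* push round-robin */
                pushed[i][count[i]++] = run_queue[j];
                i = (i + 1) % N_FREE;
            }
        }

        for (int c = 0; c < N_FREE; c++) {
            printf("free cap %d gets:", c);
            for (int k = 0; k < count[c]; k++) printf(" %d", pushed[c][k]);
            printf("\n");
        }
        printf("we keep:");
        for (int k = 0; k < n_kept; k++) printf(" %d", kept[k]);
        printf("\n");
        return 0;
    }

The releaseAndWakeupCapability() change above matters here: a Capability that has just been handed work must actually be woken, not merely released.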
 
 /* ----------------------------------------------------------------------------
  * Start any pending signal handlers
@@ -965,7 +1037,7 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
  * ------------------------------------------------------------------------- */
 
 #if defined(PARALLEL_HASKELL)
-static StgTSO *
+static void
 scheduleSendPendingMessages(void)
 {
 
@@ -984,10 +1056,10 @@ scheduleSendPendingMessages(void)
 #endif
 
 /* ----------------------------------------------------------------------------
- * Activate spark threads (PARALLEL_HASKELL only)
+ * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS)
  * ------------------------------------------------------------------------- */
 
-#if defined(PARALLEL_HASKELL)
+#if defined(PARALLEL_HASKELL) || defined(THREADED_RTS)
 static void
 scheduleActivateSpark(Capability *cap)
 {
@@ -1003,7 +1075,12 @@ scheduleActivateSpark(Capability *cap)
         on our run queue in the meantime ? But would need a lock.. */
       return;
 
-    spark = findSpark(cap); // defined in Sparks.c
+    // Really we should be using reclaimSpark() here, but
+    // experimentally it doesn't seem to perform as well as just
+    // stealing from our own spark pool:
+    // spark = reclaimSpark(cap->sparks);
+    spark = tryStealSpark(cap->sparks); // defined in Sparks.c
 
     if (spark != NULL) {
       debugTrace(DEBUG_sched,
@@ -1012,15 +1089,15 @@ scheduleActivateSpark(Capability *cap)
       createSparkThread(cap,spark); // defined in Sparks.c
     }
 }
-#endif // PARALLEL_HASKELL
+#endif // PARALLEL_HASKELL || THREADED_RTS
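
The reclaimSpark()/tryStealSpark() trade-off noted above comes down to which end of our own spark pool we pop. A toy two-ended pool makes the difference visible -- illustrative stand-ins only; the real lock-free deque lives in Sparks.c:

    /* Owner pushes and reclaims at "bot"; steals come off "top". */
    #include <stdio.h>

    #define POOL_SZ 8
    static int pool[POOL_SZ];
    static int top, bot;                /* top: steal end, bot: owner end */

    static void newSpark(int s)     { if (bot - top < POOL_SZ) pool[bot++ % POOL_SZ] = s; }
    static int  reclaimSpark(void)  { return bot > top ? pool[--bot % POOL_SZ] : -1; }
    static int  tryStealSpark(void) { return bot > top ? pool[top++ % POOL_SZ] : -1; }

    int main(void)
    {
        for (int s = 1; s <= 3; s++) newSpark(s);
        printf("reclaimSpark  (owner end, newest): %d\n", reclaimSpark());  /* 3 */
        printf("tryStealSpark (steal end, oldest): %d\n", tryStealSpark()); /* 1 */
        return 0;
    }

Stealing from our own pool thus runs the oldest spark first; the comment above reports that, somewhat surprisingly, this beats reclaiming the newest.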
 
 /* ----------------------------------------------------------------------------
  * Get work from a remote node (PARALLEL_HASKELL only)
  * ------------------------------------------------------------------------- */
     
 #if defined(PARALLEL_HASKELL)
-static rtsBool
-scheduleGetRemoteWork(Capability *cap)
+static rtsBool /* return value used in PARALLEL_HASKELL only */
+scheduleGetRemoteWork (Capability *cap STG_UNUSED)
 {
 #if defined(PARALLEL_HASKELL)
   rtsBool receivedFinish = rtsFalse;
@@ -1057,14 +1134,14 @@ scheduleGetRemoteWork(Capability *cap)
 
 #endif /* PARALLEL_HASKELL */
 }
-#endif // PARALLEL_HASKELL
+#endif // PARALLEL_HASKELL
 
 /* ----------------------------------------------------------------------------
  * After running a thread...
  * ------------------------------------------------------------------------- */
 
 static void
-schedulePostRunThread (StgTSO *t)
+schedulePostRunThread (Capability *cap, StgTSO *t)
 {
     // We have to be able to catch transactions that are in an
     // infinite loop as a result of seeing an inconsistent view of
@@ -1085,8 +1162,7 @@ schedulePostRunThread (StgTSO *t)
             // ATOMICALLY_FRAME, aborting the (nested)
             // transaction, and saving the stack of any
             // partially-evaluated thunks on the heap.
-            throwToSingleThreaded_(&capabilities[0], t, 
-                                   NULL, rtsTrue, NULL);
+            throwToSingleThreaded_(cap, t, NULL, rtsTrue, NULL);
             
             ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
         }
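
For the shape of this check: after the thread has run, its transaction record is validated, and a thread that saw an inconsistent view of memory is aborted back to its ATOMICALLY_FRAME rather than being allowed to loop forever. A stub sketch of that check-after-run pattern -- the names below are made-up stand-ins, not the RTS's STM API:

    /* check-after-run: validate the finished slice of work, and abort
       it if it ran against an inconsistent memory snapshot */
    #include <stdio.h>
    #include <stdbool.h>

    typedef struct { int snapshot; } TRec;   /* stub transaction record */
    static int memory_version = 2;

    static bool validate(const TRec *trec)   /* saw a consistent view? */
    {
        return trec->snapshot == memory_version;
    }

    static void abort_transaction(void)      /* stands in for throwTo... */
    {
        printf("inconsistent transaction: abort and retry\n");
    }

    int main(void)
    {
        TRec trec = { 1 };                   /* read under a stale version */
        if (!validate(&trec))                /* the post-run check */
            abort_transaction();
        return 0;
    }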
@@ -1484,6 +1560,14 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
        performHeapProfile = rtsFalse;
     }
 
+#ifdef SPARKBALANCE
+    /* JB
+       Once all Capabilities have stopped here for GC, this would be
+       the place to balance all spark pools: no concurrent stealing or
+       adding of new sparks can occur. Should be defined in Sparks.c. */
+    balanceSparkPoolsCaps(n_capabilities, capabilities);
+#endif
+
 #if defined(THREADED_RTS)
     // release our stash of capabilities.
     for (i = 0; i < n_capabilities; i++) {
@@ -1765,7 +1849,7 @@ suspendThread (StgRegTable *reg)
 
   suspendTask(cap,task);
   cap->in_haskell = rtsFalse;
-  releaseCapability_(cap);
+  releaseCapability_(cap,rtsFalse);
   
   RELEASE_LOCK(&cap->lock);
 
@@ -2030,14 +2114,13 @@ exitScheduler(
        boundTaskExiting(task);
        stopTaskManager();
     }
-#else
-    freeCapability(&MainCapability);
 #endif
 }
 
 void
 freeScheduler( void )
 {
+    freeCapabilities();
     freeTaskManager();
     if (n_capabilities != 1) {
         stgFree(capabilities);
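
Taken together with the exitScheduler() hunk above, shutdown is now two phases: exitScheduler() only quiesces (it no longer frees the MainCapability), and freeScheduler() frees the Capabilities before the task manager. A stub sketch of the resulting call order -- the bodies are placeholders; only the ordering reflects the patch:

    /* shutdown in two phases: quiesce, then free */
    #include <stdio.h>

    static void exitScheduler(void)    /* stop Tasks; Capabilities stay live */
    {
        printf("exitScheduler: interrupt, final GC, stop task manager\n");
    }

    static void freeCapabilities(void) { printf("freeCapabilities\n"); }
    static void freeTaskManager(void)  { printf("freeTaskManager\n"); }

    static void freeScheduler(void)
    {
        freeCapabilities();            /* moved here from exitScheduler */
        freeTaskManager();
    }

    int main(void)
    {
        exitScheduler();               /* phase 1: quiesce */
        freeScheduler();               /* phase 2: free    */
        return 0;
    }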
@@ -2126,10 +2209,17 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
   }
 
   /* Try to double the current stack size.  If that takes us over the
-   * maximum stack size for this thread, then use the maximum instead.
-   * Finally round up so the TSO ends up as a whole number of blocks.
+   * maximum stack size for this thread, then use the maximum instead
+   * (that is, unless we're already at or over the max size and we
+   * can't raise the StackOverflow exception (see above), in which
+   * case just double the size). Finally round up so the TSO ends up as
+   * a whole number of blocks.
    */
-  new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+  if (tso->stack_size >= tso->max_stack_size) {
+      new_stack_size = tso->stack_size * 2;
+  } else { 
+      new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+  }
   new_tso_size   = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + 
                                       TSO_STRUCT_SIZE)/sizeof(W_);
   new_tso_size = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
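
The new sizing rule is easy to check in isolation: double the stack and clamp to max_stack_size, except when the stack is already at or over the max (the can't-raise-StackOverflow case in the comment), where we double unconditionally. A standalone sketch of just that computation, with the block rounding and W_ arithmetic simplified away:

    /* the growth rule from the hunk above, in isolation */
    #include <stdio.h>
    #include <stddef.h>

    static size_t stg_min(size_t a, size_t b) { return a < b ? a : b; }

    static size_t grow(size_t stack_size, size_t max_stack_size)
    {
        if (stack_size >= max_stack_size)
            return stack_size * 2;                       /* over max: just double */
        return stg_min(stack_size * 2, max_stack_size);  /* double, clamped */
    }

    int main(void)
    {
        printf("%zu\n", grow(1024, 8192));   /* 2048  */
        printf("%zu\n", grow(6000, 8192));   /* 8192  */
        printf("%zu\n", grow(8192, 8192));   /* 16384 */
        return 0;
    }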