RTS tidyup sweep, first phase
[ghc-hetmet.git] / rts / Schedule.c
index 1bd68c2..b3d523e 100644
@@ -9,37 +9,25 @@
 #include "PosixSource.h"
 #define KEEP_LOCKCLOSURE
 #include "Rts.h"
-#include "SchedAPI.h"
+
+#include "sm/Storage.h"
 #include "RtsUtils.h"
-#include "RtsFlags.h"
-#include "OSThreads.h"
-#include "Storage.h"
 #include "StgRun.h"
-#include "Hooks.h"
 #include "Schedule.h"
-#include "StgMiscClosures.h"
 #include "Interpreter.h"
 #include "Printer.h"
 #include "RtsSignals.h"
 #include "Sanity.h"
 #include "Stats.h"
 #include "STM.h"
-#include "Timer.h"
 #include "Prelude.h"
 #include "ThreadLabels.h"
-#include "LdvProfile.h"
 #include "Updates.h"
 #include "Proftimer.h"
 #include "ProfHeap.h"
-#if defined(GRAN) || defined(PARALLEL_HASKELL)
-# include "GranSimRts.h"
-# include "GranSim.h"
-# include "ParallelRts.h"
-# include "Parallel.h"
-# include "ParallelDebug.h"
-# include "FetchMe.h"
-# include "HLC.h"
-#endif
+#include "Weak.h"
+#include "eventlog/EventLog.h"
+#include "sm/GC.h" // waitForGcThreads, releaseGCThreads, N
 #include "Sparks.h"
 #include "Capability.h"
 #include "Task.h"
@@ -50,7 +38,8 @@
 #include "Trace.h"
 #include "RaiseAsync.h"
 #include "Threads.h"
-#include "ThrIOManager.h"
+#include "Timer.h"
+#include "ThreadPaused.h"
 
 #ifdef HAVE_SYS_TYPES_H
 #include <sys/types.h>
 #include <errno.h>
 #endif
 
-// Turn off inlining when debugging - it obfuscates things
-#ifdef DEBUG
-# undef  STATIC_INLINE
-# define STATIC_INLINE static
-#endif
-
 /* -----------------------------------------------------------------------------
  * Global variables
  * -------------------------------------------------------------------------- */
 
-#if defined(GRAN)
-
-StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
-/* rtsTime TimeOfNextEvent, EndOfTimeSlice;            now in GranSim.c */
-
-/* 
-   In GranSim we have a runnable and a blocked queue for each processor.
-   In order to minimise code changes new arrays run_queue_hds/tls
-   are created. run_queue_hd is then a short cut (macro) for
-   run_queue_hds[CurrentProc] (see GranSim.h).
-   -- HWL
-*/
-StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC];
-StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC];
-StgTSO *ccalling_threadss[MAX_PROC];
-/* We use the same global list of threads (all_threads) in GranSim as in
-   the std RTS (i.e. we are cheating). However, we don't use this list in
-   the GranSim specific code at the moment (so we are only potentially
-   cheating).  */
-
-#else /* !GRAN */
-
 #if !defined(THREADED_RTS)
 // Blocked/sleeping thrads
 StgTSO *blocked_queue_hd = NULL;
@@ -110,7 +71,6 @@ StgTSO *sleeping_queue = NULL;    // perhaps replace with a hash table?
  * LOCK: sched_mutex+capability, or all capabilities
  */
 StgTSO *blackhole_queue = NULL;
-#endif
 
 /* The blackhole_queue should be checked for threads to wake up.  See
  * Schedule.h for more thorough comment.
@@ -118,31 +78,24 @@ StgTSO *blackhole_queue = NULL;
  */
 rtsBool blackholes_need_checking = rtsFalse;
 
-/* Linked list of all threads.
- * Used for detecting garbage collected threads.
- * LOCK: sched_mutex+capability, or all capabilities
- */
-StgTSO *all_threads = NULL;
-
-/* flag set by signal handler to precipitate a context switch
- * LOCK: none (just an advisory flag)
+/* Set to true when the latest garbage collection failed to reclaim
+ * enough space, and the runtime should proceed to shut itself down in
+ * an orderly fashion (emitting profiling info etc.)
  */
-int context_switch = 0;
+rtsBool heap_overflow = rtsFalse;
 
 /* flag that tracks whether we have done any execution in this time slice.
  * LOCK: currently none, perhaps we should lock (but needs to be
  * updated in the fast path of the scheduler).
+ *
+ * NB. must be StgWord, we do xchg() on it.
  */
-nat recent_activity = ACTIVITY_YES;
+volatile StgWord recent_activity = ACTIVITY_YES;
 
 /* if this flag is set as well, give up execution
- * LOCK: none (changes once, from false->true)
+ * LOCK: none (changes monotonically)
  */
-rtsBool sched_state = SCHED_RUNNING;
-
-#if defined(GRAN)
-StgTSO *CurrentTSO;
-#endif
+volatile StgWord sched_state = SCHED_RUNNING;
 
 /*  This is used in `TSO.h' and gcc 2.96 insists that this variable actually 
  *  exists - earlier gccs apparently didn't.
@@ -165,12 +118,6 @@ rtsBool shutting_down_scheduler = rtsFalse;
 Mutex sched_mutex;
 #endif
 
-#if defined(PARALLEL_HASKELL)
-StgTSO *LastTSO;
-rtsTime TimeOfLastYield;
-rtsBool emitSchedule = rtsTrue;
-#endif
-
 #if !defined(mingw32_HOST_OS)
 #define FORKPROCESS_PRIMOP_SUPPORTED
 #endif
@@ -187,26 +134,20 @@ static Capability *schedule (Capability *initialCapability, Task *task);
 // scheduler clearer.
 //
 static void schedulePreLoop (void);
+static void scheduleFindWork (Capability *cap);
 #if defined(THREADED_RTS)
-static void schedulePushWork(Capability *cap, Task *task);
+static void scheduleYield (Capability **pcap, Task *task);
 #endif
 static void scheduleStartSignalHandlers (Capability *cap);
 static void scheduleCheckBlockedThreads (Capability *cap);
 static void scheduleCheckWakeupThreads(Capability *cap USED_IF_NOT_THREADS);
 static void scheduleCheckBlackHoles (Capability *cap);
 static void scheduleDetectDeadlock (Capability *cap, Task *task);
-#if defined(GRAN)
-static StgTSO *scheduleProcessEvent(rtsEvent *event);
-#endif
-#if defined(PARALLEL_HASKELL)
-static StgTSO *scheduleSendPendingMessages(void);
-static void scheduleActivateSpark(void);
-static rtsBool scheduleGetRemoteWork(rtsBool *receivedFinish);
-#endif
-#if defined(PAR) || defined(GRAN)
-static void scheduleGranParReport(void);
+static void schedulePushWork(Capability *cap, Task *task);
+#if defined(THREADED_RTS)
+static void scheduleActivateSpark(Capability *cap);
 #endif
-static void schedulePostRunThread(void);
+static void schedulePostRunThread(Capability *cap, StgTSO *t);
 static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
 static void scheduleHandleStackOverflow( Capability *cap, Task *task, 
                                         StgTSO *t);
@@ -222,6 +163,7 @@ static Capability *scheduleDoGC(Capability *cap, Task *task,
 static rtsBool checkBlackHoles(Capability *cap);
 
 static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
+static StgTSO *threadStackUnderflow(Task *task, StgTSO *tso);
 
 static void deleteThread (Capability *cap, StgTSO *tso);
 static void deleteAllThreads (Capability *cap);
@@ -230,11 +172,6 @@ static void deleteAllThreads (Capability *cap);
 static void deleteThread_(Capability *cap, StgTSO *tso);
 #endif
 
-#if defined(PARALLEL_HASKELL)
-StgTSO * createSparkThread(rtsSpark spark);
-StgTSO * activateSpark (rtsSpark spark);  
-#endif
-
 #ifdef DEBUG
 static char *whatNext_strs[] = {
   "(unknown)",
@@ -253,18 +190,8 @@ static char *whatNext_strs[] = {
 STATIC_INLINE void
 addToRunQueue( Capability *cap, StgTSO *t )
 {
-#if defined(PARALLEL_HASKELL)
-    if (RtsFlags.ParFlags.doFairScheduling) { 
-       // this does round-robin scheduling; good for concurrency
-       appendToRunQueue(cap,t);
-    } else {
-       // this does unfair scheduling; good for parallelism
-       pushOnRunQueue(cap,t);
-    }
-#else
     // this does round-robin scheduling; good for concurrency
     appendToRunQueue(cap,t);
-#endif
 }
 
 /* ---------------------------------------------------------------------------
@@ -284,6 +211,7 @@ addToRunQueue( Capability *cap, StgTSO *t )
      This revolves around the global event queue, which determines what 
      to do next. Therefore, it's more complicated than either the 
      concurrent or the parallel (GUM) setup.
+  This version has been entirely removed (JB 2008/08).
 
    GUM version:
      GUM iterates over incoming messages.
@@ -294,6 +222,12 @@ addToRunQueue( Capability *cap, StgTSO *t )
      (see PendingFetches).
      This is not the ugliest code you could imagine, but it's bloody close.
 
+  (JB 2008/08) This version was formerly indicated by a PP-Flag PAR,
+  now by PP-flag PARALLEL_HASKELL. The Eden RTS (in GHC-6.x) uses it,
+  as well as future GUM versions. This file has been refurbished to
+  only contain valid code, which is however incomplete, refers to
+  invalid includes etc.
+
    ------------------------------------------------------------------------ */
 
 static Capability *
@@ -302,16 +236,6 @@ schedule (Capability *initialCapability, Task *task)
   StgTSO *t;
   Capability *cap;
   StgThreadReturnCode ret;
-#if defined(GRAN)
-  rtsEvent *event;
-#elif defined(PARALLEL_HASKELL)
-  StgTSO *tso;
-  GlobalTaskId pe;
-  rtsBool receivedFinish = rtsFalse;
-# if defined(DEBUG)
-  nat tp_size, sp_size; // stats only
-# endif
-#endif
   nat prev_what_next;
   rtsBool ready_to_gc;
 #if defined(THREADED_RTS)
@@ -328,43 +252,20 @@ schedule (Capability *initialCapability, Task *task)
              "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
              task, initialCapability);
 
+  if (running_finalizers) {
+      errorBelch("error: a C finalizer called back into Haskell.\n"
+                 "   This was previously allowed, but is disallowed in GHC 6.10.2 and later.\n"
+                 "   To create finalizers that may call back into Haskell, use\n"
+                 "   Foreign.Concurrent.newForeignPtr instead of Foreign.newForeignPtr.");
+      stg_exit(EXIT_FAILURE);
+  }
+
   schedulePreLoop();
 
   // -----------------------------------------------------------
   // Scheduler loop starts here:
 
-#if defined(PARALLEL_HASKELL)
-#define TERMINATION_CONDITION        (!receivedFinish)
-#elif defined(GRAN)
-#define TERMINATION_CONDITION        ((event = get_next_event()) != (rtsEvent*)NULL) 
-#else
-#define TERMINATION_CONDITION        rtsTrue
-#endif
-
-  while (TERMINATION_CONDITION) {
-
-#if defined(GRAN)
-      /* Choose the processor with the next event */
-      CurrentProc = event->proc;
-      CurrentTSO = event->tso;
-#endif
-
-#if defined(THREADED_RTS)
-      if (first) {
-         // don't yield the first time, we want a chance to run this
-         // thread for a bit, even if there are others banging at the
-         // door.
-         first = rtsFalse;
-         ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
-      } else {
-         // Yield the capability to higher-priority tasks if necessary.
-         yieldCapability(&cap, task);
-      }
-#endif
-      
-#if defined(THREADED_RTS)
-      schedulePushWork(cap,task);
-#endif
+  while (1) {
 
     // Check whether we have re-entered the RTS from Haskell without
     // going via suspendThread()/resumeThread (i.e. a 'safe' foreign
@@ -419,7 +320,14 @@ schedule (Capability *initialCapability, Task *task)
 #endif
        /* scheduleDoGC() deletes all the threads */
        cap = scheduleDoGC(cap,task,rtsFalse);
-       break;
+
+        // after scheduleDoGC(), we must be shutting down.  Either some
+        // other Capability did the final GC, or we did it above,
+        // either way we can fall through to the SCHED_SHUTTING_DOWN
+        // case now.
+        ASSERT(sched_state == SCHED_SHUTTING_DOWN);
+        // fall through
+
     case SCHED_SHUTTING_DOWN:
        debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
        // If we are a worker, just exit.  If we're a bound thread
@@ -433,35 +341,14 @@ schedule (Capability *initialCapability, Task *task)
        barf("sched_state: %d", sched_state);
     }
 
-#if defined(THREADED_RTS)
-    // If the run queue is empty, take a spark and turn it into a thread.
-    {
-       if (emptyRunQueue(cap)) {
-           StgClosure *spark;
-           spark = findSpark(cap);
-           if (spark != NULL) {
-               debugTrace(DEBUG_sched,
-                          "turning spark of closure %p into a thread",
-                          (StgClosure *)spark);
-               createSparkThread(cap,spark);     
-           }
-       }
-    }
-#endif // THREADED_RTS
+    scheduleFindWork(cap);
 
-    scheduleStartSignalHandlers(cap);
-
-    // Only check the black holes here if we've nothing else to do.
-    // During normal execution, the black hole list only gets checked
-    // at GC time, to avoid repeatedly traversing this possibly long
-    // list each time around the scheduler.
-    if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
-
-    scheduleCheckWakeupThreads(cap);
-
-    scheduleCheckBlockedThreads(cap);
+    /* work pushing, currently relevant only for THREADED_RTS:
+       (pushes threads, wakes up idle capabilities for stealing) */
+    schedulePushWork(cap,task);
 
     scheduleDetectDeadlock(cap,task);
+
 #if defined(THREADED_RTS)
     cap = task->cap;    // reload cap, it might have changed
 #endif
@@ -474,54 +361,37 @@ schedule (Capability *initialCapability, Task *task)
     //
     // win32: might be here due to awaitEvent() being abandoned
     // as a result of a console event having been delivered.
-    if ( emptyRunQueue(cap) ) {
-#if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
-       ASSERT(sched_state >= SCHED_INTERRUPTING);
-#endif
-       continue; // nothing to do
+    
+#if defined(THREADED_RTS)
+    if (first) 
+    {
+    // XXX: ToDo
+    //     // don't yield the first time, we want a chance to run this
+    //     // thread for a bit, even if there are others banging at the
+    //     // door.
+    //     first = rtsFalse;
+    //     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     }
 
-#if defined(PARALLEL_HASKELL)
-    scheduleSendPendingMessages();
-    if (emptyRunQueue(cap) && scheduleActivateSpark()) 
-       continue;
-
-#if defined(SPARKS)
-    ASSERT(next_fish_to_send_at==0);  // i.e. no delayed fishes left!
+  yield:
+    scheduleYield(&cap,task);
+    if (emptyRunQueue(cap)) continue; // look for work again
 #endif
 
-    /* If we still have no work we need to send a FISH to get a spark
-       from another PE */
-    if (emptyRunQueue(cap)) {
-       if (!scheduleGetRemoteWork(&receivedFinish)) continue;
-       ASSERT(rtsFalse); // should not happen at the moment
-    }
-    // from here: non-empty run queue.
-    //  TODO: merge above case with this, only one call processMessages() !
-    if (PacketsWaiting()) {  /* process incoming messages, if
-                               any pending...  only in else
-                               because getRemoteWork waits for
-                               messages as well */
-       receivedFinish = processMessages();
+#if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
+    if ( emptyRunQueue(cap) ) {
+       ASSERT(sched_state >= SCHED_INTERRUPTING);
     }
 #endif
 
-#if defined(GRAN)
-    scheduleProcessEvent(event);
-#endif
-
     // 
     // Get a thread to run
     //
     t = popRunQueue(cap);
 
-#if defined(GRAN) || defined(PAR)
-    scheduleGranParReport(); // some kind of debuging output
-#else
     // Sanity check the thread we're about to run.  This can be
     // expensive if there is lots of thread switching going on...
     IF_DEBUG(sanity,checkTSO(t));
-#endif
 
 #if defined(THREADED_RTS)
     // Check whether we can run this thread in the current task.
@@ -555,19 +425,31 @@ schedule (Capability *initialCapability, Task *task)
     }
 #endif
 
-    cap->r.rCurrentTSO = t;
-    
+    // If we're shutting down, and this thread has not yet been
+    // killed, kill it now.  This sometimes happens when a finalizer
+    // thread is created by the final GC, or a thread previously
+    // in a foreign call returns.
+    if (sched_state >= SCHED_INTERRUPTING &&
+        !(t->what_next == ThreadComplete || t->what_next == ThreadKilled)) {
+        deleteThread(cap,t);
+    }
+
     /* context switches are initiated by the timer signal, unless
      * the user specified "context switch as often as possible", with
      * +RTS -C0
      */
     if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
        && !emptyThreadQueues(cap)) {
-       context_switch = 1;
+       cap->context_switch = 1;
     }
         
 run_thread:
 
+    // CurrentTSO is the thread to run.  t might be different if we
+    // loop back to run_thread, so make sure to set CurrentTSO after
+    // that.
+    cap->r.rCurrentTSO = t;
+
     debugTrace(DEBUG_sched, "-->> running thread %ld %s ...", 
                              (long)t->id, whatNext_strs[t->what_next]);
 
@@ -581,6 +463,7 @@ run_thread:
 
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     ASSERT(t->cap == cap);
+    ASSERT(t->bound ? t->bound->cap == cap : 1);
 
     prev_what_next = t->what_next;
 
@@ -591,14 +474,14 @@ run_thread:
 
     cap->in_haskell = rtsTrue;
 
-    dirtyTSO(t);
+    dirty_TSO(cap,t);
 
 #if defined(THREADED_RTS)
     if (recent_activity == ACTIVITY_DONE_GC) {
         // ACTIVITY_DONE_GC means we turned off the timer signal to
         // conserve power (see #1623).  Re-enable it here.
         nat prev;
-        prev = xchg(&recent_activity, ACTIVITY_YES);
+        prev = xchg((P_)&recent_activity, ACTIVITY_YES);
         if (prev == ACTIVITY_DONE_GC) {
             startTimer();
         }
@@ -607,6 +490,8 @@ run_thread:
     }
 #endif
 
+    postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
+
     switch (prev_what_next) {
        
     case ThreadKilled:
@@ -655,6 +540,8 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
+    postEvent (cap, EVENT_STOP_THREAD, t->id, ret);
+
 #if defined(THREADED_RTS)
     // If ret is ThreadBlocked, and this Task is bound to the TSO that
     // blocked, we are in limbo - the TSO is now owned by whatever it
@@ -666,7 +553,7 @@ run_thread:
        debugTrace(DEBUG_sched,
                   "--<< thread %lu (%s) stopped: blocked",
                   (unsigned long)t->id, whatNext_strs[t->what_next]);
-       continue;
+        goto yield;
     }
 #endif
 
@@ -681,7 +568,11 @@ run_thread:
     CCCS = CCS_SYSTEM;
 #endif
     
-    schedulePostRunThread();
+    schedulePostRunThread(cap,t);
+
+    if (ret != StackOverflow) {
+        t = threadStackUnderflow(task,t);
+    }
 
     ready_to_gc = rtsFalse;
 
@@ -718,9 +609,6 @@ run_thread:
       cap = scheduleDoGC(cap,task,rtsFalse);
     }
   } /* end of while() */
-
-  debugTrace(PAR_DEBUG_verbose,
-            "== Leaving schedule() after having received Finish");
 }
 
 /* ----------------------------------------------------------------------------
@@ -730,36 +618,103 @@ run_thread:
 static void
 schedulePreLoop(void)
 {
-#if defined(GRAN) 
-    /* set up first event to get things going */
-    /* ToDo: assign costs for system setup and init MainTSO ! */
-    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
-             ContinueThread, 
-             CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
-    
-    debugTrace (DEBUG_gran,
-               "GRAN: Init CurrentTSO (in schedule) = %p", 
-               CurrentTSO);
-    IF_DEBUG(gran, G_TSO(CurrentTSO, 5));
-    
-    if (RtsFlags.GranFlags.Light) {
-       /* Save current time; GranSim Light only */
-       CurrentTSO->gran.clock = CurrentTime[CurrentProc];
-    }      
+  // initialisation for scheduler - what cannot go into initScheduler()  
+}
+
+/* -----------------------------------------------------------------------------
+ * scheduleFindWork()
+ *
+ * Search for work to do, and handle messages from elsewhere.
+ * -------------------------------------------------------------------------- */
+
+static void
+scheduleFindWork (Capability *cap)
+{
+    scheduleStartSignalHandlers(cap);
+
+    // Only check the black holes here if we've nothing else to do.
+    // During normal execution, the black hole list only gets checked
+    // at GC time, to avoid repeatedly traversing this possibly long
+    // list each time around the scheduler.
+    if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
+
+    scheduleCheckWakeupThreads(cap);
+
+    scheduleCheckBlockedThreads(cap);
+
+#if defined(THREADED_RTS)
+    if (emptyRunQueue(cap)) { scheduleActivateSpark(cap); }
 #endif
 }
 
+#if defined(THREADED_RTS)
+STATIC_INLINE rtsBool
+shouldYieldCapability (Capability *cap, Task *task)
+{
+    // we need to yield this capability to someone else if..
+    //   - another thread is initiating a GC
+    //   - another Task is returning from a foreign call
+    //   - the thread at the head of the run queue cannot be run
+    //     by this Task (it is bound to another Task, or it is unbound
+    //     and this task it bound).
+    return (waiting_for_gc || 
+            cap->returning_tasks_hd != NULL ||
+            (!emptyRunQueue(cap) && (task->tso == NULL
+                                     ? cap->run_queue_hd->bound != NULL
+                                     : cap->run_queue_hd->bound != task)));
+}
+
+// This is the single place where a Task goes to sleep.  There are
+// two reasons it might need to sleep:
+//    - there are no threads to run
+//    - we need to yield this Capability to someone else 
+//      (see shouldYieldCapability())
+//
+// Careful: the scheduler loop is quite delicate.  Make sure you run
+// the tests in testsuite/concurrent (all ways) after modifying this,
+// and also check the benchmarks in nofib/parallel for regressions.
+
+static void
+scheduleYield (Capability **pcap, Task *task)
+{
+    Capability *cap = *pcap;
+
+    // if we have work, and we don't need to give up the Capability, continue.
+    if (!shouldYieldCapability(cap,task) && 
+        (!emptyRunQueue(cap) ||
+         !emptyWakeupQueue(cap) ||
+         blackholes_need_checking ||
+         sched_state >= SCHED_INTERRUPTING))
+        return;
+
+    // otherwise yield (sleep), and keep yielding if necessary.
+    do {
+        yieldCapability(&cap,task);
+    } 
+    while (shouldYieldCapability(cap,task));
+
+    // note there may still be no threads on the run queue at this
+    // point, the caller has to check.
+
+    *pcap = cap;
+    return;
+}
+#endif
+    
 /* -----------------------------------------------------------------------------
  * schedulePushWork()
  *
  * Push work to other Capabilities if we have some.
  * -------------------------------------------------------------------------- */
 
-#if defined(THREADED_RTS)
 static void
 schedulePushWork(Capability *cap USED_IF_THREADS, 
                 Task *task      USED_IF_THREADS)
 {
+  /* following code not for PARALLEL_HASKELL. I kept the call general,
+     future GUM versions might use pushing in a distributed setup */
+#if defined(THREADED_RTS)
+
     Capability *free_caps[n_capabilities], *cap0;
     nat i, n_free_caps;
 
@@ -768,9 +723,11 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
 
     // Check whether we have more threads on our run queue, or sparks
     // in our pool, that we could hand to another Capability.
-    if ((emptyRunQueue(cap) || cap->run_queue_hd->link == END_TSO_QUEUE)
-       && sparkPoolSizeCap(cap) < 2) {
-       return;
+    if (cap->run_queue_hd == END_TSO_QUEUE) {
+        if (sparkPoolSizeCap(cap) < 2) return;
+    } else {
+        if (cap->run_queue_hd->_link == END_TSO_QUEUE &&
+            sparkPoolSizeCap(cap) < 1) return;
     }
 
     // First grab as many free Capabilities as we can.
@@ -802,32 +759,40 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
        StgTSO *prev, *t, *next;
        rtsBool pushed_to_all;
 
-       debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps);
+       debugTrace(DEBUG_sched, 
+                  "cap %d: %s and %d free capabilities, sharing...", 
+                  cap->no, 
+                  (!emptyRunQueue(cap) && cap->run_queue_hd->_link != END_TSO_QUEUE)?
+                  "excess threads on run queue":"sparks to share (>=2)",
+                  n_free_caps);
 
        i = 0;
        pushed_to_all = rtsFalse;
 
        if (cap->run_queue_hd != END_TSO_QUEUE) {
            prev = cap->run_queue_hd;
-           t = prev->link;
-           prev->link = END_TSO_QUEUE;
+           t = prev->_link;
+           prev->_link = END_TSO_QUEUE;
            for (; t != END_TSO_QUEUE; t = next) {
-               next = t->link;
-               t->link = END_TSO_QUEUE;
+               next = t->_link;
+               t->_link = END_TSO_QUEUE;
                if (t->what_next == ThreadRelocated
                    || t->bound == task // don't move my bound thread
                    || tsoLocked(t)) {  // don't move a locked thread
-                   prev->link = t;
+                   setTSOLink(cap, prev, t);
                    prev = t;
                } else if (i == n_free_caps) {
                    pushed_to_all = rtsTrue;
                    i = 0;
                    // keep one for us
-                   prev->link = t;
+                   setTSOLink(cap, prev, t);
                    prev = t;
                } else {
                    debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
                    appendToRunQueue(free_caps[i],t);
+
+        postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
+
                    if (t->bound) { t->bound->cap = free_caps[i]; }
                    t->cap = free_caps[i];
                    i++;
@@ -836,6 +801,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
            cap->run_queue_tl = prev;
        }
 
+#ifdef SPARK_PUSHING
+       /* JB I left this code in place, it would work but is not necessary */
+
        // If there are some free capabilities that we didn't push any
        // threads to, then try to push a spark to each one.
        if (!pushed_to_all) {
@@ -843,24 +811,30 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
            // i is the next free capability to push to
            for (; i < n_free_caps; i++) {
                if (emptySparkPoolCap(free_caps[i])) {
-                   spark = findSpark(cap);
+                   spark = tryStealSpark(cap->sparks);
                    if (spark != NULL) {
                        debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
+
+      postEvent(free_caps[i], EVENT_STEAL_SPARK, t->id, cap->no);
+
                        newSpark(&(free_caps[i]->r), spark);
                    }
                }
            }
        }
+#endif /* SPARK_PUSHING */
 
        // release the capabilities
        for (i = 0; i < n_free_caps; i++) {
            task->cap = free_caps[i];
-           releaseCapability(free_caps[i]);
+           releaseAndWakeupCapability(free_caps[i]);
        }
     }
     task->cap = cap; // reset to point to our Capability.
+
+#endif /* THREADED_RTS */
+
 }
-#endif
 
 /* ----------------------------------------------------------------------------
  * Start any pending signal handlers
@@ -919,7 +893,7 @@ scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS)
            cap->run_queue_hd = cap->wakeup_queue_hd;
            cap->run_queue_tl = cap->wakeup_queue_tl;
        } else {
-           cap->run_queue_tl->link = cap->wakeup_queue_hd;
+           setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd);
            cap->run_queue_tl = cap->wakeup_queue_tl;
        }
        cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
@@ -938,8 +912,13 @@ scheduleCheckBlackHoles (Capability *cap)
     {
        ACQUIRE_LOCK(&sched_mutex);
        if ( blackholes_need_checking ) {
-           checkBlackHoles(cap);
            blackholes_need_checking = rtsFalse;
+            // important that we reset the flag *before* checking the
+            // blackhole queue, otherwise we could get deadlock.  This
+            // happens as follows: we wake up a thread that
+            // immediately runs on another Capability, blocks on a
+            // blackhole, and then we reset the blackholes_need_checking flag.
+           checkBlackHoles(cap);
        }
        RELEASE_LOCK(&sched_mutex);
     }
@@ -952,12 +931,6 @@ scheduleCheckBlackHoles (Capability *cap)
 static void
 scheduleDetectDeadlock (Capability *cap, Task *task)
 {
-
-#if defined(PARALLEL_HASKELL)
-    // ToDo: add deadlock detection in GUM (similar to THREADED_RTS) -- HWL
-    return;
-#endif
-
     /* 
      * Detect deadlock: when we have no threads to run, there are no
      * threads blocked, waiting for I/O, or sleeping, and all the
@@ -983,12 +956,11 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
        // they are unreachable and will therefore be sent an
        // exception.  Any threads thus released will be immediately
        // runnable.
-       cap = scheduleDoGC (cap, task, rtsTrue/*force  major GC*/);
+       cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
+        // when force_major == rtsTrue. scheduleDoGC sets
+        // recent_activity to ACTIVITY_DONE_GC and turns off the timer
+        // signal.
 
 
-        // disable timer signals (see #1623)
-        stopTimer();
-       
        if ( !emptyRunQueue(cap) ) return;
 
 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
        if ( !emptyRunQueue(cap) ) return;
 
 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
@@ -1008,6 +980,8 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
 
            // either we have threads to run, or we were interrupted:
            ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
 
            // either we have threads to run, or we were interrupted:
            ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
+
+            return;
        }
 #endif
 
        }
 #endif
 
@@ -1022,7 +996,7 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
            case BlockedOnException:
            case BlockedOnMVar:
                throwToSingleThreaded(cap, task->tso, 
            case BlockedOnException:
            case BlockedOnMVar:
                throwToSingleThreaded(cap, task->tso, 
-                                     (StgClosure *)NonTermination_closure);
+                                     (StgClosure *)nonTermination_closure);
                return;
            default:
                barf("deadlock: main thread blocked in a strange way");
                return;
            default:
                barf("deadlock: main thread blocked in a strange way");
@@ -1033,164 +1007,15 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
     }
 }
 
     }
- * Process an event (GRAN only)
- * ------------------------------------------------------------------------- */
-
-#if defined(GRAN)
-static StgTSO *
-scheduleProcessEvent(rtsEvent *event)
-{
-    StgTSO *t;
-
-    if (RtsFlags.GranFlags.Light)
-      GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
-
-    /* adjust time based on time-stamp */
-    if (event->time > CurrentTime[CurrentProc] &&
-        event->evttype != ContinueThread)
-      CurrentTime[CurrentProc] = event->time;
-    
-    /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
-    if (!RtsFlags.GranFlags.Light)
-      handleIdlePEs();
-
-    IF_DEBUG(gran, debugBelch("GRAN: switch by event-type\n"));
-
-    /* main event dispatcher in GranSim */
-    switch (event->evttype) {
-      /* Should just be continuing execution */
-    case ContinueThread:
-      IF_DEBUG(gran, debugBelch("GRAN: doing ContinueThread\n"));
-      /* ToDo: check assertion
-      ASSERT(run_queue_hd != (StgTSO*)NULL &&
-            run_queue_hd != END_TSO_QUEUE);
-      */
-      /* Ignore ContinueThreads for fetching threads (if synchr comm) */
-      if (!RtsFlags.GranFlags.DoAsyncFetch &&
-         procStatus[CurrentProc]==Fetching) {
-       debugBelch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]\n",
-             CurrentTSO->id, CurrentTSO, CurrentProc);
-       goto next_thread;
-      }        
-      /* Ignore ContinueThreads for completed threads */
-      if (CurrentTSO->what_next == ThreadComplete) {
-       debugBelch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)\n", 
-             CurrentTSO->id, CurrentTSO, CurrentProc);
-       goto next_thread;
-      }        
-      /* Ignore ContinueThreads for threads that are being migrated */
-      if (PROCS(CurrentTSO)==Nowhere) { 
-       debugBelch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)\n",
-             CurrentTSO->id, CurrentTSO, CurrentProc);
-       goto next_thread;
-      }
-      /* The thread should be at the beginning of the run queue */
-      if (CurrentTSO!=run_queue_hds[CurrentProc]) { 
-       debugBelch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread\n",
-             CurrentTSO->id, CurrentTSO, CurrentProc);
-       break; // run the thread anyway
-      }
-      /*
-      new_event(proc, proc, CurrentTime[proc],
-               FindWork,
-               (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
-      goto next_thread; 
-      */ /* Catches superfluous CONTINUEs -- should be unnecessary */
-      break; // now actually run the thread; DaH Qu'vam yImuHbej 
-
-    case FetchNode:
-      do_the_fetchnode(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case GlobalBlock:
-      do_the_globalblock(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case FetchReply:
-      do_the_fetchreply(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case UnblockThread:   /* Move from the blocked queue to the tail of */
-      do_the_unblock(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case ResumeThread:  /* Move from the blocked queue to the tail of */
-      /* the runnable queue ( i.e. Qu' SImqa'lu') */ 
-      event->tso->gran.blocktime += 
-       CurrentTime[CurrentProc] - event->tso->gran.blockedat;
-      do_the_startthread(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case StartThread:
-      do_the_startthread(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case MoveThread:
-      do_the_movethread(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case MoveSpark:
-      do_the_movespark(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case FindWork:
-      do_the_findwork(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    default:
-      barf("Illegal event type %u\n", event->evttype);
-    }  /* switch */
-    
-    /* This point was scheduler_loop in the old RTS */
-
-    IF_DEBUG(gran, debugBelch("GRAN: after main switch\n"));
-
-    TimeOfLastEvent = CurrentTime[CurrentProc];
-    TimeOfNextEvent = get_time_of_next_event();
-    IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
-    // CurrentTSO = ThreadQueueHd;
-
-    IF_DEBUG(gran, debugBelch("GRAN: time of next event is: %ld\n", 
-                        TimeOfNextEvent));
-
-    if (RtsFlags.GranFlags.Light) 
-      GranSimLight_leave_system(event, &ActiveTSO); 
-
-    EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
-
-    IF_DEBUG(gran, 
-            debugBelch("GRAN: end of time-slice is %#lx\n", EndOfTimeSlice));
-
-    /* in a GranSim setup the TSO stays on the run queue */
-    t = CurrentTSO;
-    /* Take a thread from the run queue. */
-    POP_RUN_QUEUE(t); // take_off_run_queue(t);
-
-    IF_DEBUG(gran, 
-            debugBelch("GRAN: About to run current thread, which is\n");
-            G_TSO(t,5));
-
-    context_switch = 0; // turned on via GranYield, checking events and time slice
-
-    IF_DEBUG(gran, 
-            DumpGranEvent(GR_SCHEDULE, t));
-
-    procStatus[CurrentProc] = Busy;
-}
-#endif // GRAN
 
 /* ----------------------------------------------------------------------------
  * Send pending messages (PARALLEL_HASKELL only)
  * ------------------------------------------------------------------------- */
 
 #if defined(PARALLEL_HASKELL)
 
 /* ----------------------------------------------------------------------------
  * Send pending messages (PARALLEL_HASKELL only)
  * ------------------------------------------------------------------------- */
 
 #if defined(PARALLEL_HASKELL)
-static StgTSO *
+static void
 scheduleSendPendingMessages(void)
 {
 scheduleSendPendingMessages(void)
 {
-    StgSparkPool *pool;
-    rtsSpark spark;
-    StgTSO *t;
 
 # if defined(PAR) // global Mem.Mgmt., omit for now
     if (PendingFetches != END_BF_QUEUE) {
 
 # if defined(PAR) // global Mem.Mgmt., omit for now
     if (PendingFetches != END_BF_QUEUE) {
@@ -1207,339 +1032,54 @@ scheduleSendPendingMessages(void)
 #endif
 
 /* ----------------------------------------------------------------------------
 #endif
 
 /* ----------------------------------------------------------------------------
- * Activate spark threads (PARALLEL_HASKELL only)
- * ------------------------------------------------------------------------- */
-
-#if defined(PARALLEL_HASKELL)
-static void
-scheduleActivateSpark(void)
-{
-#if defined(SPARKS)
-  ASSERT(emptyRunQueue());
-/* We get here if the run queue is empty and want some work.
-   We try to turn a spark into a thread, and add it to the run queue,
-   from where it will be picked up in the next iteration of the scheduler
-   loop.
-*/
-
-      /* :-[  no local threads => look out for local sparks */
-      /* the spark pool for the current PE */
-      pool = &(cap.r.rSparks); // JB: cap = (old) MainCap
-      if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
-         pool->hd < pool->tl) {
-       /* 
-        * ToDo: add GC code check that we really have enough heap afterwards!!
-        * Old comment:
-        * If we're here (no runnable threads) and we have pending
-        * sparks, we must have a space problem.  Get enough space
-        * to turn one of those pending sparks into a
-        * thread... 
-        */
-
-       spark = findSpark(rtsFalse);            /* get a spark */
-       if (spark != (rtsSpark) NULL) {
-         tso = createThreadFromSpark(spark);       /* turn the spark into a thread */
-         IF_PAR_DEBUG(fish, // schedule,
-                      debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n",
-                            tso->id, tso, advisory_thread_count));
-
-         if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
-           IF_PAR_DEBUG(fish, // schedule,
-                        debugBelch("==^^ failed to create thread from spark @ %lx\n",
-                            spark));
-           return rtsFalse; /* failed to generate a thread */
-         }                  /* otherwise fall through & pick-up new tso */
-       } else {
-         IF_PAR_DEBUG(fish, // schedule,
-                      debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n", 
-                            spark_queue_len(pool)));
-         return rtsFalse;  /* failed to generate a thread */
-       }
-       return rtsTrue;  /* success in generating a thread */
-  } else { /* no more threads permitted or pool empty */
-    return rtsFalse;  /* failed to generateThread */
-  }
-#else
-  tso = NULL; // avoid compiler warning only
-  return rtsFalse;  /* dummy in non-PAR setup */
-#endif // SPARKS
-}
-#endif // PARALLEL_HASKELL
-
-/* ----------------------------------------------------------------------------
- * Get work from a remote node (PARALLEL_HASKELL only)
- * ------------------------------------------------------------------------- */
-    
-#if defined(PARALLEL_HASKELL)
-static rtsBool
-scheduleGetRemoteWork(rtsBool *receivedFinish)
-{
-  ASSERT(emptyRunQueue());
-
-  if (RtsFlags.ParFlags.BufferTime) {
-       IF_PAR_DEBUG(verbose, 
-               debugBelch("...send all pending data,"));
-        {
-         nat i;
-         for (i=1; i<=nPEs; i++)
-           sendImmediately(i); // send all messages away immediately
-       }
-  }
-# ifndef SPARKS
-       //++EDEN++ idle() , i.e. send all buffers, wait for work
-       // suppress fishing in EDEN... just look for incoming messages
-       // (blocking receive)
-  IF_PAR_DEBUG(verbose, 
-              debugBelch("...wait for incoming messages...\n"));
-  *receivedFinish = processMessages(); // blocking receive...
-
-       // and reenter scheduling loop after having received something
-       // (return rtsFalse below)
-
-# else /* activate SPARKS machinery */
-/* We get here, if we have no work, tried to activate a local spark, but still
-   have no work. We try to get a remote spark, by sending a FISH message.
-   Thread migration should be added here, and triggered when a sequence of 
-   fishes returns without work. */
-       delay = (RtsFlags.ParFlags.fishDelay!=0ll ? RtsFlags.ParFlags.fishDelay : 0ll);
-
-      /* =8-[  no local sparks => look for work on other PEs */
-       /*
-        * We really have absolutely no work.  Send out a fish
-        * (there may be some out there already), and wait for
-        * something to arrive.  We clearly can't run any threads
-        * until a SCHEDULE or RESUME arrives, and so that's what
-        * we're hoping to see.  (Of course, we still have to
-        * respond to other types of messages.)
-        */
-       rtsTime now = msTime() /*CURRENT_TIME*/;
-       IF_PAR_DEBUG(verbose, 
-                    debugBelch("--  now=%ld\n", now));
-       IF_PAR_DEBUG(fish, // verbose,
-            if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
-                (last_fish_arrived_at!=0 &&
-                 last_fish_arrived_at+delay > now)) {
-              debugBelch("--$$ <%llu> delaying FISH until %llu (last fish %llu, delay %llu)\n",
-                    now, last_fish_arrived_at+delay, 
-                    last_fish_arrived_at,
-                    delay);
-            });
-  
-       if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
-           advisory_thread_count < RtsFlags.ParFlags.maxThreads) { // send a FISH, but when?
-         if (last_fish_arrived_at==0 ||
-             (last_fish_arrived_at+delay <= now)) {           // send FISH now!
-           /* outstandingFishes is set in sendFish, processFish;
-              avoid flooding system with fishes via delay */
-    next_fish_to_send_at = 0;  
-  } else {
-    /* ToDo: this should be done in the main scheduling loop to avoid the
-             busy wait here; not so bad if fish delay is very small  */
-    int iq = 0; // DEBUGGING -- HWL
-    next_fish_to_send_at = last_fish_arrived_at+delay; // remember when to send  
-    /* send a fish when ready, but process messages that arrive in the meantime */
-    do {
-      if (PacketsWaiting()) {
-        iq++; // DEBUGGING
-        *receivedFinish = processMessages();
-      }
-      now = msTime();
-    } while (!*receivedFinish || now<next_fish_to_send_at);
-    // JB: This means the fish could become obsolete, if we receive
-    // work. Better check for work again? 
-    // last line: while (!receivedFinish || !haveWork || now<...)
-    // next line: if (receivedFinish || haveWork )
-
-    if (*receivedFinish) // no need to send a FISH if we are finishing anyway
-      return rtsFalse;  // NB: this will leave scheduler loop
-                       // immediately after return!
-                         
-    IF_PAR_DEBUG(fish, // verbose,
-              debugBelch("--$$ <%llu> sent delayed fish (%d processMessages); active/total threads=%d/%d\n",now,iq,run_queue_len(),advisory_thread_count));
-
-  }
-
-    // JB: IMHO, this should all be hidden inside sendFish(...)
-    /* pe = choosePE(); 
-       sendFish(pe, thisPE, NEW_FISH_AGE, NEW_FISH_HISTORY, 
-                NEW_FISH_HUNGER);
-
-    // Global statistics: count no. of fishes
-    if (RtsFlags.ParFlags.ParStats.Global &&
-         RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
-          globalParStats.tot_fish_mess++;
-          }
-    */ 
-
-  /* delayed fishes must have been sent by now! */
-  next_fish_to_send_at = 0;  
-  }
-      
-  *receivedFinish = processMessages();
-# endif /* SPARKS */
-
- return rtsFalse;
- /* NB: this function always returns rtsFalse, meaning the scheduler
-    loop continues with the next iteration; 
-    rationale: 
-      return code means success in finding work; we enter this function
-      if there is no local work, thus have to send a fish which takes
-      time until it arrives with work; in the meantime we should process
-      messages in the main loop;
- */
-}
-#endif // PARALLEL_HASKELL
-
-/* ----------------------------------------------------------------------------
- * PAR/GRAN: Report stats & debugging info(?)
+ * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS)
  * ------------------------------------------------------------------------- */
 
-#if defined(PAR) || defined(GRAN)
+#if defined(THREADED_RTS)
 static void
-scheduleGranParReport(void)
+scheduleActivateSpark(Capability *cap)
 {
-  ASSERT(run_queue_hd != END_TSO_QUEUE);
-
-  /* Take a thread from the run queue, if we have work */
-  POP_RUN_QUEUE(t);  // take_off_run_queue(END_TSO_QUEUE);
-
-    /* If this TSO has got its outport closed in the meantime, 
-     *   it mustn't be run. Instead, we have to clean it up as if it was finished.
-     * It has to be marked as TH_DEAD for this purpose.
-     * If it is TH_TERM instead, it is supposed to have finished in the normal way.
-
-JB: TODO: investigate wether state change field could be nuked
-     entirely and replaced by the normal tso state (whatnext
-     field). All we want to do is to kill tsos from outside.
-     */
-
-    /* ToDo: write something to the log-file
-    if (RTSflags.ParFlags.granSimStats && !sameThread)
-        DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
-
-    CurrentTSO = t;
-    */
-    /* the spark pool for the current PE */
-    pool = &(cap.r.rSparks); //  cap = (old) MainCap
-
-    IF_DEBUG(scheduler, 
-            debugBelch("--=^ %d threads, %d sparks on [%#x]\n", 
-                  run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
-
-    IF_PAR_DEBUG(fish,
-            debugBelch("--=^ %d threads, %d sparks on [%#x]\n", 
-                  run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
-
-    if (RtsFlags.ParFlags.ParStats.Full && 
-       (t->par.sparkname != (StgInt)0) && // only log spark generated threads
-       (emitSchedule || // forced emit
-         (t && LastTSO && t->id != LastTSO->id))) {
-      /* 
-        we are running a different TSO, so write a schedule event to log file
-        NB: If we use fair scheduling we also have to write  a deschedule 
-            event for LastTSO; with unfair scheduling we know that the
-            previous tso has blocked whenever we switch to another tso, so
-            we don't need it in GUM for now
-      */
-      IF_PAR_DEBUG(fish, // schedule,
-                  debugBelch("____ scheduling spark generated thread %d (%lx) (%lx) via a forced emit\n",t->id,t,t->par.sparkname));
-
-      DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
-                      GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
-      emitSchedule = rtsFalse;
+    if (anySparks())
+    {
+        createSparkThread(cap);
+        debugTrace(DEBUG_sched, "creating a spark thread");
     }
     }
-}     
-#endif
+}
+#endif // PARALLEL_HASKELL || THREADED_RTS
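The new scheduleActivateSpark only spawns a spark-running thread when the spark pools are non-empty. A rough standalone model of that decision (names invented, not part of the patch; it ignores the run-queue checks done by the caller):

    #include <stdbool.h>

    struct cap_model { int pending_sparks; };   /* stands in for the spark pool */

    /* Only turn a spark into a real thread when there is a spark to run, so an
       otherwise idle capability does not create a worker that finds no work. */
    static bool worth_activating_spark(const struct cap_model *c)
    {
        return c->pending_sparks > 0;
    }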
 
 /* ----------------------------------------------------------------------------
  * After running a thread...
  * ------------------------------------------------------------------------- */
 
 static void
 
 /* ----------------------------------------------------------------------------
  * After running a thread...
  * ------------------------------------------------------------------------- */
 
 static void
-schedulePostRunThread(void)
+schedulePostRunThread (Capability *cap, StgTSO *t)
 {
 {
-#if defined(PAR)
-    /* HACK 675: if the last thread didn't yield, make sure to print a 
-       SCHEDULE event to the log file when StgRunning the next thread, even
-       if it is the same one as before */
-    LastTSO = t; 
-    TimeOfLastYield = CURRENT_TIME;
-#endif
+    // We have to be able to catch transactions that are in an
+    // infinite loop as a result of seeing an inconsistent view of
+    // memory, e.g. 
+    //
+    //   atomically $ do
+    //       [a,b] <- mapM readTVar [ta,tb]
+    //       when (a == b) loop
+    //
+    // and a is never equal to b given a consistent view of memory.
+    //
+    if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
+        if (!stmValidateNestOfTransactions (t -> trec)) {
+            debugTrace(DEBUG_sched | DEBUG_stm,
+                       "trec %p found wasting its time", t);
+            
+            // strip the stack back to the
+            // ATOMICALLY_FRAME, aborting the (nested)
+            // transaction, and saving the stack of any
+            // partially-evaluated thunks on the heap.
+            throwToSingleThreaded_(cap, t, NULL, rtsTrue);
+            
+            ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
+        }
+    }
 
   /* some statistics gathering in the parallel case */
 
   /* some statistics gathering in the parallel case */
-
-#if defined(GRAN) || defined(PAR) || defined(EDEN)
-  switch (ret) {
-    case HeapOverflow:
-# if defined(GRAN)
-      IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t));
-      globalGranStats.tot_heapover++;
-# elif defined(PAR)
-      globalParStats.tot_heapover++;
-# endif
-      break;
-
-     case StackOverflow:
-# if defined(GRAN)
-      IF_DEBUG(gran, 
-              DumpGranEvent(GR_DESCHEDULE, t));
-      globalGranStats.tot_stackover++;
-# elif defined(PAR)
-      // IF_DEBUG(par, 
-      // DumpGranEvent(GR_DESCHEDULE, t);
-      globalParStats.tot_stackover++;
-# endif
-      break;
-
-    case ThreadYielding:
-# if defined(GRAN)
-      IF_DEBUG(gran, 
-              DumpGranEvent(GR_DESCHEDULE, t));
-      globalGranStats.tot_yields++;
-# elif defined(PAR)
-      // IF_DEBUG(par, 
-      // DumpGranEvent(GR_DESCHEDULE, t);
-      globalParStats.tot_yields++;
-# endif
-      break; 
-
-    case ThreadBlocked:
-# if defined(GRAN)
-       debugTrace(DEBUG_sched, 
-                  "--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ", 
-                  t->id, t, whatNext_strs[t->what_next], t->block_info.closure, 
-                  (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
-              if (t->block_info.closure!=(StgClosure*)NULL)
-                print_bq(t->block_info.closure);
-              debugBelch("\n"));
-
-      // ??? needed; should emit block before
-      IF_DEBUG(gran, 
-              DumpGranEvent(GR_DESCHEDULE, t)); 
-      prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
-      /*
-       ngoq Dogh!
-      ASSERT(procStatus[CurrentProc]==Busy || 
-             ((procStatus[CurrentProc]==Fetching) && 
-             (t->block_info.closure!=(StgClosure*)NULL)));
-      if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
-         !(!RtsFlags.GranFlags.DoAsyncFetch &&
-           procStatus[CurrentProc]==Fetching)) 
-       procStatus[CurrentProc] = Idle;
-      */
-# elif defined(PAR)
-//++PAR++  blockThread() writes the event (change?)
-# endif
-    break;
-
-  case ThreadFinished:
-    break;
-
-  default:
-    barf("parGlobalStats: unknown return code");
-    break;
-    }
-#endif
 }
 
 /* -----------------------------------------------------------------------------
 }
 
 /* -----------------------------------------------------------------------------
@@ -1618,28 +1158,15 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
     }
     
     debugTrace(DEBUG_sched,
     }
     
     debugTrace(DEBUG_sched,
-              "--<< thread %ld (%s) stopped: HeapOverflow\n", 
+              "--<< thread %ld (%s) stopped: HeapOverflow",
               (long)t->id, whatNext_strs[t->what_next]);
 
               (long)t->id, whatNext_strs[t->what_next]);
 
-#if defined(GRAN)
-    ASSERT(!is_on_queue(t,CurrentProc));
-#elif defined(PARALLEL_HASKELL)
-    /* Currently we emit a DESCHEDULE event before GC in GUM.
-       ToDo: either add separate event to distinguish SYSTEM time from rest
-       or just nuke this DESCHEDULE (and the following SCHEDULE) */
-    if (0 && RtsFlags.ParFlags.ParStats.Full) {
-       DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
-                        GR_DESCHEDULE, t, (StgClosure *)NULL, 0, 0);
-       emitSchedule = rtsTrue;
-    }
-#endif
-      
-    if (context_switch) {
+    if (cap->r.rHpLim == NULL || cap->context_switch) {
         // Sometimes we miss a context switch, e.g. when calling
         // primitives in a tight loop, MAYBE_GC() doesn't check the
         // context switch flag, and we end up waiting for a GC.
         // See #1984, and concurrent/should_run/1984
         // Sometimes we miss a context switch, e.g. when calling
         // primitives in a tight loop, MAYBE_GC() doesn't check the
         // context switch flag, and we end up waiting for a GC.
         // See #1984, and concurrent/should_run/1984
-        context_switch = 0;
+        cap->context_switch = 0;
         addToRunQueue(cap,t);
     } else {
         pushOnRunQueue(cap,t);
         addToRunQueue(cap,t);
     } else {
         pushOnRunQueue(cap,t);
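The two calls differ in queue placement: addToRunQueue is essentially an append to the back of the run queue (the thread is treated as having used its timeslice), while pushOnRunQueue puts it back on the front so it resumes as soon as the GC is done. A minimal sketch of the two policies on a singly linked queue (invented names, not the RTS queue code):

    #include <stddef.h>

    typedef struct tso_model { struct tso_model *link; } tso_model;
    typedef struct { tso_model *head, *tail; } run_queue_model;

    /* front of the queue: run again as soon as possible */
    static void push_front(run_queue_model *q, tso_model *t)
    {
        t->link = q->head;
        q->head = t;
        if (q->tail == NULL) q->tail = t;
    }

    /* back of the queue: round-robin fairness after a consumed timeslice */
    static void append_back(run_queue_model *q, tso_model *t)
    {
        t->link = NULL;
        if (q->tail != NULL) q->tail->link = t; else q->head = t;
        q->tail = t;
    }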
@@ -1689,7 +1216,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
     // the CPU because the tick always arrives during GC).  This way
     // penalises threads that do a lot of allocation, but that seems
     // better than the alternative.
     // the CPU because the tick always arrives during GC).  This way
     // penalises threads that do a lot of allocation, but that seems
     // better than the alternative.
-    context_switch = 0;
+    cap->context_switch = 0;
     
     /* put the thread back on the run queue.  Then, if we're ready to
      * GC, check whether this is the last task to stop.  If so, wake
     
     /* put the thread back on the run queue.  Then, if we're ready to
      * GC, check whether this is the last task to stop.  If so, wake
@@ -1711,7 +1238,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
     IF_DEBUG(sanity,
             //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
             checkTSO(t));
     IF_DEBUG(sanity,
             //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
             checkTSO(t));
-    ASSERT(t->link == END_TSO_QUEUE);
+    ASSERT(t->_link == END_TSO_QUEUE);
     
     // Shortcut if we're just switching evaluators: don't bother
     // doing stack squeezing (which can be expensive), just run the
     
     // Shortcut if we're just switching evaluators: don't bother
     // doing stack squeezing (which can be expensive), just run the
@@ -1719,28 +1246,9 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
     if (t->what_next != prev_what_next) {
        return rtsTrue;
     }
     if (t->what_next != prev_what_next) {
        return rtsTrue;
     }
-    
-#if defined(GRAN)
-    ASSERT(!is_on_queue(t,CurrentProc));
-      
-    IF_DEBUG(sanity,
-            //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
-            checkThreadQsSanity(rtsTrue));
-
-#endif
 
     addToRunQueue(cap,t);
 
 
     addToRunQueue(cap,t);
 
-#if defined(GRAN)
-    /* add a ContinueThread event to actually process the thread */
-    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
-             ContinueThread,
-             t, (StgClosure*)NULL, (rtsSpark*)NULL);
-    IF_GRAN_DEBUG(bq, 
-                 debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n");
-                 G_EVENTQ(0);
-                 G_CURR_THREADQ(0));
-#endif
     return rtsFalse;
 }
 
     return rtsFalse;
 }
 
@@ -1750,47 +1258,11 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
 
 static void
 scheduleHandleThreadBlocked( StgTSO *t
 
 static void
 scheduleHandleThreadBlocked( StgTSO *t
-#if !defined(GRAN) && !defined(DEBUG)
+#if !defined(DEBUG)
     STG_UNUSED
 #endif
     )
 {
     STG_UNUSED
 #endif
     )
 {
-#if defined(GRAN)
-    IF_DEBUG(scheduler,
-            debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: \n", 
-                       t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
-            if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
-    
-    // ??? needed; should emit block before
-    IF_DEBUG(gran, 
-            DumpGranEvent(GR_DESCHEDULE, t)); 
-    prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
-    /*
-      ngoq Dogh!
-      ASSERT(procStatus[CurrentProc]==Busy || 
-      ((procStatus[CurrentProc]==Fetching) && 
-      (t->block_info.closure!=(StgClosure*)NULL)));
-      if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
-      !(!RtsFlags.GranFlags.DoAsyncFetch &&
-      procStatus[CurrentProc]==Fetching)) 
-      procStatus[CurrentProc] = Idle;
-    */
-#elif defined(PAR)
-    IF_DEBUG(scheduler,
-            debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: \n", 
-                       t->id, t, whatNext_strs[t->what_next], t->block_info.closure));
-    IF_PAR_DEBUG(bq,
-                
-                if (t->block_info.closure!=(StgClosure*)NULL) 
-                print_bq(t->block_info.closure));
-    
-    /* Send a fetch (if BlockedOnGA) and dump event to log file */
-    blockThread(t);
-    
-    /* whatever we schedule next, we must log that schedule */
-    emitSchedule = rtsTrue;
-    
-#else /* !GRAN */
 
       // We don't need to do anything.  The thread is blocked, and it
       // has tidied up its stack and placed itself on whatever queue
 
       // We don't need to do anything.  The thread is blocked, and it
       // has tidied up its stack and placed itself on whatever queue
@@ -1813,12 +1285,6 @@ scheduleHandleThreadBlocked( StgTSO *t
        debugTraceEnd();
     }
 #endif
        debugTraceEnd();
     }
 #endif
-    
-    /* Only for dumping event to log file 
-       ToDo: do I need this in GranSim, too?
-       blockThread(t);
-    */
-#endif
 }
 
 /* -----------------------------------------------------------------------------
 }
 
 /* -----------------------------------------------------------------------------
@@ -1837,47 +1303,11 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
     debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished", 
               (unsigned long)t->id, whatNext_strs[t->what_next]);
 
     debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished", 
               (unsigned long)t->id, whatNext_strs[t->what_next]);
 
-#if defined(GRAN)
-      endThread(t, CurrentProc); // clean-up the thread
-#elif defined(PARALLEL_HASKELL)
-      /* For now all are advisory -- HWL */
-      //if(t->priority==AdvisoryPriority) ??
-      advisory_thread_count--; // JB: Caution with this counter, buggy!
-      
-# if defined(DIST)
-      if(t->dist.priority==RevalPriority)
-       FinishReval(t);
-# endif
-    
-# if defined(EDENOLD)
-      // the thread could still have an outport... (BUG)
-      if (t->eden.outport != -1) {
-      // delete the outport for the tso which has finished...
-       IF_PAR_DEBUG(eden_ports,
-                  debugBelch("WARNING: Scheduler removes outport %d for TSO %d.\n",
-                             t->eden.outport, t->id));
-       deleteOPT(t);
-      }
-      // thread still in the process (HEAVY BUG! since outport has just been closed...)
-      if (t->eden.epid != -1) {
-       IF_PAR_DEBUG(eden_ports,
-                  debugBelch("WARNING: Scheduler removes TSO %d from process %d .\n",
-                          t->id, t->eden.epid));
-       removeTSOfromProcess(t);
-      }
-# endif 
-
-# if defined(PAR)
-      if (RtsFlags.ParFlags.ParStats.Full &&
-         !RtsFlags.ParFlags.ParStats.Suppressed) 
-       DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
-
-      //  t->par only contains statistics: left out for now...
-      IF_PAR_DEBUG(fish,
-                  debugBelch("**** end thread: ended sparked thread %d (%lx); sparkname: %lx\n",
-                             t->id,t,t->par.sparkname));
-# endif
-#endif // PARALLEL_HASKELL
+    // blocked exceptions can now complete, even if the thread was in
+    // blocked mode (see #2910).  This unconditionally calls
+    // lockTSO(), which ensures that we don't miss any threads that
+    // are engaged in throwTo() with this thread as a target.
+    awakenBlockedExceptionQueue (cap, t);
 
       //
       // Check whether the thread that just completed was a bound
 
       //
       // Check whether the thread that just completed was a bound
@@ -1898,7 +1328,7 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
              // point where we can deal with this.  Leaving it on the run
              // queue also ensures that the garbage collector knows about
              // this thread and its return value (it gets dropped from the
              // point where we can deal with this.  Leaving it on the run
              // queue also ensures that the garbage collector knows about
              // this thread and its return value (it gets dropped from the
-             // all_threads list so there's no other way to find it).
+             // step->threads list so there's no other way to find it).
              appendToRunQueue(cap,t);
              return rtsFalse;
 #else
              appendToRunQueue(cap,t);
              return rtsFalse;
 #else
@@ -1921,7 +1351,11 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
                  *(task->ret) = NULL;
              }
              if (sched_state >= SCHED_INTERRUPTING) {
                  *(task->ret) = NULL;
              }
              if (sched_state >= SCHED_INTERRUPTING) {
-                 task->stat = Interrupted;
+                  if (heap_overflow) {
+                      task->stat = HeapExhausted;
+                  } else {
+                      task->stat = Interrupted;
+                  }
              } else {
                  task->stat = Killed;
              }
              } else {
                  task->stat = Killed;
              }
@@ -1960,15 +1394,32 @@ scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
 static Capability *
 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 {
 static Capability *
 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 {
-    StgTSO *t;
     rtsBool heap_census;
 #ifdef THREADED_RTS
     rtsBool heap_census;
 #ifdef THREADED_RTS
-    static volatile StgWord waiting_for_gc;
-    rtsBool was_waiting;
+    /* The GC-pending flag, static volatile StgWord waiting_for_gc,
+       lives inside Capability.c */

+    rtsBool gc_type, prev_pending_gc;
     nat i;
 #endif
 
     nat i;
 #endif
 
+    if (sched_state == SCHED_SHUTTING_DOWN) {
+        // The final GC has already been done, and the system is
+        // shutting down.  We'll probably deadlock if we try to GC
+        // now.
+        return cap;
+    }
+
 #ifdef THREADED_RTS
 #ifdef THREADED_RTS
+    if (sched_state < SCHED_INTERRUPTING
+        && RtsFlags.ParFlags.parGcEnabled
+        && N >= RtsFlags.ParFlags.parGcGen
+        && ! oldest_gen->steps[0].mark)
+    {
+        gc_type = PENDING_GC_PAR;
+    } else {
+        gc_type = PENDING_GC_SEQ;
+    }
+
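The choice between a sequential and a parallel collection is made up front. A compilable sketch of the predicate (names invented; the real test uses sched_state, RtsFlags.ParFlags and the oldest generation's mark flag as above, and the reading of the mark flag here is presumed):

    #include <stdbool.h>

    typedef enum { GC_SEQ_MODEL, GC_PAR_MODEL } gc_kind_model;

    static gc_kind_model choose_gc_kind(bool shutting_down,
                                        bool par_gc_enabled,
                                        unsigned gen_to_collect,
                                        unsigned par_gc_from_gen,
                                        bool oldest_gen_marked)
    {
        /* Parallel GC only when the system is still running normally, the user
           enabled it, the generation being collected is old enough, and the
           oldest generation is not set up for mark/compact collection. */
        if (!shutting_down
            && par_gc_enabled
            && gen_to_collect >= par_gc_from_gen
            && !oldest_gen_marked)
            return GC_PAR_MODEL;
        return GC_SEQ_MODEL;
    }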
     // In order to GC, there must be no threads running Haskell code.
     // Therefore, the GC thread needs to hold *all* the capabilities,
     // and release them after the GC has completed.  
     // In order to GC, there must be no threads running Haskell code.
     // Therefore, the GC thread needs to hold *all* the capabilities,
     // and release them after the GC has completed.  
@@ -1979,122 +1430,153 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     // actually did the GC.  But it's quite hard to arrange for all
     // the other tasks to sleep and stay asleep.
     //
     // actually did the GC.  But it's quite hard to arrange for all
     // the other tasks to sleep and stay asleep.
     //
-       
-    was_waiting = cas(&waiting_for_gc, 0, 1);
-    if (was_waiting) {
+
+    /*  Other capabilities are prevented from running yet more Haskell
+       threads if waiting_for_gc is set. Tested inside
+       yieldCapability() and releaseCapability() in Capability.c */
+
+    prev_pending_gc = cas(&waiting_for_gc, 0, gc_type);
+    if (prev_pending_gc) {
        do {
        do {
-           debugTrace(DEBUG_sched, "someone else is trying to GC...");
-           if (cap) yieldCapability(&cap,task);
+           debugTrace(DEBUG_sched, "someone else is trying to GC (%d)...", 
+                       prev_pending_gc);
+            ASSERT(cap);
+            yieldCapability(&cap,task);
        } while (waiting_for_gc);
        return cap;  // NOTE: task->cap might have changed here
     }
 
        } while (waiting_for_gc);
        return cap;  // NOTE: task->cap might have changed here
     }
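Whichever task wins the compare-and-swap on waiting_for_gc becomes the GC leader; everyone else sees a non-zero previous value and keeps yielding its capability until the GC is over. The same idiom in portable C11 atomics (a model, not the RTS cas()):

    #include <stdatomic.h>

    static atomic_uint pending_gc_model;            /* 0 means no GC requested */

    /* Returns nonzero only for the single caller that claimed the GC. */
    static int try_become_gc_leader(unsigned gc_kind)
    {
        unsigned expected = 0;
        return atomic_compare_exchange_strong(&pending_gc_model,
                                              &expected, gc_kind);
    }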
 
-    for (i=0; i < n_capabilities; i++) {
-       debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
-       if (cap != &capabilities[i]) {
-           Capability *pcap = &capabilities[i];
-           // we better hope this task doesn't get migrated to
-           // another Capability while we're waiting for this one.
-           // It won't, because load balancing happens while we have
-           // all the Capabilities, but even so it's a slightly
-           // unsavoury invariant.
-           task->cap = pcap;
-           context_switch = 1;
-           waitForReturnCapability(&pcap, task);
-           if (pcap != &capabilities[i]) {
-               barf("scheduleDoGC: got the wrong capability");
-           }
-       }
+    setContextSwitches();
+
+    // The final shutdown GC is always single-threaded, because it's
+    // possible that some of the Capabilities have no worker threads.
+    
+    if (gc_type == PENDING_GC_SEQ)
+    {
+        postEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
+        // single-threaded GC: grab all the capabilities
+        for (i=0; i < n_capabilities; i++) {
+            debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilities (%d/%d)", i, n_capabilities);
+            if (cap != &capabilities[i]) {
+                Capability *pcap = &capabilities[i];
+                // we better hope this task doesn't get migrated to
+                // another Capability while we're waiting for this one.
+                // It won't, because load balancing happens while we have
+                // all the Capabilities, but even so it's a slightly
+                // unsavoury invariant.
+                task->cap = pcap;
+                waitForReturnCapability(&pcap, task);
+                if (pcap != &capabilities[i]) {
+                    barf("scheduleDoGC: got the wrong capability");
+                }
+            }
+        }
     }
     }
+    else
+    {
+        // multi-threaded GC: make sure all the Capabilities donate one
+        // GC thread each.
+        postEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
+        debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
 
 
-    waiting_for_gc = rtsFalse;
+        waitForGcThreads(cap);
+    }
 #endif
 
 #endif
 
-    /* Kick any transactions which are invalid back to their
-     * atomically frames.  When next scheduled they will try to
-     * commit, this commit will fail and they will retry.
-     */
-    { 
-       StgTSO *next;
-
-       for (t = all_threads; t != END_TSO_QUEUE; t = next) {
-           if (t->what_next == ThreadRelocated) {
-               next = t->link;
-           } else {
-               next = t->global_link;
-               
-               // This is a good place to check for blocked
-               // exceptions.  It might be the case that a thread is
-               // blocked on delivering an exception to a thread that
-               // is also blocked - we try to ensure that this
-               // doesn't happen in throwTo(), but it's too hard (or
-               // impossible) to close all the race holes, so we
-               // accept that some might get through and deal with
-               // them here.  A GC will always happen at some point,
-               // even if the system is otherwise deadlocked.
-               maybePerformBlockedException (&capabilities[0], t);
-
-               if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
-                   if (!stmValidateNestOfTransactions (t -> trec)) {
-                       debugTrace(DEBUG_sched | DEBUG_stm,
-                                  "trec %p found wasting its time", t);
-                       
-                       // strip the stack back to the
-                       // ATOMICALLY_FRAME, aborting the (nested)
-                       // transaction, and saving the stack of any
-                       // partially-evaluated thunks on the heap.
-                       throwToSingleThreaded_(&capabilities[0], t, 
-                                              NULL, rtsTrue, NULL);
-                       
-#ifdef REG_R1
-                       ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
-#endif
-                   }
-               }
-           }
-       }
-    }
-    
     // so this happens periodically:
     if (cap) scheduleCheckBlackHoles(cap);
     
     IF_DEBUG(scheduler, printAllThreads());
 
     // so this happens periodically:
     if (cap) scheduleCheckBlackHoles(cap);
     
     IF_DEBUG(scheduler, printAllThreads());
 
+delete_threads_and_gc:
     /*
      * We now have all the capabilities; if we're in an interrupting
      * state, then we should take the opportunity to delete all the
      * threads in the system.
      */
     /*
      * We now have all the capabilities; if we're in an interrupting
      * state, then we should take the opportunity to delete all the
      * threads in the system.
      */
-    if (sched_state >= SCHED_INTERRUPTING) {
-       deleteAllThreads(&capabilities[0]);
+    if (sched_state == SCHED_INTERRUPTING) {
+       deleteAllThreads(cap);
        sched_state = SCHED_SHUTTING_DOWN;
     }
     
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
        sched_state = SCHED_SHUTTING_DOWN;
     }
     
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
-    /* everybody back, start the GC.
-     * Could do it in this thread, or signal a condition var
-     * to do it in another thread.  Either way, we need to
-     * broadcast on gc_pending_cond afterward.
-     */
 #if defined(THREADED_RTS)
 #if defined(THREADED_RTS)
+    postEvent(cap, EVENT_GC_START, 0, 0);
     debugTrace(DEBUG_sched, "doing GC");
     debugTrace(DEBUG_sched, "doing GC");
+    // reset waiting_for_gc *before* GC, so that when the GC threads
+    // emerge they don't immediately re-enter the GC.
+    waiting_for_gc = 0;
+    GarbageCollect(force_major || heap_census, gc_type, cap);
+#else
+    GarbageCollect(force_major || heap_census, 0, cap);
 #endif
 #endif
-    GarbageCollect(force_major || heap_census);
-    
+    postEvent(cap, EVENT_GC_END, 0, 0);
+
+    if (recent_activity == ACTIVITY_INACTIVE && force_major)
+    {
+        // We are doing a GC because the system has been idle for a
+        // timeslice and we need to check for deadlock.  Record the
+        // fact that we've done a GC and turn off the timer signal;
+        // it will get re-enabled if we run any threads after the GC.
+        recent_activity = ACTIVITY_DONE_GC;
+        stopTimer();
+    }
+    else
+    {
+        // the GC might have taken long enough for the timer to set
+        // recent_activity = ACTIVITY_INACTIVE, but we aren't
+        // necessarily deadlocked:
+        recent_activity = ACTIVITY_YES;
+    }
+
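recent_activity distinguishes three states: threads ran recently, a whole timer tick passed with nothing runnable, and the idle deadlock-check GC has already happened (so the timer can stay off). A tiny model of the post-GC transition above (invented names):

    typedef enum { ACT_YES, ACT_INACTIVE, ACT_DONE_GC } activity_model;

    /* After a collection: an idle-triggered major GC parks the system in
       DONE_GC (timer off); any other GC counts as activity in its own right. */
    static activity_model activity_after_gc(int gc_was_idle_deadlock_check)
    {
        return gc_was_idle_deadlock_check ? ACT_DONE_GC : ACT_YES;
    }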
+#if defined(THREADED_RTS)
+    if (gc_type == PENDING_GC_PAR)
+    {
+        releaseGCThreads(cap);
+    }
+#endif
+
     if (heap_census) {
         debugTrace(DEBUG_sched, "performing heap census");
         heapCensus();
        performHeapProfile = rtsFalse;
     }
 
     if (heap_census) {
         debugTrace(DEBUG_sched, "performing heap census");
         heapCensus();
        performHeapProfile = rtsFalse;
     }
 
+    if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
+        // GC set the heap_overflow flag, so we should proceed with
+        // an orderly shutdown now.  Ultimately we want the main
+        // thread to return to its caller with HeapExhausted, at which
+        // point the caller should call hs_exit().  The first step is
+        // to delete all the threads.
+        //
+        // Another way to do this would be to raise an exception in
+        // the main thread, which we really should do because it gives
+        // the program a chance to clean up.  But how do we find the
+        // main thread?  It should presumably be the same one that
+        // gets ^C exceptions, but that's all done on the Haskell side
+        // (GHC.TopHandler).
+       sched_state = SCHED_INTERRUPTING;
+        goto delete_threads_and_gc;
+    }
+
+#ifdef SPARKBALANCE
+    /* JB 
+       Once we are all together... this would be the place to balance all
+       spark pools. No concurrent stealing or adding of new sparks can
+       occur. Should be defined in Sparks.c. */
+    balanceSparkPoolsCaps(n_capabilities, capabilities);
+#endif
+
 #if defined(THREADED_RTS)
 #if defined(THREADED_RTS)
-    // release our stash of capabilities.
-    for (i = 0; i < n_capabilities; i++) {
-       if (cap != &capabilities[i]) {
-           task->cap = &capabilities[i];
-           releaseCapability(&capabilities[i]);
-       }
+    if (gc_type == PENDING_GC_SEQ) {
+        // release our stash of capabilities.
+        for (i = 0; i < n_capabilities; i++) {
+            if (cap != &capabilities[i]) {
+                task->cap = &capabilities[i];
+                releaseCapability(&capabilities[i]);
+            }
+        }
     }
     if (cap) {
        task->cap = cap;
     }
     if (cap) {
        task->cap = cap;
@@ -2103,17 +1585,6 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     }
 #endif
 
     }
 #endif
 
-#if defined(GRAN)
-    /* add a ContinueThread event to continue execution of current thread */
-    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
-             ContinueThread,
-             t, (StgClosure*)NULL, (rtsSpark*)NULL);
-    IF_GRAN_DEBUG(bq, 
-                 debugBelch("GRAN: eventq and runnableq after Garbage collection:\n\n");
-                 G_EVENTQ(0);
-                 G_CURR_THREADQ(0));
-#endif /* GRAN */
-
     return cap;
 }
 
     return cap;
 }
 
@@ -2133,6 +1604,7 @@ forkProcess(HsStablePtr *entry
     pid_t pid;
     StgTSO* t,*next;
     Capability *cap;
     pid_t pid;
     StgTSO* t,*next;
     Capability *cap;
+    nat s;
     
 #if defined(THREADED_RTS)
     if (RtsFlags.ParFlags.nNodes > 1) {
     
 #if defined(THREADED_RTS)
     if (RtsFlags.ParFlags.nNodes > 1) {
@@ -2180,9 +1652,10 @@ forkProcess(HsStablePtr *entry
        // all Tasks, because they correspond to OS threads that are
        // now gone.
 
        // all Tasks, because they correspond to OS threads that are
        // now gone.
 
-       for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+        for (s = 0; s < total_steps; s++) {
+          for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
            if (t->what_next == ThreadRelocated) {
            if (t->what_next == ThreadRelocated) {
-               next = t->link;
+               next = t->_link;
            } else {
                next = t->global_link;
                // don't allow threads to catch the ThreadKilled
            } else {
                next = t->global_link;
                // don't allow threads to catch the ThreadKilled
@@ -2190,6 +1663,7 @@ forkProcess(HsStablePtr *entry
                // threads may be evaluating thunks that we need later.
                deleteThread_(cap,t);
            }
                // threads may be evaluating thunks that we need later.
                deleteThread_(cap,t);
            }
+          }
        }
        
        // Empty the run queue.  It seems tempting to let all the
        }
        
        // Empty the run queue.  It seems tempting to let all the
@@ -2203,9 +1677,11 @@ forkProcess(HsStablePtr *entry
        // don't exist now:
        cap->suspended_ccalling_tasks = NULL;
 
        // don't exist now:
        cap->suspended_ccalling_tasks = NULL;
 
-       // Empty the all_threads list.  Otherwise, the garbage
+       // Empty the threads lists.  Otherwise, the garbage
        // collector may attempt to resurrect some of these threads.
        // collector may attempt to resurrect some of these threads.
-       all_threads = END_TSO_QUEUE;
+        for (s = 0; s < total_steps; s++) {
+            all_steps[s].threads = END_TSO_QUEUE;
+        }
 
        // Wipe the task list, except the current Task.
        ACQUIRE_LOCK(&sched_mutex);
 
        // Wipe the task list, except the current Task.
        ACQUIRE_LOCK(&sched_mutex);
@@ -2255,14 +1731,18 @@ deleteAllThreads ( Capability *cap )
     // NOTE: only safe to call if we own all capabilities.
 
     StgTSO* t, *next;
     // NOTE: only safe to call if we own all capabilities.
 
     StgTSO* t, *next;
+    nat s;
+
     debugTrace(DEBUG_sched,"deleting all threads");
     debugTrace(DEBUG_sched,"deleting all threads");
-    for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+    for (s = 0; s < total_steps; s++) {
+      for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
        if (t->what_next == ThreadRelocated) {
        if (t->what_next == ThreadRelocated) {
-           next = t->link;
+           next = t->_link;
        } else {
            next = t->global_link;
            deleteThread(cap,t);
        }
        } else {
            next = t->global_link;
            deleteThread(cap,t);
        }
+      }
     }      
 
     // The run queue now contains a bunch of ThreadKilled threads.  We
     }      
 
     // The run queue now contains a bunch of ThreadKilled threads.  We
@@ -2347,6 +1827,7 @@ suspendThread (StgRegTable *reg)
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
+  postEvent(cap, EVENT_STOP_THREAD, tso->id, THREAD_SUSPENDED_FOREIGN_CALL);
   debugTrace(DEBUG_sched, 
             "thread %lu did a safe foreign call", 
             (unsigned long)cap->r.rCurrentTSO->id);
   debugTrace(DEBUG_sched, 
             "thread %lu did a safe foreign call", 
             (unsigned long)cap->r.rCurrentTSO->id);
@@ -2371,7 +1852,7 @@ suspendThread (StgRegTable *reg)
 
   suspendTask(cap,task);
   cap->in_haskell = rtsFalse;
 
   suspendTask(cap,task);
   cap->in_haskell = rtsFalse;
-  releaseCapability_(cap);
+  releaseCapability_(cap,rtsFalse);
   
   RELEASE_LOCK(&cap->lock);
 
   
   RELEASE_LOCK(&cap->lock);
 
@@ -2417,11 +1898,16 @@ resumeThread (void *task_)
 
     tso = task->suspended_tso;
     task->suspended_tso = NULL;
 
     tso = task->suspended_tso;
     task->suspended_tso = NULL;
-    tso->link = END_TSO_QUEUE;
+    tso->_link = END_TSO_QUEUE; // no write barrier reqd
+
+    postEvent(cap, EVENT_RUN_THREAD, tso->id, 0);
     debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
     
     if (tso->why_blocked == BlockedOnCCall) {
     debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
     
     if (tso->why_blocked == BlockedOnCCall) {
-       awakenBlockedExceptionQueue(cap,tso);
+        // avoid locking the TSO if we don't have to
+        if (tso->blocked_exceptions != END_TSO_QUEUE) {
+            awakenBlockedExceptionQueue(cap,tso);
+        }
        tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
     }
     
        tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
     }
     
@@ -2436,7 +1922,7 @@ resumeThread (void *task_)
 #endif
 
     /* We might have GC'd, mark the TSO dirty again */
 #endif
 
     /* We might have GC'd, mark the TSO dirty again */
-    dirtyTSO(tso);
+    dirty_TSO(cap,tso);
 
     IF_DEBUG(sanity, checkTSO(tso));
 
 
     IF_DEBUG(sanity, checkTSO(tso));
 
@@ -2471,7 +1957,8 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
     } else {
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
     } else {
-       migrateThreadToCapability_lock(&capabilities[cpu],tso);
+        postEvent (cap, EVENT_MIGRATE_THREAD, tso->id, capabilities[cpu].no);
+       wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
     appendToRunQueue(cap,tso);
     }
 #else
     appendToRunQueue(cap,tso);
@@ -2499,13 +1986,6 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
 
     debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
 
 
     debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
 
-#if defined(GRAN)
-    /* GranSim specific init */
-    CurrentTSO = m->tso;                // the TSO to run
-    procStatus[MainProc] = Busy;        // status of main PE
-    CurrentProc = MainProc;             // PE to run it on
-#endif
-
     cap = schedule(cap,task);
 
     ASSERT(task->stat != NoStatus);
     cap = schedule(cap,task);
 
     ASSERT(task->stat != NoStatus);
@@ -2520,7 +2000,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
  * ------------------------------------------------------------------------- */
 
 #if defined(THREADED_RTS)
  * ------------------------------------------------------------------------- */
 
 #if defined(THREADED_RTS)
-void
+void OSThreadProcAttr
 workerStart(Task *task)
 {
     Capability *cap;
 workerStart(Task *task)
 {
     Capability *cap;
@@ -2530,15 +2010,32 @@ workerStart(Task *task)
     cap = task->cap;
     RELEASE_LOCK(&task->lock);
 
     cap = task->cap;
     RELEASE_LOCK(&task->lock);
 
+    if (RtsFlags.ParFlags.setAffinity) {
+        setThreadAffinity(cap->no, n_capabilities);
+    }
+
     // set the thread-local pointer to the Task:
     taskEnter(task);
 
     // schedule() runs without a lock.
     cap = schedule(cap,task);
 
     // set the thread-local pointer to the Task:
     taskEnter(task);
 
     // schedule() runs without a lock.
     cap = schedule(cap,task);
 
-    // On exit from schedule(), we have a Capability.
-    releaseCapability(cap);
+    // On exit from schedule(), we have a Capability, but possibly not
+    // the same one we started with.
+
+    // During shutdown, the requirement is that after all the
+    // Capabilities are shut down, all workers that are shutting down
+    // have finished workerTaskStop().  This is why we hold on to
+    // cap->lock until we've finished workerTaskStop() below.
+    //
+    // There may be workers still involved in foreign calls; those
+    // will just block in waitForReturnCapability() because the
+    // Capability has been shut down.
+    //
+    ACQUIRE_LOCK(&cap->lock);
+    releaseCapability_(cap,rtsFalse);
     workerTaskStop(task);
     workerTaskStop(task);
+    RELEASE_LOCK(&cap->lock);
 }
 #endif
 
 }
 #endif
 
@@ -2554,27 +2051,14 @@ workerStart(Task *task)
 void 
 initScheduler(void)
 {
 void 
 initScheduler(void)
 {
-#if defined(GRAN)
-  nat i;
-  for (i=0; i<=MAX_PROC; i++) {
-    run_queue_hds[i]      = END_TSO_QUEUE;
-    run_queue_tls[i]      = END_TSO_QUEUE;
-    blocked_queue_hds[i]  = END_TSO_QUEUE;
-    blocked_queue_tls[i]  = END_TSO_QUEUE;
-    ccalling_threadss[i]  = END_TSO_QUEUE;
-    blackhole_queue[i]    = END_TSO_QUEUE;
-    sleeping_queue        = END_TSO_QUEUE;
-  }
-#elif !defined(THREADED_RTS)
+#if !defined(THREADED_RTS)
   blocked_queue_hd  = END_TSO_QUEUE;
   blocked_queue_tl  = END_TSO_QUEUE;
   sleeping_queue    = END_TSO_QUEUE;
 #endif
 
   blackhole_queue   = END_TSO_QUEUE;
   blocked_queue_hd  = END_TSO_QUEUE;
   blocked_queue_tl  = END_TSO_QUEUE;
   sleeping_queue    = END_TSO_QUEUE;
 #endif
 
   blackhole_queue   = END_TSO_QUEUE;
-  all_threads       = END_TSO_QUEUE;
 
 
-  context_switch = 0;
   sched_state    = SCHED_RUNNING;
   recent_activity = ACTIVITY_YES;
 
   sched_state    = SCHED_RUNNING;
   recent_activity = ACTIVITY_YES;
 
@@ -2594,7 +2078,7 @@ initScheduler(void)
 
   initTaskManager();
 
 
   initTaskManager();
 
-#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
+#if defined(THREADED_RTS)
   initSparkPools();
 #endif
 
   initSparkPools();
 #endif
 
@@ -2617,8 +2101,6 @@ initScheduler(void)
   }
 #endif
 
   }
 #endif
 
-  trace(TRACE_sched, "start: %d capabilities", n_capabilities);
-
   RELEASE_LOCK(&sched_mutex);
 }
 
   RELEASE_LOCK(&sched_mutex);
 }
 
@@ -2633,16 +2115,14 @@ exitScheduler(
 {
     Task *task = NULL;
 
 {
     Task *task = NULL;
 
-#if defined(THREADED_RTS)
-    ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
     task = newBoundTask();
-    RELEASE_LOCK(&sched_mutex);
-#endif
 
     // If we haven't killed all the threads yet, do it now.
     if (sched_state < SCHED_SHUTTING_DOWN) {
        sched_state = SCHED_INTERRUPTING;
 
     // If we haven't killed all the threads yet, do it now.
     if (sched_state < SCHED_SHUTTING_DOWN) {
        sched_state = SCHED_INTERRUPTING;
-       scheduleDoGC(NULL,task,rtsFalse);    
+        waitForReturnCapability(&task->cap,task);
+       scheduleDoGC(task->cap,task,rtsFalse);    
+        releaseCapability(task->cap);
     }
     sched_state = SCHED_SHUTTING_DOWN;
 
     }
     sched_state = SCHED_SHUTTING_DOWN;
 
@@ -2654,103 +2134,32 @@ exitScheduler(
            shutdownCapability(&capabilities[i], task, wait_foreign);
        }
        boundTaskExiting(task);
            shutdownCapability(&capabilities[i], task, wait_foreign);
        }
        boundTaskExiting(task);
-       stopTaskManager();
     }
     }
-#else
-    freeCapability(&MainCapability);
 #endif
 }
 
 void
 freeScheduler( void )
 {
 #endif
 }
 
 void
 freeScheduler( void )
 {
-    freeTaskManager();
-    if (n_capabilities != 1) {
-        stgFree(capabilities);
-    }
-#if defined(THREADED_RTS)
-    closeMutex(&sched_mutex);
-#endif
-}
-
-/* ---------------------------------------------------------------------------
-   Where are the roots that we know about?
-
-        - all the threads on the runnable queue
-        - all the threads on the blocked queue
-        - all the threads on the sleeping queue
-       - all the thread currently executing a _ccall_GC
-        - all the "main threads"
-     
-   ------------------------------------------------------------------------ */
-
-/* This has to be protected either by the scheduler monitor, or by the
-       garbage collection monitor (probably the latter).
-       KH @ 25/10/99
-*/
+    nat still_running;
 
 
-void
-GetRoots( evac_fn evac )
-{
-    nat i;
-    Capability *cap;
-    Task *task;
-
-#if defined(GRAN)
-    for (i=0; i<=RtsFlags.GranFlags.proc; i++) {
-       if ((run_queue_hds[i] != END_TSO_QUEUE) && ((run_queue_hds[i] != NULL)))
-           evac((StgClosure **)&run_queue_hds[i]);
-       if ((run_queue_tls[i] != END_TSO_QUEUE) && ((run_queue_tls[i] != NULL)))
-           evac((StgClosure **)&run_queue_tls[i]);
-       
-       if ((blocked_queue_hds[i] != END_TSO_QUEUE) && ((blocked_queue_hds[i] != NULL)))
-           evac((StgClosure **)&blocked_queue_hds[i]);
-       if ((blocked_queue_tls[i] != END_TSO_QUEUE) && ((blocked_queue_tls[i] != NULL)))
-           evac((StgClosure **)&blocked_queue_tls[i]);
-       if ((ccalling_threadss[i] != END_TSO_QUEUE) && ((ccalling_threadss[i] != NULL)))
-           evac((StgClosure **)&ccalling_threads[i]);
+    ACQUIRE_LOCK(&sched_mutex);
+    still_running = freeTaskManager();
+    // We can only free the Capabilities if there are no Tasks still
+    // running.  We might have a Task about to return from a foreign
+    // call into waitForReturnCapability(), for example (actually,
+    // this should be the *only* thing that a still-running Task can
+    // do at this point, and it will block waiting for the
+    // Capability).
+    if (still_running == 0) {
+        freeCapabilities();
+        if (n_capabilities != 1) {
+            stgFree(capabilities);
+        }
     }
     }
-
-    markEventQueue();
-
-#else /* !GRAN */
-
-    for (i = 0; i < n_capabilities; i++) {
-       cap = &capabilities[i];
-       evac((StgClosure **)(void *)&cap->run_queue_hd);
-       evac((StgClosure **)(void *)&cap->run_queue_tl);
+    RELEASE_LOCK(&sched_mutex);
 #if defined(THREADED_RTS)
 #if defined(THREADED_RTS)
-       evac((StgClosure **)(void *)&cap->wakeup_queue_hd);
-       evac((StgClosure **)(void *)&cap->wakeup_queue_tl);
-#endif
-       for (task = cap->suspended_ccalling_tasks; task != NULL; 
-            task=task->next) {
-           debugTrace(DEBUG_sched,
-                      "evac'ing suspended TSO %lu", (unsigned long)task->suspended_tso->id);
-           evac((StgClosure **)(void *)&task->suspended_tso);
-       }
-
-    }
-    
-
-#if !defined(THREADED_RTS)
-    evac((StgClosure **)(void *)&blocked_queue_hd);
-    evac((StgClosure **)(void *)&blocked_queue_tl);
-    evac((StgClosure **)(void *)&sleeping_queue);
-#endif 
-#endif
-
-    // evac((StgClosure **)&blackhole_queue);
-
-#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL) || defined(GRAN)
-    markSparkQueue(evac);
-#endif
-    
-#if defined(RTS_USER_SIGNALS)
-    // mark the signal handlers (signals should be already blocked)
-    if (RtsFlags.MiscFlags.install_signal_handlers) {
-        markSignalHandlers(evac);
-    }
+    closeMutex(&sched_mutex);
 #endif
 }
 
 #endif
 }
 
@@ -2766,13 +2175,15 @@ static void
 performGC_(rtsBool force_major)
 {
     Task *task;
 performGC_(rtsBool force_major)
 {
     Task *task;
+
     // We must grab a new Task here, because the existing Task may be
     // associated with a particular Capability, and chained onto the 
     // suspended_ccalling_tasks queue.
     // We must grab a new Task here, because the existing Task may be
     // associated with a particular Capability, and chained onto the 
     // suspended_ccalling_tasks queue.
-    ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
     task = newBoundTask();
-    RELEASE_LOCK(&sched_mutex);
-    scheduleDoGC(NULL,task,force_major);
+
+    waitForReturnCapability(&task->cap,task);
+    scheduleDoGC(task->cap,task,force_major);
+    releaseCapability(task->cap);
     boundTaskExiting(task);
 }
 
     boundTaskExiting(task);
 }
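performGC_ now follows a simple bracket: bind a fresh task, borrow a capability, run the collection while holding it, and hand the capability back. A schematic version of that bracket (invented names and function-pointer stand-ins, not the RTS API):

    struct task_model2;
    struct capability_model;

    static void perform_gc_model(struct task_model2 *task,
                                 struct capability_model *(*acquire)(struct task_model2 *),
                                 void (*collect)(struct capability_model *, struct task_model2 *),
                                 void (*release)(struct capability_model *))
    {
        struct capability_model *cap = acquire(task);  /* waitForReturnCapability */
        collect(cap, task);                            /* scheduleDoGC            */
        release(cap);                                  /* releaseCapability       */
    }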
 
@@ -2833,10 +2244,17 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
   }
 
   /* Try to double the current stack size.  If that takes us over the
   }
 
   /* Try to double the current stack size.  If that takes us over the
-   * maximum stack size for this thread, then use the maximum instead.
-   * Finally round up so the TSO ends up as a whole number of blocks.
+   * maximum stack size for this thread, then use the maximum instead
+   * (that is, unless we're already at or over the max size and we
+   * can't raise the StackOverflow exception (see above), in which
+   * case just double the size). Finally round up so the TSO ends up as
+   * a whole number of blocks.
    */
    */
-  new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+  if (tso->stack_size >= tso->max_stack_size) {
+      new_stack_size = tso->stack_size * 2;
+  } else { 
+      new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+  }
   new_tso_size   = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + 
                                       TSO_STRUCT_SIZE)/sizeof(W_);
   new_tso_size = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
   new_tso_size   = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + 
                                       TSO_STRUCT_SIZE)/sizeof(W_);
   new_tso_size = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
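A worked instance of the new sizing rule (sizes in words, helper invented for illustration): an 8192-word stack with a 12288-word limit grows to 12288, not 16384; a thread already at its 12288-word limit that is being allowed to grow anyway simply doubles to 24576.

    static unsigned long next_stack_size(unsigned long cur, unsigned long max)
    {
        if (cur >= max)
            return cur * 2;                      /* 12288 -> 24576                 */
        return (cur * 2 < max) ? cur * 2 : max;  /* min(2*cur, max): 8192 -> 12288 */
    }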
@@ -2867,17 +2285,10 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
    * dead TSO's stack.
    */
   tso->what_next = ThreadRelocated;
    * dead TSO's stack.
    */
   tso->what_next = ThreadRelocated;
-  tso->link = dest;
+  setTSOLink(cap,tso,dest);
   tso->sp = (P_)&(tso->stack[tso->stack_size]);
   tso->why_blocked = NotBlocked;
 
   tso->sp = (P_)&(tso->stack[tso->stack_size]);
   tso->why_blocked = NotBlocked;
 
-  IF_PAR_DEBUG(verbose,
-              debugBelch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld\n",
-                    tso->id, tso, tso->stack_size);
-              /* If we're debugging, just print out the top of the stack */
-              printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size, 
-                                               tso->sp+64)));
-  
   unlockTSO(dest);
   unlockTSO(tso);
 
   unlockTSO(dest);
   unlockTSO(tso);
 
@@ -2889,6 +2300,65 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
   return dest;
 }
 
   return dest;
 }
 
+static StgTSO *
+threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
+{
+    bdescr *bd, *new_bd;
+    lnat free_w, tso_size_w;
+    StgTSO *new_tso;
+
+    tso_size_w = tso_sizeW(tso);
+
+    if (tso_size_w < MBLOCK_SIZE_W ||
+          // TSO is less than 2 mblocks (since the first mblock is
+          // shorter than MBLOCK_SIZE_W)
+        (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W != 0 ||
+          // or TSO is not a whole number of megablocks (ensuring
+          // precondition of splitLargeBlock() below)
+        (tso_size_w <= round_up_to_mblocks(RtsFlags.GcFlags.initialStkSize)) ||
+          // or TSO is smaller than the minimum stack size (rounded up)
+        (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4) 
+          // or stack is using more than 1/4 of the available space
+    {
+        // then do nothing
+        return tso;
+    }
+
+    // don't allow throwTo() to modify the blocked_exceptions queue
+    // while we are moving the TSO:
+    lockClosure((StgClosure *)tso);
+
+    // this is the number of words we'll free
+    free_w = round_to_mblocks(tso_size_w/2);
+
+    bd = Bdescr((StgPtr)tso);
+    new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
+    bd->free = bd->start + TSO_STRUCT_SIZEW;
+
+    new_tso = (StgTSO *)new_bd->start;
+    memcpy(new_tso,tso,TSO_STRUCT_SIZE);
+    new_tso->stack_size = new_bd->free - new_tso->stack;
+
+    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
+               (long)tso->id, tso_size_w, tso_sizeW(new_tso));
+
+    tso->what_next = ThreadRelocated;
+    tso->_link = new_tso; // no write barrier reqd: same generation
+
+    // The TSO attached to this Task may have moved, so update the
+    // pointer to it.
+    if (task->tso == tso) {
+        task->tso = new_tso;
+    }
+
+    unlockTSO(new_tso);
+    unlockTSO(tso);
+
+    IF_DEBUG(sanity,checkTSO(new_tso));
+
+    return new_tso;
+}
+
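threadStackUnderflow gives back roughly half of the TSO's words, but only in whole megablocks so that splitLargeBlock() can cut the large block cleanly. A small model of that computation (invented names, shown rounding down; the RTS uses round_to_mblocks() and its own MBLOCK_SIZE_W for this):

    static unsigned long words_to_release(unsigned long tso_size_w,
                                          unsigned long mblock_size_w)
    {
        unsigned long half = tso_size_w / 2;
        /* round down to a whole number of megablocks */
        return (half / mblock_size_w) * mblock_size_w;
    }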
 /* ---------------------------------------------------------------------------
    Interrupt execution
    - usually called inside a signal handler so it mustn't do anything fancy.   
 /* ---------------------------------------------------------------------------
    Interrupt execution
    - usually called inside a signal handler so it mustn't do anything fancy.   
@@ -2898,8 +2368,10 @@ void
 interruptStgRts(void)
 {
     sched_state = SCHED_INTERRUPTING;
 interruptStgRts(void)
 {
     sched_state = SCHED_INTERRUPTING;
-    context_switch = 1;
+    setContextSwitches();
+#if defined(THREADED_RTS)
     wakeUpRts();
     wakeUpRts();
+#endif
 }
 
 /* -----------------------------------------------------------------------------
 }
 
 /* -----------------------------------------------------------------------------
@@ -2915,16 +2387,15 @@ interruptStgRts(void)
    will have interrupted any blocking system call in progress anyway.
    -------------------------------------------------------------------------- */
 
    will have interrupted any blocking system call in progress anyway.
    -------------------------------------------------------------------------- */
 
-void
-wakeUpRts(void)
-{
 #if defined(THREADED_RTS)
 #if defined(THREADED_RTS)
+void wakeUpRts(void)
+{
     // This forces the IO Manager thread to wakeup, which will
     // in turn ensure that some OS thread wakes up and runs the
     // scheduler loop, which will cause a GC and deadlock check.
     ioManagerWakeup();
     // This forces the IO Manager thread to wakeup, which will
     // in turn ensure that some OS thread wakes up and runs the
     // scheduler loop, which will cause a GC and deadlock check.
     ioManagerWakeup();
-#endif
 }
 }
+#endif
 
 /* -----------------------------------------------------------------------------
  * checkBlackHoles()
 
 /* -----------------------------------------------------------------------------
  * checkBlackHoles()
@@ -2957,18 +2428,20 @@ checkBlackHoles (Capability *cap)
     prev = &blackhole_queue;
     t = blackhole_queue;
     while (t != END_TSO_QUEUE) {
     prev = &blackhole_queue;
     t = blackhole_queue;
     while (t != END_TSO_QUEUE) {
+        if (t->what_next == ThreadRelocated) {
+            t = t->_link;
+            continue;
+        }
        ASSERT(t->why_blocked == BlockedOnBlackHole);
        type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
        if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
            IF_DEBUG(sanity,checkTSO(t));
            t = unblockOne(cap, t);
        ASSERT(t->why_blocked == BlockedOnBlackHole);
        type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
        if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
            IF_DEBUG(sanity,checkTSO(t));
            t = unblockOne(cap, t);
-           // urk, the threads migrate to the current capability
-           // here, but we'd like to keep them on the original one.
            *prev = t;
            any_woke_up = rtsTrue;
        } else {
            *prev = t;
            any_woke_up = rtsTrue;
        } else {
-           prev = &t->link;
-           t = t->link;
+           prev = &t->_link;
+           t = t->_link;
        }
     }
 
        }
     }
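A TSO that has been enlarged or shrunk is left behind as a ThreadRelocated forwarding node whose _link points at the live copy, so every queue traversal, like the blackhole_queue walk above, must be prepared to step through such nodes. The idiom in isolation (invented names):

    #include <stddef.h>

    struct tso_fwd_model {
        int relocated;                 /* nonzero: only a forwarding node       */
        struct tso_fwd_model *link;    /* next in queue, or the new copy when
                                          this node has been relocated          */
    };

    /* Follow forwarding pointers until a live TSO (or the end) is reached. */
    static struct tso_fwd_model *follow_relocations(struct tso_fwd_model *t)
    {
        while (t != NULL && t->relocated)
            t = t->link;
        return t;
    }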
 
@@ -3176,11 +2649,15 @@ resurrectThreads (StgTSO *threads)
 {
     StgTSO *tso, *next;
     Capability *cap;
 {
     StgTSO *tso, *next;
     Capability *cap;
+    step *step;
 
     for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
        next = tso->global_link;
 
     for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
        next = tso->global_link;
-       tso->global_link = all_threads;
-       all_threads = tso;
+
+        step = Bdescr((P_)tso)->step;
+       tso->global_link = step->threads;
+       step->threads = tso;
+
        debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
        
        // Wake up the thread on the Capability it was last on
        debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
        
        // Wake up the thread on the Capability it was last on
@@ -3191,15 +2668,15 @@ resurrectThreads (StgTSO *threads)
        case BlockedOnException:
            /* Called by GC - sched_mutex lock is currently held. */
            throwToSingleThreaded(cap, tso,
        case BlockedOnException:
            /* Called by GC - sched_mutex lock is currently held. */
            throwToSingleThreaded(cap, tso,
-                                 (StgClosure *)BlockedOnDeadMVar_closure);
+                                 (StgClosure *)blockedOnDeadMVar_closure);
            break;
        case BlockedOnBlackHole:
            throwToSingleThreaded(cap, tso,
            break;
        case BlockedOnBlackHole:
            throwToSingleThreaded(cap, tso,
-                                 (StgClosure *)NonTermination_closure);
+                                 (StgClosure *)nonTermination_closure);
            break;
        case BlockedOnSTM:
            throwToSingleThreaded(cap, tso,
            break;
        case BlockedOnSTM:
            throwToSingleThreaded(cap, tso,
-                                 (StgClosure *)BlockedIndefinitely_closure);
+                                 (StgClosure *)blockedIndefinitely_closure);
            break;
        case NotBlocked:
            /* This might happen if the thread was blocked on a black hole
            break;
        case NotBlocked:
            /* This might happen if the thread was blocked on a black hole
@@ -3212,3 +2689,37 @@ resurrectThreads (StgTSO *threads)
        }
     }
 }
        }
     }
 }
+
+/* -----------------------------------------------------------------------------
+   performPendingThrowTos is called after garbage collection, and
+   passed a list of threads that were found to have pending throwTos
+   (tso->blocked_exceptions was not empty), and were blocked.
+   Normally this doesn't happen, because we would deliver the
+   exception directly if the target thread is blocked, but there are
+   small windows where it might occur on a multiprocessor (see
+   throwTo()).
+
+   NB. we must be holding all the capabilities at this point, just
+   like resurrectThreads().
+   -------------------------------------------------------------------------- */
+
+void
+performPendingThrowTos (StgTSO *threads)
+{
+    StgTSO *tso, *next;
+    Capability *cap;
+    step *step;
+
+    for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
+       next = tso->global_link;
+
+        step = Bdescr((P_)tso)->step;
+       tso->global_link = step->threads;
+       step->threads = tso;
+
+       debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu", (unsigned long)tso->id);
+       
+       cap = tso->cap;
+        maybePerformBlockedException(cap, tso);
+    }
+}