Interruptible FFI calls with pthread_kill and CancelSynchronousIO. v4
[ghc-hetmet.git] / rts / Schedule.c
index fd84fde..0850749 100644
@@ -9,37 +9,24 @@
 #include "PosixSource.h"
 #define KEEP_LOCKCLOSURE
 #include "Rts.h"
-#include "SchedAPI.h"
+
+#include "sm/Storage.h"
 #include "RtsUtils.h"
-#include "RtsFlags.h"
-#include "OSThreads.h"
-#include "Storage.h"
 #include "StgRun.h"
-#include "Hooks.h"
 #include "Schedule.h"
-#include "StgMiscClosures.h"
 #include "Interpreter.h"
 #include "Printer.h"
 #include "RtsSignals.h"
-#include "Sanity.h"
+#include "sm/Sanity.h"
 #include "Stats.h"
 #include "STM.h"
-#include "Timer.h"
 #include "Prelude.h"
 #include "ThreadLabels.h"
-#include "LdvProfile.h"
 #include "Updates.h"
 #include "Proftimer.h"
 #include "ProfHeap.h"
-#if defined(GRAN) || defined(PARALLEL_HASKELL)
-# include "GranSimRts.h"
-# include "GranSim.h"
-# include "ParallelRts.h"
-# include "Parallel.h"
-# include "ParallelDebug.h"
-# include "FetchMe.h"
-# include "HLC.h"
-#endif
+#include "Weak.h"
+#include "sm/GC.h" // waitForGcThreads, releaseGCThreads, N
 #include "Sparks.h"
 #include "Capability.h"
 #include "Task.h"
@@ -50,7 +37,9 @@
 #include "Trace.h"
 #include "RaiseAsync.h"
 #include "Threads.h"
-#include "ThrIOManager.h"
+#include "Timer.h"
+#include "ThreadPaused.h"
+#include "Messages.h"
 
 #ifdef HAVE_SYS_TYPES_H
 #include <sys/types.h>
 #include <errno.h>
 #endif
 
-// Turn off inlining when debugging - it obfuscates things
-#ifdef DEBUG
-# undef  STATIC_INLINE
-# define STATIC_INLINE static
-#endif
-
 /* -----------------------------------------------------------------------------
  * Global variables
  * -------------------------------------------------------------------------- */
 
-#if defined(GRAN)
-
-StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
-/* rtsTime TimeOfNextEvent, EndOfTimeSlice;            now in GranSim.c */
-
-/* 
-   In GranSim we have a runnable and a blocked queue for each processor.
-   In order to minimise code changes new arrays run_queue_hds/tls
-   are created. run_queue_hd is then a short cut (macro) for
-   run_queue_hds[CurrentProc] (see GranSim.h).
-   -- HWL
-*/
-StgTSO *run_queue_hds[MAX_PROC], *run_queue_tls[MAX_PROC];
-StgTSO *blocked_queue_hds[MAX_PROC], *blocked_queue_tls[MAX_PROC];
-StgTSO *ccalling_threadss[MAX_PROC];
-/* We use the same global list of threads (all_threads) in GranSim as in
-   the std RTS (i.e. we are cheating). However, we don't use this list in
-   the GranSim specific code at the moment (so we are only potentially
-   cheating).  */
-
-#else /* !GRAN */
-
 #if !defined(THREADED_RTS)
 // Blocked/sleeping threads
 StgTSO *blocked_queue_hd = NULL;
@@ -106,37 +67,24 @@ StgTSO *blocked_queue_tl = NULL;
 StgTSO *sleeping_queue = NULL;    // perhaps replace with a hash table?
 #endif
 
-/* Threads blocked on blackholes.
- * LOCK: sched_mutex+capability, or all capabilities
- */
-StgTSO *blackhole_queue = NULL;
-#endif
-
-/* The blackhole_queue should be checked for threads to wake up.  See
- * Schedule.h for more thorough comment.
- * LOCK: none (doesn't matter if we miss an update)
+/* Set to true when the latest garbage collection failed to reclaim
+ * enough space, and the runtime should proceed to shut itself down in
+ * an orderly fashion (emitting profiling info etc.)
  */
-rtsBool blackholes_need_checking = rtsFalse;
-
-/* flag set by signal handler to precipitate a context switch
- * LOCK: none (just an advisory flag)
- */
-int context_switch = 0;
+rtsBool heap_overflow = rtsFalse;
 
 /* flag that tracks whether we have done any execution in this time slice.
  * LOCK: currently none, perhaps we should lock (but needs to be
  * updated in the fast path of the scheduler).
+ *
+ * NB. must be StgWord, we do xchg() on it.
  */
-nat recent_activity = ACTIVITY_YES;
+volatile StgWord recent_activity = ACTIVITY_YES;
 
 /* if this flag is set as well, give up execution
- * LOCK: none (changes once, from false->true)
+ * LOCK: none (changes monotonically)
  */
-rtsBool sched_state = SCHED_RUNNING;
-
-#if defined(GRAN)
-StgTSO *CurrentTSO;
-#endif
+volatile StgWord sched_state = SCHED_RUNNING;
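The recent_activity and sched_state flags above become volatile StgWord because, per the comment in the hunk, the scheduler updates recent_activity with an atomic exchange (xchg()) rather than a plain store, and such a flag must be exactly one machine word. A minimal standalone sketch of that pattern, using GCC's __sync_lock_test_and_set as a stand-in for the RTS's own xchg() (the variable and function names below are illustrative, not the RTS's declarations):

/* sketch only: word-sized flag swapped atomically; the RTS additionally
 * marks its flags volatile so ordinary reads are not cached in registers */
#include <stdio.h>

static unsigned long activity_flag = 1;      /* stands in for recent_activity */

static unsigned long swap_flag (unsigned long new_value)
{
    /* atomically publish new_value and return the previous value */
    return __sync_lock_test_and_set(&activity_flag, new_value);
}

int main (void)
{
    unsigned long old = swap_flag(0);         /* e.g. a timer tick marks "no activity" */
    printf("flag was %lu, now %lu\n", old, activity_flag);
    return 0;
}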
 
 /*  This is used in `TSO.h' and gcc 2.96 insists that this variable actually 
  *  exists - earlier gccs apparently didn't.
@@ -159,12 +107,6 @@ rtsBool shutting_down_scheduler = rtsFalse;
 Mutex sched_mutex;
 #endif
 
-#if defined(PARALLEL_HASKELL)
-StgTSO *LastTSO;
-rtsTime TimeOfLastYield;
-rtsBool emitSchedule = rtsTrue;
-#endif
-
 #if !defined(mingw32_HOST_OS)
 #define FORKPROCESS_PRIMOP_SUPPORTED
 #endif
@@ -181,26 +123,19 @@ static Capability *schedule (Capability *initialCapability, Task *task);
 // scheduler clearer.
 //
 static void schedulePreLoop (void);
+static void scheduleFindWork (Capability *cap);
 #if defined(THREADED_RTS)
-static void schedulePushWork(Capability *cap, Task *task);
+static void scheduleYield (Capability **pcap, Task *task);
 #endif
 static void scheduleStartSignalHandlers (Capability *cap);
 static void scheduleCheckBlockedThreads (Capability *cap);
-static void scheduleCheckWakeupThreads(Capability *cap USED_IF_NOT_THREADS);
-static void scheduleCheckBlackHoles (Capability *cap);
+static void scheduleProcessInbox(Capability *cap);
 static void scheduleDetectDeadlock (Capability *cap, Task *task);
-#if defined(GRAN)
-static StgTSO *scheduleProcessEvent(rtsEvent *event);
-#endif
-#if defined(PARALLEL_HASKELL)
-static StgTSO *scheduleSendPendingMessages(void);
-static void scheduleActivateSpark(void);
-static rtsBool scheduleGetRemoteWork(rtsBool *receivedFinish);
-#endif
-#if defined(PAR) || defined(GRAN)
-static void scheduleGranParReport(void);
+static void schedulePushWork(Capability *cap, Task *task);
+#if defined(THREADED_RTS)
+static void scheduleActivateSpark(Capability *cap);
 #endif
-static void schedulePostRunThread(StgTSO *t);
+static void schedulePostRunThread(Capability *cap, StgTSO *t);
 static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
 static void scheduleHandleStackOverflow( Capability *cap, Task *task, 
                                         StgTSO *t);
@@ -213,10 +148,8 @@ static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
 static Capability *scheduleDoGC(Capability *cap, Task *task,
                                rtsBool force_major);
 
-static rtsBool checkBlackHoles(Capability *cap);
-
 static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
-static StgTSO *threadStackUnderflow(Task *task, StgTSO *tso);
+static StgTSO *threadStackUnderflow(Capability *cap, Task *task, StgTSO *tso);
 
 static void deleteThread (Capability *cap, StgTSO *tso);
 static void deleteAllThreads (Capability *cap);
@@ -225,43 +158,6 @@ static void deleteAllThreads (Capability *cap);
 static void deleteThread_(Capability *cap, StgTSO *tso);
 #endif
 
-#if defined(PARALLEL_HASKELL)
-StgTSO * createSparkThread(rtsSpark spark);
-StgTSO * activateSpark (rtsSpark spark);  
-#endif
-
-#ifdef DEBUG
-static char *whatNext_strs[] = {
-  "(unknown)",
-  "ThreadRunGHC",
-  "ThreadInterpret",
-  "ThreadKilled",
-  "ThreadRelocated",
-  "ThreadComplete"
-};
-#endif
-
-/* -----------------------------------------------------------------------------
- * Putting a thread on the run queue: different scheduling policies
- * -------------------------------------------------------------------------- */
-
-STATIC_INLINE void
-addToRunQueue( Capability *cap, StgTSO *t )
-{
-#if defined(PARALLEL_HASKELL)
-    if (RtsFlags.ParFlags.doFairScheduling) { 
-       // this does round-robin scheduling; good for concurrency
-       appendToRunQueue(cap,t);
-    } else {
-       // this does unfair scheduling; good for parallelism
-       pushOnRunQueue(cap,t);
-    }
-#else
-    // this does round-robin scheduling; good for concurrency
-    appendToRunQueue(cap,t);
-#endif
-}
-
 /* ---------------------------------------------------------------------------
    Main scheduling loop.
 
@@ -279,6 +175,7 @@ addToRunQueue( Capability *cap, StgTSO *t )
      This revolves around the global event queue, which determines what 
      to do next. Therefore, it's more complicated than either the 
      concurrent or the parallel (GUM) setup.
+  This version has been entirely removed (JB 2008/08).
 
    GUM version:
      GUM iterates over incoming messages.
@@ -289,6 +186,12 @@ addToRunQueue( Capability *cap, StgTSO *t )
      (see PendingFetches).
      This is not the ugliest code you could imagine, but it's bloody close.
 
+  (JB 2008/08) This version was formerly indicated by a PP-Flag PAR,
+  now by PP-flag PARALLEL_HASKELL. The Eden RTS (in GHC-6.x) uses it,
+  as well as future GUM versions. This file has been refurbished to
+  only contain valid code, which is however incomplete, refers to
+  invalid includes etc.
+
    ------------------------------------------------------------------------ */
 
 static Capability *
@@ -297,16 +200,6 @@ schedule (Capability *initialCapability, Task *task)
   StgTSO *t;
   Capability *cap;
   StgThreadReturnCode ret;
-#if defined(GRAN)
-  rtsEvent *event;
-#elif defined(PARALLEL_HASKELL)
-  StgTSO *tso;
-  GlobalTaskId pe;
-  rtsBool receivedFinish = rtsFalse;
-# if defined(DEBUG)
-  nat tp_size, sp_size; // stats only
-# endif
-#endif
   nat prev_what_next;
   rtsBool ready_to_gc;
 #if defined(THREADED_RTS)
@@ -319,47 +212,14 @@ schedule (Capability *initialCapability, Task *task)
   // The sched_mutex is *NOT* held
   // NB. on return, we still hold a capability.
 
-  debugTrace (DEBUG_sched, 
-             "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
-             task, initialCapability);
+  debugTrace (DEBUG_sched, "cap %d: schedule()", initialCapability->no);
 
   schedulePreLoop();
 
   // -----------------------------------------------------------
   // Scheduler loop starts here:
 
-#if defined(PARALLEL_HASKELL)
-#define TERMINATION_CONDITION        (!receivedFinish)
-#elif defined(GRAN)
-#define TERMINATION_CONDITION        ((event = get_next_event()) != (rtsEvent*)NULL) 
-#else
-#define TERMINATION_CONDITION        rtsTrue
-#endif
-
-  while (TERMINATION_CONDITION) {
-
-#if defined(GRAN)
-      /* Choose the processor with the next event */
-      CurrentProc = event->proc;
-      CurrentTSO = event->tso;
-#endif
-
-#if defined(THREADED_RTS)
-      if (first) {
-         // don't yield the first time, we want a chance to run this
-         // thread for a bit, even if there are others banging at the
-         // door.
-         first = rtsFalse;
-         ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
-      } else {
-         // Yield the capability to higher-priority tasks if necessary.
-         yieldCapability(&cap, task);
-      }
-#endif
-      
-#if defined(THREADED_RTS)
-      schedulePushWork(cap,task);
-#endif
+  while (1) {
 
     // Check whether we have re-entered the RTS from Haskell without
     // going via suspendThread()/resumeThread (i.e. a 'safe' foreign
@@ -414,13 +274,20 @@ schedule (Capability *initialCapability, Task *task)
 #endif
        /* scheduleDoGC() deletes all the threads */
        cap = scheduleDoGC(cap,task,rtsFalse);
-       break;
+
+        // after scheduleDoGC(), we must be shutting down.  Either some
+        // other Capability did the final GC, or we did it above,
+        // either way we can fall through to the SCHED_SHUTTING_DOWN
+        // case now.
+        ASSERT(sched_state == SCHED_SHUTTING_DOWN);
+        // fall through
+
     case SCHED_SHUTTING_DOWN:
        debugTrace(DEBUG_sched, "SCHED_SHUTTING_DOWN");
        // If we are a worker, just exit.  If we're a bound thread
        // then we will exit below when we've removed our TSO from
        // the run queue.
-       if (task->tso == NULL && emptyRunQueue(cap)) {
+       if (!isBoundTask(task) && emptyRunQueue(cap)) {
            return cap;
        }
        break;
@@ -428,35 +295,14 @@ schedule (Capability *initialCapability, Task *task)
        barf("sched_state: %d", sched_state);
     }
 
-#if defined(THREADED_RTS)
-    // If the run queue is empty, take a spark and turn it into a thread.
-    {
-       if (emptyRunQueue(cap)) {
-           StgClosure *spark;
-           spark = findSpark(cap);
-           if (spark != NULL) {
-               debugTrace(DEBUG_sched,
-                          "turning spark of closure %p into a thread",
-                          (StgClosure *)spark);
-               createSparkThread(cap,spark);     
-           }
-       }
-    }
-#endif // THREADED_RTS
-
-    scheduleStartSignalHandlers(cap);
-
-    // Only check the black holes here if we've nothing else to do.
-    // During normal execution, the black hole list only gets checked
-    // at GC time, to avoid repeatedly traversing this possibly long
-    // list each time around the scheduler.
-    if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
-
-    scheduleCheckWakeupThreads(cap);
+    scheduleFindWork(cap);
 
-    scheduleCheckBlockedThreads(cap);
+    /* work pushing, currently relevant only for THREADED_RTS:
+       (pushes threads, wakes up idle capabilities for stealing) */
+    schedulePushWork(cap,task);
 
     scheduleDetectDeadlock(cap,task);
+
 #if defined(THREADED_RTS)
     cap = task->cap;    // reload cap, it might have changed
 #endif
@@ -469,78 +315,61 @@ schedule (Capability *initialCapability, Task *task)
     //
     // win32: might be here due to awaitEvent() being abandoned
     // as a result of a console event having been delivered.
-    if ( emptyRunQueue(cap) ) {
-#if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
-       ASSERT(sched_state >= SCHED_INTERRUPTING);
-#endif
-       continue; // nothing to do
+    
+#if defined(THREADED_RTS)
+    if (first) 
+    {
+    // XXX: ToDo
+    //     // don't yield the first time, we want a chance to run this
+    //     // thread for a bit, even if there are others banging at the
+    //     // door.
+    //     first = rtsFalse;
+    //     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     }
 
-#if defined(PARALLEL_HASKELL)
-    scheduleSendPendingMessages();
-    if (emptyRunQueue(cap) && scheduleActivateSpark()) 
-       continue;
+    scheduleYield(&cap,task);
 
-#if defined(SPARKS)
-    ASSERT(next_fish_to_send_at==0);  // i.e. no delayed fishes left!
+    if (emptyRunQueue(cap)) continue; // look for work again
 #endif
 
-    /* If we still have no work we need to send a FISH to get a spark
-       from another PE */
-    if (emptyRunQueue(cap)) {
-       if (!scheduleGetRemoteWork(&receivedFinish)) continue;
-       ASSERT(rtsFalse); // should not happen at the moment
-    }
-    // from here: non-empty run queue.
-    //  TODO: merge above case with this, only one call processMessages() !
-    if (PacketsWaiting()) {  /* process incoming messages, if
-                               any pending...  only in else
-                               because getRemoteWork waits for
-                               messages as well */
-       receivedFinish = processMessages();
+#if !defined(THREADED_RTS) && !defined(mingw32_HOST_OS)
+    if ( emptyRunQueue(cap) ) {
+       ASSERT(sched_state >= SCHED_INTERRUPTING);
     }
 #endif
 
-#if defined(GRAN)
-    scheduleProcessEvent(event);
-#endif
-
     // 
     // Get a thread to run
     //
     t = popRunQueue(cap);
 
-#if defined(GRAN) || defined(PAR)
-    scheduleGranParReport(); // some kind of debuging output
-#else
     // Sanity check the thread we're about to run.  This can be
     // expensive if there is lots of thread switching going on...
     IF_DEBUG(sanity,checkTSO(t));
-#endif
 
 #if defined(THREADED_RTS)
     // Check whether we can run this thread in the current task.
     // If not, we have to pass our capability to the right task.
     {
-       Task *bound = t->bound;
+        InCall *bound = t->bound;
       
        if (bound) {
-           if (bound == task) {
-               debugTrace(DEBUG_sched,
-                          "### Running thread %lu in bound thread", (unsigned long)t->id);
+           if (bound->task == task) {
                // yes, the Haskell thread is bound to the current native thread
            } else {
                debugTrace(DEBUG_sched,
-                          "### thread %lu bound to another OS thread", (unsigned long)t->id);
+                          "thread %lu bound to another OS thread",
+                           (unsigned long)t->id);
                // no, bound to a different Haskell thread: pass to that thread
                pushOnRunQueue(cap,t);
                continue;
            }
        } else {
            // The thread we want to run is unbound.
-           if (task->tso) { 
+           if (task->incall->tso) { 
                debugTrace(DEBUG_sched,
-                          "### this OS thread cannot run thread %lu", (unsigned long)t->id);
+                          "this OS thread cannot run thread %lu",
+                           (unsigned long)t->id);
                // no, the current native thread is bound to a different
                // Haskell thread, so pass it to any worker thread
                pushOnRunQueue(cap,t);
@@ -550,13 +379,22 @@ schedule (Capability *initialCapability, Task *task)
     }
 #endif
 
+    // If we're shutting down, and this thread has not yet been
+    // killed, kill it now.  This sometimes happens when a finalizer
+    // thread is created by the final GC, or a thread previously
+    // in a foreign call returns.
+    if (sched_state >= SCHED_INTERRUPTING &&
+        !(t->what_next == ThreadComplete || t->what_next == ThreadKilled)) {
+        deleteThread(cap,t);
+    }
+
     /* context switches are initiated by the timer signal, unless
      * the user specified "context switch as often as possible", with
      * +RTS -C0
      */
     if (RtsFlags.ConcFlags.ctxtSwitchTicks == 0
        && !emptyThreadQueues(cap)) {
-       context_switch = 1;
+       cap->context_switch = 1;
     }
         
 run_thread:
@@ -566,19 +404,14 @@ run_thread:
     // that.
     cap->r.rCurrentTSO = t;
 
-    debugTrace(DEBUG_sched, "-->> running thread %ld %s ...", 
-                             (long)t->id, whatNext_strs[t->what_next]);
-
     startHeapProfTimer();
 
-    // Check for exceptions blocked on this thread
-    maybePerformBlockedException (cap, t);
-
     // ----------------------------------------------------------------------
     // Run the current thread 
 
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     ASSERT(t->cap == cap);
+    ASSERT(t->bound ? t->bound->task->cap == cap : 1);
 
     prev_what_next = t->what_next;
 
@@ -600,11 +433,17 @@ run_thread:
         if (prev == ACTIVITY_DONE_GC) {
             startTimer();
         }
-    } else {
+    } else if (recent_activity != ACTIVITY_INACTIVE) {
+        // If we reached ACTIVITY_INACTIVE, then don't reset it until
+        // we've done the GC.  The thread running here might just be
+        // the IO manager thread that handle_tick() woke up via
+        // wakeUpRts().
         recent_activity = ACTIVITY_YES;
     }
 #endif
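The branch just above is careful not to overwrite ACTIVITY_INACTIVE: once the timer has declared the system idle, only the GC path should advance the flag (to ACTIVITY_DONE_GC), because the thread about to run may merely be the IO manager that handle_tick() woke via wakeUpRts(). A condensed sketch of that state machine, with simplified names standing in for handle_tick(), scheduleDoGC() and the scheduler fast path (illustrative only, not the RTS code):

enum { ACT_YES, ACT_MAYBE_NO, ACT_INACTIVE, ACT_DONE_GC };
static int activity = ACT_YES;            /* stands in for recent_activity */

static void timer_tick (void)             /* ~ handle_tick() */
{
    if (activity == ACT_YES)           activity = ACT_MAYBE_NO;
    else if (activity == ACT_MAYBE_NO) activity = ACT_INACTIVE;  /* wake the RTS for an idle GC */
}

static void idle_gc_finished (void)       /* ~ scheduleDoGC() after a forced major GC */
{
    activity = ACT_DONE_GC;               /* the timer can now be stopped */
}

static void about_to_run_thread (void)    /* ~ the scheduler fast path above */
{
    /* leave INACTIVE alone until the idle GC has run and set DONE_GC */
    if (activity != ACT_INACTIVE)
        activity = ACT_YES;
}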
 
+    traceEventRunThread(cap, t);
+
     switch (prev_what_next) {
        
     case ThreadKilled:
@@ -637,13 +476,6 @@ run_thread:
     // happened.  So find the new location:
     t = cap->r.rCurrentTSO;
 
-    // We have run some Haskell code: there might be blackhole-blocked
-    // threads to wake up now.
-    // Lock-free test here should be ok, we're just setting a flag.
-    if ( blackhole_queue != END_TSO_QUEUE ) {
-       blackholes_need_checking = rtsTrue;
-    }
-    
     // And save the current errno in this thread.
     // XXX: possibly bogus for SMP because this thread might already
     // be running again, see code below.
@@ -653,20 +485,7 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
-#if defined(THREADED_RTS)
-    // If ret is ThreadBlocked, and this Task is bound to the TSO that
-    // blocked, we are in limbo - the TSO is now owned by whatever it
-    // is blocked on, and may in fact already have been woken up,
-    // perhaps even on a different Capability.  It may be the case
-    // that task->cap != cap.  We better yield this Capability
-    // immediately and return to normaility.
-    if (ret == ThreadBlocked) {
-       debugTrace(DEBUG_sched,
-                  "--<< thread %lu (%s) stopped: blocked",
-                  (unsigned long)t->id, whatNext_strs[t->what_next]);
-       continue;
-    }
-#endif
+    traceEventStopThread(cap, t, ret);
 
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     ASSERT(t->cap == cap);
@@ -679,9 +498,11 @@ run_thread:
     CCCS = CCS_SYSTEM;
 #endif
     
-    schedulePostRunThread(t);
+    schedulePostRunThread(cap,t);
 
-    t = threadStackUnderflow(task,t);
+    if (ret != StackOverflow) {
+        t = threadStackUnderflow(cap,task,t);
+    }
 
     ready_to_gc = rtsFalse;
 
@@ -720,6 +541,30 @@ run_thread:
   } /* end of while() */
 }
 
+/* -----------------------------------------------------------------------------
+ * Run queue operations
+ * -------------------------------------------------------------------------- */
+
+void
+removeFromRunQueue (Capability *cap, StgTSO *tso)
+{
+    if (tso->block_info.prev == END_TSO_QUEUE) {
+        ASSERT(cap->run_queue_hd == tso);
+        cap->run_queue_hd = tso->_link;
+    } else {
+        setTSOLink(cap, tso->block_info.prev, tso->_link);
+    }
+    if (tso->_link == END_TSO_QUEUE) {
+        ASSERT(cap->run_queue_tl == tso);
+        cap->run_queue_tl = tso->block_info.prev;
+    } else {
+        setTSOPrev(cap, tso->_link, tso->block_info.prev);
+    }
+    tso->_link = tso->block_info.prev = END_TSO_QUEUE;
+
+    IF_DEBUG(sanity, checkRunQueue(cap));
+}
+
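removeFromRunQueue above relies on the run queue now being doubly linked: _link is the forward pointer and block_info.prev the back pointer, so a TSO can be unlinked in constant time from the head, the tail, or the middle. A standalone sketch of the same unlink logic on a simplified node type (field and function names here are illustrative, not the RTS's TSO fields):

#include <stddef.h>

typedef struct Node { struct Node *next, *prev; } Node;
typedef struct { Node *hd, *tl; } Queue;        /* like run_queue_hd / run_queue_tl */

/* Unlink n from q in O(1), mirroring removeFromRunQueue: patch the
 * predecessor's next pointer (or the head), then the successor's prev
 * pointer (or the tail), then clear n's own links. */
static void unlink_node (Queue *q, Node *n)
{
    if (n->prev == NULL) q->hd = n->next;
    else                 n->prev->next = n->next;

    if (n->next == NULL) q->tl = n->prev;
    else                 n->next->prev = n->prev;

    n->next = n->prev = NULL;
}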
 /* ----------------------------------------------------------------------------
  * Setting up the scheduler loop
  * ------------------------------------------------------------------------- */
@@ -727,54 +572,119 @@ run_thread:
 static void
 schedulePreLoop(void)
 {
-#if defined(GRAN) 
-    /* set up first event to get things going */
-    /* ToDo: assign costs for system setup and init MainTSO ! */
-    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
-             ContinueThread, 
-             CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
-    
-    debugTrace (DEBUG_gran,
-               "GRAN: Init CurrentTSO (in schedule) = %p", 
-               CurrentTSO);
-    IF_DEBUG(gran, G_TSO(CurrentTSO, 5));
-    
-    if (RtsFlags.GranFlags.Light) {
-       /* Save current time; GranSim Light only */
-       CurrentTSO->gran.clock = CurrentTime[CurrentProc];
-    }      
+  // initialisation for scheduler - what cannot go into initScheduler()  
+}
+
+/* -----------------------------------------------------------------------------
+ * scheduleFindWork()
+ *
+ * Search for work to do, and handle messages from elsewhere.
+ * -------------------------------------------------------------------------- */
+
+static void
+scheduleFindWork (Capability *cap)
+{
+    scheduleStartSignalHandlers(cap);
+
+    scheduleProcessInbox(cap);
+
+    scheduleCheckBlockedThreads(cap);
+
+#if defined(THREADED_RTS)
+    if (emptyRunQueue(cap)) { scheduleActivateSpark(cap); }
 #endif
 }
 
+#if defined(THREADED_RTS)
+STATIC_INLINE rtsBool
+shouldYieldCapability (Capability *cap, Task *task)
+{
+    // we need to yield this capability to someone else if..
+    //   - another thread is initiating a GC
+    //   - another Task is returning from a foreign call
+    //   - the thread at the head of the run queue cannot be run
+    //     by this Task (it is bound to another Task, or it is unbound
+    //     and this task it bound).
+    return (waiting_for_gc || 
+            cap->returning_tasks_hd != NULL ||
+            (!emptyRunQueue(cap) && (task->incall->tso == NULL
+                                     ? cap->run_queue_hd->bound != NULL
+                                     : cap->run_queue_hd->bound != task->incall)));
+}
+
+// This is the single place where a Task goes to sleep.  There are
+// two reasons it might need to sleep:
+//    - there are no threads to run
+//    - we need to yield this Capability to someone else 
+//      (see shouldYieldCapability())
+//
+// Careful: the scheduler loop is quite delicate.  Make sure you run
+// the tests in testsuite/concurrent (all ways) after modifying this,
+// and also check the benchmarks in nofib/parallel for regressions.
+
+static void
+scheduleYield (Capability **pcap, Task *task)
+{
+    Capability *cap = *pcap;
+
+    // if we have work, and we don't need to give up the Capability, continue.
+    //
+    if (!shouldYieldCapability(cap,task) && 
+        (!emptyRunQueue(cap) ||
+         !emptyInbox(cap) ||
+         sched_state >= SCHED_INTERRUPTING))
+        return;
+
+    // otherwise yield (sleep), and keep yielding if necessary.
+    do {
+        yieldCapability(&cap,task);
+    } 
+    while (shouldYieldCapability(cap,task));
+
+    // note there may still be no threads on the run queue at this
+    // point, the caller has to check.
+
+    *pcap = cap;
+    return;
+}
+#endif
+    
 /* -----------------------------------------------------------------------------
  * schedulePushWork()
  *
  * Push work to other Capabilities if we have some.
  * -------------------------------------------------------------------------- */
 
-#if defined(THREADED_RTS)
 static void
 schedulePushWork(Capability *cap USED_IF_THREADS, 
                 Task *task      USED_IF_THREADS)
 {
+  /* following code not for PARALLEL_HASKELL. I kept the call general,
+     future GUM versions might use pushing in a distributed setup */
+#if defined(THREADED_RTS)
+
     Capability *free_caps[n_capabilities], *cap0;
     nat i, n_free_caps;
 
-    // migration can be turned off with +RTS -qg
+    // migration can be turned off with +RTS -qm
     if (!RtsFlags.ParFlags.migrate) return;
 
     // Check whether we have more threads on our run queue, or sparks
     // in our pool, that we could hand to another Capability.
-    if ((emptyRunQueue(cap) || cap->run_queue_hd->_link == END_TSO_QUEUE)
-       && sparkPoolSizeCap(cap) < 2) {
-       return;
+    if (cap->run_queue_hd == END_TSO_QUEUE) {
+        if (sparkPoolSizeCap(cap) < 2) return;
+    } else {
+        if (cap->run_queue_hd->_link == END_TSO_QUEUE &&
+            sparkPoolSizeCap(cap) < 1) return;
     }
 
     // First grab as many free Capabilities as we can.
     for (i=0, n_free_caps=0; i < n_capabilities; i++) {
        cap0 = &capabilities[i];
        if (cap != cap0 && tryGrabCapability(cap0,task)) {
-           if (!emptyRunQueue(cap0) || cap->returning_tasks_hd != NULL) {
+           if (!emptyRunQueue(cap0)
+                || cap->returning_tasks_hd != NULL
+                || cap->inbox != (Message*)END_TSO_QUEUE) {
                // it already has some work, we just grabbed it at 
                // the wrong moment.  Or maybe it's deadlocked!
                releaseCapability(cap0);
@@ -799,7 +709,12 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
        StgTSO *prev, *t, *next;
        rtsBool pushed_to_all;
 
-       debugTrace(DEBUG_sched, "excess threads on run queue and %d free capabilities, sharing...", n_free_caps);
+       debugTrace(DEBUG_sched, 
+                  "cap %d: %s and %d free capabilities, sharing...", 
+                  cap->no, 
+                  (!emptyRunQueue(cap) && cap->run_queue_hd->_link != END_TSO_QUEUE)?
+                  "excess threads on run queue":"sparks to share (>=2)",
+                  n_free_caps);
 
        i = 0;
        pushed_to_all = rtsFalse;
 
@@ -812,27 +727,36 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                next = t->_link;
                t->_link = END_TSO_QUEUE;
                if (t->what_next == ThreadRelocated
-                   || t->bound == task // don't move my bound thread
+                   || t->bound == task->incall // don't move my bound thread
                    || tsoLocked(t)) {  // don't move a locked thread
                    setTSOLink(cap, prev, t);
+                    setTSOPrev(cap, t, prev);
                    prev = t;
                } else if (i == n_free_caps) {
                    pushed_to_all = rtsTrue;
                    i = 0;
                    // keep one for us
                    setTSOLink(cap, prev, t);
+                    setTSOPrev(cap, t, prev);
                    prev = t;
                } else {
-                   debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
                    appendToRunQueue(free_caps[i],t);
-                   if (t->bound) { t->bound->cap = free_caps[i]; }
+
+                    traceEventMigrateThread (cap, t, free_caps[i]->no);
+
+                   if (t->bound) { t->bound->task->cap = free_caps[i]; }
                    t->cap = free_caps[i];
                    i++;
                }
            }
            cap->run_queue_tl = prev;
+
+            IF_DEBUG(sanity, checkRunQueue(cap));
        }
 
+#ifdef SPARK_PUSHING
+       /* JB I left this code in place, it would work but is not necessary */
+
        // If there are some free capabilities that we didn't push any
        // threads to, then try to push a spark to each one.
        if (!pushed_to_all) {
@@ -840,24 +764,30 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
            // i is the next free capability to push to
            for (; i < n_free_caps; i++) {
                if (emptySparkPoolCap(free_caps[i])) {
-                   spark = findSpark(cap);
+                   spark = tryStealSpark(cap->sparks);
                    if (spark != NULL) {
                        debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
+
+            traceEventStealSpark(free_caps[i], t, cap->no);
+
                        newSpark(&(free_caps[i]->r), spark);
                    }
                }
            }
        }
+#endif /* SPARK_PUSHING */
 
        // release the capabilities
        for (i = 0; i < n_free_caps; i++) {
            task->cap = free_caps[i];
-           releaseCapability(free_caps[i]);
+           releaseAndWakeupCapability(free_caps[i]);
        }
     }
     task->cap = cap; // reset to point to our Capability.
+
+#endif /* THREADED_RTS */
+
 }
-#endif
 
 /* ----------------------------------------------------------------------------
  * Start any pending signal handlers
@@ -894,54 +824,11 @@ scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
     //
     if ( !emptyQueue(blocked_queue_hd) || !emptyQueue(sleeping_queue) )
     {
-       awaitEvent( emptyRunQueue(cap) && !blackholes_need_checking );
+       awaitEvent (emptyRunQueue(cap));
     }
 #endif
 }
 
-
-/* ----------------------------------------------------------------------------
- * Check for threads woken up by other Capabilities
- * ------------------------------------------------------------------------- */
-
-static void
-scheduleCheckWakeupThreads(Capability *cap USED_IF_THREADS)
-{
-#if defined(THREADED_RTS)
-    // Any threads that were woken up by other Capabilities get
-    // appended to our run queue.
-    if (!emptyWakeupQueue(cap)) {
-       ACQUIRE_LOCK(&cap->lock);
-       if (emptyRunQueue(cap)) {
-           cap->run_queue_hd = cap->wakeup_queue_hd;
-           cap->run_queue_tl = cap->wakeup_queue_tl;
-       } else {
-           setTSOLink(cap, cap->run_queue_tl, cap->wakeup_queue_hd);
-           cap->run_queue_tl = cap->wakeup_queue_tl;
-       }
-       cap->wakeup_queue_hd = cap->wakeup_queue_tl = END_TSO_QUEUE;
-       RELEASE_LOCK(&cap->lock);
-    }
-#endif
-}
-
-/* ----------------------------------------------------------------------------
- * Check for threads blocked on BLACKHOLEs that can be woken up
- * ------------------------------------------------------------------------- */
-static void
-scheduleCheckBlackHoles (Capability *cap)
-{
-    if ( blackholes_need_checking ) // check without the lock first
-    {
-       ACQUIRE_LOCK(&sched_mutex);
-       if ( blackholes_need_checking ) {
-           checkBlackHoles(cap);
-           blackholes_need_checking = rtsFalse;
-       }
-       RELEASE_LOCK(&sched_mutex);
-    }
-}
-
 /* ----------------------------------------------------------------------------
  * Detect deadlock conditions and attempt to resolve them.
  * ------------------------------------------------------------------------- */
@@ -949,12 +836,6 @@ scheduleCheckBlackHoles (Capability *cap)
 static void
 scheduleDetectDeadlock (Capability *cap, Task *task)
 {
-
-#if defined(PARALLEL_HASKELL)
-    // ToDo: add deadlock detection in GUM (similar to THREADED_RTS) -- HWL
-    return;
-#endif
-
     /* 
      * Detect deadlock: when we have no threads to run, there are no
      * threads blocked, waiting for I/O, or sleeping, and all the
@@ -980,12 +861,11 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
        // they are unreachable and will therefore be sent an
        // exception.  Any threads thus released will be immediately
        // runnable.
-       cap = scheduleDoGC (cap, task, rtsTrue/*force  major GC*/);
+       cap = scheduleDoGC (cap, task, rtsTrue/*force major GC*/);
+        // when force_major == rtsTrue. scheduleDoGC sets
+        // recent_activity to ACTIVITY_DONE_GC and turns off the timer
+        // signal.
 
-       recent_activity = ACTIVITY_DONE_GC;
-        // disable timer signals (see #1623)
-        stopTimer();
-       
        if ( !emptyRunQueue(cap) ) return;
 
 #if defined(RTS_USER_SIGNALS) && !defined(THREADED_RTS)
@@ -1005,6 +885,8 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
 
            // either we have threads to run, or we were interrupted:
            ASSERT(!emptyRunQueue(cap) || sched_state >= SCHED_INTERRUPTING);
+
+            return;
        }
 #endif
 
@@ -1012,14 +894,14 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
        /* Probably a real deadlock.  Send the current main thread the
         * Deadlock exception.
         */
-       if (task->tso) {
-           switch (task->tso->why_blocked) {
+       if (task->incall->tso) {
+           switch (task->incall->tso->why_blocked) {
            case BlockedOnSTM:
            case BlockedOnBlackHole:
-           case BlockedOnException:
+           case BlockedOnMsgThrowTo:
            case BlockedOnMVar:
-               throwToSingleThreaded(cap, task->tso, 
-                                     (StgClosure *)NonTermination_closure);
+               throwToSingleThreaded(cap, task->incall->tso, 
+                                     (StgClosure *)nonTermination_closure);
                return;
            default:
                barf("deadlock: main thread blocked in a strange way");
@@ -1030,164 +912,15 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
     }
 }
 
-/* ----------------------------------------------------------------------------
- * Process an event (GRAN only)
- * ------------------------------------------------------------------------- */
-
-#if defined(GRAN)
-static StgTSO *
-scheduleProcessEvent(rtsEvent *event)
-{
-    StgTSO *t;
-
-    if (RtsFlags.GranFlags.Light)
-      GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
-
-    /* adjust time based on time-stamp */
-    if (event->time > CurrentTime[CurrentProc] &&
-        event->evttype != ContinueThread)
-      CurrentTime[CurrentProc] = event->time;
-    
-    /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
-    if (!RtsFlags.GranFlags.Light)
-      handleIdlePEs();
-
-    IF_DEBUG(gran, debugBelch("GRAN: switch by event-type\n"));
-
-    /* main event dispatcher in GranSim */
-    switch (event->evttype) {
-      /* Should just be continuing execution */
-    case ContinueThread:
-      IF_DEBUG(gran, debugBelch("GRAN: doing ContinueThread\n"));
-      /* ToDo: check assertion
-      ASSERT(run_queue_hd != (StgTSO*)NULL &&
-            run_queue_hd != END_TSO_QUEUE);
-      */
-      /* Ignore ContinueThreads for fetching threads (if synchr comm) */
-      if (!RtsFlags.GranFlags.DoAsyncFetch &&
-         procStatus[CurrentProc]==Fetching) {
-       debugBelch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]\n",
-             CurrentTSO->id, CurrentTSO, CurrentProc);
-       goto next_thread;
-      }        
-      /* Ignore ContinueThreads for completed threads */
-      if (CurrentTSO->what_next == ThreadComplete) {
-       debugBelch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)\n", 
-             CurrentTSO->id, CurrentTSO, CurrentProc);
-       goto next_thread;
-      }        
-      /* Ignore ContinueThreads for threads that are being migrated */
-      if (PROCS(CurrentTSO)==Nowhere) { 
-       debugBelch("ghuH: trying to run the migrating TSO %d (%p) [PE %d] (ignoring ContinueThread)\n",
-             CurrentTSO->id, CurrentTSO, CurrentProc);
-       goto next_thread;
-      }
-      /* The thread should be at the beginning of the run queue */
-      if (CurrentTSO!=run_queue_hds[CurrentProc]) { 
-       debugBelch("ghuH: TSO %d (%p) [PE %d] is not at the start of the run_queue when doing a ContinueThread\n",
-             CurrentTSO->id, CurrentTSO, CurrentProc);
-       break; // run the thread anyway
-      }
-      /*
-      new_event(proc, proc, CurrentTime[proc],
-               FindWork,
-               (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
-      goto next_thread; 
-      */ /* Catches superfluous CONTINUEs -- should be unnecessary */
-      break; // now actually run the thread; DaH Qu'vam yImuHbej 
-
-    case FetchNode:
-      do_the_fetchnode(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case GlobalBlock:
-      do_the_globalblock(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case FetchReply:
-      do_the_fetchreply(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case UnblockThread:   /* Move from the blocked queue to the tail of */
-      do_the_unblock(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case ResumeThread:  /* Move from the blocked queue to the tail of */
-      /* the runnable queue ( i.e. Qu' SImqa'lu') */ 
-      event->tso->gran.blocktime += 
-       CurrentTime[CurrentProc] - event->tso->gran.blockedat;
-      do_the_startthread(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case StartThread:
-      do_the_startthread(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case MoveThread:
-      do_the_movethread(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case MoveSpark:
-      do_the_movespark(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    case FindWork:
-      do_the_findwork(event);
-      goto next_thread;             /* handle next event in event queue  */
-      
-    default:
-      barf("Illegal event type %u\n", event->evttype);
-    }  /* switch */
-    
-    /* This point was scheduler_loop in the old RTS */
-
-    IF_DEBUG(gran, debugBelch("GRAN: after main switch\n"));
-
-    TimeOfLastEvent = CurrentTime[CurrentProc];
-    TimeOfNextEvent = get_time_of_next_event();
-    IgnoreEvents=(TimeOfNextEvent==0); // HWL HACK
-    // CurrentTSO = ThreadQueueHd;
-
-    IF_DEBUG(gran, debugBelch("GRAN: time of next event is: %ld\n", 
-                        TimeOfNextEvent));
-
-    if (RtsFlags.GranFlags.Light) 
-      GranSimLight_leave_system(event, &ActiveTSO); 
-
-    EndOfTimeSlice = CurrentTime[CurrentProc]+RtsFlags.GranFlags.time_slice;
-
-    IF_DEBUG(gran, 
-            debugBelch("GRAN: end of time-slice is %#lx\n", EndOfTimeSlice));
-
-    /* in a GranSim setup the TSO stays on the run queue */
-    t = CurrentTSO;
-    /* Take a thread from the run queue. */
-    POP_RUN_QUEUE(t); // take_off_run_queue(t);
-
-    IF_DEBUG(gran, 
-            debugBelch("GRAN: About to run current thread, which is\n");
-            G_TSO(t,5));
-
-    context_switch = 0; // turned on via GranYield, checking events and time slice
-
-    IF_DEBUG(gran, 
-            DumpGranEvent(GR_SCHEDULE, t));
-
-    procStatus[CurrentProc] = Busy;
-}
-#endif // GRAN
 
 /* ----------------------------------------------------------------------------
  * Send pending messages (PARALLEL_HASKELL only)
  * ------------------------------------------------------------------------- */
 
 #if defined(PARALLEL_HASKELL)
-static StgTSO *
+static void
 scheduleSendPendingMessages(void)
 {
-    StgSparkPool *pool;
-    rtsSpark spark;
-    StgTSO *t;
 
 # if defined(PAR) // global Mem.Mgmt., omit for now
     if (PendingFetches != END_BF_QUEUE) {
@@ -1204,256 +937,47 @@ scheduleSendPendingMessages(void)
 #endif
 
 /* ----------------------------------------------------------------------------
- * Activate spark threads (PARALLEL_HASKELL only)
+ * Process message in the current Capability's inbox
  * ------------------------------------------------------------------------- */
 
-#if defined(PARALLEL_HASKELL)
 static void
-scheduleActivateSpark(void)
-{
-#if defined(SPARKS)
-  ASSERT(emptyRunQueue());
-/* We get here if the run queue is empty and want some work.
-   We try to turn a spark into a thread, and add it to the run queue,
-   from where it will be picked up in the next iteration of the scheduler
-   loop.
-*/
-
-      /* :-[  no local threads => look out for local sparks */
-      /* the spark pool for the current PE */
-      pool = &(cap.r.rSparks); // JB: cap = (old) MainCap
-      if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
-         pool->hd < pool->tl) {
-       /* 
-        * ToDo: add GC code check that we really have enough heap afterwards!!
-        * Old comment:
-        * If we're here (no runnable threads) and we have pending
-        * sparks, we must have a space problem.  Get enough space
-        * to turn one of those pending sparks into a
-        * thread... 
-        */
-
-       spark = findSpark(rtsFalse);            /* get a spark */
-       if (spark != (rtsSpark) NULL) {
-         tso = createThreadFromSpark(spark);       /* turn the spark into a thread */
-         IF_PAR_DEBUG(fish, // schedule,
-                      debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n",
-                            tso->id, tso, advisory_thread_count));
-
-         if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
-           IF_PAR_DEBUG(fish, // schedule,
-                        debugBelch("==^^ failed to create thread from spark @ %lx\n",
-                            spark));
-           return rtsFalse; /* failed to generate a thread */
-         }                  /* otherwise fall through & pick-up new tso */
-       } else {
-         IF_PAR_DEBUG(fish, // schedule,
-                      debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n", 
-                            spark_queue_len(pool)));
-         return rtsFalse;  /* failed to generate a thread */
-       }
-       return rtsTrue;  /* success in generating a thread */
-  } else { /* no more threads permitted or pool empty */
-    return rtsFalse;  /* failed to generateThread */
-  }
-#else
-  tso = NULL; // avoid compiler warning only
-  return rtsFalse;  /* dummy in non-PAR setup */
-#endif // SPARKS
-}
-#endif // PARALLEL_HASKELL
-
-/* ----------------------------------------------------------------------------
- * Get work from a remote node (PARALLEL_HASKELL only)
- * ------------------------------------------------------------------------- */
-    
-#if defined(PARALLEL_HASKELL)
-static rtsBool
-scheduleGetRemoteWork(rtsBool *receivedFinish)
+scheduleProcessInbox (Capability *cap USED_IF_THREADS)
 {
-  ASSERT(emptyRunQueue());
-
-  if (RtsFlags.ParFlags.BufferTime) {
-       IF_PAR_DEBUG(verbose, 
-               debugBelch("...send all pending data,"));
-        {
-         nat i;
-         for (i=1; i<=nPEs; i++)
-           sendImmediately(i); // send all messages away immediately
-       }
-  }
-# ifndef SPARKS
-       //++EDEN++ idle() , i.e. send all buffers, wait for work
-       // suppress fishing in EDEN... just look for incoming messages
-       // (blocking receive)
-  IF_PAR_DEBUG(verbose, 
-              debugBelch("...wait for incoming messages...\n"));
-  *receivedFinish = processMessages(); // blocking receive...
-
-       // and reenter scheduling loop after having received something
-       // (return rtsFalse below)
-
-# else /* activate SPARKS machinery */
-/* We get here, if we have no work, tried to activate a local spark, but still
-   have no work. We try to get a remote spark, by sending a FISH message.
-   Thread migration should be added here, and triggered when a sequence of 
-   fishes returns without work. */
-       delay = (RtsFlags.ParFlags.fishDelay!=0ll ? RtsFlags.ParFlags.fishDelay : 0ll);
-
-      /* =8-[  no local sparks => look for work on other PEs */
-       /*
-        * We really have absolutely no work.  Send out a fish
-        * (there may be some out there already), and wait for
-        * something to arrive.  We clearly can't run any threads
-        * until a SCHEDULE or RESUME arrives, and so that's what
-        * we're hoping to see.  (Of course, we still have to
-        * respond to other types of messages.)
-        */
-       rtsTime now = msTime() /*CURRENT_TIME*/;
-       IF_PAR_DEBUG(verbose, 
-                    debugBelch("--  now=%ld\n", now));
-       IF_PAR_DEBUG(fish, // verbose,
-            if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
-                (last_fish_arrived_at!=0 &&
-                 last_fish_arrived_at+delay > now)) {
-              debugBelch("--$$ <%llu> delaying FISH until %llu (last fish %llu, delay %llu)\n",
-                    now, last_fish_arrived_at+delay, 
-                    last_fish_arrived_at,
-                    delay);
-            });
-  
-       if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
-           advisory_thread_count < RtsFlags.ParFlags.maxThreads) { // send a FISH, but when?
-         if (last_fish_arrived_at==0 ||
-             (last_fish_arrived_at+delay <= now)) {           // send FISH now!
-           /* outstandingFishes is set in sendFish, processFish;
-              avoid flooding system with fishes via delay */
-    next_fish_to_send_at = 0;  
-  } else {
-    /* ToDo: this should be done in the main scheduling loop to avoid the
-             busy wait here; not so bad if fish delay is very small  */
-    int iq = 0; // DEBUGGING -- HWL
-    next_fish_to_send_at = last_fish_arrived_at+delay; // remember when to send  
-    /* send a fish when ready, but process messages that arrive in the meantime */
-    do {
-      if (PacketsWaiting()) {
-        iq++; // DEBUGGING
-        *receivedFinish = processMessages();
-      }
-      now = msTime();
-    } while (!*receivedFinish || now<next_fish_to_send_at);
-    // JB: This means the fish could become obsolete, if we receive
-    // work. Better check for work again? 
-    // last line: while (!receivedFinish || !haveWork || now<...)
-    // next line: if (receivedFinish || haveWork )
-
-    if (*receivedFinish) // no need to send a FISH if we are finishing anyway
-      return rtsFalse;  // NB: this will leave scheduler loop
-                       // immediately after return!
-                         
-    IF_PAR_DEBUG(fish, // verbose,
-              debugBelch("--$$ <%llu> sent delayed fish (%d processMessages); active/total threads=%d/%d\n",now,iq,run_queue_len(),advisory_thread_count));
-
-  }
-
-    // JB: IMHO, this should all be hidden inside sendFish(...)
-    /* pe = choosePE(); 
-       sendFish(pe, thisPE, NEW_FISH_AGE, NEW_FISH_HISTORY, 
-                NEW_FISH_HUNGER);
-
-    // Global statistics: count no. of fishes
-    if (RtsFlags.ParFlags.ParStats.Global &&
-         RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
-          globalParStats.tot_fish_mess++;
-          }
-    */ 
+#if defined(THREADED_RTS)
+    Message *m;
 
 
-  /* delayed fishes must have been sent by now! */
-  next_fish_to_send_at = 0;  
-  }
-      
-  *receivedFinish = processMessages();
-# endif /* SPARKS */
-
- return rtsFalse;
- /* NB: this function always returns rtsFalse, meaning the scheduler
-    loop continues with the next iteration; 
-    rationale: 
-      return code means success in finding work; we enter this function
-      if there is no local work, thus have to send a fish which takes
-      time until it arrives with work; in the meantime we should process
-      messages in the main loop;
- */
+    while (!emptyInbox(cap)) {
+        ACQUIRE_LOCK(&cap->lock);
+        m = cap->inbox;
+        cap->inbox = m->link;
+        RELEASE_LOCK(&cap->lock);
+        executeMessage(cap, (Message *)m);
+    }
+#endif
 }
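The new scheduleProcessInbox above holds cap->lock only long enough to unlink one message and then runs executeMessage outside the lock. A minimal, self-contained sketch of the same lock-then-drain pattern in plain pthreads; all names here are illustrative and not RTS code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct msg { struct msg *link; int payload; };

static struct msg *inbox = NULL;                      /* plays the role of cap->inbox */
static pthread_mutex_t inbox_lock = PTHREAD_MUTEX_INITIALIZER;

static void post(int payload)                         /* producer side (assumed, for illustration) */
{
    struct msg *m = malloc(sizeof *m);
    m->payload = payload;
    pthread_mutex_lock(&inbox_lock);
    m->link = inbox;                                  /* push onto the list head */
    inbox = m;
    pthread_mutex_unlock(&inbox_lock);
}

static void drain(void)                               /* consumer, cf. scheduleProcessInbox */
{
    for (;;) {
        pthread_mutex_lock(&inbox_lock);
        struct msg *m = inbox;
        if (m != NULL) inbox = m->link;               /* unlink under the lock */
        pthread_mutex_unlock(&inbox_lock);
        if (m == NULL) break;
        printf("executing message %d\n", m->payload); /* work happens outside the lock */
        free(m);
    }
}

int main(void)
{
    post(1); post(2); post(3);
    drain();
    return 0;
}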
-#endif // PARALLEL_HASKELL
 
 /* ----------------------------------------------------------------------------
- * PAR/GRAN: Report stats & debugging info(?)
- * ------------------------------------------------------------------------- */
-
-#if defined(PAR) || defined(GRAN)
-static void
-scheduleGranParReport(void)
-{
-  ASSERT(run_queue_hd != END_TSO_QUEUE);
-
-  /* Take a thread from the run queue, if we have work */
-  POP_RUN_QUEUE(t);  // take_off_run_queue(END_TSO_QUEUE);
-
-    /* If this TSO has got its outport closed in the meantime, 
-     *   it mustn't be run. Instead, we have to clean it up as if it was finished.
-     * It has to be marked as TH_DEAD for this purpose.
-     * If it is TH_TERM instead, it is supposed to have finished in the normal way.
-
-JB: TODO: investigate wether state change field could be nuked
-     entirely and replaced by the normal tso state (whatnext
-     field). All we want to do is to kill tsos from outside.
-     */
-
-    /* ToDo: write something to the log-file
-    if (RTSflags.ParFlags.granSimStats && !sameThread)
-        DumpGranEvent(GR_SCHEDULE, RunnableThreadsHd);
-
-    CurrentTSO = t;
-    */
-    /* the spark pool for the current PE */
-    pool = &(cap.r.rSparks); //  cap = (old) MainCap
-
-    IF_DEBUG(scheduler, 
-            debugBelch("--=^ %d threads, %d sparks on [%#x]\n", 
-                  run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
-
-    IF_PAR_DEBUG(fish,
-            debugBelch("--=^ %d threads, %d sparks on [%#x]\n", 
-                  run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
-
-    if (RtsFlags.ParFlags.ParStats.Full && 
-       (t->par.sparkname != (StgInt)0) && // only log spark generated threads
-       (emitSchedule || // forced emit
-         (t && LastTSO && t->id != LastTSO->id))) {
-      /* 
-        we are running a different TSO, so write a schedule event to log file
-        NB: If we use fair scheduling we also have to write  a deschedule 
-            event for LastTSO; with unfair scheduling we know that the
-            previous tso has blocked whenever we switch to another tso, so
-            we don't need it in GUM for now
-      */
-      IF_PAR_DEBUG(fish, // schedule,
-                  debugBelch("____ scheduling spark generated thread %d (%lx) (%lx) via a forced emit\n",t->id,t,t->par.sparkname));
+ * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS)
+ * ------------------------------------------------------------------------- */
 
-      DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
-                      GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
-      emitSchedule = rtsFalse;
+#if defined(THREADED_RTS)
+static void
+scheduleActivateSpark(Capability *cap)
+{
+    if (anySparks())
+    {
+        createSparkThread(cap);
+        debugTrace(DEBUG_sched, "creating a spark thread");
     }
-}     
-#endif
+}
+#endif // PARALLEL_HASKELL || THREADED_RTS
 
 /* ----------------------------------------------------------------------------
  * After running a thread...
  * ------------------------------------------------------------------------- */
 
 static void
-schedulePostRunThread (StgTSO *t)
+schedulePostRunThread (Capability *cap, StgTSO *t)
 {
     // We have to be able to catch transactions that are in an
     // infinite loop as a result of seeing an inconsistent view of
@@ -1474,97 +998,13 @@ schedulePostRunThread (StgTSO *t)
             // ATOMICALLY_FRAME, aborting the (nested)
             // transaction, and saving the stack of any
             // partially-evaluated thunks on the heap.
-            throwToSingleThreaded_(&capabilities[0], t, 
-                                   NULL, rtsTrue, NULL);
+            throwToSingleThreaded_(cap, t, NULL, rtsTrue);
             
             
-#ifdef REG_R1
-            ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
-#endif
+//            ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
         }
     }
 
-#if defined(PAR)
-    /* HACK 675: if the last thread didn't yield, make sure to print a 
-       SCHEDULE event to the log file when StgRunning the next thread, even
-       if it is the same one as before */
-    LastTSO = t; 
-    TimeOfLastYield = CURRENT_TIME;
-#endif
-
   /* some statistics gathering in the parallel case */
-
-#if defined(GRAN) || defined(PAR) || defined(EDEN)
-  switch (ret) {
-    case HeapOverflow:
-# if defined(GRAN)
-      IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t));
-      globalGranStats.tot_heapover++;
-# elif defined(PAR)
-      globalParStats.tot_heapover++;
-# endif
-      break;
-
-     case StackOverflow:
-# if defined(GRAN)
-      IF_DEBUG(gran, 
-              DumpGranEvent(GR_DESCHEDULE, t));
-      globalGranStats.tot_stackover++;
-# elif defined(PAR)
-      // IF_DEBUG(par, 
-      // DumpGranEvent(GR_DESCHEDULE, t);
-      globalParStats.tot_stackover++;
-# endif
-      break;
-
-    case ThreadYielding:
-# if defined(GRAN)
-      IF_DEBUG(gran, 
-              DumpGranEvent(GR_DESCHEDULE, t));
-      globalGranStats.tot_yields++;
-# elif defined(PAR)
-      // IF_DEBUG(par, 
-      // DumpGranEvent(GR_DESCHEDULE, t);
-      globalParStats.tot_yields++;
-# endif
-      break; 
-
-    case ThreadBlocked:
-# if defined(GRAN)
-       debugTrace(DEBUG_sched, 
-                  "--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ", 
-                  t->id, t, whatNext_strs[t->what_next], t->block_info.closure, 
-                  (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
-              if (t->block_info.closure!=(StgClosure*)NULL)
-                print_bq(t->block_info.closure);
-              debugBelch("\n"));
-
-      // ??? needed; should emit block before
-      IF_DEBUG(gran, 
-              DumpGranEvent(GR_DESCHEDULE, t)); 
-      prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
-      /*
-       ngoq Dogh!
-      ASSERT(procStatus[CurrentProc]==Busy || 
-             ((procStatus[CurrentProc]==Fetching) && 
-             (t->block_info.closure!=(StgClosure*)NULL)));
-      if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
-         !(!RtsFlags.GranFlags.DoAsyncFetch &&
-           procStatus[CurrentProc]==Fetching)) 
-       procStatus[CurrentProc] = Idle;
-      */
-# elif defined(PAR)
-//++PAR++  blockThread() writes the event (change?)
-# endif
-    break;
-
-  case ThreadFinished:
-    break;
-
-  default:
-    barf("parGlobalStats: unknown return code");
-    break;
-    }
-#endif
 }
 
 /* -----------------------------------------------------------------------------
 /* -----------------------------------------------------------------------------
@@ -1584,7 +1024,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
        
        debugTrace(DEBUG_sched,
                   "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n", 
-                  (long)t->id, whatNext_strs[t->what_next], blocks);
+                  (long)t->id, what_next_strs[t->what_next], blocks);
     
        // don't do this if the nursery is (nearly) full, we'll GC first.
        if (cap->r.rCurrentNursery->link != NULL ||
@@ -1602,10 +1042,6 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
            if (cap->r.rCurrentNursery->u.back != NULL) {
                cap->r.rCurrentNursery->u.back->link = bd;
            } else {
-#if !defined(THREADED_RTS)
-               ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
-                      g0s0 == cap->r.rNursery);
-#endif
                cap->r.rNursery->blocks = bd;
            }             
            cap->r.rCurrentNursery->u.back = bd;
@@ -1620,8 +1056,8 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
            { 
                bdescr *x;
                for (x = bd; x < bd + blocks; x++) {
-                   x->step = cap->r.rNursery;
-                   x->gen_no = 0;
+                    initBdescr(x,g0,g0);
+                    x->free = x->start;
                    x->flags = 0;
                }
            }
@@ -1642,30 +1078,13 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
        }
     }
     
-    debugTrace(DEBUG_sched,
-              "--<< thread %ld (%s) stopped: HeapOverflow\n", 
-              (long)t->id, whatNext_strs[t->what_next]);
-
-#if defined(GRAN)
-    ASSERT(!is_on_queue(t,CurrentProc));
-#elif defined(PARALLEL_HASKELL)
-    /* Currently we emit a DESCHEDULE event before GC in GUM.
-       ToDo: either add separate event to distinguish SYSTEM time from rest
-       or just nuke this DESCHEDULE (and the following SCHEDULE) */
-    if (0 && RtsFlags.ParFlags.ParStats.Full) {
-       DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
-                        GR_DESCHEDULE, t, (StgClosure *)NULL, 0, 0);
-       emitSchedule = rtsTrue;
-    }
-#endif
-      
-    if (context_switch) {
+    if (cap->r.rHpLim == NULL || cap->context_switch) {
         // Sometimes we miss a context switch, e.g. when calling
         // primitives in a tight loop, MAYBE_GC() doesn't check the
         // context switch flag, and we end up waiting for a GC.
         // See #1984, and concurrent/should_run/1984
-        context_switch = 0;
-        addToRunQueue(cap,t);
+        cap->context_switch = 0;
+        appendToRunQueue(cap,t);
     } else {
         pushOnRunQueue(cap,t);
     }
@@ -1680,10 +1099,6 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
 static void
 scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
 {
-    debugTrace (DEBUG_sched,
-               "--<< thread %ld (%s) stopped, StackOverflow", 
-               (long)t->id, whatNext_strs[t->what_next]);
-
     /* just adjust the stack for this thread, then pop it back
      * on the run queue.
      */
@@ -1694,8 +1109,8 @@ scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
        /* The TSO attached to this Task may have moved, so update the
         * pointer to it.
         */
-       if (task->tso == t) {
-           task->tso = new_t;
+       if (task->incall->tso == t) {
+           task->incall->tso = new_t;
        }
        pushOnRunQueue(cap,new_t);
     }
@@ -1708,64 +1123,38 @@ scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
 static rtsBool
 scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
 {
-    // Reset the context switch flag.  We don't do this just before
-    // running the thread, because that would mean we would lose ticks
-    // during GC, which can lead to unfair scheduling (a thread hogs
-    // the CPU because the tick always arrives during GC).  This way
-    // penalises threads that do a lot of allocation, but that seems
-    // better than the alternative.
-    context_switch = 0;
-    
     /* put the thread back on the run queue.  Then, if we're ready to
      * GC, check whether this is the last task to stop.  If so, wake
      * up the GC thread.  getThread will block during a GC until the
      * GC is finished.
      */
-#ifdef DEBUG
-    if (t->what_next != prev_what_next) {
-       debugTrace(DEBUG_sched,
-                  "--<< thread %ld (%s) stopped to switch evaluators", 
-                  (long)t->id, whatNext_strs[t->what_next]);
-    } else {
-       debugTrace(DEBUG_sched,
-                  "--<< thread %ld (%s) stopped, yielding",
-                  (long)t->id, whatNext_strs[t->what_next]);
-    }
-#endif
-    
-    IF_DEBUG(sanity,
-            //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
-            checkTSO(t));
+
     ASSERT(t->_link == END_TSO_QUEUE);
     
     // Shortcut if we're just switching evaluators: don't bother
     // doing stack squeezing (which can be expensive), just run the
     // thread.
-    if (t->what_next != prev_what_next) {
+    if (cap->context_switch == 0 && t->what_next != prev_what_next) {
+       debugTrace(DEBUG_sched,
+                  "--<< thread %ld (%s) stopped to switch evaluators", 
+                  (long)t->id, what_next_strs[t->what_next]);
        return rtsTrue;
     }
+
+    // Reset the context switch flag.  We don't do this just before
+    // running the thread, because that would mean we would lose ticks
+    // during GC, which can lead to unfair scheduling (a thread hogs
+    // the CPU because the tick always arrives during GC).  This way
+    // penalises threads that do a lot of allocation, but that seems
+    // better than the alternative.
+    cap->context_switch = 0;
     
-#if defined(GRAN)
-    ASSERT(!is_on_queue(t,CurrentProc));
-      
     IF_DEBUG(sanity,
-            //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
-            checkThreadQsSanity(rtsTrue));
-
-#endif
+            //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
+            checkTSO(t));
 
-    addToRunQueue(cap,t);
+    appendToRunQueue(cap,t);
 
-#if defined(GRAN)
-    /* add a ContinueThread event to actually process the thread */
-    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
-             ContinueThread,
-             t, (StgClosure*)NULL, (rtsSpark*)NULL);
-    IF_GRAN_DEBUG(bq, 
-                 debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n");
-                 G_EVENTQ(0);
-                 G_CURR_THREADQ(0));
-#endif
     return rtsFalse;
 }
 
@@ -1775,47 +1164,11 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
 
 static void
 scheduleHandleThreadBlocked( StgTSO *t
-#if !defined(GRAN) && !defined(DEBUG)
+#if !defined(DEBUG)
     STG_UNUSED
 #endif
     )
 {
-#if defined(GRAN)
-    IF_DEBUG(scheduler,
-            debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: \n", 
-                       t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
-            if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
-    
-    // ??? needed; should emit block before
-    IF_DEBUG(gran, 
-            DumpGranEvent(GR_DESCHEDULE, t)); 
-    prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
-    /*
-      ngoq Dogh!
-      ASSERT(procStatus[CurrentProc]==Busy || 
-      ((procStatus[CurrentProc]==Fetching) && 
-      (t->block_info.closure!=(StgClosure*)NULL)));
-      if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
-      !(!RtsFlags.GranFlags.DoAsyncFetch &&
-      procStatus[CurrentProc]==Fetching)) 
-      procStatus[CurrentProc] = Idle;
-    */
-#elif defined(PAR)
-    IF_DEBUG(scheduler,
-            debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: \n", 
-                       t->id, t, whatNext_strs[t->what_next], t->block_info.closure));
-    IF_PAR_DEBUG(bq,
-                
-                if (t->block_info.closure!=(StgClosure*)NULL) 
-                print_bq(t->block_info.closure));
-    
-    /* Send a fetch (if BlockedOnGA) and dump event to log file */
-    blockThread(t);
-    
-    /* whatever we schedule next, we must log that schedule */
-    emitSchedule = rtsTrue;
-    
-#else /* !GRAN */
 
       // We don't need to do anything.  The thread is blocked, and it
       // has tidied up its stack and placed itself on whatever queue
@@ -1823,26 +1176,12 @@ scheduleHandleThreadBlocked( StgTSO *t
 
     // ASSERT(t->why_blocked != NotBlocked);
     // Not true: for example,
-    //    - in THREADED_RTS, the thread may already have been woken
-    //      up by another Capability.  This actually happens: try
-    //      conc023 +RTS -N2.
     //    - the thread may have woken itself up already, because
     //      threadPaused() might have raised a blocked throwTo
     //      exception, see maybePerformBlockedException().
 
 #ifdef DEBUG
-    if (traceClass(DEBUG_sched)) {
-       debugTraceBegin("--<< thread %lu (%s) stopped: ", 
-                       (unsigned long)t->id, whatNext_strs[t->what_next]);
-       printThreadBlockage(t);
-       debugTraceEnd();
-    }
-#endif
-    
-    /* Only for dumping event to log file 
-       ToDo: do I need this in GranSim, too?
-       blockThread(t);
-    */
+    traceThreadStatus(DEBUG_sched, t);
 #endif
 }
 
@@ -1859,50 +1198,10 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
      * We also end up here if the thread kills itself with an
      * uncaught exception, see Exception.cmm.
      */
-    debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished", 
-              (unsigned long)t->id, whatNext_strs[t->what_next]);
-
-#if defined(GRAN)
-      endThread(t, CurrentProc); // clean-up the thread
-#elif defined(PARALLEL_HASKELL)
-      /* For now all are advisory -- HWL */
-      //if(t->priority==AdvisoryPriority) ??
-      advisory_thread_count--; // JB: Caution with this counter, buggy!
-      
-# if defined(DIST)
-      if(t->dist.priority==RevalPriority)
-       FinishReval(t);
-# endif
-    
-# if defined(EDENOLD)
-      // the thread could still have an outport... (BUG)
-      if (t->eden.outport != -1) {
-      // delete the outport for the tso which has finished...
-       IF_PAR_DEBUG(eden_ports,
-                  debugBelch("WARNING: Scheduler removes outport %d for TSO %d.\n",
-                             t->eden.outport, t->id));
-       deleteOPT(t);
-      }
-      // thread still in the process (HEAVY BUG! since outport has just been closed...)
-      if (t->eden.epid != -1) {
-       IF_PAR_DEBUG(eden_ports,
-                  debugBelch("WARNING: Scheduler removes TSO %d from process %d .\n",
-                          t->id, t->eden.epid));
-       removeTSOfromProcess(t);
-      }
-# endif 
-
-# if defined(PAR)
-      if (RtsFlags.ParFlags.ParStats.Full &&
-         !RtsFlags.ParFlags.ParStats.Suppressed) 
-       DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
 
-      //  t->par only contains statistics: left out for now...
-      IF_PAR_DEBUG(fish,
-                  debugBelch("**** end thread: ended sparked thread %d (%lx); sparkname: %lx\n",
-                             t->id,t,t->par.sparkname));
-# endif
-#endif // PARALLEL_HASKELL
+    // blocked exceptions can now complete, even if the thread was in
+    // blocked mode (see #2910).
+    awakenBlockedExceptionQueue (cap, t);
 
       //
       // Check whether the thread that just completed was a bound
@@ -1916,7 +1215,7 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
 
       if (t->bound) {
 
 
       if (t->bound) {
 
-         if (t->bound != task) {
+         if (t->bound != task->incall) {
 #if !defined(THREADED_RTS)
              // Must be a bound thread that is not the topmost one.  Leave
              // it on the run queue until the stack has unwound to the
@@ -1933,27 +1232,42 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
 #endif
          }
 
-         ASSERT(task->tso == t);
+         ASSERT(task->incall->tso == t);
 
          if (t->what_next == ThreadComplete) {
 
-             if (task->ret) {
+             if (task->incall->ret) {
                  // NOTE: return val is tso->sp[1] (see StgStartup.hc)
                  // NOTE: return val is tso->sp[1] (see StgStartup.hc)
-                 *(task->ret) = (StgClosure *)task->tso->sp[1]; 
+                 *(task->incall->ret) = (StgClosure *)task->incall->tso->sp[1]; 
              }
              }
-             task->stat = Success;
+             task->incall->stat = Success;
          } else {
          } else {
-             if (task->ret) {
-                 *(task->ret) = NULL;
+             if (task->incall->ret) {
+                 *(task->incall->ret) = NULL;
              }
              if (sched_state >= SCHED_INTERRUPTING) {
-                 task->stat = Interrupted;
+                  if (heap_overflow) {
+                      task->incall->stat = HeapExhausted;
+                  } else {
+                      task->incall->stat = Interrupted;
+                  }
              } else {
-                 task->stat = Killed;
+                 task->incall->stat = Killed;
              }
          }
 #ifdef DEBUG
-         removeThreadLabel((StgWord)task->tso->id);
+         removeThreadLabel((StgWord)task->incall->tso->id);
 #endif
+
+          // We no longer consider this thread and task to be bound to
+          // each other.  The TSO lives on until it is GC'd, but the
+          // task is about to be released by the caller, and we don't
+          // want anyone following the pointer from the TSO to the
+          // defunct task (which might have already been
+          // re-used). This was a real bug: the GC updated
+          // tso->bound->tso which lead to a deadlock.
+          t->bound = NULL;
+          task->incall->tso = NULL;
+
          return rtsTrue; // tells schedule() to return
       }
 
          return rtsTrue; // tells schedule() to return
       }
 
@@ -1985,15 +1299,32 @@ scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
 static Capability *
 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 {
-    StgTSO *t;
     rtsBool heap_census;
 #ifdef THREADED_RTS
-    static volatile StgWord waiting_for_gc;
-    rtsBool was_waiting;
+    /* extern static volatile StgWord waiting_for_gc; 
+       lives inside capability.c */
+    rtsBool gc_type, prev_pending_gc;
     nat i;
 #endif
 
+    if (sched_state == SCHED_SHUTTING_DOWN) {
+        // The final GC has already been done, and the system is
+        // shutting down.  We'll probably deadlock if we try to GC
+        // now.
+        return cap;
+    }
+
 #ifdef THREADED_RTS
+    if (sched_state < SCHED_INTERRUPTING
+        && RtsFlags.ParFlags.parGcEnabled
+        && N >= RtsFlags.ParFlags.parGcGen
+        && ! oldest_gen->mark)
+    {
+        gc_type = PENDING_GC_PAR;
+    } else {
+        gc_type = PENDING_GC_SEQ;
+    }
+
     // In order to GC, there must be no threads running Haskell code.
     // Therefore, the GC thread needs to hold *all* the capabilities,
     // and release them after the GC has completed.  
@@ -2004,77 +1335,156 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     // actually did the GC.  But it's quite hard to arrange for all
     // the other tasks to sleep and stay asleep.
     //
-       
-    was_waiting = cas(&waiting_for_gc, 0, 1);
-    if (was_waiting) {
+
+    /*  Other capabilities are prevented from running yet more Haskell
+       threads if waiting_for_gc is set. Tested inside
+       yieldCapability() and releaseCapability() in Capability.c */
+
+    prev_pending_gc = cas(&waiting_for_gc, 0, gc_type);
+    if (prev_pending_gc) {
        do {
-           debugTrace(DEBUG_sched, "someone else is trying to GC...");
-           if (cap) yieldCapability(&cap,task);
+           debugTrace(DEBUG_sched, "someone else is trying to GC (%d)...", 
+                       prev_pending_gc);
+            ASSERT(cap);
+            yieldCapability(&cap,task);
        } while (waiting_for_gc);
        return cap;  // NOTE: task->cap might have changed here
     }
 
-    for (i=0; i < n_capabilities; i++) {
-       debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
-       if (cap != &capabilities[i]) {
-           Capability *pcap = &capabilities[i];
-           // we better hope this task doesn't get migrated to
-           // another Capability while we're waiting for this one.
-           // It won't, because load balancing happens while we have
-           // all the Capabilities, but even so it's a slightly
-           // unsavoury invariant.
-           task->cap = pcap;
-           context_switch = 1;
-           waitForReturnCapability(&pcap, task);
-           if (pcap != &capabilities[i]) {
-               barf("scheduleDoGC: got the wrong capability");
-           }
-       }
+    setContextSwitches();
+
+    // The final shutdown GC is always single-threaded, because it's
+    // possible that some of the Capabilities have no worker threads.
+    
+    if (gc_type == PENDING_GC_SEQ)
+    {
+        traceEventRequestSeqGc(cap);
+    }
+    else
+    {
+        traceEventRequestParGc(cap);
+        debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
+    }
+
+    if (gc_type == PENDING_GC_SEQ)
+    {
+        // single-threaded GC: grab all the capabilities
+        for (i=0; i < n_capabilities; i++) {
+            debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilies (%d/%d)", i, n_capabilities);
+            if (cap != &capabilities[i]) {
+                Capability *pcap = &capabilities[i];
+                // we better hope this task doesn't get migrated to
+                // another Capability while we're waiting for this one.
+                // It won't, because load balancing happens while we have
+                // all the Capabilities, but even so it's a slightly
+                // unsavoury invariant.
+                task->cap = pcap;
+                waitForReturnCapability(&pcap, task);
+                if (pcap != &capabilities[i]) {
+                    barf("scheduleDoGC: got the wrong capability");
+                }
+            }
+        }
+    }
+    else
+    {
+        // multi-threaded GC: make sure all the Capabilities donate one
+        // GC thread each.
+        waitForGcThreads(cap);
     }
 
-    waiting_for_gc = rtsFalse;
 #endif
 
-    // so this happens periodically:
-    if (cap) scheduleCheckBlackHoles(cap);
-    
     IF_DEBUG(scheduler, printAllThreads());
 
+delete_threads_and_gc:
     /*
      * We now have all the capabilities; if we're in an interrupting
      * state, then we should take the opportunity to delete all the
      * threads in the system.
      */
-    if (sched_state >= SCHED_INTERRUPTING) {
-       deleteAllThreads(&capabilities[0]);
+    if (sched_state == SCHED_INTERRUPTING) {
+       deleteAllThreads(cap);
        sched_state = SCHED_SHUTTING_DOWN;
     }
     
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
-    /* everybody back, start the GC.
-     * Could do it in this thread, or signal a condition var
-     * to do it in another thread.  Either way, we need to
-     * broadcast on gc_pending_cond afterward.
-     */
+    traceEventGcStart(cap);
 #if defined(THREADED_RTS)
 #if defined(THREADED_RTS)
-    debugTrace(DEBUG_sched, "doing GC");
+    // reset waiting_for_gc *before* GC, so that when the GC threads
+    // emerge they don't immediately re-enter the GC.
+    waiting_for_gc = 0;
+    GarbageCollect(force_major || heap_census, gc_type, cap);
+#else
+    GarbageCollect(force_major || heap_census, 0, cap);
 #endif
 #endif
-    GarbageCollect(force_major || heap_census);
-    
+    traceEventGcEnd(cap);
+
+    if (recent_activity == ACTIVITY_INACTIVE && force_major)
+    {
+        // We are doing a GC because the system has been idle for a
+        // timeslice and we need to check for deadlock.  Record the
+        // fact that we've done a GC and turn off the timer signal;
+        // it will get re-enabled if we run any threads after the GC.
+        recent_activity = ACTIVITY_DONE_GC;
+        stopTimer();
+    }
+    else
+    {
+        // the GC might have taken long enough for the timer to set
+        // recent_activity = ACTIVITY_INACTIVE, but we aren't
+        // necessarily deadlocked:
+        recent_activity = ACTIVITY_YES;
+    }
+
+#if defined(THREADED_RTS)
+    if (gc_type == PENDING_GC_PAR)
+    {
+        releaseGCThreads(cap);
+    }
+#endif
+
     if (heap_census) {
         debugTrace(DEBUG_sched, "performing heap census");
         heapCensus();
        performHeapProfile = rtsFalse;
     }
 
+    if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
+        // GC set the heap_overflow flag, so we should proceed with
+        // an orderly shutdown now.  Ultimately we want the main
+        // thread to return to its caller with HeapExhausted, at which
+        // point the caller should call hs_exit().  The first step is
+        // to delete all the threads.
+        //
+        // Another way to do this would be to raise an exception in
+        // the main thread, which we really should do because it gives
+        // the program a chance to clean up.  But how do we find the
+        // main thread?  It should presumably be the same one that
+        // gets ^C exceptions, but that's all done on the Haskell side
+        // (GHC.TopHandler).
+       sched_state = SCHED_INTERRUPTING;
+        goto delete_threads_and_gc;
+    }
+
+#ifdef SPARKBALANCE
+    /* JB 
+       Once we are all together... this would be the place to balance all
+       spark pools. No concurrent stealing or adding of new sparks can
+       occur. Should be defined in Sparks.c. */
+    balanceSparkPoolsCaps(n_capabilities, capabilities);
+#endif
+
 #if defined(THREADED_RTS)
-    // release our stash of capabilities.
-    for (i = 0; i < n_capabilities; i++) {
-       if (cap != &capabilities[i]) {
-           task->cap = &capabilities[i];
-           releaseCapability(&capabilities[i]);
-       }
+    if (gc_type == PENDING_GC_SEQ) {
+        // release our stash of capabilities.
+        for (i = 0; i < n_capabilities; i++) {
+            if (cap != &capabilities[i]) {
+                task->cap = &capabilities[i];
+                releaseCapability(&capabilities[i]);
+            }
+        }
     }
     if (cap) {
        task->cap = cap;
@@ -2083,17 +1493,6 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     }
 #endif
 
-#if defined(GRAN)
-    /* add a ContinueThread event to continue execution of current thread */
-    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
-             ContinueThread,
-             t, (StgClosure*)NULL, (rtsSpark*)NULL);
-    IF_GRAN_DEBUG(bq, 
-                 debugBelch("GRAN: eventq and runnableq after Garbage collection:\n\n");
-                 G_EVENTQ(0);
-                 G_CURR_THREADQ(0));
-#endif /* GRAN */
-
     return cap;
 }
 
     return cap;
 }
 
@@ -2109,11 +1508,10 @@ forkProcess(HsStablePtr *entry
            )
 {
 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
-    Task *task;
     pid_t pid;
     StgTSO* t,*next;
     Capability *cap;
-    nat s;
+    nat g;
     
 #if defined(THREADED_RTS)
     if (RtsFlags.ParFlags.nNodes > 1) {
@@ -2135,10 +1533,14 @@ forkProcess(HsStablePtr *entry
     ACQUIRE_LOCK(&cap->lock);
     ACQUIRE_LOCK(&cap->running_task->lock);
 
+    stopTimer(); // See #4074
+
     pid = fork();
     
     if (pid) { // parent
        
+        startTimer(); // #4074
+
         RELEASE_LOCK(&sched_mutex);
         RELEASE_LOCK(&cap->lock);
         RELEASE_LOCK(&cap->running_task->lock);
@@ -2161,8 +1563,8 @@ forkProcess(HsStablePtr *entry
        // all Tasks, because they correspond to OS threads that are
        // now gone.
 
-        for (s = 0; s < total_steps; s++) {
-          for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
+        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+          for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
            if (t->what_next == ThreadRelocated) {
                next = t->_link;
            } else {
@@ -2171,6 +1573,13 @@ forkProcess(HsStablePtr *entry
                // exception, but we do want to raiseAsync() because these
                // threads may be evaluating thunks that we need later.
                deleteThread_(cap,t);
+
+                // stop the GC from updating the InCall to point to
+                // the TSO.  This is only necessary because the
+                // OSThread bound to the TSO has been killed, and
+                // won't get a chance to exit in the usual way (see
+                // also scheduleHandleThreadFinished).
+                t->bound = NULL;
            }
           }
        }
@@ -2184,25 +1593,15 @@ forkProcess(HsStablePtr *entry
 
        // Any suspended C-calling Tasks are no more, their OS threads
        // don't exist now:
-       cap->suspended_ccalling_tasks = NULL;
+       cap->suspended_ccalls = NULL;
 
        // Empty the threads lists.  Otherwise, the garbage
        // collector may attempt to resurrect some of these threads.
 
        // Empty the threads lists.  Otherwise, the garbage
        // collector may attempt to resurrect some of these threads.
-        for (s = 0; s < total_steps; s++) {
-            all_steps[s].threads = END_TSO_QUEUE;
+        for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+            generations[g].threads = END_TSO_QUEUE;
         }
 
-       // Wipe the task list, except the current Task.
-       ACQUIRE_LOCK(&sched_mutex);
-       for (task = all_tasks; task != NULL; task=task->all_link) {
-           if (task != cap->running_task) {
-#if defined(THREADED_RTS)
-                initMutex(&task->lock); // see #1391
-#endif
-               discardTask(task);
-           }
-       }
-       RELEASE_LOCK(&sched_mutex);
+        discardTasksExcept(cap->running_task);
 
 #if defined(THREADED_RTS)
        // Wipe our spare workers list, they no longer exist.  New
@@ -2217,6 +1616,10 @@ forkProcess(HsStablePtr *entry
         initTimer();
         startTimer();
 
+#if defined(THREADED_RTS)
+        cap = ioManagerStartCap(cap);
+#endif
+
        cap = rts_evalStableIO(cap, entry, NULL);  // run the action
        rts_checkSchedStatus("forkProcess",cap);
        
@@ -2226,7 +1629,6 @@ forkProcess(HsStablePtr *entry
     }
 #else /* !FORKPROCESS_PRIMOP_SUPPORTED */
     barf("forkProcess#: primop not supported on this platform, sorry!\n");
-    return -1;
 #endif
 }
 
@@ -2240,19 +1642,19 @@ deleteAllThreads ( Capability *cap )
     // NOTE: only safe to call if we own all capabilities.
 
     StgTSO* t, *next;
-    nat s;
+    nat g;
 
     debugTrace(DEBUG_sched,"deleting all threads");
 
-    for (s = 0; s < total_steps; s++) {
-      for (t = all_steps[s].threads; t != END_TSO_QUEUE; t = next) {
-       if (t->what_next == ThreadRelocated) {
-           next = t->_link;
-       } else {
-           next = t->global_link;
-           deleteThread(cap,t);
-       }
-      }
-    }      
+    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+        for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
+            if (t->what_next == ThreadRelocated) {
+                next = t->_link;
+            } else {
+                next = t->global_link;
+                deleteThread(cap,t);
+            }
+        }
+    }
 
     // The run queue now contains a bunch of ThreadKilled threads.  We
     // must not throw these away: the main thread(s) will be in there
 
     // The run queue now contains a bunch of ThreadKilled threads.  We
     // must not throw these away: the main thread(s) will be in there
@@ -2267,35 +1669,41 @@ deleteAllThreads ( Capability *cap )
 }
 
 /* -----------------------------------------------------------------------------
-   Managing the suspended_ccalling_tasks list.
+   Managing the suspended_ccalls list.
    Locks required: sched_mutex
    -------------------------------------------------------------------------- */
 
 STATIC_INLINE void
 suspendTask (Capability *cap, Task *task)
 {
-    ASSERT(task->next == NULL && task->prev == NULL);
-    task->next = cap->suspended_ccalling_tasks;
-    task->prev = NULL;
-    if (cap->suspended_ccalling_tasks) {
-       cap->suspended_ccalling_tasks->prev = task;
+    InCall *incall;
+    
+    incall = task->incall;
+    ASSERT(incall->next == NULL && incall->prev == NULL);
+    incall->next = cap->suspended_ccalls;
+    incall->prev = NULL;
+    if (cap->suspended_ccalls) {
+       cap->suspended_ccalls->prev = incall;
     }
-    cap->suspended_ccalling_tasks = task;
+    cap->suspended_ccalls = incall;
 }
 
 STATIC_INLINE void
 recoverSuspendedTask (Capability *cap, Task *task)
 {
-    if (task->prev) {
-       task->prev->next = task->next;
+    InCall *incall;
+
+    incall = task->incall;
+    if (incall->prev) {
+       incall->prev->next = incall->next;
     } else {
-       ASSERT(cap->suspended_ccalling_tasks == task);
-       cap->suspended_ccalling_tasks = task->next;
+       ASSERT(cap->suspended_ccalls == incall);
+       cap->suspended_ccalls = incall->next;
     }
-    if (task->next) {
-       task->next->prev = task->prev;
+    if (incall->next) {
+       incall->next->prev = incall->prev;
     }
-    task->next = task->prev = NULL;
+    incall->next = incall->prev = NULL;
 }
 
 /* ---------------------------------------------------------------------------
@@ -2308,13 +1716,17 @@ recoverSuspendedTask (Capability *cap, Task *task)
  * the whole system.
  *
  * The Haskell thread making the C call is put to sleep for the
- * duration of the call, on the susepended_ccalling_threads queue.  We
+ * duration of the call, on the suspended_ccalling_threads queue.  We
  * give out a token to the task, which it can use to resume the thread
  * on return from the C function.
+ *
+ * If this is an interruptible C call, this means that the FFI call may be
+ * unceremoniously terminated and should be scheduled on an
+ * unbound worker thread.
  * ------------------------------------------------------------------------- */
    
 void *
-suspendThread (StgRegTable *reg)
+suspendThread (StgRegTable *reg, rtsBool interruptible)
 {
   Capability *cap;
   int saved_errno;
@@ -2336,41 +1748,31 @@ suspendThread (StgRegTable *reg)
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
-  debugTrace(DEBUG_sched, 
-            "thread %lu did a safe foreign call", 
-            (unsigned long)cap->r.rCurrentTSO->id);
+  traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL);
 
   // XXX this might not be necessary --SDM
   tso->what_next = ThreadRunGHC;
 
   threadPaused(cap,tso);
 
 
   // XXX this might not be necessary --SDM
   tso->what_next = ThreadRunGHC;
 
   threadPaused(cap,tso);
 
-  if ((tso->flags & TSO_BLOCKEX) == 0)  {
-      tso->why_blocked = BlockedOnCCall;
-      tso->flags |= TSO_BLOCKEX;
-      tso->flags &= ~TSO_INTERRUPTIBLE;
+  if (interruptible) {
+    tso->why_blocked = BlockedOnCCall_Interruptible;
   } else {
   } else {
-      tso->why_blocked = BlockedOnCCall_NoUnblockExc;
+    tso->why_blocked = BlockedOnCCall;
   }
 
   // Hand back capability
   }
 
   // Hand back capability
-  task->suspended_tso = tso;
+  task->incall->suspended_tso = tso;
+  task->incall->suspended_cap = cap;
 
   ACQUIRE_LOCK(&cap->lock);
 
   suspendTask(cap,task);
   cap->in_haskell = rtsFalse;
 
   ACQUIRE_LOCK(&cap->lock);
 
   suspendTask(cap,task);
   cap->in_haskell = rtsFalse;
-  releaseCapability_(cap);
+  releaseCapability_(cap,rtsFalse);
   
   RELEASE_LOCK(&cap->lock);
 
   
-#if defined(THREADED_RTS)
-  /* Preparing to leave the RTS, so ensure there's a native thread/task
-     waiting to take over.
-  */
-  debugTrace(DEBUG_sched, "thread %lu: leaving RTS", (unsigned long)tso->id);
-#endif
-
   errno = saved_errno;
 #if mingw32_HOST_OS
   SetLastError(saved_winerror);
   errno = saved_errno;
 #if mingw32_HOST_OS
   SetLastError(saved_winerror);
@@ -2382,6 +1784,7 @@ StgRegTable *
 resumeThread (void *task_)
 {
     StgTSO *tso;
 resumeThread (void *task_)
 {
     StgTSO *tso;
+    InCall *incall;
     Capability *cap;
     Task *task = task_;
     int saved_errno;
     Capability *cap;
     Task *task = task_;
     int saved_errno;
@@ -2394,24 +1797,31 @@ resumeThread (void *task_)
     saved_winerror = GetLastError();
 #endif
 
     saved_winerror = GetLastError();
 #endif
 
-    cap = task->cap;
+    incall = task->incall;
+    cap = incall->suspended_cap;
+    task->cap = cap;
+
     // Wait for permission to re-enter the RTS with the result.
     waitForReturnCapability(&cap,task);
     // we might be on a different capability now... but if so, our
-    // entry on the suspended_ccalling_tasks list will also have been
+    // entry on the suspended_ccalls list will also have been
     // migrated.
 
     // Remove the thread from the suspended list
     recoverSuspendedTask(cap,task);
 
     // migrated.
 
     // Remove the thread from the suspended list
     recoverSuspendedTask(cap,task);
 
-    tso = task->suspended_tso;
-    task->suspended_tso = NULL;
+    tso = incall->suspended_tso;
+    incall->suspended_tso = NULL;
+    incall->suspended_cap = NULL;
     tso->_link = END_TSO_QUEUE; // no write barrier reqd
     tso->_link = END_TSO_QUEUE; // no write barrier reqd
-    debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
+
+    traceEventRunThread(cap, tso);
     
     
-       awakenBlockedExceptionQueue(cap,tso);
-       tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
+    if ((tso->flags & TSO_BLOCKEX) == 0) {
+        // avoid locking the TSO if we don't have to
+        if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
+            awakenBlockedExceptionQueue(cap,tso);
+        }
     }
     
     /* Reset blocking status */
@@ -2460,7 +1870,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
     } else {
-       migrateThreadToCapability_lock(&capabilities[cpu],tso);
+        migrateThread(cap, tso, &capabilities[cpu]);
     }
 #else
     appendToRunQueue(cap,tso);
@@ -2471,36 +1881,31 @@ Capability *
 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
 {
     Task *task;
+    StgThreadID id;
 
     // We already created/initialised the Task
     task = cap->running_task;
 
     // This TSO is now a bound thread; make the Task and TSO
     // point to each other.
-    tso->bound = task;
+    tso->bound = task->incall;
     tso->cap = cap;
 
-    task->tso = tso;
-    task->ret = ret;
-    task->stat = NoStatus;
+    task->incall->tso = tso;
+    task->incall->ret = ret;
+    task->incall->stat = NoStatus;
 
     appendToRunQueue(cap,tso);
 
 
     appendToRunQueue(cap,tso);
 
-    debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
-
-#if defined(GRAN)
-    /* GranSim specific init */
-    CurrentTSO = m->tso;                // the TSO to run
-    procStatus[MainProc] = Busy;        // status of main PE
-    CurrentProc = MainProc;             // PE to run it on
-#endif
+    id = tso->id;
+    debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)id);
 
     cap = schedule(cap,task);
 
-    ASSERT(task->stat != NoStatus);
+    ASSERT(task->incall->stat != NoStatus);
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
-    debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
+    debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)id);
     return cap;
 }
 
@@ -2509,25 +1914,27 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
  * ------------------------------------------------------------------------- */
 
 #if defined(THREADED_RTS)
-void
-workerStart(Task *task)
+void scheduleWorker (Capability *cap, Task *task)
 {
-    Capability *cap;
-
-    // See startWorkerTask().
-    ACQUIRE_LOCK(&task->lock);
-    cap = task->cap;
-    RELEASE_LOCK(&task->lock);
-
-    // set the thread-local pointer to the Task:
-    taskEnter(task);
-
     // schedule() runs without a lock.
     cap = schedule(cap,task);
 
-    // On exit from schedule(), we have a Capability.
-    releaseCapability(cap);
+    // On exit from schedule(), we have a Capability, but possibly not
+    // the same one we started with.
+
+    // During shutdown, the requirement is that after all the
+    // Capabilities are shut down, all workers that are shutting down
+    // have finished workerTaskStop().  This is why we hold on to
+    // cap->lock until we've finished workerTaskStop() below.
+    //
+    // There may be workers still involved in foreign calls; those
+    // will just block in waitForReturnCapability() because the
+    // Capability has been shut down.
+    //
+    ACQUIRE_LOCK(&cap->lock);
+    releaseCapability_(cap,rtsFalse);
     workerTaskStop(task);
+    RELEASE_LOCK(&cap->lock);
 }
 #endif
 
@@ -2543,26 +1950,12 @@ workerStart(Task *task)
 void 
 initScheduler(void)
 {
 void 
 initScheduler(void)
 {
-#if defined(GRAN)
-  nat i;
-  for (i=0; i<=MAX_PROC; i++) {
-    run_queue_hds[i]      = END_TSO_QUEUE;
-    run_queue_tls[i]      = END_TSO_QUEUE;
-    blocked_queue_hds[i]  = END_TSO_QUEUE;
-    blocked_queue_tls[i]  = END_TSO_QUEUE;
-    ccalling_threadss[i]  = END_TSO_QUEUE;
-    blackhole_queue[i]    = END_TSO_QUEUE;
-    sleeping_queue        = END_TSO_QUEUE;
-  }
-#elif !defined(THREADED_RTS)
+#if !defined(THREADED_RTS)
   blocked_queue_hd  = END_TSO_QUEUE;
   blocked_queue_tl  = END_TSO_QUEUE;
   sleeping_queue    = END_TSO_QUEUE;
 #endif
 
-  blackhole_queue   = END_TSO_QUEUE;
-
-  context_switch = 0;
   sched_state    = SCHED_RUNNING;
   recent_activity = ACTIVITY_YES;
 
@@ -2582,10 +1975,12 @@ initScheduler(void)
 
   initTaskManager();
 
-#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)
+#if defined(THREADED_RTS)
   initSparkPools();
 #endif
 
+  RELEASE_LOCK(&sched_mutex);
+
 #if defined(THREADED_RTS)
   /*
    * Eagerly start one worker to run each Capability, except for
@@ -2599,38 +1994,28 @@ initScheduler(void)
       for (i = 1; i < n_capabilities; i++) {
          cap = &capabilities[i];
          ACQUIRE_LOCK(&cap->lock);
-         startWorkerTask(cap, workerStart);
+         startWorkerTask(cap);
          RELEASE_LOCK(&cap->lock);
       }
   }
 #endif
-
-  trace(TRACE_sched, "start: %d capabilities", n_capabilities);
-
-  RELEASE_LOCK(&sched_mutex);
 }
 
 void
-exitScheduler(
-    rtsBool wait_foreign
-#if !defined(THREADED_RTS)
-                         __attribute__((unused))
-#endif
-)
+exitScheduler (rtsBool wait_foreign USED_IF_THREADS)
                /* see Capability.c, shutdownCapability() */
 {
     Task *task = NULL;
 
-#if defined(THREADED_RTS)
-    ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
-    RELEASE_LOCK(&sched_mutex);
-#endif
 
     // If we haven't killed all the threads yet, do it now.
     if (sched_state < SCHED_SHUTTING_DOWN) {
        sched_state = SCHED_INTERRUPTING;
-       scheduleDoGC(NULL,task,rtsFalse);    
+        waitForReturnCapability(&task->cap,task);
+       scheduleDoGC(task->cap,task,rtsFalse);
+        ASSERT(task->incall->tso == NULL);
+        releaseCapability(task->cap);
     }
     sched_state = SCHED_SHUTTING_DOWN;
 
@@ -2639,23 +2024,35 @@ exitScheduler(
        nat i;
        
        for (i = 0; i < n_capabilities; i++) {
+            ASSERT(task->incall->tso == NULL);
            shutdownCapability(&capabilities[i], task, wait_foreign);
        }
-       boundTaskExiting(task);
-       stopTaskManager();
     }
-#else
-    freeCapability(&MainCapability);
 #endif
+
+    boundTaskExiting(task);
 }
 
 void
 freeScheduler( void )
 {
-    freeTaskManager();
-    if (n_capabilities != 1) {
-        stgFree(capabilities);
+    nat still_running;
+
+    ACQUIRE_LOCK(&sched_mutex);
+    still_running = freeTaskManager();
+    // We can only free the Capabilities if there are no Tasks still
+    // running.  We might have a Task about to return from a foreign
+    // call into waitForReturnCapability(), for example (actually,
+    // this should be the *only* thing that a still-running Task can
+    // do at this point, and it will block waiting for the
+    // Capability).
+    if (still_running == 0) {
+        freeCapabilities();
+        if (n_capabilities != 1) {
+            stgFree(capabilities);
+        }
     }
+    RELEASE_LOCK(&sched_mutex);
 #if defined(THREADED_RTS)
     closeMutex(&sched_mutex);
 #endif
@@ -2673,13 +2070,15 @@ static void
 performGC_(rtsBool force_major)
 {
     Task *task;
+
     // We must grab a new Task here, because the existing Task may be
     // associated with a particular Capability, and chained onto the 
-    // suspended_ccalling_tasks queue.
-    ACQUIRE_LOCK(&sched_mutex);
+    // suspended_ccalls queue.
     task = newBoundTask();
-    RELEASE_LOCK(&sched_mutex);
-    scheduleDoGC(NULL,task,force_major);
+
+    waitForReturnCapability(&task->cap,task);
+    scheduleDoGC(task->cap,task,force_major);
+    releaseCapability(task->cap);
     boundTaskExiting(task);
 }
 
@@ -2714,16 +2113,27 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
 
   IF_DEBUG(sanity,checkTSO(tso));
 
-  // don't allow throwTo() to modify the blocked_exceptions queue
-  // while we are moving the TSO:
-  lockClosure((StgClosure *)tso);
-
-  if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
+  if (tso->stack_size >= tso->max_stack_size
+      && !(tso->flags & TSO_BLOCKEX)) {
       // NB. never raise a StackOverflow exception if the thread is
       // inside Control.Exception.block.  It is impractical to protect
       // against stack overflow exceptions, since virtually anything
       // can raise one (even 'catch'), so this is the only sensible
       // thing to do here.  See bug #767.
+      //
+
+      if (tso->flags & TSO_SQUEEZED) {
+          return tso;
+      }
+      // #3677: In a stack overflow situation, stack squeezing may
+      // reduce the stack size, but we don't know whether it has been
+      // reduced enough for the stack check to succeed if we try
+      // again.  Fortunately stack squeezing is idempotent, so all we
+      // need to do is record whether *any* squeezing happened.  If we
+      // are at the stack's absolute -K limit, and stack squeezing
+      // happened, then we try running the thread again.  The
+      // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
+      // squeezing happened or not.
 
       debugTrace(DEBUG_gc,
                 "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
@@ -2734,16 +2144,36 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
                                                tso->sp+64)));
 
       // Send this thread the StackOverflow exception
-      unlockTSO(tso);
       throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
       return tso;
   }
 
+
+  // We also want to avoid enlarging the stack if squeezing has
+  // already released some of it.  However, we don't want to get into
+  // a pathological situation where a thread has a nearly full stack
+  // (near its current limit, but not near the absolute -K limit),
+  // keeps allocating a little bit, squeezing removes a little bit,
+  // and then it runs again.  So to avoid this, if we squeezed *and*
+  // there is still less than BLOCK_SIZE_W words free, then we enlarge
+  // the stack anyway.
+  if ((tso->flags & TSO_SQUEEZED) && 
+      ((W_)(tso->sp - tso->stack) >= BLOCK_SIZE_W)) {
+      return tso;
+  }
+
   /* Try to double the current stack size.  If that takes us over the
-   * maximum stack size for this thread, then use the maximum instead.
-   * Finally round up so the TSO ends up as a whole number of blocks.
+   * maximum stack size for this thread, then use the maximum instead
+   * (that is, unless we're already at or over the max size and we
+   * can't raise the StackOverflow exception (see above), in which
+   * case just double the size). Finally round up so the TSO ends up as
+   * a whole number of blocks.
    */
-  new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+  if (tso->stack_size >= tso->max_stack_size) {
+      new_stack_size = tso->stack_size * 2;
+  } else { 
+      new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
+  }
   new_tso_size   = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + 
                                       TSO_STRUCT_SIZE)/sizeof(W_);
   new_tso_size = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
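
/* ---------------------------------------------------------------------------
 * [Editorial sketch, not part of this patch]  The sizing rule introduced
 * above, restated as standalone arithmetic: double the stack, cap it at the
 * per-thread maximum unless we are already at/over that maximum (the case
 * where StackOverflow cannot be raised), then round the result up to whole
 * blocks.  BLOCK_SIZE_W_DEMO and TSO_HDR_W_DEMO are assumed values for the
 * demo only; the mblock rounding done by round_to_mblocks() is omitted here.
 * ------------------------------------------------------------------------- */
#include <stdio.h>

#define BLOCK_SIZE_W_DEMO 1024UL   /* assumed block size, in words      */
#define TSO_HDR_W_DEMO      16UL   /* assumed TSO header size, in words */

static unsigned long grow_stack_size(unsigned long cur, unsigned long max)
{
    if (cur >= max)                    /* already at the -K limit: keep doubling */
        return cur * 2;
    return (cur * 2 < max) ? cur * 2 : max;
}

static unsigned long round_up_to_blocks(unsigned long words)
{
    return ((words + BLOCK_SIZE_W_DEMO - 1) / BLOCK_SIZE_W_DEMO) * BLOCK_SIZE_W_DEMO;
}

int main(void)
{
    unsigned long new_stack = grow_stack_size(3000UL, 8000UL);           /* 6000 */
    unsigned long new_tso   = round_up_to_blocks(new_stack + TSO_HDR_W_DEMO);
    printf("new stack = %lu words, rounded TSO = %lu words\n", new_stack, new_tso);
    return 0;
}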
@@ -2753,7 +2183,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
             "increasing stack size from %ld words to %d.",
             (long)tso->stack_size, new_stack_size);
 
             "increasing stack size from %ld words to %d.",
             (long)tso->stack_size, new_stack_size);
 
-  dest = (StgTSO *)allocateLocal(cap,new_tso_size);
+  dest = (StgTSO *)allocate(cap,new_tso_size);
   TICK_ALLOC_TSO(new_stack_size,0);
 
   /* copy the TSO block and the old stack into the new area */
@@ -2773,21 +2203,12 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
    * of the stack, so we don't attempt to scavenge any part of the
    * dead TSO's stack.
    */
-  tso->what_next = ThreadRelocated;
   setTSOLink(cap,tso,dest);
+  write_barrier(); // other threads seeing ThreadRelocated will look at _link
+  tso->what_next = ThreadRelocated;
   tso->sp = (P_)&(tso->stack[tso->stack_size]);
   tso->why_blocked = NotBlocked;
 
-  IF_PAR_DEBUG(verbose,
-              debugBelch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld\n",
-                    tso->id, tso, tso->stack_size);
-              /* If we're debugging, just print out the top of the stack */
-              printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size, 
-                                               tso->sp+64)));
-  
-  unlockTSO(dest);
-  unlockTSO(tso);
-
   IF_DEBUG(sanity,checkTSO(dest));
 #if 0
   IF_DEBUG(scheduler,printTSO(dest));
@@ -2797,50 +2218,60 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
 }
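
/* ---------------------------------------------------------------------------
 * [Editorial sketch, not part of this patch]  The write_barrier() calls added
 * in this patch order two stores: the forwarding pointer (_link) must be
 * visible before what_next reads ThreadRelocated, because other threads that
 * see ThreadRelocated immediately follow _link.  A simplified standalone
 * analogue using C11 release/acquire atomics instead of the RTS's
 * write_barrier()/ThreadRelocated machinery:
 * ------------------------------------------------------------------------- */
#include <stdatomic.h>
#include <stdio.h>

struct obj {
    struct obj *link;          /* forwarding pointer, analogue of tso->_link    */
    atomic_int  relocated;     /* analogue of tso->what_next == ThreadRelocated */
};

static void publish_relocation(struct obj *old, struct obj *dest)
{
    old->link = dest;
    /* release store: the link above becomes visible no later than the flag */
    atomic_store_explicit(&old->relocated, 1, memory_order_release);
}

static struct obj *follow(struct obj *o)
{
    /* acquire load: seeing the flag guarantees we also see the link */
    if (atomic_load_explicit(&o->relocated, memory_order_acquire))
        return o->link;
    return o;
}

int main(void)
{
    struct obj dest = { 0 }, old = { 0 };
    publish_relocation(&old, &dest);
    printf("moved: %d\n", follow(&old) == &dest);
    return 0;
}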
 
 static StgTSO *
-threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
+threadStackUnderflow (Capability *cap, Task *task, StgTSO *tso)
 {
     bdescr *bd, *new_bd;
-    lnat new_tso_size_w, tso_size_w;
+    lnat free_w, tso_size_w;
     StgTSO *new_tso;
 
     tso_size_w = tso_sizeW(tso);
 
-    if (tso_size_w < MBLOCK_SIZE_W || 
+    if (tso_size_w < MBLOCK_SIZE_W ||
+          // TSO is less than 2 mblocks (since the first mblock is
+          // shorter than MBLOCK_SIZE_W)
+        (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W != 0 ||
+          // or TSO is not a whole number of megablocks (ensuring
+          // precondition of splitLargeBlock() below)
+        (tso_size_w <= round_up_to_mblocks(RtsFlags.GcFlags.initialStkSize)) ||
+          // or TSO is smaller than the minimum stack size (rounded up)
         (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4) 
+          // or stack is using more than 1/4 of the available space
     {
+        // then do nothing
         return tso;
     }
 
-    // don't allow throwTo() to modify the blocked_exceptions queue
-    // while we are moving the TSO:
-    lockClosure((StgClosure *)tso);
-
-    new_tso_size_w = round_to_mblocks(tso_size_w/2);
-
-    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
-               tso->id, tso_size_w, new_tso_size_w);
+    // this is the number of words we'll free
+    free_w = round_to_mblocks(tso_size_w/2);
 
     bd = Bdescr((StgPtr)tso);
-    new_bd = splitLargeBlock(bd, new_tso_size_w / BLOCK_SIZE_W);
-    new_bd->free = bd->free;
+    new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
     bd->free = bd->start + TSO_STRUCT_SIZEW;
 
     new_tso = (StgTSO *)new_bd->start;
     memcpy(new_tso,tso,TSO_STRUCT_SIZE);
-    new_tso->stack_size = new_tso_size_w - TSO_STRUCT_SIZEW;
+    new_tso->stack_size = new_bd->free - new_tso->stack;
+
+    // The original TSO was dirty and probably on the mutable
+    // list. The new TSO is not yet on the mutable list, so we better
+    // put it there.
+    new_tso->dirty = 0;
+    new_tso->flags &= ~TSO_LINK_DIRTY;
+    dirty_TSO(cap, new_tso);
+
+    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
+               (long)tso->id, tso_size_w, tso_sizeW(new_tso));
 
 
-    tso->what_next = ThreadRelocated;
     tso->_link = new_tso; // no write barrier reqd: same generation
+    write_barrier(); // other threads seeing ThreadRelocated will look at _link
+    tso->what_next = ThreadRelocated;
 
     // The TSO attached to this Task may have moved, so update the
     // pointer to it.
-    if (task->tso == tso) {
-        task->tso = new_tso;
+    if (task->incall->tso == tso) {
+        task->incall->tso = new_tso;
     }
 
-    unlockTSO(new_tso);
-    unlockTSO(tso);
-
     IF_DEBUG(sanity,checkTSO(new_tso));
 
     return new_tso;
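
/* ---------------------------------------------------------------------------
 * [Editorial sketch, not part of this patch]  The guard at the top of
 * threadStackUnderflow, restated as a standalone predicate: shrink only if
 * the TSO spans at least two megablocks, is a whole number of megablocks
 * (precondition of splitLargeBlock()), is larger than the minimum stack
 * size, and uses at most a quarter of its stack.  The *_DEMO constants are
 * assumed values for illustration, not the RTS's.
 * ------------------------------------------------------------------------- */
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_SIZE_W_DEMO       1024UL
#define BLOCKS_PER_MBLOCK_DEMO   252UL
#define MBLOCK_SIZE_W_DEMO      (256UL * BLOCK_SIZE_W_DEMO)

static bool worth_shrinking(unsigned long tso_size_w,   /* whole TSO, in words    */
                            unsigned long stack_used_w, /* live stack, in words   */
                            unsigned long stack_size_w, /* stack area, in words   */
                            unsigned long min_tso_w)    /* minimum (initial) size */
{
    if (tso_size_w < MBLOCK_SIZE_W_DEMO)
        return false;                                   /* less than two mblocks  */
    if ((tso_size_w - BLOCKS_PER_MBLOCK_DEMO * BLOCK_SIZE_W_DEMO)
            % MBLOCK_SIZE_W_DEMO != 0)
        return false;                                   /* not whole mblocks      */
    if (tso_size_w <= min_tso_w)
        return false;                                   /* already minimal        */
    if (stack_used_w > stack_size_w / 4)
        return false;                                   /* more than 1/4 in use   */
    return true;                                        /* ok: free half of it    */
}

int main(void)
{
    /* 252 blocks (the shorter first mblock) plus one further whole mblock */
    unsigned long sz = BLOCKS_PER_MBLOCK_DEMO * BLOCK_SIZE_W_DEMO + MBLOCK_SIZE_W_DEMO;
    printf("shrink: %d\n", worth_shrinking(sz, 1000UL, 500000UL, 4096UL));
    return 0;
}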
@@ -2855,8 +2286,10 @@ void
 interruptStgRts(void)
 {
     sched_state = SCHED_INTERRUPTING;
-    context_switch = 1;
+    setContextSwitches();
+#if defined(THREADED_RTS)
     wakeUpRts();
+#endif
 }
 
 /* -----------------------------------------------------------------------------
@@ -2872,65 +2305,15 @@ interruptStgRts(void)
    will have interrupted any blocking system call in progress anyway.
    -------------------------------------------------------------------------- */
 
-void
-wakeUpRts(void)
-{
 #if defined(THREADED_RTS)
+void wakeUpRts(void)
+{
     // This forces the IO Manager thread to wakeup, which will
     // in turn ensure that some OS thread wakes up and runs the
     // scheduler loop, which will cause a GC and deadlock check.
     ioManagerWakeup();
-#endif
-}
-
-/* -----------------------------------------------------------------------------
- * checkBlackHoles()
- *
- * Check the blackhole_queue for threads that can be woken up.  We do
- * this periodically: before every GC, and whenever the run queue is
- * empty.
- *
- * An elegant solution might be to just wake up all the blocked
- * threads with awakenBlockedQueue occasionally: they'll go back to
- * sleep again if the object is still a BLACKHOLE.  Unfortunately this
- * doesn't give us a way to tell whether we've actually managed to
- * wake up any threads, so we would be busy-waiting.
- *
- * -------------------------------------------------------------------------- */
-
-static rtsBool
-checkBlackHoles (Capability *cap)
-{
-    StgTSO **prev, *t;
-    rtsBool any_woke_up = rtsFalse;
-    StgHalfWord type;
-
-    // blackhole_queue is global:
-    ASSERT_LOCK_HELD(&sched_mutex);
-
-    debugTrace(DEBUG_sched, "checking threads blocked on black holes");
-
-    // ASSUMES: sched_mutex
-    prev = &blackhole_queue;
-    t = blackhole_queue;
-    while (t != END_TSO_QUEUE) {
-       ASSERT(t->why_blocked == BlockedOnBlackHole);
-       type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
-       if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
-           IF_DEBUG(sanity,checkTSO(t));
-           t = unblockOne(cap, t);
-           // urk, the threads migrate to the current capability
-           // here, but we'd like to keep them on the original one.
-           *prev = t;
-           any_woke_up = rtsTrue;
-       } else {
-           prev = &t->_link;
-           t = t->_link;
-       }
-    }
-
-    return any_woke_up;
 }
+#endif
 
 /* -----------------------------------------------------------------------------
    Deleting threads
@@ -2941,7 +2324,7 @@ checkBlackHoles (Capability *cap)
    -------------------------------------------------------------------------- */
 
 static void 
-deleteThread (Capability *cap, StgTSO *tso)
+deleteThread (Capability *cap STG_UNUSED, StgTSO *tso)
 {
     // NOTE: must only be called on a TSO that we have exclusive
     // access to, because we will call throwToSingleThreaded() below.
@@ -2949,8 +2332,8 @@ deleteThread (Capability *cap, StgTSO *tso)
     // we must own all Capabilities.
 
     if (tso->why_blocked != BlockedOnCCall &&
-       tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
-       throwToSingleThreaded(cap,tso,NULL);
+       tso->why_blocked != BlockedOnCCall_Interruptible) {
+       throwToSingleThreaded(tso->cap,tso,NULL);
     }
 }
 
@@ -2961,9 +2344,9 @@ deleteThread_(Capability *cap, StgTSO *tso)
   // like deleteThread(), but we delete threads in foreign calls, too.
 
     if (tso->why_blocked == BlockedOnCCall ||
-       tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
-       unblockOne(cap,tso);
+       tso->why_blocked == BlockedOnCCall_Interruptible) {
        tso->what_next = ThreadKilled;
+       appendToRunQueue(tso->cap, tso);
     } else {
        deleteThread(cap,tso);
     }
@@ -3019,11 +2402,12 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
            // Only create raise_closure if we need to.
            if (raise_closure == NULL) {
                raise_closure = 
-                   (StgThunk *)allocateLocal(cap,sizeofW(StgThunk)+1);
+                   (StgThunk *)allocate(cap,sizeofW(StgThunk)+1);
                SET_HDR(raise_closure, &stg_raise_info, CCCS);
                raise_closure->payload[0] = exception;
            }
-           UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
+            updateThunk(cap, tso, ((StgUpdateFrame *)p)->updatee,
+                        (StgClosure *)raise_closure);
            p = next;
            continue;
 
@@ -3097,7 +2481,7 @@ findRetryFrameHelper (StgTSO *tso)
       
     case CATCH_STM_FRAME: {
         StgTRecHeader *trec = tso -> trec;
-       StgTRecHeader *outer = stmGetEnclosingTRec(trec);
+       StgTRecHeader *outer = trec -> enclosing_trec;
         debugTrace(DEBUG_stm,
                   "found CATCH_STM_FRAME at %p during retry", p);
         debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
@@ -3133,14 +2517,14 @@ resurrectThreads (StgTSO *threads)
 {
     StgTSO *tso, *next;
     Capability *cap;
-    step *step;
+    generation *gen;
 
     for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
        next = tso->global_link;
 
-        step = Bdescr((P_)tso)->step;
-       tso->global_link = step->threads;
-       step->threads = tso;
+        gen = Bdescr((P_)tso)->gen;
+       tso->global_link = gen->threads;
+       gen->threads = tso;
 
        debugTrace(DEBUG_sched, "resurrecting thread %lu", (unsigned long)tso->id);
        
@@ -3149,18 +2533,17 @@ resurrectThreads (StgTSO *threads)
        
        switch (tso->why_blocked) {
        case BlockedOnMVar:
-       case BlockedOnException:
            /* Called by GC - sched_mutex lock is currently held. */
            throwToSingleThreaded(cap, tso,
-                                 (StgClosure *)BlockedOnDeadMVar_closure);
+                                 (StgClosure *)blockedIndefinitelyOnMVar_closure);
            break;
        case BlockedOnBlackHole:
            throwToSingleThreaded(cap, tso,
-                                 (StgClosure *)NonTermination_closure);
+                                 (StgClosure *)nonTermination_closure);
            break;
        case BlockedOnSTM:
            throwToSingleThreaded(cap, tso,
-                                 (StgClosure *)BlockedIndefinitely_closure);
+                                 (StgClosure *)blockedIndefinitelyOnSTM_closure);
            break;
        case NotBlocked:
            /* This might happen if the thread was blocked on a black hole
@@ -3168,42 +2551,15 @@ resurrectThreads (StgTSO *threads)
             * can wake up threads, remember...).
             */
            continue;
+        case BlockedOnMsgThrowTo:
+            // This can happen if the target is masking, blocks on a
+            // black hole, and then is found to be unreachable.  In
+            // this case, we want to let the target wake up and carry
+            // on, and do nothing to this thread.
+            continue;
        default:
-           barf("resurrectThreads: thread blocked in a strange way");
+           barf("resurrectThreads: thread blocked in a strange way: %d",
+                 tso->why_blocked);
        }
     }
 }
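
/* ---------------------------------------------------------------------------
 * [Editorial sketch, not part of this patch]  The switch above maps the
 * blocking reason of an unreachable thread to the exception it is sent, or
 * to "leave it alone".  Restated as a standalone table; the Demo* enum and
 * the strings are illustrative, not the RTS's types or closures.
 * ------------------------------------------------------------------------- */
#include <stdio.h>

enum demo_why_blocked {
    DemoBlockedOnMVar,
    DemoBlockedOnBlackHole,
    DemoBlockedOnSTM,
    DemoNotBlocked,
    DemoBlockedOnMsgThrowTo
};

static const char *resurrect_action(enum demo_why_blocked w)
{
    switch (w) {
    case DemoBlockedOnMVar:       return "throw BlockedIndefinitelyOnMVar";
    case DemoBlockedOnBlackHole:  return "throw NonTermination";
    case DemoBlockedOnSTM:        return "throw BlockedIndefinitelyOnSTM";
    case DemoNotBlocked:          /* racing with a wake-up: skip it          */
    case DemoBlockedOnMsgThrowTo: /* masked target of a throwTo: let it run  */
        return "do nothing";
    default:
        return "barf: blocked in a strange way";
    }
}

int main(void)
{
    printf("%s\n", resurrect_action(DemoBlockedOnBlackHole));
    return 0;
}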
-
-/* -----------------------------------------------------------------------------
-   performPendingThrowTos is called after garbage collection, and
-   passed a list of threads that were found to have pending throwTos
-   (tso->blocked_exceptions was not empty), and were blocked.
-   Normally this doesn't happen, because we would deliver the
-   exception directly if the target thread is blocked, but there are
-   small windows where it might occur on a multiprocessor (see
-   throwTo()).
-
-   NB. we must be holding all the capabilities at this point, just
-   like resurrectThreads().
-   -------------------------------------------------------------------------- */
-
-void
-performPendingThrowTos (StgTSO *threads)
-{
-    StgTSO *tso, *next;
-    Capability *cap;
-    step *step;
-
-    for (tso = threads; tso != END_TSO_QUEUE; tso = next) {
-       next = tso->global_link;
-
-        step = Bdescr((P_)tso)->step;
-       tso->global_link = step->threads;
-       step->threads = tso;
-
-       debugTrace(DEBUG_sched, "performing blocked throwTo to thread %lu", (unsigned long)tso->id);
-       
-       cap = tso->cap;
-        maybePerformBlockedException(cap, tso);
-    }
-}