merge GHC HEAD
[ghc-hetmet.git] rts/Schedule.c
index 70e0246..fd5536b 100644
@@ -39,6 +39,7 @@
 #include "Threads.h"
 #include "Timer.h"
 #include "ThreadPaused.h"
+#include "Messages.h"
 
 #ifdef HAVE_SYS_TYPES_H
 #include <sys/types.h>
@@ -55,6 +56,9 @@
 #include <errno.h>
 #endif
 
+#ifdef TRACING
+#include "eventlog/EventLog.h"
+#endif
 /* -----------------------------------------------------------------------------
  * Global variables
  * -------------------------------------------------------------------------- */
@@ -66,17 +70,6 @@ StgTSO *blocked_queue_tl = NULL;
 StgTSO *sleeping_queue = NULL;    // perhaps replace with a hash table?
 #endif
 
-/* Threads blocked on blackholes.
- * LOCK: sched_mutex+capability, or all capabilities
- */
-StgTSO *blackhole_queue = NULL;
-
-/* The blackhole_queue should be checked for threads to wake up.  See
- * Schedule.h for more thorough comment.
- * LOCK: none (doesn't matter if we miss an update)
- */
-rtsBool blackholes_need_checking = rtsFalse;
-
 /* Set to true when the latest garbage collection failed to reclaim
  * enough space, and the runtime should proceed to shut itself down in
  * an orderly fashion (emitting profiling info etc.)
@@ -135,12 +128,11 @@ static Capability *schedule (Capability *initialCapability, Task *task);
 static void schedulePreLoop (void);
 static void scheduleFindWork (Capability *cap);
 #if defined(THREADED_RTS)
-static void scheduleYield (Capability **pcap, Task *task, rtsBool);
+static void scheduleYield (Capability **pcap, Task *task);
 #endif
 static void scheduleStartSignalHandlers (Capability *cap);
 static void scheduleCheckBlockedThreads (Capability *cap);
 static void scheduleProcessInbox(Capability *cap);
-static void scheduleCheckBlackHoles (Capability *cap);
 static void scheduleDetectDeadlock (Capability *cap, Task *task);
 static void schedulePushWork(Capability *cap, Task *task);
 #if defined(THREADED_RTS)
@@ -148,9 +140,7 @@ static void scheduleActivateSpark(Capability *cap);
 #endif
 static void schedulePostRunThread(Capability *cap, StgTSO *t);
 static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
-static void scheduleHandleStackOverflow( Capability *cap, Task *task, 
-                                        StgTSO *t);
-static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t, 
+static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t,
                                    nat prev_what_next );
 static void scheduleHandleThreadBlocked( StgTSO *t );
 static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
@@ -159,11 +149,6 @@ static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
 static Capability *scheduleDoGC(Capability *cap, Task *task,
                                rtsBool force_major);
 
-static rtsBool checkBlackHoles(Capability *cap);
-
-static StgTSO *threadStackOverflow(Capability *cap, StgTSO *tso);
-static StgTSO *threadStackUnderflow(Capability *cap, Task *task, StgTSO *tso);
-
 static void deleteThread (Capability *cap, StgTSO *tso);
 static void deleteAllThreads (Capability *cap);
 
@@ -171,17 +156,6 @@ static void deleteAllThreads (Capability *cap);
 static void deleteThread_(Capability *cap, StgTSO *tso);
 #endif
 
-/* -----------------------------------------------------------------------------
- * Putting a thread on the run queue: different scheduling policies
- * -------------------------------------------------------------------------- */
-
-STATIC_INLINE void
-addToRunQueue( Capability *cap, StgTSO *t )
-{
-    // this does round-robin scheduling; good for concurrency
-    appendToRunQueue(cap,t);
-}
-
 /* ---------------------------------------------------------------------------
    Main scheduling loop.
 
@@ -228,7 +202,6 @@ schedule (Capability *initialCapability, Task *task)
   rtsBool ready_to_gc;
 #if defined(THREADED_RTS)
   rtsBool first = rtsTrue;
-  rtsBool force_yield = rtsFalse;
 #endif
   
   cap = initialCapability;
@@ -352,9 +325,7 @@ schedule (Capability *initialCapability, Task *task)
     //     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     }
 
-  yield:
-    scheduleYield(&cap,task,force_yield);
-    force_yield = rtsFalse;
+    scheduleYield(&cap,task);
 
     if (emptyRunQueue(cap)) continue; // look for work again
 #endif
@@ -433,9 +404,6 @@ run_thread:
 
     startHeapProfTimer();
 
-    // Check for exceptions blocked on this thread
-    maybePerformBlockedException (cap, t);
-
     // ----------------------------------------------------------------------
     // Run the current thread 
 
@@ -453,6 +421,7 @@ run_thread:
     cap->in_haskell = rtsTrue;
 
     dirty_TSO(cap,t);
+    dirty_STACK(cap,t->stackobj);
 
 #if defined(THREADED_RTS)
     if (recent_activity == ACTIVITY_DONE_GC) {
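The dirty_TSO/dirty_STACK pair above reflects the split of stacks out of TSOs: each is now a separate heap object that must be recorded on its generation's mutable list before it is mutated. A minimal sketch of that write-barrier discipline, with ObjHdr and record standing in for the RTS's closure headers and mutable-list recording:

    /* Sketch only: hypothetical stand-ins for the RTS types. */
    typedef struct { int dirty; } ObjHdr;

    static void mark_dirty(ObjHdr *o, void (*record)(ObjHdr *))
    {
        if (!o->dirty) {        /* only the first mutation pays the cost */
            o->dirty = 1;
            record(o);          /* add to the generation's mutable list  */
        }
    }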
@@ -506,13 +475,6 @@ run_thread:
     // happened.  So find the new location:
     t = cap->r.rCurrentTSO;
 
-    // We have run some Haskell code: there might be blackhole-blocked
-    // threads to wake up now.
-    // Lock-free test here should be ok, we're just setting a flag.
-    if ( blackhole_queue != END_TSO_QUEUE ) {
-       blackholes_need_checking = rtsTrue;
-    }
-    
     // And save the current errno in this thread.
     // XXX: possibly bogus for SMP because this thread might already
     // be running again, see code below.
@@ -522,20 +484,17 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
-    traceEventStopThread(cap, t, ret);
-
-#if defined(THREADED_RTS)
-    // If ret is ThreadBlocked, and this Task is bound to the TSO that
-    // blocked, we are in limbo - the TSO is now owned by whatever it
-    // is blocked on, and may in fact already have been woken up,
-    // perhaps even on a different Capability.  It may be the case
-    // that task->cap != cap.  We better yield this Capability
-    // immediately and return to normality.
     if (ret == ThreadBlocked) {
-        force_yield = rtsTrue;
-        goto yield;
+        if (t->why_blocked == BlockedOnBlackHole) {
+            StgTSO *owner = blackHoleOwner(t->block_info.bh->bh);
+            traceEventStopThread(cap, t, t->why_blocked + 6,
+                                 owner != NULL ? owner->id : 0);
+        } else {
+            traceEventStopThread(cap, t, t->why_blocked + 6, 0);
+        }
+    } else {
+        traceEventStopThread(cap, t, ret, 0);
     }
-#endif
 
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     ASSERT(t->cap == cap);
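The new tracing calls fold the blocking reason into the stop-thread status: ordinary return codes keep their values, why_blocked is shifted past them by 6 so both fit in one status field, and the black-hole case additionally records the owning thread's id. A rough sketch of that encoding, with invented constants (the real numbering belongs to the eventlog format, not this sketch):

    /* Sketch only: illustrative values, not the RTS's actual codes. */
    enum { HeapOverflow_ = 1, StackOverflow_, ThreadYielding_,
           ThreadBlocked_, ThreadFinished_ };     /* return codes 1..5 */
    #define BLOCKED_BASE 6                        /* why_blocked + 6   */

    static int stop_status(int ret, int why_blocked)
    {
        return ret == ThreadBlocked_ ? BLOCKED_BASE + why_blocked : ret;
    }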
@@ -550,10 +509,6 @@ run_thread:
     
     schedulePostRunThread(cap,t);
 
-    if (ret != StackOverflow) {
-        t = threadStackUnderflow(cap,task,t);
-    }
-
     ready_to_gc = rtsFalse;
 
     switch (ret) {
@@ -562,8 +517,11 @@ run_thread:
        break;
 
     case StackOverflow:
-       scheduleHandleStackOverflow(cap,task,t);
-       break;
+        // just adjust the stack for this thread, then pop it back
+        // on the run queue.
+        threadStackOverflow(cap, t);
+        pushOnRunQueue(cap,t);
+        break;
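With stacks represented as chains of chunks, overflow handling no longer relocates the TSO: threadStackOverflow pushes a fresh chunk in front of the old one and the thread goes straight back on the run queue, which is why the task->incall->tso fixup in the deleted scheduleHandleStackOverflow (below) became unnecessary. The shape of that growth, as a sketch over assumed minimal types:

    /* Sketch only: StackChunk_/TSO_ stand in for StgStack/StgTSO. */
    typedef struct StackChunk_ {
        struct StackChunk_ *next;  /* older chunk, reached through an
                                      UNDERFLOW_FRAME at this chunk's end */
        int used, size;
    } StackChunk_;

    typedef struct { StackChunk_ *stackobj; } TSO_;

    static void grow_stack(TSO_ *t, StackChunk_ *fresh)
    {
        fresh->next = t->stackobj;  /* old chunk becomes the underflow
                                       target of the new one             */
        t->stackobj = fresh;        /* the TSO itself never moves        */
    }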
 
     case ThreadYielding:
        if (scheduleHandleYield(cap, t, prev_what_next)) {
@@ -591,6 +549,30 @@ run_thread:
   } /* end of while() */
 }
 
+/* -----------------------------------------------------------------------------
+ * Run queue operations
+ * -------------------------------------------------------------------------- */
+
+void
+removeFromRunQueue (Capability *cap, StgTSO *tso)
+{
+    if (tso->block_info.prev == END_TSO_QUEUE) {
+        ASSERT(cap->run_queue_hd == tso);
+        cap->run_queue_hd = tso->_link;
+    } else {
+        setTSOLink(cap, tso->block_info.prev, tso->_link);
+    }
+    if (tso->_link == END_TSO_QUEUE) {
+        ASSERT(cap->run_queue_tl == tso);
+        cap->run_queue_tl = tso->block_info.prev;
+    } else {
+        setTSOPrev(cap, tso->_link, tso->block_info.prev);
+    }
+    tso->_link = tso->block_info.prev = END_TSO_QUEUE;
+
+    IF_DEBUG(sanity, checkRunQueue(cap));
+}
+
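removeFromRunQueue relies on the run queue now being doubly linked, with the back pointer threaded through block_info.prev while a TSO is on the queue. The same unlink logic restated as a standalone sketch on a minimal node type (the real code must go through setTSOLink/setTSOPrev so the generational write barriers fire):

    #include <assert.h>
    #include <stddef.h>

    /* Sketch only: Node/Queue stand in for StgTSO and the capability's
     * run_queue_hd/run_queue_tl pair; NULL plays END_TSO_QUEUE. */
    typedef struct Node { struct Node *link, *prev; } Node;
    typedef struct { Node *hd, *tl; } Queue;

    static void unlink_node(Queue *q, Node *n)
    {
        if (n->prev == NULL) { assert(q->hd == n); q->hd = n->link; }
        else                 { n->prev->link = n->link; }
        if (n->link == NULL) { assert(q->tl == n); q->tl = n->prev; }
        else                 { n->link->prev = n->prev; }
        n->link = n->prev = NULL;   /* the node leaves the queue clean */
    }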
 /* ----------------------------------------------------------------------------
  * Setting up the scheduler loop
  * ------------------------------------------------------------------------- */
@@ -612,12 +594,6 @@ scheduleFindWork (Capability *cap)
 {
     scheduleStartSignalHandlers(cap);
 
-    // Only check the black holes here if we've nothing else to do.
-    // During normal execution, the black hole list only gets checked
-    // at GC time, to avoid repeatedly traversing this possibly long
-    // list each time around the scheduler.
-    if (emptyRunQueue(cap)) { scheduleCheckBlackHoles(cap); }
-
     scheduleProcessInbox(cap);
 
     scheduleCheckBlockedThreads(cap);
@@ -655,26 +631,15 @@ shouldYieldCapability (Capability *cap, Task *task)
 // and also check the benchmarks in nofib/parallel for regressions.
 
 static void
-scheduleYield (Capability **pcap, Task *task, rtsBool force_yield)
+scheduleYield (Capability **pcap, Task *task)
 {
     Capability *cap = *pcap;
 
     // if we have work, and we don't need to give up the Capability, continue.
     //
-    // The force_yield flag is used when a bound thread blocks.  This
-    // is a particularly tricky situation: the current Task does not
-    // own the TSO any more, since it is on some queue somewhere, and
-    // might be woken up or manipulated by another thread at any time.
-    // The TSO and Task might be migrated to another Capability.
-    // Certain invariants might be in doubt, such as task->bound->cap
-    // == cap.  We have to yield the current Capability immediately,
-    // no messing around.
-    //
-    if (!force_yield &&
-        !shouldYieldCapability(cap,task) && 
+    if (!shouldYieldCapability(cap,task) && 
         (!emptyRunQueue(cap) ||
          !emptyInbox(cap) ||
-         blackholes_need_checking ||
          sched_state >= SCHED_INTERRUPTING))
         return;
 
@@ -769,16 +734,17 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
            for (; t != END_TSO_QUEUE; t = next) {
                next = t->_link;
                t->_link = END_TSO_QUEUE;
-               if (t->what_next == ThreadRelocated
-                   || t->bound == task->incall // don't move my bound thread
+                if (t->bound == task->incall // don't move my bound thread
                    || tsoLocked(t)) {  // don't move a locked thread
                    setTSOLink(cap, prev, t);
+                    setTSOPrev(cap, t, prev);
                    prev = t;
                } else if (i == n_free_caps) {
                    pushed_to_all = rtsTrue;
                    i = 0;
                    // keep one for us
                    setTSOLink(cap, prev, t);
+                    setTSOPrev(cap, t, prev);
                    prev = t;
                } else {
                    appendToRunQueue(free_caps[i],t);
@@ -791,6 +757,8 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                }
            }
            cap->run_queue_tl = prev;
+
+            IF_DEBUG(sanity, checkRunQueue(cap));
        }
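The added setTSOPrev calls are the other half of the doubly-linked invariant: when the run queue is filtered in place to hand threads to idle capabilities, every TSO that stays behind needs its back pointer refreshed as well, or it would still point at a thread that has migrated. Schematically, with hypothetical names:

    /* Sketch only. */
    typedef struct N { struct N *link, *prev; } N;

    static N *keep(N *prev, N *t)
    {
        prev->link = t;      /* setTSOLink(cap, prev, t) */
        t->prev    = prev;   /* setTSOPrev(cap, t, prev) */
        return t;            /* t becomes the new prev   */
    }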
 
 #ifdef SPARK_PUSHING
@@ -863,126 +831,12 @@ scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
     //
     if ( !emptyQueue(blocked_queue_hd) || !emptyQueue(sleeping_queue) )
     {
-       awaitEvent( emptyRunQueue(cap) && !blackholes_need_checking );
-    }
-#endif
-}
-
-
-/* ----------------------------------------------------------------------------
- * Check for threads woken up by other Capabilities
- * ------------------------------------------------------------------------- */
-
-#if defined(THREADED_RTS)
-static void
-executeMessage (Capability *cap, Message *m)
-{
-    const StgInfoTable *i;
-
-loop:
-    write_barrier(); // allow m->header to be modified by another thread
-    i = m->header.info;
-    if (i == &stg_MSG_WAKEUP_info)
-    {
-        MessageWakeup *w = (MessageWakeup *)m;
-        StgTSO *tso = w->tso;
-        debugTraceCap(DEBUG_sched, cap, "message: wakeup thread %ld", 
-                      (lnat)tso->id);
-        ASSERT(tso->cap == cap);
-        ASSERT(tso->why_blocked == BlockedOnMsgWakeup);
-        ASSERT(tso->block_info.closure == (StgClosure *)m);
-        tso->why_blocked = NotBlocked;
-        appendToRunQueue(cap, tso);
-    }
-    else if (i == &stg_MSG_THROWTO_info)
-    {
-        MessageThrowTo *t = (MessageThrowTo *)m;
-        nat r;
-        const StgInfoTable *i;
-
-        i = lockClosure((StgClosure*)m);
-        if (i != &stg_MSG_THROWTO_info) {
-            unlockClosure((StgClosure*)m, i);
-            goto loop;
-        }
-
-        debugTraceCap(DEBUG_sched, cap, "message: throwTo %ld -> %ld", 
-                      (lnat)t->source->id, (lnat)t->target->id);
-
-        ASSERT(t->source->why_blocked == BlockedOnMsgThrowTo);
-        ASSERT(t->source->block_info.closure == (StgClosure *)m);
-
-        r = throwToMsg(cap, t);
-
-        switch (r) {
-        case THROWTO_SUCCESS:
-            ASSERT(t->source->sp[0] == (StgWord)&stg_block_throwto_info);
-            t->source->sp += 3;
-            unblockOne(cap, t->source);
-            // this message is done
-            unlockClosure((StgClosure*)m, &stg_IND_info);
-            break;
-        case THROWTO_BLOCKED:
-            // unlock the message
-            unlockClosure((StgClosure*)m, &stg_MSG_THROWTO_info);
-            break;
-        }
-    }
-    else if (i == &stg_IND_info)
-    {
-        // message was revoked
-        return;
-    }
-    else if (i == &stg_WHITEHOLE_info)
-    {
-        goto loop;
-    }
-    else
-    {
-        barf("executeMessage: %p", i);
-    }
-}
-#endif
-
-static void
-scheduleProcessInbox (Capability *cap USED_IF_THREADS)
-{
-#if defined(THREADED_RTS)
-    Message *m;
-
-    while (!emptyInbox(cap)) {
-        ACQUIRE_LOCK(&cap->lock);
-        m = cap->inbox;
-        cap->inbox = m->link;
-        RELEASE_LOCK(&cap->lock);
-        executeMessage(cap, (Message *)m);
+       awaitEvent (emptyRunQueue(cap));
     }
 #endif
 }
 
 /* ----------------------------------------------------------------------------
- * Check for threads blocked on BLACKHOLEs that can be woken up
- * ------------------------------------------------------------------------- */
-static void
-scheduleCheckBlackHoles (Capability *cap)
-{
-    if ( blackholes_need_checking ) // check without the lock first
-    {
-       ACQUIRE_LOCK(&sched_mutex);
-       if ( blackholes_need_checking ) {
-           blackholes_need_checking = rtsFalse;
-            // important that we reset the flag *before* checking the
-            // blackhole queue, otherwise we could get deadlock.  This
-            // happens as follows: we wake up a thread that
-            // immediately runs on another Capability, blocks on a
-            // blackhole, and then we reset the blackholes_need_checking flag.
-           checkBlackHoles(cap);
-       }
-       RELEASE_LOCK(&sched_mutex);
-    }
-}
-
-/* ----------------------------------------------------------------------------
  * Detect deadlock conditions and attempt to resolve them.
  * ------------------------------------------------------------------------- */
 
@@ -1090,6 +944,50 @@ scheduleSendPendingMessages(void)
 #endif
 
 /* ----------------------------------------------------------------------------
+ * Process message in the current Capability's inbox
+ * ------------------------------------------------------------------------- */
+
+static void
+scheduleProcessInbox (Capability *cap USED_IF_THREADS)
+{
+#if defined(THREADED_RTS)
+    Message *m, *next;
+    int r;
+
+    while (!emptyInbox(cap)) {
+        if (cap->r.rCurrentNursery->link == NULL ||
+            g0->n_new_large_words >= large_alloc_lim) {
+            scheduleDoGC(cap, cap->running_task, rtsFalse);
+        }
+
+        // don't use a blocking acquire; if the lock is held by
+        // another thread then just carry on.  This seems to avoid
+        // getting stuck in a message ping-pong situation with other
+        // processors.  We'll check the inbox again later anyway.
+        //
+        // We should really use a more efficient queue data structure
+        // here.  The trickiness is that we must ensure a Capability
+        // never goes idle if the inbox is non-empty, which is why we
+        // use cap->lock (cap->lock is released as the last thing
+        // before going idle; see Capability.c:releaseCapability()).
+        r = TRY_ACQUIRE_LOCK(&cap->lock);
+        if (r != 0) return;
+
+        m = cap->inbox;
+        cap->inbox = (Message*)END_TSO_QUEUE;
+
+        RELEASE_LOCK(&cap->lock);
+
+        while (m != (Message*)END_TSO_QUEUE) {
+            next = m->link;
+            executeMessage(cap, m);
+            m = next;
+        }
+    }
+#endif
+}
+
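The rewritten scheduleProcessInbox drains the whole inbox in one shot under a try-lock: if another thread holds cap->lock we just come back later, and once the chain has been detached the messages are executed with the lock released. The same pattern with POSIX primitives, as an illustrative sketch (Msg/Cap are stand-ins, and the RTS uses END_TSO_QUEUE rather than NULL as its empty marker):

    #include <pthread.h>
    #include <stddef.h>

    typedef struct Msg { struct Msg *link; } Msg;
    typedef struct { pthread_mutex_t lock; Msg *inbox; } Cap;

    static void process_inbox(Cap *cap, void (*execute)(Msg *))
    {
        if (pthread_mutex_trylock(&cap->lock) != 0)
            return;                 /* contended: retry next time round */
        Msg *m = cap->inbox;
        cap->inbox = NULL;          /* detach the whole batch           */
        pthread_mutex_unlock(&cap->lock);
        while (m != NULL) {         /* run messages without the lock    */
            Msg *next = m->link;
            execute(m);
            m = next;
        }
    }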
+/* ----------------------------------------------------------------------------
  * Activate spark threads (PARALLEL_HASKELL and THREADED_RTS)
  * ------------------------------------------------------------------------- */
 
@@ -1155,6 +1053,10 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
        
        blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
        
+        if (blocks > BLOCKS_PER_MBLOCK) {
+            barf("allocation of %ld bytes too large (GHC should have complained at compile-time)", (long)cap->r.rHpAlloc);
+        }
+
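The new guard makes explicit that one allocation request must fit within a single megablock's worth of blocks; the block count comes from rounding rHpAlloc up to whole blocks. As arithmetic, with illustrative constants (4 KB blocks and roughly 252 usable blocks per megablock are typical defaults, but the real values come from the build):

    #include <assert.h>

    /* Sketch only: illustrative constants, not the build's. */
    #define BLOCK_SIZE_        4096ul
    #define BLOCKS_PER_MBLOCK_ 252ul

    static unsigned long blocks_for(unsigned long bytes)
    {
        unsigned long blocks = (bytes + BLOCK_SIZE_ - 1) / BLOCK_SIZE_;
        assert(blocks <= BLOCKS_PER_MBLOCK_);   /* else barf, as above */
        return blocks;
    }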
        debugTrace(DEBUG_sched,
                   "--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n", 
                   (long)t->id, what_next_strs[t->what_next], blocks);
@@ -1164,10 +1066,8 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
            cap->r.rNursery->n_blocks == 1) {  // paranoia to prevent infinite loop
                                               // if the nursery has only one block.
            
-           ACQUIRE_SM_LOCK
-           bd = allocGroup( blocks );
-           RELEASE_SM_LOCK
-           cap->r.rNursery->n_blocks += blocks;
+            bd = allocGroup_lock(blocks);
+            cap->r.rNursery->n_blocks += blocks;
            
            // link the new group into the list
            bd->link = cap->r.rCurrentNursery;
@@ -1217,7 +1117,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
         // context switch flag, and we end up waiting for a GC.
         // See #1984, and concurrent/should_run/1984
         cap->context_switch = 0;
-        addToRunQueue(cap,t);
+        appendToRunQueue(cap,t);
     } else {
         pushOnRunQueue(cap,t);
     }
@@ -1226,30 +1126,6 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
 }
 
 /* -----------------------------------------------------------------------------
- * Handle a thread that returned to the scheduler with ThreadStackOverflow
- * -------------------------------------------------------------------------- */
-
-static void
-scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
-{
-    /* just adjust the stack for this thread, then pop it back
-     * on the run queue.
-     */
-    { 
-       /* enlarge the stack */
-       StgTSO *new_t = threadStackOverflow(cap, t);
-       
-       /* The TSO attached to this Task may have moved, so update the
-        * pointer to it.
-        */
-       if (task->incall->tso == t) {
-           task->incall->tso = new_t;
-       }
-       pushOnRunQueue(cap,new_t);
-    }
-}
-
-/* -----------------------------------------------------------------------------
  * Handle a thread that returned to the scheduler with ThreadYielding
  * -------------------------------------------------------------------------- */
 
@@ -1286,7 +1162,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
             //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
             checkTSO(t));
 
-    addToRunQueue(cap,t);
+    appendToRunQueue(cap,t);
 
     return rtsFalse;
 }
@@ -1309,9 +1185,6 @@ scheduleHandleThreadBlocked( StgTSO *t
 
     // ASSERT(t->why_blocked != NotBlocked);
     // Not true: for example,
-    //    - in THREADED_RTS, the thread may already have been woken
-    //      up by another Capability.  This actually happens: try
-    //      conc023 +RTS -N2.
     //    - the thread may have woken itself up already, because
     //      threadPaused() might have raised a blocked throwTo
     //      exception, see maybePerformBlockedException().
@@ -1371,23 +1244,23 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
          ASSERT(task->incall->tso == t);
 
          if (t->what_next == ThreadComplete) {
-             if (task->ret) {
-                 // NOTE: return val is tso->sp[1] (see StgStartup.hc)
-                 *(task->ret) = (StgClosure *)task->incall->tso->sp[1]; 
+             if (task->incall->ret) {
+                  // NOTE: return val is stack->sp[1] (see StgStartup.hc)
+                  *(task->incall->ret) = (StgClosure *)task->incall->tso->stackobj->sp[1];
              }
-             task->stat = Success;
+             task->incall->stat = Success;
          } else {
-             if (task->ret) {
-                 *(task->ret) = NULL;
+             if (task->incall->ret) {
+                 *(task->incall->ret) = NULL;
              }
              if (sched_state >= SCHED_INTERRUPTING) {
                   if (heap_overflow) {
-                      task->stat = HeapExhausted;
+                      task->incall->stat = HeapExhausted;
                   } else {
-                      task->stat = Interrupted;
+                      task->incall->stat = Interrupted;
                   }
              } else {
-                 task->stat = Killed;
+                 task->incall->stat = Killed;
              }
          }
 #ifdef DEBUG
@@ -1502,9 +1375,6 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
         debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
     }
 
-    // do this while the other Capabilities stop:
-    if (cap) scheduleCheckBlackHoles(cap);
-
     if (gc_type == PENDING_GC_SEQ)
     {
         // single-threaded GC: grab all the capabilities
@@ -1532,11 +1402,6 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
         waitForGcThreads(cap);
     }
 
-#else /* !THREADED_RTS */
-
-    // do this while the other Capabilities stop:
-    if (cap) scheduleCheckBlackHoles(cap);
-
 #endif
 
     IF_DEBUG(scheduler, printAllThreads());
@@ -1582,6 +1447,12 @@ delete_threads_and_gc:
         recent_activity = ACTIVITY_YES;
     }
 
+    if (heap_census) {
+        debugTrace(DEBUG_sched, "performing heap census");
+        heapCensus();
+        performHeapProfile = rtsFalse;
+    }
+
 #if defined(THREADED_RTS)
     if (gc_type == PENDING_GC_PAR)
     {
@@ -1589,12 +1460,6 @@ delete_threads_and_gc:
     }
 #endif
 
-    if (heap_census) {
-        debugTrace(DEBUG_sched, "performing heap census");
-        heapCensus();
-       performHeapProfile = rtsFalse;
-    }
-
     if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
         // GC set the heap_overflow flag, so we should proceed with
         // an orderly shutdown now.  Ultimately we want the main
@@ -1677,10 +1542,18 @@ forkProcess(HsStablePtr *entry
     ACQUIRE_LOCK(&cap->lock);
     ACQUIRE_LOCK(&cap->running_task->lock);
 
+    stopTimer(); // See #4074
+
+#if defined(TRACING)
+    flushEventLog(); // so that child won't inherit dirty file buffers
+#endif
+
     pid = fork();
     
     if (pid) { // parent
        
+        startTimer(); // #4074
+
         RELEASE_LOCK(&sched_mutex);
         RELEASE_LOCK(&cap->lock);
         RELEASE_LOCK(&cap->running_task->lock);
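The stopTimer/flushEventLog calls before fork() and the startTimer in the parent are standard fork hygiene: the child must not inherit a live interval timer or half-written file buffers (#4074). A sketch of the shape of it, with stubbed-out timer hooks standing in for the RTS's ITimer machinery:

    #include <stdio.h>
    #include <unistd.h>

    /* Sketch only. */
    static void stop_timer(void)  { /* disable the interval timer   */ }
    static void start_timer(void) { /* re-enable the interval timer */ }

    static pid_t fork_quiesced(void)
    {
        stop_timer();       /* no timer signal delivered mid-fork    */
        fflush(NULL);       /* child must not inherit dirty buffers  */
        pid_t pid = fork();
        if (pid != 0)
            start_timer();  /* parent resumes; the child resets its
                               own services (cf. resetTracing below) */
        return pid;
    }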
@@ -1697,7 +1570,11 @@ forkProcess(HsStablePtr *entry
         initMutex(&cap->running_task->lock);
 #endif
 
-       // Now, all OS threads except the thread that forked are
+#ifdef TRACING
+        resetTracing();
+#endif
+
+        // Now, all OS threads except the thread that forked are
        // stopped.  We need to stop all Haskell threads, including
        // those involved in foreign calls.  Also we need to delete
        // all Tasks, because they correspond to OS threads that are
@@ -1705,15 +1582,18 @@ forkProcess(HsStablePtr *entry
 
         for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
           for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
-           if (t->what_next == ThreadRelocated) {
-               next = t->_link;
-           } else {
-               next = t->global_link;
+                next = t->global_link;
                // don't allow threads to catch the ThreadKilled
                // exception, but we do want to raiseAsync() because these
                // threads may be evaluating thunks that we need later.
                deleteThread_(cap,t);
-           }
+
+                // stop the GC from updating the InCall to point to
+                // the TSO.  This is only necessary because the
+                // OSThread bound to the TSO has been killed, and
+                // won't get a chance to exit in the usual way (see
+                // also scheduleHandleThreadFinished).
+                t->bound = NULL;
           }
        }
        
@@ -1740,7 +1620,8 @@ forkProcess(HsStablePtr *entry
        // Wipe our spare workers list, they no longer exist.  New
        // workers will be created if necessary.
        cap->spare_workers = NULL;
-       cap->returning_tasks_hd = NULL;
+        cap->n_spare_workers = 0;
+        cap->returning_tasks_hd = NULL;
        cap->returning_tasks_tl = NULL;
 #endif
 
@@ -1780,12 +1661,8 @@ deleteAllThreads ( Capability *cap )
     debugTrace(DEBUG_sched,"deleting all threads");
     for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
         for (t = generations[g].threads; t != END_TSO_QUEUE; t = next) {
-            if (t->what_next == ThreadRelocated) {
-                next = t->_link;
-            } else {
                 next = t->global_link;
                 deleteThread(cap,t);
-            }
         }
     }
 
@@ -1849,13 +1726,17 @@ recoverSuspendedTask (Capability *cap, Task *task)
  * the whole system.
  *
  * The Haskell thread making the C call is put to sleep for the
- * duration of the call, on the susepended_ccalling_threads queue.  We
+ * duration of the call, on the suspended_ccalling_threads queue.  We
  * give out a token to the task, which it can use to resume the thread
  * on return from the C function.
+ *
+ * If this is an interruptible C call, this means that the FFI call may be
+ * unceremoniously terminated and should be scheduled on an
+ * unbound worker thread.
  * ------------------------------------------------------------------------- */
    
 void *
-suspendThread (StgRegTable *reg)
+suspendThread (StgRegTable *reg, rtsBool interruptible)
 {
   Capability *cap;
   int saved_errno;
@@ -1877,19 +1758,17 @@ suspendThread (StgRegTable *reg)
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
-  traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL);
+  traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL, 0);
 
   // XXX this might not be necessary --SDM
   tso->what_next = ThreadRunGHC;
 
   threadPaused(cap,tso);
 
-  if ((tso->flags & TSO_BLOCKEX) == 0)  {
-      tso->why_blocked = BlockedOnCCall;
-      tso->flags |= TSO_BLOCKEX;
-      tso->flags &= ~TSO_INTERRUPTIBLE;
+  if (interruptible) {
+    tso->why_blocked = BlockedOnCCall_Interruptible;
   } else {
-      tso->why_blocked = BlockedOnCCall_NoUnblockExc;
+    tso->why_blocked = BlockedOnCCall;
   }
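suspendThread now records only why the thread is blocked; whether blocked exceptions may be delivered is decided later, at resume time, from TSO_BLOCKEX, instead of being latched into the flags here. The two-way split, schematically (suffixed names mark these as stand-ins):

    /* Sketch only. */
    typedef enum {
        BlockedOnCCall_,                /* ordinary safe foreign call  */
        BlockedOnCCall_Interruptible_   /* call may be cut short; runs
                                           on an unbound worker thread */
    } CCallBlock;

    static CCallBlock suspend_status(int interruptible)
    {
        return interruptible ? BlockedOnCCall_Interruptible_
                             : BlockedOnCCall_;
    }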
 
   // Hand back capability
@@ -1948,17 +1827,16 @@ resumeThread (void *task_)
 
     traceEventRunThread(cap, tso);
     
-    if (tso->why_blocked == BlockedOnCCall) {
+    /* Reset blocking status */
+    tso->why_blocked  = NotBlocked;
+
+    if ((tso->flags & TSO_BLOCKEX) == 0) {
         // avoid locking the TSO if we don't have to
         if (tso->blocked_exceptions != END_BLOCKED_EXCEPTIONS_QUEUE) {
-            awakenBlockedExceptionQueue(cap,tso);
+            maybePerformBlockedException(cap,tso);
         }
-       tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
     }
     
-    /* Reset blocking status */
-    tso->why_blocked  = NotBlocked;
-    
     cap->r.rCurrentTSO = tso;
     cap->in_haskell = rtsTrue;
     errno = saved_errno;
@@ -1968,6 +1846,7 @@ resumeThread (void *task_)
 
     /* We might have GC'd, mark the TSO dirty again */
     dirty_TSO(cap,tso);
+    dirty_STACK(cap,tso->stackobj);
 
     IF_DEBUG(sanity, checkTSO(tso));
 
@@ -1995,15 +1874,14 @@ scheduleThread(Capability *cap, StgTSO *tso)
 void
 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
 {
-#if defined(THREADED_RTS)
     tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
                              // move this thread from now on.
+#if defined(THREADED_RTS)
     cpu %= RtsFlags.ParFlags.nNodes;
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
     } else {
-        traceEventMigrateThread (cap, tso, capabilities[cpu].no);
-       wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
+        migrateThread(cap, tso, &capabilities[cpu]);
     }
 #else
     appendToRunQueue(cap,tso);
@@ -2025,8 +1903,8 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
     tso->cap = cap;
 
     task->incall->tso = tso;
-    task->ret = ret;
-    task->stat = NoStatus;
+    task->incall->ret = ret;
+    task->incall->stat = NoStatus;
 
     appendToRunQueue(cap,tso);
 
@@ -2035,7 +1913,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
 
     cap = schedule(cap,task);
 
-    ASSERT(task->stat != NoStatus);
+    ASSERT(task->incall->stat != NoStatus);
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)id);
@@ -2089,8 +1967,6 @@ initScheduler(void)
   sleeping_queue    = END_TSO_QUEUE;
 #endif
 
-  blackhole_queue   = END_TSO_QUEUE;
-
   sched_state    = SCHED_RUNNING;
   recent_activity = ACTIVITY_YES;
 
@@ -2137,12 +2013,7 @@ initScheduler(void)
 }
 
 void
-exitScheduler(
-    rtsBool wait_foreign
-#if !defined(THREADED_RTS)
-                         __attribute__((unused))
-#endif
-)
+exitScheduler (rtsBool wait_foreign USED_IF_THREADS)
                /* see Capability.c, shutdownCapability() */
 {
     Task *task = NULL;
@@ -2159,16 +2030,7 @@ exitScheduler(
     }
     sched_state = SCHED_SHUTTING_DOWN;
 
-#if defined(THREADED_RTS)
-    { 
-       nat i;
-       
-       for (i = 0; i < n_capabilities; i++) {
-            ASSERT(task->incall->tso == NULL);
-           shutdownCapability(&capabilities[i], task, wait_foreign);
-       }
-    }
-#endif
+    shutdownCapabilities(task, wait_foreign);
 
     boundTaskExiting(task);
 }
@@ -2198,6 +2060,16 @@ freeScheduler( void )
 #endif
 }
 
+void markScheduler (evac_fn evac USED_IF_NOT_THREADS, 
+                    void *user USED_IF_NOT_THREADS)
+{
+#if !defined(THREADED_RTS)
+    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
+    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
+    evac(user, (StgClosure **)(void *)&sleeping_queue);
+#endif 
+}
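markScheduler hands the scheduler's remaining global roots to the GC through the usual evac_fn callback: the collector may move each object and rewrites the root in place through the double pointer. The pattern in isolation, as a sketch:

    /* Sketch only: Obj_/evac_fn_ stand in for StgClosure and evac_fn. */
    typedef struct Obj_ Obj_;
    typedef void (*evac_fn_)(void *user, Obj_ **root);

    static Obj_ *blocked_hd_, *blocked_tl_, *sleeping_;

    static void mark_scheduler_(evac_fn_ evac, void *user)
    {
        evac(user, &blocked_hd_);   /* each call may relocate the object */
        evac(user, &blocked_tl_);   /* and update the root in place      */
        evac(user, &sleeping_);
    }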
+
 /* -----------------------------------------------------------------------------
    performGC
 
@@ -2234,187 +2106,6 @@ performMajorGC(void)
     performGC_(rtsTrue);
 }
 
-/* -----------------------------------------------------------------------------
-   Stack overflow
-
-   If the thread has reached its maximum stack size, then raise the
-   StackOverflow exception in the offending thread.  Otherwise
-   relocate the TSO into a larger chunk of memory and adjust its stack
-   size appropriately.
-   -------------------------------------------------------------------------- */
-
-static StgTSO *
-threadStackOverflow(Capability *cap, StgTSO *tso)
-{
-  nat new_stack_size, stack_words;
-  lnat new_tso_size;
-  StgPtr new_sp;
-  StgTSO *dest;
-
-  IF_DEBUG(sanity,checkTSO(tso));
-
-  if (tso->stack_size >= tso->max_stack_size
-      && !(tso->flags & TSO_BLOCKEX)) {
-      // NB. never raise a StackOverflow exception if the thread is
-      // inside Control.Exception.block.  It is impractical to protect
-      // against stack overflow exceptions, since virtually anything
-      // can raise one (even 'catch'), so this is the only sensible
-      // thing to do here.  See bug #767.
-      //
-
-      if (tso->flags & TSO_SQUEEZED) {
-          return tso;
-      }
-      // #3677: In a stack overflow situation, stack squeezing may
-      // reduce the stack size, but we don't know whether it has been
-      // reduced enough for the stack check to succeed if we try
-      // again.  Fortunately stack squeezing is idempotent, so all we
-      // need to do is record whether *any* squeezing happened.  If we
-      // are at the stack's absolute -K limit, and stack squeezing
-      // happened, then we try running the thread again.  The
-      // TSO_SQUEEZED flag is set by threadPaused() to tell us whether
-      // squeezing happened or not.
-
-      debugTrace(DEBUG_gc,
-                "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",
-                (long)tso->id, tso, (long)tso->stack_size, (long)tso->max_stack_size);
-      IF_DEBUG(gc,
-              /* If we're debugging, just print out the top of the stack */
-              printStackChunk(tso->sp, stg_min(tso->stack+tso->stack_size, 
-                                               tso->sp+64)));
-
-      // Send this thread the StackOverflow exception
-      throwToSingleThreaded(cap, tso, (StgClosure *)stackOverflow_closure);
-      return tso;
-  }
-
-
-  // We also want to avoid enlarging the stack if squeezing has
-  // already released some of it.  However, we don't want to get into
-  // a pathological situation where a thread has a nearly full stack
-  // (near its current limit, but not near the absolute -K limit),
-  // keeps allocating a little bit, squeezing removes a little bit,
-  // and then it runs again.  So to avoid this, if we squeezed *and*
-  // there is still less than BLOCK_SIZE_W words free, then we enlarge
-  // the stack anyway.
-  if ((tso->flags & TSO_SQUEEZED) && 
-      ((W_)(tso->sp - tso->stack) >= BLOCK_SIZE_W)) {
-      return tso;
-  }
-
-  /* Try to double the current stack size.  If that takes us over the
-   * maximum stack size for this thread, then use the maximum instead
-   * (that is, unless we're already at or over the max size and we
-   * can't raise the StackOverflow exception (see above), in which
-   * case just double the size). Finally round up so the TSO ends up as
-   * a whole number of blocks.
-   */
-  if (tso->stack_size >= tso->max_stack_size) {
-      new_stack_size = tso->stack_size * 2;
-  } else { 
-      new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
-  }
-  new_tso_size   = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + 
-                                      TSO_STRUCT_SIZE)/sizeof(W_);
-  new_tso_size = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
-  new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
-
-  debugTrace(DEBUG_sched, 
-            "increasing stack size from %ld words to %d.",
-            (long)tso->stack_size, new_stack_size);
-
-  dest = (StgTSO *)allocate(cap,new_tso_size);
-  TICK_ALLOC_TSO(new_stack_size,0);
-
-  /* copy the TSO block and the old stack into the new area */
-  memcpy(dest,tso,TSO_STRUCT_SIZE);
-  stack_words = tso->stack + tso->stack_size - tso->sp;
-  new_sp = (P_)dest + new_tso_size - stack_words;
-  memcpy(new_sp, tso->sp, stack_words * sizeof(W_));
-
-  /* relocate the stack pointers... */
-  dest->sp         = new_sp;
-  dest->stack_size = new_stack_size;
-       
-  /* Mark the old TSO as relocated.  We have to check for relocated
-   * TSOs in the garbage collector and any primops that deal with TSOs.
-   *
-   * It's important to set the sp value to just beyond the end
-   * of the stack, so we don't attempt to scavenge any part of the
-   * dead TSO's stack.
-   */
-  tso->what_next = ThreadRelocated;
-  setTSOLink(cap,tso,dest);
-  tso->sp = (P_)&(tso->stack[tso->stack_size]);
-  tso->why_blocked = NotBlocked;
-
-  IF_DEBUG(sanity,checkTSO(dest));
-#if 0
-  IF_DEBUG(scheduler,printTSO(dest));
-#endif
-
-  return dest;
-}
-
-static StgTSO *
-threadStackUnderflow (Capability *cap, Task *task, StgTSO *tso)
-{
-    bdescr *bd, *new_bd;
-    lnat free_w, tso_size_w;
-    StgTSO *new_tso;
-
-    tso_size_w = tso_sizeW(tso);
-
-    if (tso_size_w < MBLOCK_SIZE_W ||
-          // TSO is less than 2 mblocks (since the first mblock is
-          // shorter than MBLOCK_SIZE_W)
-        (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W != 0 ||
-          // or TSO is not a whole number of megablocks (ensuring
-          // precondition of splitLargeBlock() below)
-        (tso_size_w <= round_up_to_mblocks(RtsFlags.GcFlags.initialStkSize)) ||
-          // or TSO is smaller than the minimum stack size (rounded up)
-        (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4) 
-          // or stack is using more than 1/4 of the available space
-    {
-        // then do nothing
-        return tso;
-    }
-
-    // this is the number of words we'll free
-    free_w = round_to_mblocks(tso_size_w/2);
-
-    bd = Bdescr((StgPtr)tso);
-    new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
-    bd->free = bd->start + TSO_STRUCT_SIZEW;
-
-    new_tso = (StgTSO *)new_bd->start;
-    memcpy(new_tso,tso,TSO_STRUCT_SIZE);
-    new_tso->stack_size = new_bd->free - new_tso->stack;
-
-    // The original TSO was dirty and probably on the mutable
-    // list. The new TSO is not yet on the mutable list, so we better
-    // put it there.
-    new_tso->dirty = 0;
-    new_tso->flags &= ~TSO_LINK_DIRTY;
-    dirty_TSO(cap, new_tso);
-
-    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
-               (long)tso->id, tso_size_w, tso_sizeW(new_tso));
-
-    tso->what_next = ThreadRelocated;
-    tso->_link = new_tso; // no write barrier reqd: same generation
-
-    // The TSO attached to this Task may have moved, so update the
-    // pointer to it.
-    if (task->incall->tso == tso) {
-        task->incall->tso = new_tso;
-    }
-
-    IF_DEBUG(sanity,checkTSO(new_tso));
-
-    return new_tso;
-}
-
 /* ---------------------------------------------------------------------------
    Interrupt execution
    - usually called inside a signal handler so it mustn't do anything fancy.   
@@ -2454,57 +2145,6 @@ void wakeUpRts(void)
 #endif
 
 /* -----------------------------------------------------------------------------
- * checkBlackHoles()
- *
- * Check the blackhole_queue for threads that can be woken up.  We do
- * this periodically: before every GC, and whenever the run queue is
- * empty.
- *
- * An elegant solution might be to just wake up all the blocked
- * threads with awakenBlockedQueue occasionally: they'll go back to
- * sleep again if the object is still a BLACKHOLE.  Unfortunately this
- * doesn't give us a way to tell whether we've actually managed to
- * wake up any threads, so we would be busy-waiting.
- *
- * -------------------------------------------------------------------------- */
-
-static rtsBool
-checkBlackHoles (Capability *cap)
-{
-    StgTSO **prev, *t;
-    rtsBool any_woke_up = rtsFalse;
-    StgHalfWord type;
-
-    // blackhole_queue is global:
-    ASSERT_LOCK_HELD(&sched_mutex);
-
-    debugTrace(DEBUG_sched, "checking threads blocked on black holes");
-
-    // ASSUMES: sched_mutex
-    prev = &blackhole_queue;
-    t = blackhole_queue;
-    while (t != END_TSO_QUEUE) {
-        if (t->what_next == ThreadRelocated) {
-            t = t->_link;
-            continue;
-        }
-       ASSERT(t->why_blocked == BlockedOnBlackHole);
-       type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
-       if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
-           IF_DEBUG(sanity,checkTSO(t));
-           t = unblockOne(cap, t);
-           *prev = t;
-           any_woke_up = rtsTrue;
-       } else {
-           prev = &t->_link;
-           t = t->_link;
-       }
-    }
-
-    return any_woke_up;
-}
-
-/* -----------------------------------------------------------------------------
    Deleting threads
 
    This is used for interruption (^C) and forking, and corresponds to
@@ -2512,8 +2152,8 @@ checkBlackHoles (Capability *cap)
    exception.
    -------------------------------------------------------------------------- */
 
-static void 
-deleteThread (Capability *cap, StgTSO *tso)
+static void
+deleteThread (Capability *cap STG_UNUSED, StgTSO *tso)
 {
     // NOTE: must only be called on a TSO that we have exclusive
     // access to, because we will call throwToSingleThreaded() below.
@@ -2521,21 +2161,21 @@ deleteThread (Capability *cap, StgTSO *tso)
     // we must own all Capabilities.
 
     if (tso->why_blocked != BlockedOnCCall &&
-       tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
-       throwToSingleThreaded(cap,tso,NULL);
+       tso->why_blocked != BlockedOnCCall_Interruptible) {
+        throwToSingleThreaded(tso->cap,tso,NULL);
     }
 }
 
 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
-static void 
+static void
 deleteThread_(Capability *cap, StgTSO *tso)
 { // for forkProcess only:
   // like deleteThread(), but we delete threads in foreign calls, too.
 
     if (tso->why_blocked == BlockedOnCCall ||
-       tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
-       unblockOne(cap,tso);
+       tso->why_blocked == BlockedOnCCall_Interruptible) {
        tso->what_next = ThreadKilled;
+       appendToRunQueue(tso->cap, tso);
     } else {
        deleteThread(cap,tso);
     }
@@ -2581,7 +2221,7 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
     // we update any closures pointed to from update frames with the
     // raise closure that we just built.
     //
-    p = tso->sp;
+    p = tso->stackobj->sp;
     while(1) {
        info = get_ret_itbl((StgClosure *)p);
        next = p + stack_frame_sizeW((StgClosure *)p);
@@ -2595,27 +2235,33 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
                SET_HDR(raise_closure, &stg_raise_info, CCCS);
                raise_closure->payload[0] = exception;
            }
-           UPD_IND(cap, ((StgUpdateFrame *)p)->updatee,
-                    (StgClosure *)raise_closure);
+            updateThunk(cap, tso, ((StgUpdateFrame *)p)->updatee,
+                        (StgClosure *)raise_closure);
            p = next;
            continue;
 
         case ATOMICALLY_FRAME:
            debugTrace(DEBUG_stm, "found ATOMICALLY_FRAME at %p", p);
-            tso->sp = p;
+            tso->stackobj->sp = p;
             return ATOMICALLY_FRAME;
            
        case CATCH_FRAME:
-           tso->sp = p;
+            tso->stackobj->sp = p;
            return CATCH_FRAME;
 
         case CATCH_STM_FRAME:
            debugTrace(DEBUG_stm, "found CATCH_STM_FRAME at %p", p);
-            tso->sp = p;
+            tso->stackobj->sp = p;
             return CATCH_STM_FRAME;
            
-       case STOP_FRAME:
-           tso->sp = p;
+        case UNDERFLOW_FRAME:
+            tso->stackobj->sp = p;
+            threadStackUnderflow(cap,tso);
+            p = tso->stackobj->sp;
+            continue;
+
+        case STOP_FRAME:
+            tso->stackobj->sp = p;
            return STOP_FRAME;
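The UNDERFLOW_FRAME case is the stack-walking counterpart of chunked stacks: when a walk reaches the frame that ends a chunk, threadStackUnderflow switches to the older chunk and the walk resumes from its sp. A sketch of that traversal over assumed minimal types, counting frames across chunk boundaries (frame sizes are pretended to sit in the first word):

    #include <stddef.h>

    /* Sketch only. */
    typedef struct Chunk_ {
        struct Chunk_ *next;        /* older chunk (underflow target) */
        long *sp, *end;             /* live frames in [sp, end)       */
    } Chunk_;

    static int count_frames(const Chunk_ *c)
    {
        int n = 0;
        const long *p = c->sp;
        for (;;) {
            if (p == c->end) {              /* underflow frame reached */
                if (c->next == NULL) break; /* bottom: STOP_FRAME      */
                c = c->next;                /* threadStackUnderflow    */
                p = c->sp;
                continue;
            }
            p += 1 + p[0];                  /* skip one frame          */
            n++;
        }
        return n;
    }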
 
         case CATCH_RETRY_FRAME:
@@ -2645,12 +2291,12 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
    -------------------------------------------------------------------------- */
 
 StgWord
-findRetryFrameHelper (StgTSO *tso)
+findRetryFrameHelper (Capability *cap, StgTSO *tso)
 {
   StgPtr           p, next;
   StgRetInfoTable *info;
 
-  p = tso -> sp;
+  p = tso->stackobj->sp;
   while (1) {
     info = get_ret_itbl((StgClosure *)p);
     next = p + stack_frame_sizeW((StgClosure *)p);
@@ -2659,13 +2305,13 @@ findRetryFrameHelper (StgTSO *tso)
     case ATOMICALLY_FRAME:
        debugTrace(DEBUG_stm,
                   "found ATOMICALLY_FRAME at %p during retry", p);
-       tso->sp = p;
+        tso->stackobj->sp = p;
        return ATOMICALLY_FRAME;
       
     case CATCH_RETRY_FRAME:
        debugTrace(DEBUG_stm,
                   "found CATCH_RETRY_FRAME at %p during retrry", p);
-       tso->sp = p;
+        tso->stackobj->sp = p;
        return CATCH_RETRY_FRAME;
       
     case CATCH_STM_FRAME: {
@@ -2674,13 +2320,17 @@ findRetryFrameHelper (StgTSO *tso)
         debugTrace(DEBUG_stm,
                   "found CATCH_STM_FRAME at %p during retry", p);
         debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
-       stmAbortTransaction(tso -> cap, trec);
-       stmFreeAbortedTRec(tso -> cap, trec);
+        stmAbortTransaction(cap, trec);
+        stmFreeAbortedTRec(cap, trec);
        tso -> trec = outer;
         p = next; 
         continue;
     }
       
+    case UNDERFLOW_FRAME:
+        threadStackUnderflow(cap,tso);
+        p = tso->stackobj->sp;
+        continue;
 
     default:
       ASSERT(info->i.type != CATCH_FRAME);
@@ -2740,6 +2390,12 @@ resurrectThreads (StgTSO *threads)
             * can wake up threads, remember...).
             */
            continue;
+        case BlockedOnMsgThrowTo:
+            // This can happen if the target is masking, blocks on a
+            // black hole, and then is found to be unreachable.  In
+            // this case, we want to let the target wake up and carry
+            // on, and do nothing to this thread.
+            continue;
        default:
            barf("resurrectThreads: thread blocked in a strange way: %d",
                  tso->why_blocked);