[project @ 2005-10-20 11:45:19 by simonmar]
[ghc-hetmet.git] / ghc / rts / Schedule.c
index 9e2a5d0..b78f9d2 100644 (file)
@@ -9,7 +9,7 @@
  *
  * WAY  Name     CPP flag  What's it for
  * --------------------------------------
- * mp   GUM      PAR          Parallel execution on a distrib. memory machine
+ * mp   GUM      PARALLEL_HASKELL          Parallel execution on a distrib. memory machine
  * s    SMP      SMP          Parallel execution on a shared memory machine
  * mg   GranSim  GRAN         Simulation of parallel execution
  * md   GUM/GdH  DIST         Distributed execution (based on GUM)
 #include "RtsUtils.h"
 #include "RtsFlags.h"
 #include "BlockAlloc.h"
+#include "OSThreads.h"
 #include "Storage.h"
 #include "StgRun.h"
 #include "Hooks.h"
 #define COMPILING_SCHEDULER
 #include "Schedule.h"
 #include "StgMiscClosures.h"
-#include "Storage.h"
 #include "Interpreter.h"
 #include "Exception.h"
 #include "Printer.h"
 #include "Signals.h"
 #include "Sanity.h"
 #include "Stats.h"
+#include "STM.h"
 #include "Timer.h"
 #include "Prelude.h"
 #include "ThreadLabels.h"
@@ -64,7 +65,7 @@
 #include "Proftimer.h"
 #include "ProfHeap.h"
 #endif
-#if defined(GRAN) || defined(PAR)
+#if defined(GRAN) || defined(PARALLEL_HASKELL)
 # include "GranSimRts.h"
 # include "GranSim.h"
 # include "ParallelRts.h"
@@ -75,7 +76,6 @@
 #endif
 #include "Sparks.h"
 #include "Capability.h"
-#include "OSThreads.h"
 #include  "Task.h"
 
 #ifdef HAVE_SYS_TYPES_H
 #include <errno.h>
 #endif
 
+// Turn off inlining when debugging - it obfuscates things
+#ifdef DEBUG
+# undef  STATIC_INLINE
+# define STATIC_INLINE static
+#endif
+
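For reference, a minimal standalone sketch of why the override helps (the "normal" definition of STATIC_INLINE below is an assumption for illustration; the real one lives in the RTS headers): with inlining disabled, each helper remains an out-of-line symbol that a debugger can break on and show in backtraces.

#include <stdio.h>

/* Hypothetical "normal" definition; in DEBUG builds the scheduler overrides
 * it to plain 'static', so helpers stay out-of-line and visible to gdb. */
#ifdef DEBUG
# define STATIC_INLINE static
#else
# define STATIC_INLINE static inline
#endif

STATIC_INLINE int twice(int x) { return 2 * x; }

int main(void)
{
    printf("twice(21) = %d\n", twice(21));  /* with -DDEBUG, 'twice' is a real symbol */
    return 0;
}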
 #ifdef THREADED_RTS
 #define USED_IN_THREADED_RTS
 #else
  */
 StgMainThread *main_threads = NULL;
 
-/* Thread queues.
- * Locks required: sched_mutex.
- */
 #if defined(GRAN)
 
 StgTSO* ActiveTSO = NULL; /* for assigning system costs; GranSim-Light only */
@@ -135,14 +138,23 @@ StgTSO *ccalling_threadss[MAX_PROC];
 
 #else /* !GRAN */
 
+/* Thread queues.
+ * Locks required: sched_mutex.
+ */
 StgTSO *run_queue_hd = NULL;
 StgTSO *run_queue_tl = NULL;
 StgTSO *blocked_queue_hd = NULL;
 StgTSO *blocked_queue_tl = NULL;
+StgTSO *blackhole_queue = NULL;
 StgTSO *sleeping_queue = NULL;    /* perhaps replace with a hash table? */
 
 #endif
 
+/* The blackhole_queue should be checked for threads to wake up.  See
+ * Schedule.h for a more thorough comment.

+ */
+rtsBool blackholes_need_checking = rtsFalse;
+
 /* Linked list of all threads.
  * Used for detecting garbage collected threads.
  */
@@ -154,15 +166,16 @@ StgTSO *all_threads = NULL;
  */
 static StgTSO *suspended_ccalling_threads;
 
-static StgTSO *threadStackOverflow(StgTSO *tso);
-
 /* KH: The following two flags are shared memory locations.  There is no need
        to lock them, since they are only unset at the end of a scheduler
        operation.
 */
 
 /* flag set by signal handler to precipitate a context switch */
-nat context_switch = 0;
+int context_switch = 0;
+
+/* flag that tracks whether we have done any execution in this time slice. */
+nat recent_activity = ACTIVITY_YES;
 
 /* if this flag is set as well, give up execution */
 rtsBool interrupted = rtsFalse;
@@ -202,8 +215,6 @@ StgTSO *CurrentTSO;
  */
 StgTSO dummy_tso;
 
-static rtsBool ready_to_gc;
-
 /*
  * Set to TRUE when entering a shutdown state (via shutdownHaskellAndExit()) --
  * in an MT setting, needed to signal that a worker thread shouldn't hang around
@@ -211,13 +222,6 @@ static rtsBool ready_to_gc;
  */
 static rtsBool shutting_down_scheduler = rtsFalse;
 
-void            addToBlockedQueue ( StgTSO *tso );
-
-static void     schedule          ( StgMainThread *mainThread, Capability *initialCapability );
-       void     interruptStgRts   ( void );
-
-static void     detectBlackHoles  ( void );
-
 #if defined(RTS_SUPPORTS_THREADS)
 /* ToDo: carefully document the invariants that go together
  *       with these synchronisation objects.
@@ -227,7 +231,7 @@ Mutex     term_mutex        = INIT_MUTEX_VAR;
 
 #endif /* RTS_SUPPORTS_THREADS */
 
-#if defined(PAR)
+#if defined(PARALLEL_HASKELL)
 StgTSO *LastTSO;
 rtsTime TimeOfLastYield;
 rtsBool emitSchedule = rtsTrue;
@@ -244,7 +248,66 @@ static char *whatNext_strs[] = {
 };
 #endif
 
-#if defined(PAR)
+/* -----------------------------------------------------------------------------
+ * static function prototypes
+ * -------------------------------------------------------------------------- */
+
+#if defined(RTS_SUPPORTS_THREADS)
+static void taskStart(void);
+#endif
+
+static void schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
+                     Capability *initialCapability );
+
+//
+// These functions all encapsulate parts of the scheduler loop, and are
+// abstracted only to make the structure and control flow of the
+// scheduler clearer.
+//
+static void schedulePreLoop(void);
+static void scheduleStartSignalHandlers(void);
+static void scheduleCheckBlockedThreads(void);
+static void scheduleCheckBlackHoles(void);
+static void scheduleDetectDeadlock(void);
+#if defined(GRAN)
+static StgTSO *scheduleProcessEvent(rtsEvent *event);
+#endif
+#if defined(PARALLEL_HASKELL)
+static StgTSO *scheduleSendPendingMessages(void);
+static void scheduleActivateSpark(void);
+static rtsBool scheduleGetRemoteWork(rtsBool *receivedFinish);
+#endif
+#if defined(PAR) || defined(GRAN)
+static void scheduleGranParReport(void);
+#endif
+static void schedulePostRunThread(void);
+static rtsBool scheduleHandleHeapOverflow( Capability *cap, StgTSO *t );
+static void scheduleHandleStackOverflow( StgTSO *t);
+static rtsBool scheduleHandleYield( StgTSO *t, nat prev_what_next );
+static void scheduleHandleThreadBlocked( StgTSO *t );
+static rtsBool scheduleHandleThreadFinished( StgMainThread *mainThread, 
+                                            Capability *cap, StgTSO *t );
+static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc);
+static void scheduleDoGC(rtsBool force_major);
+
+static void unblockThread(StgTSO *tso);
+static rtsBool checkBlackHoles(void);
+static SchedulerStatus waitThread_(/*out*/StgMainThread* m,
+                                  Capability *initialCapability
+                                  );
+static void scheduleThread_ (StgTSO* tso);
+static void AllRoots(evac_fn evac);
+
+static StgTSO *threadStackOverflow(StgTSO *tso);
+
+static void raiseAsync_(StgTSO *tso, StgClosure *exception, 
+                       rtsBool stop_at_atomically);
+
+static void printThreadBlockage(StgTSO *tso);
+static void printThreadStatus(StgTSO *tso);
+void printThreadQueue(StgTSO *tso);
+
+#if defined(PARALLEL_HASKELL)
 StgTSO * createSparkThread(rtsSpark spark);
 StgTSO * activateSpark (rtsSpark spark);  
 #endif
@@ -254,39 +317,57 @@ StgTSO * activateSpark (rtsSpark spark);
  * ------------------------------------------------------------------------- */
 
 #if defined(RTS_SUPPORTS_THREADS)
-static rtsBool startingWorkerThread = rtsFalse;
+static nat startingWorkerThread = 0;
 
-static void taskStart(void);
 static void
 taskStart(void)
 {
   ACQUIRE_LOCK(&sched_mutex);
-  startingWorkerThread = rtsFalse;
+  startingWorkerThread--;
   schedule(NULL,NULL);
+  taskStop();
   RELEASE_LOCK(&sched_mutex);
 }
 
 void
 startSchedulerTaskIfNecessary(void)
 {
-  if(run_queue_hd != END_TSO_QUEUE
-    || blocked_queue_hd != END_TSO_QUEUE
-    || sleeping_queue != END_TSO_QUEUE)
-  {
-    if(!startingWorkerThread)
-    { // we don't want to start another worker thread
-      // just because the last one hasn't yet reached the
-      // "waiting for capability" state
-      startingWorkerThread = rtsTrue;
-      if(!startTask(taskStart))
-      {
-        startingWorkerThread = rtsFalse;
-      }
+    if ( !EMPTY_RUN_QUEUE()
+        && !shutting_down_scheduler // not if we're shutting down
+        && startingWorkerThread==0)
+    {
+       // we don't want to start another worker thread
+       // just because the last one hasn't yet reached the
+       // "waiting for capability" state
+       startingWorkerThread++;
+       if (!maybeStartNewWorker(taskStart)) {
+           startingWorkerThread--;
+       }
     }
-  }
 }
 #endif
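A standalone sketch of the guard used above (editor's illustration; start_worker_stub stands in for maybeStartNewWorker(), and the queue/shutdown tests are simplified): counting the workers that are currently starting up stops the scheduler from spawning a fresh worker on every wake-up while the previous one has not yet reached the "waiting for capability" state.

#include <stdbool.h>
#include <stdio.h>

static unsigned int starting_workers = 0;   /* plays the role of startingWorkerThread */
static bool shutting_down = false;
static bool have_work = true;               /* pretend the run queue is non-empty */

/* stand-in for maybeStartNewWorker(); returns false if no OS thread could be created */
static bool start_worker_stub(void) { return true; }

static void start_worker_if_necessary(void)
{
    if (have_work && !shutting_down && starting_workers == 0) {
        starting_workers++;                 /* claim the slot before starting */
        if (!start_worker_stub()) {
            starting_workers--;             /* roll back on failure */
        }
    }
}

int main(void)
{
    start_worker_if_necessary();
    start_worker_if_necessary();            /* no-op: a worker is already starting up */
    printf("workers starting: %u\n", starting_workers);
    return 0;
}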
 
+/* -----------------------------------------------------------------------------
+ * Putting a thread on the run queue: different scheduling policies
+ * -------------------------------------------------------------------------- */
+
+STATIC_INLINE void
+addToRunQueue( StgTSO *t )
+{
+#if defined(PARALLEL_HASKELL)
+    if (RtsFlags.ParFlags.doFairScheduling) { 
+       // this does round-robin scheduling; good for concurrency
+       APPEND_TO_RUN_QUEUE(t);
+    } else {
+       // this does unfair scheduling; good for parallelism
+       PUSH_ON_RUN_QUEUE(t);
+    }
+#else
+    // this does round-robin scheduling; good for concurrency
+    APPEND_TO_RUN_QUEUE(t);
+#endif
+}
+    
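A minimal standalone sketch of the two policies with a toy queue (editor's illustration; APPEND_TO_RUN_QUEUE and PUSH_ON_RUN_QUEUE are the real RTS macros): appending at the tail gives FIFO order, so every runnable thread gets a turn (round-robin, good for concurrency), while pushing on the head gives LIFO order, so the most recently added thread runs next (unfair, but good for parallelism and locality).

#include <stdio.h>
#include <stdlib.h>

typedef struct Thread { int id; struct Thread *link; } Thread;

static Thread *run_hd = NULL, *run_tl = NULL;

/* round-robin: add at the tail (like APPEND_TO_RUN_QUEUE) */
static void append_to_run_queue(Thread *t)
{
    t->link = NULL;
    if (run_tl) { run_tl->link = t; } else { run_hd = t; }
    run_tl = t;
}

/* unfair: add at the head (like PUSH_ON_RUN_QUEUE) */
static void push_on_run_queue(Thread *t)
{
    t->link = run_hd;
    run_hd = t;
    if (run_tl == NULL) { run_tl = t; }
}

static Thread *mk(int id)
{
    Thread *t = malloc(sizeof *t);   /* leaks ignored in this throwaway example */
    t->id = id; t->link = NULL;
    return t;
}

int main(void)
{
    /* FIFO: threads 1,2,3 run in the order they became runnable */
    append_to_run_queue(mk(1));
    append_to_run_queue(mk(2));
    append_to_run_queue(mk(3));
    for (Thread *t = run_hd; t; t = t->link) printf("%d ", t->id);
    printf(" (fair)\n");

    /* LIFO: the most recently added thread runs first */
    run_hd = run_tl = NULL;
    push_on_run_queue(mk(1));
    push_on_run_queue(mk(2));
    push_on_run_queue(mk(3));
    for (Thread *t = run_hd; t; t = t->link) printf("%d ", t->id);
    printf(" (unfair)\n");
    return 0;
}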
 /* ---------------------------------------------------------------------------
    Main scheduling loop.
 
@@ -322,6 +403,7 @@ startSchedulerTaskIfNecessary(void)
      This is not the ugliest code you could imagine, but it's bloody close.
 
    ------------------------------------------------------------------------ */
+
 static void
 schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
           Capability *initialCapability )
@@ -331,9 +413,7 @@ schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
   StgThreadReturnCode ret;
 #if defined(GRAN)
   rtsEvent *event;
-#elif defined(PAR)
-  StgSparkPool *pool;
-  rtsSpark spark;
+#elif defined(PARALLEL_HASKELL)
   StgTSO *tso;
   GlobalTaskId pe;
   rtsBool receivedFinish = rtsFalse;
@@ -341,69 +421,50 @@ schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
   nat tp_size, sp_size; // stats only
 # endif
 #endif
-  rtsBool was_interrupted = rtsFalse;
   nat prev_what_next;
+  rtsBool ready_to_gc;
   
   // Pre-condition: sched_mutex is held.
   // We might have a capability, passed in as initialCapability.
   cap = initialCapability;
 
-#if defined(RTS_SUPPORTS_THREADS)
-  //
-  // in the threaded case, the capability is either passed in via the
-  // initialCapability parameter, or initialized inside the scheduler
-  // loop 
-  //
-  IF_DEBUG(scheduler,
-          sched_belch("### NEW SCHEDULER LOOP (main thr: %p, cap: %p)",
-                      mainThread, initialCapability);
-      );
-#else
+#if !defined(RTS_SUPPORTS_THREADS)
   // simply initialise it in the non-threaded case
   grabCapability(&cap);
 #endif
 
-#if defined(GRAN)
-  /* set up first event to get things going */
-  /* ToDo: assign costs for system setup and init MainTSO ! */
-  new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
-           ContinueThread, 
-           CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
-
-  IF_DEBUG(gran,
-          debugBelch("GRAN: Init CurrentTSO (in schedule) = %p\n", CurrentTSO);
-          G_TSO(CurrentTSO, 5));
-
-  if (RtsFlags.GranFlags.Light) {
-    /* Save current time; GranSim Light only */
-    CurrentTSO->gran.clock = CurrentTime[CurrentProc];
-  }      
-
-  event = get_next_event();
-
-  while (event!=(rtsEvent*)NULL) {
-    /* Choose the processor with the next event */
-    CurrentProc = event->proc;
-    CurrentTSO = event->tso;
+  IF_DEBUG(scheduler,
+          sched_belch("### NEW SCHEDULER LOOP (main thr: %p, cap: %p)",
+                      mainThread, initialCapability);
+      );
 
-#elif defined(PAR)
+  schedulePreLoop();
 
-  while (!receivedFinish) {    /* set by processMessages */
-                               /* when receiving PP_FINISH message         */ 
+  // -----------------------------------------------------------
+  // Scheduler loop starts here:
 
-#else // everything except GRAN and PAR
+#if defined(PARALLEL_HASKELL)
+#define TERMINATION_CONDITION        (!receivedFinish)
+#elif defined(GRAN)
+#define TERMINATION_CONDITION        ((event = get_next_event()) != (rtsEvent*)NULL) 
+#else
+#define TERMINATION_CONDITION        rtsTrue
+#endif
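An editor's toy illustration of the macro trick: the loop body below is written once, and only the termination test varies per build. (In the real non-parallel build the condition is rtsTrue, i.e. the loop only exits via return; the countdown here is just to make the example terminate.)

#include <stdbool.h>
#include <stdio.h>

/* Choose a "build": 1 plays the PARALLEL_HASKELL role, 0 the ordinary one. */
#define PARALLEL_BUILD 0

static bool receivedFinish = false;   /* set when a PP_FINISH-style message arrives */
static int  events_left    = 3;       /* stands in for the GRAN event queue */

#if PARALLEL_BUILD
# define TERMINATION_CONDITION (!receivedFinish)
#else
# define TERMINATION_CONDITION (events_left-- > 0)
#endif

int main(void)
{
    int iterations = 0;
    while (TERMINATION_CONDITION) {       /* same loop text in every build */
        iterations++;
        if (iterations == 3)
            receivedFinish = true;        /* only consulted in the parallel build */
    }
    printf("ran %d iterations\n", iterations);
    return 0;
}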
 
-  while (1) {
+  while (TERMINATION_CONDITION) {
 
+#if defined(GRAN)
+      /* Choose the processor with the next event */
+      CurrentProc = event->proc;
+      CurrentTSO = event->tso;
 #endif
 
-     IF_DEBUG(scheduler, printAllThreads());
-
 #if defined(RTS_SUPPORTS_THREADS)
       // Yield the capability to higher-priority tasks if necessary.
       //
       if (cap != NULL) {
-         yieldCapability(&cap);
+         yieldCapability(&cap, 
+                         mainThread ? &mainThread->bound_thread_cond : NULL );
       }
 
       // If we do not currently hold a capability, we wait for one
@@ -416,169 +477,520 @@ schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
       // We now have a capability...
 #endif
 
-    //
-    // If we're interrupted (the user pressed ^C, or some other
-    // termination condition occurred), kill all the currently running
-    // threads.
-    //
-    if (interrupted) {
-       IF_DEBUG(scheduler, sched_belch("interrupted"));
-       interrupted = rtsFalse;
-       was_interrupted = rtsTrue;
-#if defined(RTS_SUPPORTS_THREADS)
-       // In the threaded RTS, deadlock detection doesn't work,
-       // so just exit right away.
-       errorBelch("interrupted");
-       releaseCapability(cap);
-       RELEASE_LOCK(&sched_mutex);
-       shutdownHaskellAndExit(EXIT_SUCCESS);
-#else
-       deleteAllThreads();
+#if 0 /* extra sanity checking */
+      { 
+         StgMainThread *m;
+         for (m = main_threads; m != NULL; m = m->link) {
+             ASSERT(get_itbl(m->tso)->type == TSO);
+         }
+      }
 #endif
-    }
 
-#if defined(RTS_USER_SIGNALS)
-    // check for signals each time around the scheduler
-    if (signals_pending()) {
-      RELEASE_LOCK(&sched_mutex); /* ToDo: kill */
-      startSignalHandlers();
-      ACQUIRE_LOCK(&sched_mutex);
+    // Check whether we have re-entered the RTS from Haskell without
+    // going via suspendThread()/resumeThread (i.e. a 'safe' foreign
+    // call).
+    if (cap->r.rInHaskell) {
+         errorBelch("schedule: re-entered unsafely.\n"
+                    "   Perhaps a 'foreign import unsafe' should be 'safe'?");
+         stg_exit(1);
     }
-#endif
 
     //
-    // Check whether any waiting threads need to be woken up.  If the
-    // run queue is empty, and there are no other tasks running, we
-    // can wait indefinitely for something to happen.
+    // Test for interruption.  If interrupted==rtsTrue, then either
+    // we received a keyboard interrupt (^C), or the scheduler is
+    // trying to shut down all the tasks (shutting_down_scheduler) in
+    // the threaded RTS.
     //
-    if ( !EMPTY_QUEUE(blocked_queue_hd) || !EMPTY_QUEUE(sleeping_queue)
-#if defined(RTS_SUPPORTS_THREADS)
-               || EMPTY_RUN_QUEUE()
-#endif
-       )
-    {
-      awaitEvent( EMPTY_RUN_QUEUE() );
+    if (interrupted) {
+       if (shutting_down_scheduler) {
+           IF_DEBUG(scheduler, sched_belch("shutting down"));
+           releaseCapability(cap);
+           if (mainThread) {
+               mainThread->stat = Interrupted;
+               mainThread->ret  = NULL;
+           }
+           return;
+       } else {
+           IF_DEBUG(scheduler, sched_belch("interrupted"));
+           deleteAllThreads();
+       }
     }
-    // we can be interrupted while waiting for I/O...
-    if (interrupted) continue;
 
-    /* 
-     * Detect deadlock: when we have no threads to run, there are no
-     * threads waiting on I/O or sleeping, and all the other tasks are
-     * waiting for work, we must have a deadlock of some description.
-     *
-     * We first try to find threads blocked on themselves (ie. black
-     * holes), and generate NonTermination exceptions where necessary.
-     *
-     * If no threads are black holed, we have a deadlock situation, so
-     * inform all the main threads.
-     */
-#if !defined(PAR) && !defined(RTS_SUPPORTS_THREADS)
-    if (   EMPTY_THREAD_QUEUES() )
+#if defined(not_yet) && defined(SMP)
+    //
+    // Top up the run queue from our spark pool.  We try to make the
+    // number of threads in the run queue equal to the number of
+    // free capabilities.
+    //
     {
-       IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC..."));
-       // Garbage collection can release some new threads due to
-       // either (a) finalizers or (b) threads resurrected because
-       // they are about to be send BlockedOnDeadMVar.  Any threads
-       // thus released will be immediately runnable.
-       GarbageCollect(GetRoots,rtsTrue);
+       StgClosure *spark;
+       if (EMPTY_RUN_QUEUE()) {
+           spark = findSpark(rtsFalse);
+           if (spark == NULL) {
+               break; /* no more sparks in the pool */
+           } else {
+               createSparkThread(spark);         
+               IF_DEBUG(scheduler,
+                        sched_belch("==^^ turning spark of closure %p into a thread",
+                                    (StgClosure *)spark));
+           }
+       }
+    }
+#endif // SMP
 
-       if ( !EMPTY_RUN_QUEUE() ) { goto not_deadlocked; }
+    scheduleStartSignalHandlers();
 
-       IF_DEBUG(scheduler, 
-                sched_belch("still deadlocked, checking for black holes..."));
-       detectBlackHoles();
+    // Only check the black holes here if we've nothing else to do.
+    // During normal execution, the black hole list only gets checked
+    // at GC time, to avoid repeatedly traversing this possibly long
+    // list each time around the scheduler.
+    if (EMPTY_RUN_QUEUE()) { scheduleCheckBlackHoles(); }
 
-       if ( !EMPTY_RUN_QUEUE() ) { goto not_deadlocked; }
+    scheduleCheckBlockedThreads();
 
-#if defined(RTS_USER_SIGNALS)
-       /* If we have user-installed signal handlers, then wait
-        * for signals to arrive rather then bombing out with a
-        * deadlock.
-        */
-       if ( anyUserHandlers() ) {
-           IF_DEBUG(scheduler, 
-                    sched_belch("still deadlocked, waiting for signals..."));
+    scheduleDetectDeadlock();
 
-           awaitUserSignals();
+    // Normally, the only way we can get here with no threads to
+    // run is if a keyboard interrupt was received during
+    // scheduleCheckBlockedThreads() or scheduleDetectDeadlock().
+    // Additionally, it is not fatal for the
+    // threaded RTS to reach here with no threads to run.
+    //
+    // win32: might be here due to awaitEvent() being abandoned
+    // as a result of a console event having been delivered.
+    if ( EMPTY_RUN_QUEUE() ) {
+#if !defined(RTS_SUPPORTS_THREADS) && !defined(mingw32_HOST_OS)
+       ASSERT(interrupted);
+#endif
+       continue; // nothing to do
+    }
 
-           // we might be interrupted...
-           if (interrupted) { continue; }
+#if defined(PARALLEL_HASKELL)
+    scheduleSendPendingMessages();
+    if (EMPTY_RUN_QUEUE() && scheduleActivateSpark()) 
+       continue;
 
-           if (signals_pending()) {
-               RELEASE_LOCK(&sched_mutex);
-               startSignalHandlers();
-               ACQUIRE_LOCK(&sched_mutex);
-           }
-           ASSERT(!EMPTY_RUN_QUEUE());
-           goto not_deadlocked;
-       }
+#if defined(SPARKS)
+    ASSERT(next_fish_to_send_at==0);  // i.e. no delayed fishes left!
 #endif
 
-       /* Probably a real deadlock.  Send the current main thread the
-        * Deadlock exception (or in the SMP build, send *all* main
-        * threads the deadlock exception, since none of them can make
-        * progress).
-        */
-       {
-           StgMainThread *m;
-           m = main_threads;
-           switch (m->tso->why_blocked) {
-           case BlockedOnBlackHole:
-           case BlockedOnException:
-           case BlockedOnMVar:
-               raiseAsync(m->tso, (StgClosure *)NonTermination_closure);
-               break;
-           default:
-               barf("deadlock: main thread blocked in a strange way");
-           }
-       }
+    /* If we still have no work we need to send a FISH to get a spark
+       from another PE */
+    if (EMPTY_RUN_QUEUE()) {
+       if (!scheduleGetRemoteWork(&receivedFinish)) continue;
+       ASSERT(rtsFalse); // should not happen at the moment
+    }
+    // from here: non-empty run queue.
+    //  TODO: merge above case with this; make only one call to processMessages()!
+    if (PacketsWaiting()) {  /* process incoming messages, if
+                               any pending...  only in else
+                               because getRemoteWork waits for
+                               messages as well */
+       receivedFinish = processMessages();
     }
-  not_deadlocked:
+#endif
 
-#elif defined(RTS_SUPPORTS_THREADS)
-    // ToDo: add deadlock detection in threaded RTS
-#elif defined(PAR)
-    // ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
+#if defined(GRAN)
+    scheduleProcessEvent(event);
+#endif
+
+    // 
+    // Get a thread to run
+    //
+    ASSERT(run_queue_hd != END_TSO_QUEUE);
+    POP_RUN_QUEUE(t);
+
+#if defined(GRAN) || defined(PAR)
+    scheduleGranParReport(); // some kind of debugging output
+#else
+    // Sanity check the thread we're about to run.  This can be
+    // expensive if there is lots of thread switching going on...
+    IF_DEBUG(sanity,checkTSO(t));
 #endif
 
 #if defined(RTS_SUPPORTS_THREADS)
-    if ( EMPTY_RUN_QUEUE() ) {
-       continue; // nothing to do
+    // Check whether we can run this thread in the current task.
+    // If not, we have to pass our capability to the right task.
+    {
+      StgMainThread *m = t->main;
+      
+      if(m)
+      {
+       if(m == mainThread)
+       {
+         IF_DEBUG(scheduler,
+           sched_belch("### Running thread %d in bound thread", t->id));
+         // yes, the Haskell thread is bound to the current native thread
+       }
+       else
+       {
+         IF_DEBUG(scheduler,
+           sched_belch("### thread %d bound to another OS thread", t->id));
+         // no, bound to a different Haskell thread: pass to that thread
+         PUSH_ON_RUN_QUEUE(t);
+         continue;
+       }
+      }
+      else
+      {
+       if(mainThread != NULL)
+        // The thread we want to run is unbound.
+       {
+         IF_DEBUG(scheduler,
+           sched_belch("### this OS thread cannot run thread %d", t->id));
+         // no, the current native thread is bound to a different
+         // Haskell thread, so pass it to any worker thread
+         PUSH_ON_RUN_QUEUE(t);
+         continue; 
+       }
+      }
     }
 #endif
 
-#if defined(GRAN)
-    if (RtsFlags.GranFlags.Light)
-      GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
-
-    /* adjust time based on time-stamp */
-    if (event->time > CurrentTime[CurrentProc] &&
-        event->evttype != ContinueThread)
-      CurrentTime[CurrentProc] = event->time;
+    cap->r.rCurrentTSO = t;
     
-    /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
-    if (!RtsFlags.GranFlags.Light)
-      handleIdlePEs();
+    /* context switches are now initiated by the timer signal, unless
+     * the user specified "context switch as often as possible", with
+     * +RTS -C0
+     */
+    if ((RtsFlags.ConcFlags.ctxtSwitchTicks == 0
+        && (run_queue_hd != END_TSO_QUEUE
+            || blocked_queue_hd != END_TSO_QUEUE
+            || sleeping_queue != END_TSO_QUEUE)))
+       context_switch = 1;
 
-    IF_DEBUG(gran, debugBelch("GRAN: switch by event-type\n"));
+run_thread:
 
-    /* main event dispatcher in GranSim */
-    switch (event->evttype) {
-      /* Should just be continuing execution */
-    case ContinueThread:
-      IF_DEBUG(gran, debugBelch("GRAN: doing ContinueThread\n"));
-      /* ToDo: check assertion
-      ASSERT(run_queue_hd != (StgTSO*)NULL &&
-            run_queue_hd != END_TSO_QUEUE);
-      */
-      /* Ignore ContinueThreads for fetching threads (if synchr comm) */
-      if (!RtsFlags.GranFlags.DoAsyncFetch &&
-         procStatus[CurrentProc]==Fetching) {
-       debugBelch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]\n",
-             CurrentTSO->id, CurrentTSO, CurrentProc);
-       goto next_thread;
-      }        
+    RELEASE_LOCK(&sched_mutex);
+
+    IF_DEBUG(scheduler, sched_belch("-->> running thread %ld %s ...", 
+                             (long)t->id, whatNext_strs[t->what_next]));
+
+#if defined(PROFILING)
+    startHeapProfTimer();
+#endif
+
+    // ----------------------------------------------------------------------
+    // Run the current thread 
+
+    prev_what_next = t->what_next;
+
+    errno = t->saved_errno;
+    cap->r.rInHaskell = rtsTrue;
+
+    recent_activity = ACTIVITY_YES;
+
+    switch (prev_what_next) {
+
+    case ThreadKilled:
+    case ThreadComplete:
+       /* Thread already finished, return to scheduler. */
+       ret = ThreadFinished;
+       break;
+
+    case ThreadRunGHC:
+       ret = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
+       break;
+
+    case ThreadInterpret:
+       ret = interpretBCO(cap);
+       break;
+
+    default:
+      barf("schedule: invalid what_next field");
+    }
+
+#if defined(SMP)
+    // in SMP mode, we might return with a different capability than
+    // we started with, if the Haskell thread made a foreign call.  So
+    // let's find out what our current Capability is:
+    cap = myCapability();
+#endif
+
+    cap->r.rInHaskell = rtsFalse;
+
+    // The TSO might have moved, eg. if it re-entered the RTS and a GC
+    // happened.  So find the new location:
+    t = cap->r.rCurrentTSO;
+
+    // And save the current errno in this thread.
+    t->saved_errno = errno;
+
+    // ----------------------------------------------------------------------
+    
+    /* Costs for the scheduler are assigned to CCS_SYSTEM */
+#if defined(PROFILING)
+    stopHeapProfTimer();
+    CCCS = CCS_SYSTEM;
+#endif
+    
+    ACQUIRE_LOCK(&sched_mutex);
+
+    // We have run some Haskell code: there might be blackhole-blocked
+    // threads to wake up now.
+    if ( blackhole_queue != END_TSO_QUEUE ) {
+       blackholes_need_checking = rtsTrue;
+    }
+    
+#if defined(RTS_SUPPORTS_THREADS)
+    IF_DEBUG(scheduler,debugBelch("sched (task %p): ", osThreadId()););
+#elif !defined(GRAN) && !defined(PARALLEL_HASKELL)
+    IF_DEBUG(scheduler,debugBelch("sched: "););
+#endif
+    
+    schedulePostRunThread();
+
+    ready_to_gc = rtsFalse;
+
+    switch (ret) {
+    case HeapOverflow:
+       ready_to_gc = scheduleHandleHeapOverflow(cap,t);
+       break;
+
+    case StackOverflow:
+       scheduleHandleStackOverflow(t);
+       break;
+
+    case ThreadYielding:
+       if (scheduleHandleYield(t, prev_what_next)) {
+            // shortcut for switching between compiler/interpreter:
+           goto run_thread; 
+       }
+       break;
+
+    case ThreadBlocked:
+       scheduleHandleThreadBlocked(t);
+       break;
+
+    case ThreadFinished:
+       if (scheduleHandleThreadFinished(mainThread, cap, t)) return;
+       break;
+
+    default:
+      barf("schedule: invalid thread return code %d", (int)ret);
+    }
+
+    if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; }
+    if (ready_to_gc) { scheduleDoGC(rtsFalse); }
+  } /* end of while() */
+
+  IF_PAR_DEBUG(verbose,
+              debugBelch("== Leaving schedule() after having received Finish\n"));
+}
+
+/* ----------------------------------------------------------------------------
+ * Setting up the scheduler loop
+ * ASSUMES: sched_mutex
+ * ------------------------------------------------------------------------- */
+
+static void
+schedulePreLoop(void)
+{
+#if defined(GRAN) 
+    /* set up first event to get things going */
+    /* ToDo: assign costs for system setup and init MainTSO ! */
+    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
+             ContinueThread, 
+             CurrentTSO, (StgClosure*)NULL, (rtsSpark*)NULL);
+    
+    IF_DEBUG(gran,
+            debugBelch("GRAN: Init CurrentTSO (in schedule) = %p\n", 
+                       CurrentTSO);
+            G_TSO(CurrentTSO, 5));
+    
+    if (RtsFlags.GranFlags.Light) {
+       /* Save current time; GranSim Light only */
+       CurrentTSO->gran.clock = CurrentTime[CurrentProc];
+    }      
+#endif
+}
+
+/* ----------------------------------------------------------------------------
+ * Start any pending signal handlers
+ * ASSUMES: sched_mutex
+ * ------------------------------------------------------------------------- */
+
+static void
+scheduleStartSignalHandlers(void)
+{
+#if defined(RTS_USER_SIGNALS) && !defined(RTS_SUPPORTS_THREADS)
+    if (signals_pending()) {
+      RELEASE_LOCK(&sched_mutex); /* ToDo: kill */
+      startSignalHandlers();
+      ACQUIRE_LOCK(&sched_mutex);
+    }
+#endif
+}
+
+/* ----------------------------------------------------------------------------
+ * Check for blocked threads that can be woken up.
+ * ASSUMES: sched_mutex
+ * ------------------------------------------------------------------------- */
+
+static void
+scheduleCheckBlockedThreads(void)
+{
+    //
+    // Check whether any waiting threads need to be woken up.  If the
+    // run queue is empty, and there are no other tasks running, we
+    // can wait indefinitely for something to happen.
+    //
+    if ( !EMPTY_QUEUE(blocked_queue_hd) || !EMPTY_QUEUE(sleeping_queue) )
+    {
+#if defined(RTS_SUPPORTS_THREADS)
+       // We shouldn't be here...
+       barf("schedule: awaitEvent() in threaded RTS");
+#else
+       awaitEvent( EMPTY_RUN_QUEUE() && !blackholes_need_checking );
+#endif
+    }
+}
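The boolean passed to awaitEvent() means "may block indefinitely". A standalone sketch of that idea using select() (editor's illustration only; the real awaitEvent elsewhere in the RTS is more involved): when there is other work pending we only poll, otherwise we sleep until an event arrives.

#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>
#include <unistd.h>

/* 'may_block' plays the role of awaitEvent's argument: true means nothing
 * else is runnable, so we can sleep until some event arrives; false means
 * there is other work, so only poll. */
static void await_event_sketch(int may_block)
{
    fd_set fds;
    struct timeval zero = { 0, 0 };

    FD_ZERO(&fds);
    FD_SET(STDIN_FILENO, &fds);

    /* NULL timeout: block indefinitely; &zero: return immediately */
    int n = select(STDIN_FILENO + 1, &fds, NULL, NULL, may_block ? NULL : &zero);
    printf("select returned %d\n", n);
}

int main(void)
{
    await_event_sketch(0);    /* poll only; returns at once */
    return 0;
}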
+
+
+/* ----------------------------------------------------------------------------
+ * Check for threads blocked on BLACKHOLEs that can be woken up
+ * ASSUMES: sched_mutex
+ * ------------------------------------------------------------------------- */
+static void
+scheduleCheckBlackHoles( void )
+{
+    if ( blackholes_need_checking )
+    {
+       checkBlackHoles();
+       blackholes_need_checking = rtsFalse;
+    }
+}
+
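A standalone sketch of the pattern (editor's illustration, with a toy blocked list in place of blackhole_queue): the hot path merely sets a blackholes_need_checking-style flag after running Haskell code, and the potentially long scan is deferred until the scheduler is otherwise idle.

#include <stdbool.h>
#include <stdio.h>

static bool need_checking = false;        /* analogue of blackholes_need_checking */
static int  blocked[3]    = { 1, 2, 3 };  /* stand-in for blackhole_queue */
static int  n_blocked     = 3;

/* hot path: cheap - just note that a scan might now be worthwhile */
static void after_running_haskell(void)
{
    if (n_blocked > 0) need_checking = true;
}

/* idle path: the expensive scan, done only when flagged */
static void check_blocked_if_needed(void)
{
    if (!need_checking) return;
    for (int i = 0; i < n_blocked; i++)
        printf("waking thread %d\n", blocked[i]);
    n_blocked = 0;
    need_checking = false;
}

int main(void)
{
    after_running_haskell();      /* sets the flag */
    check_blocked_if_needed();    /* scans once */
    check_blocked_if_needed();    /* no-op: flag already cleared */
    return 0;
}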
+/* ----------------------------------------------------------------------------
+ * Detect deadlock conditions and attempt to resolve them.
+ * ASSUMES: sched_mutex
+ * ------------------------------------------------------------------------- */
+
+static void
+scheduleDetectDeadlock(void)
+{
+
+#if defined(PARALLEL_HASKELL)
+    // ToDo: add deadlock detection in GUM (similar to SMP) -- HWL
+    return;
+#endif
+
+    /* 
+     * Detect deadlock: when we have no threads to run, there are no
+     * threads blocked, waiting for I/O, or sleeping, and all the
+     * other tasks are waiting for work, we must have a deadlock of
+     * some description.
+     */
+    if ( EMPTY_THREAD_QUEUES() )
+    {
+#if defined(RTS_SUPPORTS_THREADS)
+       /* 
+        * In the threaded RTS, we only check for deadlock if there
+        * has been no activity in a complete timeslice.  This means
+        * we won't eagerly start a full GC just because we don't have
+        * any threads to run currently.
+        */
+       if (recent_activity != ACTIVITY_INACTIVE) return;
+#endif
+
+       IF_DEBUG(scheduler, sched_belch("deadlocked, forcing major GC..."));
+
+       // Garbage collection can release some new threads due to
+       // either (a) finalizers or (b) threads resurrected because
+       // they are unreachable and will therefore be sent an
+       // exception.  Any threads thus released will be immediately
+       // runnable.
+
+       scheduleDoGC( rtsTrue/*force  major GC*/ );
+       recent_activity = ACTIVITY_DONE_GC;
+       if ( !EMPTY_RUN_QUEUE() ) return;
+
+#if defined(RTS_USER_SIGNALS) && !defined(RTS_SUPPORTS_THREADS)
+       /* If we have user-installed signal handlers, then wait
+        * for signals to arrive rather than bombing out with a
+        * deadlock.
+        */
+       if ( anyUserHandlers() ) {
+           IF_DEBUG(scheduler, 
+                    sched_belch("still deadlocked, waiting for signals..."));
+
+           awaitUserSignals();
+
+           if (signals_pending()) {
+               RELEASE_LOCK(&sched_mutex);
+               startSignalHandlers();
+               ACQUIRE_LOCK(&sched_mutex);
+           }
+
+           // either we have threads to run, or we were interrupted:
+           ASSERT(!EMPTY_RUN_QUEUE() || interrupted);
+       }
+#endif
+
+#if !defined(RTS_SUPPORTS_THREADS)
+       /* Probably a real deadlock.  Send the current main thread the
+        * Deadlock exception (or in the SMP build, send *all* main
+        * threads the deadlock exception, since none of them can make
+        * progress).
+        */
+       {
+           StgMainThread *m;
+           m = main_threads;
+           switch (m->tso->why_blocked) {
+           case BlockedOnSTM:
+           case BlockedOnBlackHole:
+           case BlockedOnException:
+           case BlockedOnMVar:
+               raiseAsync(m->tso, (StgClosure *)NonTermination_closure);
+               return;
+           default:
+               barf("deadlock: main thread blocked in a strange way");
+           }
+       }
+#endif
+    }
+}
+
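How recent_activity ever becomes ACTIVITY_INACTIVE is not shown in this file; the sketch below is an editor's guess at the interplay with the timer tick (the intermediate ACTIVITY_MAYBE_NO state and the one-step demotion per tick are assumptions): running a thread marks ACTIVITY_YES, an idle timer tick demotes the flag, and only after a whole idle timeslice does the deadlock check above force a major GC.

#include <stdio.h>

/* ACTIVITY_YES, ACTIVITY_INACTIVE and ACTIVITY_DONE_GC appear in this file;
 * ACTIVITY_MAYBE_NO and the demotion policy are assumptions. */
enum { ACTIVITY_YES, ACTIVITY_MAYBE_NO, ACTIVITY_INACTIVE, ACTIVITY_DONE_GC };

static int recent_activity = ACTIVITY_YES;

static void run_a_thread(void) { recent_activity = ACTIVITY_YES; }

/* hypothetical timer tick: demote the flag if nothing has run since last tick */
static void timer_tick(void)
{
    if (recent_activity == ACTIVITY_YES)           recent_activity = ACTIVITY_MAYBE_NO;
    else if (recent_activity == ACTIVITY_MAYBE_NO) recent_activity = ACTIVITY_INACTIVE;
}

static void detect_deadlock(void)
{
    if (recent_activity != ACTIVITY_INACTIVE) return;   /* been busy recently */
    printf("idle for a whole timeslice: forcing a major GC\n");
    recent_activity = ACTIVITY_DONE_GC;
}

int main(void)
{
    run_a_thread();
    timer_tick(); detect_deadlock();   /* MAYBE_NO: not idle long enough yet */
    timer_tick(); detect_deadlock();   /* INACTIVE: now we force the GC */
    return 0;
}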
+/* ----------------------------------------------------------------------------
+ * Process an event (GRAN only)
+ * ------------------------------------------------------------------------- */
+
+#if defined(GRAN)
+static StgTSO *
+scheduleProcessEvent(rtsEvent *event)
+{
+    StgTSO *t;
+
+    if (RtsFlags.GranFlags.Light)
+      GranSimLight_enter_system(event, &ActiveTSO); // adjust ActiveTSO etc
+
+    /* adjust time based on time-stamp */
+    if (event->time > CurrentTime[CurrentProc] &&
+        event->evttype != ContinueThread)
+      CurrentTime[CurrentProc] = event->time;
+    
+    /* Deal with the idle PEs (may issue FindWork or MoveSpark events) */
+    if (!RtsFlags.GranFlags.Light)
+      handleIdlePEs();
+
+    IF_DEBUG(gran, debugBelch("GRAN: switch by event-type\n"));
+
+    /* main event dispatcher in GranSim */
+    switch (event->evttype) {
+      /* Should just be continuing execution */
+    case ContinueThread:
+      IF_DEBUG(gran, debugBelch("GRAN: doing ContinueThread\n"));
+      /* ToDo: check assertion
+      ASSERT(run_queue_hd != (StgTSO*)NULL &&
+            run_queue_hd != END_TSO_QUEUE);
+      */
+      /* Ignore ContinueThreads for fetching threads (if synchr comm) */
+      if (!RtsFlags.GranFlags.DoAsyncFetch &&
+         procStatus[CurrentProc]==Fetching) {
+       debugBelch("ghuH: Spurious ContinueThread while Fetching ignored; TSO %d (%p) [PE %d]\n",
+             CurrentTSO->id, CurrentTSO, CurrentProc);
+       goto next_thread;
+      }        
       /* Ignore ContinueThreads for completed threads */
       if (CurrentTSO->what_next == ThreadComplete) {
        debugBelch("ghuH: found a ContinueThread event for completed thread %d (%p) [PE %d] (ignoring ContinueThread)\n", 
@@ -683,22 +1095,58 @@ schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
             DumpGranEvent(GR_SCHEDULE, t));
 
     procStatus[CurrentProc] = Busy;
+}
+#endif // GRAN
 
-#elif defined(PAR)
+/* ----------------------------------------------------------------------------
+ * Send pending messages (PARALLEL_HASKELL only)
+ * ------------------------------------------------------------------------- */
+
+#if defined(PARALLEL_HASKELL)
+static StgTSO *
+scheduleSendPendingMessages(void)
+{
+    StgSparkPool *pool;
+    rtsSpark spark;
+    StgTSO *t;
+
+# if defined(PAR) // global Mem.Mgmt., omit for now
     if (PendingFetches != END_BF_QUEUE) {
         processFetches();
     }
+# endif
+    
+    if (RtsFlags.ParFlags.BufferTime) {
+       // if we use message buffering, we must send away all message
+       // packets which have become too old...
+       sendOldBuffers(); 
+    }
+}
+#endif
 
-    /* ToDo: phps merge with spark activation above */
-    /* check whether we have local work and send requests if we have none */
-    if (EMPTY_RUN_QUEUE()) {  /* no runnable threads */
-      /* :-[  no local threads => look out for local sparks */
-      /* the spark pool for the current PE */
-      pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
-      if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
-         pool->hd < pool->tl) {
-       /* 
-        * ToDo: add GC code check that we really have enough heap afterwards!!
+/* ----------------------------------------------------------------------------
+ * Activate spark threads (PARALLEL_HASKELL only)
+ * ------------------------------------------------------------------------- */
+
+#if defined(PARALLEL_HASKELL)
+static void
+scheduleActivateSpark(void)
+{
+#if defined(SPARKS)
+  ASSERT(EMPTY_RUN_QUEUE());
+/* We get here if the run queue is empty and we want some work.
+   We try to turn a spark into a thread, and add it to the run queue,
+   from where it will be picked up in the next iteration of the scheduler
+   loop.
+*/
+
+      /* :-[  no local threads => look out for local sparks */
+      /* the spark pool for the current PE */
+      pool = &(cap.r.rSparks); // JB: cap = (old) MainCap
+      if (advisory_thread_count < RtsFlags.ParFlags.maxThreads &&
+         pool->hd < pool->tl) {
+       /* 
+        * ToDo: add GC code check that we really have enough heap afterwards!!
         * Old comment:
         * If we're here (no runnable threads) and we have pending
         * sparks, we must have a space problem.  Get enough space
@@ -706,29 +1154,73 @@ schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
         * thread... 
         */
 
-       spark = findSpark(rtsFalse);                /* get a spark */
-       if (spark != (rtsSpark) NULL) {
-         tso = activateSpark(spark);       /* turn the spark into a thread */
-         IF_PAR_DEBUG(schedule,
-                      debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n",
-                            tso->id, tso, advisory_thread_count));
-
-         if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
-           debugBelch("==^^ failed to activate spark\n");
-           goto next_thread;
-         }               /* otherwise fall through & pick-up new tso */
-       } else {
-         IF_PAR_DEBUG(verbose,
-                      debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n", 
-                            spark_queue_len(pool)));
-         goto next_thread;
+       spark = findSpark(rtsFalse);            /* get a spark */
+       if (spark != (rtsSpark) NULL) {
+         tso = createThreadFromSpark(spark);       /* turn the spark into a thread */
+         IF_PAR_DEBUG(fish, // schedule,
+                      debugBelch("==== schedule: Created TSO %d (%p); %d threads active\n",
+                            tso->id, tso, advisory_thread_count));
+
+         if (tso==END_TSO_QUEUE) { /* failed to activate spark->back to loop */
+           IF_PAR_DEBUG(fish, // schedule,
+                        debugBelch("==^^ failed to create thread from spark @ %lx\n",
+                            spark));
+           return rtsFalse; /* failed to generate a thread */
+         }                  /* otherwise fall through & pick-up new tso */
+       } else {
+         IF_PAR_DEBUG(fish, // schedule,
+                      debugBelch("==^^ no local sparks (spark pool contains only NFs: %d)\n", 
+                            spark_queue_len(pool)));
+         return rtsFalse;  /* failed to generate a thread */
+       }
+       return rtsTrue;  /* success in generating a thread */
+  } else { /* no more threads permitted or pool empty */
+    return rtsFalse;  /* failed to generateThread */
+  }
+#else
+  tso = NULL; // avoid compiler warning only
+  return rtsFalse;  /* dummy in non-PAR setup */
+#endif // SPARKS
+}
+#endif // PARALLEL_HASKELL
+
+/* ----------------------------------------------------------------------------
+ * Get work from a remote node (PARALLEL_HASKELL only)
+ * ------------------------------------------------------------------------- */
+    
+#if defined(PARALLEL_HASKELL)
+static rtsBool
+scheduleGetRemoteWork(rtsBool *receivedFinish)
+{
+  ASSERT(EMPTY_RUN_QUEUE());
+
+  if (RtsFlags.ParFlags.BufferTime) {
+       IF_PAR_DEBUG(verbose, 
+               debugBelch("...send all pending data,"));
+        {
+         nat i;
+         for (i=1; i<=nPEs; i++)
+           sendImmediately(i); // send all messages away immediately
        }
-      }
+  }
+# ifndef SPARKS
+       //++EDEN++ idle() , i.e. send all buffers, wait for work
+       // suppress fishing in EDEN... just look for incoming messages
+       // (blocking receive)
+  IF_PAR_DEBUG(verbose, 
+              debugBelch("...wait for incoming messages...\n"));
+  *receivedFinish = processMessages(); // blocking receive...
+
+       // and reenter scheduling loop after having received something
+       // (return rtsFalse below)
+
+# else /* activate SPARKS machinery */
+/* We get here if we have no work and activating a local spark also failed.
+   We try to get a remote spark by sending a FISH message.
+   Thread migration should be added here, and triggered when a sequence of 
+   fishes returns without work. */
+       delay = (RtsFlags.ParFlags.fishDelay!=0ll ? RtsFlags.ParFlags.fishDelay : 0ll);
 
-      /* If we still have no work we need to send a FISH to get a spark
-        from another PE 
-      */
-      if (EMPTY_RUN_QUEUE()) {
       /* =8-[  no local sparks => look for work on other PEs */
        /*
         * We really have absolutely no work.  Send out a fish
@@ -738,48 +1230,106 @@ schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
         * we're hoping to see.  (Of course, we still have to
         * respond to other types of messages.)
         */
-       TIME now = msTime() /*CURRENT_TIME*/;
+       rtsTime now = msTime() /*CURRENT_TIME*/;
        IF_PAR_DEBUG(verbose, 
                     debugBelch("--  now=%ld\n", now));
-       IF_PAR_DEBUG(verbose,
-                    if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
-                        (last_fish_arrived_at!=0 &&
-                         last_fish_arrived_at+RtsFlags.ParFlags.fishDelay > now)) {
-                      debugBelch("--$$ delaying FISH until %ld (last fish %ld, delay %ld, now %ld)\n",
-                            last_fish_arrived_at+RtsFlags.ParFlags.fishDelay,
-                            last_fish_arrived_at,
-                            RtsFlags.ParFlags.fishDelay, now);
-                    });
-       
+       IF_PAR_DEBUG(fish, // verbose,
+            if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
+                (last_fish_arrived_at!=0 &&
+                 last_fish_arrived_at+delay > now)) {
+              debugBelch("--$$ <%llu> delaying FISH until %llu (last fish %llu, delay %llu)\n",
+                    now, last_fish_arrived_at+delay, 
+                    last_fish_arrived_at,
+                    delay);
+            });
+  
        if (outstandingFishes < RtsFlags.ParFlags.maxFishes &&
-           (last_fish_arrived_at==0 ||
-            (last_fish_arrived_at+RtsFlags.ParFlags.fishDelay <= now))) {
-         /* outstandingFishes is set in sendFish, processFish;
-            avoid flooding system with fishes via delay */
-         pe = choosePE();
-         sendFish(pe, mytid, NEW_FISH_AGE, NEW_FISH_HISTORY, 
-                  NEW_FISH_HUNGER);
-
-         // Global statistics: count no. of fishes
-         if (RtsFlags.ParFlags.ParStats.Global &&
-             RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
-           globalParStats.tot_fish_mess++;
-         }
-       }
-      
-       receivedFinish = processMessages();
-       goto next_thread;
+           advisory_thread_count < RtsFlags.ParFlags.maxThreads) { // send a FISH, but when?
+         if (last_fish_arrived_at==0 ||
+             (last_fish_arrived_at+delay <= now)) {           // send FISH now!
+           /* outstandingFishes is set in sendFish, processFish;
+              avoid flooding system with fishes via delay */
+    next_fish_to_send_at = 0;  
+  } else {
+    /* ToDo: this should be done in the main scheduling loop to avoid the
+             busy wait here; not so bad if fish delay is very small  */
+    int iq = 0; // DEBUGGING -- HWL
+    next_fish_to_send_at = last_fish_arrived_at+delay; // remember when to send  
+    /* send a fish when ready, but process messages that arrive in the meantime */
+    do {
+      if (PacketsWaiting()) {
+        iq++; // DEBUGGING
+        *receivedFinish = processMessages();
       }
-    } else if (PacketsWaiting()) {  /* Look for incoming messages */
-      receivedFinish = processMessages();
-    }
+      now = msTime();
+    } while (!*receivedFinish || now<next_fish_to_send_at);
+    // JB: This means the fish could become obsolete, if we receive
+    // work. Better check for work again? 
+    // last line: while (!receivedFinish || !haveWork || now<...)
+    // next line: if (receivedFinish || haveWork )
+
+    if (*receivedFinish) // no need to send a FISH if we are finishing anyway
+      return rtsFalse;  // NB: this will leave scheduler loop
+                       // immediately after return!
+                         
+    IF_PAR_DEBUG(fish, // verbose,
+              debugBelch("--$$ <%llu> sent delayed fish (%d processMessages); active/total threads=%d/%d\n",now,iq,run_queue_len(),advisory_thread_count));
 
-    /* Now we are sure that we have some work available */
-    ASSERT(run_queue_hd != END_TSO_QUEUE);
+  }
 
-    /* Take a thread from the run queue, if we have work */
-    POP_RUN_QUEUE(t);  // take_off_run_queue(END_TSO_QUEUE);
-    IF_DEBUG(sanity,checkTSO(t));
+    // JB: IMHO, this should all be hidden inside sendFish(...)
+    /* pe = choosePE(); 
+       sendFish(pe, thisPE, NEW_FISH_AGE, NEW_FISH_HISTORY, 
+                NEW_FISH_HUNGER);
+
+    // Global statistics: count no. of fishes
+    if (RtsFlags.ParFlags.ParStats.Global &&
+         RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
+          globalParStats.tot_fish_mess++;
+          }
+    */ 
+
+  /* delayed fishes must have been sent by now! */
+  next_fish_to_send_at = 0;  
+  }
+      
+  *receivedFinish = processMessages();
+# endif /* SPARKS */
+
+ return rtsFalse;
+ /* NB: this function always returns rtsFalse, meaning the scheduler
+    loop continues with the next iteration; 
+    rationale: 
+      return code means success in finding work; we enter this function
+      if there is no local work, thus have to send a fish which takes
+      time until it arrives with work; in the meantime we should process
+      messages in the main loop;
+ */
+}
+#endif // PARALLEL_HASKELL
+
+/* ----------------------------------------------------------------------------
+ * PAR/GRAN: Report stats & debugging info(?)
+ * ------------------------------------------------------------------------- */
+
+#if defined(PAR) || defined(GRAN)
+static void
+scheduleGranParReport(void)
+{
+  ASSERT(run_queue_hd != END_TSO_QUEUE);
+
+  /* Take a thread from the run queue, if we have work */
+  POP_RUN_QUEUE(t);  // take_off_run_queue(END_TSO_QUEUE);
+
+    /* If this TSO has got its outport closed in the meantime, 
+     *   it mustn't be run. Instead, we have to clean it up as if it was finished.
+     * It has to be marked as TH_DEAD for this purpose.
+     * If it is TH_TERM instead, it is supposed to have finished in the normal way.
+
+JB: TODO: investigate whether state change field could be nuked
+     entirely and replaced by the normal tso state (whatnext
+     field). All we want to do is to kill tsos from outside.
+     */
 
     /* ToDo: write something to the log-file
     if (RTSflags.ParFlags.granSimStats && !sameThread)
@@ -788,25 +1338,20 @@ schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
     CurrentTSO = t;
     */
     /* the spark pool for the current PE */
-    pool = &(MainRegTable.rSparks); // generalise to cap = &MainRegTable
+    pool = &(cap.r.rSparks); //  cap = (old) MainCap
 
     IF_DEBUG(scheduler, 
             debugBelch("--=^ %d threads, %d sparks on [%#x]\n", 
                   run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
 
-# if 1
-    if (0 && RtsFlags.ParFlags.ParStats.Full && 
-       t && LastTSO && t->id != LastTSO->id && 
-       LastTSO->why_blocked == NotBlocked && 
-       LastTSO->what_next != ThreadComplete) {
-      // if previously scheduled TSO not blocked we have to record the context switch
-      DumpVeryRawGranEvent(TimeOfLastYield, CURRENT_PROC, CURRENT_PROC,
-                          GR_DESCHEDULE, LastTSO, (StgClosure *)NULL, 0, 0);
-    }
+    IF_PAR_DEBUG(fish,
+            debugBelch("--=^ %d threads, %d sparks on [%#x]\n", 
+                  run_queue_len(), spark_queue_len(pool), CURRENT_PROC));
 
     if (RtsFlags.ParFlags.ParStats.Full && 
-       (emitSchedule /* forced emit */ ||
-        (t && LastTSO && t->id != LastTSO->id))) {
+       (t->par.sparkname != (StgInt)0) && // only log spark generated threads
+       (emitSchedule || // forced emit
+         (t && LastTSO && t->id != LastTSO->id))) {
       /* 
         we are running a different TSO, so write a schedule event to log file
         NB: If we use fair scheduling we also have to write  a deschedule 
@@ -814,134 +1359,24 @@ schedule( StgMainThread *mainThread USED_WHEN_RTS_SUPPORTS_THREADS,
             previous tso has blocked whenever we switch to another tso, so
             we don't need it in GUM for now
       */
+      IF_PAR_DEBUG(fish, // schedule,
+                  debugBelch("____ scheduling spark generated thread %d (%lx) (%lx) via a forced emit\n",t->id,t,t->par.sparkname));
+
       DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
                       GR_SCHEDULE, t, (StgClosure *)NULL, 0, 0);
       emitSchedule = rtsFalse;
     }
-     
-# endif
-#else /* !GRAN && !PAR */
-  
-    // grab a thread from the run queue
-    ASSERT(run_queue_hd != END_TSO_QUEUE);
-    POP_RUN_QUEUE(t);
-
-    // Sanity check the thread we're about to run.  This can be
-    // expensive if there is lots of thread switching going on...
-    IF_DEBUG(sanity,checkTSO(t));
-#endif
-
-#ifdef THREADED_RTS
-    {
-      StgMainThread *m = t->main;
-      
-      if(m)
-      {
-       if(m == mainThread)
-       {
-         IF_DEBUG(scheduler,
-           sched_belch("### Running thread %d in bound thread", t->id));
-         // yes, the Haskell thread is bound to the current native thread
-       }
-       else
-       {
-         IF_DEBUG(scheduler,
-           sched_belch("### thread %d bound to another OS thread", t->id));
-         // no, bound to a different Haskell thread: pass to that thread
-         PUSH_ON_RUN_QUEUE(t);
-         passCapability(&m->bound_thread_cond);
-         continue;
-       }
-      }
-      else
-      {
-       if(mainThread != NULL)
-        // The thread we want to run is bound.
-       {
-         IF_DEBUG(scheduler,
-           sched_belch("### this OS thread cannot run thread %d", t->id));
-         // no, the current native thread is bound to a different
-         // Haskell thread, so pass it to any worker thread
-         PUSH_ON_RUN_QUEUE(t);
-         passCapabilityToWorker();
-         continue; 
-       }
-      }
-    }
-#endif
-
-    cap->r.rCurrentTSO = t;
-    
-    /* context switches are now initiated by the timer signal, unless
-     * the user specified "context switch as often as possible", with
-     * +RTS -C0
-     */
-    if ((RtsFlags.ConcFlags.ctxtSwitchTicks == 0
-        && (run_queue_hd != END_TSO_QUEUE
-            || blocked_queue_hd != END_TSO_QUEUE
-            || sleeping_queue != END_TSO_QUEUE)))
-       context_switch = 1;
-
-run_thread:
-
-    RELEASE_LOCK(&sched_mutex);
-
-    IF_DEBUG(scheduler, sched_belch("-->> running thread %ld %s ...", 
-                             (long)t->id, whatNext_strs[t->what_next]));
-
-#ifdef PROFILING
-    startHeapProfTimer();
+}     
 #endif
 
-    /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-    /* Run the current thread 
-     */
-    prev_what_next = t->what_next;
-
-    errno = t->saved_errno;
-
-    switch (prev_what_next) {
-
-    case ThreadKilled:
-    case ThreadComplete:
-       /* Thread already finished, return to scheduler. */
-       ret = ThreadFinished;
-       break;
-
-    case ThreadRunGHC:
-       ret = StgRun((StgFunPtr) stg_returnToStackTop, &cap->r);
-       break;
-
-    case ThreadInterpret:
-       ret = interpretBCO(cap);
-       break;
-
-    default:
-      barf("schedule: invalid what_next field");
-    }
-
-    // The TSO might have moved, so find the new location:
-    t = cap->r.rCurrentTSO;
-
-    // And save the current errno in this thread.
-    t->saved_errno = errno;
+/* ----------------------------------------------------------------------------
+ * After running a thread...
+ * ASSUMES: sched_mutex
+ * ------------------------------------------------------------------------- */
 
-    /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
-    
-    /* Costs for the scheduler are assigned to CCS_SYSTEM */
-#ifdef PROFILING
-    stopHeapProfTimer();
-    CCCS = CCS_SYSTEM;
-#endif
-    
-    ACQUIRE_LOCK(&sched_mutex);
-    
-#ifdef RTS_SUPPORTS_THREADS
-    IF_DEBUG(scheduler,debugBelch("sched (task %p): ", osThreadId()););
-#elif !defined(GRAN) && !defined(PAR)
-    IF_DEBUG(scheduler,debugBelch("sched: "););
-#endif
-    
+static void
+schedulePostRunThread(void)
+{
 #if defined(PAR)
     /* HACK 675: if the last thread didn't yield, make sure to print a 
        SCHEDULE event to the log file when StgRunning the next thread, even
@@ -950,122 +1385,193 @@ run_thread:
     TimeOfLastYield = CURRENT_TIME;
 #endif
 
-    switch (ret) {
+  /* some statistics gathering in the parallel case */
+
+#if defined(GRAN) || defined(PAR) || defined(EDEN)
+  switch (ret) {
     case HeapOverflow:
-#if defined(GRAN)
+# if defined(GRAN)
       IF_DEBUG(gran, DumpGranEvent(GR_DESCHEDULE, t));
       globalGranStats.tot_heapover++;
-#elif defined(PAR)
+# elif defined(PAR)
       globalParStats.tot_heapover++;
-#endif
+# endif
+      break;
 
-      // did the task ask for a large block?
-      if (cap->r.rHpAlloc > BLOCK_SIZE) {
-         // if so, get one and push it on the front of the nursery.
-         bdescr *bd;
-         nat blocks;
-         
-         blocks = (nat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
+     case StackOverflow:
+# if defined(GRAN)
+      IF_DEBUG(gran, 
+              DumpGranEvent(GR_DESCHEDULE, t));
+      globalGranStats.tot_stackover++;
+# elif defined(PAR)
+      // IF_DEBUG(par, 
+      // DumpGranEvent(GR_DESCHEDULE, t);
+      globalParStats.tot_stackover++;
+# endif
+      break;
 
-         IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped: requesting a large block (size %d)\n", 
-                                  (long)t->id, whatNext_strs[t->what_next], blocks));
+    case ThreadYielding:
+# if defined(GRAN)
+      IF_DEBUG(gran, 
+              DumpGranEvent(GR_DESCHEDULE, t));
+      globalGranStats.tot_yields++;
+# elif defined(PAR)
+      // IF_DEBUG(par, 
+      // DumpGranEvent(GR_DESCHEDULE, t);
+      globalParStats.tot_yields++;
+# endif
+      break; 
 
-         // don't do this if it would push us over the
-         // alloc_blocks_lim limit; we'll GC first.
-         if (alloc_blocks + blocks < alloc_blocks_lim) {
+    case ThreadBlocked:
+# if defined(GRAN)
+      IF_DEBUG(scheduler,
+              debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: ", 
+                         t->id, t, whatNext_strs[t->what_next], t->block_info.closure, 
+                         (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
+              if (t->block_info.closure!=(StgClosure*)NULL)
+                print_bq(t->block_info.closure);
+              debugBelch("\n"));
 
-             alloc_blocks += blocks;
-             bd = allocGroup( blocks );
+      // ??? needed; should emit block before
+      IF_DEBUG(gran, 
+              DumpGranEvent(GR_DESCHEDULE, t)); 
+      prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
+      /*
+       ngoq Dogh!
+      ASSERT(procStatus[CurrentProc]==Busy || 
+             ((procStatus[CurrentProc]==Fetching) && 
+             (t->block_info.closure!=(StgClosure*)NULL)));
+      if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
+         !(!RtsFlags.GranFlags.DoAsyncFetch &&
+           procStatus[CurrentProc]==Fetching)) 
+       procStatus[CurrentProc] = Idle;
+      */
+# elif defined(PAR)
+//++PAR++  blockThread() writes the event (change?)
+# endif
+    break;
 
-             // link the new group into the list
-             bd->link = cap->r.rCurrentNursery;
-             bd->u.back = cap->r.rCurrentNursery->u.back;
-             if (cap->r.rCurrentNursery->u.back != NULL) {
-                 cap->r.rCurrentNursery->u.back->link = bd;
-             } else {
-                 ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
-                        g0s0->blocks == cap->r.rNursery);
-                 cap->r.rNursery = g0s0->blocks = bd;
-             }           
-             cap->r.rCurrentNursery->u.back = bd;
-
-             // initialise it as a nursery block.  We initialise the
-             // step, gen_no, and flags field of *every* sub-block in
-             // this large block, because this is easier than making
-             // sure that we always find the block head of a large
-             // block whenever we call Bdescr() (eg. evacuate() and
-             // isAlive() in the GC would both have to do this, at
-             // least).
-             { 
-                 bdescr *x;
-                 for (x = bd; x < bd + blocks; x++) {
-                     x->step = g0s0;
-                     x->gen_no = 0;
-                     x->flags = 0;
-                 }
-             }
+    case ThreadFinished:
+      break;
 
-             // don't forget to update the block count in g0s0.
-             g0s0->n_blocks += blocks;
-             // This assert can be a killer if the app is doing lots
-             // of large block allocations.
-             ASSERT(countBlocks(g0s0->blocks) == g0s0->n_blocks);
-
-             // now update the nursery to point to the new block
-             cap->r.rCurrentNursery = bd;
-
-             // we might be unlucky and have another thread get on the
-             // run queue before us and steal the large block, but in that
-             // case the thread will just end up requesting another large
-             // block.
-             PUSH_ON_RUN_QUEUE(t);
-             break;
-         }
-      }
+    default:
+      barf("parGlobalStats: unknown return code");
+      break;
+    }
+#endif
+}
 
-      /* make all the running tasks block on a condition variable,
-       * maybe set context_switch and wait till they all pile in,
-       * then have them wait on a GC condition variable.
-       */
-      IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n", 
-                              (long)t->id, whatNext_strs[t->what_next]));
-      threadPaused(t);
+/* -----------------------------------------------------------------------------
+ * Handle a thread that returned to the scheduler with ThreadHeapOverflow
+ * ASSUMES: sched_mutex
+ * -------------------------------------------------------------------------- */
+
+static rtsBool
+scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
+{
+    // did the task ask for a large block?
+    if (cap->r.rHpAlloc > BLOCK_SIZE) {
+       // if so, get one and push it on the front of the nursery.
+       bdescr *bd;
+       lnat blocks;
+       
+       blocks = (lnat)BLOCK_ROUND_UP(cap->r.rHpAlloc) / BLOCK_SIZE;
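+       // Worked example (illustrative figures only, assuming the usual
+       // 4096-byte BLOCK_SIZE): a 10000-byte rHpAlloc rounds up to
+       // 12288 bytes, i.e. blocks == 3.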
+       
+       IF_DEBUG(scheduler,
+                debugBelch("--<< thread %ld (%s) stopped: requesting a large block (size %ld)\n", 
+                           (long)t->id, whatNext_strs[t->what_next], blocks));
+       
+       // don't do this if the nursery is (nearly) full; we'll GC first.
+       if (cap->r.rCurrentNursery->link != NULL ||
+           cap->r.rNursery->n_blocks == 1) {  // paranoia to prevent infinite loop
+                                              // if the nursery has only one block.
+           
+           ACQUIRE_SM_LOCK
+           bd = allocGroup( blocks );
+           RELEASE_SM_LOCK
+           cap->r.rNursery->n_blocks += blocks;
+           
+           // link the new group into the list
+           bd->link = cap->r.rCurrentNursery;
+           bd->u.back = cap->r.rCurrentNursery->u.back;
+           if (cap->r.rCurrentNursery->u.back != NULL) {
+               cap->r.rCurrentNursery->u.back->link = bd;
+           } else {
+#if !defined(SMP)
+               ASSERT(g0s0->blocks == cap->r.rCurrentNursery &&
+                      g0s0 == cap->r.rNursery);
+#endif
+               cap->r.rNursery->blocks = bd;
+           }             
+           cap->r.rCurrentNursery->u.back = bd;
+           
+           // initialise it as a nursery block.  We initialise the
+           // step, gen_no, and flags field of *every* sub-block in
+           // this large block, because this is easier than making
+           // sure that we always find the block head of a large
+           // block whenever we call Bdescr() (eg. evacuate() and
+           // isAlive() in the GC would both have to do this, at
+           // least).
+           { 
+               bdescr *x;
+               for (x = bd; x < bd + blocks; x++) {
+                   x->step = cap->r.rNursery;
+                   x->gen_no = 0;
+                   x->flags = 0;
+               }
+           }
+           
+           // This assert can be a killer if the app is doing lots
+           // of large block allocations.
+           IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
+           
+           // now update the nursery to point to the new block
+           cap->r.rCurrentNursery = bd;
+           
+           // we might be unlucky and have another thread get on the
+           // run queue before us and steal the large block, but in that
+           // case the thread will just end up requesting another large
+           // block.
+           PUSH_ON_RUN_QUEUE(t);
+           return rtsFalse;  /* not actually GC'ing */
+       }
+    }
+    
+    IF_DEBUG(scheduler,
+            debugBelch("--<< thread %ld (%s) stopped: HeapOverflow\n", 
+                       (long)t->id, whatNext_strs[t->what_next]));
 #if defined(GRAN)
-      ASSERT(!is_on_queue(t,CurrentProc));
-#elif defined(PAR)
-      /* Currently we emit a DESCHEDULE event before GC in GUM.
-         ToDo: either add separate event to distinguish SYSTEM time from rest
-              or just nuke this DESCHEDULE (and the following SCHEDULE) */
-      if (0 && RtsFlags.ParFlags.ParStats.Full) {
+    ASSERT(!is_on_queue(t,CurrentProc));
+#elif defined(PARALLEL_HASKELL)
+    /* Currently we emit a DESCHEDULE event before GC in GUM.
+       ToDo: either add separate event to distinguish SYSTEM time from rest
+       or just nuke this DESCHEDULE (and the following SCHEDULE) */
+    if (0 && RtsFlags.ParFlags.ParStats.Full) {
        DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
                         GR_DESCHEDULE, t, (StgClosure *)NULL, 0, 0);
        emitSchedule = rtsTrue;
-      }
+    }
 #endif
       
-      ready_to_gc = rtsTrue;
-      context_switch = 1;              /* stop other threads ASAP */
-      PUSH_ON_RUN_QUEUE(t);
-      /* actual GC is done at the end of the while loop */
-      break;
-      
-    case StackOverflow:
-#if defined(GRAN)
-      IF_DEBUG(gran, 
-              DumpGranEvent(GR_DESCHEDULE, t));
-      globalGranStats.tot_stackover++;
-#elif defined(PAR)
-      // IF_DEBUG(par, 
-      // DumpGranEvent(GR_DESCHEDULE, t);
-      globalParStats.tot_stackover++;
-#endif
-      IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped, StackOverflow\n", 
-                              (long)t->id, whatNext_strs[t->what_next]));
-      /* just adjust the stack for this thread, then pop it back
-       * on the run queue.
-       */
-      threadPaused(t);
-      { 
+    PUSH_ON_RUN_QUEUE(t);
+    return rtsTrue;
+    /* actual GC is done at the end of the while loop in schedule() */
+}
+
+/* -----------------------------------------------------------------------------
+ * Handle a thread that returned to the scheduler with ThreadStackOverflow
+ * ASSUMES: sched_mutex
+ * -------------------------------------------------------------------------- */
+
+static void
+scheduleHandleStackOverflow( StgTSO *t)
+{
+    IF_DEBUG(scheduler,debugBelch("--<< thread %ld (%s) stopped, StackOverflow\n", 
+                                 (long)t->id, whatNext_strs[t->what_next]));
+    /* just adjust the stack for this thread, then pop it back
+     * on the run queue.
+     */
+    { 
        /* enlarge the stack */
        StgTSO *new_t = threadStackOverflow(t);
        
@@ -1077,172 +1583,211 @@ run_thread:
            t->main->tso = new_t;
        }
        PUSH_ON_RUN_QUEUE(new_t);
-      }
-      break;
+    }
+}
 
-    case ThreadYielding:
-      // Reset the context switch flag.  We don't do this just before
-      // running the thread, because that would mean we would lose ticks
-      // during GC, which can lead to unfair scheduling (a thread hogs
-      // the CPU because the tick always arrives during GC).  This way
-      // penalises threads that do a lot of allocation, but that seems
-      // better than the alternative.
-      context_switch = 0;
+/* -----------------------------------------------------------------------------
+ * Handle a thread that returned to the scheduler with ThreadYielding
+ * ASSUMES: sched_mutex
+ * -------------------------------------------------------------------------- */
 
+static rtsBool
+scheduleHandleYield( StgTSO *t, nat prev_what_next )
+{
+    // Reset the context switch flag.  We don't do this just before
+    // running the thread, because that would mean we would lose ticks
+    // during GC, which can lead to unfair scheduling (a thread hogs
+    // the CPU because the tick always arrives during GC).  This way
+    // penalises threads that do a lot of allocation, but that seems
+    // better than the alternative.
+    context_switch = 0;
+    
+    /* put the thread back on the run queue.  Then, if we're ready to
+     * GC, check whether this is the last task to stop.  If so, wake
+     * up the GC thread.  getThread will block during a GC until the
+     * GC is finished.
+     */
+    IF_DEBUG(scheduler,
+            if (t->what_next != prev_what_next) {
+                debugBelch("--<< thread %ld (%s) stopped to switch evaluators\n", 
+                           (long)t->id, whatNext_strs[t->what_next]);
+            } else {
+                debugBelch("--<< thread %ld (%s) stopped, yielding\n",
+                           (long)t->id, whatNext_strs[t->what_next]);
+            }
+       );
+    
+    IF_DEBUG(sanity,
+            //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
+            checkTSO(t));
+    ASSERT(t->link == END_TSO_QUEUE);
+    
+    // Shortcut if we're just switching evaluators: don't bother
+    // doing stack squeezing (which can be expensive), just run the
+    // thread.
+    if (t->what_next != prev_what_next) {
+       return rtsTrue;
+    }
+    
 #if defined(GRAN)
-      IF_DEBUG(gran, 
-              DumpGranEvent(GR_DESCHEDULE, t));
-      globalGranStats.tot_yields++;
-#elif defined(PAR)
-      // IF_DEBUG(par, 
-      // DumpGranEvent(GR_DESCHEDULE, t);
-      globalParStats.tot_yields++;
+    ASSERT(!is_on_queue(t,CurrentProc));
+      
+    IF_DEBUG(sanity,
+            //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
+            checkThreadQsSanity(rtsTrue));
+
 #endif
-      /* put the thread back on the run queue.  Then, if we're ready to
-       * GC, check whether this is the last task to stop.  If so, wake
-       * up the GC thread.  getThread will block during a GC until the
-       * GC is finished.
-       */
-      IF_DEBUG(scheduler,
-               if (t->what_next != prev_what_next) {
-                  debugBelch("--<< thread %ld (%s) stopped to switch evaluators\n", 
-                        (long)t->id, whatNext_strs[t->what_next]);
-               } else {
-                   debugBelch("--<< thread %ld (%s) stopped, yielding\n",
-                        (long)t->id, whatNext_strs[t->what_next]);
-               }
-               );
-
-      IF_DEBUG(sanity,
-              //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
-              checkTSO(t));
-      ASSERT(t->link == END_TSO_QUEUE);
-
-      // Shortcut if we're just switching evaluators: don't bother
-      // doing stack squeezing (which can be expensive), just run the
-      // thread.
-      if (t->what_next != prev_what_next) {
-         goto run_thread;
-      }
 
-      threadPaused(t);
+    addToRunQueue(t);
 
 #if defined(GRAN)
-      ASSERT(!is_on_queue(t,CurrentProc));
-
-      IF_DEBUG(sanity,
-              //debugBelch("&& Doing sanity check on all ThreadQueues (and their TSOs).");
-              checkThreadQsSanity(rtsTrue));
-#endif
-
-#if defined(PAR)
-      if (RtsFlags.ParFlags.doFairScheduling) { 
-       /* this does round-robin scheduling; good for concurrency */
-       APPEND_TO_RUN_QUEUE(t);
-      } else {
-       /* this does unfair scheduling; good for parallelism */
-       PUSH_ON_RUN_QUEUE(t);
-      }
-#else
-      // this does round-robin scheduling; good for concurrency
-      APPEND_TO_RUN_QUEUE(t);
+    /* add a ContinueThread event to actually process the thread */
+    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
+             ContinueThread,
+             t, (StgClosure*)NULL, (rtsSpark*)NULL);
+    IF_GRAN_DEBUG(bq, 
+                 debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n");
+                 G_EVENTQ(0);
+                 G_CURR_THREADQ(0));
 #endif
+    return rtsFalse;
+}
 
-#if defined(GRAN)
-      /* add a ContinueThread event to actually process the thread */
-      new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
-               ContinueThread,
-               t, (StgClosure*)NULL, (rtsSpark*)NULL);
-      IF_GRAN_DEBUG(bq, 
-              debugBelch("GRAN: eventq and runnableq after adding yielded thread to queue again:\n");
-              G_EVENTQ(0);
-              G_CURR_THREADQ(0));
-#endif /* GRAN */
-      break;
+/* -----------------------------------------------------------------------------
+ * Handle a thread that returned to the scheduler with ThreadBlocked
+ * ASSUMES: sched_mutex
+ * -------------------------------------------------------------------------- */
 
-    case ThreadBlocked:
+static void
+scheduleHandleThreadBlocked( StgTSO *t
+#if !defined(GRAN) && !defined(DEBUG)
+    STG_UNUSED
+#endif
+    )
+{
 #if defined(GRAN)
-      IF_DEBUG(scheduler,
-              debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: \n", 
-                              t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
-              if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
-
-      // ??? needed; should emit block before
-      IF_DEBUG(gran, 
-              DumpGranEvent(GR_DESCHEDULE, t)); 
-      prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
-      /*
-       ngoq Dogh!
+    IF_DEBUG(scheduler,
+            debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p [PE %d] with BQ: \n", 
+                       t->id, t, whatNext_strs[t->what_next], t->block_info.closure, (t->block_info.closure==(StgClosure*)NULL ? 99 : where_is(t->block_info.closure)));
+            if (t->block_info.closure!=(StgClosure*)NULL) print_bq(t->block_info.closure));
+    
+    // ??? needed; should emit block before
+    IF_DEBUG(gran, 
+            DumpGranEvent(GR_DESCHEDULE, t)); 
+    prune_eventq(t, (StgClosure *)NULL); // prune ContinueThreads for t
+    /*
+      ngoq Dogh!
       ASSERT(procStatus[CurrentProc]==Busy || 
-             ((procStatus[CurrentProc]==Fetching) && 
-             (t->block_info.closure!=(StgClosure*)NULL)));
+      ((procStatus[CurrentProc]==Fetching) && 
+      (t->block_info.closure!=(StgClosure*)NULL)));
       if (run_queue_hds[CurrentProc] == END_TSO_QUEUE &&
-         !(!RtsFlags.GranFlags.DoAsyncFetch &&
-           procStatus[CurrentProc]==Fetching)) 
-       procStatus[CurrentProc] = Idle;
-      */
+      !(!RtsFlags.GranFlags.DoAsyncFetch &&
+      procStatus[CurrentProc]==Fetching)) 
+      procStatus[CurrentProc] = Idle;
+    */
 #elif defined(PAR)
-      IF_DEBUG(scheduler,
-              debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: \n", 
-                    t->id, t, whatNext_strs[t->what_next], t->block_info.closure));
-      IF_PAR_DEBUG(bq,
+    IF_DEBUG(scheduler,
+            debugBelch("--<< thread %ld (%p; %s) stopped, blocking on node %p with BQ: \n", 
+                       t->id, t, whatNext_strs[t->what_next], t->block_info.closure));
+    IF_PAR_DEBUG(bq,
+                
+                if (t->block_info.closure!=(StgClosure*)NULL) 
+                print_bq(t->block_info.closure));
+    
+    /* Send a fetch (if BlockedOnGA) and dump event to log file */
+    blockThread(t);
+    
+    /* whatever we schedule next, we must log that schedule */
+    emitSchedule = rtsTrue;
+    
+#else /* !GRAN */
 
-                  if (t->block_info.closure!=(StgClosure*)NULL) 
-                    print_bq(t->block_info.closure));
+      // We don't need to do anything.  The thread is blocked, and it
+      // has tidied up its stack and placed itself on whatever queue
+      // it needs to be on.
 
-      /* Send a fetch (if BlockedOnGA) and dump event to log file */
-      blockThread(t);
+#if !defined(SMP)
+    ASSERT(t->why_blocked != NotBlocked);
+            // This might not be true under SMP: we don't have
+            // exclusive access to this TSO, so someone might have
+            // woken it up by now.  This actually happens: try
+            // conc023 +RTS -N2.
+#endif
 
-      /* whatever we schedule next, we must log that schedule */
-      emitSchedule = rtsTrue;
+    IF_DEBUG(scheduler,
+            debugBelch("--<< thread %d (%s) stopped: ", 
+                       t->id, whatNext_strs[t->what_next]);
+            printThreadBlockage(t);
+            debugBelch("\n"));
+    
+    /* Only for dumping event to log file 
+       ToDo: do I need this in GranSim, too?
+       blockThread(t);
+    */
+#endif
+}
 
-#else /* !GRAN */
-      /* don't need to do anything.  Either the thread is blocked on
-       * I/O, in which case we'll have called addToBlockedQueue
-       * previously, or it's blocked on an MVar or Blackhole, in which
-       * case it'll be on the relevant queue already.
-       */
-      IF_DEBUG(scheduler,
-              debugBelch("--<< thread %d (%s) stopped: ", 
-                      t->id, whatNext_strs[t->what_next]);
-              printThreadBlockage(t);
-              debugBelch("\n"));
+/* -----------------------------------------------------------------------------
+ * Handle a thread that returned to the scheduler with ThreadFinished
+ * ASSUMES: sched_mutex
+ * -------------------------------------------------------------------------- */
 
-      /* Only for dumping event to log file 
-        ToDo: do I need this in GranSim, too?
-      blockThread(t);
-      */
-#endif
-      threadPaused(t);
-      break;
+static rtsBool
+scheduleHandleThreadFinished( StgMainThread *mainThread
+                             USED_WHEN_RTS_SUPPORTS_THREADS,
+                             Capability *cap,
+                             StgTSO *t )
+{
+    /* Need to check whether this was a main thread, and if so,
+     * return with the return value.
+     *
+     * We also end up here if the thread kills itself with an
+     * uncaught exception, see Exception.cmm.
+     */
+    IF_DEBUG(scheduler,debugBelch("--++ thread %d (%s) finished\n", 
+                                 t->id, whatNext_strs[t->what_next]));
 
-    case ThreadFinished:
-      /* Need to check whether this was a main thread, and if so, signal
-       * the task that started it with the return value.  If we have no
-       * more main threads, we probably need to stop all the tasks until
-       * we get a new one.
-       */
-      /* We also end up here if the thread kills itself with an
-       * uncaught exception, see Exception.hc.
-       */
-      IF_DEBUG(scheduler,debugBelch("--++ thread %d (%s) finished\n", 
-                              t->id, whatNext_strs[t->what_next]));
 #if defined(GRAN)
       endThread(t, CurrentProc); // clean-up the thread
-#elif defined(PAR)
+#elif defined(PARALLEL_HASKELL)
       /* For now all are advisory -- HWL */
       //if(t->priority==AdvisoryPriority) ??
-      advisory_thread_count--;
+      advisory_thread_count--; // JB: Caution with this counter, buggy!
       
-# ifdef DIST
+# if defined(DIST)
       if(t->dist.priority==RevalPriority)
        FinishReval(t);
 # endif
-      
+    
+# if defined(EDENOLD)
+      // the thread could still have an outport... (BUG)
+      if (t->eden.outport != -1) {
+      // delete the outport for the tso which has finished...
+       IF_PAR_DEBUG(eden_ports,
+                  debugBelch("WARNING: Scheduler removes outport %d for TSO %d.\n",
+                             t->eden.outport, t->id));
+       deleteOPT(t);
+      }
+      // thread still in the process (HEAVY BUG! since outport has just been closed...)
+      if (t->eden.epid != -1) {
+       IF_PAR_DEBUG(eden_ports,
+                  debugBelch("WARNING: Scheduler removes TSO %d from process %d .\n",
+                          t->id, t->eden.epid));
+       removeTSOfromProcess(t);
+      }
+# endif 
+
+# if defined(PAR)
       if (RtsFlags.ParFlags.ParStats.Full &&
          !RtsFlags.ParFlags.ParStats.Suppressed) 
        DumpEndEvent(CURRENT_PROC, t, rtsFalse /* not mandatory */);
-#endif
+
+      //  t->par only contains statistics: left out for now...
+      IF_PAR_DEBUG(fish,
+                  debugBelch("**** end thread: ended sparked thread %d (%lx); sparkname: %lx\n",
+                             t->id,t,t->par.sparkname));
+# endif
+#endif // PARALLEL_HASKELL
 
       //
       // Check whether the thread that just completed was a main
@@ -1275,7 +1820,7 @@ run_thread:
              if (mainThread->ret) {
                  *(mainThread->ret) = NULL;
              }
-             if (was_interrupted) {
+             if (interrupted) {
                  mainThread->stat = Interrupted;
              } else {
                  mainThread->stat = Killed;
@@ -1285,15 +1830,16 @@ run_thread:
          removeThreadLabel((StgWord)mainThread->tso->id);
 #endif
          if (mainThread->prev == NULL) {
+             ASSERT(mainThread == main_threads);
              main_threads = mainThread->link;
          } else {
              mainThread->prev->link = mainThread->link;
          }
          if (mainThread->link != NULL) {
-             mainThread->link->prev = NULL;
+             mainThread->link->prev = mainThread->prev;
          }
          releaseCapability(cap);
-         return;
+         return rtsTrue; // tells schedule() to return
       }
 
 #ifdef RTS_SUPPORTS_THREADS
@@ -1309,13 +1855,17 @@ run_thread:
          APPEND_TO_RUN_QUEUE(t);
       }
 #endif
-      break;
+      return rtsFalse;
+}
 
-    default:
-      barf("schedule: invalid thread return code %d", (int)ret);
-    }
+/* -----------------------------------------------------------------------------
+ * Perform a heap census, if PROFILING
+ * -------------------------------------------------------------------------- */
 
-#ifdef PROFILING
+static rtsBool
+scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED )
+{
+#if defined(PROFILING)
     // When we have +RTS -i0 and we're heap profiling, do a census at
     // every GC.  This lets us get repeatable runs for debugging.
     if (performHeapProfile ||
@@ -1324,48 +1874,125 @@ run_thread:
        GarbageCollect(GetRoots, rtsTrue);
        heapCensus();
        performHeapProfile = rtsFalse;
-       ready_to_gc = rtsFalse; // we already GC'd
+       return rtsTrue;  // true <=> we already GC'd
     }
 #endif
+    return rtsFalse;
+}
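+
+/* For reference: the census-at-every-GC behaviour above is driven purely by
+ * RTS flags.  A typical (illustrative) invocation of a profiled binary would
+ * be something like
+ *
+ *     ./MyProg +RTS -hc -i0 -RTS
+ *
+ * where -hc asks for a heap profile by cost centre and -i0 sets the census
+ * interval to zero, so that the check above fires at every GC.  The program
+ * name is just a placeholder.
+ */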
 
-    if (ready_to_gc) {
-      /* everybody back, start the GC.
-       * Could do it in this thread, or signal a condition var
-       * to do it in another thread.  Either way, we need to
-       * broadcast on gc_pending_cond afterward.
-       */
-#if defined(RTS_SUPPORTS_THREADS)
-      IF_DEBUG(scheduler,sched_belch("doing GC"));
+/* -----------------------------------------------------------------------------
+ * Perform a garbage collection if necessary
+ * ASSUMES: sched_mutex
+ * -------------------------------------------------------------------------- */
+
+static void
+scheduleDoGC( rtsBool force_major )
+{
+    StgTSO *t;
+#ifdef SMP
+    Capability *cap;
+    static rtsBool waiting_for_gc;
+    int n_capabilities = RtsFlags.ParFlags.nNodes - 1; 
+           // subtract one because we're already holding one.
+    Capability *caps[n_capabilities];
 #endif
-      GarbageCollect(GetRoots,rtsFalse);
-      ready_to_gc = rtsFalse;
-#if defined(GRAN)
-      /* add a ContinueThread event to continue execution of current thread */
-      new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
-               ContinueThread,
-               t, (StgClosure*)NULL, (rtsSpark*)NULL);
-      IF_GRAN_DEBUG(bq, 
-              debugBelch("GRAN: eventq and runnableq after Garbage collection:\n\n");
-              G_EVENTQ(0);
-              G_CURR_THREADQ(0));
-#endif /* GRAN */
+
+#ifdef SMP
+    // In order to GC, there must be no threads running Haskell code.
+    // Therefore, the GC thread needs to hold *all* the capabilities,
+    // and release them after the GC has completed.  
+    //
+    // This seems to be the simplest way: previous attempts involved
+    // making all the threads with capabilities give up their
+    // capabilities and sleep except for the *last* one, which
+    // actually did the GC.  But it's quite hard to arrange for all
+    // the other tasks to sleep and stay asleep.
+    //
+    // This does mean that there will be multiple entries in the 
+    // thread->capability hash table for the current thread, but
+    // they will be removed as normal when the capabilities are
+    // released again.
+    //
+       
+    // Someone else is already trying to GC
+    if (waiting_for_gc) return;
+    waiting_for_gc = rtsTrue;
+
+    while (n_capabilities > 0) {
+       IF_DEBUG(scheduler, sched_belch("ready_to_gc, grabbing all the capabilies (%d left)", n_capabilities));
+       waitForReturnCapability(&sched_mutex, &cap);
+       n_capabilities--;
+       caps[n_capabilities] = cap;
     }
 
-#if defined(GRAN)
-  next_thread:
-    IF_GRAN_DEBUG(unused,
-                 print_eventq(EventHd));
+    waiting_for_gc = rtsFalse;
+#endif
 
-    event = get_next_event();
-#elif defined(PAR)
-  next_thread:
-    /* ToDo: wait for next message to arrive rather than busy wait */
-#endif /* GRAN */
+    /* Kick any transactions which are invalid back to their
+     * atomically frames.  When next scheduled they will try to
+     * commit; this commit will fail and they will retry.
+     */
+    { 
+       StgTSO *next;
+
+       for (t = all_threads; t != END_TSO_QUEUE; t = next) {
+           if (t->what_next == ThreadRelocated) {
+               next = t->link;
+           } else {
+               next = t->global_link;
+               if (t -> trec != NO_TREC && t -> why_blocked == NotBlocked) {
+                   if (!stmValidateNestOfTransactions (t -> trec)) {
+                       IF_DEBUG(stm, sched_belch("trec %p found wasting its time", t));
+                       
+                       // strip the stack back to the ATOMICALLY_FRAME, aborting
+                       // the (nested) transaction, and saving the stack of any
+                       // partially-evaluated thunks on the heap.
+                       raiseAsync_(t, NULL, rtsTrue);
+                       
+#ifdef REG_R1
+                       ASSERT(get_itbl((StgClosure *)t->sp)->type == ATOMICALLY_FRAME);
+#endif
+                   }
+               }
+           }
+       }
+    }
+    
+    // so this happens periodically:
+    scheduleCheckBlackHoles();
+    
+    IF_DEBUG(scheduler, printAllThreads());
 
-  } /* end of while(1) */
+    /* everybody back, start the GC.
+     * Could do it in this thread, or signal a condition var
+     * to do it in another thread.  Either way, we need to
+     * broadcast on gc_pending_cond afterward.
+     */
+#if defined(RTS_SUPPORTS_THREADS)
+    IF_DEBUG(scheduler,sched_belch("doing GC"));
+#endif
+    GarbageCollect(GetRoots, force_major);
+    
+#if defined(SMP)
+    {
+       // release our stash of capabilities.
+       nat i;
+       for (i = 0; i < RtsFlags.ParFlags.nNodes-1; i++) {
+           releaseCapability(caps[i]);
+       }
+    }
+#endif
 
-  IF_PAR_DEBUG(verbose,
-              debugBelch("== Leaving schedule() after having received Finish\n"));
+#if defined(GRAN)
+    /* add a ContinueThread event to continue execution of current thread */
+    new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc],
+             ContinueThread,
+             t, (StgClosure*)NULL, (rtsSpark*)NULL);
+    IF_GRAN_DEBUG(bq, 
+                 debugBelch("GRAN: eventq and runnableq after Garbage collection:\n\n");
+                 G_EVENTQ(0);
+                 G_CURR_THREADQ(0));
+#endif /* GRAN */
 }
 
 /* ---------------------------------------------------------------------------
@@ -1376,7 +2003,7 @@ run_thread:
 StgBool
 rtsSupportsBoundThreads(void)
 {
-#ifdef THREADED_RTS
+#if defined(RTS_SUPPORTS_THREADS)
   return rtsTrue;
 #else
   return rtsFalse;
@@ -1390,7 +2017,7 @@ rtsSupportsBoundThreads(void)
 StgBool
 isThreadBound(StgTSO* tso USED_IN_THREADED_RTS)
 {
-#ifdef THREADED_RTS
+#if defined(RTS_SUPPORTS_THREADS)
   return (tso->main != NULL);
 #endif
   return rtsFalse;
@@ -1400,7 +2027,7 @@ isThreadBound(StgTSO* tso USED_IN_THREADED_RTS)
  * Singleton fork(). Do not copy any running threads.
  * ------------------------------------------------------------------------- */
 
-#ifndef mingw32_TARGET_OS
+#ifndef mingw32_HOST_OS
 #define FORKPROCESS_PRIMOP_SUPPORTED
 #endif
 
@@ -1455,12 +2082,6 @@ forkProcess(HsStablePtr *entry
       stgFree(m);
     }
     
-# ifdef RTS_SUPPORTS_THREADS
-    resetTaskManagerAfterFork();      // tell startTask() and friends that
-    startingWorkerThread = rtsFalse;  // we have no worker threads any more
-    resetWorkerWakeupPipeAfterFork();
-# endif
-    
     rc = rts_evalStableIO(entry, NULL);  // run the action
     rts_checkSchedStatus("forkProcess",rc);
     
@@ -1490,8 +2111,12 @@ deleteAllThreads ( void )
   StgTSO* t, *next;
   IF_DEBUG(scheduler,sched_belch("deleting all threads"));
   for (t = all_threads; t != END_TSO_QUEUE; t = next) {
-      next = t->global_link;
-      deleteThread(t);
+      if (t->what_next == ThreadRelocated) {
+         next = t->link;
+      } else {
+         next = t->global_link;
+         deleteThread(t);
+      }
   }      
 
   // The run queue now contains a bunch of ThreadKilled threads.  We
@@ -1501,6 +2126,7 @@ deleteAllThreads ( void )
   // being GC'd, and we don't want the "main thread has been GC'd" panic.
 
   ASSERT(blocked_queue_hd == END_TSO_QUEUE);
+  ASSERT(blackhole_queue == END_TSO_QUEUE);
   ASSERT(sleeping_queue == END_TSO_QUEUE);
 }
 
@@ -1557,6 +2183,7 @@ suspendThread( StgRegTable *reg )
   tok = cap->r.rCurrentTSO->id;
 
   /* Hand back capability */
+  cap->r.rInHaskell = rtsFalse;
   releaseCapability(cap);
   
 #if defined(RTS_SUPPORTS_THREADS)
@@ -1566,8 +2193,6 @@ suspendThread( StgRegTable *reg )
   IF_DEBUG(scheduler, sched_belch("worker (token %d): leaving RTS", tok));
 #endif
 
-  /* Other threads _might_ be available for execution; signal this */
-  THREAD_RUNNABLE();
   RELEASE_LOCK(&sched_mutex);
   
   errno = saved_errno;
@@ -1615,17 +2240,12 @@ resumeThread( StgInt tok )
   tso->why_blocked  = NotBlocked;
 
   cap->r.rCurrentTSO = tso;
+  cap->r.rInHaskell = rtsTrue;
   RELEASE_LOCK(&sched_mutex);
   errno = saved_errno;
   return &cap->r;
 }
 
-
-/* ---------------------------------------------------------------------------
- * Static functions
- * ------------------------------------------------------------------------ */
-static void unblockThread(StgTSO *tso);
-
 /* ---------------------------------------------------------------------------
  * Comparing Thread ids.
  *
@@ -1693,12 +2313,11 @@ StgTSO *
 createThread(nat size)
 #endif
 {
-
     StgTSO *tso;
     nat stack_size;
 
     /* First check whether we should create a thread at all */
-#if defined(PAR)
+#if defined(PARALLEL_HASKELL)
   /* check that no more than RtsFlags.ParFlags.maxThreads threads are created */
   if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) {
     threadsIgnored++;
@@ -1745,6 +2364,8 @@ createThread(nat size)
                               - TSO_STRUCT_SIZEW;
   tso->sp           = (P_)&(tso->stack) + stack_size;
 
+  tso->trec = NO_TREC;
+
 #ifdef PROFILING
   tso->prof.CCCS = CCS_MAIN;
 #endif
@@ -1767,7 +2388,7 @@ createThread(nat size)
 #if defined(GRAN) 
   if (RtsFlags.GranFlags.GranSimStats.Full) 
     DumpGranEvent(GR_START,tso);
-#elif defined(PAR)
+#elif defined(PARALLEL_HASKELL)
   if (RtsFlags.ParFlags.ParStats.Full) 
     DumpGranEvent(GR_STARTQ,tso);
   /* HACk to avoid SCHEDULE 
@@ -1807,7 +2428,7 @@ createThread(nat size)
     tso->gran.clock  = 0;
 
   IF_DEBUG(gran,printTSO(tso));
-#elif defined(PAR)
+#elif defined(PARALLEL_HASKELL)
 # if defined(DEBUG)
   tso->par.magic = TSO_MAGIC; // debugging only
 # endif
@@ -1831,7 +2452,7 @@ createThread(nat size)
   globalGranStats.threads_created_on_PE[CurrentProc]++;
   globalGranStats.tot_sq_len += spark_queue_len(CurrentProc);
   globalGranStats.tot_sq_probes++;
-#elif defined(PAR)
+#elif defined(PARALLEL_HASKELL)
   // collect parallel global statistics (currently done together with GC stats)
   if (RtsFlags.ParFlags.ParStats.Global &&
       RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
@@ -1844,13 +2465,13 @@ createThread(nat size)
   IF_GRAN_DEBUG(pri,
                sched_belch("==__ schedule: Created TSO %d (%p);",
                      CurrentProc, tso, tso->id));
-#elif defined(PAR)
-    IF_PAR_DEBUG(verbose,
-                sched_belch("==__ schedule: Created TSO %d (%p); %d threads active",
-                      (long)tso->id, tso, advisory_thread_count));
+#elif defined(PARALLEL_HASKELL)
+  IF_PAR_DEBUG(verbose,
+              sched_belch("==__ schedule: Created TSO %d (%p); %d threads active",
+                          (long)tso->id, tso, advisory_thread_count));
 #else
   IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words", 
-                               (long)tso->id, (long)tso->stack_size));
+                                (long)tso->id, (long)tso->stack_size));
 #endif    
   return tso;
 }
@@ -1860,9 +2481,10 @@ createThread(nat size)
    all parallel thread creation calls should fall through the following routine.
 */
 StgTSO *
-createSparkThread(rtsSpark spark) 
+createThreadFromSpark(rtsSpark spark) 
 { StgTSO *tso;
   ASSERT(spark != (rtsSpark)NULL);
+// JB: TAKE CARE OF THIS COUNTER! BUGGY
   if (advisory_thread_count >= RtsFlags.ParFlags.maxThreads) 
   { threadsIgnored++;
     barf("{createSparkThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)",
@@ -1878,8 +2500,8 @@ createSparkThread(rtsSpark spark)
     tso->priority = AdvisoryPriority;
 #endif
     pushClosure(tso,spark);
-    PUSH_ON_RUN_QUEUE(tso);
-    advisory_thread_count++;    
+    addToRunQueue(tso);
+    advisory_thread_count++;  // JB: TAKE CARE OF THIS COUNTER! BUGGY
   }
   return tso;
 }
@@ -1889,7 +2511,7 @@ createSparkThread(rtsSpark spark)
   Turn a spark into a thread.
   ToDo: fix for SMP (needs to acquire SCHED_MUTEX!)
 */
-#if defined(PAR)
+#if 0
 StgTSO *
 activateSpark (rtsSpark spark) 
 {
@@ -1898,9 +2520,9 @@ activateSpark (rtsSpark spark)
   tso = createSparkThread(spark);
   if (RtsFlags.ParFlags.ParStats.Full) {   
     //ASSERT(run_queue_hd == END_TSO_QUEUE); // I think ...
-    IF_PAR_DEBUG(verbose,
-                debugBelch("==^^ activateSpark: turning spark of closure %p (%s) into a thread\n",
-                      (StgClosure *)spark, info_type((StgClosure *)spark)));
+      IF_PAR_DEBUG(verbose,
+                  debugBelch("==^^ activateSpark: turning spark of closure %p (%s) into a thread\n",
+                             (StgClosure *)spark, info_type((StgClosure *)spark)));
   }
   // ToDo: fwd info on local/global spark to thread -- HWL
   // tso->gran.exported =  spark->exported;
@@ -1911,11 +2533,6 @@ activateSpark (rtsSpark spark)
 }
 #endif
 
-static SchedulerStatus waitThread_(/*out*/StgMainThread* m,
-                                  Capability *initialCapability
-                                  );
-
-
 /* ---------------------------------------------------------------------------
  * scheduleThread()
  *
@@ -1926,23 +2543,20 @@ static SchedulerStatus waitThread_(/*out*/StgMainThread* m,
  * on this thread's stack before the scheduler is invoked.
  * ------------------------------------------------------------------------ */
 
-static void scheduleThread_ (StgTSO* tso);
-
 void
-scheduleThread_(StgTSO *tso)
+scheduleThreadLocked(StgTSO *tso)
 {
-  // Precondition: sched_mutex must be held.
   // The thread goes at the *end* of the run-queue, to avoid possible
   // starvation of any threads already on the queue.
   APPEND_TO_RUN_QUEUE(tso);
-  THREAD_RUNNABLE();
+  threadRunnable();
 }
 
 void
 scheduleThread(StgTSO* tso)
 {
   ACQUIRE_LOCK(&sched_mutex);
-  scheduleThread_(tso);
+  scheduleThreadLocked(tso);
   RELEASE_LOCK(&sched_mutex);
 }
 
@@ -1995,7 +2609,7 @@ scheduleWaitThread(StgTSO* tso, /*[out]*/HaskellObj* ret,
     IF_DEBUG(scheduler, sched_belch("waiting for thread (%d)", tso->id));
     
     APPEND_TO_RUN_QUEUE(tso);
-    // NB. Don't call THREAD_RUNNABLE() here, because the thread is
+    // NB. Don't call threadRunnable() here, because the thread is
     // bound and only runnable by *this* OS thread, so waking up other
     // workers will just slow things down.
 
@@ -2023,6 +2637,7 @@ initScheduler(void)
     blocked_queue_hds[i]  = END_TSO_QUEUE;
     blocked_queue_tls[i]  = END_TSO_QUEUE;
     ccalling_threadss[i]  = END_TSO_QUEUE;
+    blackhole_queue[i]    = END_TSO_QUEUE;
     sleeping_queue        = END_TSO_QUEUE;
   }
 #else
@@ -2030,6 +2645,7 @@ initScheduler(void)
   run_queue_tl      = END_TSO_QUEUE;
   blocked_queue_hd  = END_TSO_QUEUE;
   blocked_queue_tl  = END_TSO_QUEUE;
+  blackhole_queue   = END_TSO_QUEUE;
   sleeping_queue    = END_TSO_QUEUE;
 #endif 
 
@@ -2060,11 +2676,16 @@ initScheduler(void)
   initCapabilities();
   
 #if defined(RTS_SUPPORTS_THREADS)
-    /* start our haskell execution tasks */
-    startTaskManager(0,taskStart);
+  initTaskManager();
 #endif
 
-#if /* defined(SMP) ||*/ defined(PAR)
+#if defined(SMP)
+  /* eagerly start some extra workers */
+  startingWorkerThread = RtsFlags.ParFlags.nNodes;
+  startTasks(RtsFlags.ParFlags.nNodes, taskStart);
+#endif
+
+#if /* defined(SMP) ||*/ defined(PARALLEL_HASKELL)
   initSparkPools();
 #endif
 
@@ -2074,10 +2695,65 @@ initScheduler(void)
 void
 exitScheduler( void )
 {
+    interrupted = rtsTrue;
+    shutting_down_scheduler = rtsTrue;
+
 #if defined(RTS_SUPPORTS_THREADS)
-  stopTaskManager();
+    if (threadIsTask(osThreadId())) { taskStop(); }
+    stopTaskManager();
+    //
+    // What can we do here?  There are a bunch of worker threads, it
+    // might be nice to let them exit cleanly.  There may be some main
+    // threads in the run queue; we should let them return to their
+    // callers with an Interrupted state.  We can't in general wait
+    // for all the running Tasks to stop, because some might be off in
+    // a C call that is blocked.
+    // 
+    // Letting the run queue drain is the safest thing.  That lets any
+    // main threads return that can return, and cleans up all the
+    // runnable threads.  Then we grab all the Capabilities to stop
+    // anything unexpected happening while we shut down.
+    //
+    // ToDo: this doesn't let us get the time stats from the worker
+    // tasks, because they haven't called taskStop().
+    //
+    ACQUIRE_LOCK(&sched_mutex);
+    { 
+       nat i;
+       for (i = 1000; i > 0; i--) {
+           if (EMPTY_RUN_QUEUE()) {
+               IF_DEBUG(scheduler, sched_belch("run queue is empty"));
+               break;
+           }
+           IF_DEBUG(scheduler, sched_belch("yielding"));
+           RELEASE_LOCK(&sched_mutex);
+           prodWorker();
+           yieldThread();
+           ACQUIRE_LOCK(&sched_mutex);
+       }
+    }
+
+#ifdef SMP
+    {
+       Capability *cap;
+       int n_capabilities = RtsFlags.ParFlags.nNodes; 
+       Capability *caps[n_capabilities];
+       nat i;
+
+       while (n_capabilities > 0) {
+           IF_DEBUG(scheduler, sched_belch("exitScheduler: grabbing all the capabilies (%d left)", n_capabilities));
+           waitForReturnCapability(&sched_mutex, &cap);
+           n_capabilities--;
+           caps[n_capabilities] = cap;
+       }
+    }
+#else
+    {
+       Capability *cap;
+       waitForReturnCapability(&sched_mutex, &cap);
+    }
+#endif
 #endif
-  shutting_down_scheduler = rtsTrue;
 }
 
 /* ----------------------------------------------------------------------------
@@ -2089,8 +2765,7 @@ exitScheduler( void )
    ToDo: no support for two-space collection at the moment???
    ------------------------------------------------------------------------- */
 
-static
-SchedulerStatus
+static SchedulerStatus
 waitThread_(StgMainThread* m, Capability *initialCapability)
 {
   SchedulerStatus stat;
@@ -2185,11 +2860,15 @@ GetRoots( evac_fn evac )
   }
 #endif 
 
+  if (blackhole_queue != END_TSO_QUEUE) {
+      evac((StgClosure **)&blackhole_queue);
+  }
+
   if (suspended_ccalling_threads != END_TSO_QUEUE) {
       evac((StgClosure **)&suspended_ccalling_threads);
   }
 
-#if defined(PAR) || defined(GRAN)
+#if defined(PARALLEL_HASKELL) || defined(GRAN)
   markSparkQueue(evac);
 #endif
 
@@ -2259,7 +2938,8 @@ performGCWithRoots(void (*get_roots)(evac_fn))
 static StgTSO *
 threadStackOverflow(StgTSO *tso)
 {
-  nat new_stack_size, new_tso_size, stack_words;
+  nat new_stack_size, stack_words;
+  lnat new_tso_size;
   StgPtr new_sp;
   StgTSO *dest;
 
@@ -2283,7 +2963,7 @@ threadStackOverflow(StgTSO *tso)
    * Finally round up so the TSO ends up as a whole number of blocks.
    */
   new_stack_size = stg_min(tso->stack_size * 2, tso->max_stack_size);
-  new_tso_size   = (nat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + 
+  new_tso_size   = (lnat)BLOCK_ROUND_UP(new_stack_size * sizeof(W_) + 
                                       TSO_STRUCT_SIZE)/sizeof(W_);
   new_tso_size = round_to_mblocks(new_tso_size);  /* Be MBLOCK-friendly */
   new_stack_size = new_tso_size - TSO_STRUCT_SIZEW;
@@ -2314,7 +2994,6 @@ threadStackOverflow(StgTSO *tso)
   tso->link = dest;
   tso->sp = (P_)&(tso->stack[tso->stack_size]);
   tso->why_blocked = NotBlocked;
-  dest->mut_link = NULL;
 
   IF_PAR_DEBUG(verbose,
               debugBelch("@@ threadStackOverflow of TSO %d (now at %p): stack size increased to %ld\n",
@@ -2340,7 +3019,7 @@ STATIC_INLINE void
 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
 {
 }
-#elif defined(PAR)
+#elif defined(PARALLEL_HASKELL)
 STATIC_INLINE void
 unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
 {
@@ -2374,7 +3053,7 @@ unblockCount ( StgBlockingQueueElement *bqe, StgClosure *node )
 #endif
 
 #if defined(GRAN)
-static StgBlockingQueueElement *
+StgBlockingQueueElement *
 unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
 {
     StgTSO *tso;
@@ -2413,8 +3092,8 @@ unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
     IF_DEBUG(scheduler,debugBelch("-- Waking up thread %ld (%p)\n", 
                             tso->id, tso));
 }
-#elif defined(PAR)
-static StgBlockingQueueElement *
+#elif defined(PARALLEL_HASKELL)
+StgBlockingQueueElement *
 unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
 {
     StgBlockingQueueElement *next;
@@ -2426,7 +3105,7 @@ unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
       next = bqe->link;
       ((StgTSO *)bqe)->link = END_TSO_QUEUE; // debugging?
       APPEND_TO_RUN_QUEUE((StgTSO *)bqe); 
-      THREAD_RUNNABLE();
+      threadRunnable();
       unblockCount(bqe, node);
       /* reset blocking status after dumping event */
       ((StgTSO *)bqe)->why_blocked = NotBlocked;
@@ -2459,8 +3138,8 @@ unblockOneLocked(StgBlockingQueueElement *bqe, StgClosure *node)
   return next;
 }
 
-#else /* !GRAN && !PAR */
-static StgTSO *
+#else /* !GRAN && !PARALLEL_HASKELL */
+StgTSO *
 unblockOneLocked(StgTSO *tso)
 {
   StgTSO *next;
@@ -2471,13 +3150,13 @@ unblockOneLocked(StgTSO *tso)
   next = tso->link;
   tso->link = END_TSO_QUEUE;
   APPEND_TO_RUN_QUEUE(tso);
-  THREAD_RUNNABLE();
+  threadRunnable();
   IF_DEBUG(scheduler,sched_belch("waking up thread %ld", (long)tso->id));
   return next;
 }
 #endif
 
-#if defined(GRAN) || defined(PAR)
+#if defined(GRAN) || defined(PARALLEL_HASKELL)
 INLINE_ME StgBlockingQueueElement *
 unblockOne(StgBlockingQueueElement *bqe, StgClosure *node)
 {
@@ -2579,7 +3258,7 @@ awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
                debugBelch("## BQ Stats of %p: [%d entries] %s\n",
                        node, len, (bqe!=END_BQ_QUEUE) ? "RBH" : ""));
 }
-#elif defined(PAR)
+#elif defined(PARALLEL_HASKELL)
 void 
 awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
 {
@@ -2611,11 +3290,13 @@ awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node)
   RELEASE_LOCK(&sched_mutex);
 }
 
-#else   /* !GRAN && !PAR */
+#else   /* !GRAN && !PARALLEL_HASKELL */
 
 void
 awakenBlockedQueueNoLock(StgTSO *tso)
 {
+  if (tso == NULL) return; // hack; see bug #1235728, and comments in
+                          // Exception.cmm
   while (tso != END_TSO_QUEUE) {
     tso = unblockOneLocked(tso);
   }
@@ -2624,6 +3305,8 @@ awakenBlockedQueueNoLock(StgTSO *tso)
 void
 awakenBlockedQueue(StgTSO *tso)
 {
+  if (tso == NULL) return; // hack; see bug #1235728, and comments in
+                          // Exception.cmm
   ACQUIRE_LOCK(&sched_mutex);
   while (tso != END_TSO_QUEUE) {
     tso = unblockOneLocked(tso);
@@ -2642,9 +3325,11 @@ interruptStgRts(void)
 {
     interrupted    = 1;
     context_switch = 1;
-#ifdef RTS_SUPPORTS_THREADS
-    wakeBlockedWorkerThread();
-#endif
+    threadRunnable();
+    /* ToDo: if invoked from a signal handler, this threadRunnable
+     * only works if there's another thread (not this one) waiting to
+     * be woken up.
+     */
 }
 
 /* -----------------------------------------------------------------------------
@@ -2655,7 +3340,7 @@ interruptStgRts(void)
    This has nothing to do with the UnblockThread event in GranSim. -- HWL
    -------------------------------------------------------------------------- */
 
-#if defined(GRAN) || defined(PAR)
+#if defined(GRAN) || defined(PARALLEL_HASKELL)
 /*
   NB: only the type of the blocking queue is different in GranSim and GUM
       the operations on the queue-elements are the same
@@ -2674,6 +3359,14 @@ unblockThread(StgTSO *tso)
   case NotBlocked:
     return;  /* not blocked */
 
+  case BlockedOnSTM:
+    // Be careful: nothing to do here!  We tell the scheduler that the thread
+    // is runnable and we leave it to the stack-walking code to abort the 
+    // transaction while unwinding the stack.  We should perhaps have a debugging
+    // test to make sure that this really happens and that the 'zombie' transaction
+    // does not get committed.
+    goto done;
+
   case BlockedOnMVar:
     ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
     {
@@ -2740,7 +3433,7 @@ unblockThread(StgTSO *tso)
 
   case BlockedOnRead:
   case BlockedOnWrite:
-#if defined(mingw32_TARGET_OS)
+#if defined(mingw32_HOST_OS)
   case BlockedOnDoProc:
 #endif
     {
@@ -2760,6 +3453,12 @@ unblockThread(StgTSO *tso)
              blocked_queue_tl = (StgTSO *)prev;
            }
          }
+#if defined(mingw32_HOST_OS)
+         /* (Cooperatively) signal that the worker thread should abort
+          * the request.
+          */
+         abandonWorkRequest(tso->block_info.async_result->reqID);
+#endif
          goto done;
        }
       }
@@ -2807,6 +3506,14 @@ unblockThread(StgTSO *tso)
 
   switch (tso->why_blocked) {
 
+  case BlockedOnSTM:
+    // Be careful: nothing to do here!  We tell the scheduler that the thread
+    // is runnable and we leave it to the stack-walking code to abort the 
+    // transaction while unwinding the stack.  We should perhaps have a debugging
+    // test to make sure that this really happens and that the 'zombie' transaction
+    // does not get committed.
+    goto done;
+
   case BlockedOnMVar:
     ASSERT(get_itbl(tso->block_info.closure)->type == MVAR);
     {
@@ -2828,12 +3535,9 @@ unblockThread(StgTSO *tso)
     }
 
   case BlockedOnBlackHole:
-    ASSERT(get_itbl(tso->block_info.closure)->type == BLACKHOLE_BQ);
     {
-      StgBlockingQueue *bq = (StgBlockingQueue *)(tso->block_info.closure);
-
-      last = &bq->blocking_queue;
-      for (t = bq->blocking_queue; t != END_TSO_QUEUE; 
+      last = &blackhole_queue;
+      for (t = blackhole_queue; t != END_TSO_QUEUE; 
           last = &t->link, t = t->link) {
        if (t == tso) {
          *last = tso->link;
@@ -2870,7 +3574,7 @@ unblockThread(StgTSO *tso)
 
   case BlockedOnRead:
   case BlockedOnWrite:
-#if defined(mingw32_TARGET_OS)
+#if defined(mingw32_HOST_OS)
   case BlockedOnDoProc:
 #endif
     {
@@ -2889,6 +3593,12 @@ unblockThread(StgTSO *tso)
              blocked_queue_tl = prev;
            }
          }
+#if defined(mingw32_HOST_OS)
+         /* (Cooperatively) signal that the worker thread should abort
+          * the request.
+          */
+         abandonWorkRequest(tso->block_info.async_result->reqID);
+#endif
          goto done;
        }
       }
@@ -2925,6 +3635,50 @@ unblockThread(StgTSO *tso)
 #endif
 
 /* -----------------------------------------------------------------------------
+ * checkBlackHoles()
+ *
+ * Check the blackhole_queue for threads that can be woken up.  We do
+ * this periodically: before every GC, and whenever the run queue is
+ * empty.
+ *
+ * An elegant solution might be to just wake up all the blocked
+ * threads with awakenBlockedQueue occasionally: they'll go back to
+ * sleep again if the object is still a BLACKHOLE.  Unfortunately this
+ * doesn't give us a way to tell whether we've actually managed to
+ * wake up any threads, so we would be busy-waiting.
+ *
+ * -------------------------------------------------------------------------- */
+
+static rtsBool
+checkBlackHoles( void )
+{
+    StgTSO **prev, *t;
+    rtsBool any_woke_up = rtsFalse;
+    StgHalfWord type;
+
+    IF_DEBUG(scheduler, sched_belch("checking threads blocked on black holes"));
+
+    // ASSUMES: sched_mutex
+    prev = &blackhole_queue;
+    t = blackhole_queue;
+    while (t != END_TSO_QUEUE) {
+       ASSERT(t->why_blocked == BlockedOnBlackHole);
+       type = get_itbl(t->block_info.closure)->type;
+       if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
+           IF_DEBUG(sanity,checkTSO(t));
+           t = unblockOneLocked(t);
+           *prev = t;
+           any_woke_up = rtsTrue;
+       } else {
+           prev = &t->link;
+           t = t->link;
+       }
+    }
+
+    return any_woke_up;
+}
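+
+/* A minimal sketch of the intended call pattern (illustrative only; the
+ * actual calls are made from scheduleDoGC() above and from the main
+ * scheduler loop): the queue is polled when there is nothing to run, using
+ * the blackholes_need_checking flag declared at the top of this file to
+ * avoid redundant scans.
+ *
+ *     if (EMPTY_RUN_QUEUE() && blackholes_need_checking) {
+ *         blackholes_need_checking = rtsFalse;
+ *         checkBlackHoles();  // may move woken TSOs back onto the run queue
+ *     }
+ */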
+
+/* -----------------------------------------------------------------------------
  * raiseAsync()
  *
  * The following function implements the magic for raising an
@@ -2961,7 +3715,10 @@ unblockThread(StgTSO *tso)
 void 
 deleteThread(StgTSO *tso)
 {
-  raiseAsync(tso,NULL);
+  if (tso->why_blocked != BlockedOnCCall &&
+      tso->why_blocked != BlockedOnCCall_NoUnblockExc) {
+      raiseAsync(tso,NULL);
+  }
 }
 
 #ifdef FORKPROCESS_PRIMOP_SUPPORTED
@@ -2996,6 +3753,12 @@ raiseAsyncWithLock(StgTSO *tso, StgClosure *exception)
 void
 raiseAsync(StgTSO *tso, StgClosure *exception)
 {
+    raiseAsync_(tso, exception, rtsFalse);
+}
+
+static void
+raiseAsync_(StgTSO *tso, StgClosure *exception, rtsBool stop_at_atomically)
+{
     StgRetInfoTable *info;
     StgPtr sp;
   
@@ -3039,6 +3802,10 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
        // top of the stack applied to the exception.
        // 
        // 5. If it's a STOP_FRAME, then kill the thread.
+        // 
+        // NB: if we pass an ATOMICALLY_FRAME then abort the associated 
+        // transaction
+       
        
        StgPtr frame;
        
@@ -3047,13 +3814,45 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
        
        while (info->i.type != UPDATE_FRAME
               && (info->i.type != CATCH_FRAME || exception == NULL)
-              && info->i.type != STOP_FRAME) {
+              && info->i.type != STOP_FRAME
+              && (info->i.type != ATOMICALLY_FRAME || stop_at_atomically == rtsFalse))
+       {
+            if (info->i.type == CATCH_RETRY_FRAME || info->i.type == ATOMICALLY_FRAME) {
+              // If we find an ATOMICALLY_FRAME then we abort the
+              // current transaction and propagate the exception.  In
+              // this case (unlike ordinary exceptions) we do not care
+              // whether the transaction is valid or not because its
+              // possible validity cannot have caused the exception
+              // and will not be visible after the abort.
+              IF_DEBUG(stm,
+                       debugBelch("Found atomically block delivering async exception\n"));
+              stmAbortTransaction(tso -> trec);
+              tso -> trec = stmGetEnclosingTRec(tso -> trec);
+            }
            frame += stack_frame_sizeW((StgClosure *)frame);
            info = get_ret_itbl((StgClosure *)frame);
        }
        
        switch (info->i.type) {
            
+       case ATOMICALLY_FRAME:
+           ASSERT(stop_at_atomically);
+           ASSERT(stmGetEnclosingTRec(tso->trec) == NO_TREC);
+           stmCondemnTransaction(tso -> trec);
+#ifdef REG_R1
+           tso->sp = frame;
+#else
+           // R1 is not a register: the return convention for IO in
+           // this case puts the return value on the stack, so we
+           // need to set up the stack to return to the atomically
+           // frame properly...
+           tso->sp = frame - 2;
+           tso->sp[1] = (StgWord) &stg_NO_FINALIZER_closure; // why not?
+           tso->sp[0] = (StgWord) &stg_ut_1_0_unreg_info;
+#endif
+           tso->what_next = ThreadRunGHC;
+           return;
+
        case CATCH_FRAME:
            // If we find a CATCH_FRAME, and we've got an exception to raise,
            // then build the THUNK raise(exception), and leave it on
@@ -3063,12 +3862,12 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
 #ifdef PROFILING
            StgCatchFrame *cf = (StgCatchFrame *)frame;
 #endif
-           StgClosure *raise;
+           StgThunk *raise;
            
            // we've got an exception to raise, so let's pass it to the
            // handler in this frame.
            //
-           raise = (StgClosure *)allocate(sizeofW(StgClosure)+1);
+           raise = (StgThunk *)allocate(sizeofW(StgThunk)+MIN_UPD_SIZE);
            TICK_ALLOC_SE_THK(1,0);
            SET_HDR(raise,&stg_raise_info,cf->header.prof.ccs);
            raise->payload[0] = exception;
@@ -3106,7 +3905,7 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
            // fun field.
            //
            words = frame - sp - 1;
-           ap = (StgAP_STACK *)allocate(PAP_sizeW(words));
+           ap = (StgAP_STACK *)allocate(AP_STACK_sizeW(words));
            
            ap->size = words;
            ap->fun  = (StgClosure *)sp[0];
@@ -3173,7 +3972,7 @@ raiseAsync(StgTSO *tso, StgClosure *exception)
 StgWord
 raiseExceptionHelper (StgTSO *tso, StgClosure *exception)
 {
-    StgClosure *raise_closure = NULL;
+    StgThunk *raise_closure = NULL;
     StgPtr p, next;
     StgRetInfoTable *info;
     //
@@ -3210,22 +4009,33 @@ raiseExceptionHelper (StgTSO *tso, StgClosure *exception)
            // Only create raise_closure if we need to.
            if (raise_closure == NULL) {
                raise_closure = 
-                   (StgClosure *)allocate(sizeofW(StgClosure)+MIN_UPD_SIZE);
+                   (StgThunk *)allocate(sizeofW(StgThunk)+MIN_UPD_SIZE);
                SET_HDR(raise_closure, &stg_raise_info, CCCS);
                raise_closure->payload[0] = exception;
            }
-           UPD_IND(((StgUpdateFrame *)p)->updatee,raise_closure);
+           UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
            p = next;
            continue;
+
+        case ATOMICALLY_FRAME:
+            IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p\n", p));
+            tso->sp = p;
+            return ATOMICALLY_FRAME;
            
        case CATCH_FRAME:
            tso->sp = p;
            return CATCH_FRAME;
+
+        case CATCH_STM_FRAME:
+            IF_DEBUG(stm, debugBelch("Found CATCH_STM_FRAME at %p\n", p));
+            tso->sp = p;
+            return CATCH_STM_FRAME;
            
        case STOP_FRAME:
            tso->sp = p;
            return STOP_FRAME;
 
+        case CATCH_RETRY_FRAME:
        default:
            p = next; 
            continue;
@@ -3233,6 +4043,55 @@ raiseExceptionHelper (StgTSO *tso, StgClosure *exception)
     }
 }
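
/* Editor's sketch (not part of this patch): a hypothetical C caller
 * dispatching on the frame type returned by raiseExceptionHelper.  In the
 * RTS the dispatch is presumably done by the raise# entry code; the
 * function and comments below are illustrative assumptions only.
 */
static void
raiseSketch (StgTSO *tso, StgClosure *exception)
{
    switch (raiseExceptionHelper(tso, exception)) {
    case CATCH_FRAME:
        /* tso->sp now points at an ordinary exception handler frame */
        break;
    case CATCH_STM_FRAME:
        /* a handler installed by catchSTM#, inside a transaction */
        break;
    case ATOMICALLY_FRAME:
        /* the exception is about to propagate out of an atomically# block */
        break;
    case STOP_FRAME:
        /* bottom of the stack reached: no handler was found */
        break;
    }
}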
 
+
+/* -----------------------------------------------------------------------------
+   findRetryFrameHelper
+
+   This function is called by the retry# primitive.  It traverses the stack
+   leaving tso->sp referring to the frame which should handle the retry.  
+
+   This should be either a CATCH_RETRY_FRAME (if the retry# is within an orElse#)
+   or an ATOMICALLY_FRAME (if the retry# reaches the top level).
+
+   We skip CATCH_STM_FRAMEs because retries are not considered to be exceptions,
+   despite the similar implementation.
+
+   We should not expect to see CATCH_FRAME or STOP_FRAME because those should
+   not be created within memory transactions.
+   -------------------------------------------------------------------------- */
+
+StgWord
+findRetryFrameHelper (StgTSO *tso)
+{
+  StgPtr           p, next;
+  StgRetInfoTable *info;
+
+  p = tso -> sp;
+  while (1) {
+    info = get_ret_itbl((StgClosure *)p);
+    next = p + stack_frame_sizeW((StgClosure *)p);
+    switch (info->i.type) {
+      
+    case ATOMICALLY_FRAME:
+      IF_DEBUG(stm, debugBelch("Found ATOMICALLY_FRAME at %p during retrry\n", p));
+      tso->sp = p;
+      return ATOMICALLY_FRAME;
+      
+    case CATCH_RETRY_FRAME:
+      IF_DEBUG(stm, debugBelch("Found CATCH_RETRY_FRAME at %p during retrry\n", p));
+      tso->sp = p;
+      return CATCH_RETRY_FRAME;
+      
+    case CATCH_STM_FRAME:
+    default:
+      ASSERT(info->i.type != CATCH_FRAME);
+      ASSERT(info->i.type != STOP_FRAME);
+      p = next; 
+      continue;
+    }
+  }
+}
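
/* Editor's sketch (not part of this patch): how a caller of
 * findRetryFrameHelper might act on its result.  The retry# primitive is
 * implemented outside this file, so retrySketch and its comments are
 * illustrative assumptions, not the actual retry# code.
 */
static void
retrySketch (StgTSO *tso)
{
    switch (findRetryFrameHelper(tso)) {
    case CATCH_RETRY_FRAME:
        /* retry# occurred inside an orElse#: the frame at tso->sp records
         * the alternative transaction to run instead */
        break;
    case ATOMICALLY_FRAME:
        /* retry# reached the top level: the transaction is abandoned and
         * the thread blocks until a TVar it read is written */
        break;
    }
}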
+
 /* -----------------------------------------------------------------------------
    resurrectThreads is called after garbage collection on the list of
    threads found to be garbage.  Each of these threads will be woken
@@ -3263,6 +4122,9 @@ resurrectThreads( StgTSO *threads )
     case BlockedOnBlackHole:
       raiseAsync(tso,(StgClosure *)NonTermination_closure);
       break;
+    case BlockedOnSTM:
+      raiseAsync(tso,(StgClosure *)BlockedIndefinitely_closure);
+      break;
     case NotBlocked:
       /* This might happen if the thread was blocked on a black hole
        * belonging to a thread that we've just woken up (raiseAsync
@@ -3275,95 +4137,32 @@ resurrectThreads( StgTSO *threads )
   }
 }
 
-/* -----------------------------------------------------------------------------
- * Blackhole detection: if we reach a deadlock, test whether any
- * threads are blocked on themselves.  Any threads which are found to
- * be self-blocked get sent a NonTermination exception.
- *
- * This is only done in a deadlock situation in order to avoid
- * performance overhead in the normal case.
- *
- * Locks: sched_mutex is held upon entry and exit.
- * -------------------------------------------------------------------------- */
-
-static void
-detectBlackHoles( void )
-{
-    StgTSO *tso = all_threads;
-    StgPtr frame;
-    StgClosure *blocked_on;
-    StgRetInfoTable *info;
-
-    for (tso = all_threads; tso != END_TSO_QUEUE; tso = tso->global_link) {
-
-       while (tso->what_next == ThreadRelocated) {
-           tso = tso->link;
-           ASSERT(get_itbl(tso)->type == TSO);
-       }
-      
-       if (tso->why_blocked != BlockedOnBlackHole) {
-           continue;
-       }
-       blocked_on = tso->block_info.closure;
-
-       frame = tso->sp;
-
-       while(1) {
-           info = get_ret_itbl((StgClosure *)frame);
-           switch (info->i.type) {
-           case UPDATE_FRAME:
-               if (((StgUpdateFrame *)frame)->updatee == blocked_on) {
-                   /* We are blocking on one of our own computations, so
-                    * send this thread the NonTermination exception.  
-                    */
-                   IF_DEBUG(scheduler, 
-                            sched_belch("thread %d is blocked on itself", tso->id));
-                   raiseAsync(tso, (StgClosure *)NonTermination_closure);
-                   goto done;
-               }
-               
-               frame = (StgPtr)((StgUpdateFrame *)frame + 1);
-               continue;
-
-           case STOP_FRAME:
-               goto done;
-
-               // normal stack frames; do nothing except advance the pointer
-           default:
-               frame += stack_frame_sizeW((StgClosure *)frame);
-           }
-       }   
-       done: ;
-    }
-}
-
 /* ----------------------------------------------------------------------------
  * Debugging: why is a thread blocked
  * [Also provides useful information when debugging threaded programs
  *  at the Haskell source code level, so enable outside of DEBUG. --sof 7/02]
    ------------------------------------------------------------------------- */
 
-static
-void
+static void
 printThreadBlockage(StgTSO *tso)
 {
   switch (tso->why_blocked) {
   case BlockedOnRead:
-    debugBelch("is blocked on read from fd %d", tso->block_info.fd);
+    debugBelch("is blocked on read from fd %d", (int)(tso->block_info.fd));
     break;
   case BlockedOnWrite:
-    debugBelch("is blocked on write to fd %d", tso->block_info.fd);
+    debugBelch("is blocked on write to fd %d", (int)(tso->block_info.fd));
     break;
-#if defined(mingw32_TARGET_OS)
+#if defined(mingw32_HOST_OS)
     case BlockedOnDoProc:
-    debugBelch("is blocked on proc (request: %d)", tso->block_info.async_result->reqID);
+    debugBelch("is blocked on proc (request: %ld)", tso->block_info.async_result->reqID);
     break;
 #endif
   case BlockedOnDelay:
-    debugBelch("is blocked until %d", tso->block_info.target);
+    debugBelch("is blocked until %ld", (long)(tso->block_info.target));
     break;
   case BlockedOnMVar:
-    debugBelch("is blocked on an MVar");
+    debugBelch("is blocked on an MVar @ %p", tso->block_info.closure);
     break;
   case BlockedOnException:
     debugBelch("is blocked on delivering an exception to thread %d",
@@ -3375,7 +4174,7 @@ printThreadBlockage(StgTSO *tso)
   case NotBlocked:
     debugBelch("is not blocked");
     break;
-#if defined(PAR)
+#if defined(PARALLEL_HASKELL)
   case BlockedOnGA:
     debugBelch("is blocked on global address; local FM_BQ is %p (%s)",
            tso->block_info.closure, info_type(tso->block_info.closure));
@@ -3391,14 +4190,16 @@ printThreadBlockage(StgTSO *tso)
   case BlockedOnCCall_NoUnblockExc:
     debugBelch("is blocked on an external call (exceptions were already blocked)");
     break;
+  case BlockedOnSTM:
+    debugBelch("is blocked on an STM operation");
+    break;
   default:
     barf("printThreadBlockage: strange tso->why_blocked: %d for TSO %d (%d)",
         tso->why_blocked, tso->id, tso);
   }
 }
 
-static
-void
+static void
 printThreadStatus(StgTSO *tso)
 {
   switch (tso->what_next) {
@@ -3417,7 +4218,6 @@ void
 printAllThreads(void)
 {
   StgTSO *t;
-  void *label;
 
 # if defined(GRAN)
   char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
@@ -3425,7 +4225,7 @@ printAllThreads(void)
                       time_string, rtsFalse/*no commas!*/);
 
   debugBelch("all threads at [%s]:\n", time_string);
-# elif defined(PAR)
+# elif defined(PARALLEL_HASKELL)
   char time_string[TIME_STR_LEN], node_str[NODE_STR_LEN];
   ullong_format_string(CURRENT_TIME,
                       time_string, rtsFalse/*no commas!*/);
@@ -3435,21 +4235,49 @@ printAllThreads(void)
   debugBelch("all threads:\n");
 # endif
 
-  for (t = all_threads; t != END_TSO_QUEUE; t = t->global_link) {
-    debugBelch("\tthread %d @ %p ", t->id, (void *)t);
-    label = lookupThreadLabel(t->id);
-    if (label) debugBelch("[\"%s\"] ",(char *)label);
-    printThreadStatus(t);
-    debugBelch("\n");
+  for (t = all_threads; t != END_TSO_QUEUE; ) {
+    debugBelch("\tthread %4d @ %p ", t->id, (void *)t);
+#if defined(DEBUG)
+    {
+      void *label = lookupThreadLabel(t->id);
+      if (label) debugBelch("[\"%s\"] ",(char *)label);
+    }
+#endif
+    if (t->what_next == ThreadRelocated) {
+       debugBelch("has been relocated...\n");
+       t = t->link;
+    } else {
+       printThreadStatus(t);
+       debugBelch("\n");
+       t = t->global_link;
+    }
   }
 }
-    
+
 #ifdef DEBUG
 
+// useful from gdb
+void 
+printThreadQueue(StgTSO *t)
+{
+    nat i = 0;
+    for (; t != END_TSO_QUEUE; t = t->link) {
+       debugBelch("\tthread %d @ %p ", t->id, (void *)t);
+       if (t->what_next == ThreadRelocated) {
+           debugBelch("has been relocated...\n");
+       } else {
+           printThreadStatus(t);
+           debugBelch("\n");
+       }
+       i++;
+    }
+    debugBelch("%d threads on queue\n", i);
+}
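
/* Editor's note: an example of calling this from gdb, assuming a
 * DEBUG-built RTS and some StgTSO* t in scope (both assumptions):
 *
 *   (gdb) call printThreadQueue(t)
 *
 * This prints one line per thread reachable through t->link, then the
 * number of threads on the queue.
 */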
+
 /* 
    Print a whole blocking queue attached to node (debugging only).
 */
-# if defined(PAR)
+# if defined(PARALLEL_HASKELL)
 void 
 print_bq (StgClosure *node)
 {
@@ -3576,28 +4404,9 @@ print_bq (StgClosure *node)
   } /* for */
   debugBelch("\n");
 }
-#else
-/* 
-   Nice and easy: only TSOs on the blocking queue
-*/
-void 
-print_bq (StgClosure *node)
-{
-  StgTSO *tso;
-
-  ASSERT(node!=(StgClosure*)NULL);         // sanity check
-  for (tso = ((StgBlockingQueue*)node)->blocking_queue;
-       tso != END_TSO_QUEUE; 
-       tso=tso->link) {
-    ASSERT(tso!=NULL && tso!=END_TSO_QUEUE);   // sanity check
-    ASSERT(get_itbl(tso)->type == TSO);  // guess what, sanity check
-    debugBelch(" TSO %d (%p),", tso->id, tso);
-  }
-  debugBelch("\n");
-}
 # endif
 
-#if defined(PAR)
+#if defined(PARALLEL_HASKELL)
 static nat
 run_queue_len(void)
 {
@@ -3620,7 +4429,7 @@ sched_belch(char *s, ...)
   va_start(ap,s);
 #ifdef RTS_SUPPORTS_THREADS
   debugBelch("sched (task %p): ", osThreadId());
-#elif defined(PAR)
+#elif defined(PARALLEL_HASKELL)
   debugBelch("== ");
 #else
   debugBelch("sched: ");