Remove dead code isHsVar
[ghc-hetmet.git] / rts / Schedule.c
index 666b59e..f376286 100644 (file)
@@ -285,7 +285,9 @@ schedule (Capability *initialCapability, Task *task)
 
   if (running_finalizers) {
       errorBelch("error: a C finalizer called back into Haskell.\n"
-                 "   use Foreign.Concurrent.newForeignPtr for Haskell finalizers.");
+                 "   This was previously allowed, but is disallowed in GHC 6.10.2 and later.\n"
+                 "   To create finalizers that may call back into Haskll, use\n"
+                 "   Foreign.Concurrent.newForeignPtr instead of Foreign.newForeignPtr.");
       stg_exit(EXIT_FAILURE);
   }
 
@@ -858,7 +860,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                    debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
                    appendToRunQueue(free_caps[i],t);
 
-                    postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
+                    postEvent (cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
 
                    if (t->bound) { t->bound->cap = free_caps[i]; }
                    t->cap = free_caps[i];
@@ -881,6 +883,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                    spark = tryStealSpark(cap->sparks);
                    if (spark != NULL) {
                        debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
+
+                        postEvent(free_caps[i], EVENT_STEAL_SPARK, t->id, cap->no);
+
                        newSpark(&(free_caps[i]->r), spark);
                    }
                }
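The two postEvent calls added above log the moments at which work changes hands: a migrated thread is recorded by the capability pushing it, and a stolen spark is recorded against the capability receiving it. The sketch below shows only the shape of that pattern; cap_t, post_event, and push_work are hypothetical stand-ins, not the RTS event-log API.

    #include <stdio.h>

    typedef struct { int no; } cap_t;

    enum { EVENT_MIGRATE_THREAD, EVENT_STEAL_SPARK };

    /* hypothetical event sink; the real RTS appends to a binary event log */
    static void post_event(cap_t *cap, int tag, unsigned long tid, int other)
    {
        printf("cap %d: event %d, thread %lu, other cap %d\n",
               cap->no, tag, tid, other);
    }

    /* push one runnable thread to each idle capability, logging each move */
    static void push_work(cap_t *cap, cap_t *free_caps[], int n_free,
                          unsigned long run_queue[], int *n_threads)
    {
        for (int i = 0; i < n_free && *n_threads > 0; i++) {
            unsigned long t = run_queue[--*n_threads];
            post_event(cap, EVENT_MIGRATE_THREAD, t, free_caps[i]->no);
        }
    }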
@@ -2125,6 +2130,10 @@ workerStart(Task *task)
     cap = task->cap;
     RELEASE_LOCK(&task->lock);
 
+    if (RtsFlags.ParFlags.setAffinity) {
+        setThreadAffinity(cap->no, n_capabilities);
+    }
+
     // set the thread-local pointer to the Task:
     taskEnter(task);
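The new block above pins each worker OS thread to a processor when the setAffinity RTS flag is on. On Linux this kind of pinning is typically implemented with pthread_setaffinity_np; the sketch below assumes that call and a simple round-robin capability-to-CPU mapping, which may not be exactly the policy setThreadAffinity uses.

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>

    /* Pin the calling thread to one CPU, spreading capabilities
       round-robin across the n_cpus available processors. */
    static void set_thread_affinity(int cap_no, int n_cpus)
    {
        cpu_set_t mask;
        CPU_ZERO(&mask);
        CPU_SET(cap_no % n_cpus, &mask);
        if (pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask) != 0) {
            /* non-fatal: keep running unpinned if the kernel refuses */
        }
    }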
 
@@ -2226,9 +2235,7 @@ exitScheduler(
 {
     Task *task = NULL;
 
-    ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
-    RELEASE_LOCK(&sched_mutex);
 
     // If we haven't killed all the threads yet, do it now.
     if (sched_state < SCHED_SHUTTING_DOWN) {
@@ -2292,9 +2299,7 @@ performGC_(rtsBool force_major)
     // We must grab a new Task here, because the existing Task may be
     // associated with a particular Capability, and chained onto the 
     // suspended_ccalling_tasks queue.
-    ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
-    RELEASE_LOCK(&sched_mutex);
 
     waitForReturnCapability(&task->cap,task);
     scheduleDoGC(task->cap,task,force_major);
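This hunk and the previous one make the same change: callers stop bracketing newBoundTask() with sched_mutex because the locking now lives inside the callee, so every call site gets the same protection without remembering to lock. A tiny sketch of that refactoring pattern, with hypothetical names (task_lock, new_bound_task):

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct Task { struct Task *next; } Task;

    static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;
    static Task *all_tasks = NULL;

    /* After the change: the function takes its own lock, so callers
       no longer need an ACQUIRE/RELEASE pair around every call. */
    static Task *new_bound_task(void)
    {
        pthread_mutex_lock(&task_lock);
        Task *t = malloc(sizeof(Task));
        t->next = all_tasks;
        all_tasks = t;
        pthread_mutex_unlock(&task_lock);
        return t;
    }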
@@ -2431,9 +2436,16 @@ threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
 
     tso_size_w = tso_sizeW(tso);
 
-    if (tso_size_w < MBLOCK_SIZE_W || 
+    if (tso_size_w < MBLOCK_SIZE_W ||
+          // TSO is less than 2 mblocks (since the first mblock is
+          // shorter than MBLOCK_SIZE_W)
+        (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W != 0 ||
+          // or TSO is not a whole number of megablocks (ensuring
+          // precondition of splitLargeBlock() below)
         (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4) 
+          // or stack is using more than 1/4 of the available space
     {
+        // then do nothing
         return tso;
     }
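The new guards can be checked with concrete numbers. Assuming the usual GHC block layout (4 KB blocks, 1 MB megablocks, 8-byte words, and a first megablock that holds only BLOCKS_PER_MBLOCK usable blocks because block descriptors take the rest), a TSO passes the size test only when it is the short first megablock plus a whole number of full ones. The constant values below are illustrative assumptions, not taken from the RTS headers.

    #include <stdio.h>

    #define BLOCK_SIZE_W      (4096 / 8)          /* words per block             */
    #define MBLOCK_SIZE_W     ((1024*1024) / 8)   /* words per megablock         */
    #define BLOCKS_PER_MBLOCK 252                 /* usable blocks, first mblock */

    /* Mirrors the first two conditions above: the TSO must span at least
       two mblocks and be a whole number of mblocks past the short first one. */
    static int shrinkable_size(unsigned long tso_size_w)
    {
        if (tso_size_w < MBLOCK_SIZE_W) return 0;
        return (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W == 0;
    }

    int main(void)
    {
        unsigned long two = BLOCKS_PER_MBLOCK*BLOCK_SIZE_W + MBLOCK_SIZE_W;
        printf("%d\n", shrinkable_size(two));      /* 1: exactly two mblocks */
        printf("%d\n", shrinkable_size(two + 1));  /* 0: ragged extra words  */
        return 0;
    }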
 
@@ -2540,6 +2552,10 @@ checkBlackHoles (Capability *cap)
     prev = &blackhole_queue;
     t = blackhole_queue;
     while (t != END_TSO_QUEUE) {
+        if (t->what_next == ThreadRelocated) {
+            t = t->_link;
+            continue;
+        }
        ASSERT(t->why_blocked == BlockedOnBlackHole);
        type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
        if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
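The added check above handles TSOs whose stacks have moved: a ThreadRelocated TSO is just a forwarding stub whose _link points at the new object, so the walk follows the link instead of asserting on a stale queue entry. A generic sketch of this skip-the-forwarding-stub traversal, with hypothetical node and state names:

    #include <stddef.h>

    enum { LIVE, RELOCATED };

    typedef struct Node {
        int state;
        struct Node *link;   /* next in queue, or forwarding pointer */
    } Node;

    /* Walk the queue; for relocated entries, follow the forwarding
       pointer rather than treating the stale copy as a live member. */
    static void walk(Node *q)
    {
        for (Node *t = q; t != NULL; ) {
            if (t->state == RELOCATED) {
                t = t->link;     /* jump to the moved object */
                continue;
            }
            /* ... inspect live node t ... */
            t = t->link;
        }
    }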