start finalizers on the current Capability rather than last_free_capability
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 38e3a3c..040d16f 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -32,6 +32,7 @@
 #include "Proftimer.h"
 #include "ProfHeap.h"
 #include "GC.h"
+#include "Weak.h"
 
 /* PARALLEL_HASKELL includes go here */
 
@@ -281,6 +282,12 @@ schedule (Capability *initialCapability, Task *task)
              "### NEW SCHEDULER LOOP (task: %p, cap: %p)",
              task, initialCapability);
 
+  if (running_finalizers) {
+      errorBelch("error: a C finalizer called back into Haskell.\n"
+                 "   use Foreign.Concurrent.newForeignPtr for Haskell finalizers.");
+      stg_exit(EXIT_FAILURE);
+  }
+
   schedulePreLoop();
 
   // -----------------------------------------------------------
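The guard above catches a C finalizer that calls back into Haskell and so
re-enters the scheduler; C finalizers run at points where the RTS cannot
safely start new Haskell computations, which is why the error message
directs Haskell finalizers to Foreign.Concurrent.newForeignPtr.  A minimal
sketch of the flag's presumed counterpart in rts/Weak.c (the helper name
and signature here are assumptions, not the actual source): the flag
brackets the user-supplied finalizer, so any re-entry into schedule()
inside that window is detected.

    /* rts/Weak.c (sketch; helper name and signature are assumptions) */
    rtsBool running_finalizers = rtsFalse;

    void
    runCFinalizer (void (*fn)(void *), void *ptr)
    {
        running_finalizers = rtsTrue;
        fn(ptr);                      /* the user-supplied C finalizer */
        running_finalizers = rtsFalse;
    }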
@@ -737,6 +744,7 @@ scheduleYield (Capability **pcap, Task *task)
     // if we have work, and we don't need to give up the Capability, continue.
     if (!shouldYieldCapability(cap,task) && 
         (!emptyRunQueue(cap) ||
+         !emptyWakeupQueue(cap) ||
          blackholes_need_checking ||
          sched_state >= SCHED_INTERRUPTING))
         return;
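Adding !emptyWakeupQueue(cap) to this condition keeps the Capability
running when another Capability has handed it woken threads but its own
run queue happens to be empty; without the test it could yield even
though work had just arrived.  For reference, emptyWakeupQueue() is
presumably just an emptiness test on the queue head (a sketch, assuming
the wakeup queue is a TSO list terminated by END_TSO_QUEUE):

    /* Capability.h (sketch; the field name is an assumption) */
    INLINE_HEADER rtsBool
    emptyWakeupQueue (Capability *cap)
    {
        return (cap->wakeup_queue_hd == END_TSO_QUEUE);
    }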
@@ -1260,7 +1268,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
               "--<< thread %ld (%s) stopped: HeapOverflow",
               (long)t->id, whatNext_strs[t->what_next]);
 
-    if (cap->context_switch) {
+    if (cap->r.rHpLim == NULL || cap->context_switch) {
         // Sometimes we miss a context switch: e.g. when calling
         // primitives in a tight loop, MAYBE_GC() doesn't check the
         // context-switch flag, so we end up waiting for a GC.
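The extra rHpLim test matters because zeroing the heap limit is how the
RTS interrupts running Haskell code from outside: the thread fails its
next heap check and returns to the scheduler as a HeapOverflow even
though the heap is not actually exhausted.  Recognising a NULL rHpLim as
a context switch avoids starting a needless GC in that case.  The
interrupting side presumably looks something like this (a sketch of the
idiom; the helper name is an assumption):

    /* sketch of the requesting side (name assumed) */
    void
    contextSwitchCapability (Capability *cap)
    {
        cap->r.rHpLim = NULL;     /* next heap check fails immediately */
        cap->context_switch = 1;  /* ...and is treated as a switch */
    }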
@@ -1402,6 +1410,12 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
     debugTrace(DEBUG_sched, "--++ thread %lu (%s) finished", 
               (unsigned long)t->id, whatNext_strs[t->what_next]);
 
+    // blocked exceptions can now complete, even if the thread was in
+    // blocked mode (see #2910).  This unconditionally calls
+    // lockTSO(), which ensures that we don't miss any threads that
+    // are engaged in throwTo() with this thread as a target.
+    awakenBlockedExceptionQueue (cap, t);
+
       //
       // Check whether the thread that just completed was a bound
       // thread, and if so return with the result.  
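The race this closes: a thread can finish while another thread is still
inside throwTo() targeting it, with the exception parked on the target's
blocked_exceptions queue.  Waking that queue under lockTSO() serialises
with the thrower, so no throwTo() caller is left blocked against a dead
thread.  A simplified sketch of the shape the comment above implies for
awakenBlockedExceptionQueue() (not the verbatim source):

    /* RaiseAsync.c (sketch) */
    void
    awakenBlockedExceptionQueue (Capability *cap, StgTSO *tso)
    {
        lockTSO(tso);               /* excludes a concurrent throwTo() */
        awakenBlockedQueue(cap, tso->blocked_exceptions);
        tso->blocked_exceptions = END_TSO_QUEUE;
        unlockTSO(tso);
    }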
@@ -1602,6 +1616,30 @@ delete_threads_and_gc:
     GarbageCollect(force_major || heap_census, 0, cap);
 #endif
 
+    if (recent_activity == ACTIVITY_INACTIVE && force_major)
+    {
+        // We are doing a GC because the system has been idle for a
+        // timeslice and we need to check for deadlock.  Record the
+        // fact that we've done a GC and turn off the timer signal;
+        // it will get re-enabled if we run any threads after the GC.
+        recent_activity = ACTIVITY_DONE_GC;
+        stopTimer();
+    }
+    else
+    {
+        // the GC might have taken long enough for the timer to set
+        // recent_activity = ACTIVITY_INACTIVE, but we aren't
+        // necessarily deadlocked:
+        recent_activity = ACTIVITY_YES;
+    }
+
+#if defined(THREADED_RTS)
+    if (gc_type == PENDING_GC_PAR)
+    {
+        releaseGCThreads(cap);
+    }
+#endif
+
     if (heap_census) {
         debugTrace(DEBUG_sched, "performing heap census");
         heapCensus();
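This block is the scheduler's half of a handshake with the timer: the
tick handler demotes recent_activity towards ACTIVITY_INACTIVE while
nothing runs, the scheduler answers with a deadlock-detection GC and
parks in ACTIVITY_DONE_GC with the timer stopped, and running any thread
re-arms both.  A simplified sketch of the timer side (the exact state
transitions and countdown are assumptions):

    /* Timer.c (sketch) */
    static void
    handle_tick (int unused STG_UNUSED)
    {
        switch (recent_activity) {
        case ACTIVITY_YES:
            /* something ran recently; start the idle countdown */
            recent_activity = ACTIVITY_MAYBE_NO;
            break;
        case ACTIVITY_MAYBE_NO:
            /* a full timeslice with no activity: wake the scheduler
             * so it runs the deadlock-detection GC above */
            recent_activity = ACTIVITY_INACTIVE;
            wakeUpRts();
            break;
        default:
            /* ACTIVITY_INACTIVE or ACTIVITY_DONE_GC: nothing to do */
            break;
        }
    }

The else branch above also covers a subtlety the old code missed: a long
GC can itself outlast a timeslice, so recent_activity may read
ACTIVITY_INACTIVE merely because we were collecting; resetting it to
ACTIVITY_YES stops that from being misread as a deadlock.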
@@ -1633,16 +1671,6 @@ delete_threads_and_gc:
     balanceSparkPoolsCaps(n_capabilities, capabilities);
 #endif
 
-    if (force_major)
-    {
-        // We've just done a major GC and we don't need the timer
-        // signal turned on any more (#1623).
-        // NB. do this *before* releasing the Capabilities, to avoid
-        // deadlocks!
-        recent_activity = ACTIVITY_DONE_GC;
-        stopTimer();
-    }
-
 #if defined(THREADED_RTS)
     if (gc_type == PENDING_GC_SEQ) {
         // release our stash of capabilities.
@@ -1976,7 +2004,10 @@ resumeThread (void *task_)
     debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
     
     if (tso->why_blocked == BlockedOnCCall) {
-       awakenBlockedExceptionQueue(cap,tso);
+        // avoid locking the TSO if we don't have to
+        if (tso->blocked_exceptions != END_TSO_QUEUE) {
+            awakenBlockedExceptionQueue(cap,tso);
+        }
        tso->flags &= ~(TSO_BLOCKEX | TSO_INTERRUPTIBLE);
     }
     
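resumeThread() runs on every return from a safe foreign call, so this is
a hot path.  lockTSO() is (presumably) the usual closure spin lock taken
on the TSO's header word, i.e. an atomic exchange that may spin; testing
blocked_exceptions first skips that cost in the common case where
nothing was thrown at the thread while it was in C.  A sketch of the
assumed locking primitive:

    /* SMP.h (sketch; assumes lockTSO() wraps the closure lock) */
    INLINE_HEADER void
    lockTSO (StgTSO *tso)
    {
        lockClosure((StgClosure *)tso);   /* atomic xchg; may spin */
    }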
@@ -2181,22 +2212,16 @@ exitScheduler(
 {
     Task *task = NULL;
 
-#if defined(THREADED_RTS)
     ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
     RELEASE_LOCK(&sched_mutex);
-#endif
 
     // If we haven't killed all the threads yet, do it now.
     if (sched_state < SCHED_SHUTTING_DOWN) {
        sched_state = SCHED_INTERRUPTING;
-#if defined(THREADED_RTS)
         waitForReturnCapability(&task->cap,task);
        scheduleDoGC(task->cap,task,rtsFalse);    
         releaseCapability(task->cap);
-#else
-       scheduleDoGC(&MainCapability,task,rtsFalse);    
-#endif
     }
     sched_state = SCHED_SHUTTING_DOWN;