Add several new record features
[ghc-hetmet.git] / rts/Schedule.c
index e328b5b..f3d956a 100644
 #include "ThreadLabels.h"
 #include "LdvProfile.h"
 #include "Updates.h"
-#ifdef PROFILING
 #include "Proftimer.h"
 #include "ProfHeap.h"
-#endif
 #if defined(GRAN) || defined(PARALLEL_HASKELL)
 # include "GranSimRts.h"
 # include "GranSim.h"
@@ -216,7 +214,7 @@ static rtsBool scheduleHandleYield( Capability *cap, StgTSO *t,
 static void scheduleHandleThreadBlocked( StgTSO *t );
 static rtsBool scheduleHandleThreadFinished( Capability *cap, Task *task,
                                             StgTSO *t );
-static rtsBool scheduleDoHeapProfile(rtsBool ready_to_gc);
+static rtsBool scheduleNeedHeapProfile(rtsBool ready_to_gc);
 static Capability *scheduleDoGC(Capability *cap, Task *task,
                                rtsBool force_major);
 
@@ -572,9 +570,7 @@ run_thread:
     debugTrace(DEBUG_sched, "-->> running thread %ld %s ...", 
                              (long)t->id, whatNext_strs[t->what_next]);
 
-#if defined(PROFILING)
     startHeapProfTimer();
-#endif
 
     // Check for exceptions blocked on this thread
     maybePerformBlockedException (cap, t);
@@ -667,8 +663,8 @@ run_thread:
     // ----------------------------------------------------------------------
     
     // Costs for the scheduler are assigned to CCS_SYSTEM
-#if defined(PROFILING)
     stopHeapProfTimer();
+#if defined(PROFILING)
     CCCS = CCS_SYSTEM;
 #endif
     
@@ -705,8 +701,7 @@ run_thread:
       barf("schedule: invalid thread return code %d", (int)ret);
     }
 
-    if (scheduleDoHeapProfile(ready_to_gc)) { ready_to_gc = rtsFalse; }
-    if (ready_to_gc) {
+    if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
       cap = scheduleDoGC(cap,task,rtsFalse);
     }
   } /* end of while() */
@@ -862,7 +857,8 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
 static void
 scheduleStartSignalHandlers(Capability *cap)
 {
-    if (signals_pending()) { // safe outside the lock
+    if (RtsFlags.MiscFlags.install_signal_handlers && signals_pending()) {
+        // safe outside the lock
        startSignalHandlers(cap);
     }
 }
@@ -985,7 +981,7 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
         * for signals to arrive rather than bombing out with a
         * deadlock.
         */
-       if ( anyUserHandlers() ) {
+       if ( RtsFlags.MiscFlags.install_signal_handlers && anyUserHandlers() ) {
            debugTrace(DEBUG_sched,
                       "still deadlocked, waiting for signals...");
 
@@ -1919,36 +1915,21 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
 }
 
 /* -----------------------------------------------------------------------------
- * Perform a heap census, if PROFILING
+ * Perform a heap census
  * -------------------------------------------------------------------------- */
 
 static rtsBool
-scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED )
+scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
 {
-#if defined(PROFILING)
     // When we have +RTS -i0 and we're heap profiling, do a census at
     // every GC.  This lets us get repeatable runs for debugging.
     if (performHeapProfile ||
        (RtsFlags.ProfFlags.profileInterval==0 &&
         RtsFlags.ProfFlags.doHeapProfile && ready_to_gc)) {
-
-       // checking black holes is necessary before GC, otherwise
-       // there may be threads that are unreachable except by the
-       // blackhole queue, which the GC will consider to be
-       // deadlocked.
-       scheduleCheckBlackHoles(&MainCapability);
-
-       debugTrace(DEBUG_sched, "garbage collecting before heap census");
-       GarbageCollect(rtsTrue);
-
-       debugTrace(DEBUG_sched, "performing heap census");
-       heapCensus();
-
-       performHeapProfile = rtsFalse;
-       return rtsTrue;  // true <=> we already GC'd
+        return rtsTrue;
+    } else {
+        return rtsFalse;
     }
-#endif
-    return rtsFalse;
 }
 
 /* -----------------------------------------------------------------------------
@@ -1959,6 +1940,7 @@ static Capability *
 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 {
     StgTSO *t;
+    rtsBool heap_census;
 #ifdef THREADED_RTS
     static volatile StgWord waiting_for_gc;
     rtsBool was_waiting;
@@ -2066,6 +2048,8 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
        deleteAllThreads(&capabilities[0]);
        sched_state = SCHED_SHUTTING_DOWN;
     }
+    
+    heap_census = scheduleNeedHeapProfile(rtsTrue);
 
     /* everybody back, start the GC.
      * Could do it in this thread, or signal a condition var
@@ -2075,8 +2059,14 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 #if defined(THREADED_RTS)
     debugTrace(DEBUG_sched, "doing GC");
 #endif
-    GarbageCollect(force_major);
+    GarbageCollect(force_major || heap_census);
     
+    if (heap_census) {
+        debugTrace(DEBUG_sched, "performing heap census");
+        heapCensus();
+        performHeapProfile = rtsFalse;
+    }
+
 #if defined(THREADED_RTS)
     // release our stash of capabilities.
     for (i = 0; i < n_capabilities; i++) {
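
For orientation only, not part of the patch: a minimal self-contained sketch of the census-on-GC flow the hunks above establish, with stub flags and a stub GarbageCollect/heapCensus standing in for the real RTS definitions (everything outside the names taken from the diff is hypothetical).

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the RTS state consulted above; in the real RTS these live
 * in RtsFlags.ProfFlags and the profiling machinery. */
static bool performHeapProfile  = false;  /* a census has been requested   */
static bool profileIntervalZero = true;   /* profileInterval == 0 (+RTS -i0) */
static bool doHeapProfile       = true;   /* heap profiling is enabled     */

/* Mirrors scheduleNeedHeapProfile(): it only decides, it no longer GCs. */
static bool scheduleNeedHeapProfile(bool ready_to_gc)
{
    return performHeapProfile ||
           (profileIntervalZero && doHeapProfile && ready_to_gc);
}

/* Stubs for the real collector and census. */
static void GarbageCollect(bool force_major) { printf("GC, major=%d\n", force_major); }
static void heapCensus(void)                 { printf("heap census\n"); }

/* Mirrors the relevant tail of scheduleDoGC(): collect first, then census. */
static void scheduleDoGC(bool force_major)
{
    bool heap_census = scheduleNeedHeapProfile(true);
    GarbageCollect(force_major || heap_census);
    if (heap_census) {
        heapCensus();
        performHeapProfile = false;
    }
}

int main(void)
{
    bool ready_to_gc = true;
    /* The scheduler loop (earlier hunk) now only asks one question: */
    if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
        scheduleDoGC(false);
    }
    return 0;
}

The shape of the refactor is visible here: scheduleNeedHeapProfile() merely reports that a census is due, and the collection and census then run back-to-back inside scheduleDoGC(), so the census always sees a freshly collected heap.
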
@@ -2110,7 +2100,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
  * Singleton fork(). Do not copy any running threads.
  * ------------------------------------------------------------------------- */
 
-StgInt
+pid_t
 forkProcess(HsStablePtr *entry
 #ifndef FORKPROCESS_PRIMOP_SUPPORTED
            STG_UNUSED
@@ -2708,7 +2698,9 @@ GetRoots( evac_fn evac )
     
 #if defined(RTS_USER_SIGNALS)
     // mark the signal handlers (signals should be already blocked)
-    markSignalHandlers(evac);
+    if (RtsFlags.MiscFlags.install_signal_handlers) {
+        markSignalHandlers(evac);
+    }
 #endif
 }
 
@@ -2769,7 +2761,12 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
   // while we are moving the TSO:
   lockClosure((StgClosure *)tso);
 
-  if (tso->stack_size >= tso->max_stack_size) {
+  if (tso->stack_size >= tso->max_stack_size && !(tso->flags & TSO_BLOCKEX)) {
+      // NB. never raise a StackOverflow exception if the thread is
+      // inside Control.Exception.block.  It is impractical to protect
+      // against stack overflow exceptions, since virtually anything
+      // can raise one (even 'catch'), so this is the only sensible
+      // thing to do here.  See bug #767.
 
       debugTrace(DEBUG_gc,
                 "threadStackOverflow of TSO %ld (%p): stack too large (now %ld; max is %ld)",