don't make -ddump-if-trace imply -no-recomp
ghc-hetmet.git: rts/Schedule.c

diff --git a/rts/Schedule.c b/rts/Schedule.c
index 441e979..1bd68c2 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -593,7 +593,26 @@ run_thread:
 
     dirtyTSO(t);
 
-    recent_activity = ACTIVITY_YES;
+#if defined(THREADED_RTS)
+    if (recent_activity == ACTIVITY_DONE_GC) {
+        // ACTIVITY_DONE_GC means we turned off the timer signal to
+        // conserve power (see #1623).  Re-enable it here.
+        nat prev;
+        prev = xchg((P_)&recent_activity, ACTIVITY_YES);
+        if (prev == ACTIVITY_DONE_GC) {
+            startTimer();
+        }
+    } else {
+        recent_activity = ACTIVITY_YES;
+    }
+#else
+    // Non-threaded RTS: no race to worry about here, but the timer
+    // stopped by the idle GC still needs to be restarted.
+    if (recent_activity == ACTIVITY_DONE_GC) {
+        startTimer();
+    }
+    recent_activity = ACTIVITY_YES;
+#endif
 
     switch (prev_what_next) {
        
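
A note on the xchg() above: several Capabilities' scheduler loops can observe
ACTIVITY_DONE_GC at once, and the atomic exchange guarantees that exactly one
of them sees the old value and so calls startTimer() exactly once. A minimal
standalone sketch of that claim pattern, with C11 atomics standing in for the
RTS's xchg() (not RTS code):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { ACT_YES, ACT_DONE_GC };
    static atomic_int activity = ACT_DONE_GC;

    static void on_run_thread(void)
    {
        if (atomic_load(&activity) == ACT_DONE_GC) {
            /* only one caller can see the old value here */
            if (atomic_exchange(&activity, ACT_YES) == ACT_DONE_GC) {
                puts("startTimer()");
            }
        } else {
            atomic_store(&activity, ACT_YES);
        }
    }

    int main(void)
    {
        on_run_thread();   /* prints startTimer() once              */
        on_run_thread();   /* timer already running: prints nothing */
        return 0;
    }
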
@@ -974,6 +986,8 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
        cap = scheduleDoGC (cap, task, rtsTrue/*force  major GC*/);
 
        recent_activity = ACTIVITY_DONE_GC;
+        // disable timer signals (see #1623)
+        stopTimer();
        
        if ( !emptyRunQueue(cap) ) return;
 
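
For orientation, the recent_activity states driven here and in the previous
hunk are the ones declared in rts/Schedule.h; the values and comments below
are a paraphrase of that era's header, not a quote:

    #define ACTIVITY_YES      0  /* a thread ran in the current timeslice      */
    #define ACTIVITY_MAYBE_NO 1  /* a timer tick saw no activity               */
    #define ACTIVITY_INACTIVE 2  /* idle a whole slice: triggers the forced GC */
    #define ACTIVITY_DONE_GC  3  /* idle GC done; timer stopped (above)        */
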
@@ -1620,7 +1634,16 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
     }
 #endif
       
-    pushOnRunQueue(cap,t);
+    if (context_switch) {
+        // Sometimes we miss a context switch, e.g. when calling
+        // primitives in a tight loop: MAYBE_GC() doesn't check the
+        // context-switch flag, so we end up waiting for a GC.
+        // See #1984, and concurrent/should_run/1984
+        context_switch = 0;
+        addToRunQueue(cap,t);
+    } else {
+        pushOnRunQueue(cap,t);
+    }
     return rtsTrue;
     /* actual GC is done at the end of the while loop in schedule() */
 }
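
The fix hinges on where the thread is queued. Paraphrasing the run-queue
helpers (assuming, as the branch implies, that addToRunQueue queues at the
back while pushOnRunQueue queues at the front):

    pushOnRunQueue(cap,t);  /* front: t resumes immediately after the GC  */
    addToRunQueue(cap,t);   /* back: other runnable threads get in first, */
                            /* so the missed context switch happens now   */
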
@@ -2123,16 +2146,34 @@ forkProcess(HsStablePtr *entry
     // ToDo: for SMP, we should probably acquire *all* the capabilities
     cap = rts_lock();
     
+    // no funny business: hold locks while we fork, otherwise if some
+    // other thread is holding a lock when the fork happens, the data
+    // structure protected by the lock will forever be in an
+    // inconsistent state in the child.  See also #1391.
+    ACQUIRE_LOCK(&sched_mutex);
+    ACQUIRE_LOCK(&cap->lock);
+    ACQUIRE_LOCK(&cap->running_task->lock);
+
     pid = fork();
     
     if (pid) { // parent
        
+        RELEASE_LOCK(&sched_mutex);
+        RELEASE_LOCK(&cap->lock);
+        RELEASE_LOCK(&cap->running_task->lock);
+
        // just return the pid
        rts_unlock(cap);
        return pid;
        
     } else { // child
        
+#if defined(THREADED_RTS)
+        initMutex(&sched_mutex);
+        initMutex(&cap->lock);
+        initMutex(&cap->running_task->lock);
+#endif
+
        // Now, all OS threads except the thread that forked are
        // stopped.  We need to stop all Haskell threads, including
        // those involved in foreign calls.  Also we need to delete
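
The lock discipline above is the standard hold-everything-across-fork()
pattern: fork() clones only the calling thread, so any mutex owned by another
thread at that instant stays locked forever in the child. A standalone POSIX
sketch of the same pattern, including the reinitialisation the child does
here with initMutex() (an illustration, not RTS code):

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    int main(void)
    {
        /* quiesce: once we hold it, no other thread is mid-update */
        pthread_mutex_lock(&lock);

        pid_t pid = fork();
        if (pid == 0) {                      /* child */
            /* as with initMutex() above: give the lock a fresh,
             * unlocked state instead of unlocking an inherited one */
            pthread_mutex_init(&lock, NULL);
            puts("child: lock usable again");
            _exit(0);
        }

        pthread_mutex_unlock(&lock);         /* parent */
        waitpid(pid, NULL, 0);
        return 0;
    }
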
@@ -2170,6 +2211,9 @@ forkProcess(HsStablePtr *entry
        ACQUIRE_LOCK(&sched_mutex);
        for (task = all_tasks; task != NULL; task=task->all_link) {
            if (task != cap->running_task) {
+#if defined(THREADED_RTS)
+                initMutex(&task->lock); // see #1391
+#endif
                discardTask(task);
            }
        }
@@ -2185,6 +2229,7 @@ forkProcess(HsStablePtr *entry
 
         // On Unix, all timers are reset in the child, so we need to start
         // the timer again.
+        initTimer();
         startTimer();
 
        cap = rts_evalStableIO(cap, entry, NULL);  // run the action
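
The initTimer()/startTimer() pair is needed because interval timers do not
survive fork(): the child starts with its timers disarmed. A standalone
demonstration with setitimer(2) (not RTS code; the 20ms tick is just an
illustrative value):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/time.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        struct itimerval it;
        signal(SIGALRM, SIG_IGN);           /* ignore ticks in this demo */

        memset(&it, 0, sizeof(it));
        it.it_interval.tv_usec = 20000;     /* 20ms periodic tick */
        it.it_value = it.it_interval;
        setitimer(ITIMER_REAL, &it, NULL);  /* armed in the parent */

        if (fork() == 0) {
            getitimer(ITIMER_REAL, &it);
            /* prints 0: the child must re-arm its own timer */
            printf("child it_value: %ld us\n", (long)it.it_value.tv_usec);
            _exit(0);
        }
        wait(NULL);
        return 0;
    }
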
@@ -2531,6 +2576,7 @@ initScheduler(void)
 
   context_switch = 0;
   sched_state    = SCHED_RUNNING;
+  recent_activity = ACTIVITY_YES;
 
 #if defined(THREADED_RTS)
   /* Initialise the mutex and condition variables used by
@@ -2577,7 +2623,13 @@ initScheduler(void)
 }
 
 void
-exitScheduler( void )
+exitScheduler(
+    rtsBool wait_foreign
+#if !defined(THREADED_RTS)
+                         __attribute__((unused))
+#endif
+)
+               /* see Capability.c, shutdownCapability() */
 {
     Task *task = NULL;
 
@@ -2599,7 +2651,7 @@ exitScheduler( void )
        nat i;
        
        for (i = 0; i < n_capabilities; i++) {
-           shutdownCapability(&capabilities[i], task);
+           shutdownCapability(&capabilities[i], task, wait_foreign);
        }
        boundTaskExiting(task);
        stopTaskManager();
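
For reference, the Capability.c side this call now reaches: the
three-argument call is confirmed by the hunk above, but the prototype below
and the meaning of the flag are inferred (wait_foreign presumably tells
shutdownCapability whether to wait for Tasks still in foreign calls to
return before tearing the Capability down):

    /* assumed prototype, matching the call above */
    void shutdownCapability (Capability *cap, Task *task, rtsBool wait_foreign);
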
@@ -2794,7 +2846,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
             "increasing stack size from %ld words to %d.",
             (long)tso->stack_size, new_stack_size);
 
-  dest = (StgTSO *)allocate(new_tso_size);
+  dest = (StgTSO *)allocateLocal(cap,new_tso_size);
   TICK_ALLOC_TSO(new_stack_size,0);
 
   /* copy the TSO block and the old stack into the new area */
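
The switch to allocateLocal() is a threaded-RTS optimisation. Paraphrasing
the storage-manager entry points of this era (signatures summarized, so
treat them as an assumption): allocate() draws from a global pool under the
storage-manager lock, while allocateLocal() bumps the given Capability's own
nursery without locking; a stack overflow is always handled on a specific
Capability, so the local allocator suffices:

    StgPtr allocate      (nat n);                   /* global pool, locked    */
    StgPtr allocateLocal (Capability *cap, nat n);  /* cap's nursery, no lock */
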
@@ -2906,7 +2958,7 @@ checkBlackHoles (Capability *cap)
     t = blackhole_queue;
     while (t != END_TSO_QUEUE) {
        ASSERT(t->why_blocked == BlockedOnBlackHole);
-       type = get_itbl(t->block_info.closure)->type;
+       type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
        if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
            IF_DEBUG(sanity,checkTSO(t));
            t = unblockOne(cap, t);
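
UNTAG_CLOSURE is needed because, with pointer tagging, the low bits of a
closure pointer can carry information (e.g. which constructor it points to),
so the pointer must be masked before get_itbl() dereferences it. A minimal
standalone sketch (the mask is 3 on 32-bit and 7 on 64-bit targets in the
real macros):

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_MASK ((uintptr_t)(sizeof(void *) == 8 ? 7 : 3))

    static void *untag(void *p)
    {
        return (void *)((uintptr_t)p & ~TAG_MASK);
    }

    int main(void)
    {
        static long long obj[2];          /* stands in for a heap closure  */
        void *tagged = (char *)obj + 1;   /* tag 1: e.g. first constructor */
        printf("recovered original: %s\n",
               untag(tagged) == (void *)obj ? "yes" : "no");
        return 0;
    }
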
@@ -3087,10 +3139,10 @@ findRetryFrameHelper (StgTSO *tso)
        return CATCH_RETRY_FRAME;
       
     case CATCH_STM_FRAME: {
-        debugTrace(DEBUG_stm,
-                  "found CATCH_STM_FRAME at %p during retry", p);
         StgTRecHeader *trec = tso -> trec;
        StgTRecHeader *outer = stmGetEnclosingTRec(trec);
+        debugTrace(DEBUG_stm,
+                  "found CATCH_STM_FRAME at %p during retry", p);
         debugTrace(DEBUG_stm, "trec=%p outer=%p", trec, outer);
        stmAbortTransaction(tso -> cap, trec);
        stmFreeAbortedTRec(tso -> cap, trec);
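
The reordering in this last hunk looks like a portability fix rather than a
behaviour change (the motivation is not stated in the hunk itself): C89
forbids declarations after statements in a block, and the debugTrace() call
is a statement, so declaring trec and outer first keeps older compilers (and
gcc -Wdeclaration-after-statement) happy. In miniature:

    extern void stmt(void);

    void c99_only(void) { stmt(); int x = 0; (void)x; }  /* rejected by C89     */
    void portable(void) { int x = 0; stmt(); (void)x; }  /* fine in C89 and C99 */
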