merge GHC HEAD
[ghc-hetmet.git] / rts / Schedule.c
index c115d2b..fd5536b 100644
@@ -484,7 +484,17 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
-    traceEventStopThread(cap, t, ret);
+    if (ret == ThreadBlocked) {
+        if (t->why_blocked == BlockedOnBlackHole) {
+            StgTSO *owner = blackHoleOwner(t->block_info.bh->bh);
+            traceEventStopThread(cap, t, t->why_blocked + 6,
+                                 owner != NULL ? owner->id : 0);
+        } else {
+            traceEventStopThread(cap, t, t->why_blocked + 6, 0);
+        }
+    } else {
+        traceEventStopThread(cap, t, ret, 0);
+    }
 
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     ASSERT(t->cap == cap);
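
The "+ 6" above folds the thread's why_blocked reason into the stop-status
field of the stop-thread event, together with the id of the black hole's
owner when one is known. A consumer-side sketch of that encoding, assuming
the six basic stop statuses (ending with THREAD_SUSPENDED_FOREIGN_CALL)
occupy codes 1..6 so blocked reasons land at 7 and above; both helper
names are hypothetical:

/* Assumption: basic stop statuses use codes 1..6, so any status above
 * 6 is why_blocked + 6, matching the encoding in the hunk above. */
#define BASIC_STATUS_MAX 6    /* assumed: THREAD_SUSPENDED_FOREIGN_CALL */

static int isBlockedStatus (int status)      /* hypothetical helper */
{
    return status > BASIC_STATUS_MAX;
}

static int statusToWhyBlocked (int status)   /* hypothetical helper */
{
    return status - BASIC_STATUS_MAX;        /* undo the +6 */
}
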
@@ -941,14 +951,38 @@ static void
 scheduleProcessInbox (Capability *cap USED_IF_THREADS)
 {
 #if defined(THREADED_RTS)
-    Message *m;
+    Message *m, *next;
+    int r;
 
     while (!emptyInbox(cap)) {
-        ACQUIRE_LOCK(&cap->lock);
+        if (cap->r.rCurrentNursery->link == NULL ||
+            g0->n_new_large_words >= large_alloc_lim) {
+            scheduleDoGC(cap, cap->running_task, rtsFalse);
+        }
+
+        // don't use a blocking acquire; if the lock is held by
+        // another thread then just carry on.  This seems to avoid
+        // getting stuck in a message ping-pong situation with other
+        // processors.  We'll check the inbox again later anyway.
+        //
+        // We should really use a more efficient queue data structure
+        // here.  The trickiness is that we must ensure a Capability
+        // never goes idle if the inbox is non-empty, which is why we
+        // use cap->lock (cap->lock is released as the last thing
+        // before going idle; see Capability.c:releaseCapability()).
+        r = TRY_ACQUIRE_LOCK(&cap->lock);
+        if (r != 0) return;
+
         m = cap->inbox;
-        cap->inbox = m->link;
+        cap->inbox = (Message*)END_TSO_QUEUE;
+
         RELEASE_LOCK(&cap->lock);
-        executeMessage(cap, (Message *)m);
+
+        while (m != (Message*)END_TSO_QUEUE) {
+            next = m->link;
+            executeMessage(cap, m);
+            m = next;
+        }
     }
 #endif
 }
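
The rewritten loop drains the whole inbox in one go: take the lock with a
try-acquire, detach the chain, release, then execute the messages with the
lock dropped. The same pattern as a standalone sketch, with the RTS types
simplified (NULL stands in for END_TSO_QUEUE; executeMsg() is a
hypothetical stand-in for executeMessage()):

typedef struct Msg_ { struct Msg_ *link; /* payload elided */ } Msg;

static void drainInbox (Mutex *lock, Msg **inbox)
{
    Msg *m, *next;

    if (TRY_ACQUIRE_LOCK(lock) != 0) return;  /* contended: retry later */

    m = *inbox;            /* detach the entire chain in O(1) */
    *inbox = NULL;
    RELEASE_LOCK(lock);

    while (m != NULL) {    /* run the messages without holding the lock */
        next = m->link;
        executeMsg(m);
        m = next;
    }
}
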
@@ -1413,6 +1447,12 @@ delete_threads_and_gc:
         recent_activity = ACTIVITY_YES;
     }
 
+    if (heap_census) {
+        debugTrace(DEBUG_sched, "performing heap census");
+        heapCensus();
+        performHeapProfile = rtsFalse;
+    }
+
 #if defined(THREADED_RTS)
     if (gc_type == PENDING_GC_PAR)
     {
@@ -1420,12 +1460,6 @@ delete_threads_and_gc:
     }
 #endif
 
-    if (heap_census) {
-        debugTrace(DEBUG_sched, "performing heap census");
-        heapCensus();
-       performHeapProfile = rtsFalse;
-    }
-
     if (heap_overflow && sched_state < SCHED_INTERRUPTING) {
         // GC set the heap_overflow flag, so we should proceed with
         // an orderly shutdown now.  Ultimately we want the main
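
Taken together, the two hunks above move the census from after the
PENDING_GC_PAR block to before it, so the profile is taken while the heap
is still quiescent. A sketch of the resulting tail of scheduleDoGC(); the
body of the PENDING_GC_PAR branch is elided in the diff, so treating it as
the point where the other capabilities resume is an assumption:

if (heap_census) {
    debugTrace(DEBUG_sched, "performing heap census");
    heapCensus();                  /* nothing else is mutating yet */
    performHeapProfile = rtsFalse;
}
#if defined(THREADED_RTS)
if (gc_type == PENDING_GC_PAR) {
    /* ... assumed: release the other capabilities; mutation resumes ... */
}
#endif
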
@@ -1724,7 +1758,7 @@ suspendThread (StgRegTable *reg, rtsBool interruptible)
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
-  traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL);
+  traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL, 0);
 
   // XXX this might not be necessary --SDM
   tso->what_next = ThreadRunGHC;
@@ -1840,9 +1874,9 @@ scheduleThread(Capability *cap, StgTSO *tso)
 void
 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
 {
-#if defined(THREADED_RTS)
     tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
                              // move this thread from now on.
+#if defined(THREADED_RTS)
     cpu %= RtsFlags.ParFlags.nNodes;
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
@@ -1996,16 +2030,7 @@ exitScheduler (rtsBool wait_foreign USED_IF_THREADS)
     }
     sched_state = SCHED_SHUTTING_DOWN;
 
-#if defined(THREADED_RTS)
-    { 
-       nat i;
-       
-       for (i = 0; i < n_capabilities; i++) {
-            ASSERT(task->incall->tso == NULL);
-           shutdownCapability(&capabilities[i], task, wait_foreign);
-       }
-    }
-#endif
+    shutdownCapabilities(task, wait_foreign);
 
     boundTaskExiting(task);
 }
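
shutdownCapabilities() presumably packages up exactly the loop deleted
above, keeping the THREADED_RTS conditional out of exitScheduler(). A
sketch of such a helper, reconstructed from the removed lines (its actual
home, likely Capability.c, is an assumption):

void
shutdownCapabilities (Task *task, rtsBool wait_foreign)
{
#if defined(THREADED_RTS)
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        ASSERT(task->incall->tso == NULL);
        shutdownCapability(&capabilities[i], task, wait_foreign);
    }
#endif
}
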
@@ -2035,6 +2060,16 @@ freeScheduler( void )
 #endif
 }
 
+void markScheduler (evac_fn evac USED_IF_NOT_THREADS, 
+                    void *user USED_IF_NOT_THREADS)
+{
+#if !defined(THREADED_RTS)
+    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
+    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
+    evac(user, (StgClosure **)(void *)&sleeping_queue);
+#endif
+}
+
 /* -----------------------------------------------------------------------------
    performGC
 
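markScheduler() gives the collector a way to treat the non-threaded RTS's
blocked and sleeping queues as roots. The diff does not show the call
site; a plausible use from the root-marking phase would be:

/* Assumed call during root marking in GC.c; mark_root and gct stand for
 * the usual root-marking function and per-thread gc state there. */
markScheduler(mark_root, gct);
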
@@ -2220,6 +2255,7 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
             return CATCH_STM_FRAME;
            
         case UNDERFLOW_FRAME:
+            tso->stackobj->sp = p;
             threadStackUnderflow(cap,tso);
             p = tso->stackobj->sp;
             continue;
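
The added store publishes the walker's position before the underflow is
processed: the local p has advanced down the stack while tso->stackobj->sp
still holds its old value. That threadStackUnderflow() consults
tso->stackobj->sp is inferred from the fix rather than shown here; the
sequence with that assumption spelled out:

tso->stackobj->sp = p;           /* sync the TSO's view with the walk */
threadStackUnderflow(cap, tso);  /* switch to the next stack chunk    */
p = tso->stackobj->sp;           /* continue walking in the new chunk */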