Refactoring and tidy up
[ghc-hetmet.git] / rts / Schedule.c
index c115d2b..f5cb568 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -484,7 +484,17 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
-    traceEventStopThread(cap, t, ret);
+    if (ret == ThreadBlocked) {
+        if (t->why_blocked == BlockedOnBlackHole) {
+            StgTSO *owner = blackHoleOwner(t->block_info.bh->bh);
+            traceEventStopThread(cap, t, t->why_blocked + 6,
+                                 owner != NULL ? owner->id : 0);
+        } else {
+            traceEventStopThread(cap, t, t->why_blocked + 6, 0);
+        }
+    } else {
+        traceEventStopThread(cap, t, ret, 0);
+    }
 
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     ASSERT(t->cap == cap);
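
The hunk above moves traceEventStopThread to a four-argument form: besides the stop status it now carries one extra word of detail. When a thread stops because it blocked, the specific why_blocked reason is folded into the status field (the "+ 6" shifts it into a range distinct from the ordinary return codes), and for BlockedOnBlackHole the extra word carries the owning thread's id, or 0 if the owner is unknown. A minimal sketch of that encoding, using hypothetical names (thread_stop_status, BLOCKED_STATUS_BASE) rather than the real RTS enums and event codes:

    #include <stdio.h>

    /* Hypothetical stand-ins for the RTS enums; only their shape matters here. */
    enum ret_code    { HeapOverflow, StackOverflow, ThreadYielding, ThreadBlocked, ThreadFinished };
    enum why_blocked { NotBlocked, BlockedOnMVar, BlockedOnBlackHole, BlockedOnRead };

    /* Stand-in for the literal "+ 6": pushes why_blocked values past the
       plain stop codes so both fit in a single status field. */
    #define BLOCKED_STATUS_BASE 6

    static unsigned thread_stop_status(enum ret_code ret, enum why_blocked why)
    {
        if (ret == ThreadBlocked)
            return (unsigned)why + BLOCKED_STATUS_BASE;  /* encode the blocking reason */
        return (unsigned)ret;                            /* plain scheduler return code */
    }

    int main(void)
    {
        printf("%u\n", thread_stop_status(ThreadBlocked, BlockedOnBlackHole)); /* prints 8 */
        printf("%u\n", thread_stop_status(ThreadFinished, NotBlocked));        /* prints 4 */
        return 0;
    }

A consumer of the resulting event can then tell not just that the thread stopped, but why it blocked and on whom, without a second lookup.
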
@@ -941,14 +951,38 @@ static void
 scheduleProcessInbox (Capability *cap USED_IF_THREADS)
 {
 #if defined(THREADED_RTS)
-    Message *m;
+    Message *m, *next;
+    int r;
 
     while (!emptyInbox(cap)) {
-        ACQUIRE_LOCK(&cap->lock);
+        if (cap->r.rCurrentNursery->link == NULL ||
+            g0->n_new_large_words >= large_alloc_lim) {
+            scheduleDoGC(cap, cap->running_task, rtsFalse);
+        }
+
+        // don't use a blocking acquire; if the lock is held by
+        // another thread then just carry on.  This seems to avoid
+        // getting stuck in a message ping-pong situation with other
+        // processors.  We'll check the inbox again later anyway.
+        //
+        // We should really use a more efficient queue data structure
+        // here.  The trickiness is that we must ensure a Capability
+        // never goes idle if the inbox is non-empty, which is why we
+        // use cap->lock (cap->lock is released as the last thing
+        // before going idle; see Capability.c:releaseCapability()).
+        r = TRY_ACQUIRE_LOCK(&cap->lock);
+        if (r != 0) return;
+
         m = cap->inbox;
-        cap->inbox = m->link;
+        cap->inbox = (Message*)END_TSO_QUEUE;
+
         RELEASE_LOCK(&cap->lock);
-        executeMessage(cap, (Message *)m);
+
+        while (m != (Message*)END_TSO_QUEUE) {
+            next = m->link;
+            executeMessage(cap, m);
+            m = next;
+        }
     }
 #endif
 }
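
The rewritten scheduleProcessInbox holds cap->lock only long enough to detach the whole inbox chain: TRY_ACQUIRE_LOCK gives up immediately if another thread holds the lock (the inbox gets checked again later anyway), the list head is swapped for the empty marker, the lock is released, and only then are the detached messages executed, with no lock held. Before draining, it also triggers a GC if the nursery is exhausted or the large-object allocation limit has been reached. A small self-contained sketch of the detach-under-lock, process-off-lock pattern, using pthreads and hypothetical names (msg, process_inbox) in place of the RTS types:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical message type; the RTS uses Message closures instead. */
    typedef struct msg { struct msg *link; int payload; } msg;

    static pthread_mutex_t inbox_lock = PTHREAD_MUTEX_INITIALIZER;
    static msg *inbox = NULL;          /* NULL plays the role of END_TSO_QUEUE */

    static void execute_message(msg *m) { printf("message %d\n", m->payload); }

    static void process_inbox(void)
    {
        while (inbox != NULL) {        /* unlocked emptiness check, like emptyInbox(cap) */
            /* non-blocking acquire: if someone else has the lock, come back later */
            if (pthread_mutex_trylock(&inbox_lock) != 0) return;

            msg *m = inbox;            /* detach the entire chain in O(1) */
            inbox = NULL;
            pthread_mutex_unlock(&inbox_lock);

            while (m != NULL) {        /* execute off-lock, freeing as we go */
                msg *next = m->link;
                execute_message(m);
                free(m);
                m = next;
            }
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {  /* enqueue a few messages for the demo */
            msg *m = malloc(sizeof *m);
            m->payload = i;
            m->link = inbox;
            inbox = m;
        }
        process_inbox();
        return 0;
    }

Keeping the lock for a constant-time unlink, rather than for the whole drain, is what lets other capabilities keep posting messages while this one works through the batch.
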
@@ -1724,7 +1758,7 @@ suspendThread (StgRegTable *reg, rtsBool interruptible)
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
-  traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL);
+  traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL, 0);
 
   // XXX this might not be necessary --SDM
   tso->what_next = ThreadRunGHC;
@@ -1840,9 +1874,9 @@ scheduleThread(Capability *cap, StgTSO *tso)
 void
 scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
 {
-#if defined(THREADED_RTS)
     tso->flags |= TSO_LOCKED; // we requested explicit affinity; don't
                              // move this thread from now on.
+#if defined(THREADED_RTS)
     cpu %= RtsFlags.ParFlags.nNodes;
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
@@ -2035,6 +2069,16 @@ freeScheduler( void )
 #endif
 }
 
+void markScheduler (evac_fn evac USED_IF_NOT_THREADS, 
+                    void *user USED_IF_NOT_THREADS)
+{
+#if !defined(THREADED_RTS)
+    evac(user, (StgClosure **)(void *)&blocked_queue_hd);
+    evac(user, (StgClosure **)(void *)&blocked_queue_tl);
+    evac(user, (StgClosure **)(void *)&sleeping_queue);
+#endif 
+}
+
 /* -----------------------------------------------------------------------------
    performGC
 
@@ -2220,6 +2264,7 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
             return CATCH_STM_FRAME;
            
         case UNDERFLOW_FRAME:
+            tso->stackobj->sp = p;
             threadStackUnderflow(cap,tso);
             p = tso->stackobj->sp;
             continue;
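
The one-line UNDERFLOW_FRAME change writes the walker's local stack pointer p back into tso->stackobj->sp before calling threadStackUnderflow, and reloads p afterwards, since the underflow code works on (and replaces) the current stack chunk. A toy illustration of that publish-then-reload pattern over a made-up chunked stack (chunk, walk, underflow are hypothetical names, not the RTS structures):

    #include <assert.h>
    #include <stdio.h>

    #define CHUNK_CAP 4

    /* Hypothetical chunked stack: each chunk has its own cursor, chunks are linked. */
    typedef struct chunk {
        struct chunk *next;
        int *sp;                    /* cursor within this chunk */
        int data[CHUNK_CAP];
    } chunk;

    typedef struct { chunk *cur; } stack;

    /* Drop the exhausted chunk; relies on cur->sp having just been written back. */
    static void underflow(stack *st)
    {
        assert(st->cur->sp == st->cur->data + CHUNK_CAP);
        st->cur = st->cur->next;
    }

    /* Walk the stack with a local cursor p: before calling underflow() the
       cursor is published back into the chunk, then reloaded from the new one. */
    static void walk(stack *st)
    {
        int *p = st->cur->sp;
        for (;;) {
            if (p == st->cur->data + CHUNK_CAP) {  /* this chunk is used up */
                st->cur->sp = p;                   /* write the cursor back first */
                if (st->cur->next == NULL) return;
                underflow(st);
                p = st->cur->sp;                   /* then reload from the new chunk */
                continue;
            }
            printf("%d\n", *p++);
        }
    }

    int main(void)
    {
        chunk c2 = { NULL, NULL, {5, 6, 7, 8} };
        chunk c1 = { &c2,  NULL, {1, 2, 3, 4} };
        c1.sp = c1.data;
        c2.sp = c2.data;
        stack st = { &c1 };
        walk(&st);
        return 0;
    }

Skipping the write-back would let underflow() see a stale cursor, which is the kind of inconsistency the extra assignment in the hunk guards against.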