Change the representation of the MVar blocked queue
[ghc-hetmet.git]

diff --git a/rts/Schedule.c b/rts/Schedule.c
index 72f6d44..f7b26a4 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -125,7 +125,7 @@ static Capability *schedule (Capability *initialCapability, Task *task);
 static void schedulePreLoop (void);
 static void scheduleFindWork (Capability *cap);
 #if defined(THREADED_RTS)
-static void scheduleYield (Capability **pcap, Task *task, rtsBool);
+static void scheduleYield (Capability **pcap, Task *task);
 #endif
 static void scheduleStartSignalHandlers (Capability *cap);
 static void scheduleCheckBlockedThreads (Capability *cap);
@@ -158,17 +158,6 @@ static void deleteAllThreads (Capability *cap);
 static void deleteThread_(Capability *cap, StgTSO *tso);
 #endif
 
-/* -----------------------------------------------------------------------------
- * Putting a thread on the run queue: different scheduling policies
- * -------------------------------------------------------------------------- */
-
-STATIC_INLINE void
-addToRunQueue( Capability *cap, StgTSO *t )
-{
-    // this does round-robin scheduling; good for concurrency
-    appendToRunQueue(cap,t);
-}
-
 /* ---------------------------------------------------------------------------
    Main scheduling loop.
 
@@ -215,7 +204,6 @@ schedule (Capability *initialCapability, Task *task)
   rtsBool ready_to_gc;
 #if defined(THREADED_RTS)
   rtsBool first = rtsTrue;
-  rtsBool force_yield = rtsFalse;
 #endif
   
   cap = initialCapability;
@@ -339,9 +327,7 @@ schedule (Capability *initialCapability, Task *task)
     //     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     }
 
-  yield:
-    scheduleYield(&cap,task,force_yield);
-    force_yield = rtsFalse;
+    scheduleYield(&cap,task);
 
     if (emptyRunQueue(cap)) continue; // look for work again
 #endif
@@ -501,19 +487,6 @@ run_thread:
 
     traceEventStopThread(cap, t, ret);
 
-#if defined(THREADED_RTS)
-    // If ret is ThreadBlocked, and this Task is bound to the TSO that
-    // blocked, we are in limbo - the TSO is now owned by whatever it
-    // is blocked on, and may in fact already have been woken up,
-    // perhaps even on a different Capability.  It may be the case
-    // that task->cap != cap.  We better yield this Capability
-    // immediately and return to normaility.
-    if (ret == ThreadBlocked) {
-        force_yield = rtsTrue;
-        goto yield;
-    }
-#endif
-
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
     ASSERT(t->cap == cap);
 
@@ -568,6 +541,30 @@ run_thread:
   } /* end of while() */
 }
 
+/* -----------------------------------------------------------------------------
+ * Run queue operations
+ * -------------------------------------------------------------------------- */
+
+void
+removeFromRunQueue (Capability *cap, StgTSO *tso)
+{
+    if (tso->block_info.prev == END_TSO_QUEUE) {
+        ASSERT(cap->run_queue_hd == tso);
+        cap->run_queue_hd = tso->_link;
+    } else {
+        setTSOLink(cap, tso->block_info.prev, tso->_link);
+    }
+    if (tso->_link == END_TSO_QUEUE) {
+        ASSERT(cap->run_queue_tl == tso);
+        cap->run_queue_tl = tso->block_info.prev;
+    } else {
+        setTSOPrev(cap, tso->_link, tso->block_info.prev);
+    }
+    tso->_link = tso->block_info.prev = END_TSO_QUEUE;
+
+    IF_DEBUG(sanity, checkRunQueue(cap));
+}
+
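The run queue becomes a doubly-linked list here: each TSO's block_info.prev field points back at its predecessor on the queue, so removeFromRunQueue can splice a thread out of the middle in constant time instead of walking the list from run_queue_hd. The enqueue side has to keep the back-pointers consistent as well. Below is a minimal sketch of what the inline appendToRunQueue (declared in Schedule.h) plausibly looks like after this change; the exact definition is an assumption, not part of this hunk:

    /* Sketch only: assumed shape of appendToRunQueue once the run queue
     * carries prev back-pointers in block_info.prev. */
    INLINE_HEADER void
    appendToRunQueue (Capability *cap, StgTSO *tso)
    {
        ASSERT(tso->_link == END_TSO_QUEUE);
        if (cap->run_queue_hd == END_TSO_QUEUE) {
            cap->run_queue_hd = tso;                  // queue was empty
            tso->block_info.prev = END_TSO_QUEUE;     // new head has no predecessor
        } else {
            setTSOLink(cap, cap->run_queue_tl, tso);  // old tail's forward link
            setTSOPrev(cap, tso, cap->run_queue_tl);  // new tail's back-pointer
        }
        cap->run_queue_tl = tso;
    }
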
 /* ----------------------------------------------------------------------------
  * Setting up the scheduler loop
  * ------------------------------------------------------------------------- */
@@ -626,23 +623,13 @@ shouldYieldCapability (Capability *cap, Task *task)
 // and also check the benchmarks in nofib/parallel for regressions.
 
 static void
-scheduleYield (Capability **pcap, Task *task, rtsBool force_yield)
+scheduleYield (Capability **pcap, Task *task)
 {
     Capability *cap = *pcap;
 
     // if we have work, and we don't need to give up the Capability, continue.
     //
-    // The force_yield flag is used when a bound thread blocks.  This
-    // is a particularly tricky situation: the current Task does not
-    // own the TSO any more, since it is on some queue somewhere, and
-    // might be woken up or manipulated by another thread at any time.
-    // The TSO and Task might be migrated to another Capability.
-    // Certain invariants might be in doubt, such as task->bound->cap
-    // == cap.  We have to yield the current Capability immediately,
-    // no messing around.
-    //
-    if (!force_yield &&
-        !shouldYieldCapability(cap,task) && 
+    if (!shouldYieldCapability(cap,task) && 
         (!emptyRunQueue(cap) ||
          !emptyInbox(cap) ||
          sched_state >= SCHED_INTERRUPTING))
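With force_yield gone, scheduleYield keeps hold of the Capability only when there is local work (a non-empty run queue or inbox, or the scheduler is shutting down) and shouldYieldCapability says nobody else needs it. For reference, a sketch of the kind of test shouldYieldCapability performs; its body is assumed here, not shown in this hunk:

    /* Sketch: assumed shape of the shouldYieldCapability test used above.
     * Yield if a GC is pending, another Task is returning from a foreign
     * call, or the thread at the head of the run queue cannot be run by
     * this Task. */
    static rtsBool
    shouldYieldCapability (Capability *cap, Task *task)
    {
        return (waiting_for_gc ||
                cap->returning_tasks_hd != NULL ||
                (!emptyRunQueue(cap) &&
                 (task->incall->tso == NULL
                      ? cap->run_queue_hd->bound != NULL
                      : cap->run_queue_hd->bound != task->incall)));
    }
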
@@ -743,12 +730,14 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                    || t->bound == task->incall // don't move my bound thread
                    || tsoLocked(t)) {  // don't move a locked thread
                    setTSOLink(cap, prev, t);
+                    setTSOPrev(cap, t, prev);
                    prev = t;
                } else if (i == n_free_caps) {
                    pushed_to_all = rtsTrue;
                    i = 0;
                    // keep one for us
                    setTSOLink(cap, prev, t);
+                    setTSOPrev(cap, t, prev);
                    prev = t;
                } else {
                    appendToRunQueue(free_caps[i],t);
@@ -761,6 +750,8 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                }
            }
            cap->run_queue_tl = prev;
+
+            IF_DEBUG(sanity, checkRunQueue(cap));
        }
 
 #ifdef SPARK_PUSHING
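When schedulePushWork hands threads off to idle Capabilities it rebuilds the local run queue in place, so every thread it keeps must have both its forward _link and its prev back-pointer re-established, and a sanity build re-checks the queue invariants with checkRunQueue afterwards. setTSOPrev is presumably the back-pointer analogue of setTSOLink, applying the generational-GC write barrier before the store; a sketch, assuming it lives alongside setTSOLink in the storage manager:

    /* Sketch: assumed counterpart of setTSOLink for the new prev field. */
    void
    setTSOPrev (Capability *cap, StgTSO *tso, StgTSO *target)
    {
        if (tso->dirty == 0) {
            dirty_TSO(cap, tso);      // write barrier: TSO mutated since last GC
        }
        tso->block_info.prev = target;
    }
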
@@ -1093,7 +1084,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
         // context switch flag, and we end up waiting for a GC.
         // See #1984, and concurrent/should_run/1984
         cap->context_switch = 0;
-        addToRunQueue(cap,t);
+        appendToRunQueue(cap,t);
     } else {
         pushOnRunQueue(cap,t);
     }
@@ -1162,7 +1153,7 @@ scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
             //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
             checkTSO(t));
 
-    addToRunQueue(cap,t);
+    appendToRunQueue(cap,t);
 
     return rtsFalse;
 }
@@ -1874,8 +1865,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
     } else {
-        traceEventMigrateThread (cap, tso, capabilities[cpu].no);
-       wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
+        migrateThread(cap, tso, &capabilities[cpu]);
     }
 #else
     appendToRunQueue(cap,tso);
@@ -2355,8 +2345,8 @@ deleteThread_(Capability *cap, StgTSO *tso)
 
     if (tso->why_blocked == BlockedOnCCall ||
        tso->why_blocked == BlockedOnCCall_NoUnblockExc) {
-       unblockOne(cap,tso);
        tso->what_next = ThreadKilled;
+       appendToRunQueue(tso->cap, tso);
     } else {
        deleteThread(cap,tso);
     }