Document 'parseStaticFlags'.
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 1b5afef..3f42814 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -969,10 +969,10 @@ scheduleDetectDeadlock (Capability *cap, Task *task)
  * Send pending messages (PARALLEL_HASKELL only)
  * ------------------------------------------------------------------------- */
 
+#if defined(PARALLEL_HASKELL)
 static StgTSO *
 scheduleSendPendingMessages(void)
 {
-#if defined(PARALLEL_HASKELL)
 
 # if defined(PAR) // global Mem.Mgmt., omit for now
     if (PendingFetches != END_BF_QUEUE) {
@@ -985,8 +985,8 @@ scheduleSendPendingMessages(void)
        // packets which have become too old...
        sendOldBuffers(); 
     }
-#endif
 }
+#endif
 
 /* ----------------------------------------------------------------------------
  * Activate spark threads (PARALLEL_HASKELL only)
@@ -1402,7 +1402,6 @@ scheduleNeedHeapProfile( rtsBool ready_to_gc STG_UNUSED )
 static Capability *
 scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
 {
-    StgTSO *t;
     rtsBool heap_census;
 #ifdef THREADED_RTS
     /* extern static volatile StgWord waiting_for_gc; 
@@ -1871,7 +1870,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
     } else {
-       migrateThreadToCapability_lock(&capabilities[cpu],tso);
+       wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
     appendToRunQueue(cap,tso);
@@ -1913,7 +1912,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
  * ------------------------------------------------------------------------- */
 
 #if defined(THREADED_RTS)
-void
+void OSThreadProcAttr
 workerStart(Task *task)
 {
     Capability *cap;
@@ -2193,7 +2192,7 @@ static StgTSO *
 threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
 {
     bdescr *bd, *new_bd;
-    lnat new_tso_size_w, tso_size_w;
+    lnat free_w, tso_size_w;
     StgTSO *new_tso;
 
     tso_size_w = tso_sizeW(tso);
@@ -2208,19 +2207,19 @@ threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
     // while we are moving the TSO:
     lockClosure((StgClosure *)tso);
 
-    new_tso_size_w = round_to_mblocks(tso_size_w/2);
-
-    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
-               tso->id, tso_size_w, new_tso_size_w);
+    // this is the number of words we'll free
+    free_w = round_to_mblocks(tso_size_w/2);
 
     bd = Bdescr((StgPtr)tso);
-    new_bd = splitLargeBlock(bd, new_tso_size_w / BLOCK_SIZE_W);
-    new_bd->free = bd->free;
+    new_bd = splitLargeBlock(bd, free_w / BLOCK_SIZE_W);
     bd->free = bd->start + TSO_STRUCT_SIZEW;
 
     new_tso = (StgTSO *)new_bd->start;
     memcpy(new_tso,tso,TSO_STRUCT_SIZE);
-    new_tso->stack_size = new_tso_size_w - TSO_STRUCT_SIZEW;
+    new_tso->stack_size = new_bd->free - new_tso->stack;
+
+    debugTrace(DEBUG_sched, "thread %ld: reducing TSO size from %lu words to %lu",
+               (long)tso->id, tso_size_w, tso_sizeW(new_tso));
 
     tso->what_next = ThreadRelocated;
     tso->_link = new_tso; // no write barrier reqd: same generation
@@ -2312,8 +2311,6 @@ checkBlackHoles (Capability *cap)
        if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
            IF_DEBUG(sanity,checkTSO(t));
            t = unblockOne(cap, t);
-           // urk, the threads migrate to the current capability
-           // here, but we'd like to keep them on the original one.
            *prev = t;
            any_woke_up = rtsTrue;
        } else {
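
Note on the threadStackUnderflow hunk above: the surviving TSO's stack_size is now derived from the split block's actual free pointer (new_bd->free - new_tso->stack) instead of from the independently computed split size, so it cannot drift out of sync with the block the TSO ends up in. The following standalone sketch (not RTS code; every name, type, and size in it is invented for illustration) shows the same pointer-arithmetic shape under those simplified assumptions.

/* Minimal sketch of deriving a stack size from the block boundary itself,
 * mirroring the shape of "new_bd->free - new_tso->stack" in the patch.
 * ToyTSO, W_, and the block layout here are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

typedef uintptr_t W_;                  /* one machine word, like StgWord     */

typedef struct {
    W_  header;                        /* stand-in for the TSO header fields */
    W_  stack_size;                    /* size of the stack area, in words   */
    W_  stack[];                       /* stack words follow the header      */
} ToyTSO;

int main(void)
{
    static W_ block[64];               /* pretend this is one heap block     */

    ToyTSO *tso        = (ToyTSO *)block;
    W_     *block_free = block + 48;   /* pretend block free pointer         */

    /* Compute the stack size from the actual end-of-block pointer rather
     * than from a separately tracked size, so the two cannot disagree.   */
    tso->stack_size = (W_)(block_free - tso->stack);

    printf("stack_size = %lu words\n", (unsigned long)tso->stack_size);
    return 0;
}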