Remove dead code isHsVar
[ghc-hetmet.git] / rts / Schedule.c
index 47636a3..f376286 100644
@@ -33,6 +33,7 @@
 #include "ProfHeap.h"
 #include "GC.h"
 #include "Weak.h"
+#include "EventLog.h"
 
 /* PARALLEL_HASKELL includes go here */
 
@@ -284,7 +285,9 @@ schedule (Capability *initialCapability, Task *task)
 
   if (running_finalizers) {
       errorBelch("error: a C finalizer called back into Haskell.\n"
-                 "   use Foreign.Concurrent.newForeignPtr for Haskell finalizers.");
+                 "   This was previously allowed, but is disallowed in GHC 6.10.2 and later.\n"
+                 "   To create finalizers that may call back into Haskell, use\n"
+                 "   Foreign.Concurrent.newForeignPtr instead of Foreign.newForeignPtr.");
       stg_exit(EXIT_FAILURE);
   }
 
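The check above relies on a flag that is set for the duration of C finalizer execution elsewhere in the RTS (in the weak-pointer code, not part of this diff). A minimal sketch of that guard pattern; running_finalizers mirrors the diff, while the list type and runner are hypothetical stand-ins:

    /* Sketch only: running_finalizers is the flag schedule() tests above.
     * The list type and runner below are hypothetical stand-ins for the
     * real weak-pointer code elsewhere in the RTS. */
    typedef struct CFin_ {
        void (*fn)(void *);       /* the C finalizer to call */
        void *ptr;                /* its argument */
        struct CFin_ *next;
    } CFin;

    static int running_finalizers = 0;   /* rtsBool in the real RTS */

    static void runCFinalizersSketch(CFin *list)
    {
        running_finalizers = 1;           /* schedule() now rejects re-entry */
        for (; list != NULL; list = list->next)
            list->fn(list->ptr);          /* plain C calls only */
        running_finalizers = 0;
    }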
@@ -539,6 +542,8 @@ run_thread:
     }
 #endif
 
+    postEvent(cap, EVENT_RUN_THREAD, t->id, 0);
+
     switch (prev_what_next) {
        
     case ThreadKilled:
@@ -587,6 +592,8 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
+    postEvent (cap, EVENT_STOP_THREAD, t->id, ret);
+
 #if defined(THREADED_RTS)
     // If ret is ThreadBlocked, and this Task is bound to the TSO that
     // blocked, we are in limbo - the TSO is now owned by whatever it
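Each postEvent call above pairs an event tag with a thread id and one extra word: 0 for EVENT_RUN_THREAD, the stop status for EVENT_STOP_THREAD. The actual encoding lives in EventLog.c and is not shown in this diff; a hedged sketch of the record such a call might append, with illustrative field names and widths:

    /* Illustrative only: roughly what a RUN/STOP event might carry.
     * The real on-disk layout is defined by EventLog.c, not shown here. */
    #include <stdint.h>

    typedef struct {
        uint64_t timestamp;   /* when the event was posted              */
        uint16_t tag;         /* EVENT_RUN_THREAD, EVENT_STOP_THREAD, ..*/
        uint32_t thread_id;   /* t->id above                            */
        uint16_t info;        /* 0 for RUN, the 'ret' status for STOP   */
    } EventSketch;

postEvent(cap, EVENT_STOP_THREAD, t->id, ret) would then stamp the time and fill the remaining fields from its arguments.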
@@ -852,6 +859,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                } else {
                    debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
                    appendToRunQueue(free_caps[i],t);
+
+                   postEvent(cap, EVENT_MIGRATE_THREAD, t->id, free_caps[i]->no);
+
                    if (t->bound) { t->bound->cap = free_caps[i]; }
                    t->cap = free_caps[i];
                    i++;
@@ -873,6 +883,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                    spark = tryStealSpark(cap->sparks);
                    if (spark != NULL) {
                        debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
+
+                       postEvent(free_caps[i], EVENT_STEAL_SPARK, t->id, cap->no);
+
                        newSpark(&(free_caps[i]->r), spark);
                    }
                }
@@ -1268,7 +1281,7 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
               "--<< thread %ld (%s) stopped: HeapOverflow",
               (long)t->id, whatNext_strs[t->what_next]);
 
-    if (cap->context_switch) {
+    if (cap->r.rHpLim == NULL || cap->context_switch) {
         // Sometimes we miss a context switch, e.g. when calling
         // primitives in a tight loop, MAYBE_GC() doesn't check the
         // context switch flag, and we end up waiting for a GC.
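The new rHpLim == NULL disjunct matters because zeroing HpLim is how the RTS forces a running thread to yield: the thread's next heap check fails and it returns to the scheduler with HeapOverflow, so the handler must distinguish that from a genuine overflow. A hedged sketch of the forcing side, assuming the usual GHC mechanism (the function name is made up; the real code lives in the Capability machinery):

    /* Sketch: force the thread running on 'cap' back into schedule().
     * Capability is the RTS type used throughout this file. */
    static void requestContextSwitchSketch(Capability *cap)
    {
        cap->context_switch = 1;  /* remembered until the next safe point */
        cap->r.rHpLim = NULL;     /* the running thread's next heap check
                                     now fails, so it returns to schedule()
                                     with ret == HeapOverflow */
    }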
@@ -1560,6 +1573,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     
     if (gc_type == PENDING_GC_SEQ)
     {
+        postEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
         // single-threaded GC: grab all the capabilities
         for (i=0; i < n_capabilities; i++) {
             debugTrace(DEBUG_sched, "ready_to_gc, grabbing all the capabilities (%d/%d)", i, n_capabilities);
@@ -1582,6 +1596,7 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     {
         // multi-threaded GC: make sure all the Capabilities donate one
         // GC thread each.
+        postEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
         debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
 
         waitForGcThreads(cap);
@@ -1607,6 +1622,7 @@ delete_threads_and_gc:
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
 #if defined(THREADED_RTS)
+    postEvent(cap, EVENT_GC_START, 0, 0);
     debugTrace(DEBUG_sched, "doing GC");
     // reset waiting_for_gc *before* GC, so that when the GC threads
     // emerge they don't immediately re-enter the GC.
@@ -1615,6 +1631,7 @@ delete_threads_and_gc:
 #else
     GarbageCollect(force_major || heap_census, 0, cap);
 #endif
+    postEvent(cap, EVENT_GC_END, 0, 0);
 
     if (recent_activity == ACTIVITY_INACTIVE && force_major)
     {
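Because EVENT_GC_START and EVENT_GC_END bracket the collection on this capability, a log consumer can recover GC time by pairing them. A hypothetical consumer reusing the EventSketch record sketched earlier (no such helper exists in this diff, and the tag values here are assumptions):

    /* Hypothetical: total GC time from a decoded stream of EventSketch
     * records.  Tag values are made up; the real ones are defined by
     * the eventlog code, not shown in this diff. */
    #define EVENT_GC_START_SK  9    /* assumption, not the real value */
    #define EVENT_GC_END_SK   10    /* assumption, not the real value */

    static uint64_t totalGcTimeSketch(const EventSketch *evs, int n)
    {
        uint64_t total = 0, start = 0;
        for (int i = 0; i < n; i++) {
            if (evs[i].tag == EVENT_GC_START_SK) start = evs[i].timestamp;
            if (evs[i].tag == EVENT_GC_END_SK)   total += evs[i].timestamp - start;
        }
        return total;
    }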
@@ -1930,6 +1947,7 @@ suspendThread (StgRegTable *reg)
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
+  postEvent(cap, EVENT_STOP_THREAD, tso->id, THREAD_SUSPENDED_FOREIGN_CALL);
   debugTrace(DEBUG_sched, 
             "thread %lu did a safe foreign call", 
             (unsigned long)cap->r.rCurrentTSO->id);
@@ -2001,6 +2019,8 @@ resumeThread (void *task_)
     tso = task->suspended_tso;
     task->suspended_tso = NULL;
     tso->_link = END_TSO_QUEUE; // no write barrier reqd
+
+    postEvent(cap, EVENT_RUN_THREAD, tso->id, 0);
     debugTrace(DEBUG_sched, "thread %lu: re-entering RTS", (unsigned long)tso->id);
     
     if (tso->why_blocked == BlockedOnCCall) {
@@ -2057,6 +2077,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
     } else {
+       postEvent(cap, EVENT_MIGRATE_THREAD, tso->id, capabilities[cpu].no);
        wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
@@ -2109,6 +2130,10 @@ workerStart(Task *task)
     cap = task->cap;
     RELEASE_LOCK(&task->lock);
 
+    if (RtsFlags.ParFlags.setAffinity) {
+        setThreadAffinity(cap->no, n_capabilities);
+    }
+
     // set the thread-local pointer to the Task:
     taskEnter(task);
 
@@ -2196,8 +2221,6 @@ initScheduler(void)
   }
 #endif
 
-  trace(TRACE_sched, "start: %d capabilities", n_capabilities);
-
   RELEASE_LOCK(&sched_mutex);
 }
 
@@ -2212,9 +2235,7 @@ exitScheduler(
 {
     Task *task = NULL;
 
-    ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
-    RELEASE_LOCK(&sched_mutex);
 
     // If we haven't killed all the threads yet, do it now.
     if (sched_state < SCHED_SHUTTING_DOWN) {
@@ -2278,9 +2299,7 @@ performGC_(rtsBool force_major)
     // We must grab a new Task here, because the existing Task may be
     // associated with a particular Capability, and chained onto the 
     // suspended_ccalling_tasks queue.
-    ACQUIRE_LOCK(&sched_mutex);
     task = newBoundTask();
-    RELEASE_LOCK(&sched_mutex);
 
     waitForReturnCapability(&task->cap,task);
     scheduleDoGC(task->cap,task,force_major);
@@ -2417,9 +2436,16 @@ threadStackUnderflow (Task *task STG_UNUSED, StgTSO *tso)
 
     tso_size_w = tso_sizeW(tso);
 
-    if (tso_size_w < MBLOCK_SIZE_W || 
+    if (tso_size_w < MBLOCK_SIZE_W ||
+          // TSO is less than 2 mblocks (since the first mblock is
+          // shorter than MBLOCK_SIZE_W)
+        (tso_size_w - BLOCKS_PER_MBLOCK*BLOCK_SIZE_W) % MBLOCK_SIZE_W != 0 ||
+          // or TSO is not a whole number of megablocks (ensuring
+          // precondition of splitLargeBlock() below)
         (nat)(tso->stack + tso->stack_size - tso->sp) > tso->stack_size / 4) 
+          // or stack is using more than 1/4 of the available space
     {
+        // then do nothing
         return tso;
     }
 
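The middle disjunct is easiest to check with concrete numbers: the first megablock of a large object is short because block descriptors occupy its front, so a TSO spanning whole megablocks has size first_mblock_words + k * MBLOCK_SIZE_W, which is exactly what the modulus test detects. A hedged arithmetic sketch with illustrative 64-bit constants (the real values come from the RTS headers and vary by platform):

    #include <stdio.h>

    #define BLOCK_SIZE_W      (4096 / 8)            /* words per 4k block  */
    #define MBLOCK_SIZE_W     ((1024 * 1024) / 8)   /* words per 1M mblock */
    #define BLOCKS_PER_MBLOCK 252  /* descriptors eat the first few blocks */

    int main(void)
    {
        /* a TSO filling its short first mblock plus two full ones */
        unsigned long tso_size_w =
            (unsigned long)BLOCKS_PER_MBLOCK * BLOCK_SIZE_W + 2UL * MBLOCK_SIZE_W;
        /* prints 0: a whole number of megablocks, so splitLargeBlock()'s
         * precondition holds and the stack could be shrunk */
        printf("%lu\n",
               (tso_size_w - BLOCKS_PER_MBLOCK * BLOCK_SIZE_W) % MBLOCK_SIZE_W);
        return 0;
    }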
@@ -2526,6 +2552,10 @@ checkBlackHoles (Capability *cap)
     prev = &blackhole_queue;
     t = blackhole_queue;
     while (t != END_TSO_QUEUE) {
+        if (t->what_next == ThreadRelocated) {
+            t = t->_link;
+            continue;
+        }
        ASSERT(t->why_blocked == BlockedOnBlackHole);
        type = get_itbl(UNTAG_CLOSURE(t->block_info.closure))->type;
        if (type != BLACKHOLE && type != CAF_BLACKHOLE) {