diff --git a/rts/Schedule.c b/rts/Schedule.c
index 7b57c0d..66af8be 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -463,12 +463,16 @@ run_thread:
         if (prev == ACTIVITY_DONE_GC) {
             startTimer();
         }
-    } else {
+    } else if (recent_activity != ACTIVITY_INACTIVE) {
+        // If we reached ACTIVITY_INACTIVE, then don't reset it until
+        // we've done the GC.  The thread running here might just be
+        // the IO manager thread that handle_tick() woke up via
+        // wakeUpRts().
         recent_activity = ACTIVITY_YES;
     }
 #endif
 
-    traceSchedEvent(cap, EVENT_RUN_THREAD, t, 0);
+    traceEventRunThread(cap, t);
 
     switch (prev_what_next) {
        
@@ -518,7 +522,7 @@ run_thread:
     t->saved_winerror = GetLastError();
 #endif
 
-    traceSchedEvent (cap, EVENT_STOP_THREAD, t, ret);
+    traceEventStopThread(cap, t, ret);
 
 #if defined(THREADED_RTS)
     // If ret is ThreadBlocked, and this Task is bound to the TSO that
@@ -705,7 +709,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
     Capability *free_caps[n_capabilities], *cap0;
     nat i, n_free_caps;
 
-    // migration can be turned off with +RTS -qg
+    // migration can be turned off with +RTS -qm
     if (!RtsFlags.ParFlags.migrate) return;
 
     // Check whether we have more threads on our run queue, or sparks
@@ -775,10 +779,9 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                    setTSOLink(cap, prev, t);
                    prev = t;
                } else {
-                   debugTrace(DEBUG_sched, "pushing thread %lu to capability %d", (unsigned long)t->id, free_caps[i]->no);
                    appendToRunQueue(free_caps[i],t);
 
-                    traceSchedEvent (cap, EVENT_MIGRATE_THREAD, t, free_caps[i]->no);
+            traceEventMigrateThread (cap, t, free_caps[i]->no);
 
                    if (t->bound) { t->bound->cap = free_caps[i]; }
                    t->cap = free_caps[i];
@@ -802,7 +805,7 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
                    if (spark != NULL) {
                        debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
 
-      traceSchedEvent(free_caps[i], EVENT_STEAL_SPARK, t, cap->no);
+            traceEventStealSpark(free_caps[i], t, cap->no);
 
                        newSpark(&(free_caps[i]->r), spark);
                    }
@@ -1185,36 +1188,32 @@ scheduleHandleStackOverflow (Capability *cap, Task *task, StgTSO *t)
 static rtsBool
 scheduleHandleYield( Capability *cap, StgTSO *t, nat prev_what_next )
 {
-    // Reset the context switch flag.  We don't do this just before
-    // running the thread, because that would mean we would lose ticks
-    // during GC, which can lead to unfair scheduling (a thread hogs
-    // the CPU because the tick always arrives during GC).  This way
-    // penalises threads that do a lot of allocation, but that seems
-    // better than the alternative.
-    cap->context_switch = 0;
-    
     /* put the thread back on the run queue.  Then, if we're ready to
      * GC, check whether this is the last task to stop.  If so, wake
      * up the GC thread.  getThread will block during a GC until the
      * GC is finished.
      */
-#ifdef DEBUG
-    if (t->what_next != prev_what_next) {
-       debugTrace(DEBUG_sched,
-                  "--<< thread %ld (%s) stopped to switch evaluators", 
-                  (long)t->id, what_next_strs[t->what_next]);
-    }
-#endif
-    
+
     ASSERT(t->_link == END_TSO_QUEUE);
     
     // Shortcut if we're just switching evaluators: don't bother
     // doing stack squeezing (which can be expensive), just run the
     // thread.
-    if (t->what_next != prev_what_next) {
+    if (cap->context_switch == 0 && t->what_next != prev_what_next) {
+       debugTrace(DEBUG_sched,
+                  "--<< thread %ld (%s) stopped to switch evaluators", 
+                  (long)t->id, what_next_strs[t->what_next]);
        return rtsTrue;
     }
 
+    // Reset the context switch flag.  We don't do this just before
+    // running the thread, because that would mean we would lose ticks
+    // during GC, which can lead to unfair scheduling (a thread hogs
+    // the CPU because the tick always arrives during GC).  This way
+    // penalises threads that do a lot of allocation, but that seems
+    // better than the alternative.
+    cap->context_switch = 0;
+    
     IF_DEBUG(sanity,
             //debugBelch("&& Doing sanity check on yielding TSO %ld.", t->id);
             checkTSO(t));
@@ -1328,6 +1327,17 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
 #ifdef DEBUG
          removeThreadLabel((StgWord)task->tso->id);
 #endif
+
+          // We no longer consider this thread and task to be bound to
+          // each other.  The TSO lives on until it is GC'd, but the
+          // task is about to be released by the caller, and we don't
+          // want anyone following the pointer from the TSO to the
+          // defunct task (which might have already been
+          // re-used). This was a real bug: the GC updated
+          tso->bound->tso which led to a deadlock.
+          t->bound = NULL;
+          task->tso = NULL;
+
          return rtsTrue; // tells schedule() to return
       }
 
@@ -1418,11 +1428,11 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
     
     if (gc_type == PENDING_GC_SEQ)
     {
-        traceSchedEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
+        traceEventRequestSeqGc(cap);
     }
     else
     {
-        traceSchedEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
+        traceEventRequestParGc(cap);
         debugTrace(DEBUG_sched, "ready_to_gc, grabbing GC threads");
     }
 
@@ -1478,8 +1488,8 @@ delete_threads_and_gc:
     
     heap_census = scheduleNeedHeapProfile(rtsTrue);
 
+    traceEventGcStart(cap);
 #if defined(THREADED_RTS)
-    traceSchedEvent(cap, EVENT_GC_START, 0, 0);
     // reset waiting_for_gc *before* GC, so that when the GC threads
     // emerge they don't immediately re-enter the GC.
     waiting_for_gc = 0;
@@ -1487,7 +1497,7 @@ delete_threads_and_gc:
 #else
     GarbageCollect(force_major || heap_census, 0, cap);
 #endif
-    traceSchedEvent(cap, EVENT_GC_END, 0, 0);
+    traceEventGcEnd(cap);
 
     if (recent_activity == ACTIVITY_INACTIVE && force_major)
     {
@@ -1806,7 +1816,7 @@ suspendThread (StgRegTable *reg)
   task = cap->running_task;
   tso = cap->r.rCurrentTSO;
 
-  traceSchedEvent(cap, EVENT_STOP_THREAD, tso, THREAD_SUSPENDED_FOREIGN_CALL);
+  traceEventStopThread(cap, tso, THREAD_SUSPENDED_FOREIGN_CALL);
 
   // XXX this might not be necessary --SDM
   tso->what_next = ThreadRunGHC;
@@ -1869,7 +1879,7 @@ resumeThread (void *task_)
     task->suspended_tso = NULL;
     tso->_link = END_TSO_QUEUE; // no write barrier reqd
 
-    traceSchedEvent(cap, EVENT_RUN_THREAD, tso, tso->what_next);
+    traceEventRunThread(cap, tso);
     
     if (tso->why_blocked == BlockedOnCCall) {
         // avoid locking the TSO if we don't have to
@@ -1925,7 +1935,7 @@ scheduleThreadOn(Capability *cap, StgWord cpu USED_IF_THREADS, StgTSO *tso)
     if (cpu == cap->no) {
        appendToRunQueue(cap,tso);
     } else {
-        traceSchedEvent (cap, EVENT_MIGRATE_THREAD, tso, capabilities[cpu].no);
+        traceEventMigrateThread (cap, tso, capabilities[cpu].no);
        wakeupThreadOnCapability(cap, &capabilities[cpu], tso);
     }
 #else
@@ -1937,6 +1947,7 @@ Capability *
 scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
 {
     Task *task;
+    StgThreadID id;
 
     // We already created/initialised the Task
     task = cap->running_task;
@@ -1952,14 +1963,15 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
 
     appendToRunQueue(cap,tso);
 
-    debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)tso->id);
+    id = tso->id;
+    debugTrace(DEBUG_sched, "new bound thread (%lu)", (unsigned long)id);
 
     cap = schedule(cap,task);
 
     ASSERT(task->stat != NoStatus);
     ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
-    debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)task->tso->id);
+    debugTrace(DEBUG_sched, "bound thread (%lu) finished", (unsigned long)id);
     return cap;
 }
 
@@ -2089,7 +2101,8 @@ exitScheduler(
     if (sched_state < SCHED_SHUTTING_DOWN) {
        sched_state = SCHED_INTERRUPTING;
         waitForReturnCapability(&task->cap,task);
-       scheduleDoGC(task->cap,task,rtsFalse);    
+       scheduleDoGC(task->cap,task,rtsFalse);
+        ASSERT(task->tso == NULL);
         releaseCapability(task->cap);
     }
     sched_state = SCHED_SHUTTING_DOWN;
@@ -2099,6 +2112,7 @@ exitScheduler(
        nat i;
        
        for (i = 0; i < n_capabilities; i++) {
+            ASSERT(task->tso == NULL);
            shutdownCapability(&capabilities[i], task, wait_foreign);
        }
     }
@@ -2201,6 +2215,7 @@ threadStackOverflow(Capability *cap, StgTSO *tso)
       //
 
       if (tso->flags & TSO_SQUEEZED) {
+          unlockTSO(tso);
           return tso;
       }
       // #3677: In a stack overflow situation, stack squeezing may
@@ -2545,7 +2560,8 @@ raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception)
                SET_HDR(raise_closure, &stg_raise_info, CCCS);
                raise_closure->payload[0] = exception;
            }
-           UPD_IND(((StgUpdateFrame *)p)->updatee,(StgClosure *)raise_closure);
+           UPD_IND(cap, ((StgUpdateFrame *)p)->updatee,
+                    (StgClosure *)raise_closure);
            p = next;
            continue;