fix a warning
[ghc-hetmet.git] ghc/rts/Schedule.c
index 84df020..d72b459 100644
 # define STATIC_INLINE static
 #endif
 
-#ifdef THREADED_RTS
-#define USED_WHEN_THREADED_RTS
-#define USED_WHEN_NON_THREADED_RTS STG_UNUSED
-#else
-#define USED_WHEN_THREADED_RTS     STG_UNUSED
-#define USED_WHEN_NON_THREADED_RTS
-#endif
-
-#ifdef SMP
-#define USED_WHEN_SMP
-#else
-#define USED_WHEN_SMP STG_UNUSED
-#endif
-
 /* -----------------------------------------------------------------------------
  * Global variables
  * -------------------------------------------------------------------------- */
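
The USED_WHEN_* markers removed above are replaced throughout the rest of this patch by USED_IF_THREADS, USED_IF_NOT_THREADS and USED_IF_SMP. Their definitions are not part of this diff; presumably they now live in a shared RTS header rather than locally in Schedule.c. A minimal sketch of what those definitions would look like, mirroring the removed ones (the header location and exact layout are assumptions):

#if defined(THREADED_RTS)
#define USED_IF_THREADS
#define USED_IF_NOT_THREADS STG_UNUSED
#else
#define USED_IF_THREADS     STG_UNUSED
#define USED_IF_NOT_THREADS
#endif

/* parameters only used in SMP configurations */
#if defined(SMP)
#define USED_IF_SMP
#else
#define USED_IF_SMP STG_UNUSED
#endif
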
@@ -387,7 +373,7 @@ schedule (Capability *initialCapability, Task *task)
          // thread for a bit, even if there are others banging at the
          // door.
          first = rtsFalse;
-         ASSERT_CAPABILITY_INVARIANTS(cap,task);
+         ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
       } else {
          // Yield the capability to higher-priority tasks if necessary.
          yieldCapability(&cap, task);
@@ -578,6 +564,8 @@ run_thread:
     errno = t->saved_errno;
     cap->in_haskell = rtsTrue;
 
+    dirtyTSO(t);
+
     recent_activity = ACTIVITY_YES;
 
     switch (prev_what_next) {
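
dirtyTSO is called here (and again in resumeThread, raiseAsync_ and at thread creation further down) but is not defined in this diff. Given that createThread initialises tso->flags to TSO_DIRTY, it presumably just sets that flag so the garbage collector knows the TSO's stack may have been mutated since the last GC. A hypothetical sketch, assuming a flag-setting definition (the name and TSO_DIRTY come from this patch; the body is an assumption):

STATIC_INLINE void
dirtyTSO (StgTSO *tso)
{
    /* mark the TSO as modified; the GC can treat clean TSOs specially */
    tso->flags |= TSO_DIRTY;
}
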
@@ -633,13 +621,13 @@ run_thread:
     // immediately and return to normality.
     if (ret == ThreadBlocked) {
        IF_DEBUG(scheduler,
-                debugBelch("--<< thread %d (%s) stopped: blocked\n",
-                           t->id, whatNext_strs[t->what_next]));
+                sched_belch("--<< thread %d (%s) stopped: blocked\n",
+                            t->id, whatNext_strs[t->what_next]));
        continue;
     }
 #endif
 
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     // ----------------------------------------------------------------------
     
@@ -681,7 +669,7 @@ run_thread:
 
     case ThreadFinished:
        if (scheduleHandleThreadFinished(cap, task, t)) return cap;
-       ASSERT_CAPABILITY_INVARIANTS(cap,task);
+       ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
        break;
 
     default:
@@ -730,8 +718,8 @@ schedulePreLoop(void)
 
 #ifdef SMP
 static void
-schedulePushWork(Capability *cap USED_WHEN_SMP, 
-                Task *task      USED_WHEN_SMP)
+schedulePushWork(Capability *cap USED_IF_SMP, 
+                Task *task      USED_IF_SMP)
 {
     Capability *free_caps[n_capabilities], *cap0;
     nat i, n_free_caps;
@@ -795,6 +783,7 @@ schedulePushWork(Capability *cap USED_WHEN_SMP,
                    prev->link = t;
                    prev = t;
                } else {
+                   IF_DEBUG(scheduler, sched_belch("pushing thread %d to capability %d", t->id, free_caps[i]->no));
                    appendToRunQueue(free_caps[i],t);
                    if (t->bound) { t->bound->cap = free_caps[i]; }
                    i++;
@@ -853,7 +842,7 @@ scheduleStartSignalHandlers(Capability *cap STG_UNUSED)
  * ------------------------------------------------------------------------- */
 
 static void
-scheduleCheckBlockedThreads(Capability *cap USED_WHEN_NON_THREADED_RTS)
+scheduleCheckBlockedThreads(Capability *cap USED_IF_NOT_THREADS)
 {
 #if !defined(THREADED_RTS)
     //
@@ -1881,7 +1870,7 @@ scheduleDoHeapProfile( rtsBool ready_to_gc STG_UNUSED )
  * -------------------------------------------------------------------------- */
 
 static void
-scheduleDoGC( Capability *cap, Task *task USED_WHEN_SMP, rtsBool force_major )
+scheduleDoGC( Capability *cap, Task *task USED_IF_SMP, rtsBool force_major )
 {
     StgTSO *t;
 #ifdef SMP
@@ -2021,7 +2010,7 @@ rtsSupportsBoundThreads(void)
  * ------------------------------------------------------------------------- */
  
 StgBool
-isThreadBound(StgTSO* tso USED_WHEN_THREADED_RTS)
+isThreadBound(StgTSO* tso USED_IF_THREADS)
 {
 #if defined(THREADED_RTS)
   return (tso->bound != NULL);
@@ -2087,9 +2076,13 @@ forkProcess(HsStablePtr *entry
        }
        RELEASE_LOCK(&sched_mutex);
 
+       cap->suspended_ccalling_tasks = NULL;
+
 #if defined(THREADED_RTS)
        // wipe our spare workers list.
        cap->spare_workers = NULL;
+       cap->returning_tasks_hd = NULL;
+       cap->returning_tasks_tl = NULL;
 #endif
 
        cap = rts_evalStableIO(cap, entry, NULL);  // run the action
@@ -2257,6 +2250,9 @@ resumeThread (void *task_)
     cap->in_haskell = rtsTrue;
     errno = saved_errno;
 
+    /* We might have GC'd, mark the TSO dirty again */
+    dirtyTSO(tso);
+
     return &cap->r;
 }
 
@@ -2370,6 +2366,7 @@ createThread(Capability *cap, nat size)
 
     tso->why_blocked  = NotBlocked;
     tso->blocked_exceptions = NULL;
+    tso->flags = TSO_DIRTY;
     
     tso->saved_errno = 0;
     tso->bound = NULL;
@@ -2599,7 +2596,7 @@ scheduleWaitThread (StgTSO* tso, /*[out]*/HaskellObj* ret, Capability *cap)
     cap = schedule(cap,task);
 
     ASSERT(task->stat != NoStatus);
-    ASSERT_CAPABILITY_INVARIANTS(cap,task);
+    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);
 
     IF_DEBUG(scheduler, sched_belch("bound thread (%d) finished", task->tso->id));
     return cap;
@@ -3661,6 +3658,9 @@ raiseAsync_(Capability *cap, StgTSO *tso, StgClosure *exception,
     // Remove it from any blocking queues
     unblockThread(cap,tso);
 
+    // mark it dirty; we're about to change its stack.
+    dirtyTSO(tso);
+
     sp = tso->sp;
     
     // The stack freezing code assumes there's a closure pointer on