[project @ 2005-10-20 11:45:19 by simonmar]
[ghc-hetmet.git] / ghc / rts / Schedule.c
index a496c3d..b78f9d2 100644 (file)
@@ -1486,7 +1486,9 @@ scheduleHandleHeapOverflow( Capability *cap, StgTSO *t )
            cap->r.rNursery->n_blocks == 1) {  // paranoia to prevent infinite loop
                                               // if the nursery has only one block.
            
+           ACQUIRE_SM_LOCK
            bd = allocGroup( blocks );
+           RELEASE_SM_LOCK
            cap->r.rNursery->n_blocks += blocks;
            
            // link the new group into the list
@@ -2311,12 +2313,9 @@ StgTSO *
 createThread(nat size)
 #endif
 {
-
     StgTSO *tso;
     nat stack_size;
 
-    ACQUIRE_LOCK(&sched_mutex);
-
     /* First check whether we should create a thread at all */
 #if defined(PARALLEL_HASKELL)
   /* check that no more than RtsFlags.ParFlags.maxThreads threads are created */
@@ -2324,7 +2323,6 @@ createThread(nat size)
     threadsIgnored++;
     debugBelch("{createThread}Daq ghuH: refusing to create another thread; no more than %d threads allowed (currently %d)\n",
          RtsFlags.ParFlags.maxThreads, advisory_thread_count);
-    RELEASE_LOCK(&sched_mutex);
     return END_TSO_QUEUE;
   }
   threadsCreated++;
@@ -2475,7 +2473,6 @@ createThread(nat size)
   IF_DEBUG(scheduler,sched_belch("created thread %ld, stack size = %lx words", 
                                 (long)tso->id, (long)tso->stack_size));
 #endif    
-  RELEASE_LOCK(&sched_mutex);
   return tso;
 }
 
@@ -2546,8 +2543,8 @@ activateSpark (rtsSpark spark)
  * on this thread's stack before the scheduler is invoked.
  * ------------------------------------------------------------------------ */
 
-static void
-scheduleThread_(StgTSO *tso)
+void
+scheduleThreadLocked(StgTSO *tso)
 {
   // The thread goes at the *end* of the run-queue, to avoid possible
   // starvation of any threads already on the queue.
@@ -2559,7 +2556,7 @@ void
 scheduleThread(StgTSO* tso)
 {
   ACQUIRE_LOCK(&sched_mutex);
-  scheduleThread_(tso);
+  scheduleThreadLocked(tso);
   RELEASE_LOCK(&sched_mutex);
 }
 
@@ -2700,9 +2697,62 @@ exitScheduler( void )
 {
     interrupted = rtsTrue;
     shutting_down_scheduler = rtsTrue;
+
 #if defined(RTS_SUPPORTS_THREADS)
     if (threadIsTask(osThreadId())) { taskStop(); }
     stopTaskManager();
+    //
+    // What can we do here?  There are a bunch of worker threads, it
+    // might be nice to let them exit cleanly.  There may be some main
+    // threads in the run queue; we should let them return to their
+    // callers with an Interrupted state.  We can't in general wait
+    // for all the running Tasks to stop, because some might be off in
+    // a C call that is blocked.
+    // 
+    // Letting the run queue drain is the safest thing.  That lets any
+    // main threads return that can return, and cleans up all the
+    // runnable threads.  Then we grab all the Capabilities to stop
+    // anything unexpected happening while we shut down.
+    //
+    // ToDo: this doesn't let us get the time stats from the worker
+    // tasks, because they haven't called taskStop().
+    //
+    ACQUIRE_LOCK(&sched_mutex);
+    { 
+       nat i;
+       for (i = 1000; i > 0; i--) {
+           if (EMPTY_RUN_QUEUE()) {
+               IF_DEBUG(scheduler, sched_belch("run queue is empty"));
+               break;
+           }
+           IF_DEBUG(scheduler, sched_belch("yielding"));
+           RELEASE_LOCK(&sched_mutex);
+           prodWorker();
+           yieldThread();
+           ACQUIRE_LOCK(&sched_mutex);
+       }
+    }
+
+#ifdef SMP
+    {
+       Capability *cap;
+       int n_capabilities = RtsFlags.ParFlags.nNodes; 
+       Capability *caps[n_capabilities];
+       nat i;
+
+       while (n_capabilities > 0) {
+           IF_DEBUG(scheduler, sched_belch("exitScheduler: grabbing all the capabilities (%d left)", n_capabilities));
+           waitForReturnCapability(&sched_mutex, &cap);
+           n_capabilities--;
+           caps[n_capabilities] = cap;
+       }
+    }
+#else
+    {
+       Capability *cap;
+       waitForReturnCapability(&sched_mutex, &cap);
+    }
+#endif
 #endif
 }
 
@@ -3615,6 +3665,7 @@ checkBlackHoles( void )
        ASSERT(t->why_blocked == BlockedOnBlackHole);
        type = get_itbl(t->block_info.closure)->type;
        if (type != BLACKHOLE && type != CAF_BLACKHOLE) {
+           IF_DEBUG(sanity,checkTSO(t));
            t = unblockOneLocked(t);
            *prev = t;
            any_woke_up = rtsTrue;