[project @ 2002-01-24 02:06:48 by sof]
diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c
index 256aab9..86a725f 100644
--- a/ghc/rts/Schedule.c
+++ b/ghc/rts/Schedule.c
@@ -1,5 +1,5 @@
 /* ---------------------------------------------------------------------------
- * $Id: Schedule.c,v 1.107 2001/11/22 14:25:12 simonmar Exp $
+ * $Id: Schedule.c,v 1.112 2002/01/24 02:06:48 sof Exp $
  *
  * (c) The GHC Team, 1998-2000
  *
@@ -98,8 +98,6 @@
 #ifdef PROFILING
 #include "Proftimer.h"
 #include "ProfHeap.h"
-#include "RetainerProfile.h"
-#include "LdvProfile.h"
 #endif
 #if defined(GRAN) || defined(PAR)
 # include "GranSimRts.h"
@@ -308,7 +306,7 @@ char *threadReturnCode_strs[] = {
 };
 #endif
 
-#ifdef PAR
+#if defined(PAR)
 StgTSO * createSparkThread(rtsSpark spark);
 StgTSO * activateSpark (rtsSpark spark);  
 #endif
@@ -496,8 +494,11 @@ schedule( void )
     /* Top up the run queue from our spark pool.  We try to make the
      * number of threads in the run queue equal to the number of
      * free capabilities.
+     *
+     * Disable spark support in SMP for now, non-essential & requires
+     * a little bit of work to make it compile cleanly. -- sof 1/02.
      */
-#if defined(SMP)
+#if 0 /* defined(SMP) */
     {
       nat n = n_free_capabilities;
       StgTSO *tso = run_queue_hd;
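
As a sketch of the idea behind the block disabled above (illustrative only: it assumes a findSpark() helper that pops the next spark from the pool or returns NULL, plus the createSparkThread() declared earlier, and that sched_mutex is already held):

/* Sketch only: top up the run queue from the spark pool so that there
 * is roughly one runnable thread per free capability.  findSpark() and
 * the rtsSpark-as-pointer assumption are illustrative, not the code
 * that was #if 0'd out above. */
static void
topUpRunQueueFromSparks( void )
{
    nat n = n_free_capabilities;
    StgTSO *tso = run_queue_hd;
    rtsSpark spark;

    /* discount threads that are already runnable */
    while (n > 0 && tso != END_TSO_QUEUE) {
        tso = tso->link;
        n--;
    }

    /* create one spark thread per remaining free capability */
    while (n > 0 && (spark = findSpark()) != NULL) {
        createSparkThread(spark);
        n--;
    }
}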
@@ -581,26 +582,46 @@ schedule( void )
        if (blocked_queue_hd == END_TSO_QUEUE
            && run_queue_hd == END_TSO_QUEUE
            && sleeping_queue == END_TSO_QUEUE) {
+
            IF_DEBUG(scheduler, sched_belch("still deadlocked, checking for black holes..."));
            detectBlackHoles();
+
+           // No black holes found, so this looks like a genuine deadlock.
+           // Send the current main thread NonTermination if it is blocked
+           // on a black hole, and Deadlock otherwise (in SMP, send *every*
+           // main thread an exception, since none of them can make progress).
            if (run_queue_hd == END_TSO_QUEUE) {
-               StgMainThread *m = main_threads;
+               StgMainThread *m;
 #ifdef SMP
-               for (; m != NULL; m = m->link) {
-                   deleteThread(m->tso);
-                   m->ret = NULL;
-                   m->stat = Deadlock;
-                   pthread_cond_broadcast(&m->wakeup);
+               for (m = main_threads; m != NULL; m = m->link) {
+                   switch (m->tso->why_blocked) {
+                   case BlockedOnBlackHole:
+                       raiseAsync(m->tso, (StgClosure *)NonTermination_closure);
+                       break;
+                   case BlockedOnException:
+                   case BlockedOnMVar:
+                       raiseAsync(m->tso, (StgClosure *)Deadlock_closure);
+                       break;
+                   default:
+                       barf("deadlock: main thread blocked in a strange way");
+                   }
                }
-               main_threads = NULL;
 #else
-               deleteThread(m->tso);
-               m->ret = NULL;
-               m->stat = Deadlock;
-               main_threads = m->link;
-               return;
+               m = main_threads;
+               switch (m->tso->why_blocked) {
+               case BlockedOnBlackHole:
+                   raiseAsync(m->tso, (StgClosure *)NonTermination_closure);
+                   break;
+               case BlockedOnException:
+               case BlockedOnMVar:
+                   raiseAsync(m->tso, (StgClosure *)Deadlock_closure);
+                   break;
+               default:
+                   barf("deadlock: main thread blocked in a strange way");
+               }
 #endif
            }
+           ASSERT( run_queue_hd != END_TSO_QUEUE );
        }
     }
 #elif defined(PAR)
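
Both the SMP and non-SMP branches above run the same switch on tso->why_blocked; as a sketch, that shared logic corresponds to the following hypothetical helper (not part of this patch):

/* Hypothetical helper: deliver the appropriate exception to a main
 * thread that can no longer make progress.  Mirrors the switch used in
 * both branches of the deadlock check above. */
static void
sendDeadlockException( StgTSO *tso )
{
    switch (tso->why_blocked) {
    case BlockedOnBlackHole:
        /* blocked on a black hole: treat as non-termination */
        raiseAsync(tso, (StgClosure *)NonTermination_closure);
        break;
    case BlockedOnException:
    case BlockedOnMVar:
        /* blocked on an MVar or a throwTo: genuine deadlock */
        raiseAsync(tso, (StgClosure *)Deadlock_closure);
        break;
    default:
        barf("deadlock: main thread blocked in a strange way");
    }
}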
@@ -1278,32 +1299,8 @@ schedule( void )
 
 #ifdef PROFILING
     if (RtsFlags.ProfFlags.profileInterval==0 || performHeapProfile) {
-        if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) { 
-           //
-           // Note: currently retainer profiling is performed after
-           // a major garbage collection.
-           //
-           GarbageCollect(GetRoots, rtsTrue);
-           retainerProfile();
-       } else if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV) {
-           //
-           // We have LdvCensus() preceded by a major garbage
-           // collection because we don't want *genuinely* dead
-           // closures to be involved in LDV profiling. Another good
-           // reason is to produce consistent profiling results
-           // regardless of the interval at which GCs are performed.
-           // In other words, we want LDV profiling results to be
-           // completely independent of the GC interval.
-           //
-           GarbageCollect(GetRoots, rtsTrue);
-           LdvCensus();
-       } else {
-           //
-           // Normal creator-based heap profile
-           //
-           GarbageCollect(GetRoots, rtsTrue);
-           heapCensus();
-       }
+       GarbageCollect(GetRoots, rtsTrue);
+       heapCensus();
        performHeapProfile = rtsFalse;
        ready_to_gc = rtsFalse; // we already GC'd
     }
@@ -1573,7 +1570,7 @@ createThread_(nat size, rtsBool have_lock)
   stack_size = size - TSO_STRUCT_SIZEW;
 
   tso = (StgTSO *)allocate(size);
-  TICK_ALLOC_TSO(size-TSO_STRUCT_SIZEW, 0);
+  TICK_ALLOC_TSO(stack_size, 0);
 
   SET_HDR(tso, &stg_TSO_info, CCS_SYSTEM);
 #if defined(GRAN)
@@ -1778,11 +1775,6 @@ activateSpark (rtsSpark spark)
 void
 scheduleThread(StgTSO *tso)
 {
-  if (tso==END_TSO_QUEUE){    
-    schedule();
-    return;
-  }
-
   ACQUIRE_LOCK(&sched_mutex);
 
   /* Put the new thread on the head of the runnable queue.  The caller
@@ -1811,7 +1803,7 @@ scheduleThread(StgTSO *tso)
 void
 taskStart(void) /*  ( void *arg STG_UNUSED)  */
 {
-  scheduleThread(END_TSO_QUEUE);
+  schedule();
 }
 #endif
 
@@ -2267,7 +2259,7 @@ GetRoots(evac_fn evac)
       evac((StgClosure **)&suspended_ccalling_threads);
   }
 
-#if defined(SMP) || defined(PAR) || defined(GRAN)
+#if defined(PAR) || defined(GRAN)
   markSparkQueue(evac);
 #endif
 }
@@ -2357,7 +2349,7 @@ threadStackOverflow(StgTSO *tso)
   IF_DEBUG(scheduler, fprintf(stderr,"== scheduler: increasing stack size from %d words to %d.\n", tso->stack_size, new_stack_size));
 
   dest = (StgTSO *)allocate(new_tso_size);
-  TICK_ALLOC_TSO(new_tso_size-sizeofW(StgTSO),0);
+  TICK_ALLOC_TSO(new_stack_size,0);
 
   /* copy the TSO block and the old stack into the new area */
   memcpy(dest,tso,TSO_STRUCT_SIZE);