Add ASSERTs to all calls of nameModule
[ghc-hetmet.git] / rts / sm / MarkWeak.c
index 455b586..78c84ed 100644 (file)
@@ -1,6 +1,6 @@
 /* -----------------------------------------------------------------------------
  *
- * (c) The GHC Team 1998-2006
+ * (c) The GHC Team 1998-2008
  *
  * Weak pointers and weak-like things in the GC
  *
@@ -15,6 +15,7 @@
 #include "Storage.h"
 #include "MarkWeak.h"
 #include "GC.h"
+#include "GCThread.h"
 #include "Evac.h"
 #include "Trace.h"
 #include "Schedule.h"
@@ -73,10 +74,11 @@ static WeakStage weak_stage;
  */
 StgWeak *old_weak_ptr_list; // also pending finaliser list
 
-/* List of all threads during GC
- */
+// List of threads found to be unreachable
 StgTSO *resurrected_threads;
-static StgTSO *old_all_threads;
+
+// List of blocked threads found to have pending throwTos
+StgTSO *exception_threads;
 
 void
 initWeakForGC(void)
@@ -84,13 +86,8 @@ initWeakForGC(void)
     old_weak_ptr_list = weak_ptr_list;
     weak_ptr_list = NULL;
     weak_stage = WeakPtrs;
-
-    /* The all_threads list is like the weak_ptr_list.  
-     * See traverseWeakPtrList() for the details.
-     */
-    old_all_threads = all_threads;
-    all_threads = END_TSO_QUEUE;
     resurrected_threads = END_TSO_QUEUE;
+    exception_threads = END_TSO_QUEUE;
 }
 
 rtsBool 
@@ -99,6 +96,7 @@ traverseWeakPtrList(void)
   StgWeak *w, **last_w, *next_w;
   StgClosure *new;
   rtsBool flag = rtsFalse;
+  const StgInfoTable *info;
 
   switch (weak_stage) {
 
@@ -109,7 +107,7 @@ traverseWeakPtrList(void)
       /* doesn't matter where we evacuate values/finalizers to, since
        * these pointers are treated as roots (iff the keys are alive).
        */
-      evac_gen = 0;
+      gct->evac_step = 0;
       
       last_w = &old_weak_ptr_list;
       for (w = old_weak_ptr_list; w != NULL; w = next_w) {
@@ -123,12 +121,14 @@ traverseWeakPtrList(void)
              continue;
          }
          
-         switch (get_itbl(w)->type) {
-
-         case EVACUATED:
-             next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
+          info = w->header.info;
+          if (IS_FORWARDING_PTR(info)) {
+             next_w = (StgWeak *)UN_FORWARDING_PTR(info);
              *last_w = next_w;
              continue;
+          }
+
+         switch (INFO_PTR_TO_STRUCT(info)->type) {
 
          case WEAK:
              /* Now, check whether the key is reachable.
@@ -137,8 +137,8 @@ traverseWeakPtrList(void)
              if (new != NULL) {
                  w->key = new;
                  // evacuate the value and finalizer 
-                 w->value = evacuate(w->value);
-                 w->finalizer = evacuate(w->finalizer);
+                 evacuate(&w->value);
+                 evacuate(&w->finalizer);
                  // remove this weak ptr from the old_weak_ptr list 
                  *last_w = w->link;
                  // and put it on the new weak ptr list 
@@ -169,7 +169,7 @@ traverseWeakPtrList(void)
        */
       if (flag == rtsFalse) {
          for (w = old_weak_ptr_list; w; w = w->link) {
-             w->finalizer = evacuate(w->finalizer);
+             evacuate(&w->finalizer);
          }
 
          // Next, move to the WeakThreads stage after fully
@@ -184,53 +184,85 @@ traverseWeakPtrList(void)
        * the weak ptr list.  If we discover any threads that are about to
        * become garbage, we wake them up and administer an exception.
        */
-      {
-         StgTSO *t, *tmp, *next, **prev;
+     {
+          StgTSO *t, *tmp, *next, **prev;
+          nat g, s;
+          step *stp;
          
-         prev = &old_all_threads;
-         for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
-             
-             tmp = (StgTSO *)isAlive((StgClosure *)t);
-             
-             if (tmp != NULL) {
-                 t = tmp;
-             }
+          // Traverse thread lists for generations we collected...
+          for (g = 0; g <= N; g++) {
+              for (s = 0; s < generations[g].n_steps; s++) {
+                  stp = &generations[g].steps[s];
+
+                  prev = &stp->old_threads;
+
+                  for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
              
-             ASSERT(get_itbl(t)->type == TSO);
-             switch (t->what_next) {
-             case ThreadRelocated:
-                 next = t->link;
-                 *prev = next;
-                 continue;
-             case ThreadKilled:
-             case ThreadComplete:
-                 // finshed or died.  The thread might still be alive, but we
-                 // don't keep it on the all_threads list.  Don't forget to
-                 // stub out its global_link field.
-                 next = t->global_link;
-                 t->global_link = END_TSO_QUEUE;
-                 *prev = next;
-                 continue;
-             default:
-                 ;
-             }
+                      tmp = (StgTSO *)isAlive((StgClosure *)t);
              
-             if (tmp == NULL) {
-                 // not alive (yet): leave this thread on the
-                 // old_all_threads list.
-                 prev = &(t->global_link);
-                 next = t->global_link;
-             } 
-             else {
-                 // alive: move this thread onto the all_threads list.
-                 next = t->global_link;
-                 t->global_link = all_threads;
-                 all_threads  = t;
-                 *prev = next;
-             }
-         }
+                      if (tmp != NULL) {
+                          t = tmp;
+                      }
+
+                      ASSERT(get_itbl(t)->type == TSO);
+                      if (t->what_next == ThreadRelocated) {
+                          next = t->_link;
+                          *prev = next;
+                          continue;
+                      }
+
+                      next = t->global_link;
+
+                      // This is a good place to check for blocked
+                      // exceptions.  It might be the case that a thread is
+                      // blocked on delivering an exception to a thread that
+                      // is also blocked - we try to ensure that this
+                      // doesn't happen in throwTo(), but it's too hard (or
+                      // impossible) to close all the race holes, so we
+                      // accept that some might get through and deal with
+                      // them here.  A GC will always happen at some point,
+                      // even if the system is otherwise deadlocked.
+                      //
+                      // If an unreachable thread has blocked
+                      // exceptions, we really want to perform the
+                      // blocked exceptions rather than throwing
+                      // BlockedIndefinitely exceptions.  This is the
+                      // only place we can discover such threads.
+                      // The target thread might even be
+                      // ThreadFinished or ThreadKilled.  Bugs here
+                      // will only be seen when running on a
+                      // multiprocessor.
+                      if (t->blocked_exceptions != END_TSO_QUEUE) {
+                          if (tmp == NULL) {
+                              evacuate((StgClosure **)&t);
+                              flag = rtsTrue;
+                          }
+                          t->global_link = exception_threads;
+                          exception_threads = t;
+                          *prev = next;
+                          continue;
+                      }
+
+                      if (tmp == NULL) {
+                          // not alive (yet): leave this thread on the
+                          // step's old_threads list.
+                          prev = &(t->global_link);
+                      } 
+                      else {
+                          // alive
+                          *prev = next;
+
+                          // move this thread onto the correct threads list.
+                          step *new_step;
+                          new_step = Bdescr((P_)t)->step;
+                          t->global_link = new_step->threads;
+                          new_step->threads  = t;
+                      }
+                  }
+              }
+          }
       }
-      
+
       /* If we evacuated any threads, we need to go back to the scavenger.
        */
       if (flag) return rtsTrue;
@@ -238,13 +270,34 @@ traverseWeakPtrList(void)
       /* And resurrect any threads which were about to become garbage.
        */
       {
+          nat g, s;
+          step *stp;
          StgTSO *t, *tmp, *next;
-         for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
-             next = t->global_link;
-             tmp = (StgTSO *)evacuate((StgClosure *)t);
-             tmp->global_link = resurrected_threads;
-             resurrected_threads = tmp;
-         }
+
+          for (g = 0; g <= N; g++) {
+              for (s = 0; s < generations[g].n_steps; s++) {
+                  stp = &generations[g].steps[s];
+
+                  for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
+                      next = t->global_link;
+
+                      // ThreadKilled and ThreadComplete: we have to
+                      // keep these on the thread lists until they
+                      // become garbage, because they might get
+                      // pending exceptions.
+                      switch (t->what_next) {
+                      case ThreadKilled:
+                      case ThreadComplete:
+                          continue;
+                      default:
+                          tmp = t;
+                          evacuate((StgClosure **)&tmp);
+                          tmp->global_link = resurrected_threads;
+                          resurrected_threads = tmp;
+                      }
+                  }
+              }
+          }
       }
       
       /* Finally, we can update the blackhole_queue.  This queue
@@ -256,7 +309,7 @@ traverseWeakPtrList(void)
        */
       { 
          StgTSO **pt;
-         for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
+         for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
              *pt = (StgTSO *)isAlive((StgClosure *)*pt);
              ASSERT(*pt != NULL);
          }
@@ -289,7 +342,7 @@ traverseBlackholeQueue (void)
     flag = rtsFalse;
     prev = NULL;
 
-    for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->link) {
+    for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
         // if the thread is not yet alive...
        if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
             // if the closure it is blocked on is either (a) a
@@ -301,8 +354,14 @@ traverseBlackholeQueue (void)
                     continue;
                 }
             }
-            t = (StgTSO *)evacuate((StgClosure *)t);
-            if (prev) prev->link = t;
+            evacuate((StgClosure **)&t);
+            if (prev) {
+                prev->_link = t;
+            } else {
+                blackhole_queue = t;
+            }
+                 // no write barrier when on the blackhole queue,
+                 // because we traverse the whole queue on every GC.
             flag = rtsTrue;
        }
     }
@@ -324,14 +383,16 @@ traverseBlackholeQueue (void)
 void
 markWeakPtrList ( void )
 {
-  StgWeak *w, **last_w;
+  StgWeak *w, **last_w, *tmp;
 
   last_w = &weak_ptr_list;
   for (w = weak_ptr_list; w; w = w->link) {
       // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
-      ASSERT(w->header.info == &stg_DEAD_WEAK_info 
-            || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
-      w = (StgWeak *)evacuate((StgClosure *)w);
+      ASSERT(IS_FORWARDING_PTR(w->header.info)
+             || w->header.info == &stg_DEAD_WEAK_info 
+            || get_itbl(w)->type == WEAK);
+      tmp = w;
+      evacuate((StgClosure **)&tmp);
       *last_w = w;
       last_w = &(w->link);
   }