GC refactoring, remove "steps"
[ghc-hetmet.git] / rts / sm / MarkWeak.c
index 96b4f67..9d8e8c0 100644
  *
  * ---------------------------------------------------------------------------*/
 
+#include "PosixSource.h"
 #include "Rts.h"
-#include "Storage.h"
+
 #include "MarkWeak.h"
 #include "GC.h"
 #include "GCThread.h"
 #include "Evac.h"
 #include "Trace.h"
 #include "Schedule.h"
+#include "Weak.h"
+#include "Storage.h"
 
 /* -----------------------------------------------------------------------------
    Weak Pointers
@@ -80,6 +83,9 @@ StgTSO *resurrected_threads;
 // List of blocked threads found to have pending throwTos
 StgTSO *exception_threads;
 
+static void resurrectUnreachableThreads (generation *gen);
+static rtsBool tidyThreadList (generation *gen);
+
 void
 initWeakForGC(void)
 {
@@ -107,7 +113,7 @@ traverseWeakPtrList(void)
       /* doesn't matter where we evacuate values/finalizers to, since
        * these pointers are treated as roots (iff the keys are alive).
        */
-      gct->evac_step = 0;
+      gct->evac_gen = 0;
       
       last_w = &old_weak_ptr_list;
       for (w = old_weak_ptr_list; w != NULL; w = next_w) {
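
The comment just above states the invariant that drives this whole file: a weak pointer never keeps its key alive, but once the key is found to be reachable, its value and finaliser must be treated as roots. Below is a minimal, self-contained sketch of that rule; the Obj and WeakRef types and the tidy_weak_list helper are illustrative stand-ins, not the real StgWeak/isAlive/evacuate machinery.

/* Illustrative sketch only: hypothetical Obj and WeakRef types, not the real
 * StgWeak/evacuate machinery.  A weak ref's value and finaliser are kept
 * alive exactly when its key is independently reachable. */
#include <stdbool.h>
#include <stddef.h>

typedef struct Obj_ { bool reachable; } Obj;

typedef struct WeakRef_ {
    Obj *key;                   /* does not keep its referent alive        */
    Obj *value;                 /* kept alive only if the key is reachable */
    Obj *finaliser;             /* run once the key is found to be dead    */
    struct WeakRef_ *link;
} WeakRef;

/* One pass over the pending list: weak refs whose key is alive are moved to
 * the live list and their value/finaliser marked as roots.  The real code
 * iterates this to a fixed point, because keeping one value alive may make
 * another key reachable on a later pass. */
static bool tidy_weak_list (WeakRef **pending, WeakRef **live)
{
    bool progress = false;
    WeakRef **prev = pending, *w;

    while ((w = *prev) != NULL) {
        if (w->key->reachable) {
            w->value->reachable     = true;   /* treat as roots ...       */
            w->finaliser->reachable = true;   /* ... iff the key is alive */
            *prev = w->link;                  /* unlink from pending      */
            w->link = *live;
            *live   = w;
            progress = true;
        } else {
            prev = &w->link;                  /* not alive (yet): keep it */
        }
    }
    return progress;
}
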
@@ -180,83 +186,23 @@ traverseWeakPtrList(void)
       return rtsTrue;
 
   case WeakThreads:
-      /* Now deal with the all_threads list, which behaves somewhat like
+      /* Now deal with the gen->threads lists, which behave somewhat like
        * the weak ptr list.  If we discover any threads that are about to
        * become garbage, we wake them up and administer an exception.
        */
-     {
-          StgTSO *t, *tmp, *next, **prev;
-          nat g, s;
-          step *stp;
+  {
+      nat g;
          
-          // Traverse thread lists for generations we collected...
-          for (g = 0; g <= N; g++) {
-              for (s = 0; s < generations[g].n_steps; s++) {
-                  stp = &generations[g].steps[s];
-
-                  prev = &stp->old_threads;
-
-                  for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
-             
-                      tmp = (StgTSO *)isAlive((StgClosure *)t);
-             
-                      if (tmp != NULL) {
-                          t = tmp;
-                      }
-
-                      ASSERT(get_itbl(t)->type == TSO);
-                      switch (t->what_next) {
-                      case ThreadRelocated:
-                          next = t->_link;
-                          *prev = next;
-                          continue;
-                      case ThreadKilled:
-                      case ThreadComplete:
-                          // finished or died.  The thread might still
-                          // be alive, but we don't keep it on the
-                          // all_threads list.  Don't forget to
-                          // stub out its global_link field.
-                          next = t->global_link;
-                          t->global_link = END_TSO_QUEUE;
-                          *prev = next;
-                          continue;
-                      default:
-                          ;
-                      }
-             
-                      if (tmp == NULL) {
-                          // not alive (yet): leave this thread on the
-                          // old_all_threads list.
-                          prev = &(t->global_link);
-                          next = t->global_link;
-                      } 
-                      else {
-                          // alive
-                          next = t->global_link;
-                          *prev = next;
-
-                          // This is a good place to check for blocked
-                          // exceptions.  It might be the case that a thread is
-                          // blocked on delivering an exception to a thread that
-                          // is also blocked - we try to ensure that this
-                          // doesn't happen in throwTo(), but it's too hard (or
-                          // impossible) to close all the race holes, so we
-                          // accept that some might get through and deal with
-                          // them here.  A GC will always happen at some point,
-                          // even if the system is otherwise deadlocked.
-                          if (t->blocked_exceptions != END_TSO_QUEUE) {
-                              t->global_link = exception_threads;
-                              exception_threads = t;
-                          } else {
-                              // move this thread onto the correct threads list.
-                              step *new_step;
-                              new_step = Bdescr((P_)t)->step;
-                              t->global_link = new_step->threads;
-                              new_step->threads  = t;
-                          }
-                      }
-                  }
-              }
+      // Traverse thread lists for generations we collected...
+//      ToDo when we have one gen per capability:
+//      for (n = 0; n < n_capabilities; n++) {
+//          if (tidyThreadList(&nurseries[n])) {
+//              flag = rtsTrue;
+//          }
+//      }              
+      for (g = 0; g <= N; g++) {
+          if (tidyThreadList(&generations[g])) {
+              flag = rtsTrue;
           }
       }
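
The WeakThreads case above is one arm of a small state machine driven by weak_stage: the per-generation thread lists are tidied here, and (further down) weak_stage is set to WeakDone while rtsTrue is returned to request one more round of scavenging. The sketch below shows that overall shape; the phase names and the caller's loop are partly assumptions inferred from the visible hunks (the earlier old_weak_ptr_list pass and the WeakDone assignment), not a copy of the RTS code.

/* Sketch of the weak_stage phase machine, with illustrative names; the
 * WeakPtrs phase is assumed from the earlier old_weak_ptr_list hunk. */
#include <stdbool.h>

typedef enum { PhaseWeakPtrs, PhaseWeakThreads, PhaseWeakDone } WeakPhase;

static WeakPhase phase = PhaseWeakPtrs;

static bool traverse_weak_lists (void)
{
    switch (phase) {
    case PhaseWeakPtrs:
        /* tidy the weak pointer lists.  (This sketch advances after one
         * pass; the real code stays in this phase until no more keys can
         * become reachable.) */
        phase = PhaseWeakThreads;
        return true;                /* newly live data: scavenge again */
    case PhaseWeakThreads:
        /* tidy per-generation thread lists and resurrect unreachable,
         * unfinished threads */
        phase = PhaseWeakDone;
        return true;                /* one more round of scavenging    */
    case PhaseWeakDone:
    default:
        return false;               /* nothing left to do              */
    }
}

/* The caller alternates scavenging with this traversal until it reports no
 * further work, e.g.:
 *
 *     do { scavenge_until_no_work(); } while (traverse_weak_lists());
 */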
 
@@ -267,25 +213,12 @@ traverseWeakPtrList(void)
       /* And resurrect any threads which were about to become garbage.
        */
       {
-          nat g, s;
-          step *stp;
-         StgTSO *t, *tmp, *next;
-
+          nat g;
           for (g = 0; g <= N; g++) {
-              for (s = 0; s < generations[g].n_steps; s++) {
-                  stp = &generations[g].steps[s];
-
-                  for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
-                      next = t->global_link;
-                      tmp = t;
-                      evacuate((StgClosure **)&tmp);
-                      tmp->global_link = resurrected_threads;
-                      resurrected_threads = tmp;
-                  }
-              }
+              resurrectUnreachableThreads(&generations[g]);
           }
       }
-      
+        
       /* Finally, we can update the blackhole_queue.  This queue
        * simply strings together TSOs blocked on black holes, it is
        * not intended to keep anything alive.  Hence, we do not follow
@@ -300,15 +233,113 @@ traverseWeakPtrList(void)
              ASSERT(*pt != NULL);
          }
       }
-
+      
       weak_stage = WeakDone;  // *now* we're done,
       return rtsTrue;         // but one more round of scavenging, please
-
+  }
+      
   default:
       barf("traverse_weak_ptr_list");
       return rtsTrue;
   }
+}
+
+static void resurrectUnreachableThreads (generation *gen)
+{
+    StgTSO *t, *tmp, *next;
+
+    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
+        next = t->global_link;
+        
+        // ThreadKilled and ThreadComplete: we don't resurrect
+        // these, but they stay on the threads lists until they
+        // become garbage, because they might still get
+        // pending exceptions.
+        switch (t->what_next) {
+        case ThreadKilled:
+        case ThreadComplete:
+            continue;
+        default:
+            tmp = t;
+            evacuate((StgClosure **)&tmp);
+            tmp->global_link = resurrected_threads;
+            resurrected_threads = tmp;
+        }
+    }
+}
+
+static rtsBool tidyThreadList (generation *gen)
+{
+    StgTSO *t, *tmp, *next, **prev;
+    rtsBool flag = rtsFalse;
 
+    prev = &gen->old_threads;
+
+    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
+             
+        tmp = (StgTSO *)isAlive((StgClosure *)t);
+       
+        if (tmp != NULL) {
+            t = tmp;
+        }
+        
+        ASSERT(get_itbl(t)->type == TSO);
+        if (t->what_next == ThreadRelocated) {
+            next = t->_link;
+            *prev = next;
+            continue;
+        }
+        
+        next = t->global_link;
+        
+        // This is a good place to check for blocked
+        // exceptions.  It might be the case that a thread is
+        // blocked on delivering an exception to a thread that
+        // is also blocked - we try to ensure that this
+        // doesn't happen in throwTo(), but it's too hard (or
+        // impossible) to close all the race holes, so we
+        // accept that some might get through and deal with
+        // them here.  A GC will always happen at some point,
+        // even if the system is otherwise deadlocked.
+        //
+        // If an unreachable thread has blocked
+        // exceptions, we really want to perform the
+        // blocked exceptions rather than throwing
+        // BlockedIndefinitely exceptions.  This is the
+        // only place we can discover such threads.
+        // The target thread might even be
+        // ThreadFinished or ThreadKilled.  Bugs here
+        // will only be seen when running on a
+        // multiprocessor.
+        if (t->blocked_exceptions != END_TSO_QUEUE) {
+            if (tmp == NULL) {
+                evacuate((StgClosure **)&t);
+                flag = rtsTrue;
+            }
+            t->global_link = exception_threads;
+            exception_threads = t;
+            *prev = next;
+            continue;
+        }
+        
+        if (tmp == NULL) {
+            // not alive (yet): leave this thread on the
+            // old_threads list.
+            prev = &(t->global_link);
+        } 
+        else {
+            // alive
+            *prev = next;
+            
+            // move this thread onto the correct threads list.
+            generation *new_gen;
+            new_gen = Bdescr((P_)t)->gen;
+            t->global_link = new_gen->threads;
+            new_gen->threads  = t;
+        }
+    }
+
+    return flag;
 }
 
 /* -----------------------------------------------------------------------------
@@ -341,7 +372,11 @@ traverseBlackholeQueue (void)
                 }
             }
             evacuate((StgClosure **)&t);
-            if (prev) prev->_link = t;
+            if (prev) {
+                prev->_link = t;
+            } else {
+                blackhole_queue = t;
+            }
                  // no write barrier when on the blackhole queue,
                  // because we traverse the whole queue on every GC.
             flag = rtsTrue;
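
The final hunk fixes the case where the first TSO on blackhole_queue is moved by evacuate: previously only prev->_link was patched, so a relocated head would leave blackhole_queue pointing at the stale from-space copy. The sketch below shows the general pattern with a hypothetical relocate() standing in for evacuate(); the old copy is simply abandoned, much as from-space is.

/* Sketch only: relocate() is a hypothetical stand-in for evacuate(), copying
 * a node to a new address; the old copy is abandoned as from-space would be. */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct Node_ { struct Node_ *next; } Node;

static Node *relocate (Node *old)
{
    Node *copy = malloc(sizeof(Node));
    memcpy(copy, old, sizeof(Node));   /* copy carries the old next pointer */
    return copy;
}

/* Walk a queue whose nodes may move as we traverse it.  The new address of
 * each node must be written back into whatever pointed at it: the previous
 * node's next field, or -- the case the fix above adds -- the queue head
 * itself when there is no previous node. */
static void update_queue (Node **queue)
{
    Node *t, *prev = NULL;

    for (t = *queue; t != NULL; t = t->next) {
        t = relocate(t);
        if (prev) {
            prev->next = t;      /* interior node: patch the predecessor */
        } else {
            *queue = t;          /* head node: patch the queue head      */
        }
        prev = t;
    }
}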