A small GC optimisation
[ghc-hetmet.git] / rts / sm / MarkWeak.c
index 49134da..f4b576a 100644 (file)
@@ -1,6 +1,6 @@
 /* -----------------------------------------------------------------------------
  *
- * (c) The GHC Team 1998-2006
+ * (c) The GHC Team 1998-2008
  *
  * Weak pointers and weak-like things in the GC
  *
  *
  * ---------------------------------------------------------------------------*/
 
+#include "PosixSource.h"
 #include "Rts.h"
-#include "Storage.h"
+
 #include "MarkWeak.h"
 #include "GC.h"
+#include "GCThread.h"
 #include "Evac.h"
 #include "Trace.h"
 #include "Schedule.h"
+#include "Weak.h"
+#include "Storage.h"
+#include "Threads.h"
 
 /* -----------------------------------------------------------------------------
    Weak Pointers
@@ -73,10 +78,11 @@ static WeakStage weak_stage;
  */
 StgWeak *old_weak_ptr_list; // also pending finaliser list
 
-/* List of all threads during GC
- */
+// List of threads found to be unreachable
 StgTSO *resurrected_threads;
-static StgTSO *old_all_threads;
+
+static void resurrectUnreachableThreads (generation *gen);
+static rtsBool tidyThreadList (generation *gen);
 
 void
 initWeakForGC(void)
@@ -84,12 +90,6 @@ initWeakForGC(void)
     old_weak_ptr_list = weak_ptr_list;
     weak_ptr_list = NULL;
     weak_stage = WeakPtrs;
-
-    /* The all_threads list is like the weak_ptr_list.  
-     * See traverseWeakPtrList() for the details.
-     */
-    old_all_threads = all_threads;
-    all_threads = END_TSO_QUEUE;
     resurrected_threads = END_TSO_QUEUE;
 }
 
@@ -99,6 +99,7 @@ traverseWeakPtrList(void)
   StgWeak *w, **last_w, *next_w;
   StgClosure *new;
   rtsBool flag = rtsFalse;
+  const StgInfoTable *info;
 
   switch (weak_stage) {
 
@@ -109,7 +110,7 @@ traverseWeakPtrList(void)
       /* doesn't matter where we evacuate values/finalizers to, since
        * these pointers are treated as roots (iff the keys are alive).
        */
-      evac_gen = 0;
+      gct->evac_gen_no = 0;
       
       last_w = &old_weak_ptr_list;
       for (w = old_weak_ptr_list; w != NULL; w = next_w) {
@@ -123,12 +124,8 @@ traverseWeakPtrList(void)
              continue;
          }
          
-         switch (get_itbl(w)->type) {
-
-         case EVACUATED:
-             next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
-             *last_w = next_w;
-             continue;
+          info = get_itbl(w);
+         switch (info->type) {
 
          case WEAK:
              /* Now, check whether the key is reachable.
@@ -137,8 +134,8 @@ traverseWeakPtrList(void)
              if (new != NULL) {
                  w->key = new;
                  // evacuate the value and finalizer 
-                 w->value = evacuate(w->value);
-                 w->finalizer = evacuate(w->finalizer);
+                 evacuate(&w->value);
+                 evacuate(&w->finalizer);
                  // remove this weak ptr from the old_weak_ptr list 
                  *last_w = w->link;
                  // and put it on the new weak ptr list 
@@ -169,7 +166,7 @@ traverseWeakPtrList(void)
        */
       if (flag == rtsFalse) {
          for (w = old_weak_ptr_list; w; w = w->link) {
-             w->finalizer = evacuate(w->finalizer);
+             evacuate(&w->finalizer);
          }
 
          // Next, move to the WeakThreads stage after fully
@@ -180,57 +177,26 @@ traverseWeakPtrList(void)
       return rtsTrue;
 
   case WeakThreads:
-      /* Now deal with the all_threads list, which behaves somewhat like
+      /* Now deal with the per-generation threads lists, which behave somewhat like
        * the weak ptr list.  If we discover any threads that are about to
        * become garbage, we wake them up and administer an exception.
        */
-      {
-         StgTSO *t, *tmp, *next, **prev;
+  {
+      nat g;
          
-         prev = &old_all_threads;
-         for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
-             
-             tmp = (StgTSO *)isAlive((StgClosure *)t);
-             
-             if (tmp != NULL) {
-                 t = tmp;
-             }
-             
-             ASSERT(get_itbl(t)->type == TSO);
-             switch (t->what_next) {
-             case ThreadRelocated:
-                 next = t->link;
-                 *prev = next;
-                 continue;
-             case ThreadKilled:
-             case ThreadComplete:
-                 // finshed or died.  The thread might still be alive, but we
-                 // don't keep it on the all_threads list.  Don't forget to
-                 // stub out its global_link field.
-                 next = t->global_link;
-                 t->global_link = END_TSO_QUEUE;
-                 *prev = next;
-                 continue;
-             default:
-                 ;
-             }
-             
-             if (tmp == NULL) {
-                 // not alive (yet): leave this thread on the
-                 // old_all_threads list.
-                 prev = &(t->global_link);
-                 next = t->global_link;
-             } 
-             else {
-                 // alive: move this thread onto the all_threads list.
-                 next = t->global_link;
-                 t->global_link = all_threads;
-                 all_threads  = t;
-                 *prev = next;
-             }
-         }
+      // Traverse thread lists for generations we collected...
+//      ToDo when we have one gen per capability:
+//      for (n = 0; n < n_capabilities; n++) {
+//          if (tidyThreadList(&nurseries[n])) {
+//              flag = rtsTrue;
+//          }
+//      }              
+      for (g = 0; g <= N; g++) {
+          if (tidyThreadList(&generations[g])) {
+              flag = rtsTrue;
+          }
       }
-      
+
       /* If we evacuated any threads, we need to go back to the scavenger.
        */
       if (flag) return rtsTrue;
@@ -238,78 +204,99 @@ traverseWeakPtrList(void)
       /* And resurrect any threads which were about to become garbage.
        */
       {
-         StgTSO *t, *tmp, *next;
-         for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
-             next = t->global_link;
-             tmp = (StgTSO *)evacuate((StgClosure *)t);
-             tmp->global_link = resurrected_threads;
-             resurrected_threads = tmp;
-         }
-      }
-      
-      /* Finally, we can update the blackhole_queue.  This queue
-       * simply strings together TSOs blocked on black holes, it is
-       * not intended to keep anything alive.  Hence, we do not follow
-       * pointers on the blackhole_queue until now, when we have
-       * determined which TSOs are otherwise reachable.  We know at
-       * this point that all TSOs have been evacuated, however.
-       */
-      { 
-         StgTSO **pt;
-         for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
-             *pt = (StgTSO *)isAlive((StgClosure *)*pt);
-             ASSERT(*pt != NULL);
-         }
+          nat g;
+          for (g = 0; g <= N; g++) {
+              resurrectUnreachableThreads(&generations[g]);
+          }
       }
-
+        
       weak_stage = WeakDone;  // *now* we're done,
       return rtsTrue;         // but one more round of scavenging, please
-
+  }
+      
   default:
       barf("traverse_weak_ptr_list");
       return rtsTrue;
   }
-
+}
+  
+  static void resurrectUnreachableThreads (generation *gen)
+{
+    StgTSO *t, *tmp, *next;
+
+    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
+        next = t->global_link;
+        
+        // ThreadKilled and ThreadComplete: we have to keep
+        // these on the threads list until they
+        // become garbage, because they might get
+        // pending exceptions.
+        switch (t->what_next) {
+        case ThreadKilled:
+        case ThreadComplete:
+            continue;
+        default:
+            tmp = t;
+            evacuate((StgClosure **)&tmp);
+            tmp->global_link = resurrected_threads;
+            resurrected_threads = tmp;
+        }
+    }
 }
 
-/* -----------------------------------------------------------------------------
-   The blackhole queue
-   
-   Threads on this list behave like weak pointers during the normal
-   phase of garbage collection: if the blackhole is reachable, then
-   the thread is reachable too.
-   -------------------------------------------------------------------------- */
-rtsBool
-traverseBlackholeQueue (void)
+static rtsBool tidyThreadList (generation *gen)
 {
-    StgTSO *prev, *t, *tmp;
-    rtsBool flag;
-
-    flag = rtsFalse;
-    prev = NULL;
-
-    for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->link) {
-       if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
-           if (isAlive(t->block_info.closure)) {
-               t = (StgTSO *)evacuate((StgClosure *)t);
-               if (prev) prev->link = t;
-               flag = rtsTrue;
-           }
-       }
+    StgTSO *t, *tmp, *next, **prev;
+    rtsBool flag = rtsFalse;
+
+    prev = &gen->old_threads;
+
+    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
+             
+        tmp = (StgTSO *)isAlive((StgClosure *)t);
+       
+        if (tmp != NULL) {
+            t = tmp;
+        }
+        
+        ASSERT(get_itbl(t)->type == TSO);
+        next = t->global_link;
+        
+        // if the thread is not masking exceptions but there are
+        // pending exceptions on its queue, then something has gone
+        // wrong.  However, pending exceptions are OK if there is an
+        // FFI call.
+        ASSERT(t->blocked_exceptions == END_BLOCKED_EXCEPTIONS_QUEUE
+               || t->why_blocked == BlockedOnCCall
+               || t->why_blocked == BlockedOnCCall_Interruptible
+               || (t->flags & TSO_BLOCKEX));
+        
+        if (tmp == NULL) {
+            // not alive (yet): leave this thread on this
+            // generation's old_threads list.
+            prev = &(t->global_link);
+        } 
+        else {
+            // alive
+            *prev = next;
+            
+            // move this thread onto the correct threads list.
+            generation *new_gen;
+            new_gen = Bdescr((P_)t)->gen;
+            t->global_link = new_gen->threads;
+            new_gen->threads  = t;
+        }
     }
+
     return flag;
 }
 
 /* -----------------------------------------------------------------------------
-   After GC, the live weak pointer list may have forwarding pointers
-   on it, because a weak pointer object was evacuated after being
-   moved to the live weak pointer list.  We remove those forwarding
-   pointers here.
-
-   Also, we don't consider weak pointer objects to be reachable, but
-   we must nevertheless consider them to be "live" and retain them.
-   Therefore any weak pointer objects which haven't as yet been
-   evacuated need to be evacuated now.
+   Evacuate every weak pointer object on the weak_ptr_list, and update
+   the link fields.
+
+   ToDo: with a lot of weak pointers, this will be expensive.  We
+   should have a per-GC weak pointer list, just like threads.
    -------------------------------------------------------------------------- */
 
 void
@@ -320,11 +307,25 @@ markWeakPtrList ( void )
   last_w = &weak_ptr_list;
   for (w = weak_ptr_list; w; w = w->link) {
       // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
-      ASSERT(w->header.info == &stg_DEAD_WEAK_info 
-            || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
-      w = (StgWeak *)evacuate((StgClosure *)w);
-      *last_w = w;
-      last_w = &(w->link);
+
+#ifdef DEBUG
+      {   // careful to do this assertion only reading the info ptr
+          // once, because during parallel GC it might change under our feet.
+          const StgInfoTable *info;
+          info = w->header.info;
+          ASSERT(IS_FORWARDING_PTR(info)
+                 || info == &stg_DEAD_WEAK_info 
+                 || INFO_PTR_TO_STRUCT(info)->type == WEAK);
+      }
+#endif
+
+      evacuate((StgClosure **)last_w);
+      w = *last_w;
+      if (w->header.info == &stg_DEAD_WEAK_info) {
+          last_w = &(((StgDeadWeak*)w)->link);
+      } else {
+          last_w = &(w->link);
+      }
   }
 }