Allow work units smaller than a block to improve load balancing
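
The changes to GC.c below: each gc_thread now owns a single flat array of
step workspaces (one per step, indexed by the step's absolute number)
instead of a per-generation array of arrays, and each workspace keeps a
part_list of partially-filled blocks alongside its scavd_list; this is
what lets work items be smaller than a whole block. The static object
lists move from globals into the gc_thread, initialise_N now returns the
number of blocks to be collected so that small collections stay
single-threaded, and a new shutdown_gc_threads waits for the workers to
reach their standby state before GC returns.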
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index 09e2b2c..a361175 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -11,7 +11,7 @@
  *
  * ---------------------------------------------------------------------------*/
 
-#include "PosixSource.h"
+// #include "PosixSource.h"
 #include "Rts.h"
 #include "RtsFlags.h"
 #include "RtsUtils.h"
@@ -51,6 +51,7 @@
 #include "Sparks.h"
 
 #include <string.h> // for memset()
+#include <unistd.h>
 
 /* -----------------------------------------------------------------------------
    Global variables
@@ -84,11 +85,6 @@
 * We build up a static object list while collecting generations 0..N,
  * which is then appended to the static object list of generation N+1.
  */
-StgClosure* static_objects;      // live static objects
-StgClosure* scavenged_static_objects;   // static objects scavenged so far
-#ifdef THREADED_RTS
-SpinLock static_objects_sync;
-#endif
 
 /* N is the oldest generation being collected, where the generations
  * are numbered starting at 0.  A major GC (indicated by the major_gc
@@ -118,7 +114,7 @@ nat mutlist_MUTVARS,
 
 /* Thread-local data for each GC thread
  */
-gc_thread *gc_threads = NULL;
+gc_thread **gc_threads = NULL;
 // gc_thread *gct = NULL;  // this thread's gct TODO: make thread-local
 
 // Number of threads running in *this* GC.  Affects how many
@@ -138,7 +134,7 @@ SpinLock recordMutableGen_sync;
 
 static void mark_root               (StgClosure **root);
 static void zero_static_object_list (StgClosure* first_static);
-static void initialise_N            (rtsBool force_major_gc);
+static nat  initialise_N            (rtsBool force_major_gc);
 static void alloc_gc_threads        (void);
 static void init_collected_gen      (nat g, nat threads);
 static void init_uncollected_gen    (nat g, nat threads);
@@ -151,6 +147,7 @@ static void gc_thread_work          (void);
 static nat  inc_running             (void);
 static nat  dec_running             (void);
 static void wakeup_gc_threads       (nat n_threads);
+static void shutdown_gc_threads     (nat n_threads);
 
 #if 0 && defined(DEBUG)
 static void gcCAFs                  (void);
@@ -187,7 +184,7 @@ GarbageCollect ( rtsBool force_major_gc )
   lnat live, allocated;
   lnat oldgen_saved_blocks = 0;
   gc_thread *saved_gct;
-  nat g, s, t;
+  nat g, s, t, n;
 
   // necessary if we stole a callee-saves register for gct:
   saved_gct = gct;
@@ -232,7 +229,7 @@ GarbageCollect ( rtsBool force_major_gc )
 
   /* Figure out which generation to collect
    */
-  initialise_N(force_major_gc);
+  n = initialise_N(force_major_gc);
 
   /* Allocate + initialise the gc_thread structures.
    */
@@ -246,11 +243,13 @@ GarbageCollect ( rtsBool force_major_gc )
    * We don't try to parallelise minor GC.
    */
 #if defined(THREADED_RTS)
-  if (N == 0) {
+  if (n < (4*1024*1024 / BLOCK_SIZE)) {
       n_gc_threads = 1;
   } else {
       n_gc_threads = RtsFlags.ParFlags.gcThreads;
   }
+  trace(TRACE_gc|DEBUG_gc, "GC (gen %d): %dKB to collect, using %d thread(s)",
+        N, n * (BLOCK_SIZE / 1024), n_gc_threads);
 #else
   n_gc_threads = 1;
 #endif
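
The guard changes from "parallelise only major collections" to a
size-based heuristic: with the usual 4KB RTS block, 4*1024*1024 /
BLOCK_SIZE is 1024 blocks, so any collection gathering less than about
4MB of blocks runs single-threaded whatever the generation. A minimal
sketch of the decision, with BLOCK_SIZE and the thread cap as stand-in
assumptions:

    #include <stdio.h>

    #define BLOCK_SIZE 4096   /* assumption: the usual 4KB RTS block */

    /* gc_threads_cap stands in for RtsFlags.ParFlags.gcThreads */
    static unsigned pick_n_gc_threads(unsigned blocks, unsigned gc_threads_cap)
    {
        /* less than ~4MB to collect: not worth waking the other threads */
        if (blocks < (4*1024*1024 / BLOCK_SIZE))
            return 1;
        return gc_threads_cap;
    }

    int main(void)
    {
        printf("%u\n", pick_n_gc_threads(512,  4));  /* 2MB -> 1 */
        printf("%u\n", pick_n_gc_threads(2048, 4));  /* 8MB -> 4 */
        return 0;
    }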
@@ -269,10 +268,10 @@ GarbageCollect ( rtsBool force_major_gc )
   // check stack sanity *before* GC (ToDo: check all threads) 
   IF_DEBUG(sanity, checkFreeListSanity());
 
-  /* Initialise the static object lists
-   */
-  static_objects = END_OF_STATIC_LIST;
-  scavenged_static_objects = END_OF_STATIC_LIST;
+  // Initialise all our gc_thread structures
+  for (t = 0; t < n_gc_threads; t++) {
+      init_gc_thread(gc_threads[t]);
+  }
 
   // Initialise all the generations/steps that we're collecting.
   for (g = 0; g <= N; g++) {
@@ -295,21 +294,8 @@ GarbageCollect ( rtsBool force_major_gc )
       mark_stack_bdescr = NULL;
   }
 
-  // Initialise all our gc_thread structures
-  for (t = 0; t < n_gc_threads; t++) {
-      init_gc_thread(&gc_threads[t]);
-  }
-
-  // the main thread is running: this prevents any other threads from
-  // exiting prematurely, so we can start them now.
-  inc_running();
-  wakeup_gc_threads(n_gc_threads);
-
-  // Initialise stats
-  copied = 0;
-
   // this is the main thread
-  gct = &gc_threads[0];
+  gct = gc_threads[0];
 
   /* -----------------------------------------------------------------------
    * follow all the roots that we know about:
@@ -319,15 +305,21 @@ GarbageCollect ( rtsBool force_major_gc )
    * Also do them in reverse generation order, for the usual reason:
    * namely to reduce the likelihood of spurious old->new pointers.
    */
-  { 
-    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
+  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
       generations[g].saved_mut_list = generations[g].mut_list;
       generations[g].mut_list = allocBlock(); 
-        // mut_list always has at least one block.
-    }
-    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
+      // mut_list always has at least one block.
+  }
+
+  // the main thread is running: this prevents any other threads from
+  // exiting prematurely, so we can start them now.
+  // NB. do this after the mutable lists have been saved above, otherwise
+  // the other GC threads will be writing into the old mutable lists.
+  inc_running();
+  wakeup_gc_threads(n_gc_threads);
+
+  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
       scavenge_mutable_list(&generations[g]);
-    }
   }
 
   // follow roots from the CAF list (used by GHCi)
@@ -378,6 +370,8 @@ GarbageCollect ( rtsBool force_major_gc )
       break;
   }
 
+  shutdown_gc_threads(n_gc_threads);
+
   // Update pointers from the Task list
   update_task_list();
 
@@ -421,32 +415,41 @@ GarbageCollect ( rtsBool force_major_gc )
       bdescr *prev;
 
       for (t = 0; t < n_gc_threads; t++) {
-         thr = &gc_threads[t];
-
-         for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-             for (s = 0; s < generations[g].n_steps; s++) {
-                 ws = &thr->steps[g][s];
-                 if (g==0 && s==0) continue;
-
-                 // Not true?
-                 // ASSERT( ws->scan_bd == ws->todo_bd );
-                 ASSERT( ws->scan_bd ? ws->scan == ws->scan_bd->free : 1 );
-
-                 // Push the final block
-                 if (ws->scan_bd) { push_scan_block(ws->scan_bd, ws); }
-
-                 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks);
-
-                 prev = ws->scavd_list;
-                 for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
-                     bd->flags &= ~BF_EVACUATED;        // now from-space 
-                     prev = bd;
-                 }
-                 prev->link = ws->stp->blocks;
-                 ws->stp->blocks = ws->scavd_list;
-                 ws->stp->n_blocks += ws->n_scavd_blocks;
-                 ASSERT(countBlocks(ws->stp->blocks) == ws->stp->n_blocks);
-             }
+         thr = gc_threads[t];
+
+          // not step 0
+          for (s = 1; s < total_steps; s++) {
+              ws = &thr->steps[s];
+              // Not true?
+              // ASSERT( ws->scan_bd == ws->todo_bd );
+              ASSERT( ws->scan_bd ? ws->scan_bd->u.scan == ws->scan_bd->free : 1 );
+
+              // Push the final block
+              if (ws->scan_bd) { push_scanned_block(ws->scan_bd, ws); }
+              
+              ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks);
+              
+              prev = ws->part_list;
+              for (bd = ws->part_list; bd != NULL; bd = bd->link) {
+                  bd->flags &= ~BF_EVACUATED;   // now from-space 
+                  prev = bd;
+              }
+              if (prev != NULL) {
+                  prev->link = ws->scavd_list;
+              }
+              for (bd = ws->scavd_list; bd != NULL; bd = bd->link) {
+                  bd->flags &= ~BF_EVACUATED;   // now from-space 
+                  prev = bd;
+              }
+              prev->link = ws->step->blocks;
+              if (ws->part_list != NULL) {
+                  ws->step->blocks = ws->part_list;
+              } else {
+                  ws->step->blocks = ws->scavd_list;
+              }
+              ws->step->n_blocks += ws->n_part_blocks;
+              ws->step->n_blocks += ws->n_scavd_blocks;
+              ASSERT(countBlocks(ws->step->blocks) == ws->step->n_blocks);
          }
       }
   }
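
This is the heart of the new bookkeeping: each workspace's
partially-filled blocks (part_list) are chained in front of its fully
scavenged blocks (scavd_list), every block is flipped back to from-space
by clearing BF_EVACUATED, and the combined chain is pushed onto the
step's block list. A toy sketch of the splice (stand-in types, not the
RTS bdescr):

    #include <stddef.h>

    /* toy stand-ins for bdescr and the workspace fields */
    typedef struct blk { struct blk *link; unsigned flags; } blk;
    #define BF_EVACUATED 1

    /* Returns the new head of step->blocks: part_list ++ scavd_list ++ old. */
    static blk *splice_back(blk *part_list, blk *scavd_list, blk *old_blocks)
    {
        blk *prev = NULL, *bd;

        for (bd = part_list; bd != NULL; bd = bd->link) {
            bd->flags &= ~BF_EVACUATED;          /* now from-space */
            prev = bd;
        }
        if (prev != NULL)
            prev->link = scavd_list;             /* part ++ scavd */

        for (bd = scavd_list; bd != NULL; bd = bd->link) {
            bd->flags &= ~BF_EVACUATED;
            prev = bd;
        }
        /* as in the patch: assumes at least one block in one of the lists */
        prev->link = old_blocks;                 /* ++ the step's old blocks */

        return part_list != NULL ? part_list : scavd_list;
    }

Note that if both lists were empty, prev would still be NULL at the final
prev->link assignment; presumably every workspace visited here owns at
least one scavenged block.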
@@ -466,6 +469,21 @@ GarbageCollect ( rtsBool force_major_gc )
 
   /* run through all the generations/steps and tidy up 
    */
+  copied = 0;
+  { 
+      nat i;
+      for (i=0; i < n_gc_threads; i++) {
+          if (n_gc_threads > 1) {
+              trace(TRACE_gc,"thread %d:", i);
+              trace(TRACE_gc,"   copied           %ld", gc_threads[i]->copied * sizeof(W_));
+              trace(TRACE_gc,"   any_work         %ld", gc_threads[i]->any_work);
+              trace(TRACE_gc,"   no_work          %ld", gc_threads[i]->no_work);
+              trace(TRACE_gc,"   scav_find_work %ld",   gc_threads[i]->scav_find_work);
+          }
+          copied += gc_threads[i]->copied;
+      }
+  }
+
   for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
 
     if (g <= N) {
@@ -619,12 +637,19 @@ GarbageCollect ( rtsBool force_major_gc )
 #ifdef PROFILING
   // resetStaticObjectForRetainerProfiling() must be called before
   // zeroing below.
-  resetStaticObjectForRetainerProfiling();
+  if (n_gc_threads > 1) {
+      barf("profiling is currently broken with multi-threaded GC");
+      // ToDo: fix the gct->scavenged_static_objects below
+  }
+  resetStaticObjectForRetainerProfiling(gct->scavenged_static_objects);
 #endif
 
   // zero the scavenged static object list 
   if (major_gc) {
-    zero_static_object_list(scavenged_static_objects);
+      nat i;
+      for (i = 0; i < n_gc_threads; i++) {
+          zero_static_object_list(gc_threads[i]->scavenged_static_objects);
+      }
   }
 
   // Reset the nursery
@@ -647,7 +672,7 @@ GarbageCollect ( rtsBool force_major_gc )
   IF_DEBUG(sanity, checkSanity());
 
   // extra GC trace info 
-  IF_DEBUG(gc, statDescribeGens());
+  if (traceClass(TRACE_gc|DEBUG_gc)) statDescribeGens();
 
 #ifdef DEBUG
   // symbol-table based profiling 
@@ -893,38 +918,59 @@ isAlive(StgClosure *p)
 
 /* -----------------------------------------------------------------------------
    Figure out which generation to collect, initialise N and major_gc.
+
+   Also returns the total number of blocks in generations that will be
+   collected.
    -------------------------------------------------------------------------- */
 
-static void
+static nat
 initialise_N (rtsBool force_major_gc)
 {
-    nat g;
+    int g;
+    nat s, blocks, blocks_total;
+
+    blocks = 0;
+    blocks_total = 0;
 
     if (force_major_gc) {
-       N = RtsFlags.GcFlags.generations - 1;
-       major_gc = rtsTrue;
+        N = RtsFlags.GcFlags.generations - 1;
     } else {
-       N = 0;
-       for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
-           if (generations[g].steps[0].n_blocks +
-               generations[g].steps[0].n_large_blocks
-               >= generations[g].max_blocks) {
-               N = g;
-           }
-       }
-       major_gc = (N == RtsFlags.GcFlags.generations-1);
+        N = 0;
+    }
+
+    for (g = RtsFlags.GcFlags.generations - 1; g >= 0; g--) {
+        blocks = 0;
+        for (s = 0; s < generations[g].n_steps; s++) {
+            blocks += generations[g].steps[s].n_blocks;
+            blocks += generations[g].steps[s].n_large_blocks;
+        }
+        if (blocks >= generations[g].max_blocks) {
+            N = stg_max(N,g);
+        }
+        if ((nat)g <= N) {
+            blocks_total += blocks;
+        }
     }
+
+    blocks_total += countNurseryBlocks();
+
+    major_gc = (N == RtsFlags.GcFlags.generations-1);
+    return blocks_total;
 }
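
The rewritten initialise_N differs from the old code in two ways: it
counts the blocks of every step (including large-object blocks) rather
than just step 0, and it accumulates a total over all generations that
will be collected, plus the nursery, as the return value. For example,
with three generations holding 900, 400 and 100 blocks against
max_blocks of 300, 350 and 1000, the downward scan sets N = 1 at
generation 1 and returns 900 + 400 plus the nursery blocks; generation 2
is under its limit, so it is neither collected nor counted.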
 
 /* -----------------------------------------------------------------------------
    Initialise the gc_thread structures.
    -------------------------------------------------------------------------- */
 
-static void
-alloc_gc_thread (gc_thread *t, int n)
+static gc_thread *
+alloc_gc_thread (int n)
 {
-    nat g, s;
+    nat s;
     step_workspace *ws;
+    gc_thread *t;
+
+    t = stgMallocBytes(sizeof(gc_thread) + total_steps * sizeof(step_workspace),
+                       "alloc_gc_thread");
 
 #ifdef THREADED_RTS
     t->id = 0;
@@ -944,32 +990,26 @@ alloc_gc_thread (gc_thread *t, int n)
     t->papi_events = -1;
 #endif
 
-    t->steps = stgMallocBytes(RtsFlags.GcFlags.generations * 
-                               sizeof(step_workspace *), 
-                               "initialise_gc_thread");
-
-    for (g = 0; g < RtsFlags.GcFlags.generations; g++)
+    for (s = 0; s < total_steps; s++)
     {
-        t->steps[g] = stgMallocBytes(generations[g].n_steps * 
-                                      sizeof(step_workspace),
-                                      "initialise_gc_thread/2");
-
-        for (s = 0; s < generations[g].n_steps; s++)
-        {
-            ws = &t->steps[g][s];
-            ws->stp = &generations[g].steps[s];
-            ws->gct = t;
-
-            ws->scan_bd = NULL;
-            ws->scan = NULL;
-
-           ws->todo_bd = NULL;
-            ws->buffer_todo_bd = NULL;
-
-           ws->scavd_list = NULL;
-           ws->n_scavd_blocks = 0;
-        }
+        ws = &t->steps[s];
+        ws->step = &all_steps[s];
+        ASSERT(s == ws->step->abs_no);
+        ws->gct = t;
+        
+        ws->scan_bd = NULL;
+
+        ws->todo_bd = NULL;
+        ws->buffer_todo_bd = NULL;
+        
+        ws->part_list = NULL;
+        ws->n_part_blocks = 0;
+
+        ws->scavd_list = NULL;
+        ws->n_scavd_blocks = 0;
     }
+
+    return t;
 }
 
 
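alloc_gc_thread now allocates and returns the structure itself, with the
flat workspace array carved out of the same stgMallocBytes call; that
only works if the workspace array is the last member of gc_thread. A
sketch of the assumed layout (toy types and names, not the real RTS
declarations):

    #include <stdlib.h>

    typedef struct { void *todo_bd; /* ... */ } ws_t;   /* toy step_workspace */

    typedef struct {
        int thread_index;
        /* ... other per-thread GC state ... */
        ws_t steps[];           /* C99 flexible array member, one per step */
    } gct_t;                    /* toy gc_thread */

    static gct_t *alloc_gct(int n, size_t total_steps)
    {
        gct_t *t = malloc(sizeof(gct_t) + total_steps * sizeof(ws_t));
        if (t != NULL)
            t->thread_index = n;
        return t;
    }

Correspondingly, gc_threads becomes an array of pointers in the next
hunk, so each thread's state is its own separately allocated heap object.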
@@ -980,17 +1020,17 @@ alloc_gc_threads (void)
 #if defined(THREADED_RTS)
         nat i;
        gc_threads = stgMallocBytes (RtsFlags.ParFlags.gcThreads * 
-                                    sizeof(gc_thread), 
+                                    sizeof(gc_thread*), 
                                     "alloc_gc_threads");
 
        for (i = 0; i < RtsFlags.ParFlags.gcThreads; i++) {
-           alloc_gc_thread(&gc_threads[i], i);
+           gc_threads[i] = alloc_gc_thread(i);
        }
 #else
-       gc_threads = stgMallocBytes (sizeof(gc_thread), 
+       gc_threads = stgMallocBytes (sizeof(gc_thread*), 
                                     "alloc_gc_threads");
 
-       alloc_gc_thread(gc_threads, 0);
+       gc_threads[0] = alloc_gc_thread(0);
 #endif
     }
 }
@@ -1012,6 +1052,7 @@ inc_running (void)
     ACQUIRE_LOCK(&gc_running_mutex);
     n_running = ++gc_running_threads;
     RELEASE_LOCK(&gc_running_mutex);
+    ASSERT(n_running <= n_gc_threads);
     return n_running;
 }
 
@@ -1020,6 +1061,7 @@ dec_running (void)
 {
     nat n_running;
     ACQUIRE_LOCK(&gc_running_mutex);
+    ASSERT(n_gc_threads != 0);
     n_running = --gc_running_threads;
     RELEASE_LOCK(&gc_running_mutex);
     return n_running;
@@ -1054,6 +1096,7 @@ loop:
               gct->thread_index, r);
 
     while (gc_running_threads != 0) {
+        usleep(1);
        if (any_work()) {
            inc_running();
            goto loop;
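
The usleep(1) turns the wait for the other GC threads from a hard spin
into a polite poll; this is the call that motivated the <unistd.h>
include (and, presumably, the PosixSource.h change) at the top of the
file.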
@@ -1077,13 +1120,13 @@ gc_thread_mainloop (void)
 
        // Wait until we're told to wake up
        ACQUIRE_LOCK(&gct->wake_mutex);
+       gct->wakeup = rtsFalse;
        while (!gct->wakeup) {
            debugTrace(DEBUG_gc, "GC thread %d standing by...", 
                       gct->thread_index);
            waitCondition(&gct->wake_cond, &gct->wake_mutex);
        }
        RELEASE_LOCK(&gct->wake_mutex);
-       gct->wakeup = rtsFalse;
        if (gct->exit) break;
 
 #ifdef USE_PAPI
@@ -1130,7 +1173,7 @@ start_gc_threads (void)
        // Start from 1: the main thread is 0
        for (i = 1; i < RtsFlags.ParFlags.gcThreads; i++) {
            createOSThread(&id, (OSThreadProc*)&gc_thread_entry, 
-                          &gc_threads[i]);
+                          gc_threads[i]);
        }
        done = rtsTrue;
     }
@@ -1144,10 +1187,30 @@ wakeup_gc_threads (nat n_threads USED_IF_THREADS)
     nat i;
     for (i=1; i < n_threads; i++) {
        inc_running();
-       ACQUIRE_LOCK(&gc_threads[i].wake_mutex);
-       gc_threads[i].wakeup = rtsTrue;
-       signalCondition(&gc_threads[i].wake_cond);
-       RELEASE_LOCK(&gc_threads[i].wake_mutex);
+       ACQUIRE_LOCK(&gc_threads[i]->wake_mutex);
+       gc_threads[i]->wakeup = rtsTrue;
+       signalCondition(&gc_threads[i]->wake_cond);
+       RELEASE_LOCK(&gc_threads[i]->wake_mutex);
+    }
+#endif
+}
+
+// After GC is complete, we must wait for all GC threads to enter the
+// standby state, otherwise they may still be executing inside
+// any_work(), and may even remain awake until the next GC starts.
+static void
+shutdown_gc_threads (nat n_threads USED_IF_THREADS)
+{
+#if defined(THREADED_RTS)
+    nat i;
+    rtsBool wakeup;
+    for (i=1; i < n_threads; i++) {
+        do {
+            ACQUIRE_LOCK(&gc_threads[i]->wake_mutex);
+            wakeup = gc_threads[i]->wakeup;
+            // wakeup is false while the thread is waiting
+            RELEASE_LOCK(&gc_threads[i]->wake_mutex);
+        } while (wakeup);
     }
 #endif
 }
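
shutdown_gc_threads pairs with the gc_thread_mainloop change above: a
worker now clears its wakeup flag under the mutex before waiting, so
wakeup == rtsFalse reliably means "standing by", and the main thread can
poll the flag to ensure no worker is still spinning in any_work() when GC
returns. A minimal pthread sketch of the same handshake (illustrative
names, not the RTS API):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
        bool wakeup;            /* false <=> thread is standing by */
    } wake_t;

    static void standby(wake_t *w)          /* worker side */
    {
        pthread_mutex_lock(&w->mutex);
        w->wakeup = false;                  /* announce standby first */
        while (!w->wakeup)
            pthread_cond_wait(&w->cond, &w->mutex);
        pthread_mutex_unlock(&w->mutex);
    }

    static void wake(wake_t *w)             /* main thread: start a worker */
    {
        pthread_mutex_lock(&w->mutex);
        w->wakeup = true;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->mutex);
    }

    static void wait_standby(wake_t *w)     /* main thread: shutdown side */
    {
        bool awake;
        do {
            pthread_mutex_lock(&w->mutex);
            awake = w->wakeup;
            pthread_mutex_unlock(&w->mutex);
        } while (awake);
    }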
@@ -1194,6 +1257,7 @@ init_collected_gen (nat g, nat n_threads)
 
        // we don't have any to-be-scavenged blocks yet
        stp->todos = NULL;
+        stp->todos_last = NULL;
        stp->n_todos = 0;
 
        // initialise the large object queues.
@@ -1251,18 +1315,20 @@ init_collected_gen (nat g, nat n_threads)
            // we don't copy objects into g0s0, unless -G0
            if (g==0 && s==0 && RtsFlags.GcFlags.generations > 1) continue;
 
-           ws = &gc_threads[t].steps[g][s];
+           ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
 
            ws->scan_bd = NULL;
-           ws->scan = NULL;
 
            ws->todo_large_objects = NULL;
 
+            ws->part_list = NULL;
+            ws->n_part_blocks = 0;
+
            // allocate the first to-space block; extra blocks will be
            // chained on as necessary.
            ws->todo_bd = NULL;
            ws->buffer_todo_bd = NULL;
-           gc_alloc_todo_block(ws);
+           alloc_todo_block(ws,0);
 
            ws->scavd_list = NULL;
            ws->n_scavd_blocks = 0;
@@ -1292,12 +1358,15 @@ init_uncollected_gen (nat g, nat threads)
     for (t = 0; t < threads; t++) {
        for (s = 0; s < generations[g].n_steps; s++) {
            
-           ws = &gc_threads[t].steps[g][s];
-           stp = ws->stp;
+           ws = &gc_threads[t]->steps[g * RtsFlags.GcFlags.steps + s];
+           stp = ws->step;
            
            ws->buffer_todo_bd = NULL;
            ws->todo_large_objects = NULL;
 
+            ws->part_list = NULL;
+            ws->n_part_blocks = 0;
+
            ws->scavd_list = NULL;
            ws->n_scavd_blocks = 0;
 
@@ -1315,7 +1384,7 @@ init_uncollected_gen (nat g, nat threads)
                // this block is also the scan block; we must scan
                // from the current end point.
                ws->scan_bd = ws->todo_bd;
-               ws->scan = ws->scan_bd->free;
+               ws->scan_bd->u.scan = ws->scan_bd->free;
 
                // subtract the contents of this block from the stats,
                // because we'll count the whole block later.
@@ -1324,9 +1393,8 @@ init_uncollected_gen (nat g, nat threads)
            else
            {
                ws->scan_bd = NULL;
-               ws->scan = NULL;
                ws->todo_bd = NULL;
-               gc_alloc_todo_block(ws);
+               alloc_todo_block(ws,0);
            }
        }
     }
@@ -1351,10 +1419,17 @@ init_uncollected_gen (nat g, nat threads)
 static void
 init_gc_thread (gc_thread *t)
 {
+    t->static_objects = END_OF_STATIC_LIST;
+    t->scavenged_static_objects = END_OF_STATIC_LIST;
     t->evac_step = 0;
     t->failed_to_evac = rtsFalse;
     t->eager_promotion = rtsTrue;
     t->thunk_selector_depth = 0;
+    t->copied = 0;
+    t->any_work = 0;
+    t->no_work = 0;
+    t->scav_find_work = 0;
+
 }
 
 /* -----------------------------------------------------------------------------
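
init_gc_thread now seeds the per-thread static_objects and
scavenged_static_objects lists that replace the globals (and the
static_objects_sync spinlock) removed at the top of this patch, and
zeroes the new per-thread statistics (copied, any_work, no_work,
scav_find_work) that GarbageCollect sums and traces after each
collection.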