/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.73 2000/03/16 17:27:12 simonmar Exp $
+ * $Id: GC.c,v 1.134 2002/04/19 10:25:00 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
* Generational garbage collector
*
* ---------------------------------------------------------------------------*/
-//@menu
-//* Includes::
-//* STATIC OBJECT LIST::
-//* Static function declarations::
-//* Garbage Collect::
-//* Weak Pointers::
-//* Evacuation::
-//* Scavenging::
-//* Reverting CAFs::
-//* Sanity code for CAF garbage collection::
-//* Lazy black holing::
-//* Stack squeezing::
-//* Pausing a thread::
-//* Index::
-//@end menu
-
-//@node Includes, STATIC OBJECT LIST
-//@subsection Includes
-
+#include "PosixSource.h"
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "StoragePriv.h"
#include "Stats.h"
#include "Schedule.h"
-#include "SchedAPI.h" /* for ReverCAFs prototype */
+#include "SchedAPI.h" // for ReverCAFs prototype
#include "Sanity.h"
-#include "GC.h"
#include "BlockAlloc.h"
+#include "MBlock.h"
#include "Main.h"
#include "ProfHeap.h"
#include "SchedAPI.h"
#include "Weak.h"
#include "StablePriv.h"
#include "Prelude.h"
+#include "ParTicky.h" // ToDo: move into Rts.h
+#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
# endif
#endif
+#include "HsFFI.h"
+#include "Linker.h"
+#if defined(RTS_GTK_FRONTPANEL)
+#include "FrontPanel.h"
+#endif
-StgCAF* enteredCAFs;
-
-//@node STATIC OBJECT LIST, Static function declarations, Includes
-//@subsection STATIC OBJECT LIST
+#include "RetainerProfile.h"
+#include "LdvProfile.h"
/* STATIC OBJECT LIST.
*
* We build up a static object list while collecting generations 0..N,
* which is then appended to the static object list of generation N+1.
*/
-StgClosure* static_objects; /* live static objects */
-StgClosure* scavenged_static_objects; /* static objects scavenged so far */
+StgClosure* static_objects; // live static objects
+StgClosure* scavenged_static_objects; // static objects scavenged so far
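// How these lists are consumed (a sketch; scavenge_static() itself is
// not shown in this hunk, and STATIC_LINK is the storage manager's
// accessor for a static closure's link field):
//
//   while (static_objects != END_OF_STATIC_LIST) {
//       StgClosure *p = static_objects;
//       static_objects = STATIC_LINK(get_itbl(p), p);   // unchain p
//       ...scavenge the pointers/SRT of p...
//       STATIC_LINK(get_itbl(p), p) = scavenged_static_objects;
//       scavenged_static_objects = p;                   // p is done
//   }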
/* N is the oldest generation being collected, where the generations
* are numbered starting at 0. A major GC (indicated by the major_gc
/* Weak pointers
*/
-static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
-static rtsBool weak_done; /* all done for this pass */
+StgWeak *old_weak_ptr_list; // also pending finaliser list
+
+/* Which stage of processing various kinds of weak pointer are we at?
+ * (see traverse_weak_ptr_list() below for discussion).
+ */
+typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
+static WeakStage weak_stage;
/* List of all threads during GC
*/
static StgTSO *old_all_threads;
-static StgTSO *resurrected_threads;
+StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
* generation.
/* Old to-space (used for two-space collector only)
*/
-bdescr *old_to_space;
-
+bdescr *old_to_blocks;
/* Data used for allocation area sizing.
*/
-lnat new_blocks; /* blocks allocated during this GC */
-lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
+lnat new_blocks; // blocks allocated during this GC
+lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
-//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
-//@subsection Static function declarations
+/* Used to avoid long recursion due to selector thunks
+ */
+lnat thunk_selector_depth = 0;
+#define MAX_THUNK_SELECTOR_DEPTH 256
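// The guard pattern (cf. the currently disabled THUNK_SELECTOR code in
// evacuate() below): bound the recursion instead of chasing an
// arbitrarily long chain of selector thunks.
//
//   if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
//       thunk_selector_depth++;
//       selectee = evacuate(selectee);
//       thunk_selector_depth--;
//   }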
/* -----------------------------------------------------------------------------
Static function declarations
-------------------------------------------------------------------------- */
+static void mark_root ( StgClosure **root );
static StgClosure * evacuate ( StgClosure *q );
static void zero_static_object_list ( StgClosure* first_static );
static void zero_mutable_list ( StgMutClosure *first );
-static void revert_dead_CAFs ( void );
static rtsBool traverse_weak_ptr_list ( void );
-static void cleanup_weak_ptr_list ( StgWeak **list );
+static void mark_weak_ptr_list ( StgWeak **list );
+static void scavenge ( step * );
+static void scavenge_mark_stack ( void );
static void scavenge_stack ( StgPtr p, StgPtr stack_end );
-static void scavenge_large ( step *step );
-static void scavenge ( step *step );
+static rtsBool scavenge_one ( StgPtr p );
+static void scavenge_large ( step * );
static void scavenge_static ( void );
static void scavenge_mutable_list ( generation *g );
static void scavenge_mut_once_list ( generation *g );
-#ifdef DEBUG
+#if 0 && defined(DEBUG)
static void gcCAFs ( void );
#endif
-//@node Garbage Collect, Weak Pointers, Static function declarations
-//@subsection Garbage Collect
+/* -----------------------------------------------------------------------------
+ inline functions etc. for dealing with the mark bitmap & stack.
+ -------------------------------------------------------------------------- */
+
+#define MARK_STACK_BLOCKS 4
+
+static bdescr *mark_stack_bdescr;
+static StgPtr *mark_stack;
+static StgPtr *mark_sp;
+static StgPtr *mark_splim;
+
+// Flag and pointers used for falling back to a linear scan when the
+// mark stack overflows.
+static rtsBool mark_stack_overflowed;
+static bdescr *oldgen_scan_bd;
+static StgPtr oldgen_scan;
+
+static inline rtsBool
+mark_stack_empty(void)
+{
+ return mark_sp == mark_stack;
+}
+
+static inline rtsBool
+mark_stack_full(void)
+{
+ return mark_sp >= mark_splim;
+}
+
+static inline void
+reset_mark_stack(void)
+{
+ mark_sp = mark_stack;
+}
+
+static inline void
+push_mark_stack(StgPtr p)
+{
+ *mark_sp++ = p;
+}
+
+static inline StgPtr
+pop_mark_stack(void)
+{
+ return *--mark_sp;
+}
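// Typical use of these helpers (a sketch; the real call site is the
// compacted-step branch of evacuate() below): an unmarked object is
// marked and pushed, and a full stack sets the overflow flag so that
// scavenging falls back to a linear scan of the old generation.
//
//   if (!is_marked((P_)q,bd)) {
//       mark((P_)q,bd);
//       if (mark_stack_full()) {
//           mark_stack_overflowed = rtsTrue;
//           reset_mark_stack();
//       }
//       push_mark_stack((P_)q);
//   }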
/* -----------------------------------------------------------------------------
GarbageCollect
- free from-space in each step, and set from-space = to-space.
+ Locks held: sched_mutex
+
-------------------------------------------------------------------------- */
-//@cindex GarbageCollect
-void GarbageCollect(void (*get_roots)(void))
+void
+GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
bdescr *bd;
- step *step;
+ step *stp;
lnat live, allocated, collected = 0, copied = 0;
+ lnat oldgen_saved_blocks = 0;
nat g, s;
#ifdef PROFILING
#if defined(DEBUG) && defined(GRAN)
IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
- Now, Now))
+ Now, Now));
#endif
- /* tell the stats department that we've started a GC */
+ // tell the stats department that we've started a GC
stat_startGC();
- /* attribute any costs to CCS_GC */
+ // Init stats and print par specific (timing) info
+ PAR_TICKY_PAR_START();
+
+ // attribute any costs to CCS_GC
#ifdef PROFILING
prev_CCS = CCCS;
CCCS = CCS_GC;
#endif
- /* Approximate how much we allocated */
+ /* Approximate how much we allocated.
+ * ToDo: only when generating stats?
+ */
allocated = calcAllocated();
/* Figure out which generation to collect
*/
- N = 0;
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
- N = g;
+ if (force_major_gc) {
+ N = RtsFlags.GcFlags.generations - 1;
+ major_gc = rtsTrue;
+ } else {
+ N = 0;
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ if (generations[g].steps[0].n_blocks +
+ generations[g].steps[0].n_large_blocks
+ >= generations[g].max_blocks) {
+ N = g;
+ }
}
+ major_gc = (N == RtsFlags.GcFlags.generations-1);
+ }
+
+#ifdef RTS_GTK_FRONTPANEL
+ if (RtsFlags.GcFlags.frontpanel) {
+ updateFrontPanelBeforeGC(N);
}
- major_gc = (N == RtsFlags.GcFlags.generations-1);
+#endif
- /* check stack sanity *before* GC (ToDo: check all threads) */
+ // check stack sanity *before* GC (ToDo: check all threads)
#if defined(GRAN)
// ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
#endif
- IF_DEBUG(sanity, checkFreeListSanity());
+ IF_DEBUG(sanity, checkFreeListSanity());
/* Initialise the static object lists
*/
/* Save the old to-space if we're doing a two-space collection
*/
if (RtsFlags.GcFlags.generations == 1) {
- old_to_space = g0s0->to_space;
- g0s0->to_space = NULL;
+ old_to_blocks = g0s0->to_blocks;
+ g0s0->to_blocks = NULL;
}
/* Keep a count of how many new blocks we allocated during this GC
for (s = 0; s < generations[g].n_steps; s++) {
- /* generation 0, step 0 doesn't need to-space */
+ // generation 0, step 0 doesn't need to-space
if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
continue;
}
* as necessary.
*/
bd = allocBlock();
- step = &generations[g].steps[s];
- ASSERT(step->gen->no == g);
- ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
- bd->gen = &generations[g];
- bd->step = step;
+ stp = &generations[g].steps[s];
+ ASSERT(stp->gen_no == g);
+ ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
+ bd->gen_no = g;
+ bd->step = stp;
bd->link = NULL;
- bd->evacuated = 1; /* it's a to-space block */
- step->hp = bd->start;
- step->hpLim = step->hp + BLOCK_SIZE_W;
- step->hp_bd = bd;
- step->to_space = bd;
- step->to_blocks = 1;
- step->scan = bd->start;
- step->scan_bd = bd;
- step->new_large_objects = NULL;
- step->scavenged_large_objects = NULL;
+ bd->flags = BF_EVACUATED; // it's a to-space block
+ stp->hp = bd->start;
+ stp->hpLim = stp->hp + BLOCK_SIZE_W;
+ stp->hp_bd = bd;
+ stp->to_blocks = bd;
+ stp->n_to_blocks = 1;
+ stp->scan = bd->start;
+ stp->scan_bd = bd;
+ stp->new_large_objects = NULL;
+ stp->scavenged_large_objects = NULL;
+ stp->n_scavenged_large_blocks = 0;
new_blocks++;
- /* mark the large objects as not evacuated yet */
- for (bd = step->large_objects; bd; bd = bd->link) {
- bd->evacuated = 0;
+ // mark the large objects as not evacuated yet
+ for (bd = stp->large_objects; bd; bd = bd->link) {
+ bd->flags = BF_LARGE;
+ }
+
+ // for a compacted step, we need to allocate the bitmap
+ if (stp->is_compacted) {
+ nat bitmap_size; // in bytes
+ bdescr *bitmap_bdescr;
+ StgWord *bitmap;
+
+ bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
+
+ if (bitmap_size > 0) {
+ bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
+ / BLOCK_SIZE);
+ stp->bitmap = bitmap_bdescr;
+ bitmap = bitmap_bdescr->start;
+
+ IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
+ bitmap_size, bitmap););
+
+ // don't forget to fill it with zeros!
+ memset(bitmap, 0, bitmap_size);
+
+ // for each block in this step, point to its bitmap from the
+ // block descriptor.
+ for (bd=stp->blocks; bd != NULL; bd = bd->link) {
+ bd->u.bitmap = bitmap;
+ bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
+ }
+ }
}
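// How the bitmap is addressed (a sketch of is_marked()/mark() from
// GCCompact.h, assuming one mark bit per heap word, which is what the
// bitmap_size calculation above provides):
//
//   nat offset = p - bd->start;               // word offset in block
//   StgWord mask = (StgWord)1 << (offset & (sizeof(W_)*BITS_PER_BYTE - 1));
//   marked = bd->u.bitmap[offset / (sizeof(W_)*BITS_PER_BYTE)] & mask;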
}
}
*/
for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
- step = &generations[g].steps[s];
- if (step->hp_bd == NULL) {
- bd = allocBlock();
- bd->gen = &generations[g];
- bd->step = step;
- bd->link = NULL;
- bd->evacuated = 0; /* *not* a to-space block */
- step->hp = bd->start;
- step->hpLim = step->hp + BLOCK_SIZE_W;
- step->hp_bd = bd;
- step->blocks = bd;
- step->n_blocks = 1;
- new_blocks++;
+ stp = &generations[g].steps[s];
+ if (stp->hp_bd == NULL) {
+ ASSERT(stp->blocks == NULL);
+ bd = allocBlock();
+ bd->gen_no = g;
+ bd->step = stp;
+ bd->link = NULL;
+ bd->flags = 0; // *not* a to-space block or a large object
+ stp->hp = bd->start;
+ stp->hpLim = stp->hp + BLOCK_SIZE_W;
+ stp->hp_bd = bd;
+ stp->blocks = bd;
+ stp->n_blocks = 1;
+ new_blocks++;
}
/* Set the scan pointer for older generations: remember we
* still have to scavenge objects that have been promoted. */
- step->scan = step->hp;
- step->scan_bd = step->hp_bd;
- step->to_space = NULL;
- step->to_blocks = 0;
- step->new_large_objects = NULL;
- step->scavenged_large_objects = NULL;
+ stp->scan = stp->hp;
+ stp->scan_bd = stp->hp_bd;
+ stp->to_blocks = NULL;
+ stp->n_to_blocks = 0;
+ stp->new_large_objects = NULL;
+ stp->scavenged_large_objects = NULL;
+ stp->n_scavenged_large_blocks = 0;
}
}
+ /* Allocate a mark stack if we're doing a major collection.
+ */
+ if (major_gc) {
+ mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
+ mark_stack = (StgPtr *)mark_stack_bdescr->start;
+ mark_sp = mark_stack;
+ mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
+ } else {
+ mark_stack_bdescr = NULL;
+ }
+
/* -----------------------------------------------------------------------
* follow all the roots that we know about:
* - mutable lists from each generation > N
generations[g].mut_list = END_MUT_LIST;
}
- /* Do the mut-once lists first */
+ // Do the mut-once lists first
for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
IF_PAR_DEBUG(verbose,
printMutOnceList(&generations[g]));
}
}
+ /* follow roots from the CAF list (used by GHCi)
+ */
+ evac_gen = 0;
+ markCAFs(mark_root);
+
/* follow all the roots that the application knows about.
*/
evac_gen = 0;
- get_roots();
+ get_roots(mark_root);
#if defined(PAR)
/* And don't forget to mark the TSO if we got here direct from
}
*/
- /* Mark the entries in the GALA table of the parallel system */
+ // Mark the entries in the GALA table of the parallel system
markLocalGAs(major_gc);
+ // Mark all entries on the list of pending fetches
+ markPendingFetches(major_gc);
#endif
/* Mark the weak pointer list, and prepare to detect dead weak
* pointers.
*/
+ mark_weak_ptr_list(&weak_ptr_list);
old_weak_ptr_list = weak_ptr_list;
weak_ptr_list = NULL;
- weak_done = rtsFalse;
+ weak_stage = WeakPtrs;
/* The all_threads list is like the weak_ptr_list.
* See traverse_weak_ptr_list() for the details.
/* Mark the stable pointer table.
*/
- markStablePtrTable(major_gc);
+ markStablePtrTable(mark_root);
#ifdef INTERPRETER
{
loop:
flag = rtsFalse;
- /* scavenge static objects */
+ // scavenge static objects
if (major_gc && static_objects != END_OF_STATIC_LIST) {
- scavenge_static();
+ IF_DEBUG(sanity, checkStaticObjects(static_objects));
+ scavenge_static();
}
/* When scavenging the older generations: Objects may have been
* generation.
*/
- /* scavenge each step in generations 0..maxgen */
+ // scavenge each step in generations 0..maxgen
{
- int gen, st;
+ long gen;
+ int st;
+
loop2:
- for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
- for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
+ // scavenge objects in compacted generation
+ if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
+ (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+ scavenge_mark_stack();
+ flag = rtsTrue;
+ }
+
+ for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
+ for (st = generations[gen].n_steps; --st >= 0; ) {
if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
continue;
}
- step = &generations[gen].steps[st];
+ stp = &generations[gen].steps[st];
evac_gen = gen;
- if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
- scavenge(step);
+ if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
+ scavenge(stp);
flag = rtsTrue;
goto loop2;
}
- if (step->new_large_objects != NULL) {
- scavenge_large(step);
+ if (stp->new_large_objects != NULL) {
+ scavenge_large(stp);
flag = rtsTrue;
goto loop2;
}
}
}
}
+
if (flag) { goto loop; }
- /* must be last... */
- if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
+ // must be last... invariant is that everything is fully
+ // scavenged at this point.
+ if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
goto loop;
}
}
- /* Final traversal of the weak pointer list (see comment by
- * cleanUpWeakPtrList below).
+ /* Update the pointers from the "main thread" list - these are
+ * treated as weak pointers because we want to allow a main thread
+ * to get a BlockedOnDeadMVar exception in the same way as any other
+ * thread. Note that the threads should all have been retained by
+ * GC by virtue of being on the all_threads list; we're just
+ * updating pointers here.
*/
- cleanup_weak_ptr_list(&weak_ptr_list);
+ {
+ StgMainThread *m;
+ StgTSO *tso;
+ for (m = main_threads; m != NULL; m = m->link) {
+ tso = (StgTSO *) isAlive((StgClosure *)m->tso);
+ if (tso == NULL) {
+ barf("main thread has been GC'd");
+ }
+ m->tso = tso;
+ }
+ }
- /* Now see which stable names are still alive.
- */
- gcStablePtrTable(major_gc);
+#if defined(PAR)
+ // Reconstruct the Global Address tables used in GUM
+ rebuildGAtables(major_gc);
+ IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
+#endif
- /* revert dead CAFs and update enteredCAFs list */
- revert_dead_CAFs();
-
- /* Set the maximum blocks for the oldest generation, based on twice
- * the amount of live data now, adjusted to fit the maximum heap
- * size if necessary.
- *
- * This is an approximation, since in the worst case we'll need
- * twice the amount of live data plus whatever space the other
- * generations need.
- */
- if (RtsFlags.GcFlags.generations > 1) {
- if (major_gc) {
- oldest_gen->max_blocks =
- stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
- RtsFlags.GcFlags.minOldGenSize);
- if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
- oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
- if (((int)oldest_gen->max_blocks -
- (int)oldest_gen->steps[0].to_blocks) <
- (RtsFlags.GcFlags.pcFreeHeap *
- RtsFlags.GcFlags.maxHeapSize / 200)) {
- heapOverflow();
- }
+ // Now see which stable names are still alive.
+ gcStablePtrTable();
+
+ // Tidy the end of the to-space chains
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+ if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
+ stp->hp_bd->free = stp->hp;
+ stp->hp_bd->link = NULL;
+ }
}
- }
}
+#ifdef PROFILING
+ // We call processHeapClosureForDead() on every closure destroyed during
+ // the current garbage collection, so we invoke LdvCensusForDead().
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
+ || RtsFlags.ProfFlags.bioSelector != NULL)
+ LdvCensusForDead(N);
+#endif
+
+ // NO MORE EVACUATION AFTER THIS POINT!
+ // Finally: compaction of the oldest generation.
+ if (major_gc && oldest_gen->steps[0].is_compacted) {
+ // save number of blocks for stats
+ oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
+ compact(get_roots);
+ }
+
+ IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
+
/* run through all the generations/steps and tidy up
*/
copied = new_blocks * BLOCK_SIZE_W;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
if (g <= N) {
- generations[g].collections++; /* for stats */
+ generations[g].collections++; // for stats
}
for (s = 0; s < generations[g].n_steps; s++) {
bdescr *next;
- step = &generations[g].steps[s];
+ stp = &generations[g].steps[s];
if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
- /* Tidy the end of the to-space chains */
- step->hp_bd->free = step->hp;
- step->hp_bd->link = NULL;
- /* stats information: how much we copied */
+ // stats information: how much we copied
if (g <= N) {
- copied -= step->hp_bd->start + BLOCK_SIZE_W -
- step->hp_bd->free;
+ copied -= stp->hp_bd->start + BLOCK_SIZE_W -
+ stp->hp_bd->free;
}
}
- /* for generations we collected... */
+ // for generations we collected...
if (g <= N) {
- collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
+ // rough calculation of garbage collected, for stats output
+ if (stp->is_compacted) {
+ collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
+ } else {
+ collected += stp->n_blocks * BLOCK_SIZE_W;
+ }
/* free old memory and shift to-space into from-space for all
* the collected steps (except the allocation area). These
* freed blocks will probably be quickly recycled.
*/
if (!(g == 0 && s == 0)) {
- freeChain(step->blocks);
- step->blocks = step->to_space;
- step->n_blocks = step->to_blocks;
- step->to_space = NULL;
- step->to_blocks = 0;
- for (bd = step->blocks; bd != NULL; bd = bd->link) {
- bd->evacuated = 0; /* now from-space */
- }
+ if (stp->is_compacted) {
+ // for a compacted step, just shift the new to-space
+ // onto the front of the now-compacted existing blocks.
+ for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
+ bd->flags &= ~BF_EVACUATED; // now from-space
+ }
+ // tack the new blocks on the end of the existing blocks
+ if (stp->blocks == NULL) {
+ stp->blocks = stp->to_blocks;
+ } else {
+ for (bd = stp->blocks; bd != NULL; bd = next) {
+ next = bd->link;
+ if (next == NULL) {
+ bd->link = stp->to_blocks;
+ }
+ }
+ }
+ // add the new blocks to the block tally
+ stp->n_blocks += stp->n_to_blocks;
+ } else {
+ freeChain(stp->blocks);
+ stp->blocks = stp->to_blocks;
+ stp->n_blocks = stp->n_to_blocks;
+ for (bd = stp->blocks; bd != NULL; bd = bd->link) {
+ bd->flags &= ~BF_EVACUATED; // now from-space
+ }
+ }
+ stp->to_blocks = NULL;
+ stp->n_to_blocks = 0;
}
/* LARGE OBJECTS. The current live large objects are chained on
* collection from large_objects. Any objects left on
* large_objects list are therefore dead, so we free them here.
*/
- for (bd = step->large_objects; bd != NULL; bd = next) {
+ for (bd = stp->large_objects; bd != NULL; bd = next) {
next = bd->link;
freeGroup(bd);
bd = next;
}
- for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
- bd->evacuated = 0;
- }
- step->large_objects = step->scavenged_large_objects;
-
- /* Set the maximum blocks for this generation, interpolating
- * between the maximum size of the oldest and youngest
- * generations.
- *
- * max_blocks = oldgen_max_blocks * G
- * ----------------------
- * oldest_gen
- */
- if (g != 0) {
-#if 0
- generations[g].max_blocks = (oldest_gen->max_blocks * g)
- / (RtsFlags.GcFlags.generations-1);
-#endif
- generations[g].max_blocks = oldest_gen->max_blocks;
+
+ // update the count of blocks used by large objects
+ for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
+ bd->flags &= ~BF_EVACUATED;
}
+ stp->large_objects = stp->scavenged_large_objects;
+ stp->n_large_blocks = stp->n_scavenged_large_blocks;
- /* for older generations... */
} else {
+ // for older generations...
/* For older generations, we need to append the
* scavenged_large_object list (i.e. large objects that have been
* promoted during this GC) to the large_object list for that step.
*/
- for (bd = step->scavenged_large_objects; bd; bd = next) {
+ for (bd = stp->scavenged_large_objects; bd; bd = next) {
next = bd->link;
- bd->evacuated = 0;
- dbl_link_onto(bd, &step->large_objects);
+ bd->flags &= ~BF_EVACUATED;
+ dbl_link_onto(bd, &stp->large_objects);
}
- /* add the new blocks we promoted during this GC */
- step->n_blocks += step->to_blocks;
+ // add the new blocks we promoted during this GC
+ stp->n_blocks += stp->n_to_blocks;
+ stp->n_large_blocks += stp->n_scavenged_large_blocks;
}
}
}
-
- /* Guess the amount of live data for stats. */
+
+ /* Reset the sizes of the older generations when we do a major
+ * collection.
+ *
+ * CURRENT STRATEGY: make all generations except zero the same size.
+ * We have to stay within the maximum heap size, and leave a certain
+ * percentage of the maximum heap size available to allocate into.
+ */
+ if (major_gc && RtsFlags.GcFlags.generations > 1) {
+ nat live, size, min_alloc;
+ nat max = RtsFlags.GcFlags.maxHeapSize;
+ nat gens = RtsFlags.GcFlags.generations;
+
+ // live in the oldest generations
+ live = oldest_gen->steps[0].n_blocks +
+ oldest_gen->steps[0].n_large_blocks;
+
+ // default max size for all generations except zero
+ size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
+ RtsFlags.GcFlags.minOldGenSize);
+
+ // minimum size for generation zero
+ min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
+ RtsFlags.GcFlags.minAllocAreaSize);
+
+ // Auto-enable compaction when the residency reaches a
+ // certain percentage of the maximum heap size (default: 30%).
+ if (RtsFlags.GcFlags.generations > 1 &&
+ (RtsFlags.GcFlags.compact ||
+ (max > 0 &&
+ oldest_gen->steps[0].n_blocks >
+ (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
+ oldest_gen->steps[0].is_compacted = 1;
+// fprintf(stderr,"compaction: on\n", live);
+ } else {
+ oldest_gen->steps[0].is_compacted = 0;
+// fprintf(stderr,"compaction: off\n", live);
+ }
+
+ // if we're going to go over the maximum heap size, reduce the
+ // size of the generations accordingly. The calculation is
+ // different if compaction is turned on, because we don't need
+ // to double the space required to collect the old generation.
+ if (max != 0) {
+
+ // this test is necessary to ensure that the calculations
+ // below don't have any negative results - we're working
+ // with unsigned values here.
+ if (max < min_alloc) {
+ heapOverflow();
+ }
+
+ if (oldest_gen->steps[0].is_compacted) {
+ if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
+ size = (max - min_alloc) / ((gens - 1) * 2 - 1);
+ }
+ } else {
+ if ( (size * (gens - 1) * 2) + min_alloc > max ) {
+ size = (max - min_alloc) / ((gens - 1) * 2);
+ }
+ }
+
+ if (size < live) {
+ heapOverflow();
+ }
+ }
+
+#if 0
+ fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
+ min_alloc, size, max);
+#endif
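// Worked example (illustrative numbers only): with gens = 2, compaction
// off, min_alloc = 1000 blocks and max = 10000 blocks, the constraint is
// size * (gens-1) * 2 + min_alloc <= max, so size is capped at
// (10000 - 1000) / 2 = 4500 blocks for the old generation.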
+
+ for (g = 0; g < gens; g++) {
+ generations[g].max_blocks = size;
+ }
+ }
+
+ // Guess the amount of live data for stats.
live = calcLive();
/* Free the small objects allocated via allocate(), since this will
alloc_HpLim = NULL;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
+ // Start a new pinned_object_block
+ pinned_object_block = NULL;
+
+ /* Free the mark stack.
+ */
+ if (mark_stack_bdescr != NULL) {
+ freeGroup(mark_stack_bdescr);
+ }
+
+ /* Free any bitmaps.
+ */
+ for (g = 0; g <= N; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+ if (stp->is_compacted && stp->bitmap != NULL) {
+ freeGroup(stp->bitmap);
+ }
+ }
+ }
+
/* Two-space collector:
* Free the old to-space, and estimate the amount of live data.
*/
if (RtsFlags.GcFlags.generations == 1) {
nat blocks;
- if (old_to_space != NULL) {
- freeChain(old_to_space);
+ if (old_to_blocks != NULL) {
+ freeChain(old_to_blocks);
}
- for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
- bd->evacuated = 0; /* now from-space */
+ for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
+ bd->flags = 0; // now from-space
}
/* For a two-space collector, we need to resize the nursery. */
/* set up a new nursery. Allocate a nursery size based on a
- * function of the amount of live data (currently a factor of 2,
- * should be configurable (ToDo)). Use the blocks from the old
- * nursery if possible, freeing up any left over blocks.
+ * function of the amount of live data (by default a factor of 2).
+ * Use the blocks from the old nursery if possible, freeing up any
+ * left over blocks.
*
* If we get near the maximum heap size, then adjust our nursery
* size accordingly. If the nursery is the same size as the live
*
* A normal 2-space collector would need 4L bytes to give the same
* performance we get from 3L bytes, reducing to the same
- * performance at 2L bytes.
+ * performance at 2L bytes.
*/
- blocks = g0s0->to_blocks;
+ blocks = g0s0->n_to_blocks;
- if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
- RtsFlags.GcFlags.maxHeapSize ) {
- int adjusted_blocks; /* signed on purpose */
+ if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
+ blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
+ RtsFlags.GcFlags.maxHeapSize ) {
+ long adjusted_blocks; // signed on purpose
int pc_free;
adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
- IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
+ IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
heapOverflow();
*/
if (RtsFlags.GcFlags.heapSizeSuggestion) {
- int blocks;
- nat needed = calcNeeded(); /* approx blocks needed at next GC */
+ long blocks;
+ nat needed = calcNeeded(); // approx blocks needed at next GC
/* Guess how much will be live in generation 0 step 0 next time.
- * A good approximation is the obtained by finding the
+ * A good approximation is obtained by finding the
* percentage of g0s0 that was live at the last minor GC.
*/
if (N == 0) {
* collection for collecting all steps except g0s0.
*/
blocks =
- (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
- (100 + (int)g0s0_pcnt_kept);
+ (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
+ (100 + (long)g0s0_pcnt_kept);
- if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
+ if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
blocks = RtsFlags.GcFlags.minAllocAreaSize;
}
resizeNursery((nat)blocks);
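// Worked example (illustrative numbers only): with heapSizeSuggestion =
// 1000 blocks, needed = 200 and g0s0_pcnt_kept = 30, this gives
// blocks = (800 * 100) / 130 = 615: a 615-block nursery plus the ~30%
// of it expected to survive just fills the 800-block slack.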
+
+ } else {
+ // we might have added extra large blocks to the nursery, so
+ // resize back to minAllocAreaSize again.
+ resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
}
}
- /* mark the garbage collected CAFs as dead */
-#ifdef DEBUG
+ // mark the garbage collected CAFs as dead
+#if 0 && defined(DEBUG) // doesn't work at the moment
if (major_gc) { gcCAFs(); }
#endif
- /* zero the scavenged static object list */
+#ifdef PROFILING
+ // resetStaticObjectForRetainerProfiling() must be called before
+ // zeroing below.
+ resetStaticObjectForRetainerProfiling();
+#endif
+
+ // zero the scavenged static object list
if (major_gc) {
zero_static_object_list(scavenged_static_objects);
}
- /* Reset the nursery
- */
+ // Reset the nursery
resetNurseries();
-#if defined(PAR)
- /* Reconstruct the Global Address tables used in GUM */
- RebuildGAtables(major_gc);
-#endif
-
- /* start any pending finalizers */
+ RELEASE_LOCK(&sched_mutex);
+
+ // start any pending finalizers
scheduleFinalizers(old_weak_ptr_list);
- /* send exceptions to any threads which were about to die */
+ ACQUIRE_LOCK(&sched_mutex);
+
+ // send exceptions to any threads which were about to die
resurrectThreads(resurrected_threads);
+
+ // Update the stable pointer hash table.
+ updateStablePtrTable(major_gc);
- /* check sanity after GC */
- IF_DEBUG(sanity, checkSanity(N));
+ // check sanity after GC
+ IF_DEBUG(sanity, checkSanity());
- /* extra GC trace info */
- IF_DEBUG(gc, stat_describe_gens());
+ // extra GC trace info
+ IF_DEBUG(gc, statDescribeGens());
#ifdef DEBUG
- /* symbol-table based profiling */
- /* heapCensus(to_space); */ /* ToDo */
+ // symbol-table based profiling
+ /* heapCensus(to_blocks); */ /* ToDo */
#endif
- /* restore enclosing cost centre */
+ // restore enclosing cost centre
#ifdef PROFILING
- heapCensus();
CCCS = prev_CCS;
#endif
- /* check for memory leaks if sanity checking is on */
+ // check for memory leaks if sanity checking is on
IF_DEBUG(sanity, memInventory());
- /* ok, GC over: tell the stats department what happened. */
+#ifdef RTS_GTK_FRONTPANEL
+ if (RtsFlags.GcFlags.frontpanel) {
+ updateFrontPanelAfterGC( N, live );
+ }
+#endif
+
+ // ok, GC over: tell the stats department what happened.
stat_endGC(allocated, collected, live, copied, N);
+
+ //PAR_TICKY_TP();
}
-//@node Weak Pointers, Evacuation, Garbage Collect
-//@subsection Weak Pointers
/* -----------------------------------------------------------------------------
Weak Pointers
older generations than the one we're collecting. This could
probably be optimised by keeping per-generation lists of weak
pointers, but for a few weak pointers this scheme will work.
+
+ There are three distinct stages to processing weak pointers:
+
+ - weak_stage == WeakPtrs
+
+ We process all the weak pointers whose keys are alive (evacuate
+ their values and finalizers), and repeat until we can find no new
+ live keys. If no live keys are found in this pass, then we
+ evacuate the finalizers of all the dead weak pointers in order to
+ run them.
+
+ - weak_stage == WeakThreads
+
+ Now, we discover which *threads* are still alive. Pointers to
+ threads from the all_threads and main thread lists are the
+ weakest of all: a pointer from the finalizer of a dead weak
+ pointer can keep a thread alive. Any threads found to be unreachable
+ are evacuated and placed on the resurrected_threads list so we
+ can send them a signal later.
+
+ - weak_stage == WeakDone
+
+ No more evacuation is done.
+
-------------------------------------------------------------------------- */
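// The caller's side of this contract is the main scavenge loop in
// GarbageCollect() above (a sketch):
//
//   loop:
//       ...scavenge all generations/steps to a fixed point...
//       if (traverse_weak_ptr_list()) goto loop;  // evacuated something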
-//@cindex traverse_weak_ptr_list
static rtsBool
traverse_weak_ptr_list(void)
StgClosure *new;
rtsBool flag = rtsFalse;
- if (weak_done) { return rtsFalse; }
-
- /* doesn't matter where we evacuate values/finalizers to, since
- * these pointers are treated as roots (iff the keys are alive).
- */
- evac_gen = 0;
-
- last_w = &old_weak_ptr_list;
- for (w = old_weak_ptr_list; w; w = next_w) {
-
- /* First, this weak pointer might have been evacuated. If so,
- * remove the forwarding pointer from the weak_ptr_list.
- */
- if (get_itbl(w)->type == EVACUATED) {
- w = (StgWeak *)((StgEvacuated *)w)->evacuee;
- *last_w = w;
- }
+ switch (weak_stage) {
- /* There might be a DEAD_WEAK on the list if finalizeWeak# was
- * called on a live weak pointer object. Just remove it.
- */
- if (w->header.info == &DEAD_WEAK_info) {
- next_w = ((StgDeadWeak *)w)->link;
- *last_w = next_w;
- continue;
- }
+ case WeakDone:
+ return rtsFalse;
- ASSERT(get_itbl(w)->type == WEAK);
+ case WeakPtrs:
+ /* doesn't matter where we evacuate values/finalizers to, since
+ * these pointers are treated as roots (iff the keys are alive).
+ */
+ evac_gen = 0;
+
+ last_w = &old_weak_ptr_list;
+ for (w = old_weak_ptr_list; w != NULL; w = next_w) {
+
+ /* There might be a DEAD_WEAK on the list if finalizeWeak# was
+ * called on a live weak pointer object. Just remove it.
+ */
+ if (w->header.info == &stg_DEAD_WEAK_info) {
+ next_w = ((StgDeadWeak *)w)->link;
+ *last_w = next_w;
+ continue;
+ }
+
+ switch (get_itbl(w)->type) {
+
+ case EVACUATED:
+ next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
+ *last_w = next_w;
+ continue;
+
+ case WEAK:
+ /* Now, check whether the key is reachable.
+ */
+ new = isAlive(w->key);
+ if (new != NULL) {
+ w->key = new;
+ // evacuate the value and finalizer
+ w->value = evacuate(w->value);
+ w->finalizer = evacuate(w->finalizer);
+ // remove this weak ptr from the old_weak_ptr list
+ *last_w = w->link;
+ // and put it on the new weak ptr list
+ next_w = w->link;
+ w->link = weak_ptr_list;
+ weak_ptr_list = w;
+ flag = rtsTrue;
+ IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p",
+ w, w->key));
+ continue;
+ }
+ else {
+ last_w = &(w->link);
+ next_w = w->link;
+ continue;
+ }
- /* Now, check whether the key is reachable.
- */
- if ((new = isAlive(w->key))) {
- w->key = new;
- /* evacuate the value and finalizer */
- w->value = evacuate(w->value);
- w->finalizer = evacuate(w->finalizer);
- /* remove this weak ptr from the old_weak_ptr list */
- *last_w = w->link;
- /* and put it on the new weak ptr list */
- next_w = w->link;
- w->link = weak_ptr_list;
- weak_ptr_list = w;
- flag = rtsTrue;
- IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
- continue;
- }
- else {
- last_w = &(w->link);
- next_w = w->link;
- continue;
- }
- }
+ default:
+ barf("traverse_weak_ptr_list: not WEAK");
+ }
+ }
+
+ /* If we didn't make any changes, then we can go round and kill all
+ * the dead weak pointers. The old_weak_ptr list is used as a list
+ * of pending finalizers later on.
+ */
+ if (flag == rtsFalse) {
+ for (w = old_weak_ptr_list; w; w = w->link) {
+ w->finalizer = evacuate(w->finalizer);
+ }
- /* Now deal with the all_threads list, which behaves somewhat like
- * the weak ptr list. If we discover any threads that are about to
- * become garbage, we wake them up and administer an exception.
- */
- {
- StgTSO *t, *tmp, *next, **prev;
+ // Next, move to the WeakThreads stage after fully
+ // scavenging the finalizers we've just evacuated.
+ weak_stage = WeakThreads;
+ }
- prev = &old_all_threads;
- for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+ return rtsTrue;
- /* Threads which have finished or died get dropped from
- * the list.
+ case WeakThreads:
+ /* Now deal with the all_threads list, which behaves somewhat like
+ * the weak ptr list. If we discover any threads that are about to
+ * become garbage, we wake them up and administer an exception.
*/
- switch (t->whatNext) {
- case ThreadKilled:
- case ThreadComplete:
- next = t->global_link;
- *prev = next;
- continue;
- default:
+ {
+ StgTSO *t, *tmp, *next, **prev;
+
+ prev = &old_all_threads;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+
+ (StgClosure *)tmp = isAlive((StgClosure *)t);
+
+ if (tmp != NULL) {
+ t = tmp;
+ }
+
+ ASSERT(get_itbl(t)->type == TSO);
+ switch (t->what_next) {
+ case ThreadRelocated:
+ next = t->link;
+ *prev = next;
+ continue;
+ case ThreadKilled:
+ case ThreadComplete:
+ // finished or died. The thread might still be alive, but we
+ // don't keep it on the all_threads list. Don't forget to
+ // stub out its global_link field.
+ next = t->global_link;
+ t->global_link = END_TSO_QUEUE;
+ *prev = next;
+ continue;
+ default:
+ ;
+ }
+
+ if (tmp == NULL) {
+ // not alive (yet): leave this thread on the
+ // old_all_threads list.
+ prev = &(t->global_link);
+ next = t->global_link;
+ }
+ else {
+ // alive: move this thread onto the all_threads list.
+ next = t->global_link;
+ t->global_link = all_threads;
+ all_threads = t;
+ *prev = next;
+ }
+ }
}
-
- /* Threads which have already been determined to be alive are
- * moved onto the all_threads list.
+
+ /* And resurrect any threads which were about to become garbage.
*/
- (StgClosure *)tmp = isAlive((StgClosure *)t);
- if (tmp != NULL) {
- next = tmp->global_link;
- tmp->global_link = all_threads;
- all_threads = tmp;
- *prev = next;
- } else {
- prev = &(t->global_link);
- next = t->global_link;
- }
- }
- }
-
- /* If we didn't make any changes, then we can go round and kill all
- * the dead weak pointers. The old_weak_ptr list is used as a list
- * of pending finalizers later on.
- */
- if (flag == rtsFalse) {
- cleanup_weak_ptr_list(&old_weak_ptr_list);
- for (w = old_weak_ptr_list; w; w = w->link) {
- w->finalizer = evacuate(w->finalizer);
- }
-
- /* And resurrect any threads which were about to become garbage.
- */
- {
- StgTSO *t, *tmp, *next;
- for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
- next = t->global_link;
- (StgClosure *)tmp = evacuate((StgClosure *)t);
- tmp->global_link = resurrected_threads;
- resurrected_threads = tmp;
+ {
+ StgTSO *t, *tmp, *next;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->global_link;
+ (StgClosure *)tmp = evacuate((StgClosure *)t);
+ tmp->global_link = resurrected_threads;
+ resurrected_threads = tmp;
+ }
}
- }
+
+ weak_stage = WeakDone; // *now* we're done,
+ return rtsTrue; // but one more round of scavenging, please
- weak_done = rtsTrue;
+ default:
+ barf("traverse_weak_ptr_list");
}
- return rtsTrue;
}
/* -----------------------------------------------------------------------------
evacuated need to be evacuated now.
-------------------------------------------------------------------------- */
-//@cindex cleanup_weak_ptr_list
static void
-cleanup_weak_ptr_list ( StgWeak **list )
+mark_weak_ptr_list ( StgWeak **list )
{
StgWeak *w, **last_w;
last_w = list;
for (w = *list; w; w = w->link) {
-
- if (get_itbl(w)->type == EVACUATED) {
- w = (StgWeak *)((StgEvacuated *)w)->evacuee;
- *last_w = w;
- }
-
- if (Bdescr((P_)w)->evacuated == 0) {
+ // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
+ ASSERT(w->header.info == &stg_DEAD_WEAK_info
+ || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
(StgClosure *)w = evacuate((StgClosure *)w);
*last_w = w;
- }
- last_w = &(w->link);
+ last_w = &(w->link);
}
}
isAlive determines whether the given closure is still alive (after
a garbage collection) or not. It returns the new address of the
closure if it is alive, or NULL otherwise.
+
+ NOTE: Use it before compaction only!
-------------------------------------------------------------------------- */
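// Typical use (this is the main-thread update in GarbageCollect()
// above); a NULL result means the closure died in this collection:
//
//   tso = (StgTSO *) isAlive((StgClosure *)m->tso);
//   if (tso == NULL) {
//       barf("main thread has been GC'd");
//   }
//   m->tso = tso;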
-//@cindex isAlive
StgClosure *
isAlive(StgClosure *p)
{
const StgInfoTable *info;
- nat size;
+ bdescr *bd;
while (1) {
* for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
*/
-#if 1 || !defined(PAR)
- /* ignore closures in generations that we're not collecting. */
- /* In GUM we use this routine when rebuilding GA tables; for some
- reason it has problems with the LOOKS_LIKE_STATIC macro -- HWL */
- if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
- return p;
+ loop:
+ bd = Bdescr((P_)p);
+ // ignore closures in generations that we're not collecting.
+ if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
+ return p;
}
-#endif
-
+ // large objects have an evacuated flag
+ if (bd->flags & BF_LARGE) {
+ if (bd->flags & BF_EVACUATED) {
+ return p;
+ } else {
+ return NULL;
+ }
+ }
+ // check the mark bit for compacted steps
+ if (bd->step->is_compacted && is_marked((P_)p,bd)) {
+ return p;
+ }
+
switch (info->type) {
-
+
case IND:
case IND_STATIC:
case IND_PERM:
- case IND_OLDGEN: /* rely on compatible layout with StgInd */
+ case IND_OLDGEN: // rely on compatible layout with StgInd
case IND_OLDGEN_PERM:
- /* follow indirections */
+ // follow indirections
p = ((StgInd *)p)->indirectee;
continue;
-
+
case EVACUATED:
- /* alive! */
+ // alive!
return ((StgEvacuated *)p)->evacuee;
- case BCO:
- size = bco_sizeW((StgBCO*)p);
- goto large;
-
- case ARR_WORDS:
- size = arr_words_sizeW((StgArrWords *)p);
- goto large;
-
- case MUT_ARR_PTRS:
- case MUT_ARR_PTRS_FROZEN:
- size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
- goto large;
-
case TSO:
- if (((StgTSO *)p)->whatNext == ThreadRelocated) {
+ if (((StgTSO *)p)->what_next == ThreadRelocated) {
p = (StgClosure *)((StgTSO *)p)->link;
- continue;
+ goto loop;
}
-
- size = tso_sizeW((StgTSO *)p);
- large:
- if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
- && Bdescr((P_)p)->evacuated)
- return p;
- else
- return NULL;
default:
- /* dead. */
+ // dead.
return NULL;
}
}
}
-//@cindex MarkRoot
-StgClosure *
-MarkRoot(StgClosure *root)
+static void
+mark_root(StgClosure **root)
{
- return evacuate(root);
+ *root = evacuate(*root);
}
-//@cindex addBlock
-static void addBlock(step *step)
+static void
+addBlock(step *stp)
{
bdescr *bd = allocBlock();
- bd->gen = step->gen;
- bd->step = step;
+ bd->gen_no = stp->gen_no;
+ bd->step = stp;
- if (step->gen->no <= N) {
- bd->evacuated = 1;
+ if (stp->gen_no <= N) {
+ bd->flags = BF_EVACUATED;
} else {
- bd->evacuated = 0;
+ bd->flags = 0;
}
- step->hp_bd->free = step->hp;
- step->hp_bd->link = bd;
- step->hp = bd->start;
- step->hpLim = step->hp + BLOCK_SIZE_W;
- step->hp_bd = bd;
- step->to_blocks++;
+ stp->hp_bd->free = stp->hp;
+ stp->hp_bd->link = bd;
+ stp->hp = bd->start;
+ stp->hpLim = stp->hp + BLOCK_SIZE_W;
+ stp->hp_bd = bd;
+ stp->n_to_blocks++;
new_blocks++;
}
-//@cindex upd_evacuee
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
- p->header.info = &EVACUATED_info;
+ p->header.info = &stg_EVACUATED_info;
((StgEvacuated *)p)->evacuee = dest;
}
-//@cindex copy
static __inline__ StgClosure *
-copy(StgClosure *src, nat size, step *step)
+copy(StgClosure *src, nat size, step *stp)
{
P_ to, from, dest;
+#ifdef PROFILING
+ // @LDV profiling
+ nat size_org = size;
+#endif
TICK_GC_WORDS_COPIED(size);
/* Find out where we're going, using the handy "to" pointer in
* evacuate to an older generation, adjust it here (see comment
* by evacuate()).
*/
- if (step->gen->no < evac_gen) {
+ if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
failed_to_evac = rtsTrue;
#else
- step = &generations[evac_gen].steps[0];
+ stp = &generations[evac_gen].steps[0];
#endif
}
/* chain a new block onto the to-space for the destination step if
* necessary.
*/
- if (step->hp + size >= step->hpLim) {
- addBlock(step);
+ if (stp->hp + size >= stp->hpLim) {
+ addBlock(stp);
}
- for(to = step->hp, from = (P_)src; size>0; --size) {
+ for(to = stp->hp, from = (P_)src; size>0; --size) {
*to++ = *from++;
}
- dest = step->hp;
- step->hp = to;
+ dest = stp->hp;
+ stp->hp = to;
upd_evacuee(src,(StgClosure *)dest);
+#ifdef PROFILING
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(src, size_org);
+#endif
return (StgClosure *)dest;
}
* used to optimise evacuation of BLACKHOLEs.
*/
-//@cindex copyPart
-static __inline__ StgClosure *
-copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
+static StgClosure *
+copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
P_ dest, to, from;
+#ifdef PROFILING
+ // @LDV profiling
+ nat size_to_copy_org = size_to_copy;
+#endif
TICK_GC_WORDS_COPIED(size_to_copy);
- if (step->gen->no < evac_gen) {
+ if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
failed_to_evac = rtsTrue;
#else
- step = &generations[evac_gen].steps[0];
+ stp = &generations[evac_gen].steps[0];
#endif
}
- if (step->hp + size_to_reserve >= step->hpLim) {
- addBlock(step);
+ if (stp->hp + size_to_reserve >= stp->hpLim) {
+ addBlock(stp);
}
- for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
+ for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
*to++ = *from++;
}
- dest = step->hp;
- step->hp += size_to_reserve;
+ dest = stp->hp;
+ stp->hp += size_to_reserve;
upd_evacuee(src,(StgClosure *)dest);
+#ifdef PROFILING
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ // size_to_copy_org is wrong because the closure already occupies size_to_reserve
+ // words.
+ SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
+ // fill the slop
+ if (size_to_reserve - size_to_copy_org > 0)
+ FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
+#endif
return (StgClosure *)dest;
}
-//@node Evacuation, Scavenging, Weak Pointers
-//@subsection Evacuation
/* -----------------------------------------------------------------------------
Evacuate a large object
large_alloc_list, and linking it on to the (singly-linked)
new_large_objects list, from where it will be scavenged later.
- Convention: bd->evacuated is /= 0 for a large object that has been
- evacuated, or 0 otherwise.
+ Convention: bd->flags has BF_EVACUATED set for a large object
+ that has been evacuated, or unset otherwise.
-------------------------------------------------------------------------- */
-//@cindex evacuate_large
static inline void
-evacuate_large(StgPtr p, rtsBool mutable)
+evacuate_large(StgPtr p)
{
bdescr *bd = Bdescr(p);
- step *step;
+ step *stp;
- /* should point to the beginning of the block */
- ASSERT(((W_)p & BLOCK_MASK) == 0);
-
- /* already evacuated? */
- if (bd->evacuated) {
+ // object must be at the beginning of the block (or be a ByteArray)
+ ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
+ (((W_)p & BLOCK_MASK) == 0));
+
+ // already evacuated?
+ if (bd->flags & BF_EVACUATED) {
/* Don't forget to set the failed_to_evac flag if we didn't get
* the desired destination (see comments in evacuate()).
*/
- if (bd->gen->no < evac_gen) {
+ if (bd->gen_no < evac_gen) {
failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
return;
}
- step = bd->step;
- /* remove from large_object list */
- if (bd->back) {
- bd->back->link = bd->link;
- } else { /* first object in the list */
- step->large_objects = bd->link;
+ stp = bd->step;
+ // remove from large_object list
+ if (bd->u.back) {
+ bd->u.back->link = bd->link;
+ } else { // first object in the list
+ stp->large_objects = bd->link;
}
if (bd->link) {
- bd->link->back = bd->back;
+ bd->link->u.back = bd->u.back;
}
/* link it on to the evacuated large object list of the destination step
*/
- step = bd->step->to;
- if (step->gen->no < evac_gen) {
+ stp = bd->step->to;
+ if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
failed_to_evac = rtsTrue;
#else
- step = &generations[evac_gen].steps[0];
+ stp = &generations[evac_gen].steps[0];
#endif
}
- bd->step = step;
- bd->gen = step->gen;
- bd->link = step->new_large_objects;
- step->new_large_objects = bd;
- bd->evacuated = 1;
-
- if (mutable) {
- recordMutable((StgMutClosure *)p);
- }
+ bd->step = stp;
+ bd->gen_no = stp->gen_no;
+ bd->link = stp->new_large_objects;
+ stp->new_large_objects = bd;
+ bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
the promotion until the next GC.
-------------------------------------------------------------------------- */
-//@cindex mkMutCons
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
StgMutVar *q;
- step *step;
+ step *stp;
- step = &gen->steps[0];
+ stp = &gen->steps[0];
/* chain a new block onto the to-space for the destination step if
* necessary.
*/
- if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
- addBlock(step);
+ if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
+ addBlock(stp);
}
- q = (StgMutVar *)step->hp;
- step->hp += sizeofW(StgMutVar);
+ q = (StgMutVar *)stp->hp;
+ stp->hp += sizeofW(StgMutVar);
- SET_HDR(q,&MUT_CONS_info,CCS_GC);
+ SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
q->var = ptr;
recordOldToNewPtrs((StgMutClosure *)q);
didn't manage to evacuate this object into evac_gen.
-------------------------------------------------------------------------- */
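// A sketch of the caller-side protocol (cf. the scavenge functions,
// most of which fall outside this hunk): when eager promotion fails,
// the object is remembered on the mutable list so that the old-to-new
// pointer is still found at the next GC.
//
//   evac_gen = stp->gen_no;
//   failed_to_evac = rtsFalse;
//   p->payload[i] = evacuate(p->payload[i]);
//   if (failed_to_evac) {
//       recordMutable((StgMutClosure *)p);
//   }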
-//@cindex evacuate
static StgClosure *
evacuate(StgClosure *q)
{
StgClosure *to;
bdescr *bd = NULL;
- step *step;
+ step *stp;
const StgInfoTable *info;
loop:
if (HEAP_ALLOCED(q)) {
bd = Bdescr((P_)q);
- if (bd->gen->no > N) {
- /* Can't evacuate this object, because it's in a generation
- * older than the ones we're collecting. Let's hope that it's
- * in evac_gen or older, or we will have to make an IND_OLDGEN object.
- */
- if (bd->gen->no < evac_gen) {
- /* nope */
- failed_to_evac = rtsTrue;
- TICK_GC_FAILED_PROMOTION();
- }
- return q;
+
+ // not a group head: find the group head
+ if (bd->blocks == 0) { bd = bd->link; }
+
+ if (bd->gen_no > N) {
+ /* Can't evacuate this object, because it's in a generation
+ * older than the ones we're collecting. Let's hope that it's
+ * in evac_gen or older, or we will have to arrange to track
+ * this pointer using the mutable list.
+ */
+ if (bd->gen_no < evac_gen) {
+ // nope
+ failed_to_evac = rtsTrue;
+ TICK_GC_FAILED_PROMOTION();
+ }
+ return q;
+ }
+
+ /* evacuate large objects by re-linking them onto a different list.
+ */
+ if (bd->flags & BF_LARGE) {
+ info = get_itbl(q);
+ if (info->type == TSO &&
+ ((StgTSO *)q)->what_next == ThreadRelocated) {
+ q = (StgClosure *)((StgTSO *)q)->link;
+ goto loop;
+ }
+ evacuate_large((P_)q);
+ return q;
+ }
+
+ /* If the object is in a step that we're compacting, then we
+ * need to use an alternative evacuate procedure.
+ */
+ if (bd->step->is_compacted) {
+ if (!is_marked((P_)q,bd)) {
+ mark((P_)q,bd);
+ if (mark_stack_full()) {
+ mark_stack_overflowed = rtsTrue;
+ reset_mark_stack();
+ }
+ push_mark_stack((P_)q);
+ }
+ return q;
}
- step = bd->step->to;
+
+ stp = bd->step->to;
}
#ifdef DEBUG
- else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
+ else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif
- /* make sure the info pointer is into text space */
+ // make sure the info pointer is into text space
ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
|| IS_HUGS_CONSTR_INFO(GET_INFO(q))));
info = get_itbl(q);
- /*
- if (info->type==RBH) {
- info = REVERT_INFOPTR(info);
- IF_DEBUG(gc,
- belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
- q, info_type(q), info, info_type_by_ip(info)));
- }
- */
switch (info -> type) {
- case BCO:
- {
- nat size = bco_sizeW((StgBCO*)q);
-
- if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- evacuate_large((P_)q, rtsFalse);
- to = q;
- } else {
- /* just copy the block */
- to = copy(q,size,step);
- }
- return to;
- }
-
case MUT_VAR:
- ASSERT(q->header.info != &MUT_CONS_info);
case MVAR:
- to = copy(q,sizeW_fromITBL(info),step);
- recordMutable((StgMutClosure *)to);
- return to;
+ to = copy(q,sizeW_fromITBL(info),stp);
+ return to;
+
+ case CONSTR_0_1:
+ {
+ StgWord w = (StgWord)q->payload[0];
+ if (q->header.info == Czh_con_info &&
+ // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
+ (StgChar)w <= MAX_CHARLIKE) {
+ return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
+ }
+ if (q->header.info == Izh_con_info &&
+ (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
+ return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
+ }
+ // else, fall through ...
+ }
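// (So an evacuated (C# c) or small-enough (I# n) is replaced by a
// pointer into the static CHARLIKE/INTLIKE closure tables and shared,
// rather than copied.)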
case FUN_1_0:
case FUN_0_1:
case CONSTR_1_0:
- case CONSTR_0_1:
- return copy(q,sizeofW(StgHeader)+1,step);
+ return copy(q,sizeofW(StgHeader)+1,stp);
- case THUNK_1_0: /* here because of MIN_UPD_SIZE */
+ case THUNK_1_0: // here because of MIN_UPD_SIZE
case THUNK_0_1:
case THUNK_1_1:
case THUNK_0_2:
case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
- if (bd->gen->no == 0 &&
+ if (bd->gen_no == 0 &&
bd->step->no != 0 &&
- bd->step->no == bd->gen->n_steps-1) {
- step = bd->step;
+ bd->step->no == generations[bd->gen_no].n_steps-1) {
+ stp = bd->step;
}
#endif
- return copy(q,sizeofW(StgHeader)+2,step);
+ return copy(q,sizeofW(StgHeader)+2,stp);
case FUN_1_1:
case FUN_0_2:
case CONSTR_1_1:
case CONSTR_0_2:
case CONSTR_2_0:
- return copy(q,sizeofW(StgHeader)+2,step);
+ return copy(q,sizeofW(StgHeader)+2,stp);
case FUN:
case THUNK:
case CONSTR:
case IND_PERM:
case IND_OLDGEN_PERM:
- case CAF_UNENTERED:
- case CAF_ENTERED:
case WEAK:
case FOREIGN:
case STABLE_NAME:
- return copy(q,sizeW_fromITBL(info),step);
+ case BCO:
+ return copy(q,sizeW_fromITBL(info),stp);
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
- return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
+ return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
case BLACKHOLE_BQ:
- to = copy(q,BLACKHOLE_sizeW(),step);
- recordMutable((StgMutClosure *)to);
+ to = copy(q,BLACKHOLE_sizeW(),stp);
return to;
case THUNK_SELECTOR:
case CONSTR_1_1:
case CONSTR_0_2:
case CONSTR_STATIC:
+ case CONSTR_NOCAF_STATIC:
{
- StgWord32 offset = info->layout.selector_offset;
+ StgWord offset = info->layout.selector_offset;
- /* check that the size is in range */
+ // check that the size is in range
ASSERT(offset <
(StgWord32)(selectee_info->layout.payload.ptrs +
selectee_info->layout.payload.nptrs));
- /* perform the selection! */
+ // perform the selection!
q = selectee->payload[offset];
+ if (major_gc==rtsTrue) {TICK_GC_SEL_MAJOR();} else {TICK_GC_SEL_MINOR();}
/* if we're already in to-space, there's no need to continue
* with the evacuation, just update the source address with
*/
if (HEAP_ALLOCED(q)) {
bdescr *bd = Bdescr((P_)q);
- if (bd->evacuated) {
- if (bd->gen->no < evac_gen) {
+ if (bd->flags & BF_EVACUATED) {
+ if (bd->gen_no < evac_gen) {
failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
selectee = ((StgInd *)selectee)->indirectee;
goto selector_loop;
- case CAF_ENTERED:
- selectee = ((StgCAF *)selectee)->value;
- goto selector_loop;
-
case EVACUATED:
selectee = ((StgEvacuated *)selectee)->evacuee;
goto selector_loop;
+ case THUNK_SELECTOR:
+# if 0
+ /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
+ something) to go into an infinite loop when the nightly
+ stage2 compiles PrelTup.lhs. */
+
+ /* we can't recurse indefinitely in evacuate(), so set a
+ * limit on the number of times we can go around this
+ * loop.
+ */
+ if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
+ bdescr *bd;
+ bd = Bdescr((P_)selectee);
+ if (!bd->flags & BF_EVACUATED) {
+ thunk_selector_depth++;
+ selectee = evacuate(selectee);
+ thunk_selector_depth--;
+ goto selector_loop;
+ }
+ } else {
+ TICK_GC_SEL_ABANDONED();
+ // and fall through...
+ }
+# endif
+
+ case AP_UPD:
case THUNK:
case THUNK_1_0:
case THUNK_0_1:
case THUNK_1_1:
case THUNK_0_2:
case THUNK_STATIC:
- case THUNK_SELECTOR:
- /* aargh - do recursively???? */
- case CAF_UNENTERED:
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case SE_BLACKHOLE:
case BLACKHOLE:
case BLACKHOLE_BQ:
- /* not evaluated yet */
+ // not evaluated yet
break;
+#if defined(PAR)
+ // a copy of the top-level cases below
+ case RBH: // cf. BLACKHOLE_BQ
+ {
+ //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
+ to = copy(q,BLACKHOLE_sizeW(),stp);
+ //ToDo: derive size etc from reverted IP
+ //to = copy(q,size,stp);
+ // recordMutable((StgMutClosure *)to);
+ return to;
+ }
+
+ case BLOCKED_FETCH:
+ ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
+ to = copy(q,sizeofW(StgBlockedFetch),stp);
+ return to;
+
+# ifdef DIST
+ case REMOTE_REF:
+# endif
+ case FETCH_ME:
+ ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
+ to = copy(q,sizeofW(StgFetchMe),stp);
+ return to;
+
+ case FETCH_ME_BQ:
+ ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
+ to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
+ return to;
+#endif
+
default:
barf("evacuate: THUNK_SELECTOR: strange selectee %d",
(int)(selectee_info->type));
}
}
- return copy(q,THUNK_SELECTOR_sizeW(),step);
+ return copy(q,THUNK_SELECTOR_sizeW(),stp);
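/* For illustration: the selector_loop above performs the field selection at
 * GC time whenever the selectee turns out to be an evaluated constructor,
 * which stops a selector thunk from retaining a whole constructor for the
 * sake of one field.  A minimal stand-alone model of the idea, using
 * hypothetical Closure/Selector types in place of the RTS ones:
 *
 *   typedef struct Closure {
 *       int is_constructor;             // 1 if evaluated to a constructor
 *       struct Closure *payload[2];
 *   } Closure;
 *
 *   typedef struct {
 *       Closure *selectee;
 *       unsigned offset;                // which field to select
 *   } Selector;
 *
 *   // Return the selected field if the selectee is in WHNF; otherwise
 *   // return NULL, in which case the selector thunk itself is copied and
 *   // the selection retried once the selectee has been evaluated.
 *   static Closure *try_select(Selector *s) {
 *       if (s->selectee->is_constructor)
 *           return s->selectee->payload[s->offset];
 *       return NULL;
 *   }
 */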
case IND:
case IND_OLDGEN:
- /* follow chains of indirections, don't evacuate them */
+ // follow chains of indirections, don't evacuate them
q = ((StgInd*)q)->indirectee;
goto loop;
return q;
case IND_STATIC:
- if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
- IND_STATIC_LINK((StgClosure *)q) = static_objects;
- static_objects = (StgClosure *)q;
+ /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
+ * on the CAF list, so don't do anything with it here (we'll
+ * scavenge it later).
+ */
+ if (major_gc
+ && ((StgIndStatic *)q)->saved_info == NULL
+ && IND_STATIC_LINK((StgClosure *)q) == NULL) {
+ IND_STATIC_LINK((StgClosure *)q) = static_objects;
+ static_objects = (StgClosure *)q;
}
return q;
case STOP_FRAME:
case CATCH_FRAME:
case SEQ_FRAME:
- /* shouldn't see these */
+ // shouldn't see these
barf("evacuate: stack frame at %p\n", q);
case AP_UPD:
case PAP:
- /* these are special - the payload is a copy of a chunk of stack,
- tagging and all. */
- return copy(q,pap_sizeW((StgPAP *)q),step);
+ /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
+ * of stack, tagging and all.
+ */
+ return copy(q,pap_sizeW((StgPAP*)q),stp);
case EVACUATED:
/* Already evacuated, just return the forwarding address.
* set the failed_to_evac flag to indicate that we couldn't
* manage to promote the object to the desired generation.
*/
- if (evac_gen > 0) { /* optimisation */
+ if (evac_gen > 0) { // optimisation
StgClosure *p = ((StgEvacuated*)q)->evacuee;
- if (Bdescr((P_)p)->gen->no < evac_gen) {
- IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
+ if (Bdescr((P_)p)->gen_no < evac_gen) {
failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
return ((StgEvacuated*)q)->evacuee;
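      /* For illustration: an EVACUATED closure is a forwarding pointer --
       * the object has already been copied and its old header overwritten
       * with the address of the new copy.  A minimal model of the idea,
       * with a hypothetical Obj type standing in for StgEvacuated:
       *
       *   typedef struct Obj { struct Obj *fwd; } Obj;
       *
       *   static Obj *follow_forwarding(Obj *o) {
       *       while (o->fwd != NULL)     // copied already? chase the pointer
       *           o = o->fwd;
       *       return o;
       *   }
       */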
case ARR_WORDS:
- {
- nat size = arr_words_sizeW((StgArrWords *)q);
-
- if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- evacuate_large((P_)q, rtsFalse);
- return q;
- } else {
- /* just copy the block */
- return copy(q,size,step);
- }
- }
+ // just copy the block
+ return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
case MUT_ARR_PTRS:
case MUT_ARR_PTRS_FROZEN:
- {
- nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);
-
- if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
- to = q;
- } else {
- /* just copy the block */
- to = copy(q,size,step);
- if (info->type == MUT_ARR_PTRS) {
- recordMutable((StgMutClosure *)to);
- }
- }
- return to;
- }
+ // just copy the block
+ return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
case TSO:
{
StgTSO *tso = (StgTSO *)q;
- nat size = tso_sizeW(tso);
- int diff;
/* Deal with redirected TSOs (a TSO that's had its stack enlarged).
*/
- if (tso->whatNext == ThreadRelocated) {
+ if (tso->what_next == ThreadRelocated) {
q = (StgClosure *)tso->link;
goto loop;
}
- /* Large TSOs don't get moved, so no relocation is required.
- */
- if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- evacuate_large((P_)q, rtsTrue);
- return q;
-
/* To evacuate a small TSO, we need to relocate the update frame
* list it contains.
*/
- } else {
- StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);
-
- diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
-
- /* relocate the stack pointers... */
- new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
- new_tso->sp = (StgPtr)new_tso->sp + diff;
- new_tso->splim = (StgPtr)new_tso->splim + diff;
-
- relocate_TSO(tso, new_tso);
-
- recordMutable((StgMutClosure *)new_tso);
- return (StgClosure *)new_tso;
+ {
+ StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
+ move_TSO(tso, new_tso);
+ return (StgClosure *)new_tso;
}
}
case RBH: // cf. BLACKHOLE_BQ
{
//StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
- to = copy(q,BLACKHOLE_sizeW(),step);
+ to = copy(q,BLACKHOLE_sizeW(),stp);
//ToDo: derive size etc from reverted IP
- //to = copy(q,size,step);
- recordMutable((StgMutClosure *)to);
+ //to = copy(q,size,stp);
IF_DEBUG(gc,
belch("@@ evacuate: RBH %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
case BLOCKED_FETCH:
ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
- to = copy(q,sizeofW(StgBlockedFetch),step);
+ to = copy(q,sizeofW(StgBlockedFetch),stp);
IF_DEBUG(gc,
belch("@@ evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
return to;
+# ifdef DIST
+ case REMOTE_REF:
+# endif
case FETCH_ME:
-    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
+    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
- to = copy(q,sizeofW(StgFetchMe),step);
+ to = copy(q,sizeofW(StgFetchMe),stp);
IF_DEBUG(gc,
belch("@@ evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
case FETCH_ME_BQ:
-    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
+    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
- to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
+ to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
IF_DEBUG(gc,
belch("@@ evacuate: %p (%s) to %p (%s)",
q, info_type(q), to, info_type(to)));
}
/* -----------------------------------------------------------------------------
- relocate_TSO is called just after a TSO has been copied from src to
- dest. It adjusts the update frame list for the new location.
+ move_TSO is called to update the TSO structure after it has been
+ moved from one place to another.
+ -------------------------------------------------------------------------- */
+
+void
+move_TSO(StgTSO *src, StgTSO *dest)
+{
+ ptrdiff_t diff;
+
+ // relocate the stack pointers...
+ diff = (StgPtr)dest - (StgPtr)src; // In *words*
+ dest->sp = (StgPtr)dest->sp + diff;
+ dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
+
+ relocate_stack(dest, diff);
+}
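+
+/* Worked example: diff is measured in words because sp and su are
+ * word-aligned pointers into the stack.  If a TSO is copied from address
+ * 0x1000 to 0x2000 on a 32-bit machine, diff = (0x2000 - 0x1000) /
+ * sizeof(W_) = 0x400 words; a stack pointer that was 0x1100 becomes
+ * 0x1100 + 0x400 words = 0x2100, i.e. it keeps the same offset within
+ * the relocated TSO.
+ */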
+
+/* -----------------------------------------------------------------------------
+ relocate_stack is called to update the linkage between
+ UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
+ place to another.
-------------------------------------------------------------------------- */
-//@cindex relocate_TSO
StgTSO *
-relocate_TSO(StgTSO *src, StgTSO *dest)
+relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
StgUpdateFrame *su;
StgCatchFrame *cf;
StgSeqFrame *sf;
- int diff;
-
- diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
su = dest->su;
while ((P_)su < dest->stack + dest->stack_size) {
switch (get_itbl(su)->type) {
- /* GCC actually manages to common up these three cases! */
+ // GCC actually manages to common up these three cases!
case UPDATE_FRAME:
su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
continue;
case STOP_FRAME:
- /* all done! */
+ // all done!
break;
default:
- barf("relocate_TSO %d", (int)(get_itbl(su)->type));
+ barf("relocate_stack %d", (int)(get_itbl(su)->type));
}
break;
}
return dest;
}
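/* For illustration: the update-frame chain is an intrusive list threaded
 * through the stack itself, so after a block move every link field must be
 * slid by the same offset.  The generic shape, with a stand-in Frame type:
 *
 *   typedef struct Frame { struct Frame *link; } Frame;
 *
 *   // The stack has already been copied; walk the chain in the new copy
 *   // and rebase each link pointer into the new copy as well.
 *   static void slide_chain(Frame *f, ptrdiff_t diff_bytes) {
 *       for (; f != NULL; f = f->link) {
 *           if (f->link != NULL)
 *               f->link = (Frame *)((char *)f->link + diff_bytes);
 *       }
 *   }
 */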
-//@node Scavenging, Reverting CAFs, Evacuation
-//@subsection Scavenging
-//@cindex scavenge_srt
static inline void
scavenge_srt(const StgInfoTable *info)
static void
scavengeTSO (StgTSO *tso)
{
- /* chase the link field for any TSOs on the same queue */
+ // chase the link field for any TSOs on the same queue
(StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
- || tso->why_blocked == BlockedOnException) {
+ || tso->why_blocked == BlockedOnException
+#if defined(PAR)
+ || tso->why_blocked == BlockedOnGA
+ || tso->why_blocked == BlockedOnGA_NoSend
+#endif
+ ) {
tso->block_info.closure = evacuate(tso->block_info.closure);
}
if ( tso->blocked_exceptions != NULL ) {
tso->blocked_exceptions =
(StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
}
- /* scavenge this thread's stack */
+ // scavenge this thread's stack
scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
scavenging a mutable object where early promotion isn't such a good
idea.
-------------------------------------------------------------------------- */
-//@cindex scavenge
static void
-scavenge(step *step)
+scavenge(step *stp)
{
StgPtr p, q;
- const StgInfoTable *info;
+ StgInfoTable *info;
bdescr *bd;
- nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
+ nat saved_evac_gen = evac_gen;
- p = step->scan;
- bd = step->scan_bd;
+ p = stp->scan;
+ bd = stp->scan_bd;
failed_to_evac = rtsFalse;
* evacuated objects
*/
- while (bd != step->hp_bd || p < step->hp) {
+ while (bd != stp->hp_bd || p < stp->hp) {
- /* If we're at the end of this block, move on to the next block */
- if (bd != step->hp_bd && p == bd->free) {
+ // If we're at the end of this block, move on to the next block
+ if (bd != stp->hp_bd && p == bd->free) {
bd = bd->link;
p = bd->start;
continue;
}
- q = p; /* save ptr to object */
-
- ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
- || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
-
info = get_itbl((StgClosure *)p);
- /*
- if (info->type==RBH)
- info = REVERT_INFOPTR(info);
- */
-
- switch (info -> type) {
-
- case BCO:
- {
- StgBCO* bco = (StgBCO *)p;
- nat i;
- for (i = 0; i < bco->n_ptrs; i++) {
- bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
- }
- p += bco_sizeW(bco);
- break;
- }
-
+ ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
+
+ q = p;
+ switch (info->type) {
+
case MVAR:
- /* treat MVars specially, because we don't want to evacuate the
- * mut_link field in the middle of the closure.
- */
- {
+ /* treat MVars specially, because we don't want to evacuate the
+ * mut_link field in the middle of the closure.
+ */
+ {
StgMVar *mvar = ((StgMVar *)p);
evac_gen = 0;
(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
- p += sizeofW(StgMVar);
evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)mvar);
+ failed_to_evac = rtsFalse; // mutable.
+ p += sizeofW(StgMVar);
break;
- }
+ }
case THUNK_2_0:
case FUN_2_0:
- scavenge_srt(info);
+ scavenge_srt(info);
case CONSTR_2_0:
- ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2;
- break;
-
+ ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 2;
+ break;
+
case THUNK_1_0:
- scavenge_srt(info);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
- break;
-
+ scavenge_srt(info);
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
+ break;
+
case FUN_1_0:
- scavenge_srt(info);
+ scavenge_srt(info);
case CONSTR_1_0:
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 1;
- break;
-
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 1;
+ break;
+
case THUNK_0_1:
- scavenge_srt(info);
- p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
- break;
-
+ scavenge_srt(info);
+ p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
+ break;
+
case FUN_0_1:
- scavenge_srt(info);
+ scavenge_srt(info);
case CONSTR_0_1:
- p += sizeofW(StgHeader) + 1;
- break;
-
+ p += sizeofW(StgHeader) + 1;
+ break;
+
case THUNK_0_2:
case FUN_0_2:
- scavenge_srt(info);
+ scavenge_srt(info);
case CONSTR_0_2:
- p += sizeofW(StgHeader) + 2;
- break;
-
+ p += sizeofW(StgHeader) + 2;
+ break;
+
case THUNK_1_1:
case FUN_1_1:
- scavenge_srt(info);
+ scavenge_srt(info);
case CONSTR_1_1:
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2;
- break;
-
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ p += sizeofW(StgHeader) + 2;
+ break;
+
case FUN:
case THUNK:
- scavenge_srt(info);
- /* fall through */
-
+ scavenge_srt(info);
+ // fall through
+
case CONSTR:
case WEAK:
case FOREIGN:
case STABLE_NAME:
- {
+ case BCO:
+ {
StgPtr end;
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- (StgClosure *)*p = evacuate((StgClosure *)*p);
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
}
p += info->layout.payload.nptrs;
break;
- }
+ }
case IND_PERM:
- if (step->gen->no != 0) {
- SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
+ if (stp->gen->no != 0) {
+#ifdef PROFILING
+ // @LDV profiling
+ // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
+ // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
+ LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
+#endif
+ //
+ // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+ //
+ SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that p has just been created.
+ LDV_recordCreate((StgClosure *)p);
+#endif
}
- /* fall through */
+ // fall through
case IND_OLDGEN_PERM:
- ((StgIndOldGen *)p)->indirectee =
- evacuate(((StgIndOldGen *)p)->indirectee);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordOldToNewPtrs((StgMutClosure *)p);
- }
- p += sizeofW(StgIndOldGen);
- break;
-
- case CAF_UNENTERED:
- {
- StgCAF *caf = (StgCAF *)p;
-
- caf->body = evacuate(caf->body);
+ ((StgIndOldGen *)p)->indirectee =
+ evacuate(((StgIndOldGen *)p)->indirectee);
if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordOldToNewPtrs((StgMutClosure *)p);
- } else {
- caf->mut_link = NULL;
+ failed_to_evac = rtsFalse;
+ recordOldToNewPtrs((StgMutClosure *)p);
}
- p += sizeofW(StgCAF);
+ p += sizeofW(StgIndOldGen);
break;
- }
-
- case CAF_ENTERED:
- {
- StgCAF *caf = (StgCAF *)p;
-
- caf->body = evacuate(caf->body);
- caf->value = evacuate(caf->value);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordOldToNewPtrs((StgMutClosure *)p);
- } else {
- caf->mut_link = NULL;
- }
- p += sizeofW(StgCAF);
- break;
- }
case MUT_VAR:
- /* ignore MUT_CONSs */
- if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
evac_gen = 0;
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
evac_gen = saved_evac_gen;
- }
- p += sizeofW(StgMutVar);
- break;
+ recordMutable((StgMutClosure *)p);
+ failed_to_evac = rtsFalse; // mutable anyhow
+ p += sizeofW(StgMutVar);
+ break;
+
+ case MUT_CONS:
+ // ignore these
+ failed_to_evac = rtsFalse; // mutable anyhow
+ p += sizeofW(StgMutVar);
+ break;
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
break;
case BLACKHOLE_BQ:
- {
+ {
StgBlockingQueue *bh = (StgBlockingQueue *)p;
(StgClosure *)bh->blocking_queue =
- evacuate((StgClosure *)bh->blocking_queue);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)bh);
- }
+ evacuate((StgClosure *)bh->blocking_queue);
+ recordMutable((StgMutClosure *)bh);
+ failed_to_evac = rtsFalse;
p += BLACKHOLE_sizeW();
break;
- }
+ }
case THUNK_SELECTOR:
- {
+ {
StgSelector *s = (StgSelector *)p;
s->selectee = evacuate(s->selectee);
p += THUNK_SELECTOR_sizeW();
break;
- }
-
- case IND:
- case IND_OLDGEN:
- barf("scavenge:IND???\n");
-
- case CONSTR_INTLIKE:
- case CONSTR_CHARLIKE:
- case CONSTR_STATIC:
- case CONSTR_NOCAF_STATIC:
- case THUNK_STATIC:
- case FUN_STATIC:
- case IND_STATIC:
- /* Shouldn't see a static object here. */
- barf("scavenge: STATIC object\n");
-
- case RET_BCO:
- case RET_SMALL:
- case RET_VEC_SMALL:
- case RET_BIG:
- case RET_VEC_BIG:
- case RET_DYN:
- case UPDATE_FRAME:
- case STOP_FRAME:
- case CATCH_FRAME:
- case SEQ_FRAME:
- /* Shouldn't see stack frames here. */
- barf("scavenge: stack frame\n");
+ }
- case AP_UPD: /* same as PAPs */
+ case AP_UPD: // same as PAPs
case PAP:
- /* Treat a PAP just like a section of stack, not forgetting to
- * evacuate the function pointer too...
- */
- {
+ /* Treat a PAP just like a section of stack, not forgetting to
+ * evacuate the function pointer too...
+ */
+ {
StgPAP* pap = (StgPAP *)p;
pap->fun = evacuate(pap->fun);
scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
p += pap_sizeW(pap);
break;
- }
+ }
case ARR_WORDS:
- /* nothing to follow */
- p += arr_words_sizeW((StgArrWords *)p);
- break;
+ // nothing to follow
+ p += arr_words_sizeW((StgArrWords *)p);
+ break;
case MUT_ARR_PTRS:
- /* follow everything */
- {
+ // follow everything
+ {
StgPtr next;
- evac_gen = 0; /* repeatedly mutable */
+ evac_gen = 0; // repeatedly mutable
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- (StgClosure *)*p = evacuate((StgClosure *)*p);
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
}
evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)q);
+ failed_to_evac = rtsFalse; // mutable anyhow.
break;
- }
+ }
case MUT_ARR_PTRS_FROZEN:
- /* follow everything */
- {
- StgPtr start = p, next;
+ // follow everything
+ {
+ StgPtr next;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- (StgClosure *)*p = evacuate((StgClosure *)*p);
- }
- if (failed_to_evac) {
- /* we can do this easier... */
- recordMutable((StgMutClosure *)start);
- failed_to_evac = rtsFalse;
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
}
+ // it's tempting to recordMutable() if failed_to_evac is
+	// true, but that breaks some assumptions (eg. every
+ // closure on the mutable list is supposed to have the MUT
+ // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
break;
- }
+ }
case TSO:
- {
+ {
StgTSO *tso = (StgTSO *)p;
evac_gen = 0;
scavengeTSO(tso);
evac_gen = saved_evac_gen;
+ recordMutable((StgMutClosure *)tso);
+ failed_to_evac = rtsFalse; // mutable anyhow.
p += tso_sizeW(tso);
break;
- }
+ }
#if defined(PAR)
case RBH: // cf. BLACKHOLE_BQ
- {
- // nat size, ptrs, nonptrs, vhs;
- // char str[80];
- // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
+ {
+#if 0
+ nat size, ptrs, nonptrs, vhs;
+ char str[80];
+ StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
+#endif
StgRBH *rbh = (StgRBH *)p;
(StgClosure *)rbh->blocking_queue =
- evacuate((StgClosure *)rbh->blocking_queue);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)rbh);
- }
+ evacuate((StgClosure *)rbh->blocking_queue);
+	recordMutable((StgMutClosure *)rbh);
+ failed_to_evac = rtsFalse; // mutable anyhow.
IF_DEBUG(gc,
belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue));
// ToDo: use size of reverted closure here!
p += BLACKHOLE_sizeW();
break;
- }
+ }
case BLOCKED_FETCH:
- {
+ {
StgBlockedFetch *bf = (StgBlockedFetch *)p;
- /* follow the pointer to the node which is being demanded */
+ // follow the pointer to the node which is being demanded
(StgClosure *)bf->node =
- evacuate((StgClosure *)bf->node);
- /* follow the link to the rest of the blocking queue */
+ evacuate((StgClosure *)bf->node);
+ // follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
- evacuate((StgClosure *)bf->link);
+ evacuate((StgClosure *)bf->link);
if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)bf);
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)bf);
}
IF_DEBUG(gc,
belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
- bf, info_type((StgClosure *)bf),
- bf->node, info_type(bf->node)));
+ bf, info_type((StgClosure *)bf),
+ bf->node, info_type(bf->node)));
p += sizeofW(StgBlockedFetch);
break;
- }
+ }
+#ifdef DIST
+ case REMOTE_REF:
+#endif
case FETCH_ME:
- IF_DEBUG(gc,
- belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
- p, info_type((StgClosure *)p)));
- p += sizeofW(StgFetchMe);
- break; // nothing to do in this case
+ p += sizeofW(StgFetchMe);
+ break; // nothing to do in this case
case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
- {
+ {
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
- evacuate((StgClosure *)fmbq->blocking_queue);
+ evacuate((StgClosure *)fmbq->blocking_queue);
if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutable((StgMutClosure *)fmbq);
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)fmbq);
}
IF_DEBUG(gc,
belch("@@ scavenge: %p (%s) exciting, isn't it",
- p, info_type((StgClosure *)p)));
+ p, info_type((StgClosure *)p)));
p += sizeofW(StgFetchMeBlockingQueue);
break;
- }
+ }
#endif
- case EVACUATED:
- barf("scavenge: unimplemented/strange closure type\n");
-
default:
- barf("scavenge");
+ barf("scavenge: unimplemented/strange closure type %d @ %p",
+ info->type, p);
}
/* If we didn't manage to promote all the objects pointed to by
* mutable (because it contains old-to-new generation pointers).
*/
if (failed_to_evac) {
- mkMutCons((StgClosure *)q, &generations[evac_gen]);
- failed_to_evac = rtsFalse;
+ failed_to_evac = rtsFalse;
+ mkMutCons((StgClosure *)q, &generations[evac_gen]);
}
}
- step->scan_bd = bd;
- step->scan = p;
+ stp->scan_bd = bd;
+ stp->scan = p;
}
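
/* For illustration: scavenge() is the classic Cheney algorithm generalised
 * to a chain of blocks -- the scan pointer chases the allocation pointer,
 * and everything the scavenger evacuates is appended at the allocation
 * pointer, so the loop terminates exactly when scan catches up with hp.
 * A minimal single-buffer sketch, assuming a hypothetical
 * evacuate_fields() that copies a slot's pointees to the end of to_space
 * and bumps *free accordingly:
 *
 *   static void cheney_scan(StgClosure **to_space, nat *free) {
 *       nat scan = 0;
 *       while (scan < *free) {
 *           evacuate_fields(to_space, scan, free);   // may grow *free
 *           scan++;
 *       }
 *   }
 */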
/* -----------------------------------------------------------------------------
- Scavenge one object.
+ Scavenge everything on the mark stack.
- This is used for objects that are temporarily marked as mutable
- because they contain old-to-new generation pointers. Only certain
- objects can have this property.
+ This is slightly different from scavenge():
+ - we don't walk linearly through the objects, so the scavenger
+ doesn't need to advance the pointer on to the next object.
-------------------------------------------------------------------------- */
-//@cindex scavenge_one
-static rtsBool
-scavenge_one(StgClosure *p)
+static void
+scavenge_mark_stack(void)
{
- const StgInfoTable *info;
- rtsBool no_luck;
+ StgPtr p, q;
+ StgInfoTable *info;
+ nat saved_evac_gen;
- ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
- || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
+ evac_gen = oldest_gen->no;
+ saved_evac_gen = evac_gen;
- info = get_itbl(p);
+linear_scan:
+ while (!mark_stack_empty()) {
+ p = pop_mark_stack();
- /* ngoq moHqu'!
- if (info->type==RBH)
- info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
- */
+ info = get_itbl((StgClosure *)p);
+ ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
+
+ q = p;
+ switch (info->type) {
+
+ case MVAR:
+ /* treat MVars specially, because we don't want to evacuate the
+ * mut_link field in the middle of the closure.
+ */
+ {
+ StgMVar *mvar = ((StgMVar *)p);
+ evac_gen = 0;
+ (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
+ (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
+ (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsFalse; // mutable.
+ break;
+ }
- switch (info -> type) {
+ case FUN_2_0:
+ case THUNK_2_0:
+ scavenge_srt(info);
+ case CONSTR_2_0:
+ ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ break;
+
+ case FUN_1_0:
+ case FUN_1_1:
+ case THUNK_1_0:
+ case THUNK_1_1:
+ scavenge_srt(info);
+ case CONSTR_1_0:
+ case CONSTR_1_1:
+ ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ break;
+
+ case FUN_0_1:
+ case FUN_0_2:
+ case THUNK_0_1:
+ case THUNK_0_2:
+ scavenge_srt(info);
+ case CONSTR_0_1:
+ case CONSTR_0_2:
+ break;
+
+ case FUN:
+ case THUNK:
+ scavenge_srt(info);
+ // fall through
+
+ case CONSTR:
+ case WEAK:
+ case FOREIGN:
+ case STABLE_NAME:
+ case BCO:
+ {
+ StgPtr end;
+
+ end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ break;
+ }
- case FUN:
- case FUN_1_0: /* hardly worth specialising these guys */
- case FUN_0_1:
- case FUN_1_1:
- case FUN_0_2:
- case FUN_2_0:
- case THUNK:
- case THUNK_1_0:
- case THUNK_0_1:
- case THUNK_1_1:
- case THUNK_0_2:
- case THUNK_2_0:
- case CONSTR:
- case CONSTR_1_0:
- case CONSTR_0_1:
- case CONSTR_1_1:
- case CONSTR_0_2:
- case CONSTR_2_0:
- case WEAK:
- case FOREIGN:
- case IND_PERM:
- case IND_OLDGEN_PERM:
- case CAF_UNENTERED:
- {
- StgPtr q, end;
+ case IND_PERM:
+ // don't need to do anything here: the only possible case
+ // is that we're in a 1-space compacting collector, with
+ // no "old" generation.
+ break;
+
+ case IND_OLDGEN:
+ case IND_OLDGEN_PERM:
+ ((StgIndOldGen *)p)->indirectee =
+ evacuate(((StgIndOldGen *)p)->indirectee);
+ if (failed_to_evac) {
+ recordOldToNewPtrs((StgMutClosure *)p);
+ }
+ failed_to_evac = rtsFalse;
+ break;
+
+ case MUT_VAR:
+ evac_gen = 0;
+ ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsFalse;
+ break;
+
+ case MUT_CONS:
+ // ignore these
+ failed_to_evac = rtsFalse;
+ break;
+
+ case CAF_BLACKHOLE:
+ case SE_CAF_BLACKHOLE:
+ case SE_BLACKHOLE:
+ case BLACKHOLE:
+ case ARR_WORDS:
+ break;
+
+ case BLACKHOLE_BQ:
+ {
+ StgBlockingQueue *bh = (StgBlockingQueue *)p;
+ (StgClosure *)bh->blocking_queue =
+ evacuate((StgClosure *)bh->blocking_queue);
+ failed_to_evac = rtsFalse;
+ break;
+ }
+
+ case THUNK_SELECTOR:
+ {
+ StgSelector *s = (StgSelector *)p;
+ s->selectee = evacuate(s->selectee);
+ break;
+ }
+
+ case AP_UPD: // same as PAPs
+ case PAP:
+ /* Treat a PAP just like a section of stack, not forgetting to
+ * evacuate the function pointer too...
+ */
+ {
+ StgPAP* pap = (StgPAP *)p;
+
+ pap->fun = evacuate(pap->fun);
+ scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
+ break;
+ }
- end = (P_)p->payload + info->layout.payload.ptrs;
- for (q = (P_)p->payload; q < end; q++) {
- (StgClosure *)*q = evacuate((StgClosure *)*q);
- }
- break;
+ case MUT_ARR_PTRS:
+ // follow everything
+ {
+ StgPtr next;
+
+ evac_gen = 0; // repeatedly mutable
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsFalse; // mutable anyhow.
+ break;
+ }
+
+ case MUT_ARR_PTRS_FROZEN:
+ // follow everything
+ {
+ StgPtr next;
+
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ break;
+ }
+
+ case TSO:
+ {
+ StgTSO *tso = (StgTSO *)p;
+ evac_gen = 0;
+ scavengeTSO(tso);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsFalse;
+ break;
+ }
+
+#if defined(PAR)
+ case RBH: // cf. BLACKHOLE_BQ
+ {
+#if 0
+ nat size, ptrs, nonptrs, vhs;
+ char str[80];
+ StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
+#endif
+ StgRBH *rbh = (StgRBH *)p;
+ (StgClosure *)rbh->blocking_queue =
+ evacuate((StgClosure *)rbh->blocking_queue);
+ recordMutable((StgMutClosure *)rbh);
+ failed_to_evac = rtsFalse; // mutable anyhow.
+ IF_DEBUG(gc,
+ belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
+ p, info_type(p), (StgClosure *)rbh->blocking_queue));
+ break;
+ }
+
+ case BLOCKED_FETCH:
+ {
+ StgBlockedFetch *bf = (StgBlockedFetch *)p;
+ // follow the pointer to the node which is being demanded
+ (StgClosure *)bf->node =
+ evacuate((StgClosure *)bf->node);
+ // follow the link to the rest of the blocking queue
+ (StgClosure *)bf->link =
+ evacuate((StgClosure *)bf->link);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)bf);
+ }
+ IF_DEBUG(gc,
+ belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
+ bf, info_type((StgClosure *)bf),
+ bf->node, info_type(bf->node)));
+ break;
+ }
+
+#ifdef DIST
+ case REMOTE_REF:
+#endif
+ case FETCH_ME:
+ break; // nothing to do in this case
+
+ case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ {
+ StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
+ (StgClosure *)fmbq->blocking_queue =
+ evacuate((StgClosure *)fmbq->blocking_queue);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)fmbq);
+ }
+ IF_DEBUG(gc,
+ belch("@@ scavenge: %p (%s) exciting, isn't it",
+ p, info_type((StgClosure *)p)));
+ break;
+ }
+#endif // PAR
+
+ default:
+ barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
+ info->type, p);
+ }
+
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ mkMutCons((StgClosure *)q, &generations[evac_gen]);
+ }
+
+ // mark the next bit to indicate "scavenged"
+ mark(q+1, Bdescr(q));
+
+ } // while (!mark_stack_empty())
+
+ // start a new linear scan if the mark stack overflowed at some point
+ if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
+ IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
+ mark_stack_overflowed = rtsFalse;
+ oldgen_scan_bd = oldest_gen->steps[0].blocks;
+ oldgen_scan = oldgen_scan_bd->start;
}
- case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
- case BLACKHOLE:
- break;
+ if (oldgen_scan_bd) {
+ // push a new thing on the mark stack
+ loop:
+ // find a closure that is marked but not scavenged, and start
+ // from there.
+ while (oldgen_scan < oldgen_scan_bd->free
+ && !is_marked(oldgen_scan,oldgen_scan_bd)) {
+ oldgen_scan++;
+ }
- case THUNK_SELECTOR:
+ if (oldgen_scan < oldgen_scan_bd->free) {
+
+ // already scavenged?
+ if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
+ oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+ goto loop;
+ }
+ push_mark_stack(oldgen_scan);
+ // ToDo: bump the linear scan by the actual size of the object
+ oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+ goto linear_scan;
+ }
+
+ oldgen_scan_bd = oldgen_scan_bd->link;
+ if (oldgen_scan_bd != NULL) {
+ oldgen_scan = oldgen_scan_bd->start;
+ goto loop;
+ }
+ }
+}
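+
+/* For illustration: the mark stack is a bounded worklist, and the code
+ * above falls back to a linear sweep over marked-but-unscavenged closures
+ * once it has overflowed (the second mark bit, set by mark(q+1,...),
+ * records "scavenged").  A minimal sketch of the overflow protocol, with
+ * a hypothetical fixed-size stack:
+ *
+ *   #define MARK_STACK_SIZE 1024
+ *   static StgPtr mark_stk[MARK_STACK_SIZE];
+ *   static nat mark_sp = 0;
+ *   static rtsBool overflowed = rtsFalse;
+ *
+ *   static void push_mark(StgPtr p) {
+ *       if (mark_sp == MARK_STACK_SIZE) {
+ *           overflowed = rtsTrue;   // drop it; the later linear scan
+ *           return;                 // will rediscover marked objects
+ *       }
+ *       mark_stk[mark_sp++] = p;
+ *   }
+ */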
+
+/* -----------------------------------------------------------------------------
+ Scavenge one object.
+
+ This is used for objects that are temporarily marked as mutable
+ because they contain old-to-new generation pointers. Only certain
+ objects can have this property.
+ -------------------------------------------------------------------------- */
+
+static rtsBool
+scavenge_one(StgPtr p)
+{
+ const StgInfoTable *info;
+ nat saved_evac_gen = evac_gen;
+ rtsBool no_luck;
+
+ ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
+ || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
+
+ info = get_itbl((StgClosure *)p);
+
+ switch (info->type) {
+
+ case FUN:
+ case FUN_1_0: // hardly worth specialising these guys
+ case FUN_0_1:
+ case FUN_1_1:
+ case FUN_0_2:
+ case FUN_2_0:
+ case THUNK:
+ case THUNK_1_0:
+ case THUNK_0_1:
+ case THUNK_1_1:
+ case THUNK_0_2:
+ case THUNK_2_0:
+ case CONSTR:
+ case CONSTR_1_0:
+ case CONSTR_0_1:
+ case CONSTR_1_1:
+ case CONSTR_0_2:
+ case CONSTR_2_0:
+ case WEAK:
+ case FOREIGN:
+ case IND_PERM:
+ case IND_OLDGEN_PERM:
+ {
+ StgPtr q, end;
+
+ end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
+ (StgClosure *)*q = evacuate((StgClosure *)*q);
+ }
+ break;
+ }
+
+ case CAF_BLACKHOLE:
+ case SE_CAF_BLACKHOLE:
+ case SE_BLACKHOLE:
+ case BLACKHOLE:
+ break;
+
+ case THUNK_SELECTOR:
{
- StgSelector *s = (StgSelector *)p;
- s->selectee = evacuate(s->selectee);
- break;
+ StgSelector *s = (StgSelector *)p;
+ s->selectee = evacuate(s->selectee);
+ break;
+ }
+
+ case ARR_WORDS:
+ // nothing to follow
+ break;
+
+ case MUT_ARR_PTRS:
+ {
+ // follow everything
+ StgPtr next;
+
+ evac_gen = 0; // repeatedly mutable
+ recordMutable((StgMutClosure *)p);
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsFalse;
+ break;
+ }
+
+ case MUT_ARR_PTRS_FROZEN:
+ {
+ // follow everything
+ StgPtr next;
+
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ break;
}
-
- case AP_UPD: /* same as PAPs */
- case PAP:
- /* Treat a PAP just like a section of stack, not forgetting to
- * evacuate the function pointer too...
- */
- {
- StgPAP* pap = (StgPAP *)p;
+
+ case TSO:
+ {
+ StgTSO *tso = (StgTSO *)p;
- pap->fun = evacuate(pap->fun);
- scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
- break;
+ evac_gen = 0; // repeatedly mutable
+ scavengeTSO(tso);
+ recordMutable((StgMutClosure *)tso);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsFalse;
+ break;
+ }
+
+ case AP_UPD:
+ case PAP:
+ {
+ StgPAP* pap = (StgPAP *)p;
+ pap->fun = evacuate(pap->fun);
+ scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
+ break;
}
- case IND_OLDGEN:
- /* This might happen if for instance a MUT_CONS was pointing to a
- * THUNK which has since been updated. The IND_OLDGEN will
- * be on the mutable list anyway, so we don't need to do anything
- * here.
- */
- break;
+ case IND_OLDGEN:
+ // This might happen if for instance a MUT_CONS was pointing to a
+ // THUNK which has since been updated. The IND_OLDGEN will
+ // be on the mutable list anyway, so we don't need to do anything
+ // here.
+ break;
- default:
- barf("scavenge_one: strange object");
- }
+ default:
+ barf("scavenge_one: strange object %d", (int)(info->type));
+ }
- no_luck = failed_to_evac;
- failed_to_evac = rtsFalse;
- return (no_luck);
+ no_luck = failed_to_evac;
+ failed_to_evac = rtsFalse;
+ return (no_luck);
}
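
/* Usage sketch: callers inspect the result to decide whether the object
 * must be kept on a mutable list, e.g. (cf. scavenge_mut_once_list below):
 *
 *   if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
 *       // some pointee could not be promoted: keep p on the list
 *       p->mut_link = new_list;
 *       new_list = p;
 *   }
 */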
-
/* -----------------------------------------------------------------------------
Scavenging mutable lists.
generations older than the one being collected) as roots. We also
remove non-mutable objects from the mutable list at this point.
-------------------------------------------------------------------------- */
-//@cindex scavenge_mut_once_list
static void
scavenge_mut_once_list(generation *gen)
for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
- /* make sure the info pointer is into text space */
+ // make sure the info pointer is into text space
ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
|| IS_HUGS_CONSTR_INFO(GET_INFO(p))));
((StgIndOldGen *)p)->indirectee =
evacuate(((StgIndOldGen *)p)->indirectee);
-#ifdef DEBUG
+#if 0 && defined(DEBUG)
if (RtsFlags.DebugFlags.gc)
/* Debugging code to print out the size of the thing we just
* promoted
} else {
size = gen->steps[0].scan - start;
}
- fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
+ belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
}
#endif
p->mut_link = NULL;
}
continue;
-
- case MUT_VAR:
- /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove
- * it from the mutable list if possible by promoting whatever it
- * points to.
- */
- ASSERT(p->header.info == &MUT_CONS_info);
- if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
- /* didn't manage to promote everything, so put the
- * MUT_CONS back on the list.
+
+ case MUT_CONS:
+ /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove
+ * it from the mutable list if possible by promoting whatever it
+ * points to.
*/
- p->mut_link = new_list;
- new_list = p;
- }
- continue;
-
- case CAF_ENTERED:
- {
- StgCAF *caf = (StgCAF *)p;
- caf->body = evacuate(caf->body);
- caf->value = evacuate(caf->value);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- p->mut_link = new_list;
- new_list = p;
- } else {
- p->mut_link = NULL;
+ if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
+ /* didn't manage to promote everything, so put the
+ * MUT_CONS back on the list.
+ */
+ p->mut_link = new_list;
+ new_list = p;
}
- }
- continue;
-
- case CAF_UNENTERED:
- {
- StgCAF *caf = (StgCAF *)p;
- caf->body = evacuate(caf->body);
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- p->mut_link = new_list;
- new_list = p;
- } else {
- p->mut_link = NULL;
- }
- }
- continue;
+ continue;
default:
- /* shouldn't have anything else on the mutables list */
+ // shouldn't have anything else on the mutables list
barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
}
}
gen->mut_once_list = new_list;
}
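
/* For illustration: scavenge_mut_once_list is a destructive filter over an
 * intrusive list -- entries whose pointees were all promoted are dropped
 * (mut_link = NULL), the rest are consed onto new_list (ordering on these
 * lists is not significant).  The generic shape, with a stand-in Node type:
 *
 *   typedef struct Node { struct Node *next; } Node;
 *
 *   static Node *filter_keep(Node *list, int (*keep)(Node *)) {
 *       Node *out = NULL, *next;
 *       for (; list != NULL; list = next) {
 *           next = list->next;
 *           if (keep(list)) { list->next = out; out = list; }
 *           else            { list->next = NULL; }
 *       }
 *       return out;
 *   }
 */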
-//@cindex scavenge_mutable_list
static void
scavenge_mutable_list(generation *gen)
for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
- /* make sure the info pointer is into text space */
+ // make sure the info pointer is into text space
ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
|| IS_HUGS_CONSTR_INFO(GET_INFO(p))));
*/
switch(info->type) {
- case MUT_ARR_PTRS_FROZEN:
- /* remove this guy from the mutable list, but follow the ptrs
- * anyway (and make sure they get promoted to this gen).
- */
+ case MUT_ARR_PTRS:
+ // follow everything
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
{
StgPtr end, q;
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS_FROZEN %p; size: %#x ; next: %p",
- p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));
-
end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- evac_gen = gen->no;
for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
(StgClosure *)*q = evacuate((StgClosure *)*q);
}
- evac_gen = 0;
-
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- }
continue;
}
-
- case MUT_ARR_PTRS:
- /* follow everything */
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
+
+ // Happens if a MUT_ARR_PTRS in the old generation is frozen
+ case MUT_ARR_PTRS_FROZEN:
{
StgPtr end, q;
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS %p; size: %#x ; next: %p",
- p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));
-
+ evac_gen = gen->no;
end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
(StgClosure *)*q = evacuate((StgClosure *)*q);
}
+ evac_gen = 0;
+ p->mut_link = NULL;
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ mkMutCons((StgClosure *)p, gen);
+ }
continue;
}
-
+
case MUT_VAR:
- /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
- * it from the mutable list if possible by promoting whatever it
- * points to.
- */
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging MUT_VAR %p; var: %p ; next: %p",
- p, ((StgMutVar *)p)->var, p->mut_link));
+ ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
+ continue;
- ASSERT(p->header.info != &MUT_CONS_info);
- ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- p->mut_link = gen->mut_list;
- gen->mut_list = p;
- continue;
-
case MVAR:
{
StgMVar *mvar = (StgMVar *)p;
-
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging MAVR %p; head: %p; tail: %p; value: %p ; next: %p",
- mvar, mvar->head, mvar->tail, mvar->value, p->mut_link));
-
(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
case BLACKHOLE_BQ:
{
StgBlockingQueue *bh = (StgBlockingQueue *)p;
-
- IF_DEBUG(gc,
- belch("@@ scavenge_mut_list: scavenging BLACKHOLE_BQ (%p); next: %p",
- p, p->mut_link));
-
(StgClosure *)bh->blocking_queue =
evacuate((StgClosure *)bh->blocking_queue);
p->mut_link = gen->mut_list;
}
continue;
- // HWL: old PAR code deleted here
+#if defined(PAR)
+ // HWL: check whether all of these are necessary
+
+ case RBH: // cf. BLACKHOLE_BQ
+ {
+ // nat size, ptrs, nonptrs, vhs;
+ // char str[80];
+ // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
+ StgRBH *rbh = (StgRBH *)p;
+ (StgClosure *)rbh->blocking_queue =
+ evacuate((StgClosure *)rbh->blocking_queue);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)rbh);
+ }
+ // ToDo: use size of reverted closure here!
+ p += BLACKHOLE_sizeW();
+ break;
+ }
+
+ case BLOCKED_FETCH:
+ {
+ StgBlockedFetch *bf = (StgBlockedFetch *)p;
+ // follow the pointer to the node which is being demanded
+ (StgClosure *)bf->node =
+ evacuate((StgClosure *)bf->node);
+ // follow the link to the rest of the blocking queue
+ (StgClosure *)bf->link =
+ evacuate((StgClosure *)bf->link);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)bf);
+ }
+ p += sizeofW(StgBlockedFetch);
+ break;
+ }
+
+#ifdef DIST
+ case REMOTE_REF:
+ barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
+#endif
+ case FETCH_ME:
+ p += sizeofW(StgFetchMe);
+ break; // nothing to do in this case
+
+ case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
+ {
+ StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
+ (StgClosure *)fmbq->blocking_queue =
+ evacuate((StgClosure *)fmbq->blocking_queue);
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ recordMutable((StgMutClosure *)fmbq);
+ }
+ p += sizeofW(StgFetchMeBlockingQueue);
+ break;
+ }
+#endif
default:
- /* shouldn't have anything else on the mutables list */
+ // shouldn't have anything else on the mutables list
barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
}
}
}
-//@cindex scavenge_static
static void
scavenge_static(void)
if (info->type==RBH)
info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
*/
- /* make sure the info pointer is into text space */
+ // make sure the info pointer is into text space
ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
|| IS_HUGS_CONSTR_INFO(GET_INFO(p))));
*/
if (failed_to_evac) {
failed_to_evac = rtsFalse;
- scavenged_static_objects = STATIC_LINK(info,p);
+ scavenged_static_objects = IND_STATIC_LINK(p);
((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
oldest_gen->mut_once_list = (StgMutClosure *)ind;
}
case THUNK_STATIC:
case FUN_STATIC:
scavenge_srt(info);
- /* fall through */
+ break;
case CONSTR_STATIC:
{
StgPtr q, next;
next = (P_)p->payload + info->layout.payload.ptrs;
- /* evacuate the pointers */
+ // evacuate the pointers
for (q = (P_)p->payload; q < next; q++) {
(StgClosure *)*q = evacuate((StgClosure *)*q);
}
}
default:
- barf("scavenge_static");
+ barf("scavenge_static: strange closure %d", (int)(info->type));
}
ASSERT(failed_to_evac == rtsFalse);
- /* get the next static object from the list. Remeber, there might
+ /* get the next static object from the list. Remember, there might
* be more stuff on this list now that we've done some evacuating!
* (static_objects is a global)
*/
objects pointed to by it. We can use the same code for walking
PAPs, since these are just sections of copied stack.
-------------------------------------------------------------------------- */
-//@cindex scavenge_stack
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
StgPtr q;
const StgInfoTable* info;
- StgWord32 bitmap;
+ StgWord bitmap;
- IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
+ //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
/*
* Each time around this loop, we are looking at a chunk of stack
while (p < stack_end) {
q = *(P_ *)p;
- /* If we've got a tag, skip over that many words on the stack */
+ // If we've got a tag, skip over that many words on the stack
if (IS_ARG_TAG((W_)q)) {
p += ARG_SIZE(q);
p++; continue;
*/
if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
- if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
+ if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
ASSERT(closure_STATIC((StgClosure *)q));
}
- /* otherwise, must be a pointer into the allocation space. */
+ // otherwise, must be a pointer into the allocation space.
#endif
(StgClosure *)*p = evacuate((StgClosure *)q);
switch (info->type) {
- /* Dynamic bitmap: the mask is stored on the stack */
+ // Dynamic bitmap: the mask is stored on the stack
case RET_DYN:
bitmap = ((StgRetDyn *)p)->liveness;
p = (P_)&((StgRetDyn *)p)->payload[0];
goto small_bitmap;
- /* probably a slow-entry point return address: */
+ // probably a slow-entry point return address:
case FUN:
case FUN_STATIC:
{
belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
old_p, p, old_p+1));
#else
- p++; /* what if FHS!=1 !? -- HWL */
+ p++; // what if FHS!=1 !? -- HWL
#endif
goto follow_srt;
}
case UPDATE_FRAME:
{
StgUpdateFrame *frame = (StgUpdateFrame *)p;
+
+ p += sizeofW(StgUpdateFrame);
+
+#ifndef not_yet
+ frame->updatee = evacuate(frame->updatee);
+ continue;
+#else // specialised code for update frames, not sure if it's worth it.
StgClosure *to;
nat type = get_itbl(frame->updatee)->type;
- p += sizeofW(StgUpdateFrame);
if (type == EVACUATED) {
frame->updatee = evacuate(frame->updatee);
continue;
} else {
bdescr *bd = Bdescr((P_)frame->updatee);
- step *step;
- if (bd->gen->no > N) {
- if (bd->gen->no < evac_gen) {
+ step *stp;
+ if (bd->gen_no > N) {
+ if (bd->gen_no < evac_gen) {
failed_to_evac = rtsTrue;
}
continue;
}
- /* Don't promote blackholes */
- step = bd->step;
- if (!(step->gen->no == 0 &&
- step->no != 0 &&
- step->no == step->gen->n_steps-1)) {
- step = step->to;
+ // Don't promote blackholes
+ stp = bd->step;
+ if (!(stp->gen_no == 0 &&
+ stp->no != 0 &&
+ stp->no == stp->gen->n_steps-1)) {
+ stp = stp->to;
}
switch (type) {
case BLACKHOLE:
case CAF_BLACKHOLE:
to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
- sizeofW(StgHeader), step);
+ sizeofW(StgHeader), stp);
frame->updatee = to;
continue;
case BLACKHOLE_BQ:
- to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
+ to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
frame->updatee = to;
recordMutable((StgMutClosure *)to);
continue;
barf("scavenge_stack: UPDATE_FRAME updatee");
}
}
+#endif
}
- /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
+ // small bitmap (< 32 entries, or 64 on a 64-bit machine)
case STOP_FRAME:
case CATCH_FRAME:
case SEQ_FRAME:
- {
- // StgPtr old_p = p; // debugging only -- HWL
- /* stack frames like these are ordinary closures and therefore may
- contain setup-specific fixed-header words (as in GranSim!);
- therefore, these cases should not use p++ but &(p->payload) -- HWL */
- // IF_DEBUG(gran, IF_DEBUG(sanity, printObj(p)));
- bitmap = info->layout.bitmap;
-
- p = (StgPtr)&(((StgClosure *)p)->payload);
- // IF_DEBUG(sanity, belch("HWL: scavenge_stack: (STOP|CATCH|SEQ)_FRAME adjusting p from %p to %p (instead of %p)", old_p, p, old_p+1));
- goto small_bitmap;
- }
case RET_BCO:
case RET_SMALL:
case RET_VEC_SMALL:
bitmap = info->layout.bitmap;
p++;
- /* this assumes that the payload starts immediately after the info-ptr */
+ // this assumes that the payload starts immediately after the info-ptr
small_bitmap:
while (bitmap != 0) {
if ((bitmap & 1) == 0) {
scavenge_srt(info);
continue;
- /* large bitmap (> 32 entries) */
+ // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
case RET_BIG:
case RET_VEC_BIG:
{
for (i=0; i<large_bitmap->size; i++) {
bitmap = large_bitmap->bitmap[i];
- q = p + sizeof(W_) * 8;
+ q = p + BITS_IN(W_);
while (bitmap != 0) {
if ((bitmap & 1) == 0) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
}
}
- /* and don't forget to follow the SRT */
+ // and don't forget to follow the SRT
goto follow_srt;
}
default:
- barf("scavenge_stack: weird activation record found on stack.\n");
+ barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
}
}
}
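
/* For illustration: the small_bitmap loop above reads the frame's liveness
 * bitmap one bit per stack word -- a 0 bit means "live pointer, follow it",
 * a 1 bit means "non-pointer word, skip".  The same shape in isolation
 * (W_ being the RTS word type, visit standing in for evacuate):
 *
 *   static void walk_small_bitmap(StgPtr p, W_ bitmap,
 *                                 void (*visit)(StgPtr slot)) {
 *       while (bitmap != 0) {
 *           if ((bitmap & 1) == 0)
 *               visit(p);          // pointer slot
 *           p++;
 *           bitmap = bitmap >> 1;
 *       }
 *   }
 */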
objects are (repeatedly) mutable, so most of the time evac_gen will
be zero.
--------------------------------------------------------------------------- */
-//@cindex scavenge_large
static void
-scavenge_large(step *step)
+scavenge_large(step *stp)
{
bdescr *bd;
StgPtr p;
- const StgInfoTable* info;
- nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
- evac_gen = 0; /* most objects are mutable */
- bd = step->new_large_objects;
+ bd = stp->new_large_objects;
- for (; bd != NULL; bd = step->new_large_objects) {
+ for (; bd != NULL; bd = stp->new_large_objects) {
/* take this object *off* the large objects list and put it on
* the scavenged large objects list. This is so that we can
* treat new_large_objects as a stack and push new objects on
* the front when evacuating.
*/
- step->new_large_objects = bd->link;
- dbl_link_onto(bd, &step->scavenged_large_objects);
-
- p = bd->start;
- info = get_itbl((StgClosure *)p);
-
- switch (info->type) {
-
- /* only certain objects can be "large"... */
-
- case ARR_WORDS:
- /* nothing to follow */
- continue;
-
- case MUT_ARR_PTRS:
- /* follow everything */
- {
- StgPtr next;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- (StgClosure *)*p = evacuate((StgClosure *)*p);
- }
- continue;
- }
-
- case MUT_ARR_PTRS_FROZEN:
- /* follow everything */
- {
- StgPtr start = p, next;
-
- evac_gen = saved_evac_gen; /* not really mutable */
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- (StgClosure *)*p = evacuate((StgClosure *)*p);
- }
- evac_gen = 0;
- if (failed_to_evac) {
- recordMutable((StgMutClosure *)start);
- }
- continue;
- }
-
- case BCO:
- {
- StgBCO* bco = (StgBCO *)p;
- nat i;
- evac_gen = saved_evac_gen;
- for (i = 0; i < bco->n_ptrs; i++) {
- bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
- }
- evac_gen = 0;
- continue;
- }
+ stp->new_large_objects = bd->link;
+ dbl_link_onto(bd, &stp->scavenged_large_objects);
- case TSO:
- scavengeTSO((StgTSO *)p);
- // HWL: old PAR code deleted here
- continue;
+ // update the block count in this step.
+ stp->n_scavenged_large_blocks += bd->blocks;
- default:
- barf("scavenge_large: unknown/strange object");
+ p = bd->start;
+ if (scavenge_one(p)) {
+ mkMutCons((StgClosure *)p, stp->gen);
}
}
}
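
/* Design note: new_large_objects is deliberately used as a stack.
 * evacuate_large() pushes a block descriptor on the front, roughly
 *
 *   bd->link = stp->new_large_objects;
 *   stp->new_large_objects = bd;
 *
 * and the loop above pops from the front by re-reading
 * stp->new_large_objects on every iteration (rather than following
 * bd->link), so large objects evacuated *during* scavenging are picked up
 * by the same loop without any extra bookkeeping.
 */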
-//@cindex zero_static_object_list
+/* -----------------------------------------------------------------------------
+ Initialising the static object & mutable lists
+ -------------------------------------------------------------------------- */
static void
zero_static_object_list(StgClosure* first_static)
* It doesn't do any harm to zero all the mutable link fields on the
* mutable list.
*/
-//@cindex zero_mutable_list
static void
zero_mutable_list( StgMutClosure *first )
}
}
-//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
-//@subsection Reverting CAFs
-
/* -----------------------------------------------------------------------------
Reverting CAFs
-------------------------------------------------------------------------- */
-//@cindex RevertCAFs
-void RevertCAFs(void)
+void
+revertCAFs( void )
{
- while (enteredCAFs != END_CAF_LIST) {
- StgCAF* caf = enteredCAFs;
-
- enteredCAFs = caf->link;
- ASSERT(get_itbl(caf)->type == CAF_ENTERED);
- SET_INFO(caf,&CAF_UNENTERED_info);
- caf->value = (StgClosure *)0xdeadbeef;
- caf->link = (StgCAF *)0xdeadbeef;
- }
- enteredCAFs = END_CAF_LIST;
-}
+ StgIndStatic *c;
-//@cindex revert_dead_CAFs
+ for (c = (StgIndStatic *)caf_list; c != NULL;
+ c = (StgIndStatic *)c->static_link)
+ {
+ c->header.info = c->saved_info;
+ c->saved_info = NULL;
+ // could, but not necessary: c->static_link = NULL;
+ }
+ caf_list = NULL;
+}
-void revert_dead_CAFs(void)
+void
+markCAFs( evac_fn evac )
{
- StgCAF* caf = enteredCAFs;
- enteredCAFs = END_CAF_LIST;
- while (caf != END_CAF_LIST) {
- StgCAF *next, *new;
- next = caf->link;
- new = (StgCAF*)isAlive((StgClosure*)caf);
- if (new) {
- new->link = enteredCAFs;
- enteredCAFs = new;
- } else {
- /* ASSERT(0); */
- SET_INFO(caf,&CAF_UNENTERED_info);
- caf->value = (StgClosure*)0xdeadbeef;
- caf->link = (StgCAF*)0xdeadbeef;
- }
- caf = next;
+ StgIndStatic *c;
+
+ for (c = (StgIndStatic *)caf_list; c != NULL;
+ c = (StgIndStatic *)c->static_link)
+ {
+ evac(&c->indirectee);
}
}
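
/* Usage sketch (assuming evac_fn is the usual "void (*)(StgClosure **)"
 * root-marking callback): an external pass such as a profiler can visit
 * every CAF's indirectee like so:
 *
 *   static void print_root( StgClosure **root )
 *   {
 *       belch("CAF root: %p", (void *)*root);
 *   }
 *
 *   // then: markCAFs(print_root);
 */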
-//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
-//@subsection Sanity code for CAF garbage collection
-
/* -----------------------------------------------------------------------------
Sanity code for CAF garbage collection.
time.
-------------------------------------------------------------------------- */
-#ifdef DEBUG
-//@cindex gcCAFs
+#if 0 && defined(DEBUG)
static void
gcCAFs(void)
ASSERT(info->type == IND_STATIC);
if (STATIC_LINK(info,p) == NULL) {
- IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
- /* black hole it */
- SET_INFO(p,&BLACKHOLE_info);
+ IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
+ // black hole it
+ SET_INFO(p,&stg_BLACKHOLE_info);
p = STATIC_LINK2(info,p);
*pp = p;
}
}
- /* fprintf(stderr, "%d CAFs live\n", i); */
+ // belch("%d CAFs live", i);
}
#endif
-//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
-//@subsection Lazy black holing
/* -----------------------------------------------------------------------------
Lazy black holing.
some work, we have to run down the stack and black-hole all the
closures referred to by update frames.
-------------------------------------------------------------------------- */
-//@cindex threadLazyBlackHole
static void
threadLazyBlackHole(StgTSO *tso)
* The blackhole made for a CAF is a CAF_BLACKHOLE, so they
* don't interfere with this optimisation.
*/
- if (bh->header.info == &BLACKHOLE_info) {
+ if (bh->header.info == &stg_BLACKHOLE_info) {
return;
}
- if (bh->header.info != &BLACKHOLE_BQ_info &&
- bh->header.info != &CAF_BLACKHOLE_info) {
+ if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
+ bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
- fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
+ belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
+#endif
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh is now dead.
+ LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
+#endif
+ SET_INFO(bh,&stg_BLACKHOLE_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh has just been created.
+ LDV_recordCreate(bh);
#endif
- SET_INFO(bh,&BLACKHOLE_info);
}
update_frame = update_frame->link;
}
}
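
/* For illustration: "black-holing" is nothing more than swapping the
 * closure's info pointer, which is why the code above compares header.info
 * directly and why the operation can safely be deferred until GC time:
 *
 *   // bh is the updatee of an update frame; after this, re-entering the
 *   // closure blocks instead of repeating the computation that is
 *   // already in progress.
 *   SET_INFO(bh, &stg_BLACKHOLE_info);
 */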
-//@node Stack squeezing, Pausing a thread, Lazy black holing
-//@subsection Stack squeezing
/* -----------------------------------------------------------------------------
* Stack squeezing
* lazy black holing here.
*
* -------------------------------------------------------------------------- */
-//@cindex threadSqueezeStack
static void
threadSqueezeStack(StgTSO *tso)
{
lnat displacement = 0;
StgUpdateFrame *frame;
- StgUpdateFrame *next_frame; /* Temporally next */
- StgUpdateFrame *prev_frame; /* Temporally previous */
+ StgUpdateFrame *next_frame; // Temporally next
+ StgUpdateFrame *prev_frame; // Temporally previous
StgPtr bottom;
rtsBool prev_was_update_frame;
#if DEBUG
*/
next_frame = NULL;
- /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
+ // bottom - sizeof(StgStopFrame) is the STOP_FRAME
while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
prev_frame = frame->link;
frame->link = next_frame;
frame, prev_frame);
})
switch (get_itbl(frame)->type) {
- case UPDATE_FRAME: upd_frames++;
- if (frame->updatee->header.info == &BLACKHOLE_info)
- bhs++;
- break;
- case STOP_FRAME: stop_frames++;
- break;
- case CATCH_FRAME: catch_frames++;
- break;
- case SEQ_FRAME: seq_frames++;
- break;
+ case UPDATE_FRAME:
+ upd_frames++;
+ if (frame->updatee->header.info == &stg_BLACKHOLE_info)
+ bhs++;
+ break;
+ case STOP_FRAME:
+ stop_frames++;
+ break;
+ case CATCH_FRAME:
+ catch_frames++;
+ break;
+ case SEQ_FRAME:
+ seq_frames++;
+ break;
default:
barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
frame, prev_frame);
}
#endif
if (get_itbl(frame)->type == UPDATE_FRAME
- && frame->updatee->header.info == &BLACKHOLE_info) {
+ && frame->updatee->header.info == &stg_BLACKHOLE_info) {
break;
}
}
if (prev_was_update_frame && is_update_frame &&
(P_)prev_frame == frame_bottom + displacement) {
- /* Now squeeze out the current frame */
+ // Now squeeze out the current frame
StgClosure *updatee_keep = prev_frame->updatee;
StgClosure *updatee_bypass = frame->updatee;
#if DEBUG
- IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
+ IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
squeezes++;
#endif
* and probably less bug prone, although it's probably much
* slower --SDM
*/
-#if 0 /* do it properly... */
+#if 0 // do it properly...
# if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
# error Unimplemented lazy BH warning. (KSW 1999-01)
# endif
- if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
- || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
+ if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
+ || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
) {
- /* Sigh. It has one. Don't lose those threads! */
- if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
- /* Urgh. Two queues. Merge them. */
+ // Sigh. It has one. Don't lose those threads!
+ if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
+ // Urgh. Two queues. Merge them.
P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
while (keep_tso->link != END_TSO_QUEUE) {
keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
} else {
- /* For simplicity, just swap the BQ for the BH */
+ // For simplicity, just swap the BQ for the BH
P_ temp = updatee_keep;
updatee_keep = updatee_bypass;
updatee_bypass = temp;
- /* Record the swap in the kept frame (below) */
+ // Record the swap in the kept frame (below)
prev_frame->updatee = updatee_keep;
}
}
/* wasn't there something about update squeezing and ticky to be
* sorted out? oh yes: we aren't counting each enter properly
* in this case. See the log somewhere. KSW 1999-04-21
+ *
+ * Check two things: that the two update frames don't point to
+ * the same object, and that the updatee_bypass isn't already an
+ * indirection. Both of these cases only happen when we're in a
+ * black-hole-style loop, e.g. the closure built for 'let x = x in x'
+ * (with multiple update frames on the stack pointing to the same
+ * closure), but they can both
+ * screw us up if we don't check.
*/
- UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */
+ if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
+ // this wakes the threads up
+ UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
+ }
- sp = (P_)frame - 1; /* sp = stuff to slide */
+ sp = (P_)frame - 1; // sp = stuff to slide
displacement += sizeofW(StgUpdateFrame);
} else {
- /* No squeeze for this frame */
- sp = frame_bottom - 1; /* Keep the current frame */
+ // No squeeze for this frame
+ sp = frame_bottom - 1; // Keep the current frame
/* Do lazy black-holing.
*/
if (is_update_frame) {
StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
- if (bh->header.info != &BLACKHOLE_info &&
- bh->header.info != &BLACKHOLE_BQ_info &&
- bh->header.info != &CAF_BLACKHOLE_info) {
+ if (bh->header.info != &stg_BLACKHOLE_info &&
+ bh->header.info != &stg_BLACKHOLE_BQ_info &&
+ bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
- fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
+ belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
+#endif
+#ifdef DEBUG
+ /* zero out the slop so that the sanity checker can tell
+ * where the next closure is.
+ */
+ {
+ StgInfoTable *info = get_itbl(bh);
+ nat np = info->layout.payload.ptrs;
+ nat nw = info->layout.payload.nptrs;
+ nat i;
+ /* don't zero out slop for a THUNK_SELECTOR, because its layout
+ * info is used for a different purpose, and it's exactly the
+ * same size as a BLACKHOLE in any case.
+ */
+ if (info->type != THUNK_SELECTOR) {
+ for (i = np; i < np + nw; i++) {
+ ((StgClosure *)bh)->payload[i] = 0;
+ }
+ }
+ }
+#endif
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh is now dead.
+ LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
+#endif
+ //
+ // ToDo: maybe use SET_HDR() and remove LDV_recordCreate()?
+ //
+ SET_INFO(bh,&stg_BLACKHOLE_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh has just been created.
+ LDV_recordCreate(bh);
#endif
- SET_INFO(bh,&BLACKHOLE_info);
}
}
- /* Fix the link in the current frame (should point to the frame below) */
+ // Fix the link in the current frame (should point to the frame below)
frame->link = prev_frame;
prev_was_update_frame = is_update_frame;
}
- /* Now slide all words from sp up to the next frame */
+ // Now slide all words from sp up to the next frame
if (displacement > 0) {
P_ next_frame_bottom;
else
next_frame_bottom = tso->sp - 1;
-#if DEBUG
+#if 0
IF_DEBUG(gc,
- fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
- displacement))
+ belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
+ displacement))
#endif
while (sp >= next_frame_bottom) {
tso->sp += displacement;
tso->su = prev_frame;
-#if DEBUG
+#if 0
IF_DEBUG(gc,
- fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
+ belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
#endif
}
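
/* The essence of the squeeze, pulled out of the loop above: when two
 * update frames sit directly adjacent on the stack they will both
 * update with the same value in the end, so one of them is redundant.
 * A sketch of just that transformation, using the same guards as the
 * real code ('squeezeAdjacentFrames' is a hypothetical helper):
 */
#if 0
static void
squeezeAdjacentFrames(StgUpdateFrame *kept, StgUpdateFrame *bypassed)
{
    // Make the bypassed updatee an indirection to the kept one; the
    // guards avoid self-indirections in a black-hole-style loop.
    if (bypassed->updatee != kept->updatee
        && !closure_IND(bypassed->updatee)) {
        // this also wakes up any threads blocked on the updatee
        UPD_IND_NOLOCK(bypassed->updatee, kept->updatee);
    }
    // The bypassed frame itself is then slid out of the stack,
    // adding sizeofW(StgUpdateFrame) to the running displacement.
}
#endif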
-//@node Pausing a thread, Index, Stack squeezing
-//@subsection Pausing a thread
/* -----------------------------------------------------------------------------
* Pausing a thread
* here. We also take the opportunity to do stack squeezing if it's
* turned on.
* -------------------------------------------------------------------------- */
-//@cindex threadPaused
-
void
threadPaused(StgTSO *tso)
{
if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
- threadSqueezeStack(tso); /* does black holing too */
+ threadSqueezeStack(tso); // does black holing too
else
threadLazyBlackHole(tso);
}
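
/* A hypothetical call site (sketch only): the scheduler calls
 * threadPaused() on a thread that has just stopped running, so the
 * squeezing/black-holing happens while the stack is quiescent:
 */
#if 0
    // tso has yielded back to the scheduler:
    threadPaused(tso);  // squeezes and/or black-holes its stack
#endif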
* -------------------------------------------------------------------------- */
#if DEBUG
-//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
fputc('\n', stderr);
}
-//@cindex printMutableList
void
printMutableList(generation *gen)
{
StgMutClosure *p, *next;
- p = gen->saved_mut_list;
+ p = gen->mut_list;
next = p->mut_link;
- fprintf(stderr, "@@ Mutable list %p: ", gen->saved_mut_list);
+ fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
fprintf(stderr, "%p (%s), ",
p, info_type((StgClosure *)p));
}
fputc('\n', stderr);
}
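
/* Both printers are mainly useful from a debugger or under IF_DEBUG.
 * A sketch of one possible use, assuming the storage manager's global
 * 'generations' array:
 */
#if 0
    IF_DEBUG(gc, printMutableList(&generations[1]));
#endif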
-#endif /* DEBUG */
-
-//@node Index, , Pausing a thread
-//@subsection Index
-
-//@index
-//* GarbageCollect:: @cindex\s-+GarbageCollect
-//* MarkRoot:: @cindex\s-+MarkRoot
-//* RevertCAFs:: @cindex\s-+RevertCAFs
-//* addBlock:: @cindex\s-+addBlock
-//* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
-//* copy:: @cindex\s-+copy
-//* copyPart:: @cindex\s-+copyPart
-//* evacuate:: @cindex\s-+evacuate
-//* evacuate_large:: @cindex\s-+evacuate_large
-//* gcCAFs:: @cindex\s-+gcCAFs
-//* isAlive:: @cindex\s-+isAlive
-//* mkMutCons:: @cindex\s-+mkMutCons
-//* relocate_TSO:: @cindex\s-+relocate_TSO
-//* revert_dead_CAFs:: @cindex\s-+revert_dead_CAFs
-//* scavenge:: @cindex\s-+scavenge
-//* scavenge_large:: @cindex\s-+scavenge_large
-//* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
-//* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
-//* scavenge_one:: @cindex\s-+scavenge_one
-//* scavenge_srt:: @cindex\s-+scavenge_srt
-//* scavenge_stack:: @cindex\s-+scavenge_stack
-//* scavenge_static:: @cindex\s-+scavenge_static
-//* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
-//* threadPaused:: @cindex\s-+threadPaused
-//* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
-//* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
-//* upd_evacuee:: @cindex\s-+upd_evacuee
-//* zero_mutable_list:: @cindex\s-+zero_mutable_list
-//* zero_static_object_list:: @cindex\s-+zero_static_object_list
-//@end index
+
+static inline rtsBool
+maybeLarge(StgClosure *closure)
+{
+ StgInfoTable *info = get_itbl(closure);
+
+ /* closure types that may be found on the new_large_objects list;
+ see scavenge_large */
+ return (info->type == MUT_ARR_PTRS ||
+ info->type == MUT_ARR_PTRS_FROZEN ||
+ info->type == TSO ||
+ info->type == ARR_WORDS);
+}
+
+
+#endif // DEBUG
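
/* maybeLarge() exists only to support sanity checks; a hypothetical
 * assertion while walking the new_large_objects list might look like
 * this (sketch only, 'bd' being the block descriptor of each object):
 */
#if 0
    ASSERT(maybeLarge((StgClosure *)bd->start));
#endif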