/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.104 2001/07/23 17:23:19 simonmar Exp $
+ * $Id: GC.c,v 1.136 2002/07/10 09:28:54 simonmar Exp $
*
* (c) The GHC Team 1998-1999
*
*
* ---------------------------------------------------------------------------*/
+#include "PosixSource.h"
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "FrontPanel.h"
#endif
+#include "RetainerProfile.h"
+#include "LdvProfile.h"
+
/* STATIC OBJECT LIST.
*
* During GC:
/* Weak pointers
*/
StgWeak *old_weak_ptr_list; // also pending finaliser list
-static rtsBool weak_done; // all done for this pass
+
+/* Which stage of processing various kinds of weak pointer are we at?
+ * (see traverse_weak_ptr_list() below for discussion).
+ */
+typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
+static WeakStage weak_stage;
/* List of all threads during GC
*/
static StgTSO *old_all_threads;
-static StgTSO *resurrected_threads;
+StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
* generation.
static void zero_mutable_list ( StgMutClosure *first );
static rtsBool traverse_weak_ptr_list ( void );
-static void cleanup_weak_ptr_list ( StgWeak **list );
+static void mark_weak_ptr_list ( StgWeak **list );
static void scavenge ( step * );
static void scavenge_mark_stack ( void );
static void scavenge_stack ( StgPtr p, StgPtr stack_end );
-static rtsBool scavenge_one ( StgClosure *p );
+static rtsBool scavenge_one ( StgPtr p );
static void scavenge_large ( step * );
static void scavenge_static ( void );
static void scavenge_mutable_list ( generation *g );
static void scavenge_mut_once_list ( generation *g );
-static void scavengeCAFs ( void );
#if 0 && defined(DEBUG)
static void gcCAFs ( void );
static StgPtr *mark_sp;
static StgPtr *mark_splim;
+// Flag and pointers used for falling back to a linear scan when the
+// mark stack overflows.
+static rtsBool mark_stack_overflowed;
+static bdescr *oldgen_scan_bd;
+static StgPtr oldgen_scan;
+
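A minimal sketch of the protocol these variables support, pieced together
from the push sites further down (q stands for the object just marked;
this is annotation, not additional patch content):

    if (mark_stack_full()) {
        mark_stack_overflowed = rtsTrue;
        reset_mark_stack();       // drop the entries; the mark bits retain them
    }
    push_mark_stack((StgPtr)q);
    // Dropped objects remain marked but unscavenged, so
    // scavenge_mark_stack() can recover them with a linear sweep later.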
static inline rtsBool
mark_stack_empty(void)
{
}
static inline void
+reset_mark_stack(void)
+{
+ mark_sp = mark_stack;
+}
+
+static inline void
push_mark_stack(StgPtr p)
{
*mark_sp++ = p;
- free from-space in each step, and set from-space = to-space.
+ Locks held: sched_mutex
+
-------------------------------------------------------------------------- */
void
bdescr *bd;
step *stp;
lnat live, allocated, collected = 0, copied = 0;
+ lnat oldgen_saved_blocks = 0;
nat g, s;
#ifdef PROFILING
} else {
N = 0;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
+ if (generations[g].steps[0].n_blocks +
+ generations[g].steps[0].n_large_blocks
+ >= generations[g].max_blocks) {
N = g;
}
}
stp->scan_bd = bd;
stp->new_large_objects = NULL;
stp->scavenged_large_objects = NULL;
+ stp->n_scavenged_large_blocks = 0;
new_blocks++;
// mark the large objects as not evacuated yet
for (bd = stp->large_objects; bd; bd = bd->link) {
stp->bitmap = bitmap_bdescr;
bitmap = bitmap_bdescr->start;
- IF_DEBUG(gc, fprintf(stderr, "bitmap_size: %d, bitmap: %p\n",
+ IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
bitmap_size, bitmap););
// don't forget to fill it with zeros!
stp->n_to_blocks = 0;
stp->new_large_objects = NULL;
stp->scavenged_large_objects = NULL;
+ stp->n_scavenged_large_blocks = 0;
}
}
}
}
- scavengeCAFs();
+ /* follow roots from the CAF list (used by GHCi)
+ */
+ evac_gen = 0;
+ markCAFs(mark_root);
/* follow all the roots that the application knows about.
*/
/* Mark the weak pointer list, and prepare to detect dead weak
* pointers.
*/
+ mark_weak_ptr_list(&weak_ptr_list);
old_weak_ptr_list = weak_ptr_list;
weak_ptr_list = NULL;
- weak_done = rtsFalse;
+ weak_stage = WeakPtrs;
/* The all_threads list is like the weak_ptr_list.
* See traverse_weak_ptr_list() for the details.
scavenge_static();
}
- // scavenge objects in compacted generation
- if (mark_stack_bdescr != NULL && !mark_stack_empty()) {
- scavenge_mark_stack();
- flag = rtsTrue;
- }
-
/* When scavenging the older generations: Objects may have been
* evacuated from generations <= N into older generations, and we
* need to scavenge these objects. We're going to try to ensure that
// scavenge each step in generations 0..maxgen
{
- int gen, st;
+ long gen;
+ int st;
+
loop2:
- for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
- for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
+ // scavenge objects in compacted generation
+ if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
+ (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+ scavenge_mark_stack();
+ flag = rtsTrue;
+ }
+
+ for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
+ for (st = generations[gen].n_steps; --st >= 0; ) {
if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
continue;
}
if (flag) { goto loop; }
- // must be last...
+ // must be last... invariant is that everything is fully
+ // scavenged at this point.
if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
goto loop;
}
}
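Rendered without the gotos, the fixpoint this implements looks roughly like
the following sketch; the helper names are invented for exposition and stand
for the mark-stack work and the oldest-first step walk above:

    // Sketch only: the scavenging fixpoint, gotos unrolled.
    // for (;;) {
    //     rtsBool progress = rtsFalse;
    //     progress |= drain_mark_stack();            // compacted generation
    //     progress |= scavenge_steps_oldest_first(); // gens high..0, steps high..0
    //     if (progress) continue;                    // evacuated something: again
    //     if (traverse_weak_ptr_list()) continue;    // may revive more roots
    //     break;                                     // quiescent: done
    // }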
- /* Final traversal of the weak pointer list (see comment by
- * cleanUpWeakPtrList below).
+ /* Update the pointers from the "main thread" list - these are
+ * treated as weak pointers because we want to allow a main thread
+ * to get a BlockedOnDeadMVar exception in the same way as any other
+ * thread. Note that the threads should all have been retained by
+ * GC by virtue of being on the all_threads list, we're just
+ * updating pointers here.
*/
- cleanup_weak_ptr_list(&weak_ptr_list);
+ {
+ StgMainThread *m;
+ StgTSO *tso;
+ for (m = main_threads; m != NULL; m = m->link) {
+ tso = (StgTSO *) isAlive((StgClosure *)m->tso);
+ if (tso == NULL) {
+ barf("main thread has been GC'd");
+ }
+ m->tso = tso;
+ }
+ }
#if defined(PAR)
// Reconstruct the Global Address tables used in GUM
}
}
+#ifdef PROFILING
+ // We call processHeapClosureForDead() on every closure destroyed during
+ // the current garbage collection, so we invoke LdvCensusForDead().
+ if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
+ || RtsFlags.ProfFlags.bioSelector != NULL)
+ LdvCensusForDead(N);
+#endif
+
// NO MORE EVACUATION AFTER THIS POINT!
// Finally: compaction of the oldest generation.
- if (major_gc && RtsFlags.GcFlags.compact) {
+ if (major_gc && oldest_gen->steps[0].is_compacted) {
+ // save number of blocks for stats
+ oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
compact(get_roots);
}
// for generations we collected...
if (g <= N) {
- collected += stp->n_blocks * BLOCK_SIZE_W; // for stats
+ // rough calculation of garbage collected, for stats output
+ if (stp->is_compacted) {
+ collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
+ } else {
+ collected += stp->n_blocks * BLOCK_SIZE_W;
+ }
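A worked instance of the arithmetic, with assumed numbers: if compaction
shrank the oldest generation in place from oldgen_saved_blocks = 100 to
n_blocks = 60, this pass counts 40 * BLOCK_SIZE_W words as collected; a
copied step instead counts its entire from-space, n_blocks * BLOCK_SIZE_W.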
/* free old memory and shift to-space into from-space for all
* the collected steps (except the allocation area). These
freeGroup(bd);
bd = next;
}
+
+ // update the count of blocks used by large objects
for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
bd->flags &= ~BF_EVACUATED;
}
- stp->large_objects = stp->scavenged_large_objects;
-
- /* Set the maximum blocks for this generation, interpolating
- * between the maximum size of the oldest and youngest
- * generations.
- *
- * max_blocks = oldgen_max_blocks * G
- * ----------------------
- * oldest_gen
- */
- if (g != 0) {
-#if 0
- generations[g].max_blocks = (oldest_gen->max_blocks * g)
- / (RtsFlags.GcFlags.generations-1);
-#endif
- generations[g].max_blocks = oldest_gen->max_blocks;
- }
+ stp->large_objects = stp->scavenged_large_objects;
+ stp->n_large_blocks = stp->n_scavenged_large_blocks;
- // for older generations...
} else {
+ // for older generations...
/* For older generations, we need to append the
* scavenged_large_object list (i.e. large objects that have been
// add the new blocks we promoted during this GC
stp->n_blocks += stp->n_to_blocks;
+ stp->n_large_blocks += stp->n_scavenged_large_blocks;
}
}
}
-
- /* Set the maximum blocks for the oldest generation, based on twice
- * the amount of live data now, adjusted to fit the maximum heap
- * size if necessary.
+
+ /* Reset the sizes of the older generations when we do a major
+ * collection.
*
- * This is an approximation, since in the worst case we'll need
- * twice the amount of live data plus whatever space the other
- * generations need.
+ * CURRENT STRATEGY: make all generations except zero the same size.
+ * We have to stay within the maximum heap size, and leave a certain
+ * percentage of the maximum heap size available to allocate into.
*/
if (major_gc && RtsFlags.GcFlags.generations > 1) {
- oldest_gen->max_blocks =
- stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor,
- RtsFlags.GcFlags.minOldGenSize);
- if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
- oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
- if (((int)oldest_gen->max_blocks -
- (int)oldest_gen->steps[0].n_blocks) <
- (RtsFlags.GcFlags.pcFreeHeap *
- RtsFlags.GcFlags.maxHeapSize / 200)) {
- heapOverflow();
- }
+ nat live, size, min_alloc;
+ nat max = RtsFlags.GcFlags.maxHeapSize;
+ nat gens = RtsFlags.GcFlags.generations;
+
+ // live blocks in the oldest generation
+ live = oldest_gen->steps[0].n_blocks +
+ oldest_gen->steps[0].n_large_blocks;
+
+ // default max size for all generations except zero
+ size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
+ RtsFlags.GcFlags.minOldGenSize);
+
+ // minimum size for generation zero
+ min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
+ RtsFlags.GcFlags.minAllocAreaSize);
+
+ // Auto-enable compaction when the residency reaches a
+ // certain percentage of the maximum heap size (default: 30%).
+ if (RtsFlags.GcFlags.generations > 1 &&
+ (RtsFlags.GcFlags.compact ||
+ (max > 0 &&
+ oldest_gen->steps[0].n_blocks >
+ (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
+ oldest_gen->steps[0].is_compacted = 1;
+// fprintf(stderr,"compaction: on (%d blocks live)\n", live);
+ } else {
+ oldest_gen->steps[0].is_compacted = 0;
+// fprintf(stderr,"compaction: off (%d blocks live)\n", live);
+ }
+
+ // if we're going to go over the maximum heap size, reduce the
+ // size of the generations accordingly. The calculation is
+ // different if compaction is turned on, because we don't need
+ // to double the space required to collect the old generation.
+ if (max != 0) {
+
+ // this test is necessary to ensure that the calculations
+ // below don't have any negative results - we're working
+ // with unsigned values here.
+ if (max < min_alloc) {
+ heapOverflow();
+ }
+
+ if (oldest_gen->steps[0].is_compacted) {
+ if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
+ size = (max - min_alloc) / ((gens - 1) * 2 - 1);
+ }
+ } else {
+ if ( (size * (gens - 1) * 2) + min_alloc > max ) {
+ size = (max - min_alloc) / ((gens - 1) * 2);
+ }
+ }
+
+ if (size < live) {
+ heapOverflow();
+ }
+ }
+
+#if 0
+ fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
+ min_alloc, size, max);
+#endif
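To make the two inequalities concrete, a worked case with assumed numbers
(gens = 2, max = 1000 blocks, min_alloc = 100 blocks):

    // Copying the oldest generation: each of the (gens-1) older
    // generations is budgeted 2*size blocks (from-space + to-space), so
    //     size * (gens-1) * 2 + min_alloc <= max
    //     size <= (1000 - 100) / 2  =  450 blocks
    //
    // Compacting the oldest generation: it needs only ~size blocks, so
    // the divisor drops by one:
    //     size <= (1000 - 100) / ((gens-1)*2 - 1)  =  900 blocks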
+
+ for (g = 0; g < gens; g++) {
+ generations[g].max_blocks = size;
}
}
- // Guess the amount of live data for stats.
+ // Guess the amount of live data for stats.
live = calcLive();
/* Free the small objects allocated via allocate(), since this will
alloc_HpLim = NULL;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
+ // Start a new pinned_object_block
+ pinned_object_block = NULL;
+
/* Free the mark stack.
*/
if (mark_stack_bdescr != NULL) {
/* For a two-space collector, we need to resize the nursery. */
/* set up a new nursery. Allocate a nursery size based on a
- * function of the amount of live data (currently a factor of 2,
- * should be configurable (ToDo)). Use the blocks from the old
- * nursery if possible, freeing up any left over blocks.
+ * function of the amount of live data (by default a factor of 2)
+ * Use the blocks from the old nursery if possible, freeing up any
+ * left over blocks.
*
* If we get near the maximum heap size, then adjust our nursery
* size accordingly. If the nursery is the same size as the live
*
* A normal 2-space collector would need 4L bytes to give the same
* performance we get from 3L bytes, reducing to the same
- * performance at 2L bytes.
+ * performance at 2L bytes.
*/
blocks = g0s0->n_to_blocks;
- if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
- RtsFlags.GcFlags.maxHeapSize ) {
- int adjusted_blocks; // signed on purpose
+ if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
+ blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
+ RtsFlags.GcFlags.maxHeapSize ) {
+ long adjusted_blocks; // signed on purpose
int pc_free;
adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
- IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
+ IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
heapOverflow();
*/
if (RtsFlags.GcFlags.heapSizeSuggestion) {
- int blocks;
+ long blocks;
nat needed = calcNeeded(); // approx blocks needed at next GC
/* Guess how much will be live in generation 0 step 0 next time.
- * A good approximation is the obtained by finding the
+ * A good approximation is obtained by finding the
* percentage of g0s0 that was live at the last minor GC.
*/
if (N == 0) {
* collection for collecting all steps except g0s0.
*/
blocks =
- (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
- (100 + (int)g0s0_pcnt_kept);
+ (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
+ (100 + (long)g0s0_pcnt_kept);
- if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
+ if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
blocks = RtsFlags.GcFlags.minAllocAreaSize;
}
resizeNursery((nat)blocks);
+
+ } else {
+ // we might have added extra large blocks to the nursery, so
+ // resize back to minAllocAreaSize again.
+ resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
}
}
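The blocks formula above falls out of a simple budget; a reconstruction of
the reasoning, not part of the patch:

    // Budget at the next GC:
    //     blocks                          -- the new nursery
    //   + blocks * g0s0_pcnt_kept / 100   -- expected g0s0 survivors
    //   + needed                          -- the other generations
    //   = heapSizeSuggestion
    // Solving for blocks gives
    //     blocks = (heapSizeSuggestion - needed) * 100 / (100 + g0s0_pcnt_kept)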
if (major_gc) { gcCAFs(); }
#endif
+#ifdef PROFILING
+ // resetStaticObjectForRetainerProfiling() must be called before
+ // zeroing below.
+ resetStaticObjectForRetainerProfiling();
+#endif
+
// zero the scavenged static object list
if (major_gc) {
zero_static_object_list(scavenged_static_objects);
}
- /* Reset the nursery
- */
+ // Reset the nursery
resetNurseries();
+ RELEASE_LOCK(&sched_mutex);
+
// start any pending finalizers
scheduleFinalizers(old_weak_ptr_list);
// send exceptions to any threads which were about to die
resurrectThreads(resurrected_threads);
+
+ ACQUIRE_LOCK(&sched_mutex);
// Update the stable pointer hash table.
updateStablePtrTable(major_gc);
// restore enclosing cost centre
#ifdef PROFILING
- heapCensus();
CCCS = prev_CCS;
#endif
older generations than the one we're collecting. This could
probably be optimised by keeping per-generation lists of weak
pointers, but for a few weak pointers this scheme will work.
+
+ There are three distinct stages to processing weak pointers:
+
+ - weak_stage == WeakPtrs
+
+ We process all the weak pointers whose keys are alive (evacuate
+ their values and finalizers), and repeat until we can find no new
+ live keys. If no live keys are found in this pass, then we
+ evacuate the finalizers of all the dead weak pointers in order to
+ run them.
+
+ - weak_stage == WeakThreads
+
+ Now, we discover which *threads* are still alive. Pointers to
+ threads from the all_threads and main thread lists are the
+ weakest of all: a pointer from the finalizer of a dead weak
+ pointer can keep a thread alive. Any threads found to be unreachable
+ are evacuated and placed on the resurrected_threads list so we
+ can send them a signal later.
+
+ - weak_stage == WeakDone
+
+ No more evacuation is done.
+
-------------------------------------------------------------------------- */
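Since traverse_weak_ptr_list() is re-entered from the main scavenging loop,
the stages advance across successive calls; schematically (a sketch only,
with the scavenging step left abstract):

    // weak_stage = WeakPtrs;                 // set at the start of GC
    // while (traverse_weak_ptr_list()) {     // rtsTrue: evacuated something
    //     ... scavenge until everything is fully scavenged again ...
    // }                                      // rtsFalse: weak_stage == WeakDone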
static rtsBool
StgClosure *new;
rtsBool flag = rtsFalse;
- if (weak_done) { return rtsFalse; }
-
- /* doesn't matter where we evacuate values/finalizers to, since
- * these pointers are treated as roots (iff the keys are alive).
- */
- evac_gen = 0;
-
- last_w = &old_weak_ptr_list;
- for (w = old_weak_ptr_list; w != NULL; w = next_w) {
+ switch (weak_stage) {
- /* First, this weak pointer might have been evacuated. If so,
- * remove the forwarding pointer from the weak_ptr_list.
- */
- if (get_itbl(w)->type == EVACUATED) {
- w = (StgWeak *)((StgEvacuated *)w)->evacuee;
- *last_w = w;
- }
-
- /* There might be a DEAD_WEAK on the list if finalizeWeak# was
- * called on a live weak pointer object. Just remove it.
- */
- if (w->header.info == &stg_DEAD_WEAK_info) {
- next_w = ((StgDeadWeak *)w)->link;
- *last_w = next_w;
- continue;
- }
-
- ASSERT(get_itbl(w)->type == WEAK);
-
- /* Now, check whether the key is reachable.
- */
- if ((new = isAlive(w->key))) {
- w->key = new;
- // evacuate the value and finalizer
- w->value = evacuate(w->value);
- w->finalizer = evacuate(w->finalizer);
- // remove this weak ptr from the old_weak_ptr list
- *last_w = w->link;
- // and put it on the new weak ptr list
- next_w = w->link;
- w->link = weak_ptr_list;
- weak_ptr_list = w;
- flag = rtsTrue;
- IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
- continue;
- }
- else {
- last_w = &(w->link);
- next_w = w->link;
- continue;
- }
- }
+ case WeakDone:
+ return rtsFalse;
- /* Now deal with the all_threads list, which behaves somewhat like
- * the weak ptr list. If we discover any threads that are about to
- * become garbage, we wake them up and administer an exception.
- */
- {
- StgTSO *t, *tmp, *next, **prev;
+ case WeakPtrs:
+ /* doesn't matter where we evacuate values/finalizers to, since
+ * these pointers are treated as roots (iff the keys are alive).
+ */
+ evac_gen = 0;
+
+ last_w = &old_weak_ptr_list;
+ for (w = old_weak_ptr_list; w != NULL; w = next_w) {
+
+ /* There might be a DEAD_WEAK on the list if finalizeWeak# was
+ * called on a live weak pointer object. Just remove it.
+ */
+ if (w->header.info == &stg_DEAD_WEAK_info) {
+ next_w = ((StgDeadWeak *)w)->link;
+ *last_w = next_w;
+ continue;
+ }
+
+ switch (get_itbl(w)->type) {
- prev = &old_all_threads;
- for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+ case EVACUATED:
+ next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
+ *last_w = next_w;
+ continue;
- (StgClosure *)tmp = isAlive((StgClosure *)t);
-
- if (tmp != NULL) {
- t = tmp;
- }
+ case WEAK:
+ /* Now, check whether the key is reachable.
+ */
+ new = isAlive(w->key);
+ if (new != NULL) {
+ w->key = new;
+ // evacuate the value and finalizer
+ w->value = evacuate(w->value);
+ w->finalizer = evacuate(w->finalizer);
+ // remove this weak ptr from the old_weak_ptr list
+ *last_w = w->link;
+ // and put it on the new weak ptr list
+ next_w = w->link;
+ w->link = weak_ptr_list;
+ weak_ptr_list = w;
+ flag = rtsTrue;
+ IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p",
+ w, w->key));
+ continue;
+ }
+ else {
+ last_w = &(w->link);
+ next_w = w->link;
+ continue;
+ }
- ASSERT(get_itbl(t)->type == TSO);
- switch (t->what_next) {
- case ThreadRelocated:
- next = t->link;
- *prev = next;
- continue;
- case ThreadKilled:
- case ThreadComplete:
- // finshed or died. The thread might still be alive, but we
- // don't keep it on the all_threads list. Don't forget to
- // stub out its global_link field.
- next = t->global_link;
- t->global_link = END_TSO_QUEUE;
- *prev = next;
- continue;
- default:
- ;
+ default:
+ barf("traverse_weak_ptr_list: not WEAK");
+ }
}
+
+ /* If we didn't make any changes, then we can go round and kill all
+ * the dead weak pointers. The old_weak_ptr list is used as a list
+ * of pending finalizers later on.
+ */
+ if (flag == rtsFalse) {
+ for (w = old_weak_ptr_list; w; w = w->link) {
+ w->finalizer = evacuate(w->finalizer);
+ }
- if (tmp == NULL) {
- // not alive (yet): leave this thread on the old_all_threads list.
- prev = &(t->global_link);
- next = t->global_link;
- continue;
- }
- else {
- // alive: move this thread onto the all_threads list.
- next = t->global_link;
- t->global_link = all_threads;
- all_threads = t;
- *prev = next;
- break;
+ // Next, move to the WeakThreads stage after fully
+ // scavenging the finalizers we've just evacuated.
+ weak_stage = WeakThreads;
}
- }
- }
- /* If we didn't make any changes, then we can go round and kill all
- * the dead weak pointers. The old_weak_ptr list is used as a list
- * of pending finalizers later on.
- */
- if (flag == rtsFalse) {
- cleanup_weak_ptr_list(&old_weak_ptr_list);
- for (w = old_weak_ptr_list; w; w = w->link) {
- w->finalizer = evacuate(w->finalizer);
- }
+ return rtsTrue;
- /* And resurrect any threads which were about to become garbage.
- */
- {
- StgTSO *t, *tmp, *next;
- for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
- next = t->global_link;
- (StgClosure *)tmp = evacuate((StgClosure *)t);
- tmp->global_link = resurrected_threads;
- resurrected_threads = tmp;
+ case WeakThreads:
+ /* Now deal with the all_threads list, which behaves somewhat like
+ * the weak ptr list. If we discover any threads that are about to
+ * become garbage, we wake them up and administer an exception.
+ */
+ {
+ StgTSO *t, *tmp, *next, **prev;
+
+ prev = &old_all_threads;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+
+ (StgClosure *)tmp = isAlive((StgClosure *)t);
+
+ if (tmp != NULL) {
+ t = tmp;
+ }
+
+ ASSERT(get_itbl(t)->type == TSO);
+ switch (t->what_next) {
+ case ThreadRelocated:
+ next = t->link;
+ *prev = next;
+ continue;
+ case ThreadKilled:
+ case ThreadComplete:
+ // finished or died. The thread might still be alive, but we
+ // don't keep it on the all_threads list. Don't forget to
+ // stub out its global_link field.
+ next = t->global_link;
+ t->global_link = END_TSO_QUEUE;
+ *prev = next;
+ continue;
+ default:
+ ;
+ }
+
+ if (tmp == NULL) {
+ // not alive (yet): leave this thread on the
+ // old_all_threads list.
+ prev = &(t->global_link);
+ next = t->global_link;
+ }
+ else {
+ // alive: move this thread onto the all_threads list.
+ next = t->global_link;
+ t->global_link = all_threads;
+ all_threads = t;
+ *prev = next;
+ }
+ }
}
- }
+
+ /* And resurrect any threads which were about to become garbage.
+ */
+ {
+ StgTSO *t, *tmp, *next;
+ for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->global_link;
+ (StgClosure *)tmp = evacuate((StgClosure *)t);
+ tmp->global_link = resurrected_threads;
+ resurrected_threads = tmp;
+ }
+ }
+
+ weak_stage = WeakDone; // *now* we're done,
+ return rtsTrue; // but one more round of scavenging, please
- weak_done = rtsTrue;
+ default:
+ barf("traverse_weak_ptr_list");
}
- return rtsTrue;
}
/* -----------------------------------------------------------------------------
static void
-cleanup_weak_ptr_list ( StgWeak **list )
+mark_weak_ptr_list ( StgWeak **list )
{
StgWeak *w, **last_w;
last_w = list;
for (w = *list; w; w = w->link) {
-
- if (get_itbl(w)->type == EVACUATED) {
- w = (StgWeak *)((StgEvacuated *)w)->evacuee;
- *last_w = w;
- }
-
- if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) {
+ // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
+ ASSERT(w->header.info == &stg_DEAD_WEAK_info
+ || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
(StgClosure *)w = evacuate((StgClosure *)w);
*last_w = w;
- }
- last_w = &(w->link);
+ last_w = &(w->link);
}
}
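Note the change of role relative to cleanup_weak_ptr_list: instead of
pruning the list after the fact, mark_weak_ptr_list runs before the roots
are followed (see the call near the top of GarbageCollect) and evacuates
every weak object itself, so the weak pointers, and with them their
finalizers, stay alive even when their keys turn out to be dead.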
loop:
bd = Bdescr((P_)p);
+
// ignore closures in generations that we're not collecting.
if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
return p;
}
// large objects have an evacuated flag
- if ((bd->flags & BF_LARGE) && (bd->flags & BF_EVACUATED)) {
- return p;
+ if (bd->flags & BF_LARGE) {
+ if (bd->flags & BF_EVACUATED) {
+ return p;
+ } else {
+ return NULL;
+ }
}
// check the mark bit for compacted steps
if (bd->step->is_compacted && is_marked((P_)p,bd)) {
copy(StgClosure *src, nat size, step *stp)
{
P_ to, from, dest;
+#ifdef PROFILING
+ // @LDV profiling
+ nat size_org = size;
+#endif
TICK_GC_WORDS_COPIED(size);
/* Find out where we're going, using the handy "to" pointer in
dest = stp->hp;
stp->hp = to;
upd_evacuee(src,(StgClosure *)dest);
+#ifdef PROFILING
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ SET_EVACUAEE_FOR_LDV(src, size_org);
+#endif
return (StgClosure *)dest;
}
*/
-static __inline__ StgClosure *
+static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
P_ dest, to, from;
+#ifdef PROFILING
+ // @LDV profiling
+ nat size_to_copy_org = size_to_copy;
+#endif
TICK_GC_WORDS_COPIED(size_to_copy);
if (stp->gen_no < evac_gen) {
dest = stp->hp;
stp->hp += size_to_reserve;
upd_evacuee(src,(StgClosure *)dest);
+#ifdef PROFILING
+ // We store the size of the just evacuated object in the LDV word so that
+ // the profiler can guess the position of the next object later.
+ // We use size_to_reserve rather than size_to_copy_org here, because
+ // the closure now occupies size_to_reserve words.
+ SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
+ // fill the slop
+ if (size_to_reserve > size_to_copy_org)   // same test, no unsigned wrap
+ FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
+#endif
return (StgClosure *)dest;
}
bdescr *bd = Bdescr(p);
step *stp;
- // should point to the beginning of the block
- ASSERT(((W_)p & BLOCK_MASK) == 0);
-
+ // object must be at the beginning of the block (or be a ByteArray)
+ ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
+ (((W_)p & BLOCK_MASK) == 0));
+
// already evacuated?
if (bd->flags & BF_EVACUATED) {
/* Don't forget to set the failed_to_evac flag if we didn't get
if (!is_marked((P_)q,bd)) {
mark((P_)q,bd);
if (mark_stack_full()) {
- barf("ToDo: mark stack full");
+ mark_stack_overflowed = rtsTrue;
+ reset_mark_stack();
}
push_mark_stack((P_)q);
}
case CONSTR_1_1:
case CONSTR_0_2:
case CONSTR_STATIC:
+ case CONSTR_NOCAF_STATIC:
{
- StgWord32 offset = info->layout.selector_offset;
+ StgWord offset = info->layout.selector_offset;
// check that the size is in range
ASSERT(offset <
// perform the selection!
q = selectee->payload[offset];
+ if (major_gc) { TICK_GC_SEL_MAJOR(); } else { TICK_GC_SEL_MINOR(); }
/* if we're already in to-space, there's no need to continue
* with the evacuation, just update the source address with
thunk_selector_depth--;
goto selector_loop;
}
- }
- // otherwise, fall through...
+ } else {
+ TICK_GC_SEL_ABANDONED();
+ // and fall through...
+ }
# endif
case AP_UPD:
if (evac_gen > 0) { // optimisation
StgClosure *p = ((StgEvacuated*)q)->evacuee;
if (Bdescr((P_)p)->gen_no < evac_gen) {
- IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
failed_to_evac = rtsTrue;
TICK_GC_FAILED_PROMOTION();
}
void
move_TSO(StgTSO *src, StgTSO *dest)
{
- int diff;
+ ptrdiff_t diff;
// relocate the stack pointers...
diff = (StgPtr)dest - (StgPtr)src; // In *words*
-------------------------------------------------------------------------- */
StgTSO *
-relocate_stack(StgTSO *dest, int diff)
+relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
StgUpdateFrame *su;
StgCatchFrame *cf;
}
case IND_PERM:
- if (stp->gen_no != 0) {
- SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
- }
+ if (stp->gen->no != 0) {
+#ifdef PROFILING
+ // @LDV profiling
+ // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
+ // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
+ LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
+#endif
+ //
+ // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+ //
+ SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that p has just been created.
+ LDV_recordCreate((StgClosure *)p);
+#endif
+ }
// fall through
case IND_OLDGEN_PERM:
((StgIndOldGen *)p)->indirectee =
static void
scavenge_mark_stack(void)
{
- StgPtr p;
+ StgPtr p, q;
StgInfoTable *info;
nat saved_evac_gen;
evac_gen = oldest_gen->no;
saved_evac_gen = evac_gen;
+linear_scan:
while (!mark_stack_empty()) {
p = pop_mark_stack();
info = get_itbl((StgClosure *)p);
ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
+ q = p;
switch (info->type) {
case MVAR:
p, info_type((StgClosure *)p)));
break;
}
-#endif
+#endif // PAR
default:
barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
if (failed_to_evac) {
failed_to_evac = rtsFalse;
- mkMutCons((StgClosure *)p, &generations[evac_gen]);
+ mkMutCons((StgClosure *)q, &generations[evac_gen]);
}
+
+ // mark the next bit to indicate "scavenged"
+ mark(q+1, Bdescr(q));
} // while (!mark_stack_empty())
-}
+
+ // start a new linear scan if the mark stack overflowed at some point
+ if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
+ IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
+ mark_stack_overflowed = rtsFalse;
+ oldgen_scan_bd = oldest_gen->steps[0].blocks;
+ oldgen_scan = oldgen_scan_bd->start;
+ }
+
+ if (oldgen_scan_bd) {
+ // push a new thing on the mark stack
+ loop:
+ // find a closure that is marked but not scavenged, and start
+ // from there.
+ while (oldgen_scan < oldgen_scan_bd->free
+ && !is_marked(oldgen_scan,oldgen_scan_bd)) {
+ oldgen_scan++;
+ }
+
+ if (oldgen_scan < oldgen_scan_bd->free) {
+
+ // already scavenged?
+ if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
+ oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+ goto loop;
+ }
+ push_mark_stack(oldgen_scan);
+ // ToDo: bump the linear scan by the actual size of the object
+ oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+ goto linear_scan;
+ }
+
+ oldgen_scan_bd = oldgen_scan_bd->link;
+ if (oldgen_scan_bd != NULL) {
+ oldgen_scan = oldgen_scan_bd->start;
+ goto loop;
+ }
+ }
+}
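The linear scan depends on the two-bit convention visible above: the mark
bit at an object's first word means "reachable", and the bit at its second
word means "already scavenged" (set via mark(q+1, Bdescr(q)) in the main
loop). Hypothetical helpers, just to name the pairing:

    // Hypothetical helpers (not in the patch) naming the convention:
    static inline rtsBool is_live_mark( StgPtr p, bdescr *bd )
    { return is_marked(p, bd) != 0; }    // set when the object is first reached
    static inline rtsBool is_scavenged_mark( StgPtr p, bdescr *bd )
    { return is_marked(p+1, bd) != 0; }  // set once its fields are scavenged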
/* -----------------------------------------------------------------------------
Scavenge one object.
-------------------------------------------------------------------------- */
static rtsBool
-scavenge_one(StgClosure *p)
+scavenge_one(StgPtr p)
{
- const StgInfoTable *info;
- rtsBool no_luck;
-
- ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
- || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
-
- info = get_itbl(p);
-
- switch (info -> type) {
-
- case FUN:
- case FUN_1_0: // hardly worth specialising these guys
- case FUN_0_1:
- case FUN_1_1:
- case FUN_0_2:
- case FUN_2_0:
- case THUNK:
- case THUNK_1_0:
- case THUNK_0_1:
- case THUNK_1_1:
- case THUNK_0_2:
- case THUNK_2_0:
- case CONSTR:
- case CONSTR_1_0:
- case CONSTR_0_1:
- case CONSTR_1_1:
- case CONSTR_0_2:
- case CONSTR_2_0:
- case WEAK:
- case FOREIGN:
- case IND_PERM:
- case IND_OLDGEN_PERM:
+ const StgInfoTable *info;
+ nat saved_evac_gen = evac_gen;
+ rtsBool no_luck;
+
+ ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
+ || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
+
+ info = get_itbl((StgClosure *)p);
+
+ switch (info->type) {
+
+ case FUN:
+ case FUN_1_0: // hardly worth specialising these guys
+ case FUN_0_1:
+ case FUN_1_1:
+ case FUN_0_2:
+ case FUN_2_0:
+ case THUNK:
+ case THUNK_1_0:
+ case THUNK_0_1:
+ case THUNK_1_1:
+ case THUNK_0_2:
+ case THUNK_2_0:
+ case CONSTR:
+ case CONSTR_1_0:
+ case CONSTR_0_1:
+ case CONSTR_1_1:
+ case CONSTR_0_2:
+ case CONSTR_2_0:
+ case WEAK:
+ case FOREIGN:
+ case IND_PERM:
+ case IND_OLDGEN_PERM:
{
- StgPtr q, end;
-
- end = (P_)p->payload + info->layout.payload.ptrs;
- for (q = (P_)p->payload; q < end; q++) {
- (StgClosure *)*q = evacuate((StgClosure *)*q);
- }
- break;
+ StgPtr q, end;
+
+ end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
+ (StgClosure *)*q = evacuate((StgClosure *)*q);
+ }
+ break;
}
-
- case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
- case BLACKHOLE:
- break;
-
- case THUNK_SELECTOR:
+
+ case CAF_BLACKHOLE:
+ case SE_CAF_BLACKHOLE:
+ case SE_BLACKHOLE:
+ case BLACKHOLE:
+ break;
+
+ case THUNK_SELECTOR:
{
- StgSelector *s = (StgSelector *)p;
- s->selectee = evacuate(s->selectee);
- break;
+ StgSelector *s = (StgSelector *)p;
+ s->selectee = evacuate(s->selectee);
+ break;
}
- case AP_UPD: /* same as PAPs */
- case PAP:
- /* Treat a PAP just like a section of stack, not forgetting to
- * evacuate the function pointer too...
- */
- {
- StgPAP* pap = (StgPAP *)p;
+ case ARR_WORDS:
+ // nothing to follow
+ break;
- pap->fun = evacuate(pap->fun);
- scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
- break;
+ case MUT_ARR_PTRS:
+ {
+ // follow everything
+ StgPtr next;
+
+ evac_gen = 0; // repeatedly mutable
+ recordMutable((StgMutClosure *)p);
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsFalse;
+ break;
}
- case IND_OLDGEN:
- /* This might happen if for instance a MUT_CONS was pointing to a
- * THUNK which has since been updated. The IND_OLDGEN will
- * be on the mutable list anyway, so we don't need to do anything
- * here.
- */
- break;
+ case MUT_ARR_PTRS_FROZEN:
+ {
+ // follow everything
+ StgPtr next;
+
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ break;
+ }
- case MUT_ARR_PTRS_FROZEN:
- {
- // follow everything
- StgPtr q, next;
+ case TSO:
+ {
+ StgTSO *tso = (StgTSO *)p;
+
+ evac_gen = 0; // repeatedly mutable
+ scavengeTSO(tso);
+ recordMutable((StgMutClosure *)tso);
+ evac_gen = saved_evac_gen;
+ failed_to_evac = rtsFalse;
+ break;
+ }
+
+ case AP_UPD:
+ case PAP:
+ {
+ StgPAP* pap = (StgPAP *)p;
+ pap->fun = evacuate(pap->fun);
+ scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
+ break;
+ }
- q = (StgPtr)p;
- next = q + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (q = (P_)((StgMutArrPtrs *)p)->payload; q < next; q++) {
- (StgClosure *)*q = evacuate((StgClosure *)*q);
- }
- break;
- }
+ case IND_OLDGEN:
+ // This might happen if for instance a MUT_CONS was pointing to a
+ // THUNK which has since been updated. The IND_OLDGEN will
+ // be on the mutable list anyway, so we don't need to do anything
+ // here.
+ break;
- default:
- barf("scavenge_one: strange object %d", (int)(info->type));
- }
+ default:
+ barf("scavenge_one: strange object %d", (int)(info->type));
+ }
- no_luck = failed_to_evac;
- failed_to_evac = rtsFalse;
- return (no_luck);
+ no_luck = failed_to_evac;
+ failed_to_evac = rtsFalse;
+ return (no_luck);
}
/* -----------------------------------------------------------------------------
} else {
size = gen->steps[0].scan - start;
}
- fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
+ belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
}
#endif
p->mut_link = NULL;
}
continue;
-
+
case MUT_CONS:
/* MUT_CONS is a kind of MUT_VAR, except it that we try to remove
* it from the mutable list if possible by promoting whatever it
* points to.
*/
- scavenge_one((StgClosure *)((StgMutVar *)p)->var);
- if (failed_to_evac == rtsTrue) {
+ if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
/* didn't manage to promote everything, so put the
* MUT_CONS back on the list.
*/
- failed_to_evac = rtsFalse;
p->mut_link = new_list;
new_list = p;
}
continue;
-
+
default:
// shouldn't have anything else on the mutables list
barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
continue;
}
+ // Happens if a MUT_ARR_PTRS in the old generation is frozen
+ case MUT_ARR_PTRS_FROZEN:
+ {
+ StgPtr end, q;
+
+ evac_gen = gen->no;
+ end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
+ (StgClosure *)*q = evacuate((StgClosure *)*q);
+ }
+ evac_gen = 0;
+ p->mut_link = NULL;
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ mkMutCons((StgClosure *)p, gen);
+ }
+ continue;
+ }
+
case MUT_VAR:
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
p->mut_link = gen->mut_list;
gen->mut_list = p;
continue;
-
+
case MVAR:
{
StgMVar *mvar = (StgMVar *)p;
*/
if (failed_to_evac) {
failed_to_evac = rtsFalse;
- scavenged_static_objects = STATIC_LINK(info,p);
+ scavenged_static_objects = IND_STATIC_LINK(p);
((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
oldest_gen->mut_once_list = (StgMutClosure *)ind;
}
{
StgPtr q;
const StgInfoTable* info;
- StgWord32 bitmap;
+ StgWord bitmap;
//IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
scavenge_srt(info);
continue;
- // large bitmap (> 32 entries)
+ // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
case RET_BIG:
case RET_VEC_BIG:
{
for (i=0; i<large_bitmap->size; i++) {
bitmap = large_bitmap->bitmap[i];
- q = p + sizeof(W_) * 8;
+ q = p + BITS_IN(W_);
while (bitmap != 0) {
if ((bitmap & 1) == 0) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
scavenge_large(step *stp)
{
bdescr *bd;
- StgPtr p, q;
- const StgInfoTable* info;
- nat saved_evac_gen = evac_gen; // used for temporarily changing evac_gen
+ StgPtr p;
bd = stp->new_large_objects;
stp->new_large_objects = bd->link;
dbl_link_onto(bd, &stp->scavenged_large_objects);
- p = bd->start;
- info = get_itbl((StgClosure *)p);
-
- // only certain objects can be "large"...
- q = p;
- switch (info->type) {
-
- case ARR_WORDS:
- // nothing to follow
- break;
-
- case MUT_ARR_PTRS:
- {
- // follow everything
- StgPtr next;
-
- evac_gen = 0; // repeatedly mutable
- recordMutable((StgMutClosure *)p);
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- (StgClosure *)*p = evacuate((StgClosure *)*p);
- }
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsFalse;
- break;
- }
-
- case MUT_ARR_PTRS_FROZEN:
- {
- // follow everything
- StgPtr next;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- (StgClosure *)*p = evacuate((StgClosure *)*p);
- }
- break;
- }
-
- case TSO:
- {
- StgTSO *tso = (StgTSO *)p;
-
- evac_gen = 0; // repeatedly mutable
- scavengeTSO(tso);
- recordMutable((StgMutClosure *)tso);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsFalse;
- break;
- }
-
- case AP_UPD:
- case PAP:
- {
- StgPAP* pap = (StgPAP *)p;
- pap->fun = evacuate(pap->fun);
- scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
- break;
- }
-
- default:
- barf("scavenge_large: unknown/strange object %d", (int)(info->type));
- }
+ // update the block count in this step.
+ stp->n_scavenged_large_blocks += bd->blocks;
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- mkMutCons((StgClosure *)q, &generations[evac_gen]);
+ p = bd->start;
+ if (scavenge_one(p)) {
+ mkMutCons((StgClosure *)p, stp->gen);
}
}
}
}
void
-scavengeCAFs( void )
+markCAFs( evac_fn evac )
{
StgIndStatic *c;
- evac_gen = 0;
for (c = (StgIndStatic *)caf_list; c != NULL;
c = (StgIndStatic *)c->static_link)
{
- c->indirectee = evacuate(c->indirectee);
+ evac(&c->indirectee);
}
}
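Generalising scavengeCAFs into markCAFs(evac_fn) lets the same list walk
serve more than one traversal: the collector passes mark_root (see the
GarbageCollect changes above), while other root-walkers, retainer profiling
for instance, can supply their own evac_fn rather than duplicating the loop.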
ASSERT(info->type == IND_STATIC);
if (STATIC_LINK(info,p) == NULL) {
- IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
+ IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
// black hole it
SET_INFO(p,&stg_BLACKHOLE_info);
p = STATIC_LINK2(info,p);
}
- // fprintf(stderr, "%d CAFs live\n", i);
+ // belch("%d CAFs live", i);
}
#endif
if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
- fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
+ belch("Unexpected lazy BHing required at 0x%04lx",(long)bh);
+#endif
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh is now dead.
+ LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif
SET_INFO(bh,&stg_BLACKHOLE_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh has just been created.
+ LDV_recordCreate(bh);
+#endif
}
update_frame = update_frame->link;
StgClosure *updatee_bypass = frame->updatee;
#if DEBUG
- IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
+ IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
squeezes++;
#endif
bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
- fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
+ belch("Unexpected lazy BHing required at 0x%04lx",(long)bh);
#endif
#ifdef DEBUG
/* zero out the slop so that the sanity checker can tell
{
StgInfoTable *info = get_itbl(bh);
nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
- /* don't zero out slop for a THUNK_SELECTOR, because it's layout
+ /* don't zero out slop for a THUNK_SELECTOR, because its layout
* info is used for a different purpose, and it's exactly the
* same size as a BLACKHOLE in any case.
*/
}
}
#endif
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh is now dead.
+ LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
+#endif
+ //
+ // Todo: maybe use SET_HDR() and remove LDV_recordCreate()?
+ //
SET_INFO(bh,&stg_BLACKHOLE_info);
+#ifdef PROFILING
+ // @LDV profiling
+ // We pretend that bh has just been created.
+ LDV_recordCreate(bh);
+#endif
}
}
else
next_frame_bottom = tso->sp - 1;
-#if DEBUG
+#if 0
IF_DEBUG(gc,
- fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
- displacement))
+ belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
+ displacement))
#endif
while (sp >= next_frame_bottom) {
tso->sp += displacement;
tso->su = prev_frame;
-#if DEBUG
+#if 0
IF_DEBUG(gc,
- fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
+ belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
#endif
}