/* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.11 1999/02/05 14:48:01 simonm Exp $
+ * $Id: Storage.c,v 1.48 2001/08/09 12:46:06 sewardj Exp $
+ *
+ * (c) The GHC Team, 1998-1999
*
* Storage manager front end
 *
 * ---------------------------------------------------------------------------*/
#include "Hooks.h"
#include "BlockAlloc.h"
#include "MBlock.h"
-#include "gmp.h"
#include "Weak.h"
#include "Sanity.h"
#include "Storage.h"
+#include "Schedule.h"
#include "StoragePriv.h"
-bdescr *current_nursery; /* next available nursery block, or NULL */
+#ifndef SMP
nat nursery_blocks; /* number of blocks in the nursery */
+#endif
StgClosure *caf_list = NULL;
bdescr *small_alloc_list; /* allocate()d small objects */
bdescr *large_alloc_list; /* allocate()d large objects */
+bdescr *pinned_object_block; /* allocate pinned objects into this block */
nat alloc_blocks; /* number of allocate()d blocks since GC */
nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
generation *oldest_gen; /* oldest generation, for convenience */
step *g0s0; /* generation 0, step 0, for convenience */
+lnat total_allocated = 0; /* total memory allocated during run */
+
+/*
+ * Storage manager mutex: protects all the above state from
+ * simultaneous access by two STG threads.
+ */
+#ifdef SMP
+pthread_mutex_t sm_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
/*
* Forward references
*/
static void *stgAllocForGMP   (size_t size_in_bytes);
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void  stgDeallocForGMP (void *ptr, size_t size);

void
-initStorage (void)
+initStorage( void )
{
nat g, s;
- step *step;
+ step *stp;
generation *gen;
- if (RtsFlags.GcFlags.heapSizeSuggestion >
+ /* If we're doing heap profiling, we want a two-space heap with a
+ * fixed-size allocation area so that we get roughly even-spaced
+ * samples.
+ */
+
+#if defined(PROFILING) || defined(DEBUG)
+ if (RtsFlags.ProfFlags.doHeapProfile) {
+ RtsFlags.GcFlags.generations = 1;
+ RtsFlags.GcFlags.steps = 1;
+ RtsFlags.GcFlags.oldGenFactor = 0;
+ RtsFlags.GcFlags.heapSizeSuggestion = 0;
+ }
+#endif
+
+ if (RtsFlags.GcFlags.maxHeapSize != 0 &&
+ RtsFlags.GcFlags.heapSizeSuggestion >
RtsFlags.GcFlags.maxHeapSize) {
- barf("Suggested heap size (-H<size>) is larger than max. heap size (-M<size>)\n");
+ RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
}
initBlockAllocator();
/* Initialise all steps */
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
- step = &generations[g].steps[s];
- step->no = s;
- step->blocks = NULL;
- step->n_blocks = 0;
- step->gen = &generations[g];
- step->hp = NULL;
- step->hpLim = NULL;
- step->hp_bd = NULL;
- step->large_objects = NULL;
- step->new_large_objects = NULL;
- step->scavenged_large_objects = NULL;
+ stp = &generations[g].steps[s];
+ stp->no = s;
+ stp->blocks = NULL;
+ stp->n_blocks = 0;
+ stp->gen = &generations[g];
+ stp->gen_no = g;
+ stp->hp = NULL;
+ stp->hpLim = NULL;
+ stp->hp_bd = NULL;
+ stp->scan = NULL;
+ stp->scan_bd = NULL;
+ stp->large_objects = NULL;
+ stp->n_large_blocks = 0;
+ stp->new_large_objects = NULL;
+ stp->scavenged_large_objects = NULL;
+ stp->n_scavenged_large_blocks = 0;
+ stp->is_compacted = 0;
+ stp->bitmap = NULL;
}
}

  /* Set up the destination pointers in each younger generation */
  for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
    for (s = 0; s < generations[g].n_steps-1; s++) {
      generations[g].steps[s].to = &generations[g].steps[s+1];
    }
    generations[g].steps[s].to = &generations[g+1].steps[0];
}
- /* The oldest generation has one step and its destination is the
- * same step. */
+ /* The oldest generation has one step and it is compacted. */
+ if (RtsFlags.GcFlags.compact) {
+ oldest_gen->steps[0].is_compacted = 1;
+ }
oldest_gen->steps[0].to = &oldest_gen->steps[0];
/* generation 0 is special: that's the nursery */
  /* G0S0: the allocation area.  Policy: keep the allocation area
   * small to begin with, even if we have a large suggested heap
   * size.  Reason: we're going to do a major collection first, and we
   * don't want it to be a big one.  This vague idea is borne out by
   * rigorous experimental evidence.
   */
- step = &generations[0].steps[0];
- g0s0 = step;
- nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
- step->blocks = allocNursery(NULL, nursery_blocks);
- step->n_blocks = nursery_blocks;
- current_nursery = step->blocks;
- /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
+ g0s0 = &generations[0].steps[0];
+
+ allocNurseries();
weak_ptr_list = NULL;
caf_list = NULL;
alloc_blocks = 0;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
-#ifdef COMPILER
/* Tell GNU multi-precision pkg about our custom alloc functions */
mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
+
+#ifdef SMP
+ pthread_mutex_init(&sm_mutex, NULL);
#endif
- IF_DEBUG(gc, stat_describe_gens());
+ IF_DEBUG(gc, statDescribeGens());
+}
+
+void
+exitStorage (void)
+{
+ stat_exit(calcAllocated());
}
-extern bdescr *
+/* -----------------------------------------------------------------------------
+ CAF management.
+
+ The entry code for every CAF does the following:
+
+ - builds a CAF_BLACKHOLE in the heap
+ - pushes an update frame pointing to the CAF_BLACKHOLE
+ - invokes UPD_CAF(), which:
+ - calls newCaf, below
+ - updates the CAF with a static indirection to the CAF_BLACKHOLE
+
+ Why do we build a BLACKHOLE in the heap rather than just updating
+ the thunk directly? It's so that we only need one kind of update
+ frame - otherwise we'd need a static version of the update frame too.
+
+ newCaf() does the following:
+
+ - it puts the CAF on the oldest generation's mut-once list.
+ This is so that we can treat the CAF as a root when collecting
+ younger generations.
+
+   For GHCi, we have additional requirements when dealing with CAFs:
+
+ - we must *retain* all dynamically-loaded CAFs ever entered,
+ just in case we need them again.
+ - we must be able to *revert* CAFs that have been evaluated, to
+ their pre-evaluated form.
+
+ To do this, we use an additional CAF list. When newCaf() is
+ called on a dynamically-loaded CAF, we add it to the CAF list
+ instead of the old-generation mutable list, and save away its
+ old info pointer (in caf->saved_info) for later reversion.
+
+ To revert all the CAFs, we traverse the CAF list and reset the
+ info pointer to caf->saved_info, then throw away the CAF list.
+ (see GC.c:revertCAFs()).
+
+ -- SDM 29/1/01
+
+ -------------------------------------------------------------------------- */
+
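+/* Illustrative only: a C sketch of the UPD_CAF() step described
+ * above.  The real UPD_CAF() is an RTS macro, not this code; the
+ * helper name below is made up, `bh' is the CAF_BLACKHOLE already
+ * built in the heap, and SET_INFO/stg_IND_STATIC_info are assumed
+ * to be the usual RTS names.
+ */
+static void
+updCAF_sketch( StgClosure *caf, StgClosure *bh )
+{
+    newCAF(caf);                           /* defined below */
+    ((StgInd *)caf)->indirectee = bh;      /* point at the CAF_BLACKHOLE */
+    SET_INFO(caf, &stg_IND_STATIC_info);   /* CAF is now a static indirection */
+}
+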
+void
+newCAF(StgClosure* caf)
+{
+ /* Put this CAF on the mutable list for the old generation.
+ * This is a HACK - the IND_STATIC closure doesn't really have
+ * a mut_link field, but we pretend it has - in fact we re-use
+ * the STATIC_LINK field for the time being, because when we
+ * come to do a major GC we won't need the mut_link field
+ * any more and can use it as a STATIC_LINK.
+ */
+ ACQUIRE_LOCK(&sm_mutex);
+
+ if (is_dynamically_loaded_rwdata_ptr((StgPtr)caf)) {
+ ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
+ ((StgIndStatic *)caf)->static_link = caf_list;
+ caf_list = caf;
+ } else {
+ ((StgIndStatic *)caf)->saved_info = NULL;
+ ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
+ oldest_gen->mut_once_list = (StgMutClosure *)caf;
+ }
+
+ RELEASE_LOCK(&sm_mutex);
+
+#ifdef PAR
+ /* If we are PAR or DIST then we never forget a CAF */
+ { globalAddr *newGA;
+ //belch("<##> Globalising CAF %08x %s",caf,info_type(caf));
+ newGA=makeGlobal(caf,rtsTrue); /*given full weight*/
+ ASSERT(newGA);
+ }
+#endif /* PAR */
+}
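+
+/* A minimal sketch of the CAF reversion walk described above - the
+ * real implementation lives in GC.c:revertCAFs().  The helper name
+ * is made up; it restores each CAF's saved info pointer and then
+ * throws the CAF list away.
+ */
+void
+revertCAFs_sketch( void )
+{
+    StgIndStatic *c;
+
+    for (c = (StgIndStatic *)caf_list; c != NULL;
+         c = (StgIndStatic *)c->static_link) {
+        c->header.info = c->saved_info;   /* back to the pre-evaluated form */
+        c->saved_info  = NULL;
+    }
+    caf_list = NULL;
+}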
+
+/* -----------------------------------------------------------------------------
+ Nursery management.
+ -------------------------------------------------------------------------- */
+
+void
+allocNurseries( void )
+{
+#ifdef SMP
+ {
+ Capability *cap;
+ bdescr *bd;
+
+ g0s0->blocks = NULL;
+ g0s0->n_blocks = 0;
+ for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+ cap->rNursery = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
+ cap->rCurrentNursery = cap->rNursery;
+      /* Set the back links to be equal to the Capability,
+       * so we can do slightly better informed locking.
+       */
+      for (bd = cap->rNursery; bd != NULL; bd = bd->link) {
+        bd->u.back = (bdescr *)cap;
+      }
+    }
+  }
+#else /* SMP */
+ nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
+ g0s0->blocks = allocNursery(NULL, nursery_blocks);
+ g0s0->n_blocks = nursery_blocks;
+ g0s0->to_blocks = NULL;
+ g0s0->n_to_blocks = 0;
+ MainRegTable.rNursery = g0s0->blocks;
+ MainRegTable.rCurrentNursery = g0s0->blocks;
+ /* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
+#endif
+}
+
+void
+resetNurseries( void )
+{
+ bdescr *bd;
+#ifdef SMP
+ Capability *cap;
+
+ /* All tasks must be stopped */
+ ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes);
+
+ for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+ for (bd = cap->rNursery; bd; bd = bd->link) {
+ bd->free = bd->start;
+ ASSERT(bd->gen_no == 0);
+ ASSERT(bd->step == g0s0);
+ IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
+ }
+ cap->rCurrentNursery = cap->rNursery;
+ }
+#else
+ for (bd = g0s0->blocks; bd; bd = bd->link) {
+ bd->free = bd->start;
+ ASSERT(bd->gen_no == 0);
+ ASSERT(bd->step == g0s0);
+ IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
+ }
+ MainRegTable.rNursery = g0s0->blocks;
+ MainRegTable.rCurrentNursery = g0s0->blocks;
+#endif
+}
+
+bdescr *
allocNursery (bdescr *last_bd, nat blocks)
{
  bdescr *bd;
  nat i;

  /* allocate `blocks' new blocks and chain them onto last_bd */
  for (i = 0; i < blocks; i++) {
    bd = allocBlock();
bd->link = last_bd;
bd->step = g0s0;
- bd->gen = g0;
- bd->evacuated = 0;
+ bd->gen_no = 0;
+ bd->flags = 0;
bd->free = bd->start;
last_bd = bd;
}
return last_bd;
}
-extern void
+void
resizeNursery ( nat blocks )
{
bdescr *bd;
+#ifdef SMP
+ barf("resizeNursery: can't resize in SMP mode");
+#endif
+
  if (nursery_blocks == blocks) {
    ASSERT(g0s0->n_blocks == blocks);
    return;
  }

  else if (nursery_blocks < blocks) {
    /* grow the nursery: chain the extra blocks onto the front */
    g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
  }

  else {
    /* shrink the nursery: hand surplus blocks back to the block allocator */
    bdescr *next_bd;
    for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
      next_bd = bd->link;
      freeGroup(bd);
      bd = next_bd;
    }
    g0s0->blocks = bd;
  }

  g0s0->n_blocks = nursery_blocks = blocks;
}
-void
-exitStorage (void)
-{
- lnat allocated;
- bdescr *bd;
-
- /* Return code ignored for now */
- /* ToDo: allocation figure is slightly wrong (see also GarbageCollect()) */
- allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
- for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
- allocated -= BLOCK_SIZE_W;
- }
- stat_exit(allocated);
-}
-
-void
-newCAF(StgClosure* caf)
-{
- /* Put this CAF on the mutable list for the old generation.
- * This is a HACK - the IND_STATIC closure doesn't really have
- * a mut_link field, but we pretend it has - in fact we re-use
- * the STATIC_LINK field for the time being, because when we
- * come to do a major GC we won't need the mut_link field
- * any more and can use it as a STATIC_LINK.
- */
- ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
- oldest_gen->mut_once_list = (StgMutClosure *)caf;
-
-#ifdef DEBUG
- {
- const StgInfoTable *info;
-
- info = get_itbl(caf);
- ASSERT(info->type == IND_STATIC);
- STATIC_LINK2(info,caf) = caf_list;
- caf_list = caf;
- }
-#endif
-}
-
/* -----------------------------------------------------------------------------
The allocate() interface
-------------------------------------------------------------------------- */
StgPtr
-allocate(nat n)
+allocate( nat n )
{
bdescr *bd;
StgPtr p;
- TICK_ALLOC_HEAP(n);
+ ACQUIRE_LOCK(&sm_mutex);
+
+ TICK_ALLOC_HEAP_NOCTR(n);
CCS_ALLOC(CCCS,n);
  /* big allocation (>LARGE_OBJECT_THRESHOLD) */
  if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
    nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
bd = allocGroup(req_blocks);
dbl_link_onto(bd, &g0s0->large_objects);
- bd->gen = g0;
+ bd->gen_no = 0;
bd->step = g0s0;
- bd->evacuated = 0;
+ bd->flags = BF_LARGE;
bd->free = bd->start;
  /* Large objects are likely to remain live for quite a while (eg.
   * running threads), so collecting early won't reclaim them; but we
   * now count these blocks in alloc_blocks, so that a burst of large
   * allocations still prompts a GC eventually.
   */
+ alloc_blocks += req_blocks;
+ RELEASE_LOCK(&sm_mutex);
return bd->start;
  /* small allocation (<LARGE_OBJECT_THRESHOLD) */
  } else if (small_alloc_list == NULL || alloc_Hp + n > alloc_HpLim) {
    /* no current block, or not enough room: close it off and start a new one */
    if (small_alloc_list) {
      small_alloc_list->free = alloc_Hp;
    }
    bd = allocBlock();
    bd->link = small_alloc_list;
    small_alloc_list = bd;
- bd->gen = g0;
+ bd->gen_no = 0;
bd->step = g0s0;
- bd->evacuated = 0;
+ bd->flags = 0;
alloc_Hp = bd->start;
alloc_HpLim = bd->start + BLOCK_SIZE_W;
alloc_blocks++;
}
-
+
p = alloc_Hp;
alloc_Hp += n;
+ RELEASE_LOCK(&sm_mutex);
return p;
}
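
/* Approximate the number of bytes allocated via allocate() since the
 * last GC: all the blocks we've handed out, minus the unused part of
 * the current allocation block.
 */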
-lnat allocated_bytes(void)
+lnat
+allocated_bytes( void )
{
return (alloc_blocks * BLOCK_SIZE_W - (alloc_HpLim - alloc_Hp));
}
+/* ---------------------------------------------------------------------------
+ Allocate a fixed/pinned object.
+
+ We allocate small pinned objects into a single block, allocating a
+ new block when the current one overflows. The block is chained
+ onto the large_object_list of generation 0 step 0.
+
+ NOTE: The GC can't in general handle pinned objects. This
+ interface is only safe to use for ByteArrays, which have no
+ pointers and don't require scavenging. It works because the
+ block's descriptor has the BF_LARGE flag set, so the block is
+ treated as a large object and chained onto various lists, rather
+ than the individual objects being copied. However, when it comes
+ to scavenge the block, the GC will only scavenge the first object.
+ The reason is that the GC can't linearly scan a block of pinned
+ objects at the moment (doing so would require using the
+ mostly-copying techniques). But since we're restricting ourselves
+ to pinned ByteArrays, not scavenging is ok.
+
+ This function is called by newPinnedByteArray# which immediately
+ fills the allocated memory with a MutableByteArray#.
+ ------------------------------------------------------------------------- */
+
+StgPtr
+allocatePinned( nat n )
+{
+ StgPtr p;
+ bdescr *bd = pinned_object_block;
+
+ ACQUIRE_LOCK(&sm_mutex);
+
+ TICK_ALLOC_HEAP_NOCTR(n);
+ CCS_ALLOC(CCCS,n);
+
+ // If the request is for a large object, then allocate()
+ // will give us a pinned object anyway.
+ if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
+ RELEASE_LOCK(&sm_mutex);
+ return allocate(n);
+ }
+
+ // If we don't have a block of pinned objects yet, or the current
+ // one isn't large enough to hold the new object, allocate a new one.
+ if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
+ pinned_object_block = bd = allocBlock();
+ dbl_link_onto(bd, &g0s0->large_objects);
+ bd->gen_no = 0;
+ bd->step = g0s0;
+ bd->flags = BF_LARGE;
+ bd->free = bd->start;
+ alloc_blocks++;
+ }
+
+ p = bd->free;
+ bd->free += n;
+ RELEASE_LOCK(&sm_mutex);
+ return p;
+}
+
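+/* Hypothetical usage sketch (the function name and `n' are made up):
+ * roughly what newPinnedByteArray# does with the memory returned by
+ * allocatePinned() - it fills it in with an ARR_WORDS closure of n
+ * payload words.
+ */
+StgPtr
+examplePinnedArrWords( nat n )
+{
+    StgArrWords *arr;
+    arr = (StgArrWords *)allocatePinned(sizeofW(StgArrWords) + n);
+    SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, n);
+    return (StgPtr)arr;
+}
+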
/* -----------------------------------------------------------------------------
   Allocation functions for GMP.

   These all use the allocate() interface - we can't have any garbage
   collection going on during a gmp operation, so we use allocate()
   which always succeeds.
   -------------------------------------------------------------------------- */

static void *
stgAllocForGMP (size_t size_in_bytes)
{
  StgArrWords* arr;
  nat data_size_in_words, total_size_in_words;

  /* should be a multiple of sizeof(StgWord) (whole number of limbs) */
  ASSERT(size_in_bytes % sizeof(W_) == 0);

  data_size_in_words  = size_in_bytes / sizeof(W_);
  total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;

  /* allocate and fill it in. */
arr = (StgArrWords *)allocate(total_size_in_words);
- SET_ARR_HDR(arr, &ARR_WORDS_info, CCCS, data_size_in_words);
+ SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);
/* and return a ptr to the goods inside the array */
return(BYTE_ARR_CTS(arr));
}
/* -----------------------------------------------------------------------------
- Stats and stuff
- -------------------------------------------------------------------------- */
+ * Stats and stuff
+ * -------------------------------------------------------------------------- */
+
+/* -----------------------------------------------------------------------------
+ * calcAllocated()
+ *
+ * Approximate how much we've allocated: number of blocks in the
+ * nursery + blocks allocated via allocate() - unused nursery blocks.
+ * This leaves a little slop at the end of each block, and doesn't
+ * take into account large objects (ToDo).
+ * -------------------------------------------------------------------------- */
+
+lnat
+calcAllocated( void )
+{
+ nat allocated;
+ bdescr *bd;
+
+#ifdef SMP
+ Capability *cap;
+
+ /* All tasks must be stopped. Can't assert that all the
+ capabilities are owned by the scheduler, though: one or more
+ tasks might have been stopped while they were running (non-main)
+ threads. */
+ /* ASSERT(n_free_capabilities == RtsFlags.ParFlags.nNodes); */
+
+ allocated =
+ n_free_capabilities * RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W
+ + allocated_bytes();
+
+ for (cap = free_capabilities; cap != NULL; cap = cap->link) {
+ for ( bd = cap->rCurrentNursery->link; bd != NULL; bd = bd->link ) {
+ allocated -= BLOCK_SIZE_W;
+ }
+ if (cap->rCurrentNursery->free < cap->rCurrentNursery->start
+ + BLOCK_SIZE_W) {
+ allocated -= (cap->rCurrentNursery->start + BLOCK_SIZE_W)
+ - cap->rCurrentNursery->free;
+ }
+ }
+
+#else /* !SMP */
+ bdescr *current_nursery = MainRegTable.rCurrentNursery;
+
+ allocated = (nursery_blocks * BLOCK_SIZE_W) + allocated_bytes();
+ for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
+ allocated -= BLOCK_SIZE_W;
+ }
+ if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
+ allocated -= (current_nursery->start + BLOCK_SIZE_W)
+ - current_nursery->free;
+ }
+#endif
+
+ total_allocated += allocated;
+ return allocated;
+}
/* Approximate the amount of live data in the heap.  To be called just
 * after garbage collection (see GarbageCollect()).
 */
extern lnat
calcLive(void)
{
nat g, s;
lnat live = 0;
- step *step;
+ step *stp;
if (RtsFlags.GcFlags.generations == 1) {
- live = g0s0->to_blocks * BLOCK_SIZE_W +
+ live = (g0s0->n_to_blocks - 1) * BLOCK_SIZE_W +
((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
+ return live;
}
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
/* approximate amount of live data (doesn't take into account slop
- * at end of each block).
- */
+ * at end of each block).
+ */
if (g == 0 && s == 0) {
continue;
}
- step = &generations[g].steps[s];
- live += step->n_blocks * BLOCK_SIZE_W +
- ((lnat)step->hp_bd->free -(lnat)step->hp_bd->start) / sizeof(W_);
+ stp = &generations[g].steps[s];
+ live += (stp->n_large_blocks + stp->n_blocks - 1) * BLOCK_SIZE_W;
+ if (stp->hp_bd != NULL) {
+ live += ((lnat)stp->hp_bd->free - (lnat)stp->hp_bd->start)
+ / sizeof(W_);
+ }
}
}
  return live;
}

extern lnat
calcNeeded(void)
{
- lnat needed = 0;
- nat g, s;
- step *step;
-
- for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- if (g == 0 && s == 0) { continue; }
- step = &generations[g].steps[s];
- if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
- needed += 2 * step->n_blocks;
- } else {
- needed += step->n_blocks;
- }
+ lnat needed = 0;
+ nat g, s;
+ step *stp;
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ if (g == 0 && s == 0) { continue; }
+ stp = &generations[g].steps[s];
+ if (generations[g].steps[0].n_blocks +
+ generations[g].steps[0].n_large_blocks
+ > generations[g].max_blocks
+ && stp->is_compacted == 0) {
+ needed += 2 * stp->n_blocks;
+ } else {
+ needed += stp->n_blocks;
+ }
+ }
}
- }
- return needed;
+ return needed;
}
/* -----------------------------------------------------------------------------
   Debugging

   memInventory() checks for memory leaks by counting up all the
   blocks we know about and comparing that to the number of blocks
   allegedly floating around in the system.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
-extern void
+void
memInventory(void)
{
nat g, s;
- step *step;
+ step *stp;
bdescr *bd;
lnat total_blocks = 0, free_blocks = 0;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
- step = &generations[g].steps[s];
- total_blocks += step->n_blocks;
+ stp = &generations[g].steps[s];
+ total_blocks += stp->n_blocks;
if (RtsFlags.GcFlags.generations == 1) {
/* two-space collector has a to-space too :-) */
- total_blocks += g0s0->to_blocks;
+ total_blocks += g0s0->n_to_blocks;
}
- for (bd = step->large_objects; bd; bd = bd->link) {
+ for (bd = stp->large_objects; bd; bd = bd->link) {
total_blocks += bd->blocks;
      /* hack for megablock groups: they have an extra block or two in
         the second and subsequent megablocks where the block
         descriptors would normally go.
      */
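      /* Worked example (illustrative numbers only): if a megablock
         holds 256 raw blocks and BLOCKS_PER_MBLOCK is 252, then a
         group with bd->blocks = 512 gives 512/256 = 2 megablocks,
         and we subtract (256 - 252) * 2 = 8 over-counted blocks.
      */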
if (bd->blocks > BLOCKS_PER_MBLOCK) {
total_blocks -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
- * bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE);
+ * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
}
}
}
/* count the blocks on the free list */
free_blocks = countFreeList();
- ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
-
-#if 0
if (total_blocks + free_blocks != mblocks_allocated *
BLOCKS_PER_MBLOCK) {
fprintf(stderr, "Blocks: %ld live + %ld free = %ld total (%ld around)\n",
total_blocks, free_blocks, total_blocks + free_blocks,
mblocks_allocated * BLOCKS_PER_MBLOCK);
}
-#endif
-}
-/* Full heap sanity check. */
+ ASSERT(total_blocks + free_blocks == mblocks_allocated * BLOCKS_PER_MBLOCK);
+}
-extern void
-checkSanity(nat N)
+static nat
+countBlocks(bdescr *bd)
{
- nat g, s;
-
- if (RtsFlags.GcFlags.generations == 1) {
- checkHeap(g0s0->to_space, NULL);
- checkChain(g0s0->large_objects);
- } else {
-
- for (g = 0; g <= N; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- if (g == 0 && s == 0) { continue; }
- checkHeap(generations[g].steps[s].blocks, NULL);
- }
+ nat n;
+ for (n=0; bd != NULL; bd=bd->link) {
+ n += bd->blocks;
}
- for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- checkHeap(generations[g].steps[s].blocks,
- generations[g].steps[s].blocks->start);
- checkChain(generations[g].steps[s].large_objects);
- }
+ return n;
+}
+
+/* Full heap sanity check. */
+void
+checkSanity( void )
+{
+ nat g, s;
+
+ if (RtsFlags.GcFlags.generations == 1) {
+ checkHeap(g0s0->to_blocks);
+ checkChain(g0s0->large_objects);
+ } else {
+
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ if (g == 0 && s == 0) { continue; }
+ checkHeap(generations[g].steps[s].blocks);
+ checkChain(generations[g].steps[s].large_objects);
+ ASSERT(countBlocks(generations[g].steps[s].blocks)
+ == generations[g].steps[s].n_blocks);
+ ASSERT(countBlocks(generations[g].steps[s].large_objects)
+ == generations[g].steps[s].n_large_blocks);
+ if (g > 0) {
+ checkMutableList(generations[g].mut_list, g);
+ checkMutOnceList(generations[g].mut_once_list, g);
+ }
+ }
+ }
+ checkFreeListSanity();
}
- checkFreeListSanity();
- }
}
#endif