/* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.4 1999/01/19 15:07:56 simonm Exp $
+ * $Id: Storage.c,v 1.18 1999/09/15 13:45:20 simonmar Exp $
+ *
+ * (c) The GHC Team, 1998-1999
*
* Storage manager front end
*
#include "MBlock.h"
#include "gmp.h"
#include "Weak.h"
+#include "Sanity.h"
#include "Storage.h"
#include "StoragePriv.h"
/*
* Forward references
*/
-static bdescr *allocNursery (nat blocks);
static void *stgAllocForGMP (size_t size_in_bytes);
static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
static void stgDeallocForGMP (void *ptr, size_t size);
{
nat g, s;
step *step;
+ generation *gen;
+
+ /* If we're doing heap profiling, we want a two-space heap with a
+ * fixed-size allocation area so that we get roughly even-spaced
+ * samples.
+ */
+#if defined(PROFILING) || defined(DEBUG)
+ if (RtsFlags.ProfFlags.doHeapProfile) {
+ RtsFlags.GcFlags.generations = 1;
+ RtsFlags.GcFlags.steps = 1;
+ RtsFlags.GcFlags.oldGenFactor = 0;
+ RtsFlags.GcFlags.heapSizeSuggestion = 0;
+ }
+#endif
+
+ if (RtsFlags.GcFlags.heapSizeSuggestion >
+ RtsFlags.GcFlags.maxHeapSize) {
+ RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
+ }
initBlockAllocator();
* sizeof(struct _generation),
"initStorage: gens");
- /* set up all generations */
+ /* Initialise all generations */
for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
- generations[g].no = g;
- generations[g].mut_list = END_MUT_LIST;
- generations[g].collections = 0;
- generations[g].failed_promotions = 0;
+ gen = &generations[g];
+ gen->no = g;
+ gen->mut_list = END_MUT_LIST;
+ gen->mut_once_list = END_MUT_LIST;
+ gen->collections = 0;
+ gen->failed_promotions = 0;
+ gen->max_blocks = 0;
}
- /* Oldest generation: one step */
- g = RtsFlags.GcFlags.generations-1;
- generations[g].n_steps = 1;
- generations[g].steps =
- stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");
- generations[g].max_blocks = RtsFlags.GcFlags.minOldGenSize;
- step = &generations[g].steps[0];
- step->no = 0;
- step->gen = &generations[g];
- step->blocks = NULL;
- step->n_blocks = 0;
- step->to = step; /* destination is this step */
- step->hp = NULL;
- step->hpLim = NULL;
- step->hp_bd = NULL;
-
- /* set up all except the oldest generation with 2 steps */
- for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
- generations[g].n_steps = 2;
- generations[g].steps = stgMallocBytes (2 * sizeof(struct _step),
- "initStorage: steps");
- generations[g].max_blocks = RtsFlags.GcFlags.minOldGenSize;
+ /* A couple of convenience pointers */
+ g0 = &generations[0];
+ oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
+
+ /* Allocate step structures in each generation */
+ if (RtsFlags.GcFlags.generations > 1) {
+    /* Only for multiple generations */
+
+ /* Oldest generation: one step */
+ oldest_gen->n_steps = 1;
+ oldest_gen->steps =
+ stgMallocBytes(1 * sizeof(struct _step), "initStorage: last step");
+
+    /* set up all except the oldest generation with the requested number of steps */
+ for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
+ generations[g].n_steps = RtsFlags.GcFlags.steps;
+ generations[g].steps =
+ stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
+ "initStorage: steps");
+ }
+
+ } else {
+ /* single generation, i.e. a two-space collector */
+ g0->n_steps = 1;
+ g0->steps = stgMallocBytes (sizeof(struct _step), "initStorage: steps");
}
- for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
+ /* Initialise all steps */
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
step = &generations[g].steps[s];
step->no = s;
step->blocks = NULL;
step->n_blocks = 0;
step->gen = &generations[g];
- if ( s == 1 ) {
- step->to = &generations[g+1].steps[0];
- } else {
- step->to = &generations[g].steps[s+1];
- }
step->hp = NULL;
step->hpLim = NULL;
step->hp_bd = NULL;
+ step->scan = NULL;
+ step->scan_bd = NULL;
step->large_objects = NULL;
step->new_large_objects = NULL;
step->scavenged_large_objects = NULL;
}
}
- oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
+ /* Set up the destination pointers in each younger gen. step */
+ for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
+ for (s = 0; s < generations[g].n_steps-1; s++) {
+ generations[g].steps[s].to = &generations[g].steps[s+1];
+ }
+ generations[g].steps[s].to = &generations[g+1].steps[0];
+ }
+
+ /* The oldest generation has one step and its destination is the
+ * same step. */
+ oldest_gen->steps[0].to = &oldest_gen->steps[0];
/* generation 0 is special: that's the nursery */
- g0 = &generations[0];
generations[0].max_blocks = 0;
- /* G0S0: the allocation area */
+ /* G0S0: the allocation area. Policy: keep the allocation area
+ * small to begin with, even if we have a large suggested heap
+ * size. Reason: we're going to do a major collection first, and we
+ * don't want it to be a big one. This vague idea is borne out by
+ * rigorous experimental evidence.
+ */
step = &generations[0].steps[0];
g0s0 = step;
- step->blocks = allocNursery(RtsFlags.GcFlags.minAllocAreaSize);
- step->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
+ step->blocks = allocNursery(NULL, nursery_blocks);
+ step->n_blocks = nursery_blocks;
current_nursery = step->blocks;
+ g0s0->to_space = NULL;
/* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
weak_ptr_list = NULL;
IF_DEBUG(gc, stat_describe_gens());
}
-static bdescr *
-allocNursery (nat blocks)
+extern bdescr *
+allocNursery (bdescr *last_bd, nat blocks)
{
- bdescr *last_bd, *bd;
+ bdescr *bd;
nat i;
- last_bd = NULL;
/* Allocate a nursery */
for (i=0; i < blocks; i++) {
bd = allocBlock();
return last_bd;
}
+/* Resize the nursery (G0S0) to exactly `blocks' blocks, growing or
+ * shrinking its block list as required.  Keeps the globals
+ * nursery_blocks and g0s0->n_blocks in sync with the actual list.
+ */
+extern void
+resizeNursery ( nat blocks )
+{
+  bdescr *bd;
+
+  if (nursery_blocks == blocks) {
+    /* already the requested size: nothing to do */
+    ASSERT(g0s0->n_blocks == blocks);
+    return;
+  }
+
+  else if (nursery_blocks < blocks) {
+    /* growing: allocate the extra blocks and chain them onto the
+     * existing list (assumes allocNursery links the new blocks onto
+     * its first argument -- TODO confirm against allocNursery).
+     */
+    IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
+			 blocks));
+    g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
+  }
+
+  else {
+    /* shrinking: free blocks from the front of the list until we are
+     * down to the requested size.  next_bd is saved before freeGroup
+     * because freeing invalidates bd->link.
+     */
+    bdescr *next_bd;
+
+    IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
+			 blocks));
+    for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
+      next_bd = bd->link;
+      freeGroup(bd);
+      bd = next_bd;
+    }
+    g0s0->blocks = bd;
+  }
+
+  /* record the new size in both the step and the global counter */
+  g0s0->n_blocks = nursery_blocks = blocks;
+}
+
void
exitStorage (void)
{
}
void
-recordMutable(StgMutClosure *p)
-{
- bdescr *bd;
-
- ASSERT(closure_MUTABLE(p));
-
- bd = Bdescr((P_)p);
-
- /* no need to bother in generation 0 */
- if (bd->gen == g0) {
- return;
- }
-
- if (p->mut_link == NULL) {
- p->mut_link = bd->gen->mut_list;
- bd->gen->mut_list = p;
- }
-}
-
-void
newCAF(StgClosure* caf)
{
- const StgInfoTable *info;
-
/* Put this CAF on the mutable list for the old generation.
* This is a HACK - the IND_STATIC closure doesn't really have
* a mut_link field, but we pretend it has - in fact we re-use
* come to do a major GC we won't need the mut_link field
* any more and can use it as a STATIC_LINK.
*/
- ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_list;
- oldest_gen->mut_list = (StgMutClosure *)caf;
+ ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
+ oldest_gen->mut_once_list = (StgMutClosure *)caf;
#ifdef DEBUG
- info = get_itbl(caf);
- ASSERT(info->type == IND_STATIC);
- STATIC_LINK2(info,caf) = caf_list;
- caf_list = caf;
+ {
+ const StgInfoTable *info;
+
+ info = get_itbl(caf);
+ ASSERT(info->type == IND_STATIC);
+#if 0
+ STATIC_LINK2(info,caf) = caf_list;
+ caf_list = caf;
+#endif
+ }
#endif
}
bdescr *bd;
StgPtr p;
- TICK_ALLOC_PRIM(n,wibble,wibble,wibble)
+ TICK_ALLOC_HEAP(n);
CCS_ALLOC(CCCS,n);
/* big allocation (>LARGE_OBJECT_THRESHOLD) */
}
/* -----------------------------------------------------------------------------
+ Stats and stuff
+ -------------------------------------------------------------------------- */
+
+/* Approximate the amount of live data in the heap. To be called just
+ * after garbage collection (see GarbageCollect()).
+ *
+ * Returns the live data in words.  The estimate counts each step's
+ * blocks as full except the last partially-filled one (hp_bd), so
+ * slop at the end of every other block is not accounted for.
+ */
+extern lnat
+calcLive(void)
+{
+  nat g, s;
+  lnat live = 0;
+  step *step;
+
+  if (RtsFlags.GcFlags.generations == 1) {
+    /* two-space collector: after GC the live data is in to-space */
+    live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W +
+      ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
+    return live;
+  }
+
+  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+    for (s = 0; s < generations[g].n_steps; s++) {
+      /* approximate amount of live data (doesn't take into account slop
+       * at end of each block).
+       */
+      if (g == 0 && s == 0) {
+	  /* G0S0 is the nursery: empty just after GC, so skip it */
+	  continue;
+      }
+      step = &generations[g].steps[s];
+      live += (step->n_blocks - 1) * BLOCK_SIZE_W +
+	((lnat)step->hp_bd->free - (lnat)step->hp_bd->start) / sizeof(W_);
+    }
+  }
+  return live;
+}
+
+/* Approximate the number of blocks that will be needed at the next
+ * garbage collection.
+ *
+ * Assume: all data currently live will remain live.  Steps that will
+ * be collected next time will therefore need twice as many blocks
+ * since all the data will be copied.
+ */
+extern lnat
+calcNeeded(void)
+{
+  lnat needed = 0;
+  nat g, s;
+  step *step;
+
+  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+    for (s = 0; s < generations[g].n_steps; s++) {
+      /* G0S0 is the nursery, which is resized separately */
+      if (g == 0 && s == 0) { continue; }
+      step = &generations[g].steps[s];
+      /* NOTE(review): the test deliberately looks at steps[0] of the
+       * generation, not the current step -- presumably whether a
+       * generation gets collected is decided by its first step's
+       * size; confirm against GarbageCollect().
+       */
+      if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
+	/* this generation looks due for collection: copying needs
+	 * double the space */
+	needed += 2 * step->n_blocks;
+      } else {
+	needed += step->n_blocks;
+      }
+    }
+  }
+  return needed;
+}
+
+/* -----------------------------------------------------------------------------
Debugging
memInventory() checks for memory leaks by counting up all the
lnat total_blocks = 0, free_blocks = 0;
/* count the blocks we current have */
+
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
step = &generations[g].steps[s];
total_blocks += step->n_blocks;
+ if (RtsFlags.GcFlags.generations == 1) {
+ /* two-space collector has a to-space too :-) */
+ total_blocks += g0s0->to_blocks;
+ }
for (bd = step->large_objects; bd; bd = bd->link) {
total_blocks += bd->blocks;
/* hack for megablock groups: they have an extra block or two in
#endif
}
+/* Full heap sanity check.
+ *
+ * N is the youngest generation just collected; generations 0..N have
+ * their data in `blocks' from the start (scan pointer NULL), while
+ * older generations are checked from the start of their first block.
+ * G0S0 (the nursery) is skipped since it holds unscavenged new data.
+ */
+
+extern void
+checkSanity(nat N)
+{
+  nat g, s;
+
+  if (RtsFlags.GcFlags.generations == 1) {
+    /* two-space collector: live data lives in to-space */
+    checkHeap(g0s0->to_space, NULL);
+    checkChain(g0s0->large_objects);
+  } else {
+
+    for (g = 0; g <= N; g++) {
+      for (s = 0; s < generations[g].n_steps; s++) {
+	if (g == 0 && s == 0) { continue; }
+	checkHeap(generations[g].steps[s].blocks, NULL);
+      }
+    }
+    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
+      for (s = 0; s < generations[g].n_steps; s++) {
+	checkHeap(generations[g].steps[s].blocks,
+		  generations[g].steps[s].blocks->start);
+	checkChain(generations[g].steps[s].large_objects);
+      }
+    }
+    /* NOTE(review): the free-list check only runs in the
+     * multi-generation branch -- confirm whether that is intentional
+     * for the two-space collector.
+     */
+    checkFreeListSanity();
+  }
+}
+
#endif