/* -----------------------------------------------------------------------------
- * $Id: Storage.c,v 1.6 1999/01/21 10:31:51 simonm Exp $
+ * $Id: Storage.c,v 1.16 1999/03/02 19:50:12 sof Exp $
+ *
+ * (c) The GHC Team, 1998-1999
*
* Storage manager front end
*
#include "MBlock.h"
#include "gmp.h"
#include "Weak.h"
+#include "Sanity.h"
#include "Storage.h"
#include "StoragePriv.h"
step *step;
generation *gen;
+ if (RtsFlags.GcFlags.heapSizeSuggestion >
+ RtsFlags.GcFlags.maxHeapSize) {
+ RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
+ }
+
initBlockAllocator();
/* allocate generation info array */
gen = &generations[g];
gen->no = g;
gen->mut_list = END_MUT_LIST;
+ gen->mut_once_list = END_MUT_LIST;
gen->collections = 0;
gen->failed_promotions = 0;
- gen->max_blocks = RtsFlags.GcFlags.minOldGenSize;
+ gen->max_blocks = 0;
}
/* A couple of convenience pointers */
/* set up all except the oldest generation with 2 steps */
for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
- generations[g].n_steps = 2;
- generations[g].steps = stgMallocBytes (2 * sizeof(struct _step),
- "initStorage: steps");
+ generations[g].n_steps = RtsFlags.GcFlags.steps;
+ generations[g].steps =
+ stgMallocBytes (RtsFlags.GcFlags.steps * sizeof(struct _step),
+ "initStorage: steps");
}
} else {
step->hp = NULL;
step->hpLim = NULL;
step->hp_bd = NULL;
+ step->scan = NULL;
+ step->scan_bd = NULL;
step->large_objects = NULL;
step->new_large_objects = NULL;
step->scavenged_large_objects = NULL;
/* Set up the destination pointers in each younger gen. step */
for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- step = &generations[g].steps[s];
- if ( s == 1 ) {
- step->to = &generations[g+1].steps[0];
- } else {
- step->to = &generations[g].steps[s+1];
- }
+ for (s = 0; s < generations[g].n_steps-1; s++) {
+ generations[g].steps[s].to = &generations[g].steps[s+1];
}
+ generations[g].steps[s].to = &generations[g+1].steps[0];
}
/* The oldest generation has one step and its destination is the
/* generation 0 is special: that's the nursery */
generations[0].max_blocks = 0;
- /* G0S0: the allocation area */
+ /* G0S0: the allocation area. Policy: keep the allocation area
+ * small to begin with, even if we have a large suggested heap
+ * size. Reason: we're going to do a major collection first, and we
+ * don't want it to be a big one. This vague idea is borne out by
+ * rigorous experimental evidence.
+ */
step = &generations[0].steps[0];
g0s0 = step;
- step->blocks = allocNursery(NULL, RtsFlags.GcFlags.minAllocAreaSize);
- step->n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
nursery_blocks = RtsFlags.GcFlags.minAllocAreaSize;
+ step->blocks = allocNursery(NULL, nursery_blocks);
+ step->n_blocks = nursery_blocks;
current_nursery = step->blocks;
+ g0s0->to_space = NULL;
/* hp, hpLim, hp_bd, to_space etc. aren't used in G0S0 */
weak_ptr_list = NULL;
return last_bd;
}
+/* Resize the nursery (G0S0) to exactly `blocks' blocks.
+ *
+ * Growing: chains (blocks - nursery_blocks) fresh blocks onto the
+ * existing nursery via allocNursery.  Shrinking: frees block groups
+ * from the front of the chain until the target size is reached.
+ * Updates both g0s0->n_blocks and the file-level nursery_blocks
+ * counter to the new size.
+ *
+ * NOTE(review): `blocks' has type nat, which is presumably unsigned;
+ * if so the "%d" in the debug fprintf calls should strictly be "%u" --
+ * confirm against the typedef of nat.
+ */
+extern void
+resizeNursery ( nat blocks )
+{
+  bdescr *bd;
+
+  /* Already the requested size: nothing to do. */
+  if (nursery_blocks == blocks) {
+    ASSERT(g0s0->n_blocks == blocks);
+    return;
+  }
+
+  else if (nursery_blocks < blocks) {
+    /* Grow: append the difference to the existing block chain. */
+    IF_DEBUG(gc, fprintf(stderr, "Increasing size of nursery to %d blocks\n",
+                         blocks));
+    g0s0->blocks = allocNursery(g0s0->blocks, blocks-nursery_blocks);
+  }
+
+  else {
+    bdescr *next_bd;
+
+    /* Shrink: free groups from the head of the chain, one per
+     * iteration, until only `blocks' remain.
+     */
+    IF_DEBUG(gc, fprintf(stderr, "Decreasing size of nursery to %d blocks\n",
+                         blocks));
+    for (bd = g0s0->blocks; nursery_blocks > blocks; nursery_blocks--) {
+      next_bd = bd->link;
+      freeGroup(bd);
+      bd = next_bd;
+    }
+    /* bd is now the first surviving block group. */
+    g0s0->blocks = bd;
+  }
+
+  g0s0->n_blocks = nursery_blocks = blocks;
+}
+
/* Shut down the storage manager.  Intentionally a no-op at present:
 * there is no per-module state that needs explicit teardown here.
 */
void
exitStorage (void)
{
  /* nothing to do */
}
void
-recordMutable(StgMutClosure *p)
-{
- bdescr *bd;
-
- ASSERT(closure_MUTABLE(p));
-
- bd = Bdescr((P_)p);
-
- /* no need to bother in generation 0 */
- if (bd->gen == g0) {
- return;
- }
-
- if (p->mut_link == NULL) {
- p->mut_link = bd->gen->mut_list;
- bd->gen->mut_list = p;
- }
-}
-
-void
newCAF(StgClosure* caf)
{
/* Put this CAF on the mutable list for the old generation.
* come to do a major GC we won't need the mut_link field
* any more and can use it as a STATIC_LINK.
*/
- ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_list;
- oldest_gen->mut_list = (StgMutClosure *)caf;
+ ((StgMutClosure *)caf)->mut_link = oldest_gen->mut_once_list;
+ oldest_gen->mut_once_list = (StgMutClosure *)caf;
#ifdef DEBUG
{
}
/* -----------------------------------------------------------------------------
+ Stats and stuff
+ -------------------------------------------------------------------------- */
+
+/* Approximate the amount of live data in the heap. To be called just
+ * after garbage collection (see GarbageCollect()).
+ */
+/* Returns an estimate of live data in words.  For each step this is
+ * (full blocks - 1) * BLOCK_SIZE_W plus the used portion of the
+ * current allocation block (hp_bd->free - hp_bd->start); slop at the
+ * end of every other block is ignored, hence "approximate".
+ */
+extern lnat
+calcLive(void)
+{
+  nat g, s;
+  lnat live = 0;
+  step *step;
+
+  /* Single-generation collector: live data is whatever was copied
+   * into to-space by the last GC.
+   */
+  if (RtsFlags.GcFlags.generations == 1) {
+    live = (g0s0->to_blocks - 1) * BLOCK_SIZE_W +
+      ((lnat)g0s0->hp_bd->free - (lnat)g0s0->hp_bd->start) / sizeof(W_);
+    return live;
+  }
+
+  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+    for (s = 0; s < generations[g].n_steps; s++) {
+      /* approximate amount of live data (doesn't take into account slop
+       * at end of each block).
+       */
+      /* G0S0 is the nursery: freshly allocated data, not counted as
+       * live.
+       */
+      if (g == 0 && s == 0) {
+        continue;
+      }
+      step = &generations[g].steps[s];
+      live += (step->n_blocks - 1) * BLOCK_SIZE_W +
+        ((lnat)step->hp_bd->free - (lnat)step->hp_bd->start) / sizeof(W_);
+    }
+  }
+  return live;
+}
+
+/* Approximate the number of blocks that will be needed at the next
+ * garbage collection.
+ *
+ * Assume: all data currently live will remain live. Steps that will
+ * be collected next time will therefore need twice as many blocks
+ * since all the data will be copied.
+ */
+/* Returns the estimated number of blocks needed by the next GC.
+ * Per the comment above: steps that will be collected need twice
+ * their current blocks, since copying GC duplicates live data.
+ */
+extern lnat
+calcNeeded(void)
+{
+  lnat needed = 0;
+  nat g, s;
+  step *step;
+
+  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+    for (s = 0; s < generations[g].n_steps; s++) {
+      /* The nursery is recycled, not copied -- skip it. */
+      if (g == 0 && s == 0) { continue; }
+      step = &generations[g].steps[s];
+      /* step 0 exceeding the generation's block quota is taken as the
+       * signal that this generation will be collected at the next GC
+       * (hence 2x for the copy); otherwise the data stays in place.
+       */
+      if (generations[g].steps[0].n_blocks > generations[g].max_blocks) {
+        needed += 2 * step->n_blocks;
+      } else {
+        needed += step->n_blocks;
+      }
+    }
+  }
+  return needed;
+}
+
+/* -----------------------------------------------------------------------------
Debugging
memInventory() checks for memory leaks by counting up all the
#endif
}
+/* Full heap sanity check. */
+
+/* Full heap sanity check after a GC.
+ *
+ * N: presumably the highest generation collected by the GC that just
+ * ran -- TODO confirm against GarbageCollect().
+ */
+extern void
+checkSanity(nat N)
+{
+  nat g, s;
+
+  if (RtsFlags.GcFlags.generations == 1) {
+    /* Single-generation collector: check to-space and its large
+     * objects.
+     */
+    checkHeap(g0s0->to_space, NULL);
+    checkChain(g0s0->large_objects);
+  } else {
+
+    /* Generations 0..N were just collected: check every step's block
+     * chain from the start (nursery G0S0 excepted).
+     */
+    for (g = 0; g <= N; g++) {
+      for (s = 0; s < generations[g].n_steps; s++) {
+        if (g == 0 && s == 0) { continue; }
+        checkHeap(generations[g].steps[s].blocks, NULL);
+      }
+    }
+    /* Uncollected older generations: checkHeap is given
+     * blocks->start as a second argument here, unlike above --
+     * NOTE(review): semantics of that argument aren't visible in this
+     * file; presumably a start-of-scan position.  Large object chains
+     * are checked only for these generations.
+     */
+    for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
+      for (s = 0; s < generations[g].n_steps; s++) {
+        checkHeap(generations[g].steps[s].blocks,
+                  generations[g].steps[s].blocks->start);
+        checkChain(generations[g].steps[s].large_objects);
+      }
+    }
+    checkFreeListSanity();
+  }
+}
+
#endif