/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.33 1999/02/16 12:49:45 simonm Exp $
+ * $Id: GC.c,v 1.38 1999/02/23 15:45:06 simonm Exp $
*
* (c) The GHC Team 1998-1999
*
*/
bdescr *old_to_space;
+/* Data used for allocation area sizing.
+ */
+lnat new_blocks; /* blocks allocated during this GC */
+lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
+
/* -----------------------------------------------------------------------------
Static function declarations
-------------------------------------------------------------------------- */
{
bdescr *bd;
step *step;
- lnat live, allocated, collected = 0;
+ lnat live, allocated, collected = 0, copied = 0;
nat g, s;
#ifdef PROFILING
g0s0->to_space = NULL;
}
+ /* Keep a count of how many new blocks we allocated during this GC
+ * (used for resizing the allocation area, later).
+ */
+ new_blocks = 0;
+
/* Initialise to-space in all the generations/steps that we're
* collecting.
*/
step->hpLim = step->hp + BLOCK_SIZE_W;
step->hp_bd = bd;
step->to_space = bd;
- step->to_blocks = 1; /* ???? */
+ step->to_blocks = 1;
step->scan = bd->start;
step->scan_bd = bd;
step->new_large_objects = NULL;
step->scavenged_large_objects = NULL;
+ new_blocks++;
/* mark the large objects as not evacuated yet */
for (bd = step->large_objects; bd; bd = bd->link) {
bd->evacuated = 0;
step->hp_bd = bd;
step->blocks = bd;
step->n_blocks = 1;
+ new_blocks++;
}
/* Set the scan pointer for older generations: remember we
* still have to scavenge objects that have been promoted. */
loop2:
for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
+ if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
step = &generations[gen].steps[st];
evac_gen = gen;
if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
/* run through all the generations/steps and tidy up
*/
+ copied = new_blocks * BLOCK_SIZE_W;
for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
if (g <= N) {
/* Tidy the end of the to-space chains */
step->hp_bd->free = step->hp;
step->hp_bd->link = NULL;
+ /* stats information: how much we copied */
+ if (g <= N) {
+ copied -= step->hp_bd->start + BLOCK_SIZE_W -
+ step->hp_bd->free;
+ }
}
/* for generations we collected... */
* oldest_gen
*/
if (g != 0) {
+#if 0
generations[g].max_blocks = (oldest_gen->max_blocks * g)
/ (RtsFlags.GcFlags.generations-1);
+#endif
+ generations[g].max_blocks = oldest_gen->max_blocks;
}
/* for older generations... */
/* Guess the amount of live data for stats. */
live = calcLive();
+ /* Free the small objects allocated via allocate(), since this will
+ * all have been copied into G0S1 now.
+ */
+ if (small_alloc_list != NULL) {
+ freeChain(small_alloc_list);
+ }
+ small_alloc_list = NULL;
+ alloc_blocks = 0;
+ alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
+
/* Two-space collector:
* Free the old to-space, and estimate the amount of live data.
*/
nat needed = calcNeeded(); /* approx blocks needed at next GC */
/* Guess how much will be live in generation 0 step 0 next time.
- * A good approximation is the amount of data that was live this
- * time: this assumes (1) that the size of G0S0 will be roughly
- * the same as last time, and (2) that the promotion rate will be
- * constant.
- *
- * If we don't know how much was live in G0S0 (because there's no
- * step 1), then assume 30% (which is usually an overestimate).
+ * A good approximation is obtained by finding the
+ * percentage of g0s0 that was live at the last minor GC.
*/
- if (g0->n_steps == 1) {
- needed += (g0s0->n_blocks * 30) / 100;
- } else {
- needed += g0->steps[1].n_blocks;
+ if (N == 0) {
+ g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
}
- /* Now we have a rough guess at the number of blocks needed for
- * the next GC, subtract this from the user's suggested heap size
- * and use the rest for the allocation area.
+ /* Estimate a size for the allocation area based on the
+ * information available. We might end up going slightly under
+ * or over the suggested heap size, but we should be pretty
+ * close on average.
+ *
+ * Formula: suggested - needed
+ * ----------------------------
+ * 1 + g0s0_pcnt_kept/100
+ *
+ * where 'needed' is the amount of memory needed at the next
+ * collection for collecting all steps except g0s0.
*/
- blocks = (int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed;
+ blocks =
+ (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
+ (100 + (int)g0s0_pcnt_kept);
if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
blocks = RtsFlags.GcFlags.minAllocAreaSize;
}
current_nursery = g0s0->blocks;
- /* Free the small objects allocated via allocate(), since this will
- * all have been copied into G0S1 now.
- */
- if (small_alloc_list != NULL) {
- freeChain(small_alloc_list);
- }
- small_alloc_list = NULL;
- alloc_blocks = 0;
- alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
-
- /* start any pending finalisers */
- scheduleFinalisers(old_weak_ptr_list);
+ /* start any pending finalizers */
+ scheduleFinalizers(old_weak_ptr_list);
/* check sanity after GC */
IF_DEBUG(sanity, checkSanity(N));
IF_DEBUG(sanity, memInventory());
/* ok, GC over: tell the stats department what happened. */
- stat_endGC(allocated, collected, live, N);
+ stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
new live weak pointers, then all the currently unreachable ones are
dead.
- For generational GC: we just don't try to finalise weak pointers in
+ For generational GC: we just don't try to finalize weak pointers in
older generations than the one we're collecting. This could
probably be optimised by keeping per-generation lists of weak
pointers, but for a few weak pointers this scheme will work.
if (weak_done) { return rtsFalse; }
- /* doesn't matter where we evacuate values/finalisers to, since
+ /* doesn't matter where we evacuate values/finalizers to, since
* these pointers are treated as roots (iff the keys are alive).
*/
evac_gen = 0;
if ((new = isAlive(w->key))) {
w->key = new;
- /* evacuate the value and finaliser */
+ /* evacuate the value and finalizer */
w->value = evacuate(w->value);
- w->finaliser = evacuate(w->finaliser);
+ w->finalizer = evacuate(w->finalizer);
/* remove this weak ptr from the old_weak_ptr list */
*last_w = w->link;
/* and put it on the new weak ptr list */
/* If we didn't make any changes, then we can go round and kill all
* the dead weak pointers. The old_weak_ptr list is used as a list
- * of pending finalisers later on.
+ * of pending finalizers later on.
*/
if (flag == rtsFalse) {
for (w = old_weak_ptr_list; w; w = w->link) {
w->value = evacuate(w->value);
- w->finaliser = evacuate(w->finaliser);
+ w->finalizer = evacuate(w->finalizer);
}
weak_done = rtsTrue;
}
step->hpLim = step->hp + BLOCK_SIZE_W;
step->hp_bd = bd;
step->to_blocks++;
+ new_blocks++;
}
static __inline__ StgClosure *
((StgIndOldGen *)p)->indirectee =
evacuate(((StgIndOldGen *)p)->indirectee);
-#if 0
+#if 0
/* Debugging code to print out the size of the thing we just
* promoted
*/