/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.27 1999/02/05 16:02:41 simonm Exp $
+ * $Id: GC.c,v 1.37 1999/02/17 17:47:09 simonm Exp $
*
* (c) The GHC Team 1998-1999
*
*/
bdescr *old_to_space;
+/* Data used for allocation area sizing.
+ */
+lnat new_blocks; /* blocks allocated during this GC */
+lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
+
/* -----------------------------------------------------------------------------
Static function declarations
-------------------------------------------------------------------------- */
g0s0->to_space = NULL;
}
+ /* Keep a count of how many new blocks we allocated during this GC
+ * (used for resizing the allocation area, later).
+ */
+ new_blocks = 0;
+
/* Initialise to-space in all the generations/steps that we're
* collecting.
*/
loop2:
for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
+ if (gen == 0 && st == 0) {
+ continue;
+ }
step = &generations[gen].steps[st];
evac_gen = gen;
if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
* oldest_gen
*/
if (g != 0) {
+#if 0
generations[g].max_blocks = (oldest_gen->max_blocks * g)
/ (RtsFlags.GcFlags.generations-1);
+#endif
+ generations[g].max_blocks = oldest_gen->max_blocks;
}
/* for older generations... */
* performance we get from 3L bytes, reducing to the same
* performance at 2L bytes.
*/
- blocks = g0s0->n_blocks;
+ blocks = g0s0->to_blocks;
if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
RtsFlags.GcFlags.maxHeapSize ) {
nat needed = calcNeeded(); /* approx blocks needed at next GC */
/* Guess how much will be live in generation 0 step 0 next time.
- * A good approximation is the amount of data that was live this
- * time: this assumes (1) that the size of G0S0 will be roughly
- * the same as last time, and (2) that the promotion rate will be
- * constant.
- *
- * If we don't know how much was live in G0S0 (because there's no
- * step 1), then assume 30% (which is usually an overestimate).
+ * A good approximation is obtained by finding the
+ * percentage of g0s0 that was live at the last minor GC.
*/
- if (g0->n_steps == 1) {
- needed += (g0s0->n_blocks * 30) / 100;
- } else {
- needed += g0->steps[1].n_blocks;
+ if (N == 0) {
+ g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
}
- /* Now we have a rough guess at the number of blocks needed for
- * the next GC, subtract this from the user's suggested heap size
- * and use the rest for the allocation area.
+ /* Estimate a size for the allocation area based on the
+ * information available. We might end up going slightly under
+ * or over the suggested heap size, but we should be pretty
+ * close on average.
+ *
+ * Formula: suggested - needed
+ * ----------------------------
+ * 1 + g0s0_pcnt_kept/100
+ *
+ * where 'needed' is the amount of memory needed at the next
+ * collection for collecting all steps except g0s0.
*/
- blocks = (int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed;
+ blocks =
+ (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
+ (100 + (int)g0s0_pcnt_kept);
if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
blocks = RtsFlags.GcFlags.minAllocAreaSize;
alloc_blocks = 0;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
- /* start any pending finalisers */
- scheduleFinalisers(old_weak_ptr_list);
+ /* start any pending finalizers */
+ scheduleFinalizers(old_weak_ptr_list);
/* check sanity after GC */
IF_DEBUG(sanity, checkSanity(N));
new live weak pointers, then all the currently unreachable ones are
dead.
- For generational GC: we just don't try to finalise weak pointers in
+ For generational GC: we just don't try to finalize weak pointers in
older generations than the one we're collecting. This could
probably be optimised by keeping per-generation lists of weak
pointers, but for a few weak pointers this scheme will work.
if (weak_done) { return rtsFalse; }
- /* doesn't matter where we evacuate values/finalisers to, since
+ /* doesn't matter where we evacuate values/finalizers to, since
* these pointers are treated as roots (iff the keys are alive).
*/
evac_gen = 0;
if ((new = isAlive(w->key))) {
w->key = new;
- /* evacuate the value and finaliser */
+ /* evacuate the value and finalizer */
w->value = evacuate(w->value);
- w->finaliser = evacuate(w->finaliser);
+ w->finalizer = evacuate(w->finalizer);
/* remove this weak ptr from the old_weak_ptr list */
*last_w = w->link;
/* and put it on the new weak ptr list */
/* If we didn't make any changes, then we can go round and kill all
* the dead weak pointers. The old_weak_ptr list is used as a list
- * of pending finalisers later on.
+ * of pending finalizers later on.
*/
if (flag == rtsFalse) {
for (w = old_weak_ptr_list; w; w = w->link) {
w->value = evacuate(w->value);
- w->finaliser = evacuate(w->finaliser);
+ w->finalizer = evacuate(w->finalizer);
}
weak_done = rtsTrue;
}
step->hpLim = step->hp + BLOCK_SIZE_W;
step->hp_bd = bd;
step->to_blocks++;
+ new_blocks++;
}
static __inline__ StgClosure *
((StgIndOldGen *)p)->indirectee =
evacuate(((StgIndOldGen *)p)->indirectee);
-#if 0
+#if 0
/* Debugging code to print out the size of the thing we just
* promoted
*/
scavenge_mutable_list(generation *gen)
{
StgInfoTable *info;
- StgMutClosure *p, *next, *new_list;
+ StgMutClosure *p, *next;
p = gen->saved_mut_list;
- new_list = END_MUT_LIST;
next = p->mut_link;
evac_gen = 0;
if (failed_to_evac) {
failed_to_evac = rtsFalse;
- p->mut_link = new_list;
- new_list = p;
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
}
continue;
}
case MUT_ARR_PTRS:
/* follow everything */
- p->mut_link = new_list;
- new_list = p;
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
{
StgPtr end, q;
*/
ASSERT(p->header.info != &MUT_CONS_info);
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- p->mut_link = new_list;
- new_list = p;
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
continue;
case MVAR:
(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
- p->mut_link = new_list;
- new_list = p;
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
continue;
}
* point to some younger objects (because we set evac_gen to 0
* above).
*/
- tso->mut_link = new_list;
- new_list = (StgMutClosure *)tso;
+ tso->mut_link = gen->mut_list;
+ gen->mut_list = (StgMutClosure *)tso;
continue;
}
StgBlockingQueue *bh = (StgBlockingQueue *)p;
(StgClosure *)bh->blocking_queue =
evacuate((StgClosure *)bh->blocking_queue);
- p->mut_link = new_list;
- new_list = p;
+ p->mut_link = gen->mut_list;
+ gen->mut_list = p;
continue;
}
barf("scavenge_mut_list: strange object?");
}
}
-
- gen->mut_list = new_list;
}
static void