/* -----------------------------------------------------------------------------
- * $Id: GC.c,v 1.3 1999/01/06 11:52:43 simonm Exp $
+ * $Id: GC.c,v 1.8 1999/01/14 11:11:29 simonm Exp $
*
* Two-space garbage collector
*
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done; /* all done for this pass */
+/* Flag indicating failure to evacuate an object to the desired
+ * generation.
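+ *
+ * Set by evacuate() when an object cannot be moved into a generation
+ * >= evac_gen; tested and reset by the scavenging code, which then
+ * records the scavenged object on the appropriate mutable list (see
+ * scavenge() below).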
+ */
+static rtsBool failed_to_evac;
+
/* -----------------------------------------------------------------------------
Static function declarations
-------------------------------------------------------------------------- */
{
bdescr *bd;
step *step;
- lnat live, allocated;
+ lnat live, allocated, collected = 0;
nat g, s;
#ifdef PROFILING
bd->gen = &generations[g];
bd->step = step;
bd->link = NULL;
+ bd->evacuated = 1; /* it's a to-space block */
step->hp = bd->start;
step->hpLim = step->hp + BLOCK_SIZE_W;
step->hp_bd = bd;
bd->gen = &generations[g];
bd->step = step;
bd->link = NULL;
+ bd->evacuated = 0; /* *not* a to-space block */
step->hp = bd->start;
step->hpLim = step->hp + BLOCK_SIZE_W;
step->hp_bd = bd;
step->to_blocks = 0;
step->new_large_objects = NULL;
step->scavenged_large_objects = NULL;
+#ifdef DEBUG
+ /* retain these so we can sanity-check later on */
+ step->old_scan = step->scan;
+ step->old_scan_bd = step->scan_bd;
+#endif
}
}
* - mutable lists from each generation > N
* we want to *scavenge* these roots, not evacuate them: they're not
* going to move in this GC.
+ * Also: do them in reverse generation order.  Objects that are
+ * pointed to by older generations should be promoted as early as
+ * possible, so that we don't have to copy them repeatedly.  Doing
+ * the generations in reverse order ensures that we never end up
+ * wanting to evacuate an object to gen 3 when it has already been
+ * evacuated to gen 2.
*/
- for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
- generations[g].mut_list =
- scavenge_mutable_list(generations[g].mut_list, g);
- }
-
+ {
+ StgMutClosure *tmp, **pp;
+ for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
+ /* the act of scavenging the mutable list for this generation
+ * might place more objects on the mutable list itself. So we
+ * place the current mutable list in a temporary, scavenge it,
+ * and then append it to the new list.
+ */
+ tmp = generations[g].mut_list;
+ generations[g].mut_list = END_MUT_LIST;
+ tmp = scavenge_mutable_list(tmp, g);
+
+ pp = &generations[g].mut_list;
+ while (*pp != END_MUT_LIST) {
+ pp = &(*pp)->mut_link;
+ }
+ *pp = tmp;
+ }
+ }
/* And don't forget to mark the TSO if we got here direct from
* Haskell! */
if (CurrentTSO) {
scavenge_static();
}
- /* scavenge each step in generations 0..N */
- evac_gen = 0; /* just evac as normal */
- for (g = 0; g <= N; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- step = &generations[g].steps[s];
- if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
- scavenge(step);
- flag = rtsTrue;
- }
- if (step->new_large_objects != NULL) {
- scavenge_large(step);
- flag = rtsTrue;
- }
- }
- }
- if (flag) { goto loop; }
-
- /* Now scavenge all the older generations. Objects may have been
+  /* When scavenging the older generations, objects may have been
* evacuated from generations <= N into older generations, and we
- * need to scavenge these objects. We're going to make sure that
+ * need to scavenge these objects. We're going to try to ensure that
* any evacuations that occur move the objects into at least the
- * same generation as the object being scavenged.
+  * same generation as the object being scavenged; otherwise we
+ * have to create new entries on the mutable list for the older
+ * generation.
*/
- for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
- for (s = 0; s < generations[g].n_steps; s++) {
- step = &generations[g].steps[s];
- evac_gen = g; /* evacuate to g at least */
- old_loop:
- if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
- scavenge(step);
- goto old_loop;
- }
- if (step->new_large_objects != NULL) {
- scavenge_large(step);
- goto old_loop;
+
+ /* scavenge each step in generations 0..maxgen */
+ {
+ int gen;
+ for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
+ for (s = 0; s < generations[gen].n_steps; s++) {
+ step = &generations[gen].steps[s];
+ evac_gen = gen;
+ if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
+ scavenge(step);
+ flag = rtsTrue;
+ }
+ if (step->new_large_objects != NULL) {
+ scavenge_large(step);
+ flag = rtsTrue;
+ }
}
}
}
+ if (flag) { goto loop; }
/* must be last... */
if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
/* run through all the generations/steps and tidy up
*/
- for (g = 0; g <= RtsFlags.GcFlags.generations; g++) {
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
+
+ if (g <= N) {
+ generations[g].collections++; /* for stats */
+ }
+
for (s = 0; s < generations[g].n_steps; s++) {
bdescr *next;
step = &generations[g].steps[s];
/* for generations we collected... */
if (g <= N) {
+ collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
+
/* free old memory and shift to-space into from-space for all
* the collected steps (except the allocation area). These
	 * freed blocks will probably be quickly recycled.
step->n_blocks = step->to_blocks;
step->to_space = NULL;
step->to_blocks = 0;
+ for (bd = step->blocks; bd != NULL; bd = bd->link) {
+ bd->evacuated = 0; /* now from-space */
+ }
}
/* LARGE OBJECTS. The current live large objects are chained on
freeGroup(bd);
bd = next;
}
+ for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
+ bd->evacuated = 0;
+ }
step->large_objects = step->scavenged_large_objects;
/* Set the maximum blocks for this generation,
* using an arbitrary factor of the no. of blocks in step 0.
*/
if (g != 0) {
- generations[g].max_blocks =
- stg_max(generations[g].steps[s].n_blocks * 2,
+ generation *gen = &generations[g];
+ gen->max_blocks =
+ stg_max(gen->steps[s].n_blocks * 2,
RtsFlags.GcFlags.minAllocAreaSize * 4);
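+	/* Never let one generation grow to more than half of the
+	 * maximum heap size; if clamping it that way leaves less free
+	 * space than pcFreeHeap% of half the heap requires, give up.
+	 */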
+ if (gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
+ gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
+ if (((int)gen->max_blocks - (int)gen->steps[0].n_blocks) <
+ (RtsFlags.GcFlags.pcFreeHeap *
+ RtsFlags.GcFlags.maxHeapSize / 200)) {
+ heapOverflow();
+ }
+ }
}
/* for older generations... */
*/
for (bd = step->scavenged_large_objects; bd; bd = next) {
next = bd->link;
+ bd->evacuated = 0;
dbl_link_onto(bd, &step->large_objects);
}
current_nursery = g0s0->blocks;
live = 0;
- for (g = 0; g <= RtsFlags.GcFlags.generations; g++) {
+ for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
/* approximate amount of live data (doesn't take into account slop
       * at end of each block).  ToDo: do this more accurately.
alloc_blocks = 0;
alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
+ /* start any pending finalisers */
+ scheduleFinalisers(old_weak_ptr_list);
+
/* check sanity after GC */
#ifdef DEBUG
for (g = 0; g <= N; g++) {
for (s = 0; s < generations[g].n_steps; s++) {
if (g == 0 && s == 0) { continue; }
- IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks));
+ IF_DEBUG(sanity, checkHeap(generations[g].steps[s].blocks, NULL));
+ IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
+ }
+ }
+ for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ IF_DEBUG(sanity, checkHeap(generations[g].steps[s].old_scan_bd,
+ generations[g].steps[s].old_scan));
+ IF_DEBUG(sanity, checkChain(generations[g].steps[s].large_objects));
}
}
IF_DEBUG(sanity, checkFreeListSanity());
/* heapCensus(to_space); */ /* ToDo */
#endif
- /* start any pending finalisers */
- scheduleFinalisers(old_weak_ptr_list);
-
/* restore enclosing cost centre */
#ifdef PROFILING
CCCS = prev_CCS;
#endif
+ /* check for memory leaks if sanity checking is on */
+ IF_DEBUG(sanity, memInventory());
+
/* ok, GC over: tell the stats department what happened. */
- {
- char s[512]; /* bleugh */
- sprintf(s, "(Gen: %d)", N);
- stat_endGC(RtsFlags.GcFlags.minAllocAreaSize * BLOCK_SIZE_W,
- 0, live, s);
- }
+ stat_endGC(allocated, collected, live, N);
}
/* -----------------------------------------------------------------------------
loop:
/* ignore weak pointers in older generations */
if (!LOOKS_LIKE_STATIC(target) && Bdescr((P_)target)->gen->no > N) {
+ IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive (in old gen) at %p\n", w));
+ /* remove this weak ptr from the old_weak_ptr list */
+ *last_w = w->link;
+ /* and put it on the new weak ptr list */
next_w = w->link;
+ w->link = weak_ptr_list;
+ weak_ptr_list = w;
+ flag = rtsTrue;
continue;
}
return root;
}
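+/* -----------------------------------------------------------------------------
+   Allocate a fresh block, chain it onto the given step's to-space and
+   reset the step's allocation pointers to point into it.  The block is
+   marked as to-space (evacuated) only if the step belongs to one of
+   the generations currently being collected.
+   -------------------------------------------------------------------------- */
+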
+static inline void
+addBlock(step *step)
+{
+ bdescr *bd = allocBlock();
+ bd->gen = step->gen;
+ bd->step = step;
+
+ if (step->gen->no <= N) {
+ bd->evacuated = 1;
+ } else {
+ bd->evacuated = 0;
+ }
+
+ step->hp_bd->free = step->hp;
+ step->hp_bd->link = bd;
+ step->hp = bd->start;
+ step->hpLim = step->hp + BLOCK_SIZE_W;
+ step->hp_bd = bd;
+ step->to_blocks++;
+}
+
static __inline__ StgClosure *
copy(StgClosure *src, W_ size, bdescr *bd)
{
* necessary.
*/
if (step->hp + size >= step->hpLim) {
- bdescr *bd = allocBlock();
- bd->gen = step->gen;
- bd->step = step;
- step->hp_bd->free = step->hp;
- step->hp_bd->link = bd;
- step->hp = bd->start;
- step->hpLim = step->hp + BLOCK_SIZE_W;
- step->hp_bd = bd;
- step->to_blocks++;
+ addBlock(step);
}
dest = step->hp;
}
/* -----------------------------------------------------------------------------
+ Evacuate a mutable object
+
+ If we evacuate a mutable object to an old generation, cons the
+ object onto the older generation's mutable list.
+ -------------------------------------------------------------------------- */
+
+static inline void
+evacuate_mutable(StgMutClosure *c)
+{
+ bdescr *bd;
+
+ bd = Bdescr((P_)c);
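+  /* generation 0 is collected at every GC, so it keeps no mutable list */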
+ if (bd->gen->no > 0) {
+ c->mut_link = bd->gen->mut_list;
+ bd->gen->mut_list = c;
+ }
+}
+
+/* -----------------------------------------------------------------------------
Evacuate a large object
This just consists of removing the object from the (doubly-linked)
-------------------------------------------------------------------------- */
static inline void
-evacuate_large(StgPtr p)
+evacuate_large(StgPtr p, rtsBool mutable)
{
bdescr *bd = Bdescr(p);
step *step;
ASSERT(((W_)p & BLOCK_MASK) == 0);
/* already evacuated? */
- if (bd->evacuated) { return; }
+ if (bd->evacuated) {
+ /* Don't forget to set the failed_to_evac flag if we didn't get
+ * the desired destination (see comments in evacuate()).
+ */
+ if (bd->gen->no < evac_gen) {
+ failed_to_evac = rtsTrue;
+ }
+ return;
+ }
step = bd->step;
/* remove from large_object list */
bd->link = step->new_large_objects;
step->new_large_objects = bd;
bd->evacuated = 1;
+
+ if (mutable) {
+ evacuate_mutable((StgMutClosure *)p);
+ }
}
/* -----------------------------------------------------------------------------
- Evacuate a mutable object
-
- If we evacuate a mutable object to a generation that we're not
- collecting, cons the object onto the older generation's mutable
- list.
+ Adding a MUT_CONS to an older generation.
+
+ This is necessary from time to time when we end up with an
+ old-to-new generation pointer in a non-mutable object. We defer
+ the promotion until the next GC.
-------------------------------------------------------------------------- */
-
-static inline void
-evacuate_mutable(StgMutClosure *c)
+
+static StgClosure *
+mkMutCons(StgClosure *ptr, generation *gen)
{
- bdescr *bd;
-
- bd = Bdescr((P_)c);
- if (bd->gen->no > N) {
- c->mut_link = bd->gen->mut_list;
- bd->gen->mut_list = c;
+ StgMutVar *q;
+ step *step;
+
+ step = &gen->steps[0];
+
+ /* chain a new block onto the to-space for the destination step if
+ * necessary.
+ */
+  if (step->hp + sizeofW(StgMutVar) >= step->hpLim) {
+ addBlock(step);
}
+
+ q = (StgMutVar *)step->hp;
+ step->hp += sizeofW(StgMutVar);
+
+ SET_HDR(q,&MUT_CONS_info,CCS_GC);
+ q->var = ptr;
+ evacuate_mutable((StgMutClosure *)q);
+
+ return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
it now resides in.
if M >= evac_gen do nothing
- if M < evac_gen replace object with an indirection and evacuate
- it to evac_gen.
+ if M < evac_gen set failed_to_evac flag to indicate that we
+ didn't manage to evacuate this object into evac_gen.
-------------------------------------------------------------------------- */
-static StgClosure *evacuate(StgClosure *q)
+static StgClosure *
+evacuate(StgClosure *q)
{
StgClosure *to;
bdescr *bd = NULL;
loop:
if (!LOOKS_LIKE_STATIC(q)) {
bd = Bdescr((P_)q);
- /* generation too old: leave it alone */
- if (bd->gen->no >= evac_gen && bd->gen->no > N) {
- return q;
- }
+ if (bd->gen->no > N) {
+ /* Can't evacuate this object, because it's in a generation
+ * older than the ones we're collecting. Let's hope that it's
+       * in evac_gen or older, or we will have to put the referring
+       * object on a mutable list (via a MUT_CONS).
+ */
+ if (bd->gen->no < evac_gen) {
+ /* nope */
+ failed_to_evac = rtsTrue;
+ }
+ return q;
+ }
}
/* make sure the info pointer is into text space */
case THUNK_SELECTOR:
{
const StgInfoTable* selectee_info;
- StgClosure* selectee = stgCast(StgSelector*,q)->selectee;
- rtsBool evaced = rtsFalse;
+ StgClosure* selectee = ((StgSelector*)q)->selectee;
selector_loop:
selectee_info = get_itbl(selectee);
* with the evacuation, just update the source address with
* a pointer to the (evacuated) constructor field.
*/
- if (IS_USER_PTR(q) && evaced) {
- return q;
+ if (IS_USER_PTR(q)) {
+ bdescr *bd = Bdescr((P_)q);
+ if (bd->evacuated) {
+ if (bd->gen->no < evac_gen) {
+ failed_to_evac = rtsTrue;
+ }
+ return q;
+ }
}
/* otherwise, carry on and evacuate this constructor field,
goto selector_loop;
case EVACUATED:
- evaced = rtsTrue;
selectee = stgCast(StgEvacuated*,selectee)->evacuee;
goto selector_loop;
* HOWEVER: if the requested destination generation (evac_gen) is
* older than the actual generation (because the object was
* already evacuated to a younger generation) then we have to
- * re-evacuate it, replacing the old evacuated copy with an
- * indirection to the new copy.
+ * set the failed_to_evac flag to indicate that we couldn't
+ * manage to promote the object to the desired generation.
*/
if (evac_gen > 0) { /* optimisation */
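+      /* (when evac_gen is zero, any destination generation is
+       * acceptable, so the evacuation cannot have failed)
+       */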
StgClosure *p = ((StgEvacuated*)q)->evacuee;
- if (Bdescr((P_)p)->gen->no >= evac_gen) {
- return p;
- } else {
- nat padding_wds = sizeW_fromITBL(get_itbl(p)) - sizeofW(StgInd);
- StgClosure *new_p = evacuate(p); /* naughty recursive call */
- IF_DEBUG(gc, fprintf(stderr,"ouch! double evacuation\n"));
- ((StgEvacuated*)q)->evacuee = new_p;
- p->header.info = &IND_info;
- memset((P_)p + sizeofW(StgInd), 0, padding_wds * sizeof(W_));
- return new_p;
- }
+ if (Bdescr((P_)p)->gen->no < evac_gen) {
+ /* fprintf(stderr,"evac failed!\n");*/
+ failed_to_evac = rtsTrue;
+ }
}
return ((StgEvacuated*)q)->evacuee;
case MUT_ARR_WORDS:
case ARR_WORDS:
- case ARR_PTRS:
{
nat size = arr_words_sizeW(stgCast(StgArrWords*,q));
if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- evacuate_large((P_)q);
+ evacuate_large((P_)q, rtsFalse);
return q;
} else {
/* just copy the block */
nat size = mut_arr_ptrs_sizeW(stgCast(StgMutArrPtrs*,q));
if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- evacuate_large((P_)q);
+ evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
to = q;
} else {
/* just copy the block */
to = copy(q,size,bd);
upd_evacuee(q,to);
- }
- if (info->type == MUT_ARR_PTRS) {
- evacuate_mutable((StgMutClosure *)to);
+ if (info->type == MUT_ARR_PTRS) {
+ evacuate_mutable((StgMutClosure *)to);
+ }
}
return to;
}
/* Large TSOs don't get moved, so no relocation is required.
*/
if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
- evacuate_large((P_)q);
+ evacuate_large((P_)q, rtsFalse);
tso->mut_link = NULL; /* see below */
return q;
}
}
+/* -----------------------------------------------------------------------------
+ Scavenge a given step until there are no more objects in this step
+ to scavenge.
+
+ evac_gen is set by the caller to be either zero (for a step in a
+ generation < N) or G where G is the generation of the step being
+ scavenged.
+
+ We sometimes temporarily change evac_gen back to zero if we're
+ scavenging a mutable object where early promotion isn't such a good
+ idea.
+ -------------------------------------------------------------------------- */
+
+
static void
scavenge(step *step)
{
- StgPtr p;
+ StgPtr p, q;
const StgInfoTable *info;
bdescr *bd;
+ nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
p = step->scan;
bd = step->scan_bd;
+ failed_to_evac = rtsFalse;
+
/* scavenge phase - standard breadth-first scavenging of the
* evacuated objects
*/
continue;
}
+ q = p; /* save ptr to object */
+
ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
|| IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
}
p += bco_sizeW(bco);
- continue;
+ break;
}
case MVAR:
*/
{
StgMVar *mvar = ((StgMVar *)p);
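+      /* MVARs are always mutable, so early promotion of the queues
+       * and value would gain nothing (see comment at the top of
+       * scavenge()); scavenge them at evac_gen zero.
+       */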
+ evac_gen = 0;
(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
p += sizeofW(StgMVar);
- continue;
+ evac_gen = saved_evac_gen;
+ break;
}
case FUN:
case CONSTR:
case WEAK:
case FOREIGN:
- case MUT_VAR:
case IND_PERM:
case IND_OLDGEN_PERM:
case CAF_UNENTERED:
(StgClosure *)*p = evacuate((StgClosure *)*p);
}
p += info->layout.payload.nptrs;
- continue;
+ break;
+ }
+
+ case MUT_VAR:
+ /* ignore MUT_CONSs */
+ if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
+ evac_gen = 0;
+ ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+ evac_gen = saved_evac_gen;
}
+ p += sizeofW(StgMutVar);
+ break;
case CAF_BLACKHOLE:
case BLACKHOLE:
(StgClosure *)bh->blocking_queue =
evacuate((StgClosure *)bh->blocking_queue);
p += BLACKHOLE_sizeW();
- continue;
+ break;
}
case THUNK_SELECTOR:
StgSelector *s = (StgSelector *)p;
s->selectee = evacuate(s->selectee);
p += THUNK_SELECTOR_sizeW();
- continue;
+ break;
}
case IND:
pap->fun = evacuate(pap->fun);
scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
p += pap_sizeW(pap);
- continue;
+ break;
}
case ARR_WORDS:
case MUT_ARR_WORDS:
/* nothing to follow */
p += arr_words_sizeW(stgCast(StgArrWords*,p));
- continue;
+ break;
- case ARR_PTRS:
+ case MUT_ARR_PTRS:
/* follow everything */
{
StgPtr next;
- next = p + arr_ptrs_sizeW(stgCast(StgArrPtrs*,p));
- for (p = (P_)((StgArrPtrs *)p)->payload; p < next; p++) {
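+	/* A MUT_ARR_PTRS is always on a mutable list somewhere, so
+	 * promoting its contents early would gain nothing; scavenge
+	 * it at evac_gen zero (see comment at the top of scavenge()).
+	 */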
+ evac_gen = 0; /* repeatedly mutable */
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
}
- continue;
+ evac_gen = saved_evac_gen;
+ break;
}
- case MUT_ARR_PTRS:
case MUT_ARR_PTRS_FROZEN:
/* follow everything */
{
- StgPtr next;
+ StgPtr start = p, next;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
}
- continue;
+ if (failed_to_evac) {
+	  /* didn't manage to promote everything: put the whole array
+	   * on the mutable list.
+	   */
+ evacuate_mutable((StgMutClosure *)start);
+ failed_to_evac = rtsFalse;
+ }
+ break;
}
case TSO:
StgTSO *tso;
tso = (StgTSO *)p;
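+      /* TSOs are repeatedly mutable (the thread keeps running), so
+       * don't attempt early promotion of anything they point to.
+       */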
+ evac_gen = 0;
/* chase the link field for any TSOs on the same queue */
(StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
/* scavenge this thread's stack */
scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
+ evac_gen = saved_evac_gen;
p += tso_sizeW(tso);
- continue;
+ break;
}
case BLOCKED_FETCH:
default:
barf("scavenge");
}
+
+ /* If we didn't manage to promote all the objects pointed to by
+ * the current object, then we have to designate this object as
+ * mutable (because it contains old-to-new generation pointers).
+ */
+ if (failed_to_evac) {
+ mkMutCons((StgClosure *)q, &generations[evac_gen]);
+ failed_to_evac = rtsFalse;
+ }
}
step->scan_bd = bd;
}
/* -----------------------------------------------------------------------------
+ Scavenge one object.
+
+ This is used for objects that are temporarily marked as mutable
+ because they contain old-to-new generation pointers. Only certain
+ objects can have this property.
+ -------------------------------------------------------------------------- */
+static rtsBool
+scavenge_one(StgPtr p)
+{
+ StgInfoTable *info;
+ rtsBool no_luck;
+
+ ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
+ || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
+
+ info = get_itbl((StgClosure *)p);
+
+ switch (info -> type) {
+
+ case FUN:
+ case THUNK:
+ case CONSTR:
+ case WEAK:
+ case FOREIGN:
+ case IND_PERM:
+ case IND_OLDGEN_PERM:
+ case CAF_UNENTERED:
+ case CAF_ENTERED:
+ {
+ StgPtr end;
+
+ end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ break;
+ }
+
+ case CAF_BLACKHOLE:
+ case BLACKHOLE:
+ {
+ StgBlackHole *bh = (StgBlackHole *)p;
+ (StgClosure *)bh->blocking_queue =
+ evacuate((StgClosure *)bh->blocking_queue);
+ break;
+ }
+
+ case THUNK_SELECTOR:
+ {
+ StgSelector *s = (StgSelector *)p;
+ s->selectee = evacuate(s->selectee);
+ break;
+ }
+
+ case AP_UPD: /* same as PAPs */
+ case PAP:
+ /* Treat a PAP just like a section of stack, not forgetting to
+ * evacuate the function pointer too...
+ */
+ {
+ StgPAP* pap = stgCast(StgPAP*,p);
+
+ pap->fun = evacuate(pap->fun);
+ scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
+ break;
+ }
+
+ case IND_OLDGEN:
+    /* This might happen if, for instance, a MUT_CONS was pointing to a
+ * THUNK which has since been updated. The IND_OLDGEN will
+ * be on the mutable list anyway, so we don't need to do anything
+ * here.
+ */
+ break;
+
+ default:
+ barf("scavenge_one: strange object");
+ }
+
+ no_luck = failed_to_evac;
+ failed_to_evac = rtsFalse;
+ return (no_luck);
+}
+
+
+/* -----------------------------------------------------------------------------
Scavenging mutable lists.
We treat the mutable list of each generation > N (i.e. all the
prev = &start;
start = p;
+ failed_to_evac = rtsFalse;
+
for (; p != END_MUT_LIST; p = *prev) {
/* make sure the info pointer is into text space */
case MUT_ARR_PTRS_FROZEN:
/* remove this guy from the mutable list, but follow the ptrs
- * anyway.
+ * anyway (and make sure they get promoted to this gen).
*/
- *prev = p->mut_link;
- goto do_array;
+ {
+ StgPtr end, q;
+
+ end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ evac_gen = gen;
+ for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
+ (StgClosure *)*q = evacuate((StgClosure *)*q);
+ }
+ evac_gen = 0;
+
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ prev = &p->mut_link;
+ } else {
+ *prev = p->mut_link;
+ }
+ continue;
+ }
case MUT_ARR_PTRS:
/* follow everything */
prev = &p->mut_link;
- do_array:
{
StgPtr end, q;
}
case MUT_VAR:
- ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- prev = &p->mut_link;
+ /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
+ * it from the mutable list if possible by promoting whatever it
+ * points to.
+ */
+ if (p->header.info == &MUT_CONS_info) {
+ evac_gen = gen;
+ if (scavenge_one((P_)((StgMutVar *)p)->var) == rtsTrue) {
+ /* didn't manage to promote everything, so leave the
+ * MUT_CONS on the list.
+ */
+ prev = &p->mut_link;
+ } else {
+ *prev = p->mut_link;
+ }
+ evac_gen = 0;
+ } else {
+ ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
+ prev = &p->mut_link;
+ }
continue;
case TSO:
case IND_OLDGEN:
case IND_OLDGEN_PERM:
case IND_STATIC:
- /* Remove these from the mutable list - we can be sure that the
- * objects they point to now reside in this generation because
- * we set evac_gen here ->
+ /* Try to pull the indirectee into this generation, so we can
+ * remove the indirection from the mutable list.
*/
evac_gen = gen;
((StgIndOldGen *)p)->indirectee =
- evacuate(((StgIndOldGen *)p)->indirectee);
+ evacuate(((StgIndOldGen *)p)->indirectee);
evac_gen = 0;
- *prev = p->mut_link;
- p->mut_link = NULL; /* paranoia? */
+
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ prev = &p->mut_link;
+ } else {
+ *prev = p->mut_link;
+ /* the mut_link field of an IND_STATIC is overloaded as the
+ * static link field too (it just so happens that we don't need
+ * both at the same time), so we need to NULL it out when
+ * removing this object from the mutable list because the static
+ * link fields are all assumed to be NULL before doing a major
+ * collection.
+ */
+ p->mut_link = NULL;
+ }
continue;
default:
{
StgInd *ind = (StgInd *)p;
ind->indirectee = evacuate(ind->indirectee);
+
+ /* might fail to evacuate it, in which case we have to pop it
+ * back on the mutable list (and take it off the
+ * scavenged_static list because the static link and mut link
+ * pointers are one and the same).
+ */
+ if (failed_to_evac) {
+ failed_to_evac = rtsFalse;
+ scavenged_static_objects = STATIC_LINK(info,p);
+ ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_list;
+ oldest_gen->mut_list = (StgMutClosure *)ind;
+ }
break;
}
barf("scavenge_static");
}
+ ASSERT(failed_to_evac == rtsFalse);
+
  /* get the next static object from the list.  Remember, there might
* be more stuff on this list now that we've done some evacuating!
* (static_objects is a global)
case RET_BIG:
case RET_VEC_BIG:
{
+ StgPtr q;
StgLargeBitmap *large_bitmap;
nat i;
for (i=0; i<large_bitmap->size; i++) {
bitmap = large_bitmap->bitmap[i];
+ q = p + sizeof(W_) * 8;
	while (bitmap != 0) {
	  if ((bitmap & 1) == 0) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	  }
	  p++;
	  bitmap = bitmap >> 1;
	}
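+	/* the bits beyond the last set bit are all zero, i.e. they
+	 * denote pointers: if another bitmap word follows, the slots
+	 * up to q (the end of this word's coverage) must still be
+	 * evacuated.
+	 */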
+ if (i+1 < large_bitmap->size) {
+ while (p < q) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ p++;
+ }
+ }
}
/* and don't forget to follow the SRT */
barf("scavenge_stack: weird activation record found on stack.\n");
}
}
-}
+}
/*-----------------------------------------------------------------------------
scavenge the large object list.
+
+ evac_gen set by caller; similar games played with evac_gen as with
+ scavenge() - see comment at the top of scavenge(). Most large
+ objects are (repeatedly) mutable, so most of the time evac_gen will
+ be zero.
--------------------------------------------------------------------------- */
static void
bdescr *bd;
StgPtr p;
const StgInfoTable* info;
+ nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
+ evac_gen = 0; /* most objects are mutable */
bd = step->new_large_objects;
- evac_gen = step->gen->no;
for (; bd != NULL; bd = step->new_large_objects) {
*/
step->new_large_objects = bd->link;
dbl_link_onto(bd, &step->scavenged_large_objects);
- bd->evacuated = 0; /* ready for next GC */
p = bd->start;
info = get_itbl(stgCast(StgClosure*,p));
/* nothing to follow */
continue;
- case ARR_PTRS:
case MUT_ARR_PTRS:
- case MUT_ARR_PTRS_FROZEN:
/* follow everything */
{
StgPtr next;
- next = p + arr_ptrs_sizeW(stgCast(StgArrPtrs*,p));
- for (p = (P_)((StgArrPtrs *)p)->payload; p < next; p++) {
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
}
continue;
}
+ case MUT_ARR_PTRS_FROZEN:
+ /* follow everything */
+ {
+ StgPtr start = p, next;
+
+ evac_gen = saved_evac_gen; /* not really mutable */
+ next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
+ for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
+ (StgClosure *)*p = evacuate((StgClosure *)*p);
+ }
+ evac_gen = 0;
+	if (failed_to_evac) {
+	  failed_to_evac = rtsFalse;
+	  evacuate_mutable((StgMutClosure *)start);
+	}
+ continue;
+ }
+
case BCO:
{
StgBCO* bco = stgCast(StgBCO*,p);
nat i;
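+      /* a BCO isn't mutable in the "repeatedly mutable" sense, so
+       * promote its pointers to the caller's evac_gen.
+       */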
+ evac_gen = saved_evac_gen;
for (i = 0; i < bco->n_ptrs; i++) {
bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
}
+ evac_gen = 0;
continue;
}
{
StgMutClosure *next, *c;
- for (c = first; c; c = next) {
+ for (c = first; c != END_MUT_LIST; c = next) {
next = c->mut_link;
c->mut_link = NULL;
}