/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team 1998-2006
+ * (c) The GHC Team 1998-2008
*
* Generational garbage collector: scavenging functions
*
+ * Documentation on the architecture of the Garbage Collector can be
+ * found in the online commentary:
+ *
+ * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
+ *
* ---------------------------------------------------------------------------*/
#include "Rts.h"
+#include "RtsFlags.h"
#include "Storage.h"
#include "MBlock.h"
#include "GC.h"
+#include "GCThread.h"
+#include "GCUtils.h"
#include "Compact.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
#include "LdvProfile.h"
+#include "Sanity.h"
+#include "Capability.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);

static void scavenge_large_bitmap (StgPtr p,
                                   StgLargeBitmap *large_bitmap,
                                   nat size );
-/* Similar to scavenge_large_bitmap(), but we don't write back the
- * pointers we get back from evacuate().
- */
-static void
-scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
-{
- nat i, b, size;
- StgWord bitmap;
- StgClosure **p;
-
- b = 0;
- bitmap = large_srt->l.bitmap[b];
- size = (nat)large_srt->l.size;
- p = (StgClosure **)large_srt->srt;
- for (i = 0; i < size; ) {
- if ((bitmap & 1) != 0) {
- evacuate(*p);
- }
- i++;
- p++;
- if (i % BITS_IN(W_) == 0) {
- b++;
- bitmap = large_srt->l.bitmap[b];
- } else {
- bitmap = bitmap >> 1;
- }
- }
-}
-
-/* evacuate the SRT. If srt_bitmap is zero, then there isn't an
- * srt field in the info table. That's ok, because we'll
- * never dereference it.
- */
-STATIC_INLINE void
-scavenge_srt (StgClosure **srt, nat srt_bitmap)
-{
- nat bitmap;
- StgClosure **p;
-
- bitmap = srt_bitmap;
- p = srt;
-
- if (bitmap == (StgHalfWord)(-1)) {
- scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
- return;
- }
-
- while (bitmap != 0) {
- if ((bitmap & 1) != 0) {
-#ifdef ENABLE_WIN32_DLL_SUPPORT
- // Special-case to handle references to closures hiding out in DLLs, since
- // double indirections required to get at those. The code generator knows
- // which is which when generating the SRT, so it stores the (indirect)
- // reference to the DLL closure in the table by first adding one to it.
- // We check for this here, and undo the addition before evacuating it.
- //
- // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
- // closure that's fixed at link-time, and no extra magic is required.
- if ( (unsigned long)(*srt) & 0x1 ) {
- evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
- } else {
- evacuate(*p);
- }
-#else
- evacuate(*p);
+#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
+# define evacuate(a) evacuate1(a)
+# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
+# define scavenge_loop(a) scavenge_loop1(a)
+# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
+# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
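+
+// These renames support building this file twice when THREADED_RTS is
+// on: once with PARALLEL_GC defined (giving the parallel variants) and
+// once without (giving the single-threaded, "1"-suffixed variants).
+// Since the #defines rename the definitions below as well as their
+// call sites, the two compilations produce distinct symbols.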
- }
- p++;
- bitmap = bitmap >> 1;
- }
-}
+/* -----------------------------------------------------------------------------
+ Scavenge a TSO.
+ -------------------------------------------------------------------------- */
STATIC_INLINE void
-scavenge_thunk_srt(const StgInfoTable *info)
+scavenge_TSO_link (StgTSO *tso)
{
- StgThunkInfoTable *thunk_info;
-
- if (!major_gc) return;
-
- thunk_info = itbl_to_thunk_itbl(info);
- scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
+ // We don't always chase the link field: TSOs on the blackhole
+ // queue are not automatically alive, so the link field is a
+ // "weak" pointer in that case.
+ if (tso->why_blocked != BlockedOnBlackHole) {
+ evacuate((StgClosure **)&tso->_link);
+ }
}
-STATIC_INLINE void
-scavenge_fun_srt(const StgInfoTable *info)
+static void
+scavengeTSO (StgTSO *tso)
{
- StgFunInfoTable *fun_info;
+ rtsBool saved_eager;
+
+ if (tso->what_next == ThreadRelocated) {
+ // the only way this can happen is if the old TSO was on the
+ // mutable list. We might have other links to this defunct
+ // TSO, so we must update its link field.
+ evacuate((StgClosure**)&tso->_link);
+ return;
+ }
- if (!major_gc) return;
-
- fun_info = itbl_to_fun_itbl(info);
- scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
-}
+ debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
-/* -----------------------------------------------------------------------------
- Scavenge a TSO.
- -------------------------------------------------------------------------- */
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
-static void
-scavengeTSO (StgTSO *tso)
-{
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
|| tso->why_blocked == BlockedOnException
) {
- tso->block_info.closure = evacuate(tso->block_info.closure);
+ evacuate(&tso->block_info.closure);
}
- tso->blocked_exceptions =
- (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
+ evacuate((StgClosure **)&tso->blocked_exceptions);
- // We don't always chase the link field: TSOs on the blackhole
- // queue are not automatically alive, so the link field is a
- // "weak" pointer in that case.
- if (tso->why_blocked != BlockedOnBlackHole) {
- tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
- }
-
// scavenge the current transaction record
- tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
+ evacuate((StgClosure **)&tso->trec);
// scavenge this thread's stack
scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
+
+ if (gct->failed_to_evac) {
+ tso->flags |= TSO_DIRTY;
+ scavenge_TSO_link(tso);
+ } else {
+ tso->flags &= ~TSO_DIRTY;
+ scavenge_TSO_link(tso);
+ if (gct->failed_to_evac) {
+ tso->flags |= TSO_LINK_DIRTY;
+ } else {
+ tso->flags &= ~TSO_LINK_DIRTY;
+ }
+ }
+
+ gct->eager_promotion = saved_eager;
}
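+
+// To summarize the flag encoding above: TSO_DIRTY means something
+// reachable from the TSO's body or stack failed to evacuate, so the
+// whole TSO must be re-scavenged at the next GC; TSO_LINK_DIRTY means
+// only the _link field did, in which case scavenge_TSO_link() alone
+// suffices (see the TSO case in scavenge_mutable_list() below).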
/* -----------------------------------------------------------------------------
small_bitmap:
while (size > 0) {
if ((bitmap & 1) == 0) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p++;
bitmap = bitmap >> 1;
return p;
}
-STATIC_INLINE StgPtr
+STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
StgPtr p;
StgWord bitmap;
StgFunInfoTable *fun_info;
- fun_info = get_fun_itbl(fun);
+ fun_info = get_fun_itbl(UNTAG_CLOSURE(fun));
ASSERT(fun_info->i.type != PAP);
p = (StgPtr)payload;
small_bitmap:
while (size > 0) {
if ((bitmap & 1) == 0) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p++;
bitmap = bitmap >> 1;
return p;
}
-STATIC_INLINE StgPtr
+STATIC_INLINE GNUC_ATTR_HOT StgPtr
scavenge_PAP (StgPAP *pap)
{
- pap->fun = evacuate(pap->fun);
+ evacuate(&pap->fun);
return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}
STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
- ap->fun = evacuate(ap->fun);
+ evacuate(&ap->fun);
return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
- Scavenge a given step until there are no more objects in this step
- to scavenge.
+ Scavenge SRTs
+ -------------------------------------------------------------------------- */
+
+/* Similar to scavenge_large_bitmap(), but we don't write back the
+ * pointers we get back from evacuate().
+ */
+static void
+scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
+{
+ nat i, b, size;
+ StgWord bitmap;
+ StgClosure **p;
+
+ b = 0;
+ bitmap = large_srt->l.bitmap[b];
+ size = (nat)large_srt->l.size;
+ p = (StgClosure **)large_srt->srt;
+ for (i = 0; i < size; ) {
+ if ((bitmap & 1) != 0) {
+ evacuate(p);
+ }
+ i++;
+ p++;
+ if (i % BITS_IN(W_) == 0) {
+ b++;
+ bitmap = large_srt->l.bitmap[b];
+ } else {
+ bitmap = bitmap >> 1;
+ }
+ }
+}
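+
+// A worked example of the traversal above, on a 64-bit machine
+// (BITS_IN(W_) == 64): an SRT with l.size == 100 is described by two
+// bitmap words, where bit i of l.bitmap[0] covers srt[i] for
+// i in 0..63 and bit i of l.bitmap[1] covers srt[64+i].  The loop
+// shifts through one word at a time, fetching the next word after
+// every 64 entries.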
- evac_gen is set by the caller to be either zero (for a step in a
+/* evacuate the SRT. If srt_bitmap is zero, then there isn't an
+ * srt field in the info table. That's ok, because we'll
+ * never dereference it.
+ */
+STATIC_INLINE GNUC_ATTR_HOT void
+scavenge_srt (StgClosure **srt, nat srt_bitmap)
+{
+ nat bitmap;
+ StgClosure **p;
+
+ bitmap = srt_bitmap;
+ p = srt;
+
+ if (bitmap == (StgHalfWord)(-1)) {
+ scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
+ return;
+ }
+
+ while (bitmap != 0) {
+ if ((bitmap & 1) != 0) {
+#if defined(__PIC__) && defined(mingw32_TARGET_OS)
+ // Special-case to handle references to closures hiding out in DLLs, since
+ // double indirections required to get at those. The code generator knows
+ // which is which when generating the SRT, so it stores the (indirect)
+ // reference to the DLL closure in the table by first adding one to it.
+ // We check for this here, and undo the addition before evacuating it.
+ //
+ // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
+ // closure that's fixed at link-time, and no extra magic is required.
+ if ( (unsigned long)(*srt) & 0x1 ) {
+ evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
+ } else {
+ evacuate(p);
+ }
+#else
+ evacuate(p);
+#endif
+ }
+ p++;
+ bitmap = bitmap >> 1;
+ }
+}
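+
+// A concrete sketch of the DLL case above, using the Windows import
+// naming convention as an illustration: for a closure imported from a
+// DLL, the SRT entry holds the address of the import slot (e.g.
+// __imp_foo_closure) with bit 0 set.  Untagging it yields the address
+// of the slot containing the real closure pointer, which is exactly
+// the StgClosure ** that evacuate() expects.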
+
+
+STATIC_INLINE GNUC_ATTR_HOT void
+scavenge_thunk_srt(const StgInfoTable *info)
+{
+ StgThunkInfoTable *thunk_info;
+
+ if (!major_gc) return;
+
+ thunk_info = itbl_to_thunk_itbl(info);
+ scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
+}
+
+STATIC_INLINE GNUC_ATTR_HOT void
+scavenge_fun_srt(const StgInfoTable *info)
+{
+ StgFunInfoTable *fun_info;
+
+ if (!major_gc) return;
+
+ fun_info = itbl_to_fun_itbl(info);
+ scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
+}
+
+/* -----------------------------------------------------------------------------
+ Scavenge a block from the given scan pointer up to bd->free.
+
+ evac_step is set by the caller to be either zero (for a step in a
+ generation < N) or the step being scavenged (for a step in a
+ generation >= N).
- We sometimes temporarily change evac_gen back to zero if we're
- scavenging a mutable object where early promotion isn't such a good
+ We sometimes temporarily change evac_step back to zero if we're
+ scavenging a mutable object where eager promotion isn't such a good
idea.
-------------------------------------------------------------------------- */
-void
-scavenge(step *stp)
+static GNUC_ATTR_HOT void
+scavenge_block (bdescr *bd)
{
StgPtr p, q;
StgInfoTable *info;
- bdescr *bd;
- nat saved_evac_gen = evac_gen;
-
- p = stp->scan;
- bd = stp->scan_bd;
+ step *saved_evac_step;
+ rtsBool saved_eager_promotion;
+ step_workspace *ws;
- failed_to_evac = rtsFalse;
+ debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
+ bd->start, bd->gen_no, bd->step->no, bd->u.scan);
- /* scavenge phase - standard breadth-first scavenging of the
- * evacuated objects
- */
+ gct->scan_bd = bd;
+ gct->evac_step = bd->step;
+ saved_evac_step = gct->evac_step;
+ saved_eager_promotion = gct->eager_promotion;
+ gct->failed_to_evac = rtsFalse;
- while (bd != stp->hp_bd || p < stp->hp) {
+ ws = &gct->steps[bd->step->abs_no];
- // If we're at the end of this block, move on to the next block
- if (bd != stp->hp_bd && p == bd->free) {
- bd = bd->link;
- p = bd->start;
- continue;
- }
+ p = bd->u.scan;
+
+ // we might be evacuating into the very object that we're
+ // scavenging, so we have to check the real bd->free pointer each
+ // time around the loop.
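+ // (For example, when bd is also the current todo block, evacuating
+ // a field of the object at p may copy a new object into this same
+ // block and advance ws->todo_free; re-reading the limits on every
+ // iteration ensures those new objects get scanned as well.)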
+ while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
+ ASSERT(bd->link == NULL);
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
- ASSERT(thunk_selector_depth == 0);
+ ASSERT(gct->thunk_selector_depth == 0);
q = p;
switch (info->type) {
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
StgMVar *mvar = ((StgMVar *)p);
- evac_gen = 0;
- mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
- mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
- mvar->value = evacuate((StgClosure *)mvar->value);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable.
+ gct->eager_promotion = rtsFalse;
+ evacuate((StgClosure **)&mvar->head);
+ evacuate((StgClosure **)&mvar->tail);
+ evacuate((StgClosure **)&mvar->value);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
p += sizeofW(StgMVar);
break;
}
case FUN_2_0:
scavenge_fun_srt(info);
- ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
p += sizeofW(StgHeader) + 2;
break;
case THUNK_2_0:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[1]);
+ evacuate(&((StgThunk *)p)->payload[0]);
p += sizeofW(StgThunk) + 2;
break;
case CONSTR_2_0:
- ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
p += sizeofW(StgHeader) + 2;
break;
case THUNK_1_0:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[0]);
p += sizeofW(StgThunk) + 1;
break;
case FUN_1_0:
scavenge_fun_srt(info);
case CONSTR_1_0:
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[0]);
p += sizeofW(StgHeader) + 1;
break;
case THUNK_1_1:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[0]);
p += sizeofW(StgThunk) + 2;
break;
case FUN_1_1:
scavenge_fun_srt(info);
case CONSTR_1_1:
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[0]);
p += sizeofW(StgHeader) + 2;
break;
scavenge_thunk_srt(info);
end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p += info->layout.payload.nptrs;
break;
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p += info->layout.payload.nptrs;
break;
case BCO: {
StgBCO *bco = (StgBCO *)p;
- bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
- bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
- bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
- bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
+ evacuate((StgClosure **)&bco->instrs);
+ evacuate((StgClosure **)&bco->literals);
+ evacuate((StgClosure **)&bco->ptrs);
p += bco_sizeW(bco);
break;
}
case IND_PERM:
- if (stp->gen->no != 0) {
+ if (bd->gen_no != 0) {
#ifdef PROFILING
// @LDV profiling
// No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
}
// fall through
case IND_OLDGEN_PERM:
- ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
+ evacuate(&((StgInd *)p)->indirectee);
p += sizeofW(StgInd);
break;
case MUT_VAR_CLEAN:
- case MUT_VAR_DIRTY: {
- rtsBool saved_eager_promotion = eager_promotion;
-
- eager_promotion = rtsFalse;
- ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- eager_promotion = saved_eager_promotion;
+ case MUT_VAR_DIRTY:
+ gct->eager_promotion = rtsFalse;
+ evacuate(&((StgMutVar *)p)->var);
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
}
p += sizeofW(StgMutVar);
break;
- }
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
p += BLACKHOLE_sizeW();
break;
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
- s->selectee = evacuate(s->selectee);
+ evacuate(&s->selectee);
p += THUNK_SELECTOR_sizeW();
break;
}
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- ap->fun = evacuate(ap->fun);
+ evacuate(&ap->fun);
scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
p = (StgPtr)ap->payload + ap->size;
break;
// follow everything
{
StgPtr next;
- rtsBool saved_eager;
// We don't eagerly promote objects pointed to by a mutable
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = eager_promotion;
- eager_promotion = rtsFalse;
+ gct->eager_promotion = rtsFalse;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
- eager_promotion = saved_eager;
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
- failed_to_evac = rtsTrue; // always put it on the mutable list.
+ gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
break;
}
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
case TSO:
{
StgTSO *tso = (StgTSO *)p;
- rtsBool saved_eager = eager_promotion;
-
- eager_promotion = rtsFalse;
- scavengeTSO(tso);
- eager_promotion = saved_eager;
-
- if (failed_to_evac) {
- tso->flags |= TSO_DIRTY;
- } else {
- tso->flags &= ~TSO_DIRTY;
- }
-
- failed_to_evac = rtsTrue; // always on the mutable list
+ scavengeTSO(tso);
p += tso_sizeW(tso);
break;
}
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- evac_gen = 0;
- wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
- wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
- wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&wq->closure);
+ evacuate((StgClosure **)&wq->next_queue_entry);
+ evacuate((StgClosure **)&wq->prev_queue_entry);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVarWatchQueue);
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- evac_gen = 0;
- tvar->current_value = evacuate((StgClosure*)tvar->current_value);
- tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&tvar->current_value);
+ evacuate((StgClosure **)&tvar->first_watch_queue_entry);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVar);
break;
}
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- evac_gen = 0;
- trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
- trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
- trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&trec->enclosing_trec);
+ evacuate((StgClosure **)&trec->current_chunk);
+ evacuate((StgClosure **)&trec->invariants_to_check);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecHeader);
break;
}
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- evac_gen = 0;
- tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
- e->expected_value = evacuate((StgClosure*)e->expected_value);
- e->new_value = evacuate((StgClosure*)e->new_value);
+ evacuate((StgClosure **)&e->tvar);
+ evacuate((StgClosure **)&e->expected_value);
+ evacuate((StgClosure **)&e->new_value);
}
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecChunk);
break;
}
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- evac_gen = 0;
- invariant->code = (StgClosure *)evacuate(invariant->code);
- invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate(&invariant->code);
+ evacuate((StgClosure **)&invariant->last_execution);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgAtomicInvariant);
break;
}
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- evac_gen = 0;
- queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
- queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
- queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&queue->invariant);
+ evacuate((StgClosure **)&queue->my_execution);
+ evacuate((StgClosure **)&queue->next_queue_entry);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgInvariantCheckQueue);
break;
}
* Case (b) arises if we didn't manage to promote everything that
* the current object points to into the current generation.
*/
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- if (stp->gen_no > 0) {
- recordMutableGen((StgClosure *)q, stp->gen);
+ if (gct->failed_to_evac) {
+ gct->failed_to_evac = rtsFalse;
+ if (bd->gen_no > 0) {
+ recordMutableGen_GC((StgClosure *)q, bd->gen_no);
}
}
}
- stp->scan_bd = bd;
- stp->scan = p;
-}
+ if (p > bd->free) {
+ gct->copied += ws->todo_free - bd->free;
+ bd->free = p;
+ }
+ debugTrace(DEBUG_gc, " scavenged %ld bytes",
+ (unsigned long)((bd->free - bd->u.scan) * sizeof(W_)));
+
+ // update stats: this is a block that has been scavenged
+ gct->scanned += bd->free - bd->u.scan;
+ bd->u.scan = bd->free;
+
+ if (bd != ws->todo_bd) {
+ // we're not going to evac any more objects into
+ // this block, so push it now.
+ push_scanned_block(bd, ws);
+ }
+
+ gct->scan_bd = NULL;
+}
/* -----------------------------------------------------------------------------
Scavenge everything on the mark stack.
doesn't need to advance the pointer on to the next object.
-------------------------------------------------------------------------- */
-void
+static void
scavenge_mark_stack(void)
{
StgPtr p, q;
StgInfoTable *info;
- nat saved_evac_gen;
+ step *saved_evac_step;
- evac_gen = oldest_gen->no;
- saved_evac_gen = evac_gen;
+ gct->evac_step = &oldest_gen->steps[0];
+ saved_evac_step = gct->evac_step;
linear_scan:
while (!mark_stack_empty()) {
info = get_itbl((StgClosure *)p);
q = p;
- switch (info->type) {
+ switch (info->type) {
- case MVAR:
- {
- StgMVar *mvar = ((StgMVar *)p);
- evac_gen = 0;
- mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
- mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
- mvar->value = evacuate((StgClosure *)mvar->value);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable.
- break;
- }
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
+ {
+ rtsBool saved_eager_promotion = gct->eager_promotion;
+
+ StgMVar *mvar = ((StgMVar *)p);
+ gct->eager_promotion = rtsFalse;
+ evacuate((StgClosure **)&mvar->head);
+ evacuate((StgClosure **)&mvar->tail);
+ evacuate((StgClosure **)&mvar->value);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
+ break;
+ }
case FUN_2_0:
scavenge_fun_srt(info);
- ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
break;
case THUNK_2_0:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[1]);
+ evacuate(&((StgThunk *)p)->payload[0]);
break;
case CONSTR_2_0:
- ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[1]);
+ evacuate(&((StgClosure *)p)->payload[0]);
break;
case FUN_1_0:
case FUN_1_1:
scavenge_fun_srt(info);
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[0]);
break;
case THUNK_1_0:
case THUNK_1_1:
scavenge_thunk_srt(info);
- ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
+ evacuate(&((StgThunk *)p)->payload[0]);
break;
case CONSTR_1_0:
case CONSTR_1_1:
- ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
+ evacuate(&((StgClosure *)p)->payload[0]);
break;
case FUN_0_1:
scavenge_thunk_srt(info);
end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
break;
}
end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
break;
}
case BCO: {
StgBCO *bco = (StgBCO *)p;
- bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
- bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
- bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
- bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
+ evacuate((StgClosure **)&bco->instrs);
+ evacuate((StgClosure **)&bco->literals);
+ evacuate((StgClosure **)&bco->ptrs);
break;
}
case IND_OLDGEN:
case IND_OLDGEN_PERM:
- ((StgInd *)p)->indirectee =
- evacuate(((StgInd *)p)->indirectee);
+ evacuate(&((StgInd *)p)->indirectee);
break;
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
- rtsBool saved_eager_promotion = eager_promotion;
+ rtsBool saved_eager_promotion = gct->eager_promotion;
- eager_promotion = rtsFalse;
- ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- eager_promotion = saved_eager_promotion;
+ gct->eager_promotion = rtsFalse;
+ evacuate(&((StgMutVar *)p)->var);
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
}
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
case ARR_WORDS:
break;
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
- s->selectee = evacuate(s->selectee);
+ evacuate(&s->selectee);
break;
}
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- ap->fun = evacuate(ap->fun);
+ evacuate(&ap->fun);
scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
break;
}
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = eager_promotion;
- eager_promotion = rtsFalse;
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
- eager_promotion = saved_eager;
+ gct->eager_promotion = saved_eager;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
- failed_to_evac = rtsTrue; // mutable anyhow.
+ gct->failed_to_evac = rtsTrue; // mutable anyhow.
break;
}
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
case TSO:
{
- StgTSO *tso = (StgTSO *)p;
- rtsBool saved_eager = eager_promotion;
-
- eager_promotion = rtsFalse;
- scavengeTSO(tso);
- eager_promotion = saved_eager;
-
- if (failed_to_evac) {
- tso->flags |= TSO_DIRTY;
- } else {
- tso->flags &= ~TSO_DIRTY;
- }
-
- failed_to_evac = rtsTrue; // always on the mutable list
+ scavengeTSO((StgTSO*)p);
break;
}
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- evac_gen = 0;
- wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
- wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
- wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&wq->closure);
+ evacuate((StgClosure **)&wq->next_queue_entry);
+ evacuate((StgClosure **)&wq->prev_queue_entry);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- evac_gen = 0;
- tvar->current_value = evacuate((StgClosure*)tvar->current_value);
- tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&tvar->current_value);
+ evacuate((StgClosure **)&tvar->first_watch_queue_entry);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- evac_gen = 0;
- tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
- e->expected_value = evacuate((StgClosure*)e->expected_value);
- e->new_value = evacuate((StgClosure*)e->new_value);
+ evacuate((StgClosure **)&e->tvar);
+ evacuate((StgClosure **)&e->expected_value);
+ evacuate((StgClosure **)&e->new_value);
}
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- evac_gen = 0;
- trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
- trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
- trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&trec->enclosing_trec);
+ evacuate((StgClosure **)&trec->current_chunk);
+ evacuate((StgClosure **)&trec->invariants_to_check);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- evac_gen = 0;
- invariant->code = (StgClosure *)evacuate(invariant->code);
- invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate(&invariant->code);
+ evacuate((StgClosure **)&invariant->last_execution);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- evac_gen = 0;
- queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
- queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
- queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&queue->invariant);
+ evacuate((StgClosure **)&queue->my_execution);
+ evacuate((StgClosure **)&queue->next_queue_entry);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
info->type, p);
}
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- if (evac_gen > 0) {
- recordMutableGen((StgClosure *)q, &generations[evac_gen]);
+ if (gct->failed_to_evac) {
+ gct->failed_to_evac = rtsFalse;
+ if (gct->evac_step) {
+ recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no);
}
}
scavenge_one(StgPtr p)
{
const StgInfoTable *info;
- nat saved_evac_gen = evac_gen;
+ step *saved_evac_step = gct->evac_step;
rtsBool no_luck;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
switch (info->type) {
- case MVAR:
+ case MVAR_CLEAN:
+ case MVAR_DIRTY:
{
+ rtsBool saved_eager_promotion = gct->eager_promotion;
+
StgMVar *mvar = ((StgMVar *)p);
- evac_gen = 0;
- mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
- mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
- mvar->value = evacuate((StgClosure *)mvar->value);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable.
+ gct->eager_promotion = rtsFalse;
+ evacuate((StgClosure **)&mvar->head);
+ evacuate((StgClosure **)&mvar->tail);
+ evacuate((StgClosure **)&mvar->value);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ mvar->header.info = &stg_MVAR_DIRTY_info;
+ } else {
+ mvar->header.info = &stg_MVAR_CLEAN_info;
+ }
break;
}
end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
- *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
+ evacuate((StgClosure **)q);
}
break;
}
end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
- *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
+ evacuate((StgClosure **)q);
}
break;
}
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
StgPtr q = p;
- rtsBool saved_eager_promotion = eager_promotion;
+ rtsBool saved_eager_promotion = gct->eager_promotion;
- eager_promotion = rtsFalse;
- ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- eager_promotion = saved_eager_promotion;
+ gct->eager_promotion = rtsFalse;
+ evacuate(&((StgMutVar *)p)->var);
+ gct->eager_promotion = saved_eager_promotion;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
}
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
break;
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
- s->selectee = evacuate(s->selectee);
+ evacuate(&s->selectee);
break;
}
{
StgAP_STACK *ap = (StgAP_STACK *)p;
- ap->fun = evacuate(ap->fun);
+ evacuate(&ap->fun);
scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
p = (StgPtr)ap->payload + ap->size;
break;
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = eager_promotion;
- eager_promotion = rtsFalse;
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
q = p;
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
- eager_promotion = saved_eager;
+ gct->eager_promotion = saved_eager;
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
- failed_to_evac = rtsTrue;
+ gct->failed_to_evac = rtsTrue;
break;
}
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
- if (failed_to_evac) {
+ if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
case TSO:
{
- StgTSO *tso = (StgTSO *)p;
- rtsBool saved_eager = eager_promotion;
-
- eager_promotion = rtsFalse;
- scavengeTSO(tso);
- eager_promotion = saved_eager;
-
- if (failed_to_evac) {
- tso->flags |= TSO_DIRTY;
- } else {
- tso->flags &= ~TSO_DIRTY;
- }
-
- failed_to_evac = rtsTrue; // always on the mutable list
+ scavengeTSO((StgTSO*)p);
break;
}
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- evac_gen = 0;
- wq->closure = (StgClosure*)evacuate((StgClosure*)wq->closure);
- wq->next_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->next_queue_entry);
- wq->prev_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&wq->closure);
+ evacuate((StgClosure **)&wq->next_queue_entry);
+ evacuate((StgClosure **)&wq->prev_queue_entry);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- evac_gen = 0;
- tvar->current_value = evacuate((StgClosure*)tvar->current_value);
- tvar->first_watch_queue_entry = (StgTVarWatchQueue *)evacuate((StgClosure*)tvar->first_watch_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&tvar->current_value);
+ evacuate((StgClosure **)&tvar->first_watch_queue_entry);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- evac_gen = 0;
- trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
- trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
- trec->invariants_to_check = (StgInvariantCheckQueue *)evacuate((StgClosure*)trec->invariants_to_check);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&trec->enclosing_trec);
+ evacuate((StgClosure **)&trec->current_chunk);
+ evacuate((StgClosure **)&trec->invariants_to_check);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- evac_gen = 0;
- tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
- e->expected_value = evacuate((StgClosure*)e->expected_value);
- e->new_value = evacuate((StgClosure*)e->new_value);
+ evacuate((StgClosure **)&e->tvar);
+ evacuate((StgClosure **)&e->expected_value);
+ evacuate((StgClosure **)&e->new_value);
}
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- evac_gen = 0;
- invariant->code = (StgClosure *)evacuate(invariant->code);
- invariant->last_execution = (StgTRecHeader *)evacuate((StgClosure*)invariant->last_execution);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate(&invariant->code);
+ evacuate((StgClosure **)&invariant->last_execution);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- evac_gen = 0;
- queue->invariant = (StgAtomicInvariant *)evacuate((StgClosure*)queue->invariant);
- queue->my_execution = (StgTRecHeader *)evacuate((StgClosure*)queue->my_execution);
- queue->next_queue_entry = (StgInvariantCheckQueue *)evacuate((StgClosure*)queue->next_queue_entry);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable
+ gct->evac_step = 0;
+ evacuate((StgClosure **)&queue->invariant);
+ evacuate((StgClosure **)&queue->my_execution);
+ evacuate((StgClosure **)&queue->next_queue_entry);
+ gct->evac_step = saved_evac_step;
+ gct->failed_to_evac = rtsTrue; // mutable
break;
}
* evacuated, so we perform that check here.
*/
StgClosure *q = ((StgInd *)p)->indirectee;
- if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
+ if (HEAP_ALLOCED_GC(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
break;
}
- ((StgInd *)p)->indirectee = evacuate(q);
+ evacuate(&((StgInd *)p)->indirectee);
}
#if 0 && defined(DEBUG)
barf("scavenge_one: strange object %d", (int)(info->type));
}
- no_luck = failed_to_evac;
- failed_to_evac = rtsFalse;
+ no_luck = gct->failed_to_evac;
+ gct->failed_to_evac = rtsFalse;
return (no_luck);
}
-------------------------------------------------------------------------- */
void
-scavenge_mutable_list(generation *gen)
+scavenge_mutable_list(bdescr *bd, generation *gen)
{
- bdescr *bd;
StgPtr p, q;
- bd = gen->saved_mut_list;
-
- evac_gen = gen->no;
+ gct->evac_step = &gen->steps[0];
for (; bd != NULL; bd = bd->link) {
for (q = bd->start; q < bd->free; q++) {
p = (StgPtr)*q;
case MUT_ARR_PTRS_FROZEN:
case MUT_ARR_PTRS_FROZEN0:
mutlist_MUTARRS++; break;
+ case MVAR_CLEAN:
+ barf("MVAR_CLEAN on mutable list");
+ case MVAR_DIRTY:
+ mutlist_MVARS++; break;
default:
mutlist_OTHERS++; break;
}
// definitely doesn't point into a young generation.
// Clean objects don't need to be scavenged. Some clean
// objects (MUT_VAR_CLEAN) are not kept on the mutable
- // list at all; others, such as MUT_ARR_PTRS_CLEAN and
- // TSO, are always on the mutable list.
+ // list at all; others, such as MUT_ARR_PTRS_CLEAN,
+ // are always on the mutable list.
//
switch (get_itbl((StgClosure *)p)->type) {
case MUT_ARR_PTRS_CLEAN:
- recordMutableGen((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen->no);
continue;
case TSO: {
StgTSO *tso = (StgTSO *)p;
if ((tso->flags & TSO_DIRTY) == 0) {
- // A clean TSO: we don't have to traverse its
- // stack. However, we *do* follow the link field:
- // we don't want to have to mark a TSO dirty just
- // because we put it on a different queue.
- if (tso->why_blocked != BlockedOnBlackHole) {
- tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
- }
- recordMutableGen((StgClosure *)p,gen);
+ // Must be on the mutable list because its link
+ // field is dirty.
+ ASSERT(tso->flags & TSO_LINK_DIRTY);
+
+ scavenge_TSO_link(tso);
+ if (gct->failed_to_evac) {
+ recordMutableGen_GC((StgClosure *)p,gen->no);
+ gct->failed_to_evac = rtsFalse;
+ } else {
+ tso->flags &= ~TSO_LINK_DIRTY;
+ }
continue;
}
}
if (scavenge_one(p)) {
// didn't manage to promote everything, so put the
// object back on the list.
- recordMutableGen((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen->no);
}
}
}
+}
+
+void
+scavenge_capability_mut_lists (Capability *cap)
+{
+ nat g;
- // free the old mut_list
- freeChain(gen->saved_mut_list);
- gen->saved_mut_list = NULL;
+ /* Mutable lists from each generation > N
+ * we want to *scavenge* these roots, not evacuate them: they're not
+ * going to move in this GC.
+ * Also do them in reverse generation order, for the usual reason:
+ * namely to reduce the likelihood of spurious old->new pointers.
+ */
+ for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
+ scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
+ freeChain_sync(cap->saved_mut_lists[g]);
+ cap->saved_mut_lists[g] = NULL;
+ }
}
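+
+// In the threaded RTS each Capability keeps one mutable list per
+// generation; saved_mut_lists[] holds those lists as they stood when
+// this GC began, so that entries recorded *during* the GC (via
+// recordMutableGen_GC()) accumulate separately from the ones being
+// scavenged here.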
/* -----------------------------------------------------------------------------
remove non-mutable objects from the mutable list at this point.
-------------------------------------------------------------------------- */
-void
+static void
scavenge_static(void)
{
- StgClosure* p = static_objects;
+ StgClosure* p;
const StgInfoTable *info;
+ debugTrace(DEBUG_gc, "scavenging static objects");
+
/* Always evacuate straight to the oldest generation for static
* objects */
- evac_gen = oldest_gen->no;
+ gct->evac_step = &oldest_gen->steps[0];
/* keep going until we've scavenged all the objects on the linked
list... */
- while (p != END_OF_STATIC_LIST) {
+ while (1) {
+
+ /* get the next static object from the list. Remember, there might
+ * be more stuff on this list after each evacuation...
+ * (gct->static_objects may grow as we scavenge)
+ */
+ p = gct->static_objects;
+ if (p == END_OF_STATIC_LIST) {
+ break;
+ }
+
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl(p);
/*
- if (info->type==RBH)
- info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
+ if (info->type==RBH)
+ info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
*/
// make sure the info pointer is into text space
/* Take this object *off* the static_objects list,
* and put it on the scavenged_static_objects list.
*/
- static_objects = *STATIC_LINK(info,p);
- *STATIC_LINK(info,p) = scavenged_static_objects;
- scavenged_static_objects = p;
+ gct->static_objects = *STATIC_LINK(info,p);
+ *STATIC_LINK(info,p) = gct->scavenged_static_objects;
+ gct->scavenged_static_objects = p;
switch (info -> type) {
case IND_STATIC:
{
StgInd *ind = (StgInd *)p;
- ind->indirectee = evacuate(ind->indirectee);
+ evacuate(&ind->indirectee);
/* might fail to evacuate it, in which case we have to pop it
* back on the mutable list of the oldest generation. We
* leave it *on* the scavenged_static_objects list, though,
* in case we visit this object again.
*/
- if (failed_to_evac) {
- failed_to_evac = rtsFalse;
- recordMutableGen((StgClosure *)p,oldest_gen);
+ if (gct->failed_to_evac) {
+ gct->failed_to_evac = rtsFalse;
+ recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
}
break;
}
next = (P_)p->payload + info->layout.payload.ptrs;
// evacuate the pointers
for (q = (P_)p->payload; q < next; q++) {
- *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
+ evacuate((StgClosure **)q);
}
break;
}
barf("scavenge_static: strange closure %d", (int)(info->type));
}
- ASSERT(failed_to_evac == rtsFalse);
-
- /* get the next static object from the list. Remember, there might
- * be more stuff on this list now that we've done some evacuating!
- * (static_objects is a global)
- */
- p = static_objects;
+ ASSERT(gct->failed_to_evac == rtsFalse);
}
}
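+
+// The static object list is threaded through the objects' static link
+// fields: moving an object from gct->static_objects onto
+// gct->scavenged_static_objects above guarantees it is scavenged at
+// most once, while still letting evacuate() prepend newly-reached
+// static objects onto gct->static_objects as we go.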
bitmap = large_bitmap->bitmap[b];
for (i = 0; i < size; ) {
if ((bitmap & 1) == 0) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
i++;
p++;
{
while (size > 0) {
if ((bitmap & 1) == 0) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
}
p++;
bitmap = bitmap >> 1;
// the indirection into an IND_PERM, so that evacuate will
// copy the indirection into the old generation instead of
// discarding it.
- if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) {
- ((StgUpdateFrame *)p)->updatee->header.info =
- (StgInfoTable *)&stg_IND_PERM_info;
- }
- ((StgUpdateFrame *)p)->updatee
- = evacuate(((StgUpdateFrame *)p)->updatee);
- p += sizeofW(StgUpdateFrame);
- continue;
+ //
+ // Note [upd-black-hole]
+ // One slight hiccup is that the THUNK_SELECTOR machinery can
+ // overwrite the updatee with an IND. In parallel GC, this
+ // could even be happening concurrently, so we can't check for
+ // the IND. Fortunately if we assume that blackholing is
+ // happening (either lazy or eager), then we can be sure that
+ // the updatee is never a THUNK_SELECTOR and we're ok.
+ // NB. this is a new invariant: blackholing is not optional.
+ {
+ nat type;
+ const StgInfoTable *i;
+ StgClosure *updatee;
+
+ updatee = ((StgUpdateFrame *)p)->updatee;
+ i = updatee->header.info;
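+ // If the updatee has already been evacuated (possibly by
+ // another GC thread), its header word is a forwarding pointer
+ // rather than an info pointer, so it must not be interpreted
+ // with get_itbl(); see Note [upd-black-hole] above.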
+ if (!IS_FORWARDING_PTR(i)) {
+ type = get_itbl(updatee)->type;
+ if (type == IND) {
+ updatee->header.info = &stg_IND_PERM_info;
+ } else if (type == IND_OLDGEN) {
+ updatee->header.info = &stg_IND_OLDGEN_PERM_info;
+ }
+ }
+ evacuate(&((StgUpdateFrame *)p)->updatee);
+ ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
+ p += sizeofW(StgUpdateFrame);
+ continue;
+ }
// small bitmap (< 32 entries, or 64 on a 64-bit machine)
case CATCH_STM_FRAME:
case STOP_FRAME:
case CATCH_FRAME:
case RET_SMALL:
- case RET_VEC_SMALL:
bitmap = BITMAP_BITS(info->i.layout.bitmap);
size = BITMAP_SIZE(info->i.layout.bitmap);
// NOTE: the payload starts immediately after the info-ptr, we
nat size;
p++;
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
bco = (StgBCO *)*p;
p++;
size = BCO_BITMAP_SIZE(bco);
// large bitmap (> 32 entries, or > 64 on a 64-bit machine)
case RET_BIG:
- case RET_VEC_BIG:
{
nat size;
// follow the ptr words
for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
- *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
+ evacuate((StgClosure **)p);
p++;
}
continue;
StgRetFun *ret_fun = (StgRetFun *)p;
StgFunInfoTable *fun_info;
- ret_fun->fun = evacuate(ret_fun->fun);
- fun_info = get_fun_itbl(ret_fun->fun);
+ evacuate(&ret_fun->fun);
+ fun_info = get_fun_itbl(UNTAG_CLOSURE(ret_fun->fun));
p = scavenge_arg_block(fun_info, ret_fun->payload);
goto follow_srt;
}
/*-----------------------------------------------------------------------------
scavenge the large object list.
- evac_gen set by caller; similar games played with evac_gen as with
+ evac_step set by caller; similar games played with evac_step as with
scavenge() - see comment at the top of scavenge(). Most large
- objects are (repeatedly) mutable, so most of the time evac_gen will
+ objects are (repeatedly) mutable, so most of the time evac_step will
be zero.
--------------------------------------------------------------------------- */
-void
-scavenge_large(step *stp)
+static void
+scavenge_large (step_workspace *ws)
+{
+ bdescr *bd;
+ StgPtr p;
+
+ gct->evac_step = ws->step;
+
+ bd = ws->todo_large_objects;
+
+ for (; bd != NULL; bd = ws->todo_large_objects) {
+
+ // take this object *off* the large objects list and put it on
+ // the scavenged large objects list. This is so that we can
+ // treat new_large_objects as a stack and push new objects on
+ // the front when evacuating.
+ ws->todo_large_objects = bd->link;
+
+ ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
+ dbl_link_onto(bd, &ws->step->scavenged_large_objects);
+ ws->step->n_scavenged_large_blocks += bd->blocks;
+ RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
+
+ p = bd->start;
+ if (scavenge_one(p)) {
+ if (ws->step->gen_no > 0) {
+ recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
+ }
+ }
+
+ // stats
+ gct->scanned += closure_sizeW((StgClosure*)p);
+ }
+}
+
+/* ----------------------------------------------------------------------------
+ Look for work to do.
+
+ We look for the oldest step that has either a todo block that can
+ be scanned, or a block of work on the global queue that we can
+ scan.
+
+ It is important to take work from the *oldest* generation that
+ has work available, because that minimizes the likelihood of
+ evacuating objects into a young generation when they should have
+ been eagerly promoted. This really does make a difference (the
+ cacheprof benchmark is one that is affected).
+
+ We also want to scan the todo block if possible before grabbing
+ work from the global queue, the reason being that we don't want to
+ steal work from the global queue and starve other threads if there
+ is other work we can usefully be doing.
+ ------------------------------------------------------------------------- */
+
+static rtsBool
+scavenge_find_work (void)
{
- bdescr *bd;
- StgPtr p;
+ int s;
+ step_workspace *ws;
+ rtsBool did_something, did_anything;
+ bdescr *bd;
- bd = stp->new_large_objects;
+ gct->scav_find_work++;
+
+ did_anything = rtsFalse;
+
+loop:
+ did_something = rtsFalse;
+ for (s = total_steps-1; s >= 0; s--) {
+ if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+ ws = &gct->steps[s];
+
+ gct->scan_bd = NULL;
+
+ // If we have a scan block with some work to do,
+ // scavenge everything up to the free pointer.
+ if (ws->todo_bd->u.scan < ws->todo_free)
+ {
+ scavenge_block(ws->todo_bd);
+ did_something = rtsTrue;
+ break;
+ }
- for (; bd != NULL; bd = stp->new_large_objects) {
+ // If we have any large objects to scavenge, do them now.
+ if (ws->todo_large_objects) {
+ scavenge_large(ws);
+ did_something = rtsTrue;
+ break;
+ }
- /* take this object *off* the large objects list and put it on
- * the scavenged large objects list. This is so that we can
- * treat new_large_objects as a stack and push new objects on
- * the front when evacuating.
- */
- stp->new_large_objects = bd->link;
- dbl_link_onto(bd, &stp->scavenged_large_objects);
+ if ((bd = grab_local_todo_block(ws)) != NULL) {
+ scavenge_block(bd);
+ did_something = rtsTrue;
+ break;
+ }
+ }
- // update the block count in this step.
- stp->n_scavenged_large_blocks += bd->blocks;
+ if (did_something) {
+ did_anything = rtsTrue;
+ goto loop;
+ }
- p = bd->start;
- if (scavenge_one(p)) {
- if (stp->gen_no > 0) {
- recordMutableGen((StgClosure *)p, stp->gen);
- }
+#if defined(THREADED_RTS)
+ if (work_stealing) {
+ // look for work to steal
+ for (s = total_steps-1; s >= 0; s--) {
+ if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+ if ((bd = steal_todo_block(s)) != NULL) {
+ scavenge_block(bd);
+ did_something = rtsTrue;
+ break;
+ }
+ }
+
+ if (did_something) {
+ did_anything = rtsTrue;
+ goto loop;
+ }
}
- }
+#endif
+
+ // only return when there is no more work to do
+
+ return did_anything;
+}
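+
+// To recap the search order implemented above: for each step, oldest
+// first, we (1) finish our own partially-scanned todo block, (2)
+// scavenge any pending large objects, then (3) take a block from our
+// local todo queue; only when no step has work of its own do we (4)
+// try to steal a todo block from another GC thread.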
+
+/* ----------------------------------------------------------------------------
+ Scavenge until we can't find anything more to scavenge.
+ ------------------------------------------------------------------------- */
+
+void
+scavenge_loop(void)
+{
+ rtsBool work_to_do;
+
+loop:
+ work_to_do = rtsFalse;
+
+ // scavenge static objects
+ if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
+ IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
+ scavenge_static();
+ }
+
+ // scavenge objects in compacted generation
+ if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
+ (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+ scavenge_mark_stack();
+ work_to_do = rtsTrue;
+ }
+
+ // Order is important here: we want to deal in full blocks as
+ // much as possible, so go for global work in preference to
+ // local work. Only if all the global work has been exhausted
+ // do we start scavenging the fragments of blocks in the local
+ // workspaces.
+ if (scavenge_find_work()) goto loop;
+
+ if (work_to_do) goto loop;
}
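+
+/* scavenge_loop() is the top-level entry for this module: each GC
+ * thread calls it from the GC driver and it returns only once a full
+ * pass finds no static objects to scavenge, no mark-stack work, and
+ * no more blocks to scan.
+ */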