*
* ---------------------------------------------------------------------------*/
+#include "PosixSource.h"
#include "Rts.h"
-#include "RtsFlags.h"
+
#include "Storage.h"
-#include "MBlock.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
+#include "MarkStack.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
-#include "LdvProfile.h"
#include "Sanity.h"
#include "Capability.h"
+#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
-# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
+# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
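+
+// In a THREADED_RTS build without PARALLEL_GC, these aliases make this
+// file define and call the sequential "1"-suffixed variants (e.g.
+// evacuate(&p) becomes evacuate1(&p)); the parallel versions come from
+// a second compilation of the same sources with PARALLEL_GC defined,
+// which adds the atomic operations the parallel collector needs.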
Scavenge a TSO.
-------------------------------------------------------------------------- */
-STATIC_INLINE void
-scavenge_TSO_link (StgTSO *tso)
-{
- // We don't always chase the link field: TSOs on the blackhole
- // queue are not automatically alive, so the link field is a
- // "weak" pointer in that case.
- if (tso->why_blocked != BlockedOnBlackHole) {
- evacuate((StgClosure **)&tso->_link);
- }
-}
-
static void
scavengeTSO (StgTSO *tso)
{
rtsBool saved_eager;
- if (tso->what_next == ThreadRelocated) {
- // the only way this can happen is if the old TSO was on the
- // mutable list. We might have other links to this defunct
- // TSO, so we must update its link field.
- evacuate((StgClosure**)&tso->_link);
- return;
- }
-
debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
+ // update the pointer from the Task.
+ if (tso->bound != NULL) {
+ tso->bound->tso = tso;
+ }
+
saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
+ evacuate((StgClosure **)&tso->blocked_exceptions);
+ evacuate((StgClosure **)&tso->bq);
+
+ // scavenge current transaction record
+ evacuate((StgClosure **)&tso->trec);
+
+ evacuate((StgClosure **)&tso->stackobj);
+
+ evacuate((StgClosure **)&tso->_link);
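+ // _link is now evacuated unconditionally; the old scavenge_TSO_link
+ // treated it as a weak pointer while the TSO sat on a black-hole
+ // queue, but with those queues replaced by BLOCKING_QUEUE objects
+ // that special case is gone.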
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
- || tso->why_blocked == BlockedOnException
+ || tso->why_blocked == BlockedOnMsgThrowTo
+ || tso->why_blocked == NotBlocked
) {
evacuate(&tso->block_info.closure);
}
- evacuate((StgClosure **)&tso->blocked_exceptions);
-
- // scavange current transaction record
- evacuate((StgClosure **)&tso->trec);
-
- // scavenge this thread's stack
- scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
+#ifdef THREADED_RTS
+ // in the THREADED_RTS, block_info.closure must always point to a
+ // valid closure, because we assume this in throwTo(). In the
+ // non-threaded RTS it might be an FD (for
+ // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
+ else {
+ tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
+ }
+#endif
- if (gct->failed_to_evac) {
- tso->flags |= TSO_DIRTY;
- scavenge_TSO_link(tso);
- } else {
- tso->flags &= ~TSO_DIRTY;
- scavenge_TSO_link(tso);
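+ // A single dirty bit replaces the old TSO_DIRTY/TSO_LINK_DIRTY flag
+ // pair: failed_to_evac means the TSO still points into a younger
+ // generation, so it has to stay on a mutable list.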
+ tso->dirty = gct->failed_to_evac;
+
+ gct->eager_promotion = saved_eager;
+}
+
+/* -----------------------------------------------------------------------------
+ Mutable arrays of pointers
+ -------------------------------------------------------------------------- */
+
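+// A sketch of the card scheme used below, assuming the usual
+// MUT_ARR_PTRS_CARD_BITS == 7 (one card byte per 128 payload words):
+// an array of 300 pointers covers mutArrPtrsCards(300) == 3 cards, so
+// the first loop scans two full 128-word cards and the tail loop
+// handles the remaining 44 words.  A card is set to 1 only when some
+// pointer in it could not be promoted, which is what later lets
+// scavenge_mut_arr_ptrs_marked skip the clean cards.  The (int) casts
+// in the first loop's bound avoid unsigned wrap-around of "cards - 1"
+// for a zero-length array.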
+static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
+{
+ lnat m;
+ rtsBool any_failed;
+ StgPtr p, q;
+
+ any_failed = rtsFalse;
+ p = (StgPtr)&a->payload[0];
+ for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
+ {
+ q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
+ for (; p < q; p++) {
+ evacuate((StgClosure**)p);
+ }
if (gct->failed_to_evac) {
- tso->flags |= TSO_LINK_DIRTY;
+ any_failed = rtsTrue;
+ *mutArrPtrsCard(a,m) = 1;
+ gct->failed_to_evac = rtsFalse;
} else {
- tso->flags &= ~TSO_LINK_DIRTY;
+ *mutArrPtrsCard(a,m) = 0;
}
}
- gct->eager_promotion = saved_eager;
+ q = (StgPtr)&a->payload[a->ptrs];
+ if (p < q) {
+ for (; p < q; p++) {
+ evacuate((StgClosure**)p);
+ }
+ if (gct->failed_to_evac) {
+ any_failed = rtsTrue;
+ *mutArrPtrsCard(a,m) = 1;
+ gct->failed_to_evac = rtsFalse;
+ } else {
+ *mutArrPtrsCard(a,m) = 0;
+ }
+ }
+
+ gct->failed_to_evac = any_failed;
+ return (StgPtr)a + mut_arr_ptrs_sizeW(a);
+}
+
+// scavenge only the marked areas of a MUT_ARR_PTRS
+static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
+{
+ lnat m;
+ StgPtr p, q;
+ rtsBool any_failed;
+
+ any_failed = rtsFalse;
+ for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
+ {
+ if (*mutArrPtrsCard(a,m) != 0) {
+ p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
+ q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
+ (StgPtr)&a->payload[a->ptrs]);
+ for (; p < q; p++) {
+ evacuate((StgClosure**)p);
+ }
+ if (gct->failed_to_evac) {
+ any_failed = rtsTrue;
+ gct->failed_to_evac = rtsFalse;
+ } else {
+ *mutArrPtrsCard(a,m) = 0;
+ }
+ }
+ }
+
+ gct->failed_to_evac = any_failed;
+ return (StgPtr)a + mut_arr_ptrs_sizeW(a);
}
/* -----------------------------------------------------------------------------
while (bitmap != 0) {
if ((bitmap & 1) != 0) {
-#if defined(__PIC__) && defined(mingw32_TARGET_OS)
+#if defined(__PIC__) && defined(mingw32_HOST_OS)
// Special-case to handle references to closures hiding out in DLLs, since
// double indirections are required to get at those. The code generator knows
// which is which when generating the SRT, so it stores the (indirect)
// If the SRT entry hasn't got bit 0 set, the SRT entry points to a
// closure that's fixed at link-time, and no extra magic is required.
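+ // Reading the convention: a tagged SRT entry (addr | 0x1) is the
+ // address of an indirection cell (e.g. a DLL import-table slot)
+ // holding the closure's real address, so the entry is untagged before
+ // being handed to evacuate(); an untagged entry means the SRT slot
+ // already points at the closure.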
if ( (unsigned long)(*srt) & 0x1 ) {
- evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
+ evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
} else {
evacuate(p);
}
/* -----------------------------------------------------------------------------
Scavenge a block from the given scan pointer up to bd->free.
- evac_step is set by the caller to be either zero (for a step in a
- generation < N) or G where G is the generation of the step being
- scavenged.
+ evac_gen_no is set by the caller to be either zero (for objects in a
+ generation < N) or G where G is the generation being scavenged.
- We sometimes temporarily change evac_step back to zero if we're
+ We sometimes temporarily change evac_gen_no back to zero if we're
scavenging a mutable object where eager promotion isn't such a good
idea.
-------------------------------------------------------------------------- */
{
StgPtr p, q;
StgInfoTable *info;
- step *saved_evac_step;
rtsBool saved_eager_promotion;
- step_workspace *ws;
+ gen_workspace *ws;
- debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
- bd->start, bd->gen_no, bd->step->no, bd->u.scan);
+ debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
+ bd->start, bd->gen_no, bd->u.scan);
gct->scan_bd = bd;
- gct->evac_step = bd->step;
- saved_evac_step = gct->evac_step;
+ gct->evac_gen_no = bd->gen_no;
saved_eager_promotion = gct->eager_promotion;
gct->failed_to_evac = rtsFalse;
- ws = &gct->steps[bd->step->abs_no];
+ ws = &gct->gens[bd->gen->no];
p = bd->u.scan;
// time around the loop.
while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
+ ASSERT(bd->link == NULL);
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
gen_obj:
case CONSTR:
case WEAK:
- case STABLE_NAME:
+ case PRIM:
{
StgPtr end;
}
case IND_PERM:
- if (bd->gen_no != 0) {
-#ifdef PROFILING
- // @LDV profiling
- // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
- // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
- LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
-#endif
- //
- // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
- //
- SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
-
- // We pretend that p has just been created.
- LDV_RECORD_CREATE((StgClosure *)p);
- }
- // fall through
- case IND_OLDGEN_PERM:
+ case BLACKHOLE:
evacuate(&((StgInd *)p)->indirectee);
p += sizeofW(StgInd);
break;
p += sizeofW(StgMutVar);
break;
- case CAF_BLACKHOLE:
- case BLACKHOLE:
- p += BLACKHOLE_sizeW();
- break;
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+ gct->eager_promotion = rtsFalse;
+ evacuate(&bq->bh);
+ evacuate((StgClosure**)&bq->owner);
+ evacuate((StgClosure**)&bq->queue);
+ evacuate((StgClosure**)&bq->link);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+ } else {
+ bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+ }
+ p += sizeofW(StgBlockingQueue);
+ break;
+ }
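+ // The CLEAN/DIRTY info-table switch above is the usual idiom for
+ // mutable objects: a DIRTY object still points into a younger
+ // generation and must be revisited via the mutable list, while a
+ // CLEAN one can be skipped during minor GCs.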
case THUNK_SELECTOR:
{
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
- // follow everything
{
- StgPtr next;
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ gct->eager_promotion = rtsFalse;
- // We don't eagerly promote objects pointed to by a mutable
- // array, but if we find the array only points to objects in
- // the same or an older generation, we mark it "clean" and
- // avoid traversing it during minor GCs.
- gct->eager_promotion = rtsFalse;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager_promotion;
+ p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
	if (gct->failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
	}
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
break;
}
case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
- StgPtr next;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
+ p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
}
case TSO:
{
- StgTSO *tso = (StgTSO *)p;
- scavengeTSO(tso);
- p += tso_sizeW(tso);
+ scavengeTSO((StgTSO *)p);
+ p += sizeofW(StgTSO);
break;
}
- case TVAR_WATCH_QUEUE:
- {
- StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&wq->closure);
- evacuate((StgClosure **)&wq->next_queue_entry);
- evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTVarWatchQueue);
- break;
- }
+ case STACK:
+ {
+ StgStack *stack = (StgStack*)p;
- case TVAR:
- {
- StgTVar *tvar = ((StgTVar *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&tvar->current_value);
- evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTVar);
- break;
- }
+ gct->eager_promotion = rtsFalse;
+
+ scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ stack->dirty = gct->failed_to_evac;
+ p += stack_sizeW(stack);
+
+ gct->eager_promotion = saved_eager_promotion;
+ break;
+ }
- case TREC_HEADER:
+ case MUT_PRIM:
{
- StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&trec->enclosing_trec);
- evacuate((StgClosure **)&trec->current_chunk);
- evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_step = saved_evac_step;
+ StgPtr end;
+
+ gct->eager_promotion = rtsFalse;
+
+ end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ evacuate((StgClosure **)p);
+ }
+ p += info->layout.payload.nptrs;
+
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTRecHeader);
- break;
+ break;
}
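+
+ // MUT_PRIM now covers the STM closure types that previously had
+ // dedicated cases here (TVAR, TVAR_WATCH_QUEUE, TREC_HEADER,
+ // ATOMIC_INVARIANT, INVARIANT_CHECK_QUEUE): they are laid out
+ // pointers-first, so the generic ptrs/nptrs walk above handles them,
+ // and like the old cases they are flagged to stay on the mutable
+ // list.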
case TREC_CHUNK:
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_step = 0;
+ gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_step = saved_evac_step;
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecChunk);
break;
}
- case ATOMIC_INVARIANT:
- {
- StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_step = 0;
- evacuate(&invariant->code);
- evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgAtomicInvariant);
- break;
- }
-
- case INVARIANT_CHECK_QUEUE:
- {
- StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&queue->invariant);
- evacuate((StgClosure **)&queue->my_execution);
- evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgInvariantCheckQueue);
- break;
- }
-
default:
barf("scavenge: unimplemented/strange closure type %d @ %p",
info->type, p);
{
StgPtr p, q;
StgInfoTable *info;
- step *saved_evac_step;
+ rtsBool saved_eager_promotion;
- gct->evac_step = &oldest_gen->steps[0];
- saved_evac_step = gct->evac_step;
+ gct->evac_gen_no = oldest_gen->no;
+ saved_eager_promotion = gct->eager_promotion;
-linear_scan:
- while (!mark_stack_empty()) {
- p = pop_mark_stack();
+ while ((p = pop_mark_stack())) {
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
case MVAR_CLEAN:
case MVAR_DIRTY:
{
- rtsBool saved_eager_promotion = gct->eager_promotion;
-
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&mvar->head);
gen_obj:
case CONSTR:
case WEAK:
- case STABLE_NAME:
+ case PRIM:
{
StgPtr end;
// no "old" generation.
break;
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
+ case IND:
+ case BLACKHOLE:
evacuate(&((StgInd *)p)->indirectee);
break;
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
- rtsBool saved_eager_promotion = gct->eager_promotion;
-
gct->eager_promotion = rtsFalse;
evacuate(&((StgMutVar *)p)->var);
gct->eager_promotion = saved_eager_promotion;
break;
}
- case CAF_BLACKHOLE:
- case BLACKHOLE:
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+ gct->eager_promotion = rtsFalse;
+ evacuate(&bq->bh);
+ evacuate((StgClosure**)&bq->owner);
+ evacuate((StgClosure**)&bq->queue);
+ evacuate((StgClosure**)&bq->link);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+ } else {
+ bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+ }
+ break;
+ }
+
case ARR_WORDS:
break;
case MUT_ARR_PTRS_DIRTY:
// follow everything
{
- StgPtr next;
- rtsBool saved_eager;
-
// We don't eagerly promote objects pointed to by a mutable
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager;
- if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
- } else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
- }
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable anyhow.
break;
}
case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
- StgPtr next, q = p;
+ StgPtr q = p;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
break;
}
- case TVAR_WATCH_QUEUE:
- {
- StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&wq->closure);
- evacuate((StgClosure **)&wq->next_queue_entry);
- evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
- case TVAR:
- {
- StgTVar *tvar = ((StgTVar *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&tvar->current_value);
- evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
+ case STACK:
+ {
+ StgStack *stack = (StgStack*)p;
+
+ gct->eager_promotion = rtsFalse;
+
+ scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ stack->dirty = gct->failed_to_evac;
+
+ gct->eager_promotion = saved_eager_promotion;
+ break;
+ }
+
+ case MUT_PRIM:
+ {
+ StgPtr end;
+
+ gct->eager_promotion = rtsFalse;
+
+ end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ gct->eager_promotion = saved_eager_promotion;
+ gct->failed_to_evac = rtsTrue; // mutable
+ break;
+ }
+
case TREC_CHUNK:
{
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_step = 0;
+ gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
- case TREC_HEADER:
- {
- StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&trec->enclosing_trec);
- evacuate((StgClosure **)&trec->current_chunk);
- evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_step = saved_evac_step;
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
- case ATOMIC_INVARIANT:
- {
- StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_step = 0;
- evacuate(&invariant->code);
- evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
- case INVARIANT_CHECK_QUEUE:
- {
- StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&queue->invariant);
- evacuate((StgClosure **)&queue->my_execution);
- evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
default:
barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
info->type, p);
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
- if (gct->evac_step) {
- recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen_no);
+ if (gct->evac_gen_no) {
+ recordMutableGen_GC((StgClosure *)q, gct->evac_gen_no);
}
}
-
- // mark the next bit to indicate "scavenged"
- mark(q+1, Bdescr(q));
-
- } // while (!mark_stack_empty())
-
- // start a new linear scan if the mark stack overflowed at some point
- if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
- debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
- mark_stack_overflowed = rtsFalse;
- oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
- oldgen_scan = oldgen_scan_bd->start;
- }
-
- if (oldgen_scan_bd) {
- // push a new thing on the mark stack
- loop:
- // find a closure that is marked but not scavenged, and start
- // from there.
- while (oldgen_scan < oldgen_scan_bd->free
- && !is_marked(oldgen_scan,oldgen_scan_bd)) {
- oldgen_scan++;
- }
-
- if (oldgen_scan < oldgen_scan_bd->free) {
-
- // already scavenged?
- if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
- oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
- goto loop;
- }
- push_mark_stack(oldgen_scan);
- // ToDo: bump the linear scan by the actual size of the object
- oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
- goto linear_scan;
- }
-
- oldgen_scan_bd = oldgen_scan_bd->link;
- if (oldgen_scan_bd != NULL) {
- oldgen_scan = oldgen_scan_bd->start;
- goto loop;
- }
- }
+ } // while (p = pop_mark_stack())
}
/* -----------------------------------------------------------------------------
scavenge_one(StgPtr p)
{
const StgInfoTable *info;
- step *saved_evac_step = gct->evac_step;
rtsBool no_luck;
+ rtsBool saved_eager_promotion;
+ saved_eager_promotion = gct->eager_promotion;
+
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
case MVAR_CLEAN:
case MVAR_DIRTY:
{
- rtsBool saved_eager_promotion = gct->eager_promotion;
-
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&mvar->head);
case CONSTR_0_2:
case CONSTR_2_0:
case WEAK:
+ case PRIM:
case IND_PERM:
{
StgPtr q, end;
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
StgPtr q = p;
- rtsBool saved_eager_promotion = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
evacuate(&((StgMutVar *)p)->var);
break;
}
- case CAF_BLACKHOLE:
- case BLACKHOLE:
- break;
-
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+ gct->eager_promotion = rtsFalse;
+ evacuate(&bq->bh);
+ evacuate((StgClosure**)&bq->owner);
+ evacuate((StgClosure**)&bq->queue);
+ evacuate((StgClosure**)&bq->link);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+ } else {
+ bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+ }
+ break;
+ }
+
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
{
- StgPtr next, q;
- rtsBool saved_eager;
-
// We don't eagerly promote objects pointed to by a mutable
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- q = p;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager;
+
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
} else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue;
break;
}
case MUT_ARR_PTRS_FROZEN0:
{
// follow everything
- StgPtr next, q=p;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
-
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
}
break;
}
break;
}
- case TVAR_WATCH_QUEUE:
- {
- StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&wq->closure);
- evacuate((StgClosure **)&wq->next_queue_entry);
- evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
+ case STACK:
+ {
+ StgStack *stack = (StgStack*)p;
- case TVAR:
- {
- StgTVar *tvar = ((StgTVar *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&tvar->current_value);
- evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->eager_promotion = rtsFalse;
+
+ scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ stack->dirty = gct->failed_to_evac;
+
+ gct->eager_promotion = saved_eager_promotion;
+ break;
+ }
+
+ case MUT_PRIM:
+ {
+ StgPtr end;
+
+ gct->eager_promotion = rtsFalse;
+
+ end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
break;
- }
- case TREC_HEADER:
- {
- StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&trec->enclosing_trec);
- evacuate((StgClosure **)&trec->current_chunk);
- evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
+ }
case TREC_CHUNK:
{
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_step = 0;
+ gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_step = saved_evac_step;
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
- case ATOMIC_INVARIANT:
- {
- StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_step = 0;
- evacuate(&invariant->code);
- evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
- case INVARIANT_CHECK_QUEUE:
- {
- StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_step = 0;
- evacuate((StgClosure **)&queue->invariant);
- evacuate((StgClosure **)&queue->my_execution);
- evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_step = saved_evac_step;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
+ case IND:
+ // IND can happen, for example, when the interpreter allocates
+ // a gigantic AP closure (more than one block), which ends up
+ // on the large-object list and then gets updated. See #3424.
+ case BLACKHOLE:
case IND_STATIC:
- {
- /* Careful here: a THUNK can be on the mutable list because
- * it contains pointers to young gen objects. If such a thunk
- * is updated, the IND_OLDGEN will be added to the mutable
- * list again, and we'll scavenge it twice. evacuate()
- * doesn't check whether the object has already been
- * evacuated, so we perform that check here.
- */
- StgClosure *q = ((StgInd *)p)->indirectee;
- if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
- break;
- }
evacuate(&((StgInd *)p)->indirectee);
- }
#if 0 && defined(DEBUG)
if (RtsFlags.DebugFlags.gc)
* promoted
*/
{
- StgPtr start = gen->steps[0].scan;
- bdescr *start_bd = gen->steps[0].scan_bd;
+ StgPtr start = gen->scan;
+ bdescr *start_bd = gen->scan_bd;
nat size = 0;
- scavenge(&gen->steps[0]);
- if (start_bd != gen->steps[0].scan_bd) {
+ scavenge(&gen);
+ if (start_bd != gen->scan_bd) {
size += (P_)BLOCK_ROUND_UP(start) - start;
start_bd = start_bd->link;
- while (start_bd != gen->steps[0].scan_bd) {
+ while (start_bd != gen->scan_bd) {
size += BLOCK_SIZE_W;
start_bd = start_bd->link;
}
- size += gen->steps[0].scan -
- (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
+ size += gen->scan -
+ (P_)BLOCK_ROUND_DOWN(gen->scan);
} else {
- size = gen->steps[0].scan - start;
+ size = gen->scan - start;
}
debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
}
scavenge_mutable_list(bdescr *bd, generation *gen)
{
StgPtr p, q;
+ nat gen_no;
- gct->evac_step = &gen->steps[0];
+ gen_no = gen->no;
+ gct->evac_gen_no = gen_no;
for (; bd != NULL; bd = bd->link) {
for (q = bd->start; q < bd->free; q++) {
p = (StgPtr)*q;
#ifdef DEBUG
switch (get_itbl((StgClosure *)p)->type) {
case MUT_VAR_CLEAN:
- barf("MUT_VAR_CLEAN on mutable list");
+ // can happen due to concurrent writeMutVars
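+ // (falls through: counted together with the dirty MUT_VARs)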
case MUT_VAR_DIRTY:
mutlist_MUTVARS++; break;
case MUT_ARR_PTRS_CLEAN:
// definitely doesn't point into a young generation.
// Clean objects don't need to be scavenged. Some clean
// objects (MUT_VAR_CLEAN) are not kept on the mutable
- // list at all; others, such as MUT_ARR_PTRS_CLEAN
+ // list at all; others, such as TSO,
// are always on the mutable list.
//
switch (get_itbl((StgClosure *)p)->type) {
case MUT_ARR_PTRS_CLEAN:
- recordMutableGen_GC((StgClosure *)p,gen->no);
+ recordMutableGen_GC((StgClosure *)p,gen_no);
continue;
- case TSO: {
- StgTSO *tso = (StgTSO *)p;
- if ((tso->flags & TSO_DIRTY) == 0) {
- // Must be on the mutable list because its link
- // field is dirty.
- ASSERT(tso->flags & TSO_LINK_DIRTY);
-
- scavenge_TSO_link(tso);
- if (gct->failed_to_evac) {
- recordMutableGen_GC((StgClosure *)p,gen->no);
- gct->failed_to_evac = rtsFalse;
- } else {
- tso->flags &= ~TSO_LINK_DIRTY;
- }
- continue;
- }
- }
- default:
+ case MUT_ARR_PTRS_DIRTY:
+ {
+ rtsBool saved_eager_promotion;
+ saved_eager_promotion = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
+
+ scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->eager_promotion = saved_eager_promotion;
+ gct->failed_to_evac = rtsFalse;
+ recordMutableGen_GC((StgClosure *)p,gen_no);
+ continue;
+ }
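+ // This is where the card table pays off: a dirty array is rescanned
+ // with scavenge_mut_arr_ptrs_marked, so only the cards recorded as
+ // dirty are walked instead of the whole payload.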
+ default:
;
}
if (scavenge_one(p)) {
// didn't manage to promote everything, so put the
// object back on the list.
- recordMutableGen_GC((StgClosure *)p,gen->no);
+ recordMutableGen_GC((StgClosure *)p,gen_no);
}
}
}
/* Always evacuate straight to the oldest generation for static
* objects */
- gct->evac_step = &oldest_gen->steps[0];
+ gct->evac_gen_no = oldest_gen->no;
/* keep going until we've scavenged all the objects on the linked
list... */
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
- nat i, b;
+ nat i, j, b;
StgWord bitmap;
b = 0;
- bitmap = large_bitmap->bitmap[b];
- for (i = 0; i < size; ) {
- if ((bitmap & 1) == 0) {
- evacuate((StgClosure **)p);
- }
- i++;
- p++;
- if (i % BITS_IN(W_) == 0) {
- b++;
- bitmap = large_bitmap->bitmap[b];
- } else {
+
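+ // Worked example, assuming a 64-bit word (BITS_IN(W_) == 64): for
+ // size == 100, the first iteration takes j = 64 bits from bitmap[0]
+ // and the second takes the remaining j = 36 from bitmap[1].  A 0 bit
+ // is a pointer word (evacuated below); a 1 bit is a non-pointer.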
+ for (i = 0; i < size; b++) {
+ bitmap = large_bitmap->bitmap[b];
+ j = stg_min(size-i, BITS_IN(W_));
+ i += j;
+ for (; j > 0; j--, p++) {
+ if ((bitmap & 1) == 0) {
+ evacuate((StgClosure **)p);
+ }
bitmap = bitmap >> 1;
- }
+ }
}
}
// before GC, but that seems like overkill.
//
// Scavenging this update frame as normal would be disastrous;
- // the updatee would end up pointing to the value. So we turn
- // the indirection into an IND_PERM, so that evacuate will
- // copy the indirection into the old generation instead of
- // discarding it.
+ // the updatee would end up pointing to the value. So we
+ // check whether the value after evacuation is a BLACKHOLE,
+ // and if not, we change the update frame to an stg_enter
+ // frame that simply returns the value. Hence, blackholing is
+ // compulsory (otherwise we would have to check for thunks
+ // too).
//
// Note [upd-black-hole]
// One slight hiccup is that the THUNK_SELECTOR machinery can
// the updatee is never a THUNK_SELECTOR and we're ok.
// NB. this is a new invariant: blackholing is not optional.
{
- nat type;
- const StgInfoTable *i;
- StgClosure *updatee;
-
- updatee = ((StgUpdateFrame *)p)->updatee;
- i = updatee->header.info;
- if (!IS_FORWARDING_PTR(i)) {
- type = get_itbl(updatee)->type;
- if (type == IND) {
- updatee->header.info = &stg_IND_PERM_info;
- } else if (type == IND_OLDGEN) {
- updatee->header.info = &stg_IND_OLDGEN_PERM_info;
- }
+ StgUpdateFrame *frame = (StgUpdateFrame *)p;
+ StgClosure *v;
+
+ evacuate(&frame->updatee);
+ v = frame->updatee;
+ if (GET_CLOSURE_TAG(v) != 0 ||
+ (get_itbl(v)->type != BLACKHOLE)) {
+ // blackholing is compulsory, see above.
+ frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
}
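+ // (If v is not a BLACKHOLE here, the thunk was already updated
+ // elsewhere and evacuation short-circuited the indirection, so the
+ // frame now points straight at the value; running the update again
+ // would overwrite that value, hence the switch to a frame that
+ // merely returns it.)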
- evacuate(&((StgUpdateFrame *)p)->updatee);
- ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
+ ASSERT(v->header.info != &stg_TSO_info);
p += sizeofW(StgUpdateFrame);
continue;
}
case CATCH_STM_FRAME:
case CATCH_RETRY_FRAME:
case ATOMICALLY_FRAME:
+ case UNDERFLOW_FRAME:
case STOP_FRAME:
case CATCH_FRAME:
case RET_SMALL:
/*-----------------------------------------------------------------------------
scavenge the large object list.
- evac_step set by caller; similar games played with evac_step as with
+ evac_gen_no set by caller; similar games played with evac_gen_no as with
scavenge() - see comment at the top of scavenge(). Most large
- objects are (repeatedly) mutable, so most of the time evac_step will
+ objects are (repeatedly) mutable, so most of the time evac_gen_no will
be zero.
--------------------------------------------------------------------------- */
static void
-scavenge_large (step_workspace *ws)
+scavenge_large (gen_workspace *ws)
{
bdescr *bd;
StgPtr p;
- gct->evac_step = ws->step;
+ gct->evac_gen_no = ws->gen->no;
bd = ws->todo_large_objects;
// the front when evacuating.
ws->todo_large_objects = bd->link;
- ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
- dbl_link_onto(bd, &ws->step->scavenged_large_objects);
- ws->step->n_scavenged_large_blocks += bd->blocks;
- RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
+ ACQUIRE_SPIN_LOCK(&ws->gen->sync);
+ dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
+ ws->gen->n_scavenged_large_blocks += bd->blocks;
+ RELEASE_SPIN_LOCK(&ws->gen->sync);
p = bd->start;
if (scavenge_one(p)) {
- if (ws->step->gen_no > 0) {
- recordMutableGen_GC((StgClosure *)p, ws->step->gen_no);
+ if (ws->gen->no > 0) {
+ recordMutableGen_GC((StgClosure *)p, ws->gen->no);
}
}
/* ----------------------------------------------------------------------------
Look for work to do.
- We look for the oldest step that has either a todo block that can
+ We look for the oldest gen that has either a todo block that can
be scanned, or a block of work on the global queue that we can
scan.
static rtsBool
scavenge_find_work (void)
{
- int s;
- step_workspace *ws;
+ int g;
+ gen_workspace *ws;
rtsBool did_something, did_anything;
bdescr *bd;
loop:
did_something = rtsFalse;
- for (s = total_steps-1; s >= 0; s--) {
- if (s == 0 && RtsFlags.GcFlags.generations > 1) {
- continue;
- }
- ws = &gct->steps[s];
+ for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
+ ws = &gct->gens[g];
gct->scan_bd = NULL;
break;
}
- if ((bd = grab_todo_block(ws)) != NULL) {
+ if ((bd = grab_local_todo_block(ws)) != NULL) {
scavenge_block(bd);
did_something = rtsTrue;
break;
did_anything = rtsTrue;
goto loop;
}
+
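+ // If no local work was found, fall back to work stealing: try to
+ // grab a todo block from another GC thread's queue, again searching
+ // from the oldest generation downwards, and loop in case the stolen
+ // block produces more local work.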
+#if defined(THREADED_RTS)
+ if (work_stealing) {
+ // look for work to steal
+ for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
+ if ((bd = steal_todo_block(g)) != NULL) {
+ scavenge_block(bd);
+ did_something = rtsTrue;
+ break;
+ }
+ }
+
+ if (did_something) {
+ did_anything = rtsTrue;
+ goto loop;
+ }
+ }
+#endif
+
// only return when there is no more work to do
return did_anything;
}
// scavenge objects in compacted generation
- if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
- (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+ if (mark_stack_bd != NULL && !mark_stack_empty()) {
scavenge_mark_stack();
work_to_do = rtsTrue;
}