*
* ---------------------------------------------------------------------------*/
+#include "PosixSource.h"
#include "Rts.h"
-#include "RtsFlags.h"
+
#include "Storage.h"
-#include "MBlock.h"
#include "GC.h"
#include "GCThread.h"
#include "GCUtils.h"
#include "Compact.h"
+#include "MarkStack.h"
#include "Evac.h"
#include "Scav.h"
#include "Apply.h"
#include "Trace.h"
-#include "LdvProfile.h"
#include "Sanity.h"
+#include "Capability.h"
+#include "LdvProfile.h"
static void scavenge_stack (StgPtr p, StgPtr stack_end);
#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
-# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
-# define scavenge_mutable_list(g) scavenge_mutable_list1(g)
+# define scavenge_block(a) scavenge_block1(a)
+# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
+# define scavenge_capability_mut_lists(cap) scavenge_capability_mut_Lists1(cap)
#endif
/* -----------------------------------------------------------------------------
return;
}
+ debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
+
+ // update the pointer from the Task.
+ if (tso->bound != NULL) {
+ tso->bound->tso = tso;
+ }
+
saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
if (gct->failed_to_evac) {
- tso->flags |= TSO_DIRTY;
+ tso->dirty = 1;
scavenge_TSO_link(tso);
} else {
- tso->flags &= ~TSO_DIRTY;
+ tso->dirty = 0;
scavenge_TSO_link(tso);
if (gct->failed_to_evac) {
tso->flags |= TSO_LINK_DIRTY;
}
/* -----------------------------------------------------------------------------
+ Mutable arrays of pointers
+ -------------------------------------------------------------------------- */
+
+// Scavenge an entire MUT_ARR_PTRS closure, one card at a time, updating
+// the card table as we go: a card is set to 1 when any pointer in that
+// card failed to evacuate (i.e. still points into a younger generation),
+// and cleared to 0 otherwise.  On return, gct->failed_to_evac reflects
+// whether *any* card failed, and the result is the address just past the
+// array (its size in words is mut_arr_ptrs_sizeW(a)).
+static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
+{
+    lnat m;
+    rtsBool any_failed;
+    StgPtr p, q;
+
+    any_failed = rtsFalse;
+    p = (StgPtr)&a->payload[0];
+    // All full cards except the last one.  The (int) casts guard against
+    // unsigned wrap-around when mutArrPtrsCards(a->ptrs) is zero.
+    for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
+    {
+        q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
+        for (; p < q; p++) {
+            evacuate((StgClosure**)p);
+        }
+        if (gct->failed_to_evac) {
+            any_failed = rtsTrue;
+            *mutArrPtrsCard(a,m) = 1;
+            gct->failed_to_evac = rtsFalse;
+        } else {
+            *mutArrPtrsCard(a,m) = 0;
+        }
+    }
+
+    // The last, possibly partial, card: scan up to the end of the
+    // payload rather than a full card's worth.  m still indexes this
+    // final card here, left over from the loop above.
+    q = (StgPtr)&a->payload[a->ptrs];
+    if (p < q) {
+        for (; p < q; p++) {
+            evacuate((StgClosure**)p);
+        }
+        if (gct->failed_to_evac) {
+            any_failed = rtsTrue;
+            *mutArrPtrsCard(a,m) = 1;
+            gct->failed_to_evac = rtsFalse;
+        } else {
+            *mutArrPtrsCard(a,m) = 0;
+        }
+    }
+
+    gct->failed_to_evac = any_failed;
+    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
+}
+
+// scavenge only the marked areas of a MUT_ARR_PTRS
+// scavenge only the marked areas of a MUT_ARR_PTRS: cards whose entry in
+// the card table is non-zero.  A card is cleared once everything in it
+// evacuates successfully; if anything fails, the card stays marked so a
+// later GC will revisit it.  Sets gct->failed_to_evac if any card failed,
+// and returns the address just past the array.
+static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
+{
+    lnat m;
+    StgPtr p, q;
+    rtsBool any_failed;
+
+    any_failed = rtsFalse;
+    for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
+    {
+        if (*mutArrPtrsCard(a,m) != 0) {
+            p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
+            // Clamp to the end of the payload: the last card may be partial.
+            q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
+                        (StgPtr)&a->payload[a->ptrs]);
+            for (; p < q; p++) {
+                evacuate((StgClosure**)p);
+            }
+            if (gct->failed_to_evac) {
+                any_failed = rtsTrue;
+                gct->failed_to_evac = rtsFalse;
+            } else {
+                *mutArrPtrsCard(a,m) = 0;
+            }
+        }
+    }
+
+    gct->failed_to_evac = any_failed;
+    return (StgPtr)a + mut_arr_ptrs_sizeW(a);
+}
+
+/* -----------------------------------------------------------------------------
Blocks of function args occur on the stack (at the top) and
in PAPs.
-------------------------------------------------------------------------- */
// If the SRT entry hasn't got bit 0 set, the SRT entry points to a
// closure that's fixed at link-time, and no extra magic is required.
if ( (unsigned long)(*srt) & 0x1 ) {
- evacuate(stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
+ evacuate( (StgClosure**) ((unsigned long) (*srt) & ~0x1));
} else {
evacuate(p);
}
/* -----------------------------------------------------------------------------
Scavenge a block from the given scan pointer up to bd->free.
- evac_step is set by the caller to be either zero (for a step in a
+ evac_gen is set by the caller to be either zero (for a step in a
generation < N) or G where G is the generation of the step being
scavenged.
- We sometimes temporarily change evac_step back to zero if we're
+ We sometimes temporarily change evac_gen back to zero if we're
scavenging a mutable object where eager promotion isn't such a good
idea.
-------------------------------------------------------------------------- */
{
StgPtr p, q;
StgInfoTable *info;
- step *saved_evac_step;
+ generation *saved_evac_gen;
rtsBool saved_eager_promotion;
- step_workspace *ws;
+ gen_workspace *ws;
- debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
- bd->start, bd->gen_no, bd->step->no, bd->u.scan);
+ debugTrace(DEBUG_gc, "scavenging block %p (gen %d) @ %p",
+ bd->start, bd->gen_no, bd->u.scan);
gct->scan_bd = bd;
- gct->evac_step = bd->step;
- saved_evac_step = gct->evac_step;
+ gct->evac_gen = bd->gen;
+ saved_evac_gen = gct->evac_gen;
saved_eager_promotion = gct->eager_promotion;
gct->failed_to_evac = rtsFalse;
- ws = &gct->steps[bd->step->abs_no];
+ ws = &gct->gens[bd->gen->no];
p = bd->u.scan;
// time around the loop.
while (p < bd->free || (bd == ws->todo_bd && p < ws->todo_free)) {
+ ASSERT(bd->link == NULL);
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
break;
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
p += BLACKHOLE_sizeW();
break;
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
- // follow everything
{
- StgPtr next;
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ gct->eager_promotion = rtsFalse;
- // We don't eagerly promote objects pointed to by a mutable
- // array, but if we find the array only points to objects in
- // the same or an older generation, we mark it "clean" and
- // avoid traversing it during minor GCs.
- gct->eager_promotion = rtsFalse;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager_promotion;
+ p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
break;
}
case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
- StgPtr next;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
+ p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
}
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&wq->closure);
evacuate((StgClosure **)&wq->next_queue_entry);
evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVarWatchQueue);
break;
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTVar);
break;
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&trec->enclosing_trec);
evacuate((StgClosure **)&trec->current_chunk);
evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecHeader);
break;
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecChunk);
break;
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate(&invariant->code);
evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgAtomicInvariant);
break;
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&queue->invariant);
evacuate((StgClosure **)&queue->my_execution);
evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgInvariantCheckQueue);
break;
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
if (bd->gen_no > 0) {
- recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
+ recordMutableGen_GC((StgClosure *)q, bd->gen_no);
}
}
}
{
StgPtr p, q;
StgInfoTable *info;
- step *saved_evac_step;
+ generation *saved_evac_gen;
- gct->evac_step = &oldest_gen->steps[0];
- saved_evac_step = gct->evac_step;
+ gct->evac_gen = oldest_gen;
+ saved_evac_gen = gct->evac_gen;
-linear_scan:
- while (!mark_stack_empty()) {
- p = pop_mark_stack();
+ while ((p = pop_mark_stack())) {
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
q = p;
- switch (((volatile StgWord *)info)[1] & 0xffff) {
+ switch (info->type) {
case MVAR_CLEAN:
case MVAR_DIRTY:
}
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
case ARR_WORDS:
break;
case MUT_ARR_PTRS_DIRTY:
// follow everything
{
- StgPtr next;
rtsBool saved_eager;
// We don't eagerly promote objects pointed to by a mutable
// avoid traversing it during minor GCs.
saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager;
- if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
- } else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
- }
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->eager_promotion = saved_eager;
gct->failed_to_evac = rtsTrue; // mutable anyhow.
break;
}
case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
- StgPtr next, q = p;
+ StgPtr q = p;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&wq->closure);
evacuate((StgClosure **)&wq->next_queue_entry);
evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&trec->enclosing_trec);
evacuate((StgClosure **)&trec->current_chunk);
evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate(&invariant->code);
evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&queue->invariant);
evacuate((StgClosure **)&queue->my_execution);
evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
- if (gct->evac_step) {
- recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
- }
- }
-
- // mark the next bit to indicate "scavenged"
- mark(q+1, Bdescr(q));
-
- } // while (!mark_stack_empty())
-
- // start a new linear scan if the mark stack overflowed at some point
- if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
- debugTrace(DEBUG_gc, "scavenge_mark_stack: starting linear scan");
- mark_stack_overflowed = rtsFalse;
- oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
- oldgen_scan = oldgen_scan_bd->start;
- }
-
- if (oldgen_scan_bd) {
- // push a new thing on the mark stack
- loop:
- // find a closure that is marked but not scavenged, and start
- // from there.
- while (oldgen_scan < oldgen_scan_bd->free
- && !is_marked(oldgen_scan,oldgen_scan_bd)) {
- oldgen_scan++;
- }
-
- if (oldgen_scan < oldgen_scan_bd->free) {
-
- // already scavenged?
- if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
- oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
- goto loop;
+ if (gct->evac_gen) {
+ recordMutableGen_GC((StgClosure *)q, gct->evac_gen->no);
}
- push_mark_stack(oldgen_scan);
- // ToDo: bump the linear scan by the actual size of the object
- oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
- goto linear_scan;
- }
-
- oldgen_scan_bd = oldgen_scan_bd->link;
- if (oldgen_scan_bd != NULL) {
- oldgen_scan = oldgen_scan_bd->start;
- goto loop;
}
- }
+ } // while (p = pop_mark_stack())
}
/* -----------------------------------------------------------------------------
scavenge_one(StgPtr p)
{
const StgInfoTable *info;
- step *saved_evac_step = gct->evac_step;
+ generation *saved_evac_gen = gct->evac_gen;
rtsBool no_luck;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
}
case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
case BLACKHOLE:
break;
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
{
- StgPtr next, q;
rtsBool saved_eager;
// We don't eagerly promote objects pointed to by a mutable
// avoid traversing it during minor GCs.
saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- q = p;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager;
+
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
} else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
+ gct->eager_promotion = saved_eager;
gct->failed_to_evac = rtsTrue;
break;
}
case MUT_ARR_PTRS_FROZEN0:
{
// follow everything
- StgPtr next, q=p;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
-
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
}
break;
}
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&wq->closure);
evacuate((StgClosure **)&wq->next_queue_entry);
evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&trec->enclosing_trec);
evacuate((StgClosure **)&trec->current_chunk);
evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate(&invariant->code);
evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_step = 0;
+ gct->evac_gen = 0;
evacuate((StgClosure **)&queue->invariant);
evacuate((StgClosure **)&queue->my_execution);
evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_step = saved_evac_step;
+ gct->evac_gen = saved_evac_gen;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
+ case IND:
+ // IND can happen, for example, when the interpreter allocates
+ // a gigantic AP closure (more than one block), which ends up
+ // on the large-object list and then gets updated. See #3424.
case IND_OLDGEN:
case IND_OLDGEN_PERM:
case IND_STATIC:
- {
- /* Careful here: a THUNK can be on the mutable list because
- * it contains pointers to young gen objects. If such a thunk
- * is updated, the IND_OLDGEN will be added to the mutable
- * list again, and we'll scavenge it twice. evacuate()
- * doesn't check whether the object has already been
- * evacuated, so we perform that check here.
- */
- StgClosure *q = ((StgInd *)p)->indirectee;
- if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
- break;
- }
evacuate(&((StgInd *)p)->indirectee);
- }
#if 0 && defined(DEBUG)
if (RtsFlags.DebugFlags.gc)
* promoted
*/
{
- StgPtr start = gen->steps[0].scan;
- bdescr *start_bd = gen->steps[0].scan_bd;
+ StgPtr start = gen->scan;
+ bdescr *start_bd = gen->scan_bd;
nat size = 0;
- scavenge(&gen->steps[0]);
- if (start_bd != gen->steps[0].scan_bd) {
+ scavenge(&gen);
+ if (start_bd != gen->scan_bd) {
size += (P_)BLOCK_ROUND_UP(start) - start;
start_bd = start_bd->link;
- while (start_bd != gen->steps[0].scan_bd) {
+ while (start_bd != gen->scan_bd) {
size += BLOCK_SIZE_W;
start_bd = start_bd->link;
}
- size += gen->steps[0].scan -
- (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
+ size += gen->scan -
+ (P_)BLOCK_ROUND_DOWN(gen->scan);
} else {
- size = gen->steps[0].scan - start;
+ size = gen->scan - start;
}
debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
}
-------------------------------------------------------------------------- */
void
-scavenge_mutable_list(generation *gen)
+scavenge_mutable_list(bdescr *bd, generation *gen)
{
- bdescr *bd;
StgPtr p, q;
- bd = gen->saved_mut_list;
-
- gct->evac_step = &gen->steps[0];
+ gct->evac_gen = gen;
for (; bd != NULL; bd = bd->link) {
for (q = bd->start; q < bd->free; q++) {
p = (StgPtr)*q;
// definitely doesn't point into a young generation.
// Clean objects don't need to be scavenged. Some clean
// objects (MUT_VAR_CLEAN) are not kept on the mutable
- // list at all; others, such as MUT_ARR_PTRS_CLEAN and
- // TSO, are always on the mutable list.
+ // list at all; others, such as TSO
+ // are always on the mutable list.
//
switch (get_itbl((StgClosure *)p)->type) {
case MUT_ARR_PTRS_CLEAN:
- recordMutableGen_GC((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen->no);
+ continue;
+ case MUT_ARR_PTRS_DIRTY:
+ {
+ rtsBool saved_eager;
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
+
+ scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->eager_promotion = saved_eager;
+ gct->failed_to_evac = rtsFalse;
+ recordMutableGen_GC((StgClosure *)p,gen->no);
continue;
+ }
case TSO: {
StgTSO *tso = (StgTSO *)p;
- if ((tso->flags & TSO_DIRTY) == 0) {
+ if (tso->dirty == 0) {
// Must be on the mutable list because its link
// field is dirty.
ASSERT(tso->flags & TSO_LINK_DIRTY);
scavenge_TSO_link(tso);
if (gct->failed_to_evac) {
- recordMutableGen_GC((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen->no);
gct->failed_to_evac = rtsFalse;
} else {
tso->flags &= ~TSO_LINK_DIRTY;
if (scavenge_one(p)) {
// didn't manage to promote everything, so put the
// object back on the list.
- recordMutableGen_GC((StgClosure *)p,gen);
+ recordMutableGen_GC((StgClosure *)p,gen->no);
}
}
}
+}
- // free the old mut_list
- freeChain_sync(gen->saved_mut_list);
- gen->saved_mut_list = NULL;
+// Scavenge this Capability's saved mutable lists (one per generation),
+// freeing each saved list's block chain once it has been scavenged.
+void
+scavenge_capability_mut_lists (Capability *cap)
+{
+    nat g;
+
+    /* Mutable lists from each generation > N
+     * we want to *scavenge* these roots, not evacuate them: they're not
+     * going to move in this GC.
+     * Also do them in reverse generation order, for the usual reason:
+     * namely to reduce the likelihood of spurious old->new pointers.
+     */
+    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
+        scavenge_mutable_list(cap->saved_mut_lists[g], &generations[g]);
+        // The saved list has been fully processed; release its blocks
+        // (any still-dirty objects were re-recorded on the live mut_list).
+        freeChain_sync(cap->saved_mut_lists[g]);
+        cap->saved_mut_lists[g] = NULL;
+    }
+}
/* -----------------------------------------------------------------------------
/* Always evacuate straight to the oldest generation for static
* objects */
- gct->evac_step = &oldest_gen->steps[0];
+ gct->evac_gen = oldest_gen;
/* keep going until we've scavenged all the objects on the linked
list... */
*/
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
- recordMutableGen_GC((StgClosure *)p,oldest_gen);
+ recordMutableGen_GC((StgClosure *)p,oldest_gen->no);
}
break;
}
// the indirection into an IND_PERM, so that evacuate will
// copy the indirection into the old generation instead of
// discarding it.
+ //
+ // Note [upd-black-hole]
+ // One slight hiccup is that the THUNK_SELECTOR machinery can
+ // overwrite the updatee with an IND. In parallel GC, this
+ // could even be happening concurrently, so we can't check for
+ // the IND. Fortunately if we assume that blackholing is
+ // happening (either lazy or eager), then we can be sure that
+ // the updatee is never a THUNK_SELECTOR and we're ok.
+ // NB. this is a new invariant: blackholing is not optional.
{
nat type;
const StgInfoTable *i;
+ StgClosure *updatee;
- i = ((StgUpdateFrame *)p)->updatee->header.info;
+ updatee = ((StgUpdateFrame *)p)->updatee;
+ i = updatee->header.info;
if (!IS_FORWARDING_PTR(i)) {
- type = get_itbl(((StgUpdateFrame *)p)->updatee)->type;
+ type = get_itbl(updatee)->type;
if (type == IND) {
- ((StgUpdateFrame *)p)->updatee->header.info =
- (StgInfoTable *)&stg_IND_PERM_info;
+ updatee->header.info = &stg_IND_PERM_info;
} else if (type == IND_OLDGEN) {
- ((StgUpdateFrame *)p)->updatee->header.info =
- (StgInfoTable *)&stg_IND_OLDGEN_PERM_info;
+ updatee->header.info = &stg_IND_OLDGEN_PERM_info;
}
- evacuate(&((StgUpdateFrame *)p)->updatee);
- p += sizeofW(StgUpdateFrame);
- continue;
}
+ evacuate(&((StgUpdateFrame *)p)->updatee);
+ ASSERT(GET_CLOSURE_TAG(((StgUpdateFrame *)p)->updatee) == 0);
+ p += sizeofW(StgUpdateFrame);
+ continue;
}
// small bitmap (< 32 entries, or 64 on a 64-bit machine)
/*-----------------------------------------------------------------------------
scavenge the large object list.
- evac_step set by caller; similar games played with evac_step as with
+ evac_gen set by caller; similar games played with evac_gen as with
scavenge() - see comment at the top of scavenge(). Most large
- objects are (repeatedly) mutable, so most of the time evac_step will
+ objects are (repeatedly) mutable, so most of the time evac_gen will
be zero.
--------------------------------------------------------------------------- */
static void
-scavenge_large (step_workspace *ws)
+scavenge_large (gen_workspace *ws)
{
bdescr *bd;
StgPtr p;
- gct->evac_step = ws->step;
+ gct->evac_gen = ws->gen;
bd = ws->todo_large_objects;
// the front when evacuating.
ws->todo_large_objects = bd->link;
- ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
- dbl_link_onto(bd, &ws->step->scavenged_large_objects);
- ws->step->n_scavenged_large_blocks += bd->blocks;
- RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
+ ACQUIRE_SPIN_LOCK(&ws->gen->sync_large_objects);
+ dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
+ ws->gen->n_scavenged_large_blocks += bd->blocks;
+ RELEASE_SPIN_LOCK(&ws->gen->sync_large_objects);
p = bd->start;
if (scavenge_one(p)) {
- if (ws->step->gen_no > 0) {
- recordMutableGen_GC((StgClosure *)p, ws->step->gen);
+ if (ws->gen->no > 0) {
+ recordMutableGen_GC((StgClosure *)p, ws->gen->no);
}
}
/* ----------------------------------------------------------------------------
Look for work to do.
- We look for the oldest step that has either a todo block that can
+ We look for the oldest gen that has either a todo block that can
be scanned, or a block of work on the global queue that we can
scan.
static rtsBool
scavenge_find_work (void)
{
- int s;
- step_workspace *ws;
+ int g;
+ gen_workspace *ws;
rtsBool did_something, did_anything;
bdescr *bd;
loop:
did_something = rtsFalse;
- for (s = total_steps-1; s >= 0; s--) {
- if (s == 0 && RtsFlags.GcFlags.generations > 1) {
- continue;
- }
- ws = &gct->steps[s];
+ for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
+ ws = &gct->gens[g];
gct->scan_bd = NULL;
break;
}
- if ((bd = grab_todo_block(ws)) != NULL) {
+ if ((bd = grab_local_todo_block(ws)) != NULL) {
scavenge_block(bd);
did_something = rtsTrue;
break;
did_anything = rtsTrue;
goto loop;
}
+
+#if defined(THREADED_RTS)
+ if (work_stealing) {
+ // look for work to steal
+ for (g = RtsFlags.GcFlags.generations-1; g >= 0; g--) {
+ if ((bd = steal_todo_block(g)) != NULL) {
+ scavenge_block(bd);
+ did_something = rtsTrue;
+ break;
+ }
+ }
+
+ if (did_something) {
+ did_anything = rtsTrue;
+ goto loop;
+ }
+ }
+#endif
+
// only return when there is no more work to do
return did_anything;
}
// scavenge objects in compacted generation
- if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
- (mark_stack_bdescr != NULL && !mark_stack_empty())) {
+ if (mark_stack_bd != NULL && !mark_stack_empty()) {
scavenge_mark_stack();
work_to_do = rtsTrue;
}