StgLargeBitmap *large_bitmap,
nat size );
-static void scavenge_block (bdescr *bd, StgPtr scan);
-
/* Similar to scavenge_large_bitmap(), but we don't write back the
* pointers we get back from evacuate().
static void
scavengeTSO (StgTSO *tso)
{
+ rtsBool saved_eager;
+
+ if (tso->what_next == ThreadRelocated) {
+ // the only way this can happen is if the old TSO was on the
+ // mutable list. We might have other links to this defunct
+ // TSO, so we must update its link field.
+ evacuate((StgClosure**)&tso->_link);
+ return;
+ }
+
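+ // like other mutable objects, we don't eagerly promote the things the
+ // TSO points to; failed_to_evac then tells us (below) whether the TSO
+ // still points into a younger generation.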
+ saved_eager = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
+
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
|| tso->why_blocked == BlockedOnException
// scavenge this thread's stack
scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
+
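+ // maintaining TSO_DIRTY here means callers such as
+ // scavenge_mark_stack() and scavenge_one() can simply call
+ // scavengeTSO() without repeating this logic.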
+ if (gct->failed_to_evac) {
+ tso->flags |= TSO_DIRTY;
+ } else {
+ tso->flags &= ~TSO_DIRTY;
+ }
+
+ gct->eager_promotion = saved_eager;
}
/* -----------------------------------------------------------------------------
}
/* -----------------------------------------------------------------------------
- Scavenge a block from the given scan pointer up to bd->free.
-
- evac_gen is set by the caller to be either zero (for a step in a
- generation < N) or G where G is the generation of the step being
- scavenged.
-
- We sometimes temporarily change evac_gen back to zero if we're
- scavenging a mutable object where eager promotion isn't such a good
- idea.
- -------------------------------------------------------------------------- */
-
-static void
-scavenge_block (bdescr *bd, StgPtr scan)
-{
- StgPtr p, q;
- StgInfoTable *info;
- nat saved_evac_gen;
-
- p = scan;
-
- debugTrace(DEBUG_gc, "scavenging block %p (gen %d, step %d) @ %p",
- bd->start, bd->gen_no, bd->step->no, scan);
-
- gct->evac_gen = bd->gen_no;
- saved_evac_gen = gct->evac_gen;
- gct->failed_to_evac = rtsFalse;
-
- // we might be evacuating into the very object that we're
- // scavenging, so we have to check the real bd->free pointer each
- // time around the loop.
- while (p < bd->free) {
-
- ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
- info = get_itbl((StgClosure *)p);
-
- ASSERT(gct->thunk_selector_depth == 0);
-
- q = p;
- switch (info->type) {
-
- case MVAR_CLEAN:
- case MVAR_DIRTY:
- {
- rtsBool saved_eager_promotion = gct->eager_promotion;
-
- StgMVar *mvar = ((StgMVar *)p);
- gct->eager_promotion = rtsFalse;
- evacuate((StgClosure **)&mvar->head);
- evacuate((StgClosure **)&mvar->tail);
- evacuate((StgClosure **)&mvar->value);
- gct->eager_promotion = saved_eager_promotion;
-
- if (gct->failed_to_evac) {
- mvar->header.info = &stg_MVAR_DIRTY_info;
- } else {
- mvar->header.info = &stg_MVAR_CLEAN_info;
- }
- p += sizeofW(StgMVar);
- break;
- }
-
- case FUN_2_0:
- scavenge_fun_srt(info);
- evacuate(&((StgClosure *)p)->payload[1]);
- evacuate(&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2;
- break;
-
- case THUNK_2_0:
- scavenge_thunk_srt(info);
- evacuate(&((StgThunk *)p)->payload[1]);
- evacuate(&((StgThunk *)p)->payload[0]);
- p += sizeofW(StgThunk) + 2;
- break;
-
- case CONSTR_2_0:
- evacuate(&((StgClosure *)p)->payload[1]);
- evacuate(&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2;
- break;
-
- case THUNK_1_0:
- scavenge_thunk_srt(info);
- evacuate(&((StgThunk *)p)->payload[0]);
- p += sizeofW(StgThunk) + 1;
- break;
-
- case FUN_1_0:
- scavenge_fun_srt(info);
- case CONSTR_1_0:
- evacuate(&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 1;
- break;
-
- case THUNK_0_1:
- scavenge_thunk_srt(info);
- p += sizeofW(StgThunk) + 1;
- break;
-
- case FUN_0_1:
- scavenge_fun_srt(info);
- case CONSTR_0_1:
- p += sizeofW(StgHeader) + 1;
- break;
-
- case THUNK_0_2:
- scavenge_thunk_srt(info);
- p += sizeofW(StgThunk) + 2;
- break;
-
- case FUN_0_2:
- scavenge_fun_srt(info);
- case CONSTR_0_2:
- p += sizeofW(StgHeader) + 2;
- break;
-
- case THUNK_1_1:
- scavenge_thunk_srt(info);
- evacuate(&((StgThunk *)p)->payload[0]);
- p += sizeofW(StgThunk) + 2;
- break;
-
- case FUN_1_1:
- scavenge_fun_srt(info);
- case CONSTR_1_1:
- evacuate(&((StgClosure *)p)->payload[0]);
- p += sizeofW(StgHeader) + 2;
- break;
-
- case FUN:
- scavenge_fun_srt(info);
- goto gen_obj;
-
- case THUNK:
- {
- StgPtr end;
-
- scavenge_thunk_srt(info);
- end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
- for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
- evacuate((StgClosure **)p);
- }
- p += info->layout.payload.nptrs;
- break;
- }
-
- gen_obj:
- case CONSTR:
- case WEAK:
- case STABLE_NAME:
- {
- StgPtr end;
-
- end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
- for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
- evacuate((StgClosure **)p);
- }
- p += info->layout.payload.nptrs;
- break;
- }
-
- case BCO: {
- StgBCO *bco = (StgBCO *)p;
- evacuate((StgClosure **)&bco->instrs);
- evacuate((StgClosure **)&bco->literals);
- evacuate((StgClosure **)&bco->ptrs);
- p += bco_sizeW(bco);
- break;
- }
-
- case IND_PERM:
- if (bd->gen_no != 0) {
-#ifdef PROFILING
- // @LDV profiling
- // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
- // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
- LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
-#endif
- //
- // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
- //
- SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
-
- // We pretend that p has just been created.
- LDV_RECORD_CREATE((StgClosure *)p);
- }
- // fall through
- case IND_OLDGEN_PERM:
- evacuate(&((StgInd *)p)->indirectee);
- p += sizeofW(StgInd);
- break;
-
- case MUT_VAR_CLEAN:
- case MUT_VAR_DIRTY: {
- rtsBool saved_eager_promotion = gct->eager_promotion;
-
- gct->eager_promotion = rtsFalse;
- evacuate(&((StgMutVar *)p)->var);
- gct->eager_promotion = saved_eager_promotion;
-
- if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
- } else {
- ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
- }
- p += sizeofW(StgMutVar);
- break;
- }
-
- case CAF_BLACKHOLE:
- case SE_CAF_BLACKHOLE:
- case SE_BLACKHOLE:
- case BLACKHOLE:
- p += BLACKHOLE_sizeW();
- break;
-
- case THUNK_SELECTOR:
- {
- StgSelector *s = (StgSelector *)p;
- evacuate(&s->selectee);
- p += THUNK_SELECTOR_sizeW();
- break;
- }
-
- // A chunk of stack saved in a heap object
- case AP_STACK:
- {
- StgAP_STACK *ap = (StgAP_STACK *)p;
-
- evacuate(&ap->fun);
- scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
- p = (StgPtr)ap->payload + ap->size;
- break;
- }
-
- case PAP:
- p = scavenge_PAP((StgPAP *)p);
- break;
-
- case AP:
- p = scavenge_AP((StgAP *)p);
- break;
-
- case ARR_WORDS:
- // nothing to follow
- p += arr_words_sizeW((StgArrWords *)p);
- break;
-
- case MUT_ARR_PTRS_CLEAN:
- case MUT_ARR_PTRS_DIRTY:
- // follow everything
- {
- StgPtr next;
- rtsBool saved_eager;
-
- // We don't eagerly promote objects pointed to by a mutable
- // array, but if we find the array only points to objects in
- // the same or an older generation, we mark it "clean" and
- // avoid traversing it during minor GCs.
- saved_eager = gct->eager_promotion;
- gct->eager_promotion = rtsFalse;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager;
-
- if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
- } else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
- }
-
- gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
- break;
- }
-
- case MUT_ARR_PTRS_FROZEN:
- case MUT_ARR_PTRS_FROZEN0:
- // follow everything
- {
- StgPtr next;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
-
- // If we're going to put this object on the mutable list, then
- // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
- if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
- } else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
- }
- break;
- }
-
- case TSO:
- {
- StgTSO *tso = (StgTSO *)p;
- rtsBool saved_eager = gct->eager_promotion;
-
- gct->eager_promotion = rtsFalse;
- scavengeTSO(tso);
- gct->eager_promotion = saved_eager;
-
- if (gct->failed_to_evac) {
- tso->flags |= TSO_DIRTY;
- } else {
- tso->flags &= ~TSO_DIRTY;
- }
-
- gct->failed_to_evac = rtsTrue; // always on the mutable list
- p += tso_sizeW(tso);
- break;
- }
-
- case TVAR_WATCH_QUEUE:
- {
- StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&wq->closure);
- evacuate((StgClosure **)&wq->next_queue_entry);
- evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTVarWatchQueue);
- break;
- }
-
- case TVAR:
- {
- StgTVar *tvar = ((StgTVar *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&tvar->current_value);
- evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTVar);
- break;
- }
-
- case TREC_HEADER:
- {
- StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&trec->enclosing_trec);
- evacuate((StgClosure **)&trec->current_chunk);
- evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTRecHeader);
- break;
- }
-
- case TREC_CHUNK:
- {
- StgWord i;
- StgTRecChunk *tc = ((StgTRecChunk *) p);
- TRecEntry *e = &(tc -> entries[0]);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&tc->prev_chunk);
- for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
- evacuate((StgClosure **)&e->tvar);
- evacuate((StgClosure **)&e->expected_value);
- evacuate((StgClosure **)&e->new_value);
- }
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTRecChunk);
- break;
- }
-
- case ATOMIC_INVARIANT:
- {
- StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_gen = 0;
- evacuate(&invariant->code);
- evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgAtomicInvariant);
- break;
- }
-
- case INVARIANT_CHECK_QUEUE:
- {
- StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&queue->invariant);
- evacuate((StgClosure **)&queue->my_execution);
- evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgInvariantCheckQueue);
- break;
- }
-
- default:
- barf("scavenge: unimplemented/strange closure type %d @ %p",
- info->type, p);
- }
-
- /*
- * We need to record the current object on the mutable list if
- * (a) It is actually mutable, or
- * (b) It contains pointers to a younger generation.
- * Case (b) arises if we didn't manage to promote everything that
- * the current object points to into the current generation.
- */
- if (gct->failed_to_evac) {
- gct->failed_to_evac = rtsFalse;
- if (bd->gen_no > 0) {
- recordMutableGen_GC((StgClosure *)q, &generations[bd->gen_no]);
- }
- }
- }
-
- debugTrace(DEBUG_gc, " scavenged %ld bytes", (bd->free - scan) * sizeof(W_));
-}
-
-/* -----------------------------------------------------------------------------
Scavenge everything on the mark stack.
This is slightly different from scavenge():
{
StgPtr p, q;
StgInfoTable *info;
- nat saved_evac_gen;
+ step *saved_evac_step;
- gct->evac_gen = oldest_gen->no;
- saved_evac_gen = gct->evac_gen;
+ gct->evac_step = &oldest_gen->steps[0];
+ saved_evac_step = gct->evac_step;
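+ // evac_step replaces the old evac_gen: it names the target step, and
+ // evacuating an object into a younger step sets gct->failed_to_evac
+ // (which is what the recordMutableGen_GC call below keys off).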
linear_scan:
while (!mark_stack_empty()) {
info = get_itbl((StgClosure *)p);
q = p;
- switch (info->type) {
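+ // N.B. a raw volatile word read of the info table is used here in
+ // place of info->type, presumably to force a fresh load of the type
+ // field while other GC threads may be updating closures.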
+ switch (((volatile StgWord *)info)[1] & 0xffff) {
case MVAR_CLEAN:
case MVAR_DIRTY:
case TSO:
{
- StgTSO *tso = (StgTSO *)p;
- rtsBool saved_eager = gct->eager_promotion;
-
- gct->eager_promotion = rtsFalse;
- scavengeTSO(tso);
- gct->eager_promotion = saved_eager;
-
- if (gct->failed_to_evac) {
- tso->flags |= TSO_DIRTY;
- } else {
- tso->flags &= ~TSO_DIRTY;
- }
-
+ scavengeTSO((StgTSO*)p);
gct->failed_to_evac = rtsTrue; // always on the mutable list
break;
}
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&wq->closure);
evacuate((StgClosure **)&wq->next_queue_entry);
evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&trec->enclosing_trec);
evacuate((StgClosure **)&trec->current_chunk);
evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate(&invariant->code);
evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&queue->invariant);
evacuate((StgClosure **)&queue->my_execution);
evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
if (gct->failed_to_evac) {
gct->failed_to_evac = rtsFalse;
- if (gct->evac_gen > 0) {
- recordMutableGen_GC((StgClosure *)q, &generations[gct->evac_gen]);
+ if (gct->evac_step) {
+ recordMutableGen_GC((StgClosure *)q, gct->evac_step->gen);
}
}
scavenge_one(StgPtr p)
{
const StgInfoTable *info;
- nat saved_evac_gen = gct->evac_gen;
+ step *saved_evac_step = gct->evac_step;
rtsBool no_luck;
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
case TSO:
{
- StgTSO *tso = (StgTSO *)p;
- rtsBool saved_eager = gct->eager_promotion;
-
- gct->eager_promotion = rtsFalse;
- scavengeTSO(tso);
- gct->eager_promotion = saved_eager;
-
- if (gct->failed_to_evac) {
- tso->flags |= TSO_DIRTY;
- } else {
- tso->flags &= ~TSO_DIRTY;
- }
-
+ scavengeTSO((StgTSO*)p);
gct->failed_to_evac = rtsTrue; // always on the mutable list
break;
}
case TVAR_WATCH_QUEUE:
{
StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&wq->closure);
evacuate((StgClosure **)&wq->next_queue_entry);
evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TVAR:
{
StgTVar *tvar = ((StgTVar *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&tvar->current_value);
evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case TREC_HEADER:
{
StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&trec->enclosing_trec);
evacuate((StgClosure **)&trec->current_chunk);
evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case ATOMIC_INVARIANT:
{
StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate(&invariant->code);
evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
case INVARIANT_CHECK_QUEUE:
{
StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_gen = 0;
+ gct->evac_step = 0;
evacuate((StgClosure **)&queue->invariant);
evacuate((StgClosure **)&queue->my_execution);
evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_gen = saved_evac_gen;
+ gct->evac_step = saved_evac_step;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
bd = gen->saved_mut_list;
- gct->evac_gen = gen->no;
+ gct->evac_step = &gen->steps[0];
for (; bd != NULL; bd = bd->link) {
for (q = bd->start; q < bd->free; q++) {
p = (StgPtr)*q;
}
// free the old mut_list
- freeChain(gen->saved_mut_list);
+ freeChain_sync(gen->saved_mut_list);
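+ // freeChain_sync is presumably the variant of freeChain that is safe
+ // to call concurrently from multiple GC threads (the old code used
+ // plain freeChain).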
gen->saved_mut_list = NULL;
}
StgClosure* p;
const StgInfoTable *info;
+ debugTrace(DEBUG_gc, "scavenging static objects");
+
/* Always evacuate straight to the oldest generation for static
* objects */
- gct->evac_gen = oldest_gen->no;
+ gct->evac_step = &oldest_gen->steps[0];
/* keep going until we've scavenged all the objects on the linked
list... */
while (1) {
- ACQUIRE_SPIN_LOCK(&static_objects_sync);
-
/* get the next static object from the list. Remember, there might
* be more stuff on this list after each evacuation...
* (static_objects is a global)
*/
- p = static_objects;
+ p = gct->static_objects;
if (p == END_OF_STATIC_LIST) {
- RELEASE_SPIN_LOCK(&static_objects_sync);
break;
}
/* Take this object *off* the static_objects list,
* and put it on the scavenged_static_objects list.
*/
- static_objects = *STATIC_LINK(info,p);
- *STATIC_LINK(info,p) = scavenged_static_objects;
- scavenged_static_objects = p;
-
- RELEASE_SPIN_LOCK(&static_objects_sync);
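+ // the static object lists are now per GC thread (gct), so the
+ // static_objects_sync spin lock is no longer needed here.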
+ gct->static_objects = *STATIC_LINK(info,p);
+ *STATIC_LINK(info,p) = gct->scavenged_static_objects;
+ gct->scavenged_static_objects = p;
switch (info -> type) {
/*-----------------------------------------------------------------------------
scavenge the large object list.
- evac_gen set by caller; similar games played with evac_gen as with
+ evac_step set by caller; similar games played with evac_step as with
scavenge() - see comment at the top of scavenge(). Most large
- objects are (repeatedly) mutable, so most of the time evac_gen will
+ objects are (repeatedly) mutable, so most of the time evac_step will
be zero.
--------------------------------------------------------------------------- */
bdescr *bd;
StgPtr p;
- gct->evac_gen = ws->stp->gen_no;
+ gct->evac_step = ws->step;
bd = ws->todo_large_objects;
// the front when evacuating.
ws->todo_large_objects = bd->link;
- ACQUIRE_SPIN_LOCK(&ws->stp->sync_large_objects);
- dbl_link_onto(bd, &ws->stp->scavenged_large_objects);
- ws->stp->n_scavenged_large_blocks += bd->blocks;
- RELEASE_SPIN_LOCK(&ws->stp->sync_large_objects);
+ ACQUIRE_SPIN_LOCK(&ws->step->sync_large_objects);
+ dbl_link_onto(bd, &ws->step->scavenged_large_objects);
+ ws->step->n_scavenged_large_blocks += bd->blocks;
+ RELEASE_SPIN_LOCK(&ws->step->sync_large_objects);
p = bd->start;
if (scavenge_one(p)) {
- if (ws->stp->gen_no > 0) {
- recordMutableGen_GC((StgClosure *)p, ws->stp->gen);
+ if (ws->step->gen_no > 0) {
+ recordMutableGen_GC((StgClosure *)p, ws->step->gen);
}
}
+
+ // stats
+ gct->scanned += closure_sizeW((StgClosure*)p);
}
}
/* ----------------------------------------------------------------------------
- Find the oldest full block to scavenge, and scavenge it.
+ Scavenge a block
------------------------------------------------------------------------- */
-static rtsBool
-scavenge_find_global_work (void)
-{
- bdescr *bd;
- int g, s;
- rtsBool flag;
- step_workspace *ws;
-
- flag = rtsFalse;
- for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
- for (s = generations[g].n_steps; --s >= 0; ) {
- if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
- continue;
- }
- ws = &gct->steps[g][s];
-
- // If we have any large objects to scavenge, do them now.
- if (ws->todo_large_objects) {
- scavenge_large(ws);
- flag = rtsTrue;
- }
-
- if ((bd = grab_todo_block(ws)) != NULL) {
- // no need to assign this to ws->scan_bd, we're going
- // to scavenge the whole thing and then push it on
- // our scavd list. This saves pushing out the
- // scan_bd block, which might be partial.
- scavenge_block(bd, bd->start);
- push_scan_block(bd, ws);
- return rtsTrue;
- }
-
- if (flag) return rtsTrue;
- }
- }
- return rtsFalse;
-}
+#define PARALLEL_GC
+#include "Scav.c-inc"
+#undef PARALLEL_GC
+#include "Scav.c-inc"
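+// Scav.c-inc is compiled twice: once with PARALLEL_GC defined and once
+// without, presumably producing the parallel scavenge_block() and the
+// sequential scavenge_block1() variants selected on n_gc_threads below.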
/* ----------------------------------------------------------------------------
- Look for local work to do.
-
- We can have outstanding scavenging to do if, for any of the workspaces,
-
- - the scan block is the same as the todo block, and new objects
- have been evacuated to the todo block.
-
- - the scan block *was* the same as the todo block, but the todo
- block filled up and a new one has been allocated.
+ Look for work to do.
+
+ We look for the oldest step that has either a todo block that can
+ be scanned, or a block of work on the global queue that we can
+ scan.
+
+ It is important to take work from the *oldest* generation that has
+ work available, because that minimizes the likelihood of
+ evacuating objects into a young generation when they should have
+ been eagerly promoted. This really does make a difference (the
+ cacheprof benchmark is one that is affected).
+
+ We also want to scan our own todo block, if possible, before grabbing
+ work from the global queue: we don't want to steal work from the
+ global queue and starve other threads when there is other work we
+ could usefully be doing ourselves.
------------------------------------------------------------------------- */
static rtsBool
-scavenge_find_local_work (void)
+scavenge_find_work (void)
{
- int g, s;
+ int s;
step_workspace *ws;
- rtsBool flag;
+ rtsBool did_something, did_anything;
+ bdescr *bd;
- flag = rtsFalse;
- for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
- for (s = generations[g].n_steps; --s >= 0; ) {
- if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
- continue;
- }
- ws = &gct->steps[g][s];
-
- // If we have a todo block and no scan block, start
- // scanning the todo block.
- if (ws->scan_bd == NULL && ws->todo_bd != NULL)
- {
- ws->scan_bd = ws->todo_bd;
- ws->scan = ws->scan_bd->start;
- }
+ gct->scav_find_work++;
- // If we have a scan block with some work to do,
- // scavenge everything up to the free pointer.
- if (ws->scan != NULL && ws->scan < ws->scan_bd->free)
- {
- scavenge_block(ws->scan_bd, ws->scan);
- ws->scan = ws->scan_bd->free;
- flag = rtsTrue;
- }
+ did_anything = rtsFalse;
- if (ws->scan_bd != NULL && ws->scan == ws->scan_bd->free
- && ws->scan_bd != ws->todo_bd)
- {
- // we're not going to evac any more objects into
- // this block, so push it now.
- push_scan_block(ws->scan_bd, ws);
- ws->scan_bd = NULL;
- ws->scan = NULL;
- // we might be able to scan the todo block now. But
- // don't do it right away: there might be full blocks
- // waiting to be scanned as a result of scavenge_block above.
- flag = rtsTrue;
- }
+loop:
+ did_something = rtsFalse;
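+ // gct->steps[] is now a single flat array indexed by step number;
+ // counting down from total_steps-1 visits the oldest steps first, and
+ // index 0 (presumably gen 0, step 0, as in the old nested loop) is
+ // skipped when there is more than one generation.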
+ for (s = total_steps-1; s >= 0; s--) {
+ if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+ ws = &gct->steps[s];
+
+ gct->scan_bd = NULL;
+
+ // If we have a scan block with some work to do,
+ // scavenge everything up to the free pointer.
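+ // (the scan pointer now lives in the block descriptor itself, as
+ // bd->u.scan, rather than in the workspace as ws->scan)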
+ if (ws->todo_bd->u.scan < ws->todo_free)
+ {
+ if (n_gc_threads == 1) {
+ scavenge_block1(ws->todo_bd);
+ } else {
+ scavenge_block(ws->todo_bd);
+ }
+ did_something = rtsTrue;
+ break;
+ }
- if (flag) return rtsTrue;
- }
+ // If we have any large objects to scavenge, do them now.
+ if (ws->todo_large_objects) {
+ scavenge_large(ws);
+ did_something = rtsTrue;
+ break;
+ }
+
+ if ((bd = grab_todo_block(ws)) != NULL) {
+ // no need to assign this to ws->scan_bd, we're going
+ // to scavenge the whole thing and then push it on
+ // our scavd list. This saves pushing out the
+ // scan_bd block, which might be partial.
+ if (n_gc_threads == 1) {
+ scavenge_block1(bd);
+ } else {
+ scavenge_block(bd);
+ }
+ did_something = rtsTrue;
+ break;
+ }
}
- return rtsFalse;
+
+ if (did_something) {
+ did_anything = rtsTrue;
+ goto loop;
+ }
+ // only return when there is no more work to do
+
+ return did_anything;
}
/* ----------------------------------------------------------------------------
work_to_do = rtsFalse;
// scavenge static objects
- if (major_gc && static_objects != END_OF_STATIC_LIST) {
- IF_DEBUG(sanity, checkStaticObjects(static_objects));
+ if (major_gc && gct->static_objects != END_OF_STATIC_LIST) {
+ IF_DEBUG(sanity, checkStaticObjects(gct->static_objects));
scavenge_static();
}
// local work. Only if all the global work has been exhausted
// do we start scavenging the fragments of blocks in the local
// workspaces.
- if (scavenge_find_global_work()) goto loop;
- if (scavenge_find_local_work()) goto loop;
+ if (scavenge_find_work()) goto loop;
if (work_to_do) goto loop;
}
rtsBool
any_work (void)
{
- int g, s;
+ int s;
step_workspace *ws;
+ gct->any_work++;
+
write_barrier();
- // scavenge static objects
- if (major_gc && static_objects != END_OF_STATIC_LIST) {
- return rtsTrue;
- }
-
// scavenge objects in compacted generation
if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
(mark_stack_bdescr != NULL && !mark_stack_empty())) {
// Check for global work in any step. We don't need to check for
// local work, because we have already exited scavenge_loop(),
// which means there is no local work for this thread.
- for (g = RtsFlags.GcFlags.generations; --g >= 0; ) {
- for (s = generations[g].n_steps; --s >= 0; ) {
- if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
- continue;
- }
- ws = &gct->steps[g][s];
- if (ws->todo_large_objects) return rtsTrue;
- if (ws->stp->todos) return rtsTrue;
- }
+ for (s = total_steps-1; s >= 0; s--) {
+ if (s == 0 && RtsFlags.GcFlags.generations > 1) {
+ continue;
+ }
+ ws = &gct->steps[s];
+ if (ws->todo_large_objects) return rtsTrue;
+ if (ws->step->todos) return rtsTrue;
}
+ gct->no_work++;
+
return rtsFalse;
}