#if defined(THREADED_RTS) && !defined(PARALLEL_GC)
# define evacuate(a) evacuate1(a)
-# define recordMutableGen_GC(a,b) recordMutableGen(a,b)
# define scavenge_loop(a) scavenge_loop1(a)
# define scavenge_block(a) scavenge_block1(a)
# define scavenge_mutable_list(bd,g) scavenge_mutable_list1(bd,g)
debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
+ // update the pointer from the Task.
+ if (tso->bound != NULL) {
+ tso->bound->tso = tso;
+ }
+
saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
- || tso->why_blocked == BlockedOnException
+ || tso->why_blocked == BlockedOnMsgWakeup
+ || tso->why_blocked == BlockedOnMsgThrowTo
) {
evacuate(&tso->block_info.closure);
}
}
/* -----------------------------------------------------------------------------
+ Mutable arrays of pointers
+ -------------------------------------------------------------------------- */
+
+// Scavenge every element of a MUT_ARR_PTRS, maintaining its card table:
+// for each card-sized chunk of the payload, set the card to 1 if any
+// element in that chunk failed to evacuate (i.e. still points into a
+// younger generation), otherwise clear it to 0.  gct->failed_to_evac is
+// left set iff any card failed, so the caller can decide whether the
+// whole array stays on the mutable list.  Returns a pointer just past
+// the array (its size in words via mut_arr_ptrs_sizeW).
+static StgPtr scavenge_mut_arr_ptrs (StgMutArrPtrs *a)
+{
+ lnat m;
+ rtsBool any_failed;
+ StgPtr p, q;
+
+ any_failed = rtsFalse;
+ p = (StgPtr)&a->payload[0];
+ // All full cards except the last; the (int) casts guard against the
+ // "cards - 1" expression going negative for very small arrays.
+ for (m = 0; (int)m < (int)mutArrPtrsCards(a->ptrs) - 1; m++)
+ {
+ q = p + (1 << MUT_ARR_PTRS_CARD_BITS);
+ for (; p < q; p++) {
+ evacuate((StgClosure**)p);
+ }
+ if (gct->failed_to_evac) {
+ any_failed = rtsTrue;
+ *mutArrPtrsCard(a,m) = 1;
+ // reset so the next card's result is judged independently
+ gct->failed_to_evac = rtsFalse;
+ } else {
+ *mutArrPtrsCard(a,m) = 0;
+ }
+ }
+
+ // The final (possibly partial) card: from wherever p stopped up to the
+ // end of the payload.  m still indexes this last card after the loop.
+ q = (StgPtr)&a->payload[a->ptrs];
+ if (p < q) {
+ for (; p < q; p++) {
+ evacuate((StgClosure**)p);
+ }
+ if (gct->failed_to_evac) {
+ any_failed = rtsTrue;
+ *mutArrPtrsCard(a,m) = 1;
+ gct->failed_to_evac = rtsFalse;
+ } else {
+ *mutArrPtrsCard(a,m) = 0;
+ }
+ }
+
+ // report the aggregate result to the caller
+ gct->failed_to_evac = any_failed;
+ return (StgPtr)a + mut_arr_ptrs_sizeW(a);
+}
+
+// scavenge only the marked areas of a MUT_ARR_PTRS
+// scavenge only the marked areas of a MUT_ARR_PTRS
+//
+// Like scavenge_mut_arr_ptrs, but visits only the cards whose mark is
+// non-zero (i.e. chunks known to contain pointers into a younger
+// generation), clearing each card whose contents all evacuate cleanly.
+// Cards that still fail keep their mark.  gct->failed_to_evac is left
+// set iff any visited card failed.  Returns a pointer just past the
+// array.
+static StgPtr scavenge_mut_arr_ptrs_marked (StgMutArrPtrs *a)
+{
+ lnat m;
+ StgPtr p, q;
+ rtsBool any_failed;
+
+ any_failed = rtsFalse;
+ for (m = 0; m < mutArrPtrsCards(a->ptrs); m++)
+ {
+ if (*mutArrPtrsCard(a,m) != 0) {
+ p = (StgPtr)&a->payload[m << MUT_ARR_PTRS_CARD_BITS];
+ // clamp the card's end to the payload end for the final,
+ // possibly partial, card
+ q = stg_min(p + (1 << MUT_ARR_PTRS_CARD_BITS),
+ (StgPtr)&a->payload[a->ptrs]);
+ for (; p < q; p++) {
+ evacuate((StgClosure**)p);
+ }
+ if (gct->failed_to_evac) {
+ // leave the card marked; reset for the next card
+ any_failed = rtsTrue;
+ gct->failed_to_evac = rtsFalse;
+ } else {
+ *mutArrPtrsCard(a,m) = 0;
+ }
+ }
+ }
+
+ // report the aggregate result to the caller
+ gct->failed_to_evac = any_failed;
+ return (StgPtr)a + mut_arr_ptrs_sizeW(a);
+}
+
+/* -----------------------------------------------------------------------------
Blocks of function args occur on the stack (at the top) and
in PAPs.
-------------------------------------------------------------------------- */
{
StgPtr p, q;
StgInfoTable *info;
- generation *saved_evac_gen;
rtsBool saved_eager_promotion;
gen_workspace *ws;
gct->scan_bd = bd;
gct->evac_gen = bd->gen;
- saved_evac_gen = gct->evac_gen;
saved_eager_promotion = gct->eager_promotion;
gct->failed_to_evac = rtsFalse;
gen_obj:
case CONSTR:
case WEAK:
- case STABLE_NAME:
+ case PRIM:
{
StgPtr end;
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
- // follow everything
{
- StgPtr next;
+ // We don't eagerly promote objects pointed to by a mutable
+ // array, but if we find the array only points to objects in
+ // the same or an older generation, we mark it "clean" and
+ // avoid traversing it during minor GCs.
+ gct->eager_promotion = rtsFalse;
- // We don't eagerly promote objects pointed to by a mutable
- // array, but if we find the array only points to objects in
- // the same or an older generation, we mark it "clean" and
- // avoid traversing it during minor GCs.
- gct->eager_promotion = rtsFalse;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager_promotion;
+ p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
if (gct->failed_to_evac) {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // always put it on the mutable list.
break;
}
case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
- StgPtr next;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
+ p = scavenge_mut_arr_ptrs((StgMutArrPtrs*)p);
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
}
break;
}
- case TVAR_WATCH_QUEUE:
+ case MUT_PRIM:
{
- StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&wq->closure);
- evacuate((StgClosure **)&wq->next_queue_entry);
- evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTVarWatchQueue);
- break;
- }
+ StgPtr end;
- case TVAR:
- {
- StgTVar *tvar = ((StgTVar *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&tvar->current_value);
- evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTVar);
- break;
- }
+ gct->eager_promotion = rtsFalse;
- case TREC_HEADER:
- {
- StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&trec->enclosing_trec);
- evacuate((StgClosure **)&trec->current_chunk);
- evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_gen = saved_evac_gen;
+ end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ evacuate((StgClosure **)p);
+ }
+ p += info->layout.payload.nptrs;
+
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgTRecHeader);
- break;
+ break;
}
case TREC_CHUNK:
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_gen = 0;
+ gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_gen = saved_evac_gen;
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
p += sizeofW(StgTRecChunk);
break;
}
- case ATOMIC_INVARIANT:
- {
- StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_gen = 0;
- evacuate(&invariant->code);
- evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgAtomicInvariant);
- break;
- }
-
- case INVARIANT_CHECK_QUEUE:
- {
- StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&queue->invariant);
- evacuate((StgClosure **)&queue->my_execution);
- evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- p += sizeofW(StgInvariantCheckQueue);
- break;
- }
-
default:
barf("scavenge: unimplemented/strange closure type %d @ %p",
info->type, p);
{
StgPtr p, q;
StgInfoTable *info;
- generation *saved_evac_gen;
+ rtsBool saved_eager_promotion;
gct->evac_gen = oldest_gen;
- saved_evac_gen = gct->evac_gen;
+ saved_eager_promotion = gct->eager_promotion;
while ((p = pop_mark_stack())) {
case MVAR_CLEAN:
case MVAR_DIRTY:
{
- rtsBool saved_eager_promotion = gct->eager_promotion;
-
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&mvar->head);
gen_obj:
case CONSTR:
case WEAK:
- case STABLE_NAME:
+ case PRIM:
{
StgPtr end;
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
- rtsBool saved_eager_promotion = gct->eager_promotion;
-
gct->eager_promotion = rtsFalse;
evacuate(&((StgMutVar *)p)->var);
gct->eager_promotion = saved_eager_promotion;
case MUT_ARR_PTRS_DIRTY:
// follow everything
{
- StgPtr next;
- rtsBool saved_eager;
-
// We don't eagerly promote objects pointed to by a mutable
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager;
- if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
- } else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
- }
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+ if (gct->failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable anyhow.
break;
}
case MUT_ARR_PTRS_FROZEN0:
// follow everything
{
- StgPtr next, q = p;
+ StgPtr q = p;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
break;
}
- case TVAR_WATCH_QUEUE:
- {
- StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&wq->closure);
- evacuate((StgClosure **)&wq->next_queue_entry);
- evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
- case TVAR:
- {
- StgTVar *tvar = ((StgTVar *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&tvar->current_value);
- evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
+ case MUT_PRIM:
+ {
+ StgPtr end;
+
+ gct->eager_promotion = rtsFalse;
+
+ end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ evacuate((StgClosure **)p);
+ }
+
+ gct->eager_promotion = saved_eager_promotion;
+ gct->failed_to_evac = rtsTrue; // mutable
+ break;
+ }
+
case TREC_CHUNK:
{
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_gen = 0;
+ gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
- case TREC_HEADER:
- {
- StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&trec->enclosing_trec);
- evacuate((StgClosure **)&trec->current_chunk);
- evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_gen = saved_evac_gen;
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
- case ATOMIC_INVARIANT:
- {
- StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_gen = 0;
- evacuate(&invariant->code);
- evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
- case INVARIANT_CHECK_QUEUE:
- {
- StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&queue->invariant);
- evacuate((StgClosure **)&queue->my_execution);
- evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
default:
barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
info->type, p);
scavenge_one(StgPtr p)
{
const StgInfoTable *info;
- generation *saved_evac_gen = gct->evac_gen;
rtsBool no_luck;
+ rtsBool saved_eager_promotion;
+ saved_eager_promotion = gct->eager_promotion;
+
ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
info = get_itbl((StgClosure *)p);
case MVAR_CLEAN:
case MVAR_DIRTY:
{
- rtsBool saved_eager_promotion = gct->eager_promotion;
-
StgMVar *mvar = ((StgMVar *)p);
gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&mvar->head);
case CONSTR_0_2:
case CONSTR_2_0:
case WEAK:
+ case PRIM:
case IND_PERM:
{
StgPtr q, end;
case MUT_VAR_CLEAN:
case MUT_VAR_DIRTY: {
StgPtr q = p;
- rtsBool saved_eager_promotion = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
evacuate(&((StgMutVar *)p)->var);
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
{
- StgPtr next, q;
- rtsBool saved_eager;
-
// We don't eagerly promote objects pointed to by a mutable
// array, but if we find the array only points to objects in
// the same or an older generation, we mark it "clean" and
// avoid traversing it during minor GCs.
- saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
- q = p;
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
- gct->eager_promotion = saved_eager;
+
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
} else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
}
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue;
break;
}
case MUT_ARR_PTRS_FROZEN0:
{
// follow everything
- StgPtr next, q=p;
-
- next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
- for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
- evacuate((StgClosure **)p);
- }
-
+ scavenge_mut_arr_ptrs((StgMutArrPtrs *)p);
+
// If we're going to put this object on the mutable list, then
// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
if (gct->failed_to_evac) {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
} else {
- ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
}
break;
}
break;
}
- case TVAR_WATCH_QUEUE:
- {
- StgTVarWatchQueue *wq = ((StgTVarWatchQueue *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&wq->closure);
- evacuate((StgClosure **)&wq->next_queue_entry);
- evacuate((StgClosure **)&wq->prev_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
+ case MUT_PRIM:
+ {
+ StgPtr end;
+
+ gct->eager_promotion = rtsFalse;
+
+ end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
+ for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
+ evacuate((StgClosure **)p);
+ }
- case TVAR:
- {
- StgTVar *tvar = ((StgTVar *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&tvar->current_value);
- evacuate((StgClosure **)&tvar->first_watch_queue_entry);
- gct->evac_gen = saved_evac_gen;
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
break;
- }
- case TREC_HEADER:
- {
- StgTRecHeader *trec = ((StgTRecHeader *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&trec->enclosing_trec);
- evacuate((StgClosure **)&trec->current_chunk);
- evacuate((StgClosure **)&trec->invariants_to_check);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
+ }
case TREC_CHUNK:
{
StgWord i;
StgTRecChunk *tc = ((StgTRecChunk *) p);
TRecEntry *e = &(tc -> entries[0]);
- gct->evac_gen = 0;
+ gct->eager_promotion = rtsFalse;
evacuate((StgClosure **)&tc->prev_chunk);
for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
evacuate((StgClosure **)&e->tvar);
evacuate((StgClosure **)&e->expected_value);
evacuate((StgClosure **)&e->new_value);
}
- gct->evac_gen = saved_evac_gen;
+ gct->eager_promotion = saved_eager_promotion;
gct->failed_to_evac = rtsTrue; // mutable
break;
}
- case ATOMIC_INVARIANT:
- {
- StgAtomicInvariant *invariant = ((StgAtomicInvariant *) p);
- gct->evac_gen = 0;
- evacuate(&invariant->code);
- evacuate((StgClosure **)&invariant->last_execution);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
- case INVARIANT_CHECK_QUEUE:
- {
- StgInvariantCheckQueue *queue = ((StgInvariantCheckQueue *) p);
- gct->evac_gen = 0;
- evacuate((StgClosure **)&queue->invariant);
- evacuate((StgClosure **)&queue->my_execution);
- evacuate((StgClosure **)&queue->next_queue_entry);
- gct->evac_gen = saved_evac_gen;
- gct->failed_to_evac = rtsTrue; // mutable
- break;
- }
-
case IND:
// IND can happen, for example, when the interpreter allocates
// a gigantic AP closure (more than one block), which ends up
// definitely doesn't point into a young generation.
// Clean objects don't need to be scavenged. Some clean
// objects (MUT_VAR_CLEAN) are not kept on the mutable
- // list at all; others, such as MUT_ARR_PTRS_CLEAN
+ // list at all; others, such as TSO
// are always on the mutable list.
//
switch (get_itbl((StgClosure *)p)->type) {
case MUT_ARR_PTRS_CLEAN:
recordMutableGen_GC((StgClosure *)p,gen->no);
continue;
+ case MUT_ARR_PTRS_DIRTY:
+ {
+ rtsBool saved_eager_promotion;
+ saved_eager_promotion = gct->eager_promotion;
+ gct->eager_promotion = rtsFalse;
+
+ scavenge_mut_arr_ptrs_marked((StgMutArrPtrs *)p);
+
+ if (gct->failed_to_evac) {
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
+ } else {
+ ((StgClosure *)p)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
+ }
+
+ gct->eager_promotion = saved_eager_promotion;
+ gct->failed_to_evac = rtsFalse;
+ recordMutableGen_GC((StgClosure *)p,gen->no);
+ continue;
+ }
case TSO: {
StgTSO *tso = (StgTSO *)p;
if (tso->dirty == 0) {
- // Must be on the mutable list because its link
- // field is dirty.
- ASSERT(tso->flags & TSO_LINK_DIRTY);
+ // Should be on the mutable list because its link
+ // field is dirty. However, in parallel GC we may
+ // have a thread on multiple mutable lists, so
+ // this assertion would be invalid:
+ // ASSERT(tso->flags & TSO_LINK_DIRTY);
scavenge_TSO_link(tso);
if (gct->failed_to_evac) {