Scavenge a TSO.
-------------------------------------------------------------------------- */
-STATIC_INLINE void
-scavenge_TSO_link (StgTSO *tso)
-{
- // We don't always chase the link field: TSOs on the blackhole
- // queue are not automatically alive, so the link field is a
- // "weak" pointer in that case.
- if (tso->why_blocked != BlockedOnBlackHole) {
- evacuate((StgClosure **)&tso->_link);
- }
-}
-
static void
scavengeTSO (StgTSO *tso)
{
rtsBool saved_eager;
- if (tso->what_next == ThreadRelocated) {
- // the only way this can happen is if the old TSO was on the
- // mutable list. We might have other links to this defunct
- // TSO, so we must update its link field.
- evacuate((StgClosure**)&tso->_link);
- return;
- }
-
debugTrace(DEBUG_gc,"scavenging thread %d",(int)tso->id);
// update the pointer from the Task.
saved_eager = gct->eager_promotion;
gct->eager_promotion = rtsFalse;
+ evacuate((StgClosure **)&tso->blocked_exceptions);
+ evacuate((StgClosure **)&tso->bq);
+
+ // scavenge current transaction record
+ evacuate((StgClosure **)&tso->trec);
+
+ evacuate((StgClosure **)&tso->stackobj);
+
+ evacuate((StgClosure **)&tso->_link);
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
- || tso->why_blocked == BlockedOnMsgWakeup
|| tso->why_blocked == BlockedOnMsgThrowTo
+ || tso->why_blocked == NotBlocked
) {
evacuate(&tso->block_info.closure);
}
- evacuate((StgClosure **)&tso->blocked_exceptions);
-
- // scavange current transaction record
- evacuate((StgClosure **)&tso->trec);
-
- // scavenge this thread's stack
- scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
-
- if (gct->failed_to_evac) {
- tso->dirty = 1;
- scavenge_TSO_link(tso);
- } else {
- tso->dirty = 0;
- scavenge_TSO_link(tso);
- if (gct->failed_to_evac) {
- tso->flags |= TSO_LINK_DIRTY;
- } else {
- tso->flags &= ~TSO_LINK_DIRTY;
- }
+#ifdef THREADED_RTS
+ // in the THREADED_RTS, block_info.closure must always point to a
+ // valid closure, because we assume this in throwTo(). In the
+ // non-threaded RTS it might be a FD (for
+ // BlockedOnRead/BlockedOnWrite) or a time value (BlockedOnDelay)
+ else {
+ tso->block_info.closure = (StgClosure *)END_TSO_QUEUE;
}
+#endif
+
+ tso->dirty = gct->failed_to_evac;
gct->eager_promotion = saved_eager;
}
while (bitmap != 0) {
if ((bitmap & 1) != 0) {
-#if defined(__PIC__) && defined(mingw32_TARGET_OS)
+#if defined(__PIC__) && defined(mingw32_HOST_OS)
// Special-case to handle references to closures hiding out in DLLs, since
// double indirections required to get at those. The code generator knows
// which is which when generating the SRT, so it stores the (indirect)
}
case IND_PERM:
- if (bd->gen_no != 0) {
-#ifdef PROFILING
- // @LDV profiling
- // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
- // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
- LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
-#endif
- //
- // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
- //
- SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
-
- // We pretend that p has just been created.
- LDV_RECORD_CREATE((StgClosure *)p);
- }
- // fall through
- case IND_OLDGEN_PERM:
+ case BLACKHOLE:
evacuate(&((StgInd *)p)->indirectee);
p += sizeofW(StgInd);
break;
p += sizeofW(StgMutVar);
break;
- case CAF_BLACKHOLE:
- case BLACKHOLE:
- p += BLACKHOLE_sizeW();
- break;
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+ gct->eager_promotion = rtsFalse;
+ evacuate(&bq->bh);
+ evacuate((StgClosure**)&bq->owner);
+ evacuate((StgClosure**)&bq->queue);
+ evacuate((StgClosure**)&bq->link);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+ } else {
+ bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+ }
+ p += sizeofW(StgBlockingQueue);
+ break;
+ }
case THUNK_SELECTOR:
{
case TSO:
{
- StgTSO *tso = (StgTSO *)p;
- scavengeTSO(tso);
- p += tso_sizeW(tso);
+ scavengeTSO((StgTSO *)p);
+ p += sizeofW(StgTSO);
break;
}
+ case STACK:
+ {
+ StgStack *stack = (StgStack*)p;
+
+ gct->eager_promotion = rtsFalse;
+
+ scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ stack->dirty = gct->failed_to_evac;
+ p += stack_sizeW(stack);
+
+ gct->eager_promotion = saved_eager_promotion;
+ break;
+ }
+
case MUT_PRIM:
{
StgPtr end;
// no "old" generation.
break;
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
+ case IND:
+ case BLACKHOLE:
evacuate(&((StgInd *)p)->indirectee);
break;
break;
}
- case CAF_BLACKHOLE:
- case BLACKHOLE:
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+ gct->eager_promotion = rtsFalse;
+ evacuate(&bq->bh);
+ evacuate((StgClosure**)&bq->owner);
+ evacuate((StgClosure**)&bq->queue);
+ evacuate((StgClosure**)&bq->link);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+ } else {
+ bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+ }
+ break;
+ }
+
case ARR_WORDS:
break;
break;
}
+ case STACK:
+ {
+ StgStack *stack = (StgStack*)p;
+
+ gct->eager_promotion = rtsFalse;
+
+ scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ stack->dirty = gct->failed_to_evac;
+
+ gct->eager_promotion = saved_eager_promotion;
+ break;
+ }
+
case MUT_PRIM:
{
StgPtr end;
break;
}
- case CAF_BLACKHOLE:
- case BLACKHOLE:
- break;
-
+ case BLOCKING_QUEUE:
+ {
+ StgBlockingQueue *bq = (StgBlockingQueue *)p;
+
+ gct->eager_promotion = rtsFalse;
+ evacuate(&bq->bh);
+ evacuate((StgClosure**)&bq->owner);
+ evacuate((StgClosure**)&bq->queue);
+ evacuate((StgClosure**)&bq->link);
+ gct->eager_promotion = saved_eager_promotion;
+
+ if (gct->failed_to_evac) {
+ bq->header.info = &stg_BLOCKING_QUEUE_DIRTY_info;
+ } else {
+ bq->header.info = &stg_BLOCKING_QUEUE_CLEAN_info;
+ }
+ break;
+ }
+
case THUNK_SELECTOR:
{
StgSelector *s = (StgSelector *)p;
break;
}
+ case STACK:
+ {
+ StgStack *stack = (StgStack*)p;
+
+ gct->eager_promotion = rtsFalse;
+
+ scavenge_stack(stack->sp, stack->stack + stack->stack_size);
+ stack->dirty = gct->failed_to_evac;
+
+ gct->eager_promotion = saved_eager_promotion;
+ break;
+ }
+
case MUT_PRIM:
{
StgPtr end;
// IND can happen, for example, when the interpreter allocates
// a gigantic AP closure (more than one block), which ends up
// on the large-object list and then gets updated. See #3424.
- case IND_OLDGEN:
- case IND_OLDGEN_PERM:
+ case BLACKHOLE:
case IND_STATIC:
evacuate(&((StgInd *)p)->indirectee);
#ifdef DEBUG
switch (get_itbl((StgClosure *)p)->type) {
case MUT_VAR_CLEAN:
- barf("MUT_VAR_CLEAN on mutable list");
+ // can happen due to concurrent writeMutVars
case MUT_VAR_DIRTY:
mutlist_MUTVARS++; break;
case MUT_ARR_PTRS_CLEAN:
recordMutableGen_GC((StgClosure *)p,gen->no);
continue;
}
- case TSO: {
- StgTSO *tso = (StgTSO *)p;
- if (tso->dirty == 0) {
- // Should be on the mutable list because its link
- // field is dirty. However, in parallel GC we may
- // have a thread on multiple mutable lists, so
- // this assertion would be invalid:
- // ASSERT(tso->flags & TSO_LINK_DIRTY);
-
- scavenge_TSO_link(tso);
- if (gct->failed_to_evac) {
- recordMutableGen_GC((StgClosure *)p,gen->no);
- gct->failed_to_evac = rtsFalse;
- } else {
- tso->flags &= ~TSO_LINK_DIRTY;
- }
- continue;
- }
- }
- default:
+ default:
;
}
static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
- nat i, b;
+ nat i, j, b;
StgWord bitmap;
b = 0;
- bitmap = large_bitmap->bitmap[b];
- for (i = 0; i < size; ) {
- if ((bitmap & 1) == 0) {
- evacuate((StgClosure **)p);
- }
- i++;
- p++;
- if (i % BITS_IN(W_) == 0) {
- b++;
- bitmap = large_bitmap->bitmap[b];
- } else {
+
+ for (i = 0; i < size; b++) {
+ bitmap = large_bitmap->bitmap[b];
+ j = stg_min(size-i, BITS_IN(W_));
+ i += j;
+ for (; j > 0; j--, p++) {
+ if ((bitmap & 1) == 0) {
+ evacuate((StgClosure **)p);
+ }
bitmap = bitmap >> 1;
- }
+ }
}
}
// threadPaused(). We could traverse the whole stack again
// before GC, but that seems like overkill.
//
- // So, if the frame points to an indirection, it will get
- // shorted out when we evacuate. If this happens, we have no
- // closure to update any more. In the past we solved this by
- // replacing the IND with an IND_PERM, but a better solution
- // is to replace the update frame with a frame that no longer
- // does the update and just uses the value already computed by
- // the other thread, so that is what we now do.
+ // Scavenging this update frame as normal would be disastrous;
+ // the updatee would end up pointing to the value. So we
+ // check whether the value after evacuation is a BLACKHOLE,
+ // and if not, we change the update frame to an stg_enter
+ // frame that simply returns the value. Hence, blackholing is
+ // compulsory (otherwise we would have to check for thunks
+ // too).
//
// Note [upd-black-hole]
// One slight hiccup is that the THUNK_SELECTOR machinery can
// the updatee is never a THUNK_SELECTOR and we're ok.
// NB. this is a new invariant: blackholing is not optional.
{
- StgClosure *v;
StgUpdateFrame *frame = (StgUpdateFrame *)p;
+ StgClosure *v;
evacuate(&frame->updatee);
v = frame->updatee;
if (GET_CLOSURE_TAG(v) != 0 ||
- (get_itbl(v)->type != BLACKHOLE &&
- get_itbl(v)->type != CAF_BLACKHOLE)) {
- frame->header.info = (const StgInfoTable*)&stg_gc_unpt_r1_info;
+ (get_itbl(v)->type != BLACKHOLE)) {
+ // blackholing is compulsory, see above.
+ frame->header.info = (const StgInfoTable*)&stg_enter_checkbh_info;
}
+ ASSERT(v->header.info != &stg_TSO_info);
p += sizeofW(StgUpdateFrame);
continue;
}
case CATCH_STM_FRAME:
case CATCH_RETRY_FRAME:
case ATOMICALLY_FRAME:
+ case UNDERFLOW_FRAME:
case STOP_FRAME:
case CATCH_FRAME:
case RET_SMALL: