ACQUIRE_SM_LOCK;
// send exceptions to any threads which were about to die
+ RELEASE_SM_LOCK;
resurrectThreads(resurrected_threads);
+ ACQUIRE_SM_LOCK;
// Update the stable pointer hash table.
updateStablePtrTable(major_gc);
SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
// fill the slop
if (size_to_reserve - size_to_copy_org > 0)
- FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
+ LDV_FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
return (StgClosure *)dest;
}
switch (info->type) {
- case MUT_VAR:
+ case MUT_VAR_CLEAN:
+ case MUT_VAR_DIRTY:
case MVAR:
return copy(q,sizeW_fromITBL(info),stp);
case THUNK_SELECTOR:
{
StgClosure *p;
+ const StgInfoTable *info_ptr;
if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
return copy(q,THUNK_SELECTOR_sizeW(),stp);
}
+ // stashed away for LDV profiling, see below
+ info_ptr = q->header.info;
+
p = eval_thunk_selector(info->layout.selector_offset,
(StgSelector *)q);
val = evacuate(p);
thunk_selector_depth--;
+#ifdef PROFILING
+ // For the purposes of LDV profiling, we have destroyed
+ // the original selector thunk.
+ SET_INFO(q, info_ptr);
+ LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(q);
+#endif
+
// Update the THUNK_SELECTOR with an indirection to the
// EVACUATED closure now at p. Why do this rather than
// upd_evacuee(q,p)? Because we have an invariant that an
SET_INFO(q, &stg_IND_info);
((StgInd *)q)->indirectee = p;
-#ifdef PROFILING
- // We store the size of the just evacuated object in the
- // LDV word so that the profiler can guess the position of
- // the next object later.
- SET_EVACUAEE_FOR_LDV(q, THUNK_SELECTOR_sizeW());
-#endif
+ // For the purposes of LDV profiling, we have created an
+ // indirection.
+ LDV_RECORD_CREATE(q);
+
return val;
}
}
}
case BLOCKED_FETCH:
- ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
+ ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
to = copy(q,sizeofW(StgBlockedFetch),stp);
IF_DEBUG(gc,
debugBelch("@@ evacuate: %p (%s) to %p (%s)",
case REMOTE_REF:
# endif
case FETCH_ME:
- ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
+ ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
to = copy(q,sizeofW(StgFetchMe),stp);
IF_DEBUG(gc,
debugBelch("@@ evacuate: %p (%s) to %p (%s)",
return to;
case FETCH_ME_BQ:
- ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
+ ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
IF_DEBUG(gc,
debugBelch("@@ evacuate: %p (%s) to %p (%s)",
p += sizeofW(StgInd);
break;
- case MUT_VAR:
- evac_gen = 0;
+ case MUT_VAR_CLEAN:
+ case MUT_VAR_DIRTY: {
+ rtsBool saved_eager_promotion = eager_promotion;
+
+ eager_promotion = rtsFalse;
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable anyhow
+ eager_promotion = saved_eager_promotion;
+
+ if (failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
+ }
p += sizeofW(StgMutVar);
break;
+ }
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case TSO:
{
StgTSO *tso = (StgTSO *)p;
- evac_gen = 0;
+ rtsBool saved_eager = eager_promotion;
+
+ eager_promotion = rtsFalse;
scavengeTSO(tso);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable anyhow.
+ eager_promotion = saved_eager;
+
+ if (failed_to_evac) {
+ tso->flags |= TSO_DIRTY;
+ } else {
+ tso->flags &= ~TSO_DIRTY;
+ }
+
+ failed_to_evac = rtsTrue; // always on the mutable list
p += tso_sizeW(tso);
break;
}
evacuate(((StgInd *)p)->indirectee);
break;
- case MUT_VAR:
- evac_gen = 0;
+ case MUT_VAR_CLEAN:
+ case MUT_VAR_DIRTY: {
+ rtsBool saved_eager_promotion = eager_promotion;
+
+ eager_promotion = rtsFalse;
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue;
+ eager_promotion = saved_eager_promotion;
+
+ if (failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
+ }
break;
+ }
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case TSO:
{
StgTSO *tso = (StgTSO *)p;
- evac_gen = 0;
+ rtsBool saved_eager = eager_promotion;
+
+ eager_promotion = rtsFalse;
scavengeTSO(tso);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue;
+ eager_promotion = saved_eager;
+
+ if (failed_to_evac) {
+ tso->flags |= TSO_DIRTY;
+ } else {
+ tso->flags &= ~TSO_DIRTY;
+ }
+
+ failed_to_evac = rtsTrue; // always on the mutable list
break;
}
// already scavenged?
if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
- oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+ oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
goto loop;
}
push_mark_stack(oldgen_scan);
// ToDo: bump the linear scan by the actual size of the object
- oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
+ oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
goto linear_scan;
}
break;
}
- case MUT_VAR:
- evac_gen = 0;
+ case MUT_VAR_CLEAN:
+ case MUT_VAR_DIRTY: {
+ StgPtr q = p;
+ rtsBool saved_eager_promotion = eager_promotion;
+
+ eager_promotion = rtsFalse;
((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue; // mutable anyhow
+ eager_promotion = saved_eager_promotion;
+
+ if (failed_to_evac) {
+ ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
+ } else {
+ ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
+ }
break;
+ }
case CAF_BLACKHOLE:
case SE_CAF_BLACKHOLE:
case TSO:
{
StgTSO *tso = (StgTSO *)p;
-
- evac_gen = 0; // repeatedly mutable
+ rtsBool saved_eager = eager_promotion;
+
+ eager_promotion = rtsFalse;
scavengeTSO(tso);
- evac_gen = saved_evac_gen;
- failed_to_evac = rtsTrue;
+ eager_promotion = saved_eager;
+
+ if (failed_to_evac) {
+ tso->flags |= TSO_DIRTY;
+ } else {
+ tso->flags &= ~TSO_DIRTY;
+ }
+
+ failed_to_evac = rtsTrue; // always on the mutable list
break;
}
#ifdef DEBUG
switch (get_itbl((StgClosure *)p)->type) {
- case MUT_VAR:
+ case MUT_VAR_CLEAN:
+ barf("MUT_VAR_CLEAN on mutable list");
+ case MUT_VAR_DIRTY:
mutlist_MUTVARS++; break;
case MUT_ARR_PTRS_CLEAN:
case MUT_ARR_PTRS_DIRTY:
}
#endif
- // We don't need to scavenge clean arrays. This is the
- // Whole Point of MUT_ARR_PTRS_CLEAN.
- if (get_itbl((StgClosure *)p)->type == MUT_ARR_PTRS_CLEAN) {
+ // Check whether this object is "clean", that is it
+ // definitely doesn't point into a young generation.
+ // Clean objects don't need to be scavenged. Some clean
+ // objects (MUT_VAR_CLEAN) are not kept on the mutable
+ // list at all; others, such as MUT_ARR_PTRS_CLEAN and
+ // TSO, are always on the mutable list.
+ //
+ switch (get_itbl((StgClosure *)p)->type) {
+ case MUT_ARR_PTRS_CLEAN:
recordMutableGen((StgClosure *)p,gen);
continue;
+ case TSO: {
+ StgTSO *tso = (StgTSO *)p;
+ if ((tso->flags & TSO_DIRTY) == 0) {
+ // A clean TSO: we don't have to traverse its
+ // stack. However, we *do* follow the link field:
+ // we don't want to have to mark a TSO dirty just
+ // because we put it on a different queue.
+ if (tso->why_blocked != BlockedOnBlackHole) {
+ tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+ }
+ recordMutableGen((StgClosure *)p,gen);
+ continue;
+ }
+ }
+ default:
+ ;
}
if (scavenge_one(p)) {
- /* didn't manage to promote everything, so put the
- * object back on the list.
- */
+ // didn't manage to promote everything, so put the
+ // object back on the list.
recordMutableGen((StgClosure *)p,gen);
}
}
// Should we squeeze or not? Arbitrary heuristic: we squeeze if
// the number of words we have to shift down is less than the
// number of stack words we squeeze away by doing so.
- if (1 /*RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
- weight < words_to_squeeze*/) {
+ if (RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
+ weight < words_to_squeeze) {
stackSqueeze(tso, (StgPtr)frame);
}
}