if (stp->is_compacted) {
collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
} else {
- collected += stp->n_blocks * BLOCK_SIZE_W;
+ if (g == 0 && s == 0) {
+ collected += countNurseryBlocks() * BLOCK_SIZE_W;
+ collected += alloc_blocks;
+ } else {
+ collected += stp->n_blocks * BLOCK_SIZE_W;
+ }
}
/* free old memory and shift to-space into from-space for all
} else {
// we might have added extra large blocks to the nursery, so
// resize back to minAllocAreaSize again.
- resizeNurseries(RtsFlags.GcFlags.minAllocAreaSize);
+ resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
}
}
;
}
+ // Threads blocked on black holes: if the black hole
+ // is alive, then the thread is alive too.
+ if (tmp == NULL && t->why_blocked == BlockedOnBlackHole) {
+ if (isAlive(t->block_info.closure)) {
+ t = (StgTSO *)evacuate((StgClosure *)t);
+ tmp = t;
+ flag = rtsTrue;
+ }
+ }
+
if (tmp == NULL) {
// not alive (yet): leave this thread on the
// old_all_threads list.
}
}
+ /* If we evacuated any threads, we need to go back to the scavenger.
+ */
+ if (flag) return rtsTrue;
+
/* And resurrect any threads which were about to become garbage.
*/
{
}
}
+ /* Finally, we can update the blackhole_queue. This queue
+ * simply strings together TSOs blocked on black holes, it is
+ * not intended to keep anything alive. Hence, we do not follow
+ * pointers on the blackhole_queue until now, when we have
+ * determined which TSOs are otherwise reachable. We know at
+ * this point that all TSOs have been evacuated, however.
+ */
+ {
+ StgTSO **pt;
+ for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
+ *pt = (StgTSO *)isAlive((StgClosure *)*pt);
+ ASSERT(*pt != NULL);
+ }
+ }
+
weak_stage = WeakDone; // *now* we're done,
return rtsTrue; // but one more round of scavenging, please
static void
scavengeTSO (StgTSO *tso)
{
- // chase the link field for any TSOs on the same queue
- tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
|| tso->why_blocked == BlockedOnException
(StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
}
+ // We don't always chase the link field: TSOs on the blackhole
+ // queue are not automatically alive, so the link field is a
+ // "weak" pointer in that case.
+ if (tso->why_blocked != BlockedOnBlackHole) {
+ tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+ }
+
    // scavenge current transaction record
tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
}
case PAP:
- p = scavenge_AP((StgAP *)p);
+ p = scavenge_PAP((StgPAP *)p);
break;
case AP:
- p = scavenge_PAP((StgPAP *)p);
+ p = scavenge_AP((StgAP *)p);
break;
case ARR_WORDS:
debugBelch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
#ifdef DEBUG
- /* zero out the slop so that the sanity checker can tell
- * where the next closure is.
- */
- {
- StgInfoTable *bh_info = get_itbl(bh);
- nat np = bh_info->layout.payload.ptrs,
- nw = bh_info->layout.payload.nptrs, i;
- /* don't zero out slop for a THUNK_SELECTOR,
- * because its layout info is used for a
- * different purpose, and it's exactly the
- * same size as a BLACKHOLE in any case.
- */
- if (bh_info->type != THUNK_SELECTOR) {
- for (i = 0; i < np + nw; i++) {
- ((StgClosure *)bh)->payload[i] = INVALID_OBJECT;
- }
- }
- }
+ // zero out the slop so that the sanity checker can tell
+ // where the next closure is.
+ DEBUG_FILL_SLOP(bh);
#endif
#ifdef PROFILING
// We pretend that bh is now dead.
+ // ToDo: is the slop filling the same as DEBUG_FILL_SLOP?
LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif
// Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
}
debugBelch("\n");
}
-
-STATIC_INLINE rtsBool
-maybeLarge(StgClosure *closure)
-{
- StgInfoTable *info = get_itbl(closure);
-
- /* closure types that may be found on the new_large_objects list;
- see scavenge_large */
- return (info->type == MUT_ARR_PTRS ||
- info->type == MUT_ARR_PTRS_FROZEN ||
- info->type == MUT_ARR_PTRS_FROZEN0 ||
- info->type == TSO ||
- info->type == ARR_WORDS);
-}
-
-
#endif /* DEBUG */