}
}
+ /* Finally, we can update the blackhole_queue.  This queue
+  * simply strings together TSOs blocked on black holes; it is
+  * not intended to keep anything alive.  Hence, we do not follow
+  * pointers on the blackhole_queue until now, when we have
+  * determined which TSOs are otherwise reachable.  We do know,
+  * however, that every TSO has been evacuated by this point.
+  */
+ {
+ StgTSO **pt;
+ for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
+ *pt = (StgTSO *)isAlive((StgClosure *)*pt);
+ ASSERT(*pt != NULL);
+ }
+ }
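The loop above relies on a pointer-to-the-link-slot idiom: `pt` addresses the slot itself, so the slot can be overwritten in place with whatever address `isAlive` returns. Below is a minimal standalone sketch of the same idiom, with a hypothetical `Node` type and `is_alive` predicate (not RTS code), extended to splice out entries that did not survive:

```c
/* Hypothetical sketch; Node and is_alive are not RTS definitions.
 * Walking with a pointer to the link slot lets us rewrite the slot
 * in place: redirect it to a survivor, or splice a dead node out. */
#include <stdbool.h>
#include <stdio.h>

typedef struct Node_ {
    struct Node_ *link;
    bool alive;
} Node;

static bool is_alive(const Node *n) { return n->alive; }

static void prune_queue(Node **queue)
{
    Node **p = queue;
    while (*p != NULL) {
        if (is_alive(*p))
            p = &((*p)->link);   /* keep: step to this node's slot */
        else
            *p = (*p)->link;     /* drop: unlink the dead node */
    }
}

int main(void)
{
    Node c = { NULL, true }, b = { &c, false }, a = { &b, true };
    Node *queue = &a;
    prune_queue(&queue);             /* queue becomes a -> c */
    for (Node *n = queue; n != NULL; n = n->link)
        printf("%s\n", n->alive ? "alive" : "dead");
    return 0;
}
```

The RTS loop asserts instead of splicing because, as its comment notes, every TSO has already been evacuated by the time the blackhole_queue is updated.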
+
weak_stage = WeakDone; // *now* we're done,
return rtsTrue; // but one more round of scavenging, please
static void
scavengeTSO (StgTSO *tso)
{
- // chase the link field for any TSOs on the same queue
- tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+ // We don't chase the link field: TSOs on the blackhole queue are
+ // not automatically alive, so the link field is a "weak" pointer.
+ // Queues of TSOs are traversed explicitly.
+
if ( tso->why_blocked == BlockedOnMVar
|| tso->why_blocked == BlockedOnBlackHole
|| tso->why_blocked == BlockedOnException
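To see why the link field has to be weak, consider a toy marking model (all names here, `T`, `mark_strong`, `mark_weak`, are hypothetical; the real collector copies rather than marks). If `link` were traced strongly, a deadlocked thread chained behind a live one could never be reclaimed:

```c
/* Toy sketch with hypothetical names; not the RTS's evacuation code. */
#include <stdbool.h>
#include <stdio.h>

typedef struct T_ {
    struct T_ *link;    /* plays the role of tso->link */
    bool marked;
} T;

/* Old behaviour: tracing a thread traced its link too, so every
 * thread chained behind a live one was retained. */
static void mark_strong(T *t)
{
    for (; t != NULL && !t->marked; t = t->link)
        t->marked = true;
}

/* New behaviour: mark only the thread itself; the link is "weak",
 * and queues that really are roots are walked explicitly instead. */
static void mark_weak(T *t)
{
    if (t != NULL)
        t->marked = true;
}

int main(void)
{
    T deadlocked = { NULL, false };        /* otherwise unreachable */
    T live       = { &deadlocked, false };

    mark_strong(&live);
    printf("strong links: %s\n",
           deadlocked.marked ? "retained" : "collectable");

    deadlocked.marked = live.marked = false;
    mark_weak(&live);
    printf("weak links:   %s\n",
           deadlocked.marked ? "retained" : "collectable");
    return 0;
}
```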
KH @ 25/10/99
*/
+/* Evacuate a queue of TSOs chained through their link fields.
+ * scavengeTSO no longer follows tso->link, so a queue that really
+ * is a root must be walked explicitly, one link slot at a time.
+ */
+static void
+evac_TSO_queue (evac_fn evac, StgTSO **ptso)
+{
+ StgTSO **pt;
+
+ for (pt = ptso; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
+ evac((StgClosure **)pt);
+ }
+}
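One subtlety of the walk above: after `evac` overwrites a slot, `&((*pt)->link)` is the link field of the *new* copy, which still points at an old object and gets fixed up on the next iteration. A toy model of that interaction (`ToyTSO`, `toy_evac`, and `new_copy` are hypothetical stand-ins, assuming evacuation simply redirects to a pre-made copy):

```c
/* Hypothetical sketch; ToyTSO, toy_evac and new_copy are not RTS
 * definitions.  The callback may overwrite the slot it is handed,
 * and the loop then follows the link field of the new copy. */
#include <assert.h>
#include <stddef.h>

typedef struct ToyTSO_ {
    struct ToyTSO_ *link;
    struct ToyTSO_ *new_copy;   /* where "evacuation" moved this TSO */
} ToyTSO;

typedef void (*toy_evac_fn)(ToyTSO **slot);

static void toy_evac(ToyTSO **slot)
{
    *slot = (*slot)->new_copy;  /* redirect the slot to the new copy */
}

/* Mirrors evac_TSO_queue: hand each link slot to the callback. */
static void toy_evac_queue(toy_evac_fn evac, ToyTSO **pq)
{
    ToyTSO **p;
    for (p = pq; *p != NULL; p = &((*p)->link))
        evac(p);
}

int main(void)
{
    /* old objects b1 -> b2, with fresh copies n1, n2 */
    ToyTSO n2 = { NULL, NULL };
    ToyTSO n1 = { NULL, NULL };
    ToyTSO b2 = { NULL, &n2 };
    ToyTSO b1 = { &b2,  &n1 };
    n1.link = &b2;              /* copied link still points at old b2 */
    ToyTSO *queue = &b1;

    toy_evac_queue(toy_evac, &queue);
    assert(queue == &n1 && n1.link == &n2);   /* fully redirected */
    return 0;
}
```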
+
void
GetRoots( evac_fn evac )
{
markEventQueue();
#else /* !GRAN */
+
if (run_queue_hd != END_TSO_QUEUE) {
ASSERT(run_queue_tl != END_TSO_QUEUE);
- evac((StgClosure **)&run_queue_hd);
+ evac_TSO_queue(evac, &run_queue_hd);
evac((StgClosure **)&run_queue_tl);
}
if (blocked_queue_hd != END_TSO_QUEUE) {
ASSERT(blocked_queue_tl != END_TSO_QUEUE);
- evac((StgClosure **)&blocked_queue_hd);
+ evac_TSO_queue(evac, &blocked_queue_hd);
evac((StgClosure **)&blocked_queue_tl);
}
if (sleeping_queue != END_TSO_QUEUE) {
- evac((StgClosure **)&sleeping_queue);
+    evac_TSO_queue(evac, &sleeping_queue);
}
#endif
- if (blackhole_queue != END_TSO_QUEUE) {
- evac((StgClosure **)&blackhole_queue);
- }
+ // Don't chase the blackhole_queue just yet; we treat it as "weak".
+ // It is updated in traverse_weak_ptr_list, once liveness is known.
if (suspended_ccalling_threads != END_TSO_QUEUE) {
- evac((StgClosure **)&suspended_ccalling_threads);
+ evac_TSO_queue(evac, &suspended_ccalling_threads);
}
#if defined(PARALLEL_HASKELL) || defined(GRAN)