;
}
+ // Threads blocked on black holes: if the black hole
+ // is alive, then the thread is alive too.
+ if (tmp == NULL && t->why_blocked == BlockedOnBlackHole) {
+ if (isAlive(t->block_info.closure)) {
+ t = (StgTSO *)evacuate((StgClosure *)t);
+ tmp = t;
+ flag = rtsTrue;
+ }
+ }
+
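The test above leans on a specific contract for isAlive(): it answers "has the GC already decided this closure survives?" without retaining anything itself, which is exactly what gives the block_info.closure check its weak-pointer behaviour. For reference, a sketch of that contract (the prototype matches the RTS of this era; the summary comment is an assumption based on how it is used here):

    /* Returns the closure's new address if this collection has already
     * retained it, and NULL otherwise.  It never evacuates anything
     * itself, so consulting it cannot keep the black hole alive. */
    StgClosure *isAlive (StgClosure *p);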
if (tmp == NULL) {
// not alive (yet): leave this thread on the
// old_all_threads list.
}
}
+ /* If we evacuated any threads, we need to go back to the scavenger.
+ */
+ if (flag) return rtsTrue;
+
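Returning rtsTrue feeds a fixed point: evacuating one blocked thread can make another black hole reachable, which can in turn revive yet more threads, so the collector must alternate scavenging with this traversal until nothing new turns up. A minimal sketch of the driver shape in GarbageCollect, assuming a hypothetical scavenge_until_done() helper (the real loop interleaves other weak-pointer work as well):

    for (;;) {
        scavenge_until_done();            /* drain everything evacuated
                                             so far (hypothetical name) */
        if (!traverse_weak_ptr_list()) {
            break;  /* no new threads revived: fixed point reached */
        }
    }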
/* And resurrect any threads which were about to become garbage.
*/
{
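The body of this block is cut off in the excerpt; roughly, it walks the threads that remained dead through the fixed point above and revives each one with an exception rather than dropping it silently. A sketch of the per-thread case analysis in resurrectThreads, based on the RTS of this era (the closure names and the two-argument raiseAsync are assumptions):

    switch (t->why_blocked) {
    case BlockedOnMVar:
    case BlockedOnException:
        /* no other thread can ever fill the MVar or throw to us */
        raiseAsync(t, (StgClosure *)BlockedOnDeadMVar_closure);
        break;
    case BlockedOnBlackHole:
        /* the black hole can never be updated: report non-termination */
        raiseAsync(t, (StgClosure *)NonTermination_closure);
        break;
    default:
        barf("resurrectThreads: unexpected why_blocked");
    }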
static void
scavengeTSO (StgTSO *tso)
{
- // We don't chase the link field: TSOs on the blackhole queue are
- // not automatically alive, so the link field is a "weak" pointer.
- // Queues of TSOs are traversed explicitly.
-
 if (   tso->why_blocked == BlockedOnMVar
     || tso->why_blocked == BlockedOnBlackHole
     || tso->why_blocked == BlockedOnException
     ) {
     tso->block_info.closure = evacuate(tso->block_info.closure);
 }
 if ( tso->blocked_exceptions != NULL ) {
     tso->blocked_exceptions =
         (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
 }
+ // We don't always chase the link field: TSOs on the blackhole
+ // queue are not automatically alive, so the link field is a
+ // "weak" pointer in that case.
+ if (tso->why_blocked != BlockedOnBlackHole) {
+ tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+ }
+
+ // scavenge current transaction record
tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
KH @ 25/10/99
*/
-static void
-evac_TSO_queue (evac_fn evac, StgTSO ** ptso)
-{
- StgTSO **pt;
-
- for (pt = ptso; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
- evac((StgClosure **)pt);
- }
-}
-
void
GetRoots( evac_fn evac )
{
markEventQueue();
#else /* !GRAN */
-
if (run_queue_hd != END_TSO_QUEUE) {
ASSERT(run_queue_tl != END_TSO_QUEUE);
- evac_TSO_queue(evac, &run_queue_hd);
+ evac((StgClosure **)&run_queue_hd);
evac((StgClosure **)&run_queue_tl);
}
if (blocked_queue_hd != END_TSO_QUEUE) {
ASSERT(blocked_queue_tl != END_TSO_QUEUE);
- evac_TSO_queue(evac, &blocked_queue_hd);
+ evac((StgClosure **)&blocked_queue_hd);
evac((StgClosure **)&blocked_queue_tl);
}
if (sleeping_queue != END_TSO_QUEUE) {
- evac_TSO_queue(evac, &blocked_queue_hd);
- evac((StgClosure **)&blocked_queue_tl);
+ evac((StgClosure **)&sleeping_queue);
}
#endif
- // Don't chase the blackhole_queue just yet, we treat it as "weak"
+ if (blackhole_queue != END_TSO_QUEUE) {
+ evac((StgClosure **)&blackhole_queue);
+ }
if (suspended_ccalling_threads != END_TSO_QUEUE) {
- evac_TSO_queue(evac, &suspended_ccalling_threads);
+ evac((StgClosure **)&suspended_ccalling_threads);
}
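Taken together, these hunks repeat one pattern: now that scavengeTSO chases link fields, rooting a queue no longer requires walking it. Evacuating the head keeps the whole chain alive, and the tail pointer only needs its own evac so that it is redirected to the to-space copy rather than left dangling. A sketch of that pattern as a helper (evac_queue_roots is hypothetical, not part of the patch; the evac_fn typedef matches the RTS):

    typedef void (*evac_fn)(StgClosure **);   /* update one root in place */

    static void
    evac_queue_roots (evac_fn evac, StgTSO **hd, StgTSO **tl)
    {
        if (*hd != END_TSO_QUEUE) {
            evac((StgClosure **)hd);   /* scavenger follows link from here */
            evac((StgClosure **)tl);   /* hd's evac does not update tl, so
                                          redirect it explicitly */
        }
    }

The blackhole_queue is the exception: since scavengeTSO deliberately skips link for BlockedOnBlackHole threads, evacuating its head here does not drag the whole chain along; the remaining threads live or die by the traverse_weak_ptr_list check above.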
#if defined(PARALLEL_HASKELL) || defined(GRAN)