From 4ab216140652b1ebdc011bba06f77cd05c614b91 Mon Sep 17 00:00:00 2001
From: simonmar
Date: Thu, 5 May 2005 13:17:47 +0000
Subject: [PATCH] [project @ 2005-05-05 13:17:47 by simonmar]

Some fixes to the blackhole garbage collection strategy.  conc049 is a
great test case.
---
 ghc/rts/GC.c       | 25 +++++++++++++++++++++----
 ghc/rts/Schedule.c | 24 +++++++-----------------
 2 files changed, 28 insertions(+), 21 deletions(-)

diff --git a/ghc/rts/GC.c b/ghc/rts/GC.c
index 0cef213..db05ef5 100644
--- a/ghc/rts/GC.c
+++ b/ghc/rts/GC.c
@@ -1266,6 +1266,16 @@ traverse_weak_ptr_list(void)
               ;
           }
 
+          // Threads blocked on black holes: if the black hole
+          // is alive, then the thread is alive too.
+          if (tmp == NULL && t->why_blocked == BlockedOnBlackHole) {
+              if (isAlive(t->block_info.closure)) {
+                  t = (StgTSO *)evacuate((StgClosure *)t);
+                  tmp = t;
+                  flag = rtsTrue;
+              }
+          }
+
           if (tmp == NULL) {
               // not alive (yet): leave this thread on the
               // old_all_threads list.
@@ -1282,6 +1292,10 @@ traverse_weak_ptr_list(void)
           }
       }
 
+      /* If we evacuated any threads, we need to go back to the scavenger.
+       */
+      if (flag) return rtsTrue;
+
       /* And resurrect any threads which were about to become garbage.
        */
       {
@@ -2371,10 +2385,6 @@ scavenge_fun_srt(const StgInfoTable *info)
 static void
 scavengeTSO (StgTSO *tso)
 {
-    // We don't chase the link field: TSOs on the blackhole queue are
-    // not automatically alive, so the link field is a "weak" pointer.
-    // Queues of TSOs are traversed explicitly.
-
     if (   tso->why_blocked == BlockedOnMVar
         || tso->why_blocked == BlockedOnBlackHole
         || tso->why_blocked == BlockedOnException
@@ -2390,6 +2400,13 @@ scavengeTSO (StgTSO *tso)
           (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
     }
 
+    // We don't always chase the link field: TSOs on the blackhole
+    // queue are not automatically alive, so the link field is a
+    // "weak" pointer in that case.
+    if (tso->why_blocked != BlockedOnBlackHole) {
+        tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
+    }
+
     // scavange current transaction record
     tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
 
diff --git a/ghc/rts/Schedule.c b/ghc/rts/Schedule.c
index 83f2a8d..036c5b0 100644
--- a/ghc/rts/Schedule.c
+++ b/ghc/rts/Schedule.c
@@ -2745,16 +2745,6 @@ waitThread_(StgMainThread* m, Capability *initialCapability)
    KH @ 25/10/99
 */
 
-static void
-evac_TSO_queue (evac_fn evac, StgTSO ** ptso)
-{
-    StgTSO **pt;
-
-    for (pt = ptso; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
-        evac((StgClosure **)pt);
-    }
-}
-
 void
 GetRoots( evac_fn evac )
 {
@@ -2779,29 +2769,29 @@ GetRoots( evac_fn evac )
   markEventQueue();
 
 #else /* !GRAN */
-
   if (run_queue_hd != END_TSO_QUEUE) {
       ASSERT(run_queue_tl != END_TSO_QUEUE);
-      evac_TSO_queue(evac, &run_queue_hd);
+      evac((StgClosure **)&run_queue_hd);
       evac((StgClosure **)&run_queue_tl);
   }
 
   if (blocked_queue_hd != END_TSO_QUEUE) {
       ASSERT(blocked_queue_tl != END_TSO_QUEUE);
-      evac_TSO_queue(evac, &blocked_queue_hd);
+      evac((StgClosure **)&blocked_queue_hd);
       evac((StgClosure **)&blocked_queue_tl);
   }
 
   if (sleeping_queue != END_TSO_QUEUE) {
-      evac_TSO_queue(evac, &blocked_queue_hd);
-      evac((StgClosure **)&blocked_queue_tl);
+      evac((StgClosure **)&sleeping_queue);
   }
 #endif
 
-  // Don't chase the blackhole_queue just yet, we treat it as "weak"
+  if (blackhole_queue != END_TSO_QUEUE) {
+      evac((StgClosure **)&blackhole_queue);
+  }
 
   if (suspended_ccalling_threads != END_TSO_QUEUE) {
-      evac_TSO_queue(evac, &suspended_ccalling_threads);
+      evac((StgClosure **)&suspended_ccalling_threads);
   }
 
 #if defined(PARALLEL_HASKELL) || defined(GRAN)
-- 
1.7.10.4
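
Note (not part of the patch): the GC.c hunks implement a weak-retention rule for
threads blocked on black holes: such a thread survives only if the black hole it
blocks on is itself alive, and the traversal is rerun whenever a thread was
promoted, since keeping one thread can make further objects reachable. Below is
a minimal, self-contained C sketch of that rule. The Obj/Thread types, field
names, and the fixed-point driver are hypothetical stand-ins for illustration,
not the real StgTSO / isAlive / evacuate machinery.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: a heap object that is either reachable ("alive")
 * or not, and a thread that may be blocked on such an object. */
typedef struct { bool alive; const char *name; } Obj;
typedef struct { bool alive; Obj *blocked_on; const char *name; } Thread;

/* One pass: promote any dead thread whose black hole is alive.
 * Returns true if anything changed, mirroring the "flag" /
 * "go back to the scavenger" logic in traverse_weak_ptr_list. */
static bool promote_blocked_threads(Thread *ts, int n)
{
    bool changed = false;
    for (int i = 0; i < n; i++) {
        if (!ts[i].alive && ts[i].blocked_on && ts[i].blocked_on->alive) {
            ts[i].alive = true;    /* cf. evacuate((StgClosure *)t) */
            changed = true;
        }
    }
    return changed;
}

int main(void)
{
    Obj bh1 = { true,  "reachable black hole" };
    Obj bh2 = { false, "unreachable black hole" };
    Thread ts[] = {
        { false, &bh1, "t1" },   /* kept: its black hole is alive */
        { false, &bh2, "t2" },   /* not kept: nothing refers to bh2 */
    };
    int n = sizeof ts / sizeof ts[0];

    /* Iterate to a fixed point, like returning rtsTrue from
     * traverse_weak_ptr_list and rerunning the scavenger. */
    while (promote_blocked_threads(ts, n))
        ;

    for (int i = 0; i < n; i++)
        printf("%s: %s\n", ts[i].name,
               ts[i].alive ? "alive" : "garbage (would be resurrected/deadlocked)");
    return 0;
}

In the sketch, t2 stays dead after the fixed point is reached, which corresponds
to a thread the later "resurrect" pass would pick up as deadlocked.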