X-Git-Url: http://git.megacz.com/?a=blobdiff_plain;f=rts%2Fsm%2FMarkWeak.c;h=9d8e8c01a9bf49955361d7e2c4df2b292ca95df8;hb=214b3663d5d7598c13643f9221e43d5a7735b47f;hp=78c84ed77aaa8da0a4a13c15f7e5cdf66c855064;hpb=c151739a1831546baa9c3f4a55f93ffc0b38bf9d;p=ghc-hetmet.git

diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index 78c84ed..9d8e8c0 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -11,14 +11,17 @@
  *
  * ---------------------------------------------------------------------------*/
 
+#include "PosixSource.h"
 #include "Rts.h"
-#include "Storage.h"
+
 #include "MarkWeak.h"
 #include "GC.h"
 #include "GCThread.h"
 #include "Evac.h"
 #include "Trace.h"
 #include "Schedule.h"
+#include "Weak.h"
+#include "Storage.h"
 
 /* -----------------------------------------------------------------------------
    Weak Pointers
@@ -80,6 +83,9 @@ StgTSO *resurrected_threads;
 // List of blocked threads found to have pending throwTos
 StgTSO *exception_threads;
 
+static void resurrectUnreachableThreads (generation *gen);
+static rtsBool tidyThreadList (generation *gen);
+
 void
 initWeakForGC(void)
 {
@@ -107,7 +113,7 @@ traverseWeakPtrList(void)
     /* doesn't matter where we evacuate values/finalizers to, since
      * these pointers are treated as roots (iff the keys are alive).
      */
-    gct->evac_step = 0;
+    gct->evac_gen = 0;
 
     last_w = &old_weak_ptr_list;
     for (w = old_weak_ptr_list; w != NULL; w = next_w) {
@@ -180,86 +186,23 @@ traverseWeakPtrList(void)
       return rtsTrue;
 
   case WeakThreads:
-      /* Now deal with the all_threads list, which behaves somewhat like
+      /* Now deal with the step->threads lists, which behave somewhat like
        * the weak ptr list.  If we discover any threads that are about to
        * become garbage, we wake them up and administer an exception.
        */
-      {
-          StgTSO *t, *tmp, *next, **prev;
-          nat g, s;
-          step *stp;
+  {
+      nat g;
 
-          // Traverse thread lists for generations we collected...
-          for (g = 0; g <= N; g++) {
-              for (s = 0; s < generations[g].n_steps; s++) {
-                  stp = &generations[g].steps[s];
-
-                  prev = &stp->old_threads;
-
-                  for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
-
-                      tmp = (StgTSO *)isAlive((StgClosure *)t);
-
-                      if (tmp != NULL) {
-                          t = tmp;
-                      }
-
-                      ASSERT(get_itbl(t)->type == TSO);
-                      if (t->what_next == ThreadRelocated) {
-                          next = t->_link;
-                          *prev = next;
-                          continue;
-                      }
-
-                      next = t->global_link;
-
-                      // This is a good place to check for blocked
-                      // exceptions.  It might be the case that a thread is
-                      // blocked on delivering an exception to a thread that
-                      // is also blocked - we try to ensure that this
-                      // doesn't happen in throwTo(), but it's too hard (or
-                      // impossible) to close all the race holes, so we
-                      // accept that some might get through and deal with
-                      // them here.  A GC will always happen at some point,
-                      // even if the system is otherwise deadlocked.
-                      //
-                      // If an unreachable thread has blocked
-                      // exceptions, we really want to perform the
-                      // blocked exceptions rather than throwing
-                      // BlockedIndefinitely exceptions.  This is the
-                      // only place we can discover such threads.
-                      // The target thread might even be
-                      // ThreadFinished or ThreadKilled.  Bugs here
-                      // will only be seen when running on a
-                      // multiprocessor.
-                      if (t->blocked_exceptions != END_TSO_QUEUE) {
-                          if (tmp == NULL) {
-                              evacuate((StgClosure **)&t);
-                              flag = rtsTrue;
-                          }
-                          t->global_link = exception_threads;
-                          exception_threads = t;
-                          *prev = next;
-                          continue;
-                      }
-
-                      if (tmp == NULL) {
-                          // not alive (yet): leave this thread on the
-                          // old_all_threads list.
-                          prev = &(t->global_link);
-                      }
-                      else {
-                          // alive
-                          *prev = next;
-
-                          // move this thread onto the correct threads list.
-                          step *new_step;
-                          new_step = Bdescr((P_)t)->step;
-                          t->global_link = new_step->threads;
-                          new_step->threads = t;
-                      }
-                  }
-              }
+      // Traverse thread lists for generations we collected...
+// ToDo when we have one gen per capability:
+//      for (n = 0; n < n_capabilities; n++) {
+//          if (tidyThreadList(&nurseries[n])) {
+//              flag = rtsTrue;
+//          }
+//      }
+      for (g = 0; g <= N; g++) {
+          if (tidyThreadList(&generations[g])) {
+              flag = rtsTrue;
           }
       }
 
@@ -270,36 +213,12 @@
       /* And resurrect any threads which were about to become garbage.
        */
       {
-          nat g, s;
-          step *stp;
-          StgTSO *t, *tmp, *next;
-
+          nat g;
           for (g = 0; g <= N; g++) {
-              for (s = 0; s < generations[g].n_steps; s++) {
-                  stp = &generations[g].steps[s];
-
-                  for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
-                      next = t->global_link;
-
-                      // ThreadFinished and ThreadComplete: we have to keep
-                      // these on the all_threads list until they
-                      // become garbage, because they might get
-                      // pending exceptions.
-                      switch (t->what_next) {
-                      case ThreadKilled:
-                      case ThreadComplete:
-                          continue;
-                      default:
-                          tmp = t;
-                          evacuate((StgClosure **)&tmp);
-                          tmp->global_link = resurrected_threads;
-                          resurrected_threads = tmp;
-                      }
-                  }
-              }
+              resurrectUnreachableThreads(&generations[g]);
           }
       }
-      
+
       /* Finally, we can update the blackhole_queue.  This queue
        * simply strings together TSOs blocked on black holes, it is
        * not intended to keep anything alive.  Hence, we do not follow
@@ -314,15 +233,113 @@
           ASSERT(*pt != NULL);
       }
   }
-      
+
       weak_stage = WeakDone;  // *now* we're done,
       return rtsTrue;         // but one more round of scavenging, please
-      
+  }
+
   default:
       barf("traverse_weak_ptr_list");
       return rtsTrue;
   }
+}
+
+ static void resurrectUnreachableThreads (generation *gen)
+{
+    StgTSO *t, *tmp, *next;
+
+    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
+        next = t->global_link;
+
+        // ThreadFinished and ThreadComplete: we have to keep
+        // these on the all_threads list until they
+        // become garbage, because they might get
+        // pending exceptions.
+        switch (t->what_next) {
+        case ThreadKilled:
+        case ThreadComplete:
+            continue;
+        default:
+            tmp = t;
+            evacuate((StgClosure **)&tmp);
+            tmp->global_link = resurrected_threads;
+            resurrected_threads = tmp;
+        }
+    }
+}
+
+static rtsBool tidyThreadList (generation *gen)
+{
+    StgTSO *t, *tmp, *next, **prev;
+    rtsBool flag = rtsFalse;
+
+    prev = &gen->old_threads;
+    for (t = gen->old_threads; t != END_TSO_QUEUE; t = next) {
+
+        tmp = (StgTSO *)isAlive((StgClosure *)t);
+
+        if (tmp != NULL) {
+            t = tmp;
+        }
+
+        ASSERT(get_itbl(t)->type == TSO);
+        if (t->what_next == ThreadRelocated) {
+            next = t->_link;
+            *prev = next;
+            continue;
+        }
+
+        next = t->global_link;
+
+        // This is a good place to check for blocked
+        // exceptions.  It might be the case that a thread is
+        // blocked on delivering an exception to a thread that
+        // is also blocked - we try to ensure that this
+        // doesn't happen in throwTo(), but it's too hard (or
+        // impossible) to close all the race holes, so we
+        // accept that some might get through and deal with
+        // them here.  A GC will always happen at some point,
+        // even if the system is otherwise deadlocked.
+        //
+        // If an unreachable thread has blocked
+        // exceptions, we really want to perform the
+        // blocked exceptions rather than throwing
+        // BlockedIndefinitely exceptions.  This is the
+        // only place we can discover such threads.
+        // The target thread might even be
+        // ThreadFinished or ThreadKilled.  Bugs here
+        // will only be seen when running on a
+        // multiprocessor.
+        if (t->blocked_exceptions != END_TSO_QUEUE) {
+            if (tmp == NULL) {
+                evacuate((StgClosure **)&t);
+                flag = rtsTrue;
+            }
+            t->global_link = exception_threads;
+            exception_threads = t;
+            *prev = next;
+            continue;
+        }
+
+        if (tmp == NULL) {
+            // not alive (yet): leave this thread on the
+            // old_all_threads list.
+            prev = &(t->global_link);
+        }
+        else {
+            // alive
+            *prev = next;
+
+            // move this thread onto the correct threads list.
+            generation *new_gen;
+            new_gen = Bdescr((P_)t)->gen;
+            t->global_link = new_gen->threads;
+            new_gen->threads = t;
+        }
+    }
+
+    return flag;
 }
 
 /* -----------------------------------------------------------------------------