/* -----------------------------------------------------------------------------
*
- * (c) The GHC Team 1998-2006
+ * (c) The GHC Team 1998-2008
*
* Weak pointers and weak-like things in the GC
*
#include "Storage.h"
#include "MarkWeak.h"
#include "GC.h"
+#include "GCThread.h"
#include "Evac.h"
#include "Trace.h"
#include "Schedule.h"
*/
StgWeak *old_weak_ptr_list; // also pending finaliser list
-/* List of all threads during GC
- */
+// List of threads found to be unreachable
StgTSO *resurrected_threads;
-static StgTSO *old_all_threads;
+
+// List of blocked threads found to have pending throwTos
+StgTSO *exception_threads;
void
initWeakForGC(void)
old_weak_ptr_list = weak_ptr_list;
weak_ptr_list = NULL;
weak_stage = WeakPtrs;
-
- /* The all_threads list is like the weak_ptr_list.
- * See traverseWeakPtrList() for the details.
- */
- old_all_threads = all_threads;
- all_threads = END_TSO_QUEUE;
resurrected_threads = END_TSO_QUEUE;
+ exception_threads = END_TSO_QUEUE;
}
rtsBool
StgWeak *w, **last_w, *next_w;
StgClosure *new;
rtsBool flag = rtsFalse;
+ const StgInfoTable *info;
switch (weak_stage) {
/* doesn't matter where we evacuate values/finalizers to, since
* these pointers are treated as roots (iff the keys are alive).
*/
- evac_gen = 0;
+ gct->evac_step = 0;
last_w = &old_weak_ptr_list;
for (w = old_weak_ptr_list; w != NULL; w = next_w) {
continue;
}
- switch (get_itbl(w)->type) {
-
- case EVACUATED:
- next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
+ info = w->header.info;
+ if (IS_FORWARDING_PTR(info)) {
+ next_w = (StgWeak *)UN_FORWARDING_PTR(info);
*last_w = next_w;
continue;
+ }
+
+ switch (INFO_PTR_TO_STRUCT(info)->type) {
case WEAK:
/* Now, check whether the key is reachable.
if (new != NULL) {
w->key = new;
// evacuate the value and finalizer
- w->value = evacuate(w->value);
- w->finalizer = evacuate(w->finalizer);
+ evacuate(&w->value);
+ evacuate(&w->finalizer);
// remove this weak ptr from the old_weak_ptr list
*last_w = w->link;
// and put it on the new weak ptr list
*/
if (flag == rtsFalse) {
for (w = old_weak_ptr_list; w; w = w->link) {
- w->finalizer = evacuate(w->finalizer);
+ evacuate(&w->finalizer);
}
// Next, move to the WeakThreads stage after fully
* the weak ptr list. If we discover any threads that are about to
* become garbage, we wake them up and administer an exception.
*/
- {
- StgTSO *t, *tmp, *next, **prev;
+ {
+ StgTSO *t, *tmp, *next, **prev;
+ nat g, s;
+ step *stp;
- prev = &old_all_threads;
- for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
+ // Traverse thread lists for generations we collected...
+ for (g = 0; g <= N; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+
+ prev = &stp->old_threads;
+
+ for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
- tmp = (StgTSO *)isAlive((StgClosure *)t);
+ tmp = (StgTSO *)isAlive((StgClosure *)t);
- if (tmp != NULL) {
- t = tmp;
- }
-
- ASSERT(get_itbl(t)->type == TSO);
- switch (t->what_next) {
- case ThreadRelocated:
- next = t->link;
- *prev = next;
- continue;
- case ThreadKilled:
- case ThreadComplete:
- // finshed or died. The thread might still be alive, but we
- // don't keep it on the all_threads list. Don't forget to
- // stub out its global_link field.
- next = t->global_link;
- t->global_link = END_TSO_QUEUE;
- *prev = next;
- continue;
- default:
- ;
- }
-
- if (tmp == NULL) {
- // not alive (yet): leave this thread on the
- // old_all_threads list.
- prev = &(t->global_link);
- next = t->global_link;
- }
- else {
- // alive: move this thread onto the all_threads list.
- next = t->global_link;
- t->global_link = all_threads;
- all_threads = t;
- *prev = next;
- }
- }
+ if (tmp != NULL) {
+ t = tmp;
+ }
+
+ ASSERT(get_itbl(t)->type == TSO);
+ if (t->what_next == ThreadRelocated) {
+ next = t->_link;
+ *prev = next;
+ continue;
+ }
+
+ next = t->global_link;
+
+ // This is a good place to check for blocked
+ // exceptions. It might be the case that a thread is
+ // blocked on delivering an exception to a thread that
+ // is also blocked - we try to ensure that this
+ // doesn't happen in throwTo(), but it's too hard (or
+ // impossible) to close all the race holes, so we
+ // accept that some might get through and deal with
+ // them here. A GC will always happen at some point,
+ // even if the system is otherwise deadlocked.
+ //
+ // If an unreachable thread has blocked
+ // exceptions, we really want to perform the
+ // blocked exceptions rather than throwing
+ // BlockedIndefinitely exceptions. This is the
+ // only place we can discover such threads.
+ // The target thread might even be
+ // ThreadFinished or ThreadKilled. Bugs here
+ // will only be seen when running on a
+ // multiprocessor.
+ if (t->blocked_exceptions != END_TSO_QUEUE) {
+ if (tmp == NULL) {
+ evacuate((StgClosure **)&t);
+ flag = rtsTrue;
+ }
+ t->global_link = exception_threads;
+ exception_threads = t;
+ *prev = next;
+ continue;
+ }
+
+                    if (tmp == NULL) {
+                        // not alive (yet): leave this thread on the
+                        // old_threads list for this step.
+                        prev = &(t->global_link);
+                    }
+ else {
+ // alive
+ *prev = next;
+
+ // move this thread onto the correct threads list.
+ step *new_step;
+ new_step = Bdescr((P_)t)->step;
+ t->global_link = new_step->threads;
+ new_step->threads = t;
+ }
+ }
+ }
+ }
}
-
+
/* If we evacuated any threads, we need to go back to the scavenger.
*/
if (flag) return rtsTrue;
/* And resurrect any threads which were about to become garbage.
*/
{
+ nat g, s;
+ step *stp;
StgTSO *t, *tmp, *next;
- for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
- next = t->global_link;
- tmp = (StgTSO *)evacuate((StgClosure *)t);
- tmp->global_link = resurrected_threads;
- resurrected_threads = tmp;
- }
+
+ for (g = 0; g <= N; g++) {
+ for (s = 0; s < generations[g].n_steps; s++) {
+ stp = &generations[g].steps[s];
+
+ for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
+ next = t->global_link;
+
+                    // ThreadKilled and ThreadComplete: we have to keep
+                    // these on the thread lists until they
+                    // become garbage, because they might get
+                    // pending exceptions.
+ switch (t->what_next) {
+ case ThreadKilled:
+ case ThreadComplete:
+ continue;
+ default:
+ tmp = t;
+ evacuate((StgClosure **)&tmp);
+ tmp->global_link = resurrected_threads;
+ resurrected_threads = tmp;
+ }
+ }
+ }
+ }
}
/* Finally, we can update the blackhole_queue. This queue
*/
{
StgTSO **pt;
- for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
+ for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
*pt = (StgTSO *)isAlive((StgClosure *)*pt);
ASSERT(*pt != NULL);
}
{
StgTSO *prev, *t, *tmp;
rtsBool flag;
+ nat type;
flag = rtsFalse;
prev = NULL;
- for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->link) {
+ for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
+ // if the thread is not yet alive...
if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
- if (isAlive(t->block_info.closure)) {
- t = (StgTSO *)evacuate((StgClosure *)t);
- if (prev) prev->link = t;
- flag = rtsTrue;
- }
+                // if the closure it is blocked on is either (a) a
+                // reachable BLACKHOLE or (b) not a BLACKHOLE, then we
+                // make the thread alive.
+ if (!isAlive(t->block_info.closure)) {
+ type = get_itbl(t->block_info.closure)->type;
+ if (type == BLACKHOLE || type == CAF_BLACKHOLE) {
+ continue;
+ }
+ }
+ evacuate((StgClosure **)&t);
+ if (prev) {
+ prev->_link = t;
+ } else {
+ blackhole_queue = t;
+ }
+ // no write barrier when on the blackhole queue,
+ // because we traverse the whole queue on every GC.
+ flag = rtsTrue;
}
}
return flag;
void
markWeakPtrList ( void )
{
- StgWeak *w, **last_w;
+ StgWeak *w, **last_w, *tmp;
last_w = &weak_ptr_list;
for (w = weak_ptr_list; w; w = w->link) {
// w might be WEAK, a forwarding pointer, or DEAD_WEAK (actually CON_STATIC) here
- ASSERT(w->header.info == &stg_DEAD_WEAK_info
- || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
- w = (StgWeak *)evacuate((StgClosure *)w);
+ ASSERT(IS_FORWARDING_PTR(w->header.info)
+ || w->header.info == &stg_DEAD_WEAK_info
+ || get_itbl(w)->type == WEAK);
+ tmp = w;
+ evacuate((StgClosure **)&tmp);
*last_w = w;
last_w = &(w->link);
}