*/
StgWeak *old_weak_ptr_list; // also pending finaliser list
-/* List of all threads during GC
- */
+// List of threads found to be unreachable
StgTSO *resurrected_threads;
+// List of blocked threads found to have pending throwTos
+StgTSO *exception_threads;
+
void
initWeakForGC(void)
{
weak_ptr_list = NULL;
weak_stage = WeakPtrs;
resurrected_threads = END_TSO_QUEUE;
+ exception_threads = END_TSO_QUEUE;
}
rtsBool
StgWeak *w, **last_w, *next_w;
StgClosure *new;
rtsBool flag = rtsFalse;
+ const StgInfoTable *info;
switch (weak_stage) {
continue;
}
- switch (get_itbl(w)->type) {
-
- case EVACUATED:
- next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
+ info = w->header.info;
+ if (IS_FORWARDING_PTR(info)) {
+ next_w = (StgWeak *)UN_FORWARDING_PTR(info);
*last_w = next_w;
continue;
+ }
+
+ switch (INFO_PTR_TO_STRUCT(info)->type) {
case WEAK:
/* Now, check whether the key is reachable.
next = t->global_link;
}
else {
- step *new_step;
- // alive: move this thread onto the correct
- // threads list.
+ // alive
next = t->global_link;
- new_step = Bdescr((P_)t)->step;
- t->global_link = new_step->threads;
- new_step->threads = t;
*prev = next;
+
+ // This is a good place to check for blocked
+ // exceptions. It might be the case that a thread is
+ // blocked on delivering an exception to a thread that
+ // is also blocked - we try to ensure that this
+ // doesn't happen in throwTo(), but it's too hard (or
+ // impossible) to close all the race holes, so we
+ // accept that some might get through and deal with
+ // them here. A GC will always happen at some point,
+ // even if the system is otherwise deadlocked.
+ if (t->blocked_exceptions != END_TSO_QUEUE) {
+ t->global_link = exception_threads;
+ exception_threads = t;
+ } else {
+ // move this thread onto the correct threads list.
+ step *new_step;
+ new_step = Bdescr((P_)t)->step;
+ t->global_link = new_step->threads;
+ new_step->threads = t;
+ }
}
}
}
}
}
evacuate((StgClosure **)&t);
- if (prev) prev->_link = t;
+ if (prev) {
+ prev->_link = t;
+ } else {
+ blackhole_queue = t;
+ }
// no write barrier when on the blackhole queue,
// because we traverse the whole queue on every GC.
flag = rtsTrue;
last_w = &weak_ptr_list;
for (w = weak_ptr_list; w; w = w->link) {
-// w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
+// w might be WEAK, a forwarding pointer, or DEAD_WEAK (actually CON_STATIC) here
- ASSERT(w->header.info == &stg_DEAD_WEAK_info
- || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
+ ASSERT(IS_FORWARDING_PTR(w->header.info)
+ || w->header.info == &stg_DEAD_WEAK_info
+ || get_itbl(w)->type == WEAK);
tmp = w;
evacuate((StgClosure **)&tmp);
*last_w = w;