1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2008
5 * Weak pointers and weak-like things in the GC
7 * Documentation on the architecture of the Garbage Collector can be
8 * found in the online commentary:
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
12 * ---------------------------------------------------------------------------*/
14 #include "PosixSource.h"
26 /* -----------------------------------------------------------------------------
29 traverse_weak_ptr_list is called possibly many times during garbage
30 collection. It returns a flag indicating whether it did any work
31 (i.e. called evacuate on any live pointers).
33 Invariant: traverse_weak_ptr_list is called when the heap is in an
34 idempotent state. That means that there are no pending
35 evacuate/scavenge operations. This invariant helps the weak
36 pointer code decide which weak pointers are dead - if there are no
37 new live weak pointers, then all the currently unreachable ones are
40 For generational GC: we just don't try to finalize weak pointers in
41 older generations than the one we're collecting. This could
42 probably be optimised by keeping per-generation lists of weak
43 pointers, but for a few weak pointers this scheme will work.
45 There are three distinct stages to processing weak pointers:
47 - weak_stage == WeakPtrs
49 We process all the weak pointers whose keys are alive (evacuate
50 their values and finalizers), and repeat until we can find no new
51 live keys. If no live keys are found in this pass, then we
52 evacuate the finalizers of all the dead weak pointers in order to
55 - weak_stage == WeakThreads
57 Now, we discover which *threads* are still alive. Pointers to
58 threads from the all_threads and main thread lists are the
59 weakest of all: a pointer from the finalizer of a dead weak
60 pointer can keep a thread alive. Any threads found to be unreachable
61 are evacuated and placed on the resurrected_threads list so we
62 can send them a signal later.
64 - weak_stage == WeakDone
66 No more evacuation is done.
68 -------------------------------------------------------------------------- */
70 /* Which stage of processing various kinds of weak pointer are we at?
71 * (see traverse_weak_ptr_list() below for discussion).
 */
73 typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
74 static WeakStage weak_stage;
// Weak pointers detached from weak_ptr_list at GC start (see the
// initialisation code below); entries whose keys die stay here.
78 StgWeak *old_weak_ptr_list; // also pending finaliser list
80 // List of threads found to be unreachable
81 StgTSO *resurrected_threads;
83 // List of blocked threads found to have pending throwTos
84 StgTSO *exception_threads;
// Per-step helpers, defined later in this file.
86 static void resurrectUnreachableThreads (step *stp);
87 static rtsBool tidyThreadList (step *stp);
// Body fragment of initWeakForGC: reset the weak-pointer machinery at
// the start of a GC cycle. NOTE(review): the enclosing function header
// was lost in extraction — confirm against the full source.
// Stash the current weak pointers; dead ones will remain on this list
// as the pending-finaliser list (see comment on old_weak_ptr_list).
92 old_weak_ptr_list = weak_ptr_list;
// Start the state machine at its first stage (see WeakStage above).
94 weak_stage = WeakPtrs;
95 resurrected_threads = END_TSO_QUEUE;
96 exception_threads = END_TSO_QUEUE;
/*
 * traverseWeakPtrList: one pass of the weak-pointer state machine
 * described in the comment at the top of this file.  Returns a flag
 * indicating whether it did any work, i.e. called evacuate on any
 * live pointers (so the caller must scavenge again before the next
 * call).  NOTE(review): this extraction is missing many intervening
 * lines (the return-type line, case labels, closing braces); the
 * comments below describe only what the visible lines show.
 */
100 traverseWeakPtrList(void)
102 StgWeak *w, **last_w, *next_w;
104 rtsBool flag = rtsFalse;   // set when we evacuate anything this pass
105 const StgInfoTable *info;
107 switch (weak_stage) {
113 /* doesn't matter where we evacuate values/finalizers to, since
114 * these pointers are treated as roots (iff the keys are alive).
// Walk old_weak_ptr_list, unlinking entries whose keys are alive.
118 last_w = &old_weak_ptr_list;
119 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
121 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
122 * called on a live weak pointer object. Just remove it.
124 if (w->header.info == &stg_DEAD_WEAK_info) {
125 next_w = ((StgDeadWeak *)w)->link;
130 info = w->header.info;
// A forwarding pointer means this weak object was already evacuated;
// follow it (presumably to continue the walk — lines lost here).
131 if (IS_FORWARDING_PTR(info)) {
132 next_w = (StgWeak *)UN_FORWARDING_PTR(info);
137 switch (INFO_PTR_TO_STRUCT(info)->type) {
140 /* Now, check whether the key is reachable.
142 new = isAlive(w->key);
145 // evacuate the value and finalizer
147 evacuate(&w->finalizer);
148 // remove this weak ptr from the old_weak_ptr list
150 // and put it on the new weak ptr list
152 w->link = weak_ptr_list;
156 debugTrace(DEBUG_weak,
157 "weak pointer still alive at %p -> %p",
// Anything other than WEAK/DEAD_WEAK on this list is a bug.
168 barf("traverseWeakPtrList: not WEAK");
172 /* If we didn't make any changes, then we can go round and kill all
173 * the dead weak pointers. The old_weak_ptr list is used as a list
174 * of pending finalizers later on.
176 if (flag == rtsFalse) {
177 for (w = old_weak_ptr_list; w; w = w->link) {
178 evacuate(&w->finalizer);
181 // Next, move to the WeakThreads stage after fully
182 // scavenging the finalizers we've just evacuated.
183 weak_stage = WeakThreads;
189 /* Now deal with the step->threads lists, which behave somewhat like
190 * the weak ptr list. If we discover any threads that are about to
191 * become garbage, we wake them up and administer an exception.
196 // Traverse thread lists for generations we collected...
197 for (n = 0; n < n_capabilities; n++) {
198 if (tidyThreadList(&nurseries[n])) {
// ...and every step of every collected generation (g <= N).
202 for (g = 0; g <= N; g++) {
203 for (s = 0; s < generations[g].n_steps; s++) {
204 if (tidyThreadList(&generations[g].steps[s])) {
210 /* If we evacuated any threads, we need to go back to the scavenger.
212 if (flag) return rtsTrue;
214 /* And resurrect any threads which were about to become garbage.
219 for (n = 0; n < n_capabilities; n++) {
220 resurrectUnreachableThreads(&nurseries[n]);
222 for (g = 0; g <= N; g++) {
223 for (s = 0; s < generations[g].n_steps; s++) {
224 resurrectUnreachableThreads(&generations[g].steps[s]);
229 /* Finally, we can update the blackhole_queue. This queue
230 * simply strings together TSOs blocked on black holes, it is
231 * not intended to keep anything alive. Hence, we do not follow
232 * pointers on the blackhole_queue until now, when we have
233 * determined which TSOs are otherwise reachable. We know at
234 * this point that all TSOs have been evacuated, however.
// Splice each queue entry to its new (evacuated) address; isAlive
// returns the forwarded closure here.
238 for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->_link)) {
239 *pt = (StgTSO *)isAlive((StgClosure *)*pt);
244 weak_stage = WeakDone; // *now* we're done,
245 return rtsTrue; // but one more round of scavenging, please
// Unreachable unless weak_stage is corrupted.
249 barf("traverse_weak_ptr_list");
/*
 * resurrectUnreachableThreads: walk stp->old_threads (the threads in
 * this step not found reachable during GC), evacuate them, and chain
 * them onto the global resurrected_threads list so they can be sent a
 * signal later (see the WeakThreads stage comment above).
 * NOTE(review): switch-case labels and braces were lost in extraction.
 */
254 static void resurrectUnreachableThreads (step *stp)
256 StgTSO *t, *tmp, *next;
258 for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
// Save the link before we disturb the thread's global_link field.
259 next = t->global_link;
261 // ThreadFinished and ThreadComplete: we have to keep
262 // these on the all_threads list until they
263 // become garbage, because they might get
264 // pending exceptions.
265 switch (t->what_next) {
271 evacuate((StgClosure **)&tmp);
// Prepend the (now evacuated) thread to the resurrected list.
272 tmp->global_link = resurrected_threads;
273 resurrected_threads = tmp;
/*
 * tidyThreadList: partition stp->old_threads into threads found alive
 * (moved onto the threads list of the step they now reside in) and
 * threads not yet alive (left on old_threads for a later pass or for
 * resurrectUnreachableThreads).  Returns rtsFalse unless work was done
 * (flag is initialised rtsFalse; the lines that set it were lost in
 * this extraction — confirm against the full source).
 */
278 static rtsBool tidyThreadList (step *stp)
280 StgTSO *t, *tmp, *next, **prev;
281 rtsBool flag = rtsFalse;
283 prev = &stp->old_threads;
285 for (t = stp->old_threads; t != END_TSO_QUEUE; t = next) {
// isAlive returns the new address if t was evacuated, NULL otherwise.
287 tmp = (StgTSO *)isAlive((StgClosure *)t);
293 ASSERT(get_itbl(t)->type == TSO);
294 if (t->what_next == ThreadRelocated) {
300 next = t->global_link;
302 // This is a good place to check for blocked
303 // exceptions. It might be the case that a thread is
304 // blocked on delivering an exception to a thread that
305 // is also blocked - we try to ensure that this
306 // doesn't happen in throwTo(), but it's too hard (or
307 // impossible) to close all the race holes, so we
308 // accept that some might get through and deal with
309 // them here. A GC will always happen at some point,
310 // even if the system is otherwise deadlocked.
312 // If an unreachable thread has blocked
313 // exceptions, we really want to perform the
314 // blocked exceptions rather than throwing
315 // BlockedIndefinitely exceptions. This is the
316 // only place we can discover such threads.
317 // The target thread might even be
318 // ThreadFinished or ThreadKilled. Bugs here
319 // will only be seen when running on a
321 if (t->blocked_exceptions != END_TSO_QUEUE) {
// Keep the thread alive so its blocked exceptions can be delivered,
// and remember it on the global exception_threads list.
323 evacuate((StgClosure **)&t);
326 t->global_link = exception_threads;
327 exception_threads = t;
333 // not alive (yet): leave this thread on the
334 // old_all_threads list.
335 prev = &(t->global_link);
341 // move this thread onto the correct threads list.
343 new_step = Bdescr((P_)t)->step;
344 t->global_link = new_step->threads;
345 new_step->threads = t;
352 /* -----------------------------------------------------------------------------
355 Threads on this list behave like weak pointers during the normal
356 phase of garbage collection: if the blackhole is reachable, then
357 the thread is reachable too.
358 -------------------------------------------------------------------------- */
/*
 * traverseBlackholeQueue: threads on this queue behave like weak
 * pointers (see the comment block above) — a thread blocked on a
 * reachable blackhole is kept alive, via the evacuate below.
 * NOTE(review): the return-type line and the function tail (including
 * its return) were lost in this extraction.
 */
360 traverseBlackholeQueue (void)
362 StgTSO *prev, *t, *tmp;
369 for (t = blackhole_queue; t != END_TSO_QUEUE; prev=t, t = t->_link) {
370 // if the thread is not yet alive...
371 if (! (tmp = (StgTSO *)isAlive((StgClosure*)t))) {
372 // if the closure it is blocked on is either (a) a
373 // reachable BLACKHOLE or (b) not a BLACKHOLE, then we
374 // make the thread alive.
375 if (!isAlive(t->block_info.closure)) {
376 type = get_itbl(t->block_info.closure)->type;
377 if (type == BLACKHOLE || type == CAF_BLACKHOLE) {
381 evacuate((StgClosure **)&t);
387 // no write barrier when on the blackhole queue,
388 // because we traverse the whole queue on every GC.
395 /* -----------------------------------------------------------------------------
396 After GC, the live weak pointer list may have forwarding pointers
397 on it, because a weak pointer object was evacuated after being
398 moved to the live weak pointer list. We remove those forwarding
401 Also, we don't consider weak pointer objects to be reachable, but
402 we must nevertheless consider them to be "live" and retain them.
403 Therefore any weak pointer objects which haven't as yet been
404 evacuated need to be evacuated now.
405 -------------------------------------------------------------------------- */
/*
 * markWeakPtrList: keep all weak pointer objects on weak_ptr_list
 * "live" — evacuating any not yet evacuated — since weak pointers are
 * retained even though not considered reachable (see the comment
 * block above).  NOTE(review): the return-type line and the rest of
 * the loop body/function run past the end of this extraction.
 */
408 markWeakPtrList ( void )
410 StgWeak *w, **last_w, *tmp;
412 last_w = &weak_ptr_list;
413 for (w = weak_ptr_list; w; w = w->link) {
414 // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
415 ASSERT(IS_FORWARDING_PTR(w->header.info)
416 || w->header.info == &stg_DEAD_WEAK_info
417 || get_itbl(w)->type == WEAK);
419 evacuate((StgClosure **)&tmp);