/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.74 2000/03/17 13:30:24 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes
#include "StoragePriv.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
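
/* An illustrative sketch (not part of the collector) of how an object
 * gets consed onto this list; STATIC_LINK and END_OF_STATIC_LIST are
 * the macro/sentinel used later in this file, and a NULL link field
 * means "not on the list yet":
 *
 *   if (STATIC_LINK(info,p) == NULL) {       // not already on the list
 *     STATIC_LINK(info,p) = static_objects;  // cons onto the front
 *     static_objects = p;
 *   }
 */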
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable.)
 */
static nat evac_gen;

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );
static void         revert_dead_CAFs        ( void );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step *step );
static void         scavenge                ( step *step );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );
//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
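
/* A condensed, illustrative sketch of that destination choice (the
 * real logic lives in evacuate() and copy() below):
 *
 *   step *dest = Bdescr((P_)obj)->step->to;   // next step's to-space
 *   if (dest->gen->no < evac_gen)             // promotion required?
 *     dest = &generations[evac_gen].steps[0];
 */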
//@cindex GarbageCollect

void GarbageCollect(void (*get_roots)(void))
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int st, gen;
  rtsBool flag;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",

  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;        /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->hp_bd = bd;
      step->to_space = bd;
      step->to_blocks = 1;
      step->scan = bd->start;
      step->scan_bd = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      new_blocks++;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->step = step;
        bd->link = NULL;
        bd->evacuated = 0;      /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
        step->hp_bd = bd;
        new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->to_blocks = 0;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
     CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

#if defined(PAR)
  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif

  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */

 loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  /* scavenge each step in generations 0..maxgen */
  for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
    for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
      if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }
      step = &generations[gen].steps[st];
      evac_gen = gen;
      if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
        scavenge(step);
        flag = rtsTrue;
      }
      if (step->new_large_objects != NULL) {
        scavenge_large(step);
        flag = rtsTrue;
      }
    }
  }

  if (flag) { goto loop; }

  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);
  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* revert dead CAFs and update enteredCAFs list */
  revert_dead_CAFs();

  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
        stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
                RtsFlags.GcFlags.minOldGenSize);
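      /* Illustrative figures: with oldGenFactor = 2, 100 blocks of
       * live data in the oldest generation and minOldGenSize = 4,
       * this sets max_blocks = stg_max(200, 4) = 200 blocks.
       */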
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
        oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
        if (((int)oldest_gen->max_blocks -
             (int)oldest_gen->steps[0].to_blocks) <
            (RtsFlags.GcFlags.pcFreeHeap *
             RtsFlags.GcFlags.maxHeapSize / 200)) {
          heapOverflow();
        }
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    generations[g].collections++; /* for stats */

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
        /* stats information: how much we copied */
        copied -= step->hp_bd->start + BLOCK_SIZE_W -
          step->hp;
      }

      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0;  /* now from-space */
          }
        }

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;
        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         *   max_blocks  =  oldgen_max_blocks * G
         *                  ---------------------
         *                   (n_generations - 1)
         */
        generations[g].max_blocks = (oldest_gen->max_blocks * g)
             / (RtsFlags.GcFlags.generations-1);

        generations[g].max_blocks = oldest_gen->max_blocks;
      }

      /* for older generations... */
      if (g > N) {

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }

  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;        /* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */
    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
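    /* Illustrative figures: with L = 100 blocks of live data and a
     * factor-of-2 nursery, we need 100 (live) + 100 (nursery) + 100
     * (to-space during GC) = 300 blocks, i.e. 3L; shrinking the
     * nursery moves the requirement towards the 2L lower bound.
     */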
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);

  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  } else {
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded();        /* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }
      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
        (100 + (int)g0s0_pcnt_kept);
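      /* Worked example with illustrative figures: a suggestion of
       * 1000 blocks, needed = 200 and g0s0_pcnt_kept = 30 give
       * blocks = (800 * 100) / 130 = 615.
       */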
      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }

  /* mark the garbage collected CAFs as dead */
#if defined(DEBUG)
  if (major_gc) { gcCAFs(); }
#endif

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

#if defined(PAR)
  /* Reconstruct the Global Address tables used in GUM */
  RebuildGAtables(major_gc);
#endif

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
//@cindex traverse_weak_ptr_list
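
/* How the collector drives this function (a sketch; see the scavenge
 * loop in GarbageCollect() above): scavenging and this traversal are
 * re-run until neither does any work:
 *
 *   if (traverse_weak_ptr_list()) { // evacuated something new...
 *     goto loop;                    // ...so scavenge again
 *   }
 */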
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      /* and put it on the new weak ptr list */
      next_w = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }

  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadKilled:
      case ThreadComplete:
        next = t->global_link;
        *prev = next;
        continue;
      default:
        break;
      }

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
        next = tmp->global_link;
        tmp->global_link = all_threads;
        all_threads = tmp;
        *prev = next;
      } else {
        prev = &(t->global_link);
        next = t->global_link;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;
        (StgClosure *)tmp = evacuate((StgClosure *)t);
        tmp->global_link = resurrected_threads;
        resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */
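
/* Typical use (cf. traverse_weak_ptr_list above): probe a weak key
 * without keeping it alive, learning its new address if it survived:
 *
 *   StgClosure *new;
 *   if ((new = isAlive(w->key))) {
 *     w->key = new;   // key is live: follow it to its new location
 *   }
 */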
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

 loop:
  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

#if 1 || !defined(PAR)
  /* ignore closures in generations that we're not collecting. */
  /* In GUM we use this routine when rebuilding GA tables; for some
     reason it has problems with the LOOKS_LIKE_STATIC macro -- HWL */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }
#endif

  info = get_itbl(p);

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:              /* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    /* alive! */
    return ((StgEvacuated *)p)->evacuee;

  case BCO:
    size = bco_sizeW((StgBCO*)p);
    goto large;

  case ARR_WORDS:
    size = arr_words_sizeW((StgArrWords *)p);
    goto large;

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
    goto large;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }
    size = tso_sizeW((StgTSO *)p);
  large:
    if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
        && Bdescr((P_)p)->evacuated)
      return p;
    else
      return NULL;

  default:
    /* dead. */
    return NULL;
  }
}
StgClosure *
MarkRoot(StgClosure *root)
{
  return evacuate(root);
}

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
  new_blocks++;
}

//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
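
/* For instance (see the BLACKHOLE cases in evacuate() below), only the
 * header of a BLACKHOLE is copied while its full size is reserved in
 * to-space:
 *
 *   copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), step);
 */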
static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is non-zero for a large object that has been
   evacuated, or zero otherwise.
   -------------------------------------------------------------------------- */

//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
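
/* For example, scavenge() below falls back on this when it fails to
 * promote everything an object points to (see the failed_to_evac
 * handling at the end of its main loop):
 *
 *   mkMutCons((StgClosure *)q, &generations[evac_gen]);
 */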
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
       if  M > N     do nothing
       else          evac to step->to

   if  M < evac_gen  evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
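
/* A concrete reading of those rules: suppose N = 1 and evac_gen = 0.
 * An object in generation 2 (M > N) is left where it is; an object in
 * generation 0 or 1 is copied to its step's to-space.  If instead
 * evac_gen = 2 (promotion forced), the same object is copied to
 * generations[2].steps[0].
 */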
//@cindex evacuate

static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
#ifdef DEBUG
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
#endif

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);
#if defined(PAR)
  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
             belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
                   q, info_type(q), info, info_type_by_ip(info)));
  }
#endif

  switch (info -> type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:               /* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
        {
          StgWord32 offset = info->layout.selector_offset;

          /* check that the size is in range */
          ASSERT(offset <
                 (StgWord32)(selectee_info->layout.payload.ptrs +
                             selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (HEAP_ALLOCED(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
        selectee = ((StgInd *)selectee)->indirectee;
        goto selector_loop;

      case CAF_ENTERED:
        selectee = ((StgCAF *)selectee)->value;
        goto selector_loop;

      case EVACUATED:
        selectee = ((StgEvacuated *)selectee)->evacuee;
        goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
        /* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
        /* not evaluated yet */
        break;

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee %d",
             (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);

  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* these are special - the payload is a copy of a chunk of stack,
       tagging and all. */
    return copy(q,pap_sizeW((StgPAP *)q),step);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {         /* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        return copy(q,size,step);
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
        if (info->type == MUT_ARR_PTRS) {
          recordMutable((StgMutClosure *)to);
        }
      }
      return to;
    }

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
        goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      else {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);

        recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
               belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}

//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }

  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
//@cindex scavenge
static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */

  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;                      /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
#if defined(PAR)
    if (info->type==RBH)
      info = REVERT_INFOPTR(info);
#endif

    switch (info -> type) {

    case BCO:
      {
        StgBCO* bco = (StgBCO *)p;
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }

    /* treat MVars specially, because we don't want to evacuate the
     * mut_link field in the middle of the closure.
     */
    case MVAR:
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case FUN_0_2:
    case THUNK_0_2:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN_1_1:
    case THUNK_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }
    case IND_PERM:
      if (step->gen->no != 0) {
        SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;
    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;
    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        evac_gen = 0;           /* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          recordMutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                       p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
        break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
               belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
                     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type\n");
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
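
/* For instance, scavenge_mut_once_list() below applies this to the
 * target of a MUT_CONS; a rtsTrue result means something could not be
 * promoted, so the MUT_CONS has to stay on the mutable list:
 *
 *   if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
 *     // put the MUT_CONS back on the list
 *   }
 */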
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
               || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

#if defined(PAR)
  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
#endif

  switch (info -> type) {

  case FUN:
  case FUN_1_0:                 /* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
        (StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object");
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
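
/* The caller's pattern, for reference (a sketch of the root-scavenging
 * loops in GarbageCollect() above):
 *
 *   for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
 *     scavenge_mut_once_list(&generations[g]);
 *   }
 *   for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
 *     scavenge_mutable_list(&generations[g]);
 *   }
 */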
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
#if defined(PAR)
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
#endif
    switch(info->type) {
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

#ifdef DEBUG
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }
#endif

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 0.
       */
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = new_list;
        new_list = p;
      } else {
        /* the mut_link field of an IND_STATIC is overloaded as the
         * static link field too (it just so happens that we don't need
         * both at the same time), so we need to NULL it out when
         * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * collection.
         */
        p->mut_link = NULL;
      }
      continue;
    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;
        new_list = p;
      }
      continue;

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
#if defined(PAR)
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
#endif
    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
      {
        StgPtr end, q;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS_FROZEN %p; size: %#x ; next: %p",
                       p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        evac_gen = gen->no;
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        evac_gen = 0;

        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = gen->mut_list;
          gen->mut_list = p;
        }
        continue;
      }
    case MUT_ARR_PTRS:
      /* follow everything */
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      {
        StgPtr end, q;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging MUT_ARR_PTRS %p; size: %#x ; next: %p",
                       p, mut_arr_ptrs_sizeW((StgMutArrPtrs*)p), p->mut_link));

        end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        continue;
      }

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      IF_DEBUG(gc,
               belch("@@ scavenge_mut_list: scavenging MUT_VAR %p; var: %p ; next: %p",
                     p, ((StgMutVar *)p)->var, p->mut_link));

      ASSERT(p->header.info != &MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
      continue;

    case MVAR:
      {
        StgMVar *mvar = (StgMVar *)p;

        IF_DEBUG(gc,
                 belch("@@ scavenge_mut_list: scavenging MVAR %p; head: %p; tail: %p; value: %p ; next: %p",
                       mvar, mvar->head, mvar->tail, mvar->value, p->mut_link));

        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p->mut_link = gen->mut_list;
        gen->mut_list = p;
        continue;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        scavengeTSO(tso);

        /* Don't take this TSO off the mutable list - it might still
         * point to some younger objects (because we set evac_gen to 0
         * above).
         */
        tso->mut_link = gen->mut_list;
        gen->mut_list = (StgMutClosure *)tso;
        continue;
      }
2556 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2559 belch("@@ scavenge_mut_list: scavenging BLACKHOLE_BQ (%p); next: %p",
2562 (StgClosure *)bh->blocking_queue =
2563 evacuate((StgClosure *)bh->blocking_queue);
2564 p->mut_link = gen->mut_list;
2569 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2572 case IND_OLDGEN_PERM:
2573 /* Try to pull the indirectee into this generation, so we can
2574 * remove the indirection from the mutable list.
2577 ((StgIndOldGen *)p)->indirectee =
2578 evacuate(((StgIndOldGen *)p)->indirectee);
2581 if (failed_to_evac) {
2582 failed_to_evac = rtsFalse;
2583 p->mut_link = gen->mut_once_list;
2584 gen->mut_once_list = p;
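	/* Note that on failure the indirection goes back on the mut_once
	 * list, not the mutable list: an old-generation indirection is
	 * never mutated again, so one more scavenge of it is enough.
	 */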
2590 // HWL: old PAR code deleted here
2593 /* shouldn't have anything else on the mutables list */
2594 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
2599 //@cindex scavenge_static
2602 scavenge_static(void)
2604 StgClosure* p = static_objects;
2605 const StgInfoTable *info;
2607 /* Always evacuate straight to the oldest generation for static
2609 evac_gen = oldest_gen->no;
2611 /* keep going until we've scavenged all the objects on the linked
2613 while (p != END_OF_STATIC_LIST) {
2617 if (info->type==RBH)
2618 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2620 /* make sure the info pointer is into text space */
2621 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2622 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2624 /* Take this object *off* the static_objects list,
2625 * and put it on the scavenged_static_objects list.
2627 static_objects = STATIC_LINK(info,p);
2628 STATIC_LINK(info,p) = scavenged_static_objects;
2629 scavenged_static_objects = p;
2631 switch (info -> type) {
2635 StgInd *ind = (StgInd *)p;
2636 ind->indirectee = evacuate(ind->indirectee);
2638 /* might fail to evacuate it, in which case we have to pop it
2639 * back on the mutable list (and take it off the
2640 * scavenged_static list because the static link and mut link
2641 * pointers are one and the same).
2643 if (failed_to_evac) {
2644 failed_to_evac = rtsFalse;
2645 scavenged_static_objects = STATIC_LINK(info,p);
2646 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
2647 oldest_gen->mut_once_list = (StgMutClosure *)ind;
2661 next = (P_)p->payload + info->layout.payload.ptrs;
2662 /* evacuate the pointers */
2663 for (q = (P_)p->payload; q < next; q++) {
2664 (StgClosure *)*q = evacuate((StgClosure *)*q);
2670 barf("scavenge_static");
2673 ASSERT(failed_to_evac == rtsFalse);
2675 /* get the next static object from the list. Remember, there might
2676 * be more stuff on this list now that we've done some evacuating!
2677 * (static_objects is a global)
2683 /* -----------------------------------------------------------------------------
2684 scavenge_stack walks over a section of stack and evacuates all the
2685 objects pointed to by it. We can use the same code for walking
2686 PAPs, since these are just sections of copied stack.
2687 -------------------------------------------------------------------------- */
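/* A note on what the loop below relies on: a pending argument section
   consists of words that are either argument tags (IS_ARG_TAG, telling
   us how many words to skip) or pointers to closures, handled one word
   at a time; an activation record is recognised because its first word
   looks like an info pointer (LOOKS_LIKE_GHC_INFO), and is scanned
   using the bitmap found via that info table before moving on to the
   next chunk above it. */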
2688 //@cindex scavenge_stack
2691 scavenge_stack(StgPtr p, StgPtr stack_end)
2694 const StgInfoTable* info;
2697 IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
2700 * Each time around this loop, we are looking at a chunk of stack
2701 * that starts with either a pending argument section or an
2702 * activation record.
2705 while (p < stack_end) {
2708 /* If we've got a tag, skip over that many words on the stack */
2709 if (IS_ARG_TAG((W_)q)) {
2714 /* Is q a pointer to a closure?
2716 if (! LOOKS_LIKE_GHC_INFO(q) ) {
2718 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
2719 ASSERT(closure_STATIC((StgClosure *)q));
2721 /* otherwise, must be a pointer into the allocation space. */
2724 (StgClosure *)*p = evacuate((StgClosure *)q);
2730 * Otherwise, q must be the info pointer of an activation
2731 * record. All activation records have 'bitmap' style layout
2734 info = get_itbl((StgClosure *)p);
2736 switch (info->type) {
2738 /* Dynamic bitmap: the mask is stored on the stack */
2740 bitmap = ((StgRetDyn *)p)->liveness;
2741 p = (P_)&((StgRetDyn *)p)->payload[0];
2744 /* probably a slow-entry point return address: */
2752 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
2753 old_p, p, old_p+1));
2755 p++; /* what if FHS!=1 !? -- HWL */
2760 /* Specialised code for update frames, since they're so common.
2761 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2762 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2766 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2768 nat type = get_itbl(frame->updatee)->type;
2770 p += sizeofW(StgUpdateFrame);
2771 if (type == EVACUATED) {
2772 frame->updatee = evacuate(frame->updatee);
2775 bdescr *bd = Bdescr((P_)frame->updatee);
2777 if (bd->gen->no > N) {
2778 if (bd->gen->no < evac_gen) {
2779 failed_to_evac = rtsTrue;
2784 /* Don't promote blackholes */
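	      /* (rationale: the black hole will shortly be overwritten
	       * by an update, so promoting it would only create garbage
	       * in the older step)
	       */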
2786 if (!(step->gen->no == 0 &&
2788 step->no == step->gen->n_steps-1)) {
2795 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2796 sizeofW(StgHeader), step);
2797 frame->updatee = to;
2800 to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
2801 frame->updatee = to;
2802 recordMutable((StgMutClosure *)to);
2805 /* will never be SE_{,CAF_}BLACKHOLE, since we
2806 don't push an update frame for single-entry thunks. KSW 1999-01. */
2807 barf("scavenge_stack: UPDATE_FRAME updatee");
2812 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
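      /* In these bitmaps a 0 bit means "live pointer, follow it" and a
       * 1 bit means "non-pointer word, skip".  Bit 0 describes the word
       * nearest the frame header.  For example, a bitmap of binary 110
       * says: word 0 is a pointer, words 1 and 2 are not.
       */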
2817 // StgPtr old_p = p; // debugging only -- HWL
2818 /* stack frames like these are ordinary closures and therefore may
2819 contain setup-specific fixed-header words (as in GranSim!);
2820 therefore, these cases should not use p++ but &(p->payload) -- HWL */
2821 // IF_DEBUG(gran, IF_DEBUG(sanity, printObj(p)));
2822 bitmap = info->layout.bitmap;
2824 p = (StgPtr)&(((StgClosure *)p)->payload);
2825 // IF_DEBUG(sanity, belch("HWL: scavenge_stack: (STOP|CATCH|SEQ)_FRAME adjusting p from %p to %p (instead of %p)", old_p, p, old_p+1));
2831 bitmap = info->layout.bitmap;
2833 /* this assumes that the payload starts immediately after the info-ptr */
2835 while (bitmap != 0) {
2836 if ((bitmap & 1) == 0) {
2837 (StgClosure *)*p = evacuate((StgClosure *)*p);
2840 bitmap = bitmap >> 1;
2847 /* large bitmap (> 32 entries) */
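      /* Each word of a large bitmap is decoded exactly like a small
       * bitmap.  q marks the end of the stack words covered by the
       * current bitmap word; when the bitmap word runs out of 1 bits
       * early, the words remaining before q correspond to 0 bits, so
       * the tail loop below evacuates them all as pointers.
       */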
2852 StgLargeBitmap *large_bitmap;
2855 large_bitmap = info->layout.large_bitmap;
2858 for (i=0; i<large_bitmap->size; i++) {
2859 bitmap = large_bitmap->bitmap[i];
2860 q = p + sizeof(W_) * 8;
2861 while (bitmap != 0) {
2862 if ((bitmap & 1) == 0) {
2863 (StgClosure *)*p = evacuate((StgClosure *)*p);
2866 bitmap = bitmap >> 1;
2868 if (i+1 < large_bitmap->size) {
2870 (StgClosure *)*p = evacuate((StgClosure *)*p);
2876 /* and don't forget to follow the SRT */
2881 barf("scavenge_stack: weird activation record found on stack.\n");
2886 /*-----------------------------------------------------------------------------
2887 scavenge the large object list.
2889 evac_gen set by caller; similar games played with evac_gen as with
2890 scavenge() - see comment at the top of scavenge(). Most large
2891 objects are (repeatedly) mutable, so most of the time evac_gen will
2892 be zero.
2893 --------------------------------------------------------------------------- */
2894 //@cindex scavenge_large
2897 scavenge_large(step *step)
2901 const StgInfoTable* info;
2902 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2904 evac_gen = 0; /* most objects are mutable */
2905 bd = step->new_large_objects;
2907 for (; bd != NULL; bd = step->new_large_objects) {
2909 /* take this object *off* the large objects list and put it on
2910 * the scavenged large objects list. This is so that we can
2911 * treat new_large_objects as a stack and push new objects on
2912 * the front when evacuating.
2914 step->new_large_objects = bd->link;
2915 dbl_link_onto(bd, &step->scavenged_large_objects);
2918 info = get_itbl((StgClosure *)p);
2920 switch (info->type) {
2922 /* only certain objects can be "large"... */
2925 /* nothing to follow */
2929 /* follow everything */
2933 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2934 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2935 (StgClosure *)*p = evacuate((StgClosure *)*p);
2940 case MUT_ARR_PTRS_FROZEN:
2941 /* follow everything */
2943 StgPtr start = p, next;
2945 evac_gen = saved_evac_gen; /* not really mutable */
2946 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2947 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2948 (StgClosure *)*p = evacuate((StgClosure *)*p);
2951 if (failed_to_evac) {
2952 recordMutable((StgMutClosure *)start);
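	  /* (the array goes back on its generation's mutable list, so
	   * the pointers that failed to evacuate are retried at the
	   * next GC)
	   */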
2959 StgBCO* bco = (StgBCO *)p;
2961 evac_gen = saved_evac_gen;
2962 for (i = 0; i < bco->n_ptrs; i++) {
2963 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
2970 scavengeTSO((StgTSO *)p);
2971 // HWL: old PAR code deleted here
2975 barf("scavenge_large: unknown/strange object");
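/* -----------------------------------------------------------------------------
   zero_static_object_list: run down a list of static objects threaded
   through their static link fields, resetting every link field to NULL
   ready for the next major GC.
   -------------------------------------------------------------------------- */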
2980 //@cindex zero_static_object_list
2983 zero_static_object_list(StgClosure* first_static)
2987 const StgInfoTable *info;
2989 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
2991 link = STATIC_LINK(info, p);
2992 STATIC_LINK(info,p) = NULL;
2996 /* This function is only needed because we share the mutable link
2997 * field with the static link field in an IND_STATIC, so we have to
2998 * zero the mut_link field before doing a major GC, which needs the
2999 * static link field.
3001 * It doesn't do any harm to zero all the mutable link fields on the
3004 //@cindex zero_mutable_list
3007 zero_mutable_list( StgMutClosure *first )
3009 StgMutClosure *next, *c;
3011 for (c = first; c != END_MUT_LIST; c = next) {
3017 //@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
3018 //@subsection Reverting CAFs
3020 /* -----------------------------------------------------------------------------
3021    Reverting CAFs
3022 -------------------------------------------------------------------------- */
3023 //@cindex RevertCAFs
3025 void RevertCAFs(void)
3027 while (enteredCAFs != END_CAF_LIST) {
3028 StgCAF* caf = enteredCAFs;
3030 enteredCAFs = caf->link;
3031 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
3032 SET_INFO(caf,&CAF_UNENTERED_info);
3033 caf->value = (StgClosure *)0xdeadbeef;
3034 caf->link = (StgCAF *)0xdeadbeef;
3036 enteredCAFs = END_CAF_LIST;
3039 //@cindex revert_dead_CAFs
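/* revert_dead_CAFs: after GC, walk the enteredCAFs list.  CAFs that are
 * still alive (isAlive returns their new address) stay on the list at
 * that new address; dead ones are reverted to CAF_UNENTERED, so that
 * entering them again recomputes the value instead of chasing a
 * dangling pointer.
 */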
3041 void revert_dead_CAFs(void)
3043 StgCAF* caf = enteredCAFs;
3044 enteredCAFs = END_CAF_LIST;
3045 while (caf != END_CAF_LIST) {
3048 new = (StgCAF*)isAlive((StgClosure*)caf);
3050 new->link = enteredCAFs;
3054 SET_INFO(caf,&CAF_UNENTERED_info);
3055 caf->value = (StgClosure*)0xdeadbeef;
3056 caf->link = (StgCAF*)0xdeadbeef;
3062 //@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
3063 //@subsection Sanity code for CAF garbage collection
3065 /* -----------------------------------------------------------------------------
3066 Sanity code for CAF garbage collection.
3068 With DEBUG turned on, we manage a CAF list in addition to the SRT
3069 mechanism. After GC, we run down the CAF list and blackhole any
3070 CAFs which have been garbage collected. This means we get an error
3071 whenever the program tries to enter a garbage collected CAF.
3073 Any garbage collected CAFs are taken off the CAF list at the same
3075 -------------------------------------------------------------------------- */
3085 const StgInfoTable *info;
3096 ASSERT(info->type == IND_STATIC);
3098 if (STATIC_LINK(info,p) == NULL) {
3099 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
3101 SET_INFO(p,&BLACKHOLE_info);
3102 p = STATIC_LINK2(info,p);
3106 pp = &STATIC_LINK2(info,p);
3113 /* fprintf(stderr, "%d CAFs live\n", i); */
3117 //@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
3118 //@subsection Lazy black holing
3120 /* -----------------------------------------------------------------------------
3121    Lazy black holing.
3123 Whenever a thread returns to the scheduler after possibly doing
3124 some work, we have to run down the stack and black-hole all the
3125 closures referred to by update frames.
3126 -------------------------------------------------------------------------- */
3127 //@cindex threadLazyBlackHole
3130 threadLazyBlackHole(StgTSO *tso)
3132 StgUpdateFrame *update_frame;
3133 StgBlockingQueue *bh;
3136 stack_end = &tso->stack[tso->stack_size];
3137 update_frame = tso->su;
3140 switch (get_itbl(update_frame)->type) {
3143 update_frame = ((StgCatchFrame *)update_frame)->link;
3147 bh = (StgBlockingQueue *)update_frame->updatee;
3149 /* if the thunk is already blackholed, it means we've also
3150 * already blackholed the rest of the thunks on this stack,
3151 * so we can stop early.
3153 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3154 * don't interfere with this optimisation.
3156 if (bh->header.info == &BLACKHOLE_info) {
3160 if (bh->header.info != &BLACKHOLE_BQ_info &&
3161 bh->header.info != &CAF_BLACKHOLE_info) {
3162 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3163 fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
3165 SET_INFO(bh,&BLACKHOLE_info);
3168 update_frame = update_frame->link;
3172 update_frame = ((StgSeqFrame *)update_frame)->link;
3178 barf("threadLazyBlackHole");
3183 //@node Stack squeezing, Pausing a thread, Lazy black holing
3184 //@subsection Stack squeezing
3186 /* -----------------------------------------------------------------------------
3187 * Stack squeezing
3189 * Code largely pinched from old RTS, then hacked to bits. We also do
3190 * lazy black holing here.
3192 * -------------------------------------------------------------------------- */
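/* In outline: two passes over the chain of frames.  The first walks
 * from tso->su down towards the STOP_FRAME, reversing the frame links
 * so that the second pass can walk back up.  The second pass looks for
 * two adjacent UPDATE_FRAMEs with nothing between them; the upper of
 * the two (the more recently pushed) is then redundant, so its updatee
 * is overwritten with an indirection (UPD_IND_NOLOCK) to the updatee
 * of the frame kept below, the frame's words are squeezed out, and the
 * intervening stack words are slid up by the accumulated displacement.
 * The surviving updatees are lazily black-holed on the same pass.
 */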
3193 //@cindex threadSqueezeStack
3196 threadSqueezeStack(StgTSO *tso)
3198 lnat displacement = 0;
3199 StgUpdateFrame *frame;
3200 StgUpdateFrame *next_frame; /* Temporally next */
3201 StgUpdateFrame *prev_frame; /* Temporally previous */
3203 rtsBool prev_was_update_frame;
3205 StgUpdateFrame *top_frame;
3206 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3208 void printObj( StgClosure *obj ); // from Printer.c
3210 top_frame = tso->su;
3213 bottom = &(tso->stack[tso->stack_size]);
3216 /* There must be at least one frame, namely the STOP_FRAME.
3218 ASSERT((P_)frame < bottom);
3220 /* Walk down the stack, reversing the links between frames so that
3221 * we can walk back up as we squeeze from the bottom. Note that
3222 * next_frame and prev_frame refer to next and previous as they were
3223 * added to the stack, rather than the way we see them in this
3224 * walk. (It makes the next loop less confusing.)
3226 * Stop if we find an update frame pointing to a black hole
3227 * (see comment in threadLazyBlackHole()).
3231 /* bottom - sizeofW(StgStopFrame) is the STOP_FRAME */
3232 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3233 prev_frame = frame->link;
3234 frame->link = next_frame;
3239 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3240 printObj((StgClosure *)prev_frame);
3241 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3244 switch (get_itbl(frame)->type) {
3245 case UPDATE_FRAME: upd_frames++;
3246 if (frame->updatee->header.info == &BLACKHOLE_info)
3249 case STOP_FRAME: stop_frames++;
3251 case CATCH_FRAME: catch_frames++;
3253 case SEQ_FRAME: seq_frames++;
3256 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3258 printObj((StgClosure *)prev_frame);
3261 if (get_itbl(frame)->type == UPDATE_FRAME
3262 && frame->updatee->header.info == &BLACKHOLE_info) {
3267 /* Now, we're at the bottom. Frame points to the lowest update
3268 * frame on the stack, and its link actually points to the frame
3269 * above. We have to walk back up the stack, squeezing out empty
3270 * update frames and turning the pointers back around on the way
3273 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3274 * we never want to eliminate it anyway. Just walk one step up
3275 * before starting to squeeze. When you get to the topmost frame,
3276 * remember that there are still some words above it that might have
3277 * to be moved.
3283 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3286 * Loop through all of the frames (everything except the very
3287 * bottom). Things are complicated by the fact that we have
3288 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3289 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3291 while (frame != NULL) {
3293 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3294 rtsBool is_update_frame;
3296 next_frame = frame->link;
3297 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3300 * 1. both the previous and current frame are update frames
3301 * 2. the current frame is empty
3303 if (prev_was_update_frame && is_update_frame &&
3304 (P_)prev_frame == frame_bottom + displacement) {
3306 /* Now squeeze out the current frame */
3307 StgClosure *updatee_keep = prev_frame->updatee;
3308 StgClosure *updatee_bypass = frame->updatee;
3311 IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
3315 /* Deal with blocking queues. If both updatees have blocked
3316 * threads, then we should merge the queues into the update
3317 * frame that we're keeping.
3319 * Alternatively, we could just wake them up: they'll just go
3320 * straight to sleep on the proper blackhole! This is less code
3321 * and probably less bug prone, although it's probably much
3322 * slower.
3324 #if 0 /* do it properly... */
3325 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3326 # error Unimplemented lazy BH warning. (KSW 1999-01)
3328 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
3329 || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
3331 /* Sigh. It has one. Don't lose those threads! */
3332 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
3333 /* Urgh. Two queues. Merge them. */
3334 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3336 while (keep_tso->link != END_TSO_QUEUE) {
3337 keep_tso = keep_tso->link;
3339 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3342 /* For simplicity, just swap the BQ for the BH */
3343 P_ temp = updatee_keep;
3345 updatee_keep = updatee_bypass;
3346 updatee_bypass = temp;
3348 /* Record the swap in the kept frame (below) */
3349 prev_frame->updatee = updatee_keep;
3354 TICK_UPD_SQUEEZED();
3355 /* wasn't there something about update squeezing and ticky to be
3356 * sorted out? oh yes: we aren't counting each enter properly
3357 * in this case. See the log somewhere. KSW 1999-04-21
3359 UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */
3361 sp = (P_)frame - 1; /* sp = stuff to slide */
3362 displacement += sizeofW(StgUpdateFrame);
3365 /* No squeeze for this frame */
3366 sp = frame_bottom - 1; /* Keep the current frame */
3368 /* Do lazy black-holing.
3370 if (is_update_frame) {
3371 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3372 if (bh->header.info != &BLACKHOLE_info &&
3373 bh->header.info != &BLACKHOLE_BQ_info &&
3374 bh->header.info != &CAF_BLACKHOLE_info) {
3375 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3376 fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
3378 SET_INFO(bh,&BLACKHOLE_info);
3382 /* Fix the link in the current frame (should point to the frame below) */
3383 frame->link = prev_frame;
3384 prev_was_update_frame = is_update_frame;
3387 /* Now slide all words from sp up to the next frame */
3389 if (displacement > 0) {
3390 P_ next_frame_bottom;
3392 if (next_frame != NULL)
3393 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3395 next_frame_bottom = tso->sp - 1;
3399 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
3403 while (sp >= next_frame_bottom) {
3404 sp[displacement] = *sp;
3408 (P_)prev_frame = (P_)frame + displacement;
3412 tso->sp += displacement;
3413 tso->su = prev_frame;
3416 fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
3417 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
3421 //@node Pausing a thread, Index, Stack squeezing
3422 //@subsection Pausing a thread
3424 /* -----------------------------------------------------------------------------
3425 * Pausing a thread
3427 * We have to prepare for GC - this means doing lazy black holing
3428 * here. We also take the opportunity to do stack squeezing if it's
3430 * -------------------------------------------------------------------------- */
3431 //@cindex threadPaused
3434 threadPaused(StgTSO *tso)
3436 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3437 threadSqueezeStack(tso); /* does black holing too */
3438 else
3439 threadLazyBlackHole(tso);
3442 /* -----------------------------------------------------------------------------
3443 * Debugging
3444 * -------------------------------------------------------------------------- */
3447 //@cindex printMutOnceList
3449 printMutOnceList(generation *gen)
3451 StgMutClosure *p, *next;
3453 p = gen->mut_once_list;
3456 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3457 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3458 fprintf(stderr, "%p (%s), ",
3459 p, info_type((StgClosure *)p));
3461 fputc('\n', stderr);
3464 //@cindex printMutableList
3466 printMutableList(generation *gen)
3468 StgMutClosure *p, *next;
3470 p = gen->saved_mut_list;
3473 fprintf(stderr, "@@ Mutable list %p: ", gen->saved_mut_list);
3474 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3475 fprintf(stderr, "%p (%s), ",
3476 p, info_type((StgClosure *)p));
3478 fputc('\n', stderr);
3482 //@node Index, , Pausing a thread
3486 //* GarbageCollect:: @cindex\s-+GarbageCollect
3487 //* MarkRoot:: @cindex\s-+MarkRoot
3488 //* RevertCAFs:: @cindex\s-+RevertCAFs
3489 //* addBlock:: @cindex\s-+addBlock
3490 //* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
3491 //* copy:: @cindex\s-+copy
3492 //* copyPart:: @cindex\s-+copyPart
3493 //* evacuate:: @cindex\s-+evacuate
3494 //* evacuate_large:: @cindex\s-+evacuate_large
3495 //* gcCAFs:: @cindex\s-+gcCAFs
3496 //* isAlive:: @cindex\s-+isAlive
3497 //* mkMutCons:: @cindex\s-+mkMutCons
3498 //* relocate_TSO:: @cindex\s-+relocate_TSO
3499 //* revert_dead_CAFs:: @cindex\s-+revert_dead_CAFs
3500 //* scavenge:: @cindex\s-+scavenge
3501 //* scavenge_large:: @cindex\s-+scavenge_large
3502 //* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
3503 //* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
3504 //* scavenge_one:: @cindex\s-+scavenge_one
3505 //* scavenge_srt:: @cindex\s-+scavenge_srt
3506 //* scavenge_stack:: @cindex\s-+scavenge_stack
3507 //* scavenge_static:: @cindex\s-+scavenge_static
3508 //* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
3509 //* threadPaused:: @cindex\s-+threadPaused
3510 //* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
3511 //* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
3512 //* upd_evacuee:: @cindex\s-+upd_evacuee
3513 //* zero_mutable_list:: @cindex\s-+zero_mutable_list
3514 //* zero_static_object_list:: @cindex\s-+zero_static_object_list