/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.80 2000/04/14 16:47:43 panne Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::
//@node Includes, STATIC OBJECT LIST
//@subsection Includes

#include "StoragePriv.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;            /* live static objects */
StgClosure* scavenged_static_objects;  /* static objects scavenged so far */
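/* Illustrative sketch (not part of the collector): how an object is
 * consed onto static_objects via its static link field, in the style
 * used by evacuate() below.  push_static_object is a hypothetical
 * helper, for exposition only.
 */
#if 0
static void push_static_object( StgClosure *c, const StgInfoTable *info )
{
  /* an object is on the list iff its static link field is non-zero */
  if (STATIC_LINK(info,c) == NULL) {
    STATIC_LINK(info,c) = static_objects;
    static_objects = c;
  }
}
#endif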
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );
static void         revert_dead_CAFs        ( void );
static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step *step );
static void         scavenge                ( step *step );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         gcCAFs                  ( void );

//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
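/* Worked example of the rules above (generation and step counts are
 * illustrative): with two generations of two steps each and N = 0, a
 * live object in g0s0 is evacuated to to-space in g0s1 (the next
 * higher step); a live object in g0s1 (generation == N, last step) is
 * evacuated to generation 1; and an object already in generation 1 is
 * merely scavenged in place, since generations older than N do not
 * move in this GC.
 */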
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int st;
  rtsBool flag;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
#endif

  /* tell the stats department that we've started a GC */

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
        N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->evacuated = 1;        /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->scan = bd->start;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->evacuated = 0;      /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

#if defined(PAR)
  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity,
             checkStaticObjects());
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  /* scavenge each step in generations 0..maxgen */
  {
    int gen;

    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
        if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
          continue;
        }
        step = &generations[gen].steps[st];
        evac_gen = gen;
        if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
          scavenge(step);
          flag = rtsTrue;
        }
        if (step->new_large_objects != NULL) {
          scavenge_large(step);
          flag = rtsTrue;
        }
      }
    }
  }
  if (flag) { goto loop; }

  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }
  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* revert dead CAFs and update enteredCAFs list */
  revert_dead_CAFs();

#if defined(PAR)
  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
        stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
                RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
        oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
        if (((int)oldest_gen->max_blocks -
             (int)oldest_gen->steps[0].to_blocks) <
            (RtsFlags.GcFlags.pcFreeHeap *
             RtsFlags.GcFlags.maxHeapSize / 200)) {
          heapOverflow();
        }
      }
    }
  }
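  /* Worked example (illustrative figures only): with 100 blocks live
   * in the oldest step and oldGenFactor = 2, max_blocks becomes
   * stg_max(200, minOldGenSize).  With maxHeapSize = 300 blocks the
   * cap clamps max_blocks to 150, and the remaining 150 - 100 = 50
   * blocks of headroom are then checked against the pcFreeHeap
   * threshold above.
   */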
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
        /* stats information: how much we copied */
        if (g <= N) {
          copied -= step->hp_bd->start + BLOCK_SIZE_W -
            step->hp_bd->free;
        }
      }

      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0;  /* now from-space */
          }
        }
        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;

        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         *   max_blocks =    oldgen_max_blocks * G
         *                   ----------------------
         *                          maxgen
         *
         * where G is the generation number and maxgen is the number
         * of the oldest generation (RtsFlags.GcFlags.generations-1).
         */
        if (RtsFlags.GcFlags.generations > 1) {
          generations[g].max_blocks = (oldest_gen->max_blocks * g)
            / (RtsFlags.GcFlags.generations-1);
        } else {
          generations[g].max_blocks = oldest_gen->max_blocks;
        }
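        /* Worked example (illustrative figures only): with 4
         * generations and oldest_gen->max_blocks = 90, the
         * interpolation gives generation 1 a cap of 90*1/3 = 30 blocks
         * and generation 2 a cap of 90*2/3 = 60 blocks.
         */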
      } else {
        /* for older generations... */

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          bd->evacuated = 0;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }

  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;        /* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
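    /* Worked example (illustrative figures only): with maxHeapSize =
     * 1000 blocks, oldGenFactor = 2 and blocks = 300 live, we have
     * 300*2*2 = 1200 > 1000, so we are near the limit:
     * adjusted_blocks = 1000 - 2*300 = 400 and pc_free = 40.  If
     * pcFreeHeap were 50 this would count as heap overflow; otherwise
     * the nursery is resized to 400 blocks, approaching the 2L-byte
     * regime described above.
     */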
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded();        /* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
        (100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
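      /* Worked example (illustrative figures only): with
       * heapSizeSuggestion = 6000 blocks, needed = 2000 and
       * g0s0_pcnt_kept = 30, the formula gives
       * blocks = (6000 - 2000) * 100 / 130 = 3076, clamped below by
       * minAllocAreaSize.
       */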
    }
  }

  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
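/* Usage sketch: the main loop in GarbageCollect() drives this function
 * to a fixed point, along the lines of
 *
 *   do {
 *       ...scavenge all to-space...
 *   } while (traverse_weak_ptr_list());
 *
 * (expressed there with a goto rather than a do/while); each pass can
 * discover newly-live keys, whose values may in turn keep further keys
 * alive.
 */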
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadKilled:
      case ThreadComplete:
        next = t->global_link;
        *prev = next;
        continue;
      default:
        ;
      }

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
        next = tmp->global_link;
        tmp->global_link = all_threads;
        all_threads = tmp;
        *prev = next;
      } else {
        prev = &(t->global_link);
        next = t->global_link;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;
        (StgClosure *)tmp = evacuate((StgClosure *)t);
        tmp->global_link = resurrected_threads;
        resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

//@cindex isAlive

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

loop:
  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }

  info = get_itbl(p);

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:              /* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    return ((StgEvacuated *)p)->evacuee;

  case BCO:
    size = bco_sizeW((StgBCO*)p);
    goto large;

  case ARR_WORDS:
    size = arr_words_sizeW((StgArrWords *)p);
    goto large;

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
    goto large;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }
    size = tso_sizeW((StgTSO *)p);
  large:
    if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
        && Bdescr((P_)p)->evacuated)
      return p;
    else
      return NULL;

  default:
    return NULL;
  }
}

//@cindex MarkRoot

StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
  return foo;
# else
  return evacuate(root);
# endif
}

//@cindex addBlock
static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  new_blocks++;
}
//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

//@cindex copy

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

//@cindex copyPart

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
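/* Usage note: evacuate() below uses this for black holes, e.g.
 *
 *   copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
 *
 * only the header is copied, but the full BLACKHOLE_sizeW() words are
 * reserved in to-space, so the closure can later be updated in place.
 */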
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */
//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

//@cindex mkMutCons

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
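/* Usage note: scavenge() calls this when the failed_to_evac flag is
 * set after scavenging an object, e.g.
 *
 *   mkMutCons((StgClosure *)q, &generations[evac_gen]);
 *
 * so that the old-to-new generation pointer is retried at the next GC.
 */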
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
       if  M > N     do nothing
       else          evac to step->to

   if  M < evac_gen  evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
//@cindex evacuate

static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
             belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
                   q, info_type(q), info, info_type_by_ip(info)));
  }
  switch (info -> type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:               /* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case CAF_UNENTERED:
  case CAF_ENTERED:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
        {
          StgWord32 offset = info->layout.selector_offset;

          /* check that the size is in range */
          ASSERT(offset <
                 (StgWord32)(selectee_info->layout.payload.ptrs +
                             selectee_info->layout.payload.nptrs));

          /* perform the selection! */
          q = selectee->payload[offset];

          /* if we're already in to-space, there's no need to continue
           * with the evacuation, just update the source address with
           * a pointer to the (evacuated) constructor field.
           */
          if (HEAP_ALLOCED(q)) {
            bdescr *bd = Bdescr((P_)q);
            if (bd->evacuated) {
              if (bd->gen->no < evac_gen) {
                failed_to_evac = rtsTrue;
                TICK_GC_FAILED_PROMOTION();
              }
              return q;
            }
          }

          /* otherwise, carry on and evacuate this constructor field,
           * (but not the constructor itself)
           */
          goto loop;
        }

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
        selectee = ((StgInd *)selectee)->indirectee;
        goto selector_loop;

      case CAF_ENTERED:
        selectee = ((StgCAF *)selectee)->value;
        goto selector_loop;

      case EVACUATED:
        selectee = ((StgEvacuated *)selectee)->evacuee;
        goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
        /* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
        /* not evaluated yet */
        break;

      default:
        barf("evacuate: THUNK_SELECTOR: strange selectee %d",
             (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
1543 barf("evacuate: stack frame at %p\n", q);
1547 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1548 * of stack, tagging and all.
1550 * They can be larger than a block in size. Both are only
1551 * allocated via allocate(), so they should be chained on to the
1552 * large_object list.
1555 nat size = pap_sizeW((StgPAP*)q);
1556 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1557 evacuate_large((P_)q, rtsFalse);
1560 return copy(q,size,step);
1565 /* Already evacuated, just return the forwarding address.
1566 * HOWEVER: if the requested destination generation (evac_gen) is
1567 * older than the actual generation (because the object was
1568 * already evacuated to a younger generation) then we have to
1569 * set the failed_to_evac flag to indicate that we couldn't
1570 * manage to promote the object to the desired generation.
1572 if (evac_gen > 0) { /* optimisation */
1573 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1574 if (Bdescr((P_)p)->gen->no < evac_gen) {
1575 IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
1576 failed_to_evac = rtsTrue;
1577 TICK_GC_FAILED_PROMOTION();
1580 return ((StgEvacuated*)q)->evacuee;
1584 nat size = arr_words_sizeW((StgArrWords *)q);
1586 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1587 evacuate_large((P_)q, rtsFalse);
1590 /* just copy the block */
1591 return copy(q,size,step);
  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
        if (info->type == MUT_ARR_PTRS) {
          recordMutable((StgMutClosure *)to);
        }
      }
      return to;
    }

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
        goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;
      } else {
        /* To evacuate a small TSO, we need to relocate the update frame
         * list it contains.
         */
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);

        recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
               belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  default:
1692 barf("evacuate: strange closure type %d", (int)(info->type));
1698 /* -----------------------------------------------------------------------------
1699 relocate_TSO is called just after a TSO has been copied from src to
1700 dest. It adjusts the update frame list for the new location.
1701 -------------------------------------------------------------------------- */
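/* Worked example (illustrative): if dest->sp sits 0x400 words above
 * src->sp, then diff = 0x400, and a frame link that pointed 0x2c0
 * words into the old stack is rewritten to point 0x2c0 words into the
 * new one by adding diff; the loop below applies exactly this offset
 * to every frame link on the stack.
 */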
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator knows
       which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
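/* Worked example of the tagging scheme above (illustrative addresses):
 * a DLL-imported closure whose indirection cell lives at 0x40001000 is
 * stored in the SRT as 0x40001001; masking with ~0x1 recovers
 * 0x40001000, and the extra dereference then yields the closure
 * itself.
 */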
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

//@cindex scavengeTSO

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
//@cindex scavenge

static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;                      /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {
    case BCO:
      {
        StgBCO* bco = (StgBCO *)p;
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }

    case IND_PERM:
      if (step->gen->no != 0) {
        SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
      }
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }
    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }
2049 barf("scavenge:IND???\n");
2051 case CONSTR_INTLIKE:
2052 case CONSTR_CHARLIKE:
2054 case CONSTR_NOCAF_STATIC:
2058 /* Shouldn't see a static object here. */
2059 barf("scavenge: STATIC object\n");
2071 /* Shouldn't see stack frames here. */
2072 barf("scavenge: stack frame\n");
2074 case AP_UPD: /* same as PAPs */
2076 /* Treat a PAP just like a section of stack, not forgetting to
2077 * evacuate the function pointer too...
2080 StgPAP* pap = (StgPAP *)p;
2082 pap->fun = evacuate(pap->fun);
2083 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2084 p += pap_sizeW(pap);
    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        evac_gen = 0;           /* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          recordMutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }
    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }

    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                       p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }
    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
        break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
               belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
                     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
           info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
               || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);
  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info -> type) {

  case FUN:
  case FUN_1_0:                 /* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
        (StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object %d", (int)(info->type));
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
//@cindex scavenge_mut_once_list
static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = new_list;
        new_list = p;
      } else {
        /* the mut_link field of an IND_STATIC is overloaded as the
         * static link field too (it just so happens that we don't need
         * both at the same time), so we need to NULL it out when
         * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * collection.
         */
        p->mut_link = NULL;
      }
      continue;

    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;
        new_list = p;
      }
      continue;

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        }
        continue;
      }

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        }
        continue;
      }

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;
  next = p->mut_link;

  evac_gen = 0;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2496 switch(info->type) {
2498 case MUT_ARR_PTRS_FROZEN:
2499 /* remove this guy from the mutable list, but follow the ptrs
2500 * anyway (and make sure they get promoted to this gen).
2505 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2507 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2508 (StgClosure *)*q = evacuate((StgClosure *)*q);
2512 if (failed_to_evac) {
2513 failed_to_evac = rtsFalse;
2514 p->mut_link = gen->mut_list;
2521 /* follow everything */
2522 p->mut_link = gen->mut_list;
2527 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2528 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2529 (StgClosure *)*q = evacuate((StgClosure *)*q);
2535 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2536 * it from the mutable list if possible by promoting whatever it
2539 ASSERT(p->header.info != &MUT_CONS_info);
2540 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2541 p->mut_link = gen->mut_list;
    case MVAR:
      {
	StgMVar *mvar = (StgMVar *)p;
	(StgClosure *)mvar->head  = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail  = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }
    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	scavengeTSO(tso);

	/* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
	tso->mut_link = gen->mut_list;
	gen->mut_list = (StgMutClosure *)tso;
	continue;
      }
    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }
      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);

      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = gen->mut_once_list;
	gen->mut_once_list = p;
      } else {
	p->mut_link = NULL;
      }
      continue;
#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }
    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case
    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif /* PAR */
    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
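
/* Note the contrast with scavenge_mut_once_list() above: genuinely
 * mutable objects (MUT_VAR, MVAR, TSO, BLACKHOLE_BQ) always go back on
 * gen->mut_list, since they can be mutated again later, whereas the
 * mutate-once objects drop off their list as soon as their target has
 * been successfully promoted.
 */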
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {
    info = get_itbl(p);
    if (info->type == RBH)
      info = REVERT_INFOPTR(info); /* if it's an RBH, look at the orig closure */

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;
    switch (info->type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list (and take it off the
	 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  scavenged_static_objects = STATIC_LINK(info,p);
	  ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
	  oldest_gen->mut_once_list = (StgMutClosure *)ind;
	}
	break;
      }
    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }
    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
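
/* scavenge_static() is an instance of draining a work list that grows
 * under the loop's feet: evacuation may cons freshly-reached static
 * objects onto the global static_objects, so the loop re-reads the
 * list head on every iteration.  A self-contained sketch of the same
 * discipline (illustrative only, all names made up):
 */
#if 0
struct Work { struct Work *link; };
#define END_WORK ((struct Work *)1)    /* non-zero sentinel, cf. END_OF_STATIC_LIST */

static struct Work *todo = END_WORK;   /* grows as a side effect of process() */

extern void process(struct Work *w);   /* hypothetical; may cons onto todo */

static void drain(void)
{
    while (todo != END_WORK) {
	struct Work *w = todo;
	todo = w->link;                /* pop *before* processing, so anything */
	process(w);                    /* pushed by process() is seen later    */
    }
}
#endif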
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord32 bitmap;

  //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));
  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
  while (p < stack_end) {
    q = *(P_ *)p;
    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
	ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }
    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
	StgPtr old_p = p;
	p++; p++;
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
#else
	p++; /* what if FHS!=1 !? -- HWL */
#endif
	goto follow_srt;
      }
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);
	  continue;
	} else {
	  bdescr *bd = Bdescr((P_)frame->updatee);
	  step *step;
	  if (bd->gen->no > N) {
	    if (bd->gen->no < evac_gen) {
	      failed_to_evac = rtsTrue;
	    }
	    continue;
	  }

	  /* Don't promote blackholes */
	  step = bd->step;
	  if (!(step->gen->no == 0 &&
		step->no != 0 &&
		step->no == step->gen->n_steps-1)) {
	    step = step->to;
	  }

	  switch (type) {
	  case BLACKHOLE:
	  case CAF_BLACKHOLE:
	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), step);
	    frame->updatee = to;
	    continue;
	  case BLACKHOLE_BQ:
	    to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);
	    continue;
	  default:
	    /* will never be SE_{,CAF_}BLACKHOLE, since we
	       don't push an update frame for single-entry thunks.  KSW 1999-01. */
	    barf("scavenge_stack: UPDATE_FRAME updatee");
	  }
	}
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
	StgLargeBitmap *large_bitmap;
	nat i;

	large_bitmap = info->layout.large_bitmap;
	p++;

	for (i=0; i<large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	  }
	  if (i+1 < large_bitmap->size) {
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
	  }
	}

	/* and don't forget to follow the SRT */
	goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
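
/* The bitmap convention used by the small- and large-bitmap cases in
 * scavenge_stack() above: bits are consumed LSB-first, one bit per
 * stack word, and a *clear* bit marks a pointer that must be followed.
 * A self-contained sketch of a small-bitmap walk (illustrative only,
 * not RTS code; W_ is approximated by unsigned long):
 */
#if 0
static void walk_small_bitmap(unsigned long *payload, unsigned long bitmap,
			      void (*visit)(unsigned long *slot))
{
    while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	    visit(payload);            /* clear bit == live pointer slot */
	}
	payload++;                     /* next stack word ...            */
	bitmap >>= 1;                  /* ... next bit                   */
    }
}
#endif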
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;			 /* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {
    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);
    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	continue;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = 0;
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)start);
	}
	continue;
      }
    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;

	evac_gen = saved_evac_gen;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	evac_gen = 0;
	continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;
    case AP_UPD:
    case PAP:
      {
	StgPAP* pap = (StgPAP *)p;

	evac_gen = saved_evac_gen; /* not really mutable */
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	evac_gen = 0;
	continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
#ifdef INTERPRETER
  StgInt i;

  /* Deal with CAFs created by compiled code. */
  for (i = 0; i < usedECafTable; i++) {
    SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
    ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
  }
  /* Deal with CAFs created by the interpreter. */
  while (ecafList != END_ECAF_LIST) {
    StgCAF* caf = ecafList;
    ecafList = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = (StgClosure *)0xdeadbeef;
    caf->link  = (StgCAF *)0xdeadbeef;
  }

  /* Empty out both the table and the list. */
  clearECafTable();
  ecafList = END_ECAF_LIST;
#endif
}
//@cindex revert_dead_CAFs

void revert_dead_CAFs(void)
{
  StgCAF* caf = enteredCAFs;
  enteredCAFs = END_CAF_LIST;
  while (caf != END_CAF_LIST) {
    StgCAF *next, *new;
    next = caf->link;
    new = (StgCAF*)isAlive((StgClosure*)caf);
    if (new) {
      new->link = enteredCAFs;
      enteredCAFs = new;
    } else {
      SET_INFO(caf,&CAF_UNENTERED_info);
      caf->value = (StgClosure*)0xdeadbeef;
      caf->link  = (StgCAF*)0xdeadbeef;
    }
    caf = next;
  }
}
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#ifdef DEBUG
//@cindex gcCAFs

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /* fprintf(stderr, "%d CAFs live\n", i); */
}
#endif /* DEBUG */
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {
    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
	return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
	  bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadPaused");
    }
  }
}
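
/* Note on the early exit in threadLazyBlackHole() above: blackholing
 * proceeds from the topmost update frame downwards, so finding an
 * updatee that is already a plain BLACKHOLE means an earlier pass got
 * at least this far -- every frame below it must already be done, and
 * the walk can stop.
 */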
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;			/* Temporally next */
  StgUpdateFrame *prev_frame;			/* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
      if (frame->updatee->header.info == &BLACKHOLE_info)
	bhs++;
      break;
    case STOP_FRAME:  stop_frames++;
      break;
    case CATCH_FRAME: catch_frames++;
      break;
    case SEQ_FRAME:   seq_frames++;
      break;
    default:
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	   frame, prev_frame);
      printObj((StgClosure *)prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
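
  /* Illustrative note: the loop above is the classic in-place list
   * reversal.  With frames A (top, == tso->su) -> B -> C (bottom)
   * linked via `link', after the walk C's link points to B and B's to
   * A, so the squeeze loop below can travel bottom-up, while
   * `next_frame' remembers the frame above the current one.
   */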
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {
      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif
      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif
      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &BLACKHOLE_info &&
	    bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	  SET_INFO(bh,&BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

#if DEBUG
      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       displacement));
#endif

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
#if DEBUG
  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames));
#endif
}
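
/* Worked example of the slide above (illustrative): suppose one update
 * frame of sizeofW(StgUpdateFrame) words was squeezed out, so
 * displacement == sizeofW(StgUpdateFrame).  Every word between the
 * squeezed frame and the next frame moves from sp to sp+displacement;
 * walking from high addresses to low means no word is overwritten
 * before it has been read (the stack grows downwards, so "up" is
 * numerically positive).  A minimal sketch of the copy loop (not RTS
 * code, W_ approximated by unsigned long):
 */
#if 0
static void slide_words(unsigned long *sp,     /* highest word to move */
			unsigned long *limit,  /* lowest word to move  */
			unsigned long displacement)
{
    while (sp >= limit) {
	sp[displacement] = *sp;        /* copy towards higher addresses */
	sp--;
    }
}
#endif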
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
	  info->type == MUT_ARR_PTRS_FROZEN ||
	  info->type == TSO ||
	  info->type == ARR_WORDS ||
	  info->type == BCO);
}

#endif /* DEBUG */
//@node Index,  , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect:: @cindex\s-+GarbageCollect
//* MarkRoot:: @cindex\s-+MarkRoot
//* RevertCAFs:: @cindex\s-+RevertCAFs
//* addBlock:: @cindex\s-+addBlock
//* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
//* copy:: @cindex\s-+copy
//* copyPart:: @cindex\s-+copyPart
//* evacuate:: @cindex\s-+evacuate
//* evacuate_large:: @cindex\s-+evacuate_large
//* gcCAFs:: @cindex\s-+gcCAFs
//* isAlive:: @cindex\s-+isAlive
//* maybeLarge:: @cindex\s-+maybeLarge
//* mkMutCons:: @cindex\s-+mkMutCons
//* printMutOnceList:: @cindex\s-+printMutOnceList
//* printMutableList:: @cindex\s-+printMutableList
//* relocate_TSO:: @cindex\s-+relocate_TSO
//* revert_dead_CAFs:: @cindex\s-+revert_dead_CAFs
//* scavenge:: @cindex\s-+scavenge
//* scavenge_large:: @cindex\s-+scavenge_large
//* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
//* scavenge_one:: @cindex\s-+scavenge_one
//* scavenge_srt:: @cindex\s-+scavenge_srt
//* scavenge_stack:: @cindex\s-+scavenge_stack
//* scavenge_static:: @cindex\s-+scavenge_static
//* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
//* threadPaused:: @cindex\s-+threadPaused
//* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee:: @cindex\s-+upd_evacuee
//* zero_mutable_list:: @cindex\s-+zero_mutable_list
//* zero_static_object_list:: @cindex\s-+zero_static_object_list
//@end index