/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.81 2000/04/27 16:31:46 sewardj Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes

#include "StoragePriv.h"
#include "SchedAPI.h" /* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
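
/* Illustrative sketch (not part of the collector): how a static
 * closure gets consed onto static_objects, and why the end-of-list
 * marker must be the non-zero value '1' (END_OF_STATIC_LIST) rather
 * than NULL: a NULL static link field means "not on the list yet",
 * so list membership is a cheap non-NULL test.  The real code does
 * this inline in evacuate() for each static closure type.
 */
#if 0
static void push_static_example(const StgInfoTable *info, StgClosure *c)
{
  if (STATIC_LINK(info,c) == NULL) {       /* not on the list yet? */
    STATIC_LINK(info,c) = static_objects;  /* cons onto the front */
    static_objects = c;
  }
}
#endif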
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step *step );
static void         scavenge                ( step *step );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );
//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         step.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
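
/* Illustrative sketch of the destination rule in the outline above
 * (assumptions: the step->no, gen->no and gen->n_steps fields as used
 * elsewhere in this file; the real collector precomputes this choice
 * in step->to rather than calling a helper like this).
 */
#if 0
static step *evac_dest_example(step *s)
{
  if (s->no + 1 < s->gen->n_steps) {
    /* to-space in the next higher step in this generation */
    return &s->gen->steps[s->no + 1];
  } else if (s->gen->no + 1 < RtsFlags.GcFlags.generations) {
    /* top step: promote to the next generation */
    return &generations[s->gen->no + 1].steps[0];
  } else {
    /* oldest generation: evacuate to this step's own to-space */
    return s;
  }
}
#endif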
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int st;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
#endif

  /* tell the stats department that we've started a GC */

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
        N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd = allocBlock();
      bd->gen = &generations[g];
      bd->step = step;
      bd->evacuated = 1;  /* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->scan = bd->start;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
        bd->evacuated = 0;
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
        bd = allocBlock();
        bd->gen = &generations[g];
        bd->step = step;
        bd->evacuated = 0;  /* *not* a to-space block */
        step->hp = bd->start;
        step->hpLim = step->hp + BLOCK_SIZE_W;
        step->hp_bd = bd;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
                 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

#if defined(PAR)
  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the SRT document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    /* scavenge static objects */
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity,
               checkStaticObjects());
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
    {
      int gen;
      for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
        for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
          }
          step = &generations[gen].steps[st];
          evac_gen = gen;
          if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
            scavenge(step);
            flag = rtsTrue;
          }
          if (step->new_large_objects != NULL) {
            scavenge_large(step);
            flag = rtsTrue;
          }
        }
      }
    }
    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
      goto loop;
    }
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

#if defined(PAR)
  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
        stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
                RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
        oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
        if (((int)oldest_gen->max_blocks -
             (int)oldest_gen->steps[0].to_blocks) <
            (RtsFlags.GcFlags.pcFreeHeap *
             RtsFlags.GcFlags.maxHeapSize / 200)) {
          heapOverflow();
        }
      }
    }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        /* Tidy the end of the to-space chains */
        step->hp_bd->free = step->hp;
        step->hp_bd->link = NULL;
        /* stats information: how much we copied */
        if (g <= N) {
          copied -= step->hp_bd->start + BLOCK_SIZE_W -
            step->hp_bd->free;
        }
      }

      /* for generations we collected... */
      if (g <= N) {

        collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
          freeChain(step->blocks);
          step->blocks = step->to_space;
          step->n_blocks = step->to_blocks;
          step->to_space = NULL;
          for (bd = step->blocks; bd != NULL; bd = bd->link) {
            bd->evacuated = 0;  /* now from-space */
          }
        }

        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = step->large_objects; bd != NULL; bd = next) {
          next = bd->link;
          freeGroup(bd);
        }
        for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
          bd->evacuated = 0;
        }
        step->large_objects = step->scavenged_large_objects;

        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         * max_blocks =    oldgen_max_blocks * G
         *                 ----------------------
         *                      oldest_gen_no
         */
        if (g != RtsFlags.GcFlags.generations-1) {
          generations[g].max_blocks = (oldest_gen->max_blocks * g)
            / (RtsFlags.GcFlags.generations-1);
        } else {
          generations[g].max_blocks = oldest_gen->max_blocks;
        }

      /* for older generations... */
      } else {

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = step->scavenged_large_objects; bd; bd = next) {
          next = bd->link;
          dbl_link_onto(bd, &step->large_objects);
        }

        /* add the new blocks we promoted during this GC */
        step->n_blocks += step->to_blocks;
      }
    }
  }
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;

  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;  /* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
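
    /* Worked example of the near-maximum-heap adjustment above
     * (illustrative numbers only): with maxHeapSize = 1000 blocks,
     * oldGenFactor = 2 and to_blocks = 300, we have 300*2*2 = 1200 >
     * 1000, so we're near the limit and adjusted_blocks = 1000 -
     * 2*300 = 400, giving pc_free = 400*100/1000 = 40.  Provided 40
     * >= pcFreeHeap, the nursery is resized to 400 blocks, bringing
     * the total memory requirement down towards the 2L figure
     * discussed above.
     */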
  } else {

    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded();  /* approx blocks needed at next GC */

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
        (100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
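
      /* Worked example of the formula above (illustrative numbers
       * only): with heapSizeSuggestion = 1000 blocks, needed = 200
       * blocks and g0s0_pcnt_kept = 30, we get blocks =
       * (1000 - 200) * 100 / 130 = 615.  At the next GC roughly
       * 615 * 30/100 = 184 blocks of g0s0 survive, and
       * 615 + 184 + 200 is close to the 1000-block suggestion.
       */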
  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  /* Reset the nursery
   */
  resetNurseries();

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   garbage.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
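
/* Illustrative sketch (not the code below): the fixpoint that
 * GarbageCollect() drives traverse_weak_ptr_list to.  Each pass may
 * evacuate the values/finalizers of weak pointers whose keys turned
 * out to be alive, which can make further keys reachable, so the
 * collector keeps scavenging and re-traversing until a pass does no
 * work (scavenge_until_done is a hypothetical stand-in for the
 * scavenge loop in GarbageCollect above).
 */
#if 0
  for (;;) {
    scavenge_until_done();             /* hypothetical helper */
    if (!traverse_weak_ptr_list()) {   /* rtsFalse: no work done */
      break;                           /* survivors are now known */
    }
  }
#endif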
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadKilled:
      case ThreadComplete:
        next = t->global_link;
        *prev = next;
        continue;

      default:
        /* Threads which have already been determined to be alive are
         * moved onto the all_threads list.
         */
        (StgClosure *)tmp = isAlive((StgClosure *)t);
        if (tmp != NULL) {
          next = tmp->global_link;
          tmp->global_link = all_threads;
          all_threads = tmp;
          *prev = next;
        } else {
          /* not alive (yet): leave this thread on the
           * old_all_threads list.
           */
          prev = &(t->global_link);
          next = t->global_link;
        }
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;
        (StgClosure *)tmp = evacuate((StgClosure *)t);
        tmp->global_link = resurrected_threads;
        resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    /* ignore closures in generations that we're not collecting. */
    if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
      return p;
    }

    switch (info->type) {

    case IND_OLDGEN:  /* rely on compatible layout with StgInd */
    case IND_OLDGEN_PERM:
      /* follow indirections */
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      /* alive! */
      return ((StgEvacuated *)p)->evacuee;

    case BCO:
      size = bco_sizeW((StgBCO*)p);
      goto large;

    case ARR_WORDS:
      size = arr_words_sizeW((StgArrWords *)p);
      goto large;

    case MUT_ARR_PTRS:
    case MUT_ARR_PTRS_FROZEN:
      size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
      goto large;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
        p = (StgClosure *)((StgTSO *)p)->link;
        continue;
      }

      size = tso_sizeW((StgTSO *)p);
    large:
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
          && Bdescr((P_)p)->evacuated)
        return p;
      else
        return NULL;

    default:
      /* dead. */
      return NULL;
    }
  }
}
StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
  return foo;
# else
  return evacuate(root);
# endif
}

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;  /* it's a to-space block */
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
  new_blocks++;
}
//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

//@cindex copy

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for (to = step->hp, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

//@cindex copyPart

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for (to = step->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_objects list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */
//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

//@cindex mkMutCons

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
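
/* Example scenario (illustrative): suppose a generation-2 IND_OLDGEN
 * is updated to point at a freshly allocated generation-0 closure.
 * A minor GC that collects only generation 0 may fail to promote the
 * new closure all the way to generation 2; evacuate() then sets
 * failed_to_evac, and the scavenging code calls mkMutCons() to wrap
 * the referring object in a MUT_CONS on generation 2's mut-once
 * list, so that the promotion is retried at the next GC (see
 * scavenge() and scavenge_mut_once_list() below).
 */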
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
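
/* Worked example of the rules above (illustrative, not exhaustive):
 * suppose there are 3 generations and we're collecting up to N = 1
 * while scavenging generation 2's mut-once list, so evac_gen = 2.
 * An object found in generation 0 (M < evac_gen) is promoted straight
 * to generation 2, step 0.  An object that has already been evacuated
 * into generation 1 (EVACUATED case, M < evac_gen) cannot be moved
 * again, so failed_to_evac is set and the referring generation-2
 * object stays on a mutable list (see scavenge_mut_once_list below).
 */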
//@cindex evacuate

static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
#ifdef DEBUG
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
#endif

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
             belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
                   q, info_type(q), info, info_type_by_ip(info)));
  }
  switch (info -> type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:  /* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_STATIC:
          {
            StgWord32 offset = info->layout.selector_offset;

            /* check that the size is in range */
            ASSERT(offset <
                   (StgWord32)(selectee_info->layout.payload.ptrs +
                               selectee_info->layout.payload.nptrs));

            /* perform the selection! */
            q = selectee->payload[offset];

            /* if we're already in to-space, there's no need to continue
             * with the evacuation, just update the source address with
             * a pointer to the (evacuated) constructor field.
             */
            if (HEAP_ALLOCED(q)) {
              bdescr *bd = Bdescr((P_)q);
              if (bd->evacuated) {
                if (bd->gen->no < evac_gen) {
                  failed_to_evac = rtsTrue;
                  TICK_GC_FAILED_PROMOTION();
                }
                return q;
              }
            }

            /* otherwise, carry on and evacuate this constructor field,
             * (but not the constructor itself)
             */
            goto loop;
          }

        case IND:
        case IND_STATIC:
        case IND_PERM:
        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
          selectee = ((StgInd *)selectee)->indirectee;
          goto selector_loop;

        case CAF_ENTERED:
          selectee = ((StgCAF *)selectee)->value;
          goto selector_loop;

        case EVACUATED:
          selectee = ((StgEvacuated *)selectee)->evacuee;
          goto selector_loop;

        case THUNK_SELECTOR:
          /* aargh - do recursively???? */
        case CAF_UNENTERED:
        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case BLACKHOLE_BQ:
          /* not evaluated yet */
          break;

        default:
          barf("evacuate: THUNK_SELECTOR: strange selectee %d",
               (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
    {
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        return copy(q,size,step);
      }
    }
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {  /* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
        IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsFalse);
        return q;
      } else {
        /* just copy the block */
        return copy(q,size,step);
      }
    }
  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
        to = q;
      } else {
        /* just copy the block */
        to = copy(q,size,step);
        if (info->type == MUT_ARR_PTRS) {
          recordMutable((StgMutClosure *)to);
        }
      }
      return to;
    }
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
        goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        evacuate_large((P_)q, rtsTrue);
        return q;
      } else {
        /* To evacuate a small TSO, we need to relocate the update frame
         * list it contains.
         */
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

        diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

        /* relocate the stack pointers... */
        new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
        new_tso->sp = (StgPtr)new_tso->sp + diff;
        new_tso->splim = (StgPtr)new_tso->splim + diff;

        relocate_TSO(tso, new_tso);

        recordMutable((StgMutClosure *)new_tso);
        return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
               belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

static void
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }
}
//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static __inline__ void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
//@cindex scavenge

static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;  /* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {

    case BCO:
      {
        StgBCO* bco = (StgBCO *)p;
        nat i;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        p += bco_sizeW(bco);
        break;
      }

    /* treat MVars specially, because we don't want to evacuate the
     * mut_link field in the middle of the closure.
     */
    case MVAR:
      {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        p += sizeofW(StgMVar);
        evac_gen = saved_evac_gen;
        break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
      }

    case IND_PERM:
      if (step->gen->no != 0) {
        SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;

        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordOldToNewPtrs((StgMutClosure *)p);
        } else {
          caf->mut_link = NULL;
        }
        p += sizeofW(StgCAF);
        break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
          evacuate((StgClosure *)bh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bh);
        }
        p += BLACKHOLE_sizeW();
        break;
      }

    case THUNK_SELECTOR:
      {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
      }
    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        evac_gen = 0;  /* repeatedly mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        if (failed_to_evac) {
          /* we can do this easier... */
          recordMutable((StgMutClosure *)start);
          failed_to_evac = rtsFalse;
        }
        break;
      }

    case TSO:
      {
        StgTSO *tso = (StgTSO *)p;

        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        p += tso_sizeW(tso);
        break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                       p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
        break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
               belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
                     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
           info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
               || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);
  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
  switch (info -> type) {

  case FUN:
  case FUN_1_0:  /* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
        (StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object %d", (int)(info->type));
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
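
/* Illustrative sketch (mutator-side, not part of this file): how
 * objects get onto these lists in the first place.  When a pointer
 * to a young object is stored into an old-generation object, the
 * write barrier records the old object with recordMutable() (for
 * repeatedly mutable objects, onto mut_list) or recordOldToNewPtrs()
 * (for write-once objects such as IND_OLDGEN, onto mut_once_list);
 * both live in the storage manager.  The hypothetical fragment below
 * shows the shape of such a barrier after a thunk update.
 */
#if 0
  /* after overwriting 'thunk' with an indirection to a new value: */
  if (Bdescr((P_)thunk)->gen->no > 0) {
    recordOldToNewPtrs((StgMutClosure *)thunk);  /* -> mut_once_list */
  }
#endif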
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
        evacuate(((StgIndOldGen *)p)->indirectee);

#if 0
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
        StgPtr start = gen->steps[0].scan;
        bdescr *start_bd = gen->steps[0].scan_bd;
        nat size = 0;
        scavenge(&gen->steps[0]);
        if (start_bd != gen->steps[0].scan_bd) {
          size += (P_)BLOCK_ROUND_UP(start) - start;
          start_bd = start_bd->link;
          while (start_bd != gen->steps[0].scan_bd) {
            size += BLOCK_SIZE_W;
            start_bd = start_bd->link;
          }
          size += gen->steps[0].scan -
            (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
        } else {
          size = gen->steps[0].scan - start;
        }
        fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }
#endif
      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        p->mut_link = new_list;
        new_list = p;
      } else {
        /* the mut_link field of an IND_STATIC is overloaded as the
         * static link field too (it just so happens that we don't need
         * both at the same time), so we need to NULL it out when
         * removing this object from the mutable list because the static
         * link fields are all assumed to be NULL before doing a major
         * collection.
         */
        p->mut_link = NULL;
      }
      continue;

    case MUT_CONS:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
        /* didn't manage to promote everything, so put the
         * MUT_CONS back on the list.
         */
        p->mut_link = new_list;
        new_list = p;
      }
      continue;

    case CAF_ENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        caf->value = evacuate(caf->value);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    case CAF_UNENTERED:
      {
        StgCAF *caf = (StgCAF *)p;
        caf->body = evacuate(caf->body);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          p->mut_link = new_list;
          new_list = p;
        } else {
          p->mut_link = NULL;
        }
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}
2463 //@cindex scavenge_mutable_list
2466 scavenge_mutable_list(generation *gen)
2468 const StgInfoTable *info;
2469 StgMutClosure *p, *next;
2471 p = gen->saved_mut_list;
2475 failed_to_evac = rtsFalse;
2477 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2479 /* make sure the info pointer is into text space */
2480 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2481 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2485 if (info->type==RBH)
2486 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2488 switch(info->type) {
2490 case MUT_ARR_PTRS_FROZEN:
2491 /* remove this guy from the mutable list, but follow the ptrs
2492 * anyway (and make sure they get promoted to this gen).
2497 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2499 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2500 (StgClosure *)*q = evacuate((StgClosure *)*q);
2504 if (failed_to_evac) {
2505 failed_to_evac = rtsFalse;
2506 p->mut_link = gen->mut_list;
2513 /* follow everything */
2514 p->mut_link = gen->mut_list;
2519 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2520 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2521 (StgClosure *)*q = evacuate((StgClosure *)*q);
2527 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2528 * it from the mutable list if possible by promoting whatever it
2531 ASSERT(p->header.info != &MUT_CONS_info);
2532 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2533 p->mut_link = gen->mut_list;
2539 StgMVar *mvar = (StgMVar *)p;
2540 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2541 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2542 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2543 p->mut_link = gen->mut_list;
    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	scavengeTSO(tso);

	/* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
	tso->mut_link = gen->mut_list;
	gen->mut_list = (StgMutClosure *)tso;
	continue;
      }
    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }
      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen->no;
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = gen->mut_once_list;
	gen->mut_once_list = p;
      } else {
	((StgIndOldGen *)p)->mut_link = NULL;
      }
      continue;
#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }
    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	p += sizeofW(StgBlockedFetch);
	break;
      }
    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif /* PAR */
    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
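
/* Illustrative sketch (exposition only; the helper below is
 * hypothetical): the pattern the cases above repeat.  After following
 * a mutable closure's pointers, it stays on gen->mut_list only if it
 * is genuinely mutable or if something it references could not be
 * promoted (failed_to_evac); otherwise it is dropped simply by not
 * being re-linked.
 */
#if 0
static void
keep_on_mut_list(generation *gen, StgMutClosure *p)
{
  if (failed_to_evac) {
    failed_to_evac = rtsFalse;
    p->mut_link   = gen->mut_list;	/* must stay on the list */
    gen->mut_list = p;
  }
}
#endif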
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list...
   */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;
    switch (info -> type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list (and take it off the
	 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  scavenged_static_objects = STATIC_LINK(info,p);
	  ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
	  oldest_gen->mut_once_list = (StgMutClosure *)ind;
	}
	break;
      }
    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }
    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
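
/* Schematic (exposition only) of the scan loop above: evacuate() may
 * cons freshly-reachable static objects onto the global static_objects
 * list at any time, so the loop re-reads the global after scavenging
 * each object instead of caching a next pointer.
 */
#if 0
  for (p = static_objects; p != END_OF_STATIC_LIST; p = static_objects) {
    /* unlink p and scavenge it; this may push new entries onto
     * static_objects, which the next iteration picks up */
  }
#endif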
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord32 bitmap;

  //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));
  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
  while (p < stack_end) {
    q = *(P_ *)p;

    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if ( !LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  /* Is it a static closure? */
	ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }
    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0	/* disabled debug variant */
	StgPtr old_p = p;
	p++; p++;
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
#else
	p++; /* what if FHS!=1 !? -- HWL */
#endif
      }
      goto follow_srt;
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);
	  continue;
	} else {
	  bdescr *bd = Bdescr((P_)frame->updatee);
	  step *step;
	  if (bd->gen->no > N) {
	    if (bd->gen->no < evac_gen) {
	      failed_to_evac = rtsTrue;
	    }
	    continue;
	  }

	  /* Don't promote blackholes */
	  step = bd->step;
	  if (!(step->gen->no == 0 &&
		step->no != 0 &&
		step->no == step->gen->n_steps-1)) {
	    step = step->to;
	  }

	  switch (type) {
	  case BLACKHOLE:
	  case CAF_BLACKHOLE:
	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), step);
	    frame->updatee = to;
	    continue;
	  case BLACKHOLE_BQ:
	    to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);
	    continue;
	  default:
	    /* will never be SE_{,CAF_}BLACKHOLE, since we
	       don't push an update frame for single-entry thunks.  KSW 1999-01. */
	    barf("scavenge_stack: UPDATE_FRAME updatee");
	  }
	}
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
	StgLargeBitmap *large_bitmap;
	nat i;

	large_bitmap = info->layout.large_bitmap;
	p++;

	for (i=0; i<large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	  }
	  if (i+1 < large_bitmap->size) {
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
	  }
	}

	/* and don't forget to follow the SRT */
	goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
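
/* Worked example (exposition only) of the small-bitmap convention
 * used above: a *clear* bit marks a pointer slot.  With bitmap = 0x5
 * (binary 101), stack slots p[0] and p[2] are non-pointers and are
 * skipped, while p[1] is treated as a closure pointer and evacuated.
 */
#if 0
  bitmap = 0x5;
  while (bitmap != 0) {
    if ((bitmap & 1) == 0) {
      (StgClosure *)*p = evacuate((StgClosure *)*p);	/* p[1] only */
    }
    p++;
    bitmap = bitmap >> 1;
  }
#endif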
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;			/* most objects are mutable */
  bd = step->new_large_objects;
  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);

    switch (info->type) {
    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	continue;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = 0;
	if (failed_to_evac) {
	  recordMutable((StgMutClosure *)start);
	}
	continue;
      }
    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;

	evac_gen = saved_evac_gen;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	evac_gen = 0;
	continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;
    case AP_UPD:
    case PAP:
      {
	StgPAP* pap = (StgPAP *)p;

	evac_gen = saved_evac_gen; /* not really mutable */
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	evac_gen = 0;
	continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
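
/* Schematic (exposition only) of the stack discipline above:
 * evacuate_large() pushes a freshly evacuated large object onto the
 * front of step->new_large_objects, and the loop above pops from the
 * front, so work generated while scavenging is picked up on the next
 * iteration without any extra bookkeeping.
 */
#if 0
  while (step->new_large_objects != NULL) {
    bdescr *bd = step->new_large_objects;		 /* pop ...     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);	 /* ... retire  */
  }
#endif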
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
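
/* Usage sketch (exposition only): before a major collection the
 * caller is expected to clear the once-mutable lists with this
 * function, precisely because an IND_STATIC's mut_link word doubles
 * as its static link field (see the comment above).
 */
#if 0
  zero_mutable_list(gen->mut_once_list);	/* before a major GC */
#endif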
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
#ifdef INTERPRETER
   StgInt i;

   /* Deal with CAFs created by compiled code. */
   for (i = 0; i < usedECafTable; i++) {
      SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
      ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
   }

   /* Deal with CAFs created by the interpreter. */
   while (ecafList != END_ECAF_LIST) {
      StgCAF* caf = ecafList;
      ecafList    = caf->link;
      ASSERT(get_itbl(caf)->type == CAF_ENTERED);
      SET_INFO(caf,&CAF_UNENTERED_info);
      caf->value = (StgClosure *)0xdeadbeef;
      caf->link  = (StgCAF *)0xdeadbeef;
   }

   /* Empty out both the table and the list. */
   clearECafTable();
   ecafList = END_ECAF_LIST;
#endif
}
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex gcCAFs
static void
gcCAFs(void)
{
  StgClosure *p, **pp;		/* cursor into caf_list, the DEBUG-only
				   CAF list kept by the storage manager */
  const StgInfoTable *info;
  nat i = 0;

  p  = caf_list;
  pp = &caf_list;

  while (p != NULL) {
    info = get_itbl(p);
    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      SET_INFO(p,&BLACKHOLE_info);	/* black hole it */
      p = STATIC_LINK2(info,p);
      *pp = p;				/* unlink from caf_list */
    } else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /*  fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
	return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
	  bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadPaused");
    }
  }
}
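
/* Schematic (exposition only) of the early-exit invariant above:
 * update frames are blackholed from the top of the stack downwards,
 * so the first already-BLACKHOLE'd updatee proves that every older
 * frame was dealt with on a previous pause, and the walk can stop.
 * (f is an StgUpdateFrame *; chasing links through CATCH/SEQ frames
 * is elided here.)
 */
#if 0
  for (f = tso->su; get_itbl(f)->type != STOP_FRAME; f = f->link) {
    if (get_itbl(f)->type == UPDATE_FRAME &&
	f->updatee->header.info == &BLACKHOLE_info)
      break;			/* everything older is already done */
  }
#endif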
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;			/* Temporally next */
  StgUpdateFrame *prev_frame;			/* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif
  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
		       if (frame->updatee->header.info == &BLACKHOLE_info)
			 bhs++;
		       break;
    case STOP_FRAME:  stop_frames++;
		      break;
    case CATCH_FRAME: catch_frames++;
		      break;
    case SEQ_FRAME:   seq_frames++;
		      break;
    default:
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	   frame, prev_frame);
      printObj((StgClosure *)prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {
      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif
      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */
      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &BLACKHOLE_info &&
	    bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	  SET_INFO(bh,&BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       displacement))

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;

  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
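
/* Worked example (exposition only) of the displacement accounting
 * above.  Assuming an StgUpdateFrame is 3 words (header, link,
 * updatee - an assumption for the arithmetic below, true without
 * profiling), squeezing out two frames makes displacement 6 words,
 * and every word between the frames slides that far towards the
 * (higher-addressed) bottom of the stack:
 */
#if 0
  displacement = 2 * sizeofW(StgUpdateFrame);	/* == 6 words, say */
  while (sp >= next_frame_bottom) {
    sp[displacement] = *sp;	/* copy towards the stack bottom */
    sp -= 1;
  }
#endif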
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused
void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}
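
/* Usage sketch (exposition only): the scheduler calls this on every
 * thread it deschedules.  RtsFlags.GcFlags.squeezeUpdFrames selects
 * the strategy; threadSqueezeStack() subsumes the black holing that
 * threadLazyBlackHole() would otherwise perform.
 */
#if 0
  threadPaused(tso);	/* before putting tso back on the run queue */
#endif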
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
	  info->type == MUT_ARR_PTRS_FROZEN ||
	  info->type == TSO ||
	  info->type == ARR_WORDS ||
	  info->type == BCO);
}

#endif /* DEBUG */
//@node Index,  , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* maybeLarge::  @cindex\s-+maybeLarge
//* mkMutCons::  @cindex\s-+mkMutCons
//* printMutOnceList::  @cindex\s-+printMutOnceList
//* printMutableList::  @cindex\s-+printMutableList
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list
//@end index