1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.85 2000/10/06 15:38:06 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
12 //* STATIC OBJECT LIST::
13 //* Static function declarations::
19 //* Sanity code for CAF garbage collection::
20 //* Lazy black holing::
22 //* Pausing a thread::
26 //@node Includes, STATIC OBJECT LIST
27 //@subsection Includes
33 #include "StoragePriv.h"
36 #include "SchedAPI.h" /* for RevertCAFs prototype */
39 #include "BlockAlloc.h"
44 #include "StablePriv.h"
46 #if defined(GRAN) || defined(PAR)
47 # include "GranSimRts.h"
48 # include "ParallelRts.h"
52 # include "ParallelDebug.h"
60 //@node STATIC OBJECT LIST, Static function declarations, Includes
61 //@subsection STATIC OBJECT LIST
63 /* STATIC OBJECT LIST.
66 * We maintain a linked list of static objects that are still live.
67 * The requirements for this list are:
69 * - we need to scan the list while adding to it, in order to
70 * scavenge all the static objects (in the same way that
71 * breadth-first scavenging works for dynamic objects).
73 * - we need to be able to tell whether an object is already on
74 * the list, to break loops.
76 * Each static object has a "static link field", which we use for
77 * linking objects on to the list. We use a stack-type list, consing
78 * objects on the front as they are added (this means that the
79 * scavenge phase is depth-first, not breadth-first, but that
82 * A separate list is kept for objects that have been scavenged
83 * already - this is so that we can zero all the marks afterwards.
85 * An object is on the list if its static link field is non-zero; this
86 * means that we have to mark the end of the list with '1', not NULL.
88 * Extra notes for generational GC:
90 * Each generation has a static object list associated with it. When
91 * collecting generations up to N, we treat the static object lists
92 * from generations > N as roots.
94 * We build up a static object list while collecting generations 0..N,
95 * which is then appended to the static object list of generation N+1.
97 StgClosure* static_objects; /* live static objects */
98 StgClosure* scavenged_static_objects; /* static objects scavenged so far */
100 /* N is the oldest generation being collected, where the generations
101 * are numbered starting at 0. A major GC (indicated by the major_gc
102 * flag) is when we're collecting all generations. We only attempt to
103 * deal with static objects and GC CAFs when doing a major GC.
106 static rtsBool major_gc;
108 /* Youngest generation that objects should be evacuated to in
109 * evacuate(). (Logically an argument to evacuate, but it's static
110 * a lot of the time so we optimise it into a global variable).
116 static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
117 static rtsBool weak_done; /* all done for this pass */
119 /* List of all threads during GC
121 static StgTSO *old_all_threads;
122 static StgTSO *resurrected_threads;
124 /* Flag indicating failure to evacuate an object to the desired
127 static rtsBool failed_to_evac;
129 /* Old to-space (used for two-space collector only)
131 bdescr *old_to_space;
134 /* Data used for allocation area sizing.
136 lnat new_blocks; /* blocks allocated during this GC */
137 lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
139 //@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
140 //@subsection Static function declarations
142 /* -----------------------------------------------------------------------------
143 Static function declarations
144 -------------------------------------------------------------------------- */
146 static StgClosure * evacuate ( StgClosure *q );
147 static void zero_static_object_list ( StgClosure* first_static );
148 static void zero_mutable_list ( StgMutClosure *first );
150 static rtsBool traverse_weak_ptr_list ( void );
151 static void cleanup_weak_ptr_list ( StgWeak **list );
153 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
154 static void scavenge_large ( step *step );
155 static void scavenge ( step *step );
156 static void scavenge_static ( void );
157 static void scavenge_mutable_list ( generation *g );
158 static void scavenge_mut_once_list ( generation *g );
161 static void gcCAFs ( void );
164 //@node Garbage Collect, Weak Pointers, Static function declarations
165 //@subsection Garbage Collect
167 /* -----------------------------------------------------------------------------
170 For garbage collecting generation N (and all younger generations):
172 - follow all pointers in the root set. the root set includes all
173 mutable objects in all steps in all generations.
175 - for each pointer, evacuate the object it points to into either
176 + to-space in the next higher step in that generation, if one exists,
177 + if the object's generation == N, then evacuate it to the next
178 generation if one exists, or else to-space in the current
180 + if the object's generation < N, then evacuate it to to-space
181 in the next generation.
183 - repeatedly scavenge to-space from each step in each generation
184 being collected until no more objects can be evacuated.
186 - free from-space in each step, and set from-space = to-space.
188 -------------------------------------------------------------------------- */
189 //@cindex GarbageCollect
191 void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
195 lnat live, allocated, collected = 0, copied = 0;
199 CostCentreStack *prev_CCS;
202 #if defined(DEBUG) && defined(GRAN)
203 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
207 /* tell the stats department that we've started a GC */
210 /* attribute any costs to CCS_GC */
216 /* Approximate how much we allocated */
217 allocated = calcAllocated();
219 /* Figure out which generation to collect
221 if (force_major_gc) {
222 N = RtsFlags.GcFlags.generations - 1;
226 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
227 if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
231 major_gc = (N == RtsFlags.GcFlags.generations-1);
234 /* check stack sanity *before* GC (ToDo: check all threads) */
236 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
238 IF_DEBUG(sanity, checkFreeListSanity());
240 /* Initialise the static object lists
242 static_objects = END_OF_STATIC_LIST;
243 scavenged_static_objects = END_OF_STATIC_LIST;
245 /* zero the mutable list for the oldest generation (see comment by
246 * zero_mutable_list below).
249 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
252 /* Save the old to-space if we're doing a two-space collection
254 if (RtsFlags.GcFlags.generations == 1) {
255 old_to_space = g0s0->to_space;
256 g0s0->to_space = NULL;
259 /* Keep a count of how many new blocks we allocated during this GC
260 * (used for resizing the allocation area, later).
264 /* Initialise to-space in all the generations/steps that we're
267 for (g = 0; g <= N; g++) {
268 generations[g].mut_once_list = END_MUT_LIST;
269 generations[g].mut_list = END_MUT_LIST;
271 for (s = 0; s < generations[g].n_steps; s++) {
273 /* generation 0, step 0 doesn't need to-space */
274 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
278 /* Get a free block for to-space. Extra blocks will be chained on
282 step = &generations[g].steps[s];
283 ASSERT(step->gen->no == g);
284 ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
285 bd->gen = &generations[g];
288 bd->evacuated = 1; /* it's a to-space block */
289 step->hp = bd->start;
290 step->hpLim = step->hp + BLOCK_SIZE_W;
294 step->scan = bd->start;
296 step->new_large_objects = NULL;
297 step->scavenged_large_objects = NULL;
299 /* mark the large objects as not evacuated yet */
300 for (bd = step->large_objects; bd; bd = bd->link) {
306 /* make sure the older generations have at least one block to
307 * allocate into (this makes things easier for copy(), see below.
309 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
310 for (s = 0; s < generations[g].n_steps; s++) {
311 step = &generations[g].steps[s];
312 if (step->hp_bd == NULL) {
314 bd->gen = &generations[g];
317 bd->evacuated = 0; /* *not* a to-space block */
318 step->hp = bd->start;
319 step->hpLim = step->hp + BLOCK_SIZE_W;
325 /* Set the scan pointer for older generations: remember we
326 * still have to scavenge objects that have been promoted. */
327 step->scan = step->hp;
328 step->scan_bd = step->hp_bd;
329 step->to_space = NULL;
331 step->new_large_objects = NULL;
332 step->scavenged_large_objects = NULL;
336 /* -----------------------------------------------------------------------
337 * follow all the roots that we know about:
338 * - mutable lists from each generation > N
339 * we want to *scavenge* these roots, not evacuate them: they're not
340 * going to move in this GC.
341 * Also: do them in reverse generation order. This is because we
342 * often want to promote objects that are pointed to by older
343 * generations early, so we don't have to repeatedly copy them.
344 * Doing the generations in reverse order ensures that we don't end
345 * up in the situation where we want to evac an object to gen 3 and
346 * it has already been evaced to gen 2.
350 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
351 generations[g].saved_mut_list = generations[g].mut_list;
352 generations[g].mut_list = END_MUT_LIST;
355 /* Do the mut-once lists first */
356 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
357 IF_PAR_DEBUG(verbose,
358 printMutOnceList(&generations[g]));
359 scavenge_mut_once_list(&generations[g]);
361 for (st = generations[g].n_steps-1; st >= 0; st--) {
362 scavenge(&generations[g].steps[st]);
366 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
367 IF_PAR_DEBUG(verbose,
368 printMutableList(&generations[g]));
369 scavenge_mutable_list(&generations[g]);
371 for (st = generations[g].n_steps-1; st >= 0; st--) {
372 scavenge(&generations[g].steps[st]);
377 /* follow all the roots that the application knows about.
383 /* And don't forget to mark the TSO if we got here direct from
385 /* Not needed in a seq version?
387 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
391 /* Mark the entries in the GALA table of the parallel system */
392 markLocalGAs(major_gc);
395 /* Mark the weak pointer list, and prepare to detect dead weak
398 old_weak_ptr_list = weak_ptr_list;
399 weak_ptr_list = NULL;
400 weak_done = rtsFalse;
402 /* The all_threads list is like the weak_ptr_list.
403 * See traverse_weak_ptr_list() for the details.
405 old_all_threads = all_threads;
406 all_threads = END_TSO_QUEUE;
407 resurrected_threads = END_TSO_QUEUE;
409 /* Mark the stable pointer table.
411 markStablePtrTable(major_gc);
415 /* ToDo: To fix the caf leak, we need to make the commented out
416 * parts of this code do something sensible - as described in
419 extern void markHugsObjects(void);
424 /* -------------------------------------------------------------------------
425 * Repeatedly scavenge all the areas we know about until there's no
426 * more scavenging to be done.
433 /* scavenge static objects */
434 if (major_gc && static_objects != END_OF_STATIC_LIST) {
436 checkStaticObjects());
440 /* When scavenging the older generations: Objects may have been
441 * evacuated from generations <= N into older generations, and we
442 * need to scavenge these objects. We're going to try to ensure that
443 * any evacuations that occur move the objects into at least the
444 * same generation as the object being scavenged, otherwise we
445 * have to create new entries on the mutable list for the older
449 /* scavenge each step in generations 0..maxgen */
453 for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
454 for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
455 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
458 step = &generations[gen].steps[st];
460 if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
465 if (step->new_large_objects != NULL) {
466 scavenge_large(step);
473 if (flag) { goto loop; }
475 /* must be last... */
476 if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
481 /* Final traversal of the weak pointer list (see comment by
482 * cleanUpWeakPtrList below).
484 cleanup_weak_ptr_list(&weak_ptr_list);
486 /* Now see which stable names are still alive.
488 gcStablePtrTable(major_gc);
491 /* Reconstruct the Global Address tables used in GUM */
492 rebuildGAtables(major_gc);
493 IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
494 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
497 /* Set the maximum blocks for the oldest generation, based on twice
498 * the amount of live data now, adjusted to fit the maximum heap
501 * This is an approximation, since in the worst case we'll need
502 * twice the amount of live data plus whatever space the other
505 if (RtsFlags.GcFlags.generations > 1) {
507 oldest_gen->max_blocks =
508 stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
509 RtsFlags.GcFlags.minOldGenSize);
510 if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
511 oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
512 if (((int)oldest_gen->max_blocks -
513 (int)oldest_gen->steps[0].to_blocks) <
514 (RtsFlags.GcFlags.pcFreeHeap *
515 RtsFlags.GcFlags.maxHeapSize / 200)) {
522 /* run through all the generations/steps and tidy up
524 copied = new_blocks * BLOCK_SIZE_W;
525 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
528 generations[g].collections++; /* for stats */
531 for (s = 0; s < generations[g].n_steps; s++) {
533 step = &generations[g].steps[s];
535 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
536 /* Tidy the end of the to-space chains */
537 step->hp_bd->free = step->hp;
538 step->hp_bd->link = NULL;
539 /* stats information: how much we copied */
541 copied -= step->hp_bd->start + BLOCK_SIZE_W -
546 /* for generations we collected... */
549 collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */
551 /* free old memory and shift to-space into from-space for all
552 * the collected steps (except the allocation area). These
553 * freed blocks will probaby be quickly recycled.
555 if (!(g == 0 && s == 0)) {
556 freeChain(step->blocks);
557 step->blocks = step->to_space;
558 step->n_blocks = step->to_blocks;
559 step->to_space = NULL;
561 for (bd = step->blocks; bd != NULL; bd = bd->link) {
562 bd->evacuated = 0; /* now from-space */
566 /* LARGE OBJECTS. The current live large objects are chained on
567 * scavenged_large, having been moved during garbage
568 * collection from large_objects. Any objects left on
569 * large_objects list are therefore dead, so we free them here.
571 for (bd = step->large_objects; bd != NULL; bd = next) {
576 for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
579 step->large_objects = step->scavenged_large_objects;
581 /* Set the maximum blocks for this generation, interpolating
582 * between the maximum size of the oldest and youngest
585 * max_blocks = oldgen_max_blocks * G
586 * ----------------------
591 generations[g].max_blocks = (oldest_gen->max_blocks * g)
592 / (RtsFlags.GcFlags.generations-1);
594 generations[g].max_blocks = oldest_gen->max_blocks;
597 /* for older generations... */
600 /* For older generations, we need to append the
601 * scavenged_large_object list (i.e. large objects that have been
602 * promoted during this GC) to the large_object list for that step.
604 for (bd = step->scavenged_large_objects; bd; bd = next) {
607 dbl_link_onto(bd, &step->large_objects);
610 /* add the new blocks we promoted during this GC */
611 step->n_blocks += step->to_blocks;
616 /* Guess the amount of live data for stats. */
619 /* Free the small objects allocated via allocate(), since this will
620 * all have been copied into G0S1 now.
622 if (small_alloc_list != NULL) {
623 freeChain(small_alloc_list);
625 small_alloc_list = NULL;
629 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
631 /* Two-space collector:
632 * Free the old to-space, and estimate the amount of live data.
634 if (RtsFlags.GcFlags.generations == 1) {
637 if (old_to_space != NULL) {
638 freeChain(old_to_space);
640 for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
641 bd->evacuated = 0; /* now from-space */
644 /* For a two-space collector, we need to resize the nursery. */
646 /* set up a new nursery. Allocate a nursery size based on a
647 * function of the amount of live data (currently a factor of 2,
648 * should be configurable (ToDo)). Use the blocks from the old
649 * nursery if possible, freeing up any left over blocks.
651 * If we get near the maximum heap size, then adjust our nursery
652 * size accordingly. If the nursery is the same size as the live
653 * data (L), then we need 3L bytes. We can reduce the size of the
654 * nursery to bring the required memory down near 2L bytes.
656 * A normal 2-space collector would need 4L bytes to give the same
657 * performance we get from 3L bytes, reducing to the same
658 * performance at 2L bytes.
660 blocks = g0s0->to_blocks;
662 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
663 RtsFlags.GcFlags.maxHeapSize ) {
664 int adjusted_blocks; /* signed on purpose */
667 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
668 IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
669 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
670 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
673 blocks = adjusted_blocks;
676 blocks *= RtsFlags.GcFlags.oldGenFactor;
677 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
678 blocks = RtsFlags.GcFlags.minAllocAreaSize;
681 resizeNursery(blocks);
684 /* Generational collector:
685 * If the user has given us a suggested heap size, adjust our
686 * allocation area to make best use of the memory available.
689 if (RtsFlags.GcFlags.heapSizeSuggestion) {
691 nat needed = calcNeeded(); /* approx blocks needed at next GC */
693 /* Guess how much will be live in generation 0 step 0 next time.
694 * A good approximation is the obtained by finding the
695 * percentage of g0s0 that was live at the last minor GC.
698 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
701 /* Estimate a size for the allocation area based on the
702 * information available. We might end up going slightly under
703 * or over the suggested heap size, but we should be pretty
706 * Formula: suggested - needed
707 * ----------------------------
708 * 1 + g0s0_pcnt_kept/100
710 * where 'needed' is the amount of memory needed at the next
711 * collection for collecting all steps except g0s0.
714 (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
715 (100 + (int)g0s0_pcnt_kept);
717 if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
718 blocks = RtsFlags.GcFlags.minAllocAreaSize;
721 resizeNursery((nat)blocks);
725 /* mark the garbage collected CAFs as dead */
727 if (major_gc) { gcCAFs(); }
730 /* zero the scavenged static object list */
732 zero_static_object_list(scavenged_static_objects);
739 /* start any pending finalizers */
740 scheduleFinalizers(old_weak_ptr_list);
742 /* send exceptions to any threads which were about to die */
743 resurrectThreads(resurrected_threads);
745 /* check sanity after GC */
746 IF_DEBUG(sanity, checkSanity(N));
748 /* extra GC trace info */
749 IF_DEBUG(gc, stat_describe_gens());
752 /* symbol-table based profiling */
753 /* heapCensus(to_space); */ /* ToDo */
756 /* restore enclosing cost centre */
762 /* check for memory leaks if sanity checking is on */
763 IF_DEBUG(sanity, memInventory());
765 /* ok, GC over: tell the stats department what happened. */
766 stat_endGC(allocated, collected, live, copied, N);
769 //@node Weak Pointers, Evacuation, Garbage Collect
770 //@subsection Weak Pointers
772 /* -----------------------------------------------------------------------------
775 traverse_weak_ptr_list is called possibly many times during garbage
776 collection. It returns a flag indicating whether it did any work
777 (i.e. called evacuate on any live pointers).
779 Invariant: traverse_weak_ptr_list is called when the heap is in an
780 idempotent state. That means that there are no pending
781 evacuate/scavenge operations. This invariant helps the weak
782 pointer code decide which weak pointers are dead - if there are no
783 new live weak pointers, then all the currently unreachable ones are
786 For generational GC: we just don't try to finalize weak pointers in
787 older generations than the one we're collecting. This could
788 probably be optimised by keeping per-generation lists of weak
789 pointers, but for a few weak pointers this scheme will work.
790 -------------------------------------------------------------------------- */
791 //@cindex traverse_weak_ptr_list
794 traverse_weak_ptr_list(void)
796 StgWeak *w, **last_w, *next_w;
798 rtsBool flag = rtsFalse;
800 if (weak_done) { return rtsFalse; }
802 /* doesn't matter where we evacuate values/finalizers to, since
803 * these pointers are treated as roots (iff the keys are alive).
807 last_w = &old_weak_ptr_list;
808 for (w = old_weak_ptr_list; w; w = next_w) {
810 /* First, this weak pointer might have been evacuated. If so,
811 * remove the forwarding pointer from the weak_ptr_list.
813 if (get_itbl(w)->type == EVACUATED) {
814 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
818 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
819 * called on a live weak pointer object. Just remove it.
821 if (w->header.info == &DEAD_WEAK_info) {
822 next_w = ((StgDeadWeak *)w)->link;
827 ASSERT(get_itbl(w)->type == WEAK);
829 /* Now, check whether the key is reachable.
831 if ((new = isAlive(w->key))) {
833 /* evacuate the value and finalizer */
834 w->value = evacuate(w->value);
835 w->finalizer = evacuate(w->finalizer);
836 /* remove this weak ptr from the old_weak_ptr list */
838 /* and put it on the new weak ptr list */
840 w->link = weak_ptr_list;
843 IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
853 /* Now deal with the all_threads list, which behaves somewhat like
854 * the weak ptr list. If we discover any threads that are about to
855 * become garbage, we wake them up and administer an exception.
858 StgTSO *t, *tmp, *next, **prev;
860 prev = &old_all_threads;
861 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
863 /* Threads which have finished or died get dropped from
866 switch (t->what_next) {
867 case ThreadRelocated:
873 next = t->global_link;
879 /* Threads which have already been determined to be alive are
880 * moved onto the all_threads list.
882 (StgClosure *)tmp = isAlive((StgClosure *)t);
884 next = tmp->global_link;
885 tmp->global_link = all_threads;
889 prev = &(t->global_link);
890 next = t->global_link;
895 /* If we didn't make any changes, then we can go round and kill all
896 * the dead weak pointers. The old_weak_ptr list is used as a list
897 * of pending finalizers later on.
899 if (flag == rtsFalse) {
900 cleanup_weak_ptr_list(&old_weak_ptr_list);
901 for (w = old_weak_ptr_list; w; w = w->link) {
902 w->finalizer = evacuate(w->finalizer);
905 /* And resurrect any threads which were about to become garbage.
908 StgTSO *t, *tmp, *next;
909 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
910 next = t->global_link;
911 (StgClosure *)tmp = evacuate((StgClosure *)t);
912 tmp->global_link = resurrected_threads;
913 resurrected_threads = tmp;
923 /* -----------------------------------------------------------------------------
924 After GC, the live weak pointer list may have forwarding pointers
925 on it, because a weak pointer object was evacuated after being
926 moved to the live weak pointer list. We remove those forwarding
929 Also, we don't consider weak pointer objects to be reachable, but
930 we must nevertheless consider them to be "live" and retain them.
931 Therefore any weak pointer objects which haven't as yet been
932 evacuated need to be evacuated now.
933 -------------------------------------------------------------------------- */
935 //@cindex cleanup_weak_ptr_list
938 cleanup_weak_ptr_list ( StgWeak **list )
940 StgWeak *w, **last_w;
943 for (w = *list; w; w = w->link) {
945 if (get_itbl(w)->type == EVACUATED) {
946 w = (StgWeak *)((StgEvacuated *)w)->evacuee;
950 if (Bdescr((P_)w)->evacuated == 0) {
951 (StgClosure *)w = evacuate((StgClosure *)w);
958 /* -----------------------------------------------------------------------------
959 isAlive determines whether the given closure is still alive (after
960 a garbage collection) or not. It returns the new address of the
961 closure if it is alive, or NULL otherwise.
962 -------------------------------------------------------------------------- */
967 isAlive(StgClosure *p)
969 const StgInfoTable *info;
976 /* ToDo: for static closures, check the static link field.
977 * Problem here is that we sometimes don't set the link field, eg.
978 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
981 /* ignore closures in generations that we're not collecting. */
982 if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
986 switch (info->type) {
991 case IND_OLDGEN: /* rely on compatible layout with StgInd */
992 case IND_OLDGEN_PERM:
993 /* follow indirections */
994 p = ((StgInd *)p)->indirectee;
999 return ((StgEvacuated *)p)->evacuee;
1002 size = bco_sizeW((StgBCO*)p);
1006 size = arr_words_sizeW((StgArrWords *)p);
1010 case MUT_ARR_PTRS_FROZEN:
1011 size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
1015 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1016 p = (StgClosure *)((StgTSO *)p)->link;
1020 size = tso_sizeW((StgTSO *)p);
1022 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
1023 && Bdescr((P_)p)->evacuated)
1037 MarkRoot(StgClosure *root)
1039 # if 0 && defined(PAR) && defined(DEBUG)
1040 StgClosure *foo = evacuate(root);
1041 // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
1042 ASSERT(isAlive(foo)); // must be in to-space
1045 return evacuate(root);
1050 static void addBlock(step *step)
1052 bdescr *bd = allocBlock();
1053 bd->gen = step->gen;
1056 if (step->gen->no <= N) {
1062 step->hp_bd->free = step->hp;
1063 step->hp_bd->link = bd;
1064 step->hp = bd->start;
1065 step->hpLim = step->hp + BLOCK_SIZE_W;
1071 //@cindex upd_evacuee
1073 static __inline__ void
1074 upd_evacuee(StgClosure *p, StgClosure *dest)
1076 p->header.info = &EVACUATED_info;
1077 ((StgEvacuated *)p)->evacuee = dest;
1082 static __inline__ StgClosure *
1083 copy(StgClosure *src, nat size, step *step)
1087 TICK_GC_WORDS_COPIED(size);
1088 /* Find out where we're going, using the handy "to" pointer in
1089 * the step of the source object. If it turns out we need to
1090 * evacuate to an older generation, adjust it here (see comment
1093 if (step->gen->no < evac_gen) {
1094 #ifdef NO_EAGER_PROMOTION
1095 failed_to_evac = rtsTrue;
1097 step = &generations[evac_gen].steps[0];
1101 /* chain a new block onto the to-space for the destination step if
1104 if (step->hp + size >= step->hpLim) {
1108 for(to = step->hp, from = (P_)src; size>0; --size) {
1114 upd_evacuee(src,(StgClosure *)dest);
1115 return (StgClosure *)dest;
1118 /* Special version of copy() for when we only want to copy the info
1119 * pointer of an object, but reserve some padding after it. This is
1120 * used to optimise evacuation of BLACKHOLEs.
1125 static __inline__ StgClosure *
1126 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
1130 TICK_GC_WORDS_COPIED(size_to_copy);
1131 if (step->gen->no < evac_gen) {
1132 #ifdef NO_EAGER_PROMOTION
1133 failed_to_evac = rtsTrue;
1135 step = &generations[evac_gen].steps[0];
1139 if (step->hp + size_to_reserve >= step->hpLim) {
1143 for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1148 step->hp += size_to_reserve;
1149 upd_evacuee(src,(StgClosure *)dest);
1150 return (StgClosure *)dest;
1153 //@node Evacuation, Scavenging, Weak Pointers
1154 //@subsection Evacuation
1156 /* -----------------------------------------------------------------------------
1157 Evacuate a large object
1159 This just consists of removing the object from the (doubly-linked)
1160 large_alloc_list, and linking it on to the (singly-linked)
1161 new_large_objects list, from where it will be scavenged later.
1163 Convention: bd->evacuated is /= 0 for a large object that has been
1164 evacuated, or 0 otherwise.
1165 -------------------------------------------------------------------------- */
1167 //@cindex evacuate_large
1170 evacuate_large(StgPtr p, rtsBool mutable)
1172 bdescr *bd = Bdescr(p);
1175 /* should point to the beginning of the block */
1176 ASSERT(((W_)p & BLOCK_MASK) == 0);
1178 /* already evacuated? */
1179 if (bd->evacuated) {
1180 /* Don't forget to set the failed_to_evac flag if we didn't get
1181 * the desired destination (see comments in evacuate()).
1183 if (bd->gen->no < evac_gen) {
1184 failed_to_evac = rtsTrue;
1185 TICK_GC_FAILED_PROMOTION();
1191 /* remove from large_object list */
1193 bd->back->link = bd->link;
1194 } else { /* first object in the list */
1195 step->large_objects = bd->link;
1198 bd->link->back = bd->back;
1201 /* link it on to the evacuated large object list of the destination step
1203 step = bd->step->to;
1204 if (step->gen->no < evac_gen) {
1205 #ifdef NO_EAGER_PROMOTION
1206 failed_to_evac = rtsTrue;
1208 step = &generations[evac_gen].steps[0];
1213 bd->gen = step->gen;
1214 bd->link = step->new_large_objects;
1215 step->new_large_objects = bd;
1219 recordMutable((StgMutClosure *)p);
1223 /* -----------------------------------------------------------------------------
1224 Adding a MUT_CONS to an older generation.
1226 This is necessary from time to time when we end up with an
1227 old-to-new generation pointer in a non-mutable object. We defer
1228 the promotion until the next GC.
1229 -------------------------------------------------------------------------- */
1234 mkMutCons(StgClosure *ptr, generation *gen)
1239 step = &gen->steps[0];
1241 /* chain a new block onto the to-space for the destination step if
1244 if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
1248 q = (StgMutVar *)step->hp;
1249 step->hp += sizeofW(StgMutVar);
1251 SET_HDR(q,&MUT_CONS_info,CCS_GC);
1253 recordOldToNewPtrs((StgMutClosure *)q);
1255 return (StgClosure *)q;
1258 /* -----------------------------------------------------------------------------
1261 This is called (eventually) for every live object in the system.
1263 The caller to evacuate specifies a desired generation in the
1264 evac_gen global variable. The following conditions apply to
1265 evacuating an object which resides in generation M when we're
1266 collecting up to generation N
1270 else evac to step->to
1272 if M < evac_gen evac to evac_gen, step 0
1274 if the object is already evacuated, then we check which generation
1277 if M >= evac_gen do nothing
1278 if M < evac_gen set failed_to_evac flag to indicate that we
1279 didn't manage to evacuate this object into evac_gen.
1281 -------------------------------------------------------------------------- */
1285 evacuate(StgClosure *q)
1290 const StgInfoTable *info;
1293 if (HEAP_ALLOCED(q)) {
1295 if (bd->gen->no > N) {
1296 /* Can't evacuate this object, because it's in a generation
1297 * older than the ones we're collecting. Let's hope that it's
1298 * in evac_gen or older, or we will have to make an IND_OLDGEN object.
1300 if (bd->gen->no < evac_gen) {
1302 failed_to_evac = rtsTrue;
1303 TICK_GC_FAILED_PROMOTION();
1307 step = bd->step->to;
1310 else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
1313 /* make sure the info pointer is into text space */
1314 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1315 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1318 if (info->type==RBH) {
1319 info = REVERT_INFOPTR(info);
1321 belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
1322 q, info_type(q), info, info_type_by_ip(info)));
1326 switch (info -> type) {
1330 nat size = bco_sizeW((StgBCO*)q);
1332 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1333 evacuate_large((P_)q, rtsFalse);
1336 /* just copy the block */
1337 to = copy(q,size,step);
1343 ASSERT(q->header.info != &MUT_CONS_info);
1345 to = copy(q,sizeW_fromITBL(info),step);
1346 recordMutable((StgMutClosure *)to);
1353 return copy(q,sizeofW(StgHeader)+1,step);
1355 case THUNK_1_0: /* here because of MIN_UPD_SIZE */
1360 #ifdef NO_PROMOTE_THUNKS
1361 if (bd->gen->no == 0 &&
1362 bd->step->no != 0 &&
1363 bd->step->no == bd->gen->n_steps-1) {
1367 return copy(q,sizeofW(StgHeader)+2,step);
1375 return copy(q,sizeofW(StgHeader)+2,step);
1381 case IND_OLDGEN_PERM:
1387 return copy(q,sizeW_fromITBL(info),step);
1390 case SE_CAF_BLACKHOLE:
1393 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
1396 to = copy(q,BLACKHOLE_sizeW(),step);
1397 recordMutable((StgMutClosure *)to);
1400 case THUNK_SELECTOR:
1402 const StgInfoTable* selectee_info;
1403 StgClosure* selectee = ((StgSelector*)q)->selectee;
1406 selectee_info = get_itbl(selectee);
1407 switch (selectee_info->type) {
1416 StgWord32 offset = info->layout.selector_offset;
1418 /* check that the size is in range */
1420 (StgWord32)(selectee_info->layout.payload.ptrs +
1421 selectee_info->layout.payload.nptrs));
1423 /* perform the selection! */
1424 q = selectee->payload[offset];
1426 /* if we're already in to-space, there's no need to continue
1427 * with the evacuation, just update the source address with
1428 * a pointer to the (evacuated) constructor field.
1430 if (HEAP_ALLOCED(q)) {
1431 bdescr *bd = Bdescr((P_)q);
1432 if (bd->evacuated) {
1433 if (bd->gen->no < evac_gen) {
1434 failed_to_evac = rtsTrue;
1435 TICK_GC_FAILED_PROMOTION();
1441 /* otherwise, carry on and evacuate this constructor field,
1442 * (but not the constructor itself)
1451 case IND_OLDGEN_PERM:
1452 selectee = ((StgInd *)selectee)->indirectee;
1456 selectee = ((StgCAF *)selectee)->value;
1460 selectee = ((StgEvacuated *)selectee)->evacuee;
1471 case THUNK_SELECTOR:
1472 /* aargh - do recursively???? */
1475 case SE_CAF_BLACKHOLE:
1479 /* not evaluated yet */
1483 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1484 (int)(selectee_info->type));
1487 return copy(q,THUNK_SELECTOR_sizeW(),step);
1491 /* follow chains of indirections, don't evacuate them */
1492 q = ((StgInd*)q)->indirectee;
1496 if (info->srt_len > 0 && major_gc &&
1497 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1498 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1499 static_objects = (StgClosure *)q;
1504 if (info->srt_len > 0 && major_gc &&
1505 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1506 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1507 static_objects = (StgClosure *)q;
1512 if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1513 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1514 static_objects = (StgClosure *)q;
1519 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1520 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1521 static_objects = (StgClosure *)q;
1525 case CONSTR_INTLIKE:
1526 case CONSTR_CHARLIKE:
1527 case CONSTR_NOCAF_STATIC:
1528 /* no need to put these on the static linked list, they don't need
1543 /* shouldn't see these */
1544 barf("evacuate: stack frame at %p\n", q);
1548 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1549 * of stack, tagging and all.
1551 * They can be larger than a block in size. Both are only
1552 * allocated via allocate(), so they should be chained on to the
1553 * large_object list.
1556 nat size = pap_sizeW((StgPAP*)q);
1557 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1558 evacuate_large((P_)q, rtsFalse);
1561 return copy(q,size,step);
1566 /* Already evacuated, just return the forwarding address.
1567 * HOWEVER: if the requested destination generation (evac_gen) is
1568 * older than the actual generation (because the object was
1569 * already evacuated to a younger generation) then we have to
1570 * set the failed_to_evac flag to indicate that we couldn't
1571 * manage to promote the object to the desired generation.
1573 if (evac_gen > 0) { /* optimisation */
1574 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1575 if (Bdescr((P_)p)->gen->no < evac_gen) {
1576 IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
1577 failed_to_evac = rtsTrue;
1578 TICK_GC_FAILED_PROMOTION();
1581 return ((StgEvacuated*)q)->evacuee;
1585 nat size = arr_words_sizeW((StgArrWords *)q);
1587 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1588 evacuate_large((P_)q, rtsFalse);
1591 /* just copy the block */
1592 return copy(q,size,step);
1597 case MUT_ARR_PTRS_FROZEN:
1599 nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);
1601 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1602 evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
1605 /* just copy the block */
1606 to = copy(q,size,step);
1607 if (info->type == MUT_ARR_PTRS) {
1608 recordMutable((StgMutClosure *)to);
1616 StgTSO *tso = (StgTSO *)q;
1617 nat size = tso_sizeW(tso);
1620 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
1622 if (tso->what_next == ThreadRelocated) {
1623 q = (StgClosure *)tso->link;
1627 /* Large TSOs don't get moved, so no relocation is required.
1629 if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
1630 evacuate_large((P_)q, rtsTrue);
1633 /* To evacuate a small TSO, we need to relocate the update frame
1637 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);
1639 diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */
1641 /* relocate the stack pointers... */
1642 new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
1643 new_tso->sp = (StgPtr)new_tso->sp + diff;
1645 relocate_TSO(tso, new_tso);
1647 recordMutable((StgMutClosure *)new_tso);
1648 return (StgClosure *)new_tso;
1653 case RBH: // cf. BLACKHOLE_BQ
1655 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1656 to = copy(q,BLACKHOLE_sizeW(),step);
1657 //ToDo: derive size etc from reverted IP
1658 //to = copy(q,size,step);
1659 recordMutable((StgMutClosure *)to);
1661 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1662 q, info_type(q), to, info_type(to)));
1667 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1668 to = copy(q,sizeofW(StgBlockedFetch),step);
1670 belch("@@ evacuate: %p (%s) to %p (%s)",
1671 q, info_type(q), to, info_type(to)));
1675 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1676 to = copy(q,sizeofW(StgFetchMe),step);
1678 belch("@@ evacuate: %p (%s) to %p (%s)",
1679 q, info_type(q), to, info_type(to)));
1683 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1684 to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
1686 belch("@@ evacuate: %p (%s) to %p (%s)",
1687 q, info_type(q), to, info_type(to)));
1692 barf("evacuate: strange closure type %d", (int)(info->type));
1698 /* -----------------------------------------------------------------------------
1699 relocate_TSO is called just after a TSO has been copied from src to
1700 dest. It adjusts the update frame list for the new location.
1701 -------------------------------------------------------------------------- */
1702 //@cindex relocate_TSO
/* relocate_TSO: called just after a TSO has been copied from `src` to `dest`;
 * walks the chain of update/catch/seq frames on the new stack and shifts each
 * inter-frame link by the word offset the stack moved by, so the chain points
 * into the new stack rather than the old one.
 * NOTE(review): sampled extract -- the `su` initialisation, the
 * frame-advance statements, and the closing braces are not visible here.
 */
1705 relocate_TSO(StgTSO *src, StgTSO *dest)
/* offset between old and new stack locations, in words */
1712 diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */
/* follow the frame chain until it runs off the top of the new stack */
1716 while ((P_)su < dest->stack + dest->stack_size) {
1717 switch (get_itbl(su)->type) {
1719 /* GCC actually manages to common up these three cases! */
1722 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1727 cf = (StgCatchFrame *)su;
1728 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1733 sf = (StgSeqFrame *)su;
1734 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
/* any other frame type on the update-frame chain is a bug */
1743 barf("relocate_TSO %d", (int)(get_itbl(su)->type));
1751 //@node Scavenging, Reverting CAFs, Evacuation
1752 //@subsection Scavenging
1754 //@cindex scavenge_srt
/* scavenge_srt: evacuate every closure referenced from the static reference
 * table (SRT) attached to `info`.  If info->srt_len is zero the srt field may
 * not even exist, but then the loop below executes zero times and never
 * dereferences it.
 * NOTE(review): sampled extract -- the non-DLL branch of the #ifdef and the
 * closing braces are not visible here.
 */
1757 scavenge_srt(const StgInfoTable *info)
1759 StgClosure **srt, **srt_end;
1761 /* evacuate the SRT. If srt_len is zero, then there isn't an
1762 * srt field in the info table. That's ok, because we'll
1763 * never dereference it.
1765 srt = (StgClosure **)(info->srt);
1766 srt_end = srt + info->srt_len;
1767 for (; srt < srt_end; srt++) {
1768 /* Special-case to handle references to closures hiding out in DLLs, since
1769 double indirections are required to get at those. The code generator knows
1770 which is which when generating the SRT, so it stores the (indirect)
1771 reference to the DLL closure in the table by first adding one to it.
1772 We check for this here, and undo the addition before evacuating it.
1774 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1775 closure that's fixed at link-time, and no extra magic is required.
1777 #ifdef ENABLE_WIN32_DLL_SUPPORT
/* bit 0 set => entry is (indirect DLL reference + 1); strip the tag bit and
 * follow one extra indirection before evacuating */
1778 if ( (unsigned long)(*srt) & 0x1 ) {
1779 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
1789 /* -----------------------------------------------------------------------------
1791 -------------------------------------------------------------------------- */
/* scavengeTSO: evacuate all the GC roots held by one thread state object:
 * its run/blocked-queue link, the closure it is blocked on (when the
 * why_blocked reason carries one), its pending blocked-exceptions queue,
 * and every pointer on its stack.
 * NOTE(review): sampled extract -- the closing of the why_blocked condition
 * and the braces are not visible here; the GA cases are presumably inside a
 * PAR-only #if (TODO confirm).
 */
1794 scavengeTSO (StgTSO *tso)
1796 /* chase the link field for any TSOs on the same queue */
1797 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
/* only these blocking reasons store a closure in block_info */
1798 if ( tso->why_blocked == BlockedOnMVar
1799 || tso->why_blocked == BlockedOnBlackHole
1800 || tso->why_blocked == BlockedOnException
1802 || tso->why_blocked == BlockedOnGA
1803 || tso->why_blocked == BlockedOnGA_NoSend
1806 tso->block_info.closure = evacuate(tso->block_info.closure);
1808 if ( tso->blocked_exceptions != NULL ) {
1809 tso->blocked_exceptions =
1810 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
1812 /* scavenge this thread's stack */
1813 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
1816 /* -----------------------------------------------------------------------------
1817 Scavenge a given step until there are no more objects in this step
1820 evac_gen is set by the caller to be either zero (for a step in a
1821 generation < N) or G where G is the generation of the step being
1824 We sometimes temporarily change evac_gen back to zero if we're
1825 scavenging a mutable object where early promotion isn't such a good
1827 -------------------------------------------------------------------------- */
1831 scavenge(step *step)
1834 const StgInfoTable *info;
1836 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
1841 failed_to_evac = rtsFalse;
1843 /* scavenge phase - standard breadth-first scavenging of the
1847 while (bd != step->hp_bd || p < step->hp) {
1849 /* If we're at the end of this block, move on to the next block */
1850 if (bd != step->hp_bd && p == bd->free) {
1856 q = p; /* save ptr to object */
1858 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
1859 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
1861 info = get_itbl((StgClosure *)p);
1863 if (info->type==RBH)
1864 info = REVERT_INFOPTR(info);
1867 switch (info -> type) {
1871 StgBCO* bco = (StgBCO *)p;
1873 for (i = 0; i < bco->n_ptrs; i++) {
1874 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
1876 p += bco_sizeW(bco);
1881 /* treat MVars specially, because we don't want to evacuate the
1882 * mut_link field in the middle of the closure.
1885 StgMVar *mvar = ((StgMVar *)p);
1887 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
1888 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
1889 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
1890 p += sizeofW(StgMVar);
1891 evac_gen = saved_evac_gen;
1899 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
1900 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1901 p += sizeofW(StgHeader) + 2;
1906 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1907 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1913 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1914 p += sizeofW(StgHeader) + 1;
1919 p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
1925 p += sizeofW(StgHeader) + 1;
1932 p += sizeofW(StgHeader) + 2;
1939 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
1940 p += sizeofW(StgHeader) + 2;
1955 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
1956 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
1957 (StgClosure *)*p = evacuate((StgClosure *)*p);
1959 p += info->layout.payload.nptrs;
1964 if (step->gen->no != 0) {
1965 SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
1968 case IND_OLDGEN_PERM:
1969 ((StgIndOldGen *)p)->indirectee =
1970 evacuate(((StgIndOldGen *)p)->indirectee);
1971 if (failed_to_evac) {
1972 failed_to_evac = rtsFalse;
1973 recordOldToNewPtrs((StgMutClosure *)p);
1975 p += sizeofW(StgIndOldGen);
1980 StgCAF *caf = (StgCAF *)p;
1982 caf->body = evacuate(caf->body);
1983 if (failed_to_evac) {
1984 failed_to_evac = rtsFalse;
1985 recordOldToNewPtrs((StgMutClosure *)p);
1987 caf->mut_link = NULL;
1989 p += sizeofW(StgCAF);
1995 StgCAF *caf = (StgCAF *)p;
1997 caf->body = evacuate(caf->body);
1998 caf->value = evacuate(caf->value);
1999 if (failed_to_evac) {
2000 failed_to_evac = rtsFalse;
2001 recordOldToNewPtrs((StgMutClosure *)p);
2003 caf->mut_link = NULL;
2005 p += sizeofW(StgCAF);
2010 /* ignore MUT_CONSs */
2011 if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
2013 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2014 evac_gen = saved_evac_gen;
2016 p += sizeofW(StgMutVar);
2020 case SE_CAF_BLACKHOLE:
2023 p += BLACKHOLE_sizeW();
2028 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2029 (StgClosure *)bh->blocking_queue =
2030 evacuate((StgClosure *)bh->blocking_queue);
2031 if (failed_to_evac) {
2032 failed_to_evac = rtsFalse;
2033 recordMutable((StgMutClosure *)bh);
2035 p += BLACKHOLE_sizeW();
2039 case THUNK_SELECTOR:
2041 StgSelector *s = (StgSelector *)p;
2042 s->selectee = evacuate(s->selectee);
2043 p += THUNK_SELECTOR_sizeW();
2049 barf("scavenge:IND???\n");
2051 case CONSTR_INTLIKE:
2052 case CONSTR_CHARLIKE:
2054 case CONSTR_NOCAF_STATIC:
2058 /* Shouldn't see a static object here. */
2059 barf("scavenge: STATIC object\n");
2071 /* Shouldn't see stack frames here. */
2072 barf("scavenge: stack frame\n");
2074 case AP_UPD: /* same as PAPs */
2076 /* Treat a PAP just like a section of stack, not forgetting to
2077 * evacuate the function pointer too...
2080 StgPAP* pap = (StgPAP *)p;
2082 pap->fun = evacuate(pap->fun);
2083 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2084 p += pap_sizeW(pap);
2089 /* nothing to follow */
2090 p += arr_words_sizeW((StgArrWords *)p);
2094 /* follow everything */
2098 evac_gen = 0; /* repeatedly mutable */
2099 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2100 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2101 (StgClosure *)*p = evacuate((StgClosure *)*p);
2103 evac_gen = saved_evac_gen;
2107 case MUT_ARR_PTRS_FROZEN:
2108 /* follow everything */
2110 StgPtr start = p, next;
2112 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2113 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2114 (StgClosure *)*p = evacuate((StgClosure *)*p);
2116 if (failed_to_evac) {
2117 /* we can do this easier... */
2118 recordMutable((StgMutClosure *)start);
2119 failed_to_evac = rtsFalse;
2126 StgTSO *tso = (StgTSO *)p;
2129 evac_gen = saved_evac_gen;
2130 p += tso_sizeW(tso);
2135 case RBH: // cf. BLACKHOLE_BQ
2137 // nat size, ptrs, nonptrs, vhs;
2139 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2140 StgRBH *rbh = (StgRBH *)p;
2141 (StgClosure *)rbh->blocking_queue =
2142 evacuate((StgClosure *)rbh->blocking_queue);
2143 if (failed_to_evac) {
2144 failed_to_evac = rtsFalse;
2145 recordMutable((StgMutClosure *)rbh);
2148 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2149 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2150 // ToDo: use size of reverted closure here!
2151 p += BLACKHOLE_sizeW();
2157 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2158 /* follow the pointer to the node which is being demanded */
2159 (StgClosure *)bf->node =
2160 evacuate((StgClosure *)bf->node);
2161 /* follow the link to the rest of the blocking queue */
2162 (StgClosure *)bf->link =
2163 evacuate((StgClosure *)bf->link);
2164 if (failed_to_evac) {
2165 failed_to_evac = rtsFalse;
2166 recordMutable((StgMutClosure *)bf);
2169 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2170 bf, info_type((StgClosure *)bf),
2171 bf->node, info_type(bf->node)));
2172 p += sizeofW(StgBlockedFetch);
2178 belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
2179 p, info_type((StgClosure *)p)));
2180 p += sizeofW(StgFetchMe);
2181 break; // nothing to do in this case
2183 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2185 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2186 (StgClosure *)fmbq->blocking_queue =
2187 evacuate((StgClosure *)fmbq->blocking_queue);
2188 if (failed_to_evac) {
2189 failed_to_evac = rtsFalse;
2190 recordMutable((StgMutClosure *)fmbq);
2193 belch("@@ scavenge: %p (%s) exciting, isn't it",
2194 p, info_type((StgClosure *)p)));
2195 p += sizeofW(StgFetchMeBlockingQueue);
2201 barf("scavenge: unimplemented/strange closure type %d @ %p",
2205 barf("scavenge: unimplemented/strange closure type %d @ %p",
2209 /* If we didn't manage to promote all the objects pointed to by
2210 * the current object, then we have to designate this object as
2211 * mutable (because it contains old-to-new generation pointers).
2213 if (failed_to_evac) {
2214 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2215 failed_to_evac = rtsFalse;
2223 /* -----------------------------------------------------------------------------
2224 Scavenge one object.
2226 This is used for objects that are temporarily marked as mutable
2227 because they contain old-to-new generation pointers. Only certain
2228 objects can have this property.
2229 -------------------------------------------------------------------------- */
2230 //@cindex scavenge_one
/* scavenge_one: scavenge a single closure `p` -- used for objects temporarily
 * marked mutable because they contain old-to-new generation pointers.
 * Returns rtsTrue if we failed to promote everything the object points to
 * (the caller then keeps it on the mutable list; see its use in
 * scavenge_mut_once_list).
 * NOTE(review): sampled extract -- many case labels, braces, and the final
 * `return no_luck;` are not visible here.
 */
2233 scavenge_one(StgClosure *p)
2235 const StgInfoTable *info;
/* the info pointer must look like a real info table */
2238 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2239 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2244 if (info->type==RBH)
2245 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2248 switch (info -> type) {
2251 case FUN_1_0: /* hardly worth specialising these guys */
2271 case IND_OLDGEN_PERM:
/* generic pointers-first layout: evacuate each pointer in the payload */
2276 end = (P_)p->payload + info->layout.payload.ptrs;
2277 for (q = (P_)p->payload; q < end; q++) {
2278 (StgClosure *)*q = evacuate((StgClosure *)*q);
2284 case SE_CAF_BLACKHOLE:
2289 case THUNK_SELECTOR:
2291 StgSelector *s = (StgSelector *)p;
2292 s->selectee = evacuate(s->selectee);
2296 case AP_UPD: /* same as PAPs */
2298 /* Treat a PAP just like a section of stack, not forgetting to
2299 * evacuate the function pointer too...
2302 StgPAP* pap = (StgPAP *)p;
2304 pap->fun = evacuate(pap->fun);
2305 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2310 /* This might happen if for instance a MUT_CONS was pointing to a
2311 * THUNK which has since been updated. The IND_OLDGEN will
2312 * be on the mutable list anyway, so we don't need to do anything
2318 barf("scavenge_one: strange object %d", (int)(info->type));
/* report whether any pointee failed to reach evac_gen, and reset the flag */
2321 no_luck = failed_to_evac;
2322 failed_to_evac = rtsFalse;
2327 /* -----------------------------------------------------------------------------
2328 Scavenging mutable lists.
2330 We treat the mutable list of each generation > N (i.e. all the
2331 generations older than the one being collected) as roots. We also
2332 remove non-mutable objects from the mutable list at this point.
2333 -------------------------------------------------------------------------- */
2334 //@cindex scavenge_mut_once_list
2337 scavenge_mut_once_list(generation *gen)
2339 const StgInfoTable *info;
2340 StgMutClosure *p, *next, *new_list;
2342 p = gen->mut_once_list;
2343 new_list = END_MUT_LIST;
2347 failed_to_evac = rtsFalse;
2349 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2351 /* make sure the info pointer is into text space */
2352 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2353 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2357 if (info->type==RBH)
2358 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2360 switch(info->type) {
2363 case IND_OLDGEN_PERM:
2365 /* Try to pull the indirectee into this generation, so we can
2366 * remove the indirection from the mutable list.
2368 ((StgIndOldGen *)p)->indirectee =
2369 evacuate(((StgIndOldGen *)p)->indirectee);
2372 if (RtsFlags.DebugFlags.gc)
2373 /* Debugging code to print out the size of the thing we just
2377 StgPtr start = gen->steps[0].scan;
2378 bdescr *start_bd = gen->steps[0].scan_bd;
2380 scavenge(&gen->steps[0]);
2381 if (start_bd != gen->steps[0].scan_bd) {
2382 size += (P_)BLOCK_ROUND_UP(start) - start;
2383 start_bd = start_bd->link;
2384 while (start_bd != gen->steps[0].scan_bd) {
2385 size += BLOCK_SIZE_W;
2386 start_bd = start_bd->link;
2388 size += gen->steps[0].scan -
2389 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2391 size = gen->steps[0].scan - start;
2393 fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
2397 /* failed_to_evac might happen if we've got more than two
2398 * generations, we're collecting only generation 0, the
2399 * indirection resides in generation 2 and the indirectee is
2402 if (failed_to_evac) {
2403 failed_to_evac = rtsFalse;
2404 p->mut_link = new_list;
2407 /* the mut_link field of an IND_STATIC is overloaded as the
2408 * static link field too (it just so happens that we don't need
2409 * both at the same time), so we need to NULL it out when
2410 * removing this object from the mutable list because the static
2411 * link fields are all assumed to be NULL before doing a major
2419 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2420 * it from the mutable list if possible by promoting whatever it
2423 ASSERT(p->header.info == &MUT_CONS_info);
2424 if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
2425 /* didn't manage to promote everything, so put the
2426 * MUT_CONS back on the list.
2428 p->mut_link = new_list;
2435 StgCAF *caf = (StgCAF *)p;
2436 caf->body = evacuate(caf->body);
2437 caf->value = evacuate(caf->value);
2438 if (failed_to_evac) {
2439 failed_to_evac = rtsFalse;
2440 p->mut_link = new_list;
2450 StgCAF *caf = (StgCAF *)p;
2451 caf->body = evacuate(caf->body);
2452 if (failed_to_evac) {
2453 failed_to_evac = rtsFalse;
2454 p->mut_link = new_list;
2463 /* shouldn't have anything else on the mutables list */
2464 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2468 gen->mut_once_list = new_list;
2471 //@cindex scavenge_mutable_list
2474 scavenge_mutable_list(generation *gen)
2476 const StgInfoTable *info;
2477 StgMutClosure *p, *next;
2479 p = gen->saved_mut_list;
2483 failed_to_evac = rtsFalse;
2485 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2487 /* make sure the info pointer is into text space */
2488 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2489 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2493 if (info->type==RBH)
2494 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2496 switch(info->type) {
2498 case MUT_ARR_PTRS_FROZEN:
2499 /* remove this guy from the mutable list, but follow the ptrs
2500 * anyway (and make sure they get promoted to this gen).
2505 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2507 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2508 (StgClosure *)*q = evacuate((StgClosure *)*q);
2512 if (failed_to_evac) {
2513 failed_to_evac = rtsFalse;
2514 p->mut_link = gen->mut_list;
2521 /* follow everything */
2522 p->mut_link = gen->mut_list;
2527 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2528 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2529 (StgClosure *)*q = evacuate((StgClosure *)*q);
2535 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2536 * it from the mutable list if possible by promoting whatever it
2539 ASSERT(p->header.info != &MUT_CONS_info);
2540 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2541 p->mut_link = gen->mut_list;
2547 StgMVar *mvar = (StgMVar *)p;
2548 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2549 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2550 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2551 p->mut_link = gen->mut_list;
2558 StgTSO *tso = (StgTSO *)p;
2562 /* Don't take this TSO off the mutable list - it might still
2563 * point to some younger objects (because we set evac_gen to 0
2566 tso->mut_link = gen->mut_list;
2567 gen->mut_list = (StgMutClosure *)tso;
2573 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2574 (StgClosure *)bh->blocking_queue =
2575 evacuate((StgClosure *)bh->blocking_queue);
2576 p->mut_link = gen->mut_list;
2581 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2584 case IND_OLDGEN_PERM:
2585 /* Try to pull the indirectee into this generation, so we can
2586 * remove the indirection from the mutable list.
2589 ((StgIndOldGen *)p)->indirectee =
2590 evacuate(((StgIndOldGen *)p)->indirectee);
2593 if (failed_to_evac) {
2594 failed_to_evac = rtsFalse;
2595 p->mut_link = gen->mut_once_list;
2596 gen->mut_once_list = p;
2603 // HWL: check whether all of these are necessary
2605 case RBH: // cf. BLACKHOLE_BQ
2607 // nat size, ptrs, nonptrs, vhs;
2609 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2610 StgRBH *rbh = (StgRBH *)p;
2611 (StgClosure *)rbh->blocking_queue =
2612 evacuate((StgClosure *)rbh->blocking_queue);
2613 if (failed_to_evac) {
2614 failed_to_evac = rtsFalse;
2615 recordMutable((StgMutClosure *)rbh);
2617 // ToDo: use size of reverted closure here!
2618 p += BLACKHOLE_sizeW();
2624 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2625 /* follow the pointer to the node which is being demanded */
2626 (StgClosure *)bf->node =
2627 evacuate((StgClosure *)bf->node);
2628 /* follow the link to the rest of the blocking queue */
2629 (StgClosure *)bf->link =
2630 evacuate((StgClosure *)bf->link);
2631 if (failed_to_evac) {
2632 failed_to_evac = rtsFalse;
2633 recordMutable((StgMutClosure *)bf);
2635 p += sizeofW(StgBlockedFetch);
2640 p += sizeofW(StgFetchMe);
2641 break; // nothing to do in this case
2643 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2645 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2646 (StgClosure *)fmbq->blocking_queue =
2647 evacuate((StgClosure *)fmbq->blocking_queue);
2648 if (failed_to_evac) {
2649 failed_to_evac = rtsFalse;
2650 recordMutable((StgMutClosure *)fmbq);
2652 p += sizeofW(StgFetchMeBlockingQueue);
2658 /* shouldn't have anything else on the mutables list */
2659 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
2664 //@cindex scavenge_static
2667 scavenge_static(void)
2669 StgClosure* p = static_objects;
2670 const StgInfoTable *info;
2672 /* Always evacuate straight to the oldest generation for static
2674 evac_gen = oldest_gen->no;
2676 /* keep going until we've scavenged all the objects on the linked
2678 while (p != END_OF_STATIC_LIST) {
2682 if (info->type==RBH)
2683 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2685 /* make sure the info pointer is into text space */
2686 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2687 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2689 /* Take this object *off* the static_objects list,
2690 * and put it on the scavenged_static_objects list.
2692 static_objects = STATIC_LINK(info,p);
2693 STATIC_LINK(info,p) = scavenged_static_objects;
2694 scavenged_static_objects = p;
2696 switch (info -> type) {
2700 StgInd *ind = (StgInd *)p;
2701 ind->indirectee = evacuate(ind->indirectee);
2703 /* might fail to evacuate it, in which case we have to pop it
2704 * back on the mutable list (and take it off the
2705 * scavenged_static list because the static link and mut link
2706 * pointers are one and the same).
2708 if (failed_to_evac) {
2709 failed_to_evac = rtsFalse;
2710 scavenged_static_objects = STATIC_LINK(info,p);
2711 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
2712 oldest_gen->mut_once_list = (StgMutClosure *)ind;
2726 next = (P_)p->payload + info->layout.payload.ptrs;
2727 /* evacuate the pointers */
2728 for (q = (P_)p->payload; q < next; q++) {
2729 (StgClosure *)*q = evacuate((StgClosure *)*q);
2735 barf("scavenge_static: strange closure %d", (int)(info->type));
2738 ASSERT(failed_to_evac == rtsFalse);
2740 /* get the next static object from the list. Remember, there might
2741 * be more stuff on this list now that we've done some evacuating!
2742 * (static_objects is a global)
2748 /* -----------------------------------------------------------------------------
2749 scavenge_stack walks over a section of stack and evacuates all the
2750 objects pointed to by it. We can use the same code for walking
2751 PAPs, since these are just sections of copied stack.
2752 -------------------------------------------------------------------------- */
2753 //@cindex scavenge_stack
2756 scavenge_stack(StgPtr p, StgPtr stack_end)
2759 const StgInfoTable* info;
2762 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
2765 * Each time around this loop, we are looking at a chunk of stack
2766 * that starts with either a pending argument section or an
2767 * activation record.
2770 while (p < stack_end) {
2773 /* If we've got a tag, skip over that many words on the stack */
2774 if (IS_ARG_TAG((W_)q)) {
2779 /* Is q a pointer to a closure?
2781 if (! LOOKS_LIKE_GHC_INFO(q) ) {
2783 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
2784 ASSERT(closure_STATIC((StgClosure *)q));
2786 /* otherwise, must be a pointer into the allocation space. */
2789 (StgClosure *)*p = evacuate((StgClosure *)q);
2795 * Otherwise, q must be the info pointer of an activation
2796 * record. All activation records have 'bitmap' style layout
2799 info = get_itbl((StgClosure *)p);
2801 switch (info->type) {
2803 /* Dynamic bitmap: the mask is stored on the stack */
2805 bitmap = ((StgRetDyn *)p)->liveness;
2806 p = (P_)&((StgRetDyn *)p)->payload[0];
2809 /* probably a slow-entry point return address: */
2817 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
2818 old_p, p, old_p+1));
2820 p++; /* what if FHS!=1 !? -- HWL */
2825 /* Specialised code for update frames, since they're so common.
2826 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
2827 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
2831 StgUpdateFrame *frame = (StgUpdateFrame *)p;
2833 nat type = get_itbl(frame->updatee)->type;
2835 p += sizeofW(StgUpdateFrame);
2836 if (type == EVACUATED) {
2837 frame->updatee = evacuate(frame->updatee);
2840 bdescr *bd = Bdescr((P_)frame->updatee);
2842 if (bd->gen->no > N) {
2843 if (bd->gen->no < evac_gen) {
2844 failed_to_evac = rtsTrue;
2849 /* Don't promote blackholes */
2851 if (!(step->gen->no == 0 &&
2853 step->no == step->gen->n_steps-1)) {
2860 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
2861 sizeofW(StgHeader), step);
2862 frame->updatee = to;
2865 to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
2866 frame->updatee = to;
2867 recordMutable((StgMutClosure *)to);
2870 /* will never be SE_{,CAF_}BLACKHOLE, since we
2871 don't push an update frame for single-entry thunks. KSW 1999-01. */
2872 barf("scavenge_stack: UPDATE_FRAME updatee");
2877 /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
2884 bitmap = info->layout.bitmap;
2886 /* this assumes that the payload starts immediately after the info-ptr */
2888 while (bitmap != 0) {
2889 if ((bitmap & 1) == 0) {
2890 (StgClosure *)*p = evacuate((StgClosure *)*p);
2893 bitmap = bitmap >> 1;
2900 /* large bitmap (> 32 entries) */
2905 StgLargeBitmap *large_bitmap;
2908 large_bitmap = info->layout.large_bitmap;
2911 for (i=0; i<large_bitmap->size; i++) {
2912 bitmap = large_bitmap->bitmap[i];
2913 q = p + sizeof(W_) * 8;
2914 while (bitmap != 0) {
2915 if ((bitmap & 1) == 0) {
2916 (StgClosure *)*p = evacuate((StgClosure *)*p);
2919 bitmap = bitmap >> 1;
2921 if (i+1 < large_bitmap->size) {
2923 (StgClosure *)*p = evacuate((StgClosure *)*p);
2929 /* and don't forget to follow the SRT */
2934 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
2939 /*-----------------------------------------------------------------------------
2940 scavenge the large object list.
2942 evac_gen set by caller; similar games played with evac_gen as with
2943 scavenge() - see comment at the top of scavenge(). Most large
2944   objects are (repeatedly) mutable, so most of the time evac_gen will be zero.
2946 --------------------------------------------------------------------------- */
2947 //@cindex scavenge_large
2950 scavenge_large(step *step)
2954 const StgInfoTable* info;
2955 nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */
2957 evac_gen = 0; /* most objects are mutable */
2958 bd = step->new_large_objects;
2960 for (; bd != NULL; bd = step->new_large_objects) {
2962 /* take this object *off* the large objects list and put it on
2963 * the scavenged large objects list. This is so that we can
2964 * treat new_large_objects as a stack and push new objects on
2965 * the front when evacuating.
2967 step->new_large_objects = bd->link;
2968 dbl_link_onto(bd, &step->scavenged_large_objects);
2971 info = get_itbl((StgClosure *)p);
2973 switch (info->type) {
2975 /* only certain objects can be "large"... */
2978 /* nothing to follow */
2982 /* follow everything */
2986 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2987 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2988 (StgClosure *)*p = evacuate((StgClosure *)*p);
2993 case MUT_ARR_PTRS_FROZEN:
2994 /* follow everything */
2996 StgPtr start = p, next;
2998 evac_gen = saved_evac_gen; /* not really mutable */
2999 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3000 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3001 (StgClosure *)*p = evacuate((StgClosure *)*p);
3004 if (failed_to_evac) {
3005 recordMutable((StgMutClosure *)start);
3012 StgBCO* bco = (StgBCO *)p;
3014 evac_gen = saved_evac_gen;
3015 for (i = 0; i < bco->n_ptrs; i++) {
3016 bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
3023 scavengeTSO((StgTSO *)p);
3029 StgPAP* pap = (StgPAP *)p;
3031 evac_gen = saved_evac_gen; /* not really mutable */
3032 pap->fun = evacuate(pap->fun);
3033 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
3039 barf("scavenge_large: unknown/strange object %d", (int)(info->type));
3044 //@cindex zero_static_object_list
3047 zero_static_object_list(StgClosure* first_static)
3051 const StgInfoTable *info;
3053 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3055 link = STATIC_LINK(info, p);
3056 STATIC_LINK(info,p) = NULL;
3060 /* This function is only needed because we share the mutable link
3061 * field with the static link field in an IND_STATIC, so we have to
3062 * zero the mut_link field before doing a major GC, which needs the
3063 * static link field.
3065 * It doesn't do any harm to zero all the mutable link fields on the
3068 //@cindex zero_mutable_list
3071 zero_mutable_list( StgMutClosure *first )
3073 StgMutClosure *next, *c;
3075 for (c = first; c != END_MUT_LIST; c = next) {
3081 //@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
3082 //@subsection Reverting CAFs
3084 /* -----------------------------------------------------------------------------
3086 -------------------------------------------------------------------------- */
3087 //@cindex RevertCAFs
3089 void RevertCAFs(void)
3094 /* Deal with CAFs created by compiled code. */
3095 for (i = 0; i < usedECafTable; i++) {
3096 SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
3097 ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
3100 /* Deal with CAFs created by the interpreter. */
3101 while (ecafList != END_ECAF_LIST) {
3102 StgCAF* caf = ecafList;
3103 ecafList = caf->link;
3104 ASSERT(get_itbl(caf)->type == CAF_ENTERED);
3105 SET_INFO(caf,&CAF_UNENTERED_info);
3106 caf->value = (StgClosure *)0xdeadbeef;
3107 caf->link = (StgCAF *)0xdeadbeef;
3110 /* Empty out both the table and the list. */
3112 ecafList = END_ECAF_LIST;
3116 //@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
3117 //@subsection Sanity code for CAF garbage collection
3119 /* -----------------------------------------------------------------------------
3120 Sanity code for CAF garbage collection.
3122 With DEBUG turned on, we manage a CAF list in addition to the SRT
3123 mechanism. After GC, we run down the CAF list and blackhole any
3124 CAFs which have been garbage collected. This means we get an error
3125 whenever the program tries to enter a garbage collected CAF.
3127 Any garbage collected CAFs are taken off the CAF list at the same
3129 -------------------------------------------------------------------------- */
3139 const StgInfoTable *info;
3150 ASSERT(info->type == IND_STATIC);
3152 if (STATIC_LINK(info,p) == NULL) {
3153 IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
3155 SET_INFO(p,&BLACKHOLE_info);
3156 p = STATIC_LINK2(info,p);
3160 pp = &STATIC_LINK2(info,p);
3167 /* fprintf(stderr, "%d CAFs live\n", i); */
3171 //@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
3172 //@subsection Lazy black holing
3174 /* -----------------------------------------------------------------------------
3177 Whenever a thread returns to the scheduler after possibly doing
3178 some work, we have to run down the stack and black-hole all the
3179 closures referred to by update frames.
3180 -------------------------------------------------------------------------- */
3181 //@cindex threadLazyBlackHole
3184 threadLazyBlackHole(StgTSO *tso)
3186 StgUpdateFrame *update_frame;
3187 StgBlockingQueue *bh;
3190 stack_end = &tso->stack[tso->stack_size];
3191 update_frame = tso->su;
3194 switch (get_itbl(update_frame)->type) {
3197 update_frame = ((StgCatchFrame *)update_frame)->link;
3201 bh = (StgBlockingQueue *)update_frame->updatee;
3203 /* if the thunk is already blackholed, it means we've also
3204 * already blackholed the rest of the thunks on this stack,
3205 * so we can stop early.
3207 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3208 * don't interfere with this optimisation.
3210 if (bh->header.info == &BLACKHOLE_info) {
3214 if (bh->header.info != &BLACKHOLE_BQ_info &&
3215 bh->header.info != &CAF_BLACKHOLE_info) {
3216 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3217 fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
3219 SET_INFO(bh,&BLACKHOLE_info);
3222 update_frame = update_frame->link;
3226 update_frame = ((StgSeqFrame *)update_frame)->link;
3232 barf("threadPaused");
3237 //@node Stack squeezing, Pausing a thread, Lazy black holing
3238 //@subsection Stack squeezing
3240 /* -----------------------------------------------------------------------------
3243 * Code largely pinched from old RTS, then hacked to bits. We also do
3244 * lazy black holing here.
3246 * -------------------------------------------------------------------------- */
3247 //@cindex threadSqueezeStack
3250 threadSqueezeStack(StgTSO *tso)
3252 lnat displacement = 0;
3253 StgUpdateFrame *frame;
3254 StgUpdateFrame *next_frame; /* Temporally next */
3255 StgUpdateFrame *prev_frame; /* Temporally previous */
3257 rtsBool prev_was_update_frame;
3259 StgUpdateFrame *top_frame;
3260 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3262 void printObj( StgClosure *obj ); // from Printer.c
3264 top_frame = tso->su;
3267 bottom = &(tso->stack[tso->stack_size]);
3270 /* There must be at least one frame, namely the STOP_FRAME.
3272 ASSERT((P_)frame < bottom);
3274 /* Walk down the stack, reversing the links between frames so that
3275 * we can walk back up as we squeeze from the bottom. Note that
3276 * next_frame and prev_frame refer to next and previous as they were
3277 * added to the stack, rather than the way we see them in this
3278 * walk. (It makes the next loop less confusing.)
3280 * Stop if we find an update frame pointing to a black hole
3281 * (see comment in threadLazyBlackHole()).
3285 /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
3286 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3287 prev_frame = frame->link;
3288 frame->link = next_frame;
3293 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3294 printObj((StgClosure *)prev_frame);
3295 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3298 switch (get_itbl(frame)->type) {
3299 case UPDATE_FRAME: upd_frames++;
3300 if (frame->updatee->header.info == &BLACKHOLE_info)
3303 case STOP_FRAME: stop_frames++;
3305 case CATCH_FRAME: catch_frames++;
3307 case SEQ_FRAME: seq_frames++;
3310 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3312 printObj((StgClosure *)prev_frame);
3315 if (get_itbl(frame)->type == UPDATE_FRAME
3316 && frame->updatee->header.info == &BLACKHOLE_info) {
3321 /* Now, we're at the bottom. Frame points to the lowest update
3322 * frame on the stack, and its link actually points to the frame
3323 * above. We have to walk back up the stack, squeezing out empty
3324 * update frames and turning the pointers back around on the way
3327 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3328 * we never want to eliminate it anyway. Just walk one step up
3329 * before starting to squeeze. When you get to the topmost frame,
3330 * remember that there are still some words above it that might have
3337 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3340 * Loop through all of the frames (everything except the very
3341 * bottom). Things are complicated by the fact that we have
3342 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3343 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3345 while (frame != NULL) {
3347 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3348 rtsBool is_update_frame;
3350 next_frame = frame->link;
3351 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3354 * 1. both the previous and current frame are update frames
3355 * 2. the current frame is empty
3357 if (prev_was_update_frame && is_update_frame &&
3358 (P_)prev_frame == frame_bottom + displacement) {
3360 /* Now squeeze out the current frame */
3361 StgClosure *updatee_keep = prev_frame->updatee;
3362 StgClosure *updatee_bypass = frame->updatee;
3365 IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
3369 /* Deal with blocking queues. If both updatees have blocked
3370 * threads, then we should merge the queues into the update
3371 * frame that we're keeping.
3373 * Alternatively, we could just wake them up: they'll just go
3374 * straight to sleep on the proper blackhole! This is less code
3375 * and probably less bug prone, although it's probably much
3378 #if 0 /* do it properly... */
3379 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3380 # error Unimplemented lazy BH warning. (KSW 1999-01)
3382 if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
3383 || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
3385 /* Sigh. It has one. Don't lose those threads! */
3386 if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
3387 /* Urgh. Two queues. Merge them. */
3388 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3390 while (keep_tso->link != END_TSO_QUEUE) {
3391 keep_tso = keep_tso->link;
3393 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3396 /* For simplicity, just swap the BQ for the BH */
3397 P_ temp = updatee_keep;
3399 updatee_keep = updatee_bypass;
3400 updatee_bypass = temp;
3402 /* Record the swap in the kept frame (below) */
3403 prev_frame->updatee = updatee_keep;
3408 TICK_UPD_SQUEEZED();
3409 /* wasn't there something about update squeezing and ticky to be
3410 * sorted out? oh yes: we aren't counting each enter properly
3411 * in this case. See the log somewhere. KSW 1999-04-21
3413 UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */
3415 sp = (P_)frame - 1; /* sp = stuff to slide */
3416 displacement += sizeofW(StgUpdateFrame);
3419 /* No squeeze for this frame */
3420 sp = frame_bottom - 1; /* Keep the current frame */
3422 /* Do lazy black-holing.
3424 if (is_update_frame) {
3425 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3426 if (bh->header.info != &BLACKHOLE_info &&
3427 bh->header.info != &BLACKHOLE_BQ_info &&
3428 bh->header.info != &CAF_BLACKHOLE_info) {
3429 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3430 fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
3432 SET_INFO(bh,&BLACKHOLE_info);
3436 /* Fix the link in the current frame (should point to the frame below) */
3437 frame->link = prev_frame;
3438 prev_was_update_frame = is_update_frame;
3441 /* Now slide all words from sp up to the next frame */
3443 if (displacement > 0) {
3444 P_ next_frame_bottom;
3446 if (next_frame != NULL)
3447 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3449 next_frame_bottom = tso->sp - 1;
3453 fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
3457 while (sp >= next_frame_bottom) {
3458 sp[displacement] = *sp;
3462 (P_)prev_frame = (P_)frame + displacement;
3466 tso->sp += displacement;
3467 tso->su = prev_frame;
3470 fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
3471 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
3475 //@node Pausing a thread, Index, Stack squeezing
3476 //@subsection Pausing a thread
3478 /* -----------------------------------------------------------------------------
3481 * We have to prepare for GC - this means doing lazy black holing
3482 * here. We also take the opportunity to do stack squeezing if it's
3484 * -------------------------------------------------------------------------- */
3485 //@cindex threadPaused
3487 threadPaused(StgTSO *tso)
3489 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3490 threadSqueezeStack(tso); /* does black holing too */
3492 threadLazyBlackHole(tso);
3495 /* -----------------------------------------------------------------------------
3497 * -------------------------------------------------------------------------- */
3500 //@cindex printMutOnceList
3502 printMutOnceList(generation *gen)
3504 StgMutClosure *p, *next;
3506 p = gen->mut_once_list;
3509 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3510 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3511 fprintf(stderr, "%p (%s), ",
3512 p, info_type((StgClosure *)p));
3514 fputc('\n', stderr);
3517 //@cindex printMutableList
3519 printMutableList(generation *gen)
3521 StgMutClosure *p, *next;
3526 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3527 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3528 fprintf(stderr, "%p (%s), ",
3529 p, info_type((StgClosure *)p));
3531 fputc('\n', stderr);
3534 //@cindex maybeLarge
3535 static inline rtsBool
3536 maybeLarge(StgClosure *closure)
3538 StgInfoTable *info = get_itbl(closure);
3540 /* closure types that may be found on the new_large_objects list;
3541 see scavenge_large */
3542 return (info->type == MUT_ARR_PTRS ||
3543 info->type == MUT_ARR_PTRS_FROZEN ||
3544 info->type == TSO ||
3545 info->type == ARR_WORDS ||
3552 //@node Index, , Pausing a thread
3556 //* GarbageCollect:: @cindex\s-+GarbageCollect
3557 //* MarkRoot:: @cindex\s-+MarkRoot
3558 //* RevertCAFs:: @cindex\s-+RevertCAFs
3559 //* addBlock:: @cindex\s-+addBlock
3560 //* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
3561 //* copy:: @cindex\s-+copy
3562 //* copyPart:: @cindex\s-+copyPart
3563 //* evacuate:: @cindex\s-+evacuate
3564 //* evacuate_large:: @cindex\s-+evacuate_large
3565 //* gcCAFs:: @cindex\s-+gcCAFs
3566 //* isAlive:: @cindex\s-+isAlive
3567 //* maybeLarge:: @cindex\s-+maybeLarge
3568 //* mkMutCons:: @cindex\s-+mkMutCons
3569 //* printMutOnceList:: @cindex\s-+printMutOnceList
3570 //* printMutableList:: @cindex\s-+printMutableList
3571 //* relocate_TSO:: @cindex\s-+relocate_TSO
3572 //* scavenge:: @cindex\s-+scavenge
3573 //* scavenge_large:: @cindex\s-+scavenge_large
3574 //* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
3575 //* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
3576 //* scavenge_one:: @cindex\s-+scavenge_one
3577 //* scavenge_srt:: @cindex\s-+scavenge_srt
3578 //* scavenge_stack:: @cindex\s-+scavenge_stack
3579 //* scavenge_static:: @cindex\s-+scavenge_static
3580 //* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
3581 //* threadPaused:: @cindex\s-+threadPaused
3582 //* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
3583 //* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
3584 //* upd_evacuee:: @cindex\s-+upd_evacuee
3585 //* zero_mutable_list:: @cindex\s-+zero_mutable_list
3586 //* zero_static_object_list:: @cindex\s-+zero_static_object_list