/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.82 2000/05/23 13:57:53 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes
#include "StoragePriv.h"
#include "SchedAPI.h"		/* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif

//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
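
/* An illustrative sketch (not compiled): consing an object onto the
 * static object list via its static link field.  A non-zero link field
 * means "already on the list", which is why the end marker is '1'
 * rather than NULL.  STATIC_LINK is the accessor used for this field
 * in evacuate() below; the helper name here is hypothetical.
 */
#if 0
static void push_static_object( const StgInfoTable *info, StgClosure *p )
{
  if (STATIC_LINK(info,p) == NULL) {	// not yet on the list
    STATIC_LINK(info,p) = static_objects;
    static_objects = p;			// cons onto the front
  }
}
#endif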
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	   /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		/* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30;	/* percentage of g0s0 live at last minor GC */

//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate		    ( StgClosure *q );
static void	    zero_static_object_list ( StgClosure* first_static );
static void	    zero_mutable_list	    ( StgMutClosure *first );

static rtsBool	    traverse_weak_ptr_list  ( void );
static void	    cleanup_weak_ptr_list   ( StgWeak **list );

static void	    scavenge_stack	    ( StgPtr p, StgPtr stack_end );
static void	    scavenge_large	    ( step *step );
static void	    scavenge		    ( step *step );
static void	    scavenge_static	    ( void );
static void	    scavenge_mutable_list   ( generation *g );
static void	    scavenge_mut_once_list  ( generation *g );

static void	    gcCAFs		    ( void );

//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
	 generation if one exists, or else to-space in the current
	 generation.
       + if the object's generation < N, then evacuate it to to-space
	 in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
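
/* An illustrative condensation (not compiled) of the destination rules
 * above.  The real logic is spread across evacuate() and copy() below,
 * where the choice is additionally overridden by evac_gen; the helper
 * name is hypothetical.
 */
#if 0
static step *evac_dest( bdescr *bd )	// bd: block of the object
{
  if (bd->gen->no > N) {
    return NULL;	// older than any collected generation: don't move it
  }
  return bd->step->to;	// to-space of the next step (or next generation)
}
#endif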
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
{
  bdescr *bd;
  step *step;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  int st, gen;
  rtsBool flag;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  /* tell the stats department that we've started a GC */
  stat_startGC();

  /* attribute any costs to CCS_GC */
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
	N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }

  /* check stack sanity *before* GC (ToDo: check all threads) */
#if defined(GRAN)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
#endif
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->step = step;
      bd->link = NULL;
      bd->evacuated = 1;	/* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->hp_bd = bd;
      step->to_space = bd;
      step->to_blocks = 1;
      step->scan = bd->start;
      step->scan_bd = bd;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      new_blocks++;
      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
	bd->evacuated = 0;
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
	bd = allocBlock();
	bd->gen = &generations[g];
	bd->step = step;
	bd->link = NULL;
	bd->evacuated = 0;	/* *not* a to-space block */
	step->hp = bd->start;
	step->hpLim = step->hp + BLOCK_SIZE_W;
	step->hp_bd = bd;
	new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }

  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif

  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity,
	     checkStaticObjects());
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  /* scavenge each step in generations 0..maxgen */
  for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
    for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
      if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }
      step = &generations[gen].steps[st];
      evac_gen = gen;
      if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
	scavenge(step);
	flag = rtsTrue;
      }
      if (step->new_large_objects != NULL) {
	scavenge_large(step);
	flag = rtsTrue;
      }
    }
  }

  if (flag) { goto loop; }

  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

#if defined(PAR)
  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    if (major_gc) {
      oldest_gen->max_blocks =
	stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
		RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	if (((int)oldest_gen->max_blocks -
	     (int)oldest_gen->steps[0].to_blocks) <
	    (RtsFlags.GcFlags.pcFreeHeap *
	     RtsFlags.GcFlags.maxHeapSize / 200)) {
	  heapOverflow();
	}
      }
    }
  }

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	/* Tidy the end of the to-space chains */
	step->hp_bd->free = step->hp;
	step->hp_bd->link = NULL;
	/* stats information: how much we copied */
	if (g <= N) {
	  copied -= step->hp_bd->start + BLOCK_SIZE_W -
	    step->hp;
	}
      }

      /* for generations we collected... */
      if (g <= N) {

	collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  freeChain(step->blocks);
	  step->blocks = step->to_space;
	  step->n_blocks = step->to_blocks;
	  step->to_space = NULL;
	  step->to_blocks = 0;
	  for (bd = step->blocks; bd != NULL; bd = bd->link) {
	    bd->evacuated = 0;	/* now from-space */
	  }
	}

	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = step->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}
	for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->evacuated = 0;
	}
	step->large_objects = step->scavenged_large_objects;

	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 *     max_blocks =    oldgen_max_blocks * G
	 *                     ----------------------
	 *                     (num_generations - 1)
	 */
	if (RtsFlags.GcFlags.generations > 1) {
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	       / (RtsFlags.GcFlags.generations-1);
	} else {
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}

      /* for older generations... */
      } else {

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = step->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->evacuated = 0;
	  dbl_link_onto(bd, &step->large_objects);
	}

	/* add the new blocks we promoted during this GC */
	step->n_blocks += step->to_blocks;
      }
    }
  }
  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  }

  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  if (RtsFlags.GcFlags.heapSizeSuggestion) {
    int blocks;
    nat needed = calcNeeded();	/* approx blocks needed at next GC */

    /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
    if (N == 0) {
      g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
    }

    /* Estimate a size for the allocation area based on the
     * information available.  We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close on average.
     *
     * Formula:            suggested - needed
     *                ----------------------------
     *                    1 + g0s0_pcnt_kept/100
     *
     * where 'needed' is the amount of memory needed at the next
     * collection for collecting all steps except g0s0.
     */
    blocks =
      (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
      (100 + (int)g0s0_pcnt_kept);

    if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
      blocks = RtsFlags.GcFlags.minAllocAreaSize;
    }

    resizeNursery((nat)blocks);
  }

  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  /* Reset the nursery
   */
  resetNurseries();

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}

//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
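
/* The caller iterates this function to a fixed point, interleaved with
 * scavenging, as in the main loop of GarbageCollect above:
 *
 *   loop:
 *     ...scavenge until no more objects can be evacuated...
 *     if (traverse_weak_ptr_list()) goto loop;   -- new roots may appear
 */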
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }

  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->global_link;
	continue;
      case ThreadKilled:
      case ThreadComplete:
	next = t->global_link;
	*prev = next;
	continue;
      default:
	break;
      }

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
	next = tmp->global_link;
	tmp->global_link = all_threads;
	all_threads = tmp;
	*prev = next;
      } else {
	prev = &(t->global_link);
	next = t->global_link;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}

/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}

/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */
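
/* An illustrative use (not compiled): updating a root held outside the
 * heap after a collection, dropping it if the referent died.  The
 * helper name is hypothetical.
 */
#if 0
static void update_external_root( StgClosure **root )
{
  StgClosure *new = isAlive(*root);
  if (new != NULL) {
    *root = new;	// alive: follows indirections/forwarding pointers
  } else {
    *root = NULL;	// dead: referent was not evacuated
  }
}
#endif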
//@cindex isAlive

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

  while (1) {

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    /* ignore closures in generations that we're not collecting. */
    if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
      return p;
    }

    info = get_itbl(p);

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		/* rely on compatible layout with StgInd */
    case IND_OLDGEN_PERM:
      /* follow indirections */
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      /* alive! */
      return ((StgEvacuated *)p)->evacuee;

    case BCO:
      size = bco_sizeW((StgBCO*)p);
      goto large;

    case ARR_WORDS:
      size = arr_words_sizeW((StgArrWords *)p);
      goto large;

    case MUT_ARR_PTRS:
    case MUT_ARR_PTRS_FROZEN:
      size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
      goto large;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
	p = (StgClosure *)((StgTSO *)p)->link;
	continue;
      }
      size = tso_sizeW((StgTSO *)p);

    large:
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
	  && Bdescr((P_)p)->evacuated)
	return p;
      else
	return NULL;

    default:
      /* dead. */
      return NULL;
    }
  }
}

//@cindex MarkRoot

StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
  return foo;
# else
  return evacuate(root);
# endif
}

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
  new_blocks++;
}

//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */
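
/* In outline, evacuating a large object is pure list surgery on block
 * descriptors; no heap words are copied:
 *
 *   unlink bd from step->large_objects    (doubly-linked, via bd->back)
 *   bd->evacuated = 1
 *   bd->link = dest->new_large_objects    (singly-linked)
 *   dest->new_large_objects = bd
 */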
//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}

/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
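
/* Illustrative use: when scavenging an object in an old generation
 * fails to promote everything it points to (failed_to_evac), the
 * object is remembered with a MUT_CONS, exactly as in scavenge() below:
 *
 *   if (failed_to_evac) {
 *     mkMutCons((StgClosure *)q, &generations[evac_gen]);
 *     failed_to_evac = rtsFalse;
 *   }
 */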
//@cindex mkMutCons

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}

/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	 if  M > N     do nothing
	 else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
			 didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
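
/* The same conditions, condensed into code form (illustrative, not
 * compiled; compare the bodies of evacuate() and copy() below):
 */
#if 0
  if (bd->gen->no > N) {		// M > N: leave it where it is,
    if (bd->gen->no < evac_gen) {	// but note a failed promotion
      failed_to_evac = rtsTrue;
    }
    return q;
  }
  step = bd->step->to;			// M <= N: evac to step->to...
  if (step->gen->no < evac_gen) {	// ...unless evac_gen demands older:
    step = &generations[evac_gen].steps[0];
  }
#endif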
//@cindex evacuate

static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
#ifdef DEBUG
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */
#endif

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
	     belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
		   q, info_type(q), info, info_type_by_ip(info)));
  }

  switch (info -> type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord32 offset = info->layout.selector_offset;

	  /* check that the size is in range */
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  /* perform the selection! */
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->evacuated) {
	      if (bd->gen->no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case CAF_ENTERED:
	selectee = ((StgCAF *)selectee)->value;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
	/* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	/* not evaluated yet */
	break;

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);
  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
    {
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	return copy(q,size,step);
      }
    }

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	/* just copy the block */
	return copy(q,size,step);
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
	if (info->type == MUT_ARR_PTRS) {
	  recordMutable((StgMutClosure *)to);
	}
      }
      return to;
    }

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsTrue);
	return q;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

	diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

	/* relocate the stack pointers... */
	new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
	new_tso->sp = (StgPtr)new_tso->sp + diff;
	new_tso->splim = (StgPtr)new_tso->splim + diff;

	relocate_TSO(tso, new_tso);

	recordMutable((StgMutClosure *)new_tso);
	return (StgClosure *)new_tso;
      }
    }

#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}

/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
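
/* The adjustment is a constant word offset applied to each link in the
 * frame chain, as in the UPDATE_FRAME case below:
 *
 *   diff = (StgPtr)dest->sp - (StgPtr)src->sp;   -- in words
 *   su->link = (StgUpdateFrame *)((StgPtr)su->link + diff);
 */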
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

    /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}

//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static __inline__ void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
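
/* The loop below is the standard Cheney scan: a scan pointer chases the
 * allocation pointer through to-space (illustrative skeleton, not
 * compiled):
 */
#if 0
  p  = step->scan;
  bd = step->scan_bd;
  while (bd != step->hp_bd || p < step->hp) {
    // evacuate the pointer fields of the closure at p, then advance
    // p by the closure's size; evacuation may in turn grow to-space
  }
  step->scan = p; step->scan_bd = bd;
#endif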
//@cindex scavenge

static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {

    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	p += bco_sizeW(bco);
	break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p += sizeofW(StgMVar);
	evac_gen = saved_evac_gen;
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }

    case IND_PERM:
      if (step->gen->no != 0) {
	SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case CAF_UNENTERED:
      {
	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	} else {
	  caf->mut_link = NULL;
	}
	p += sizeofW(StgCAF);
	break;
      }

    case CAF_ENTERED:
      {
	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	caf->value = evacuate(caf->value);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	} else {
	  caf->mut_link = NULL;
	}
	p += sizeofW(StgCAF);
	break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bh);
	}
	p += BLACKHOLE_sizeW();
	break;
      }

    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	evac_gen = 0;		/* repeatedly mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	if (failed_to_evac) {
	  /* we can do this easier... */
	  recordMutable((StgMutClosure *)start);
	  failed_to_evac = rtsFalse;
	}
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	p += tso_sizeW(tso);
	break;
      }

#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// char str[80];
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
	       belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
		     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   (int)(info->type), p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}

/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
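
/* Illustrative use (see scavenge_mut_once_list below): a MUT_CONS can
 * be dropped from the mutable list only when scavenge_one reports that
 * everything its target points to was promoted:
 *
 *   if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
 *     -- failed: keep the MUT_CONS on the list for next time
 *   }
 */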
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
{
  const StgInfoTable *info;
  rtsBool no_luck;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  info = get_itbl(p);

  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info -> type) {

  case FUN:
  case FUN_1_0:			/* hardly worth specialising these guys */
  case FUN_0_1:
  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case THUNK:
  case THUNK_1_0:
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
  case CONSTR:
  case CONSTR_1_0:
  case CONSTR_0_1:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
  case WEAK:
  case FOREIGN:
  case IND_PERM:
  case IND_OLDGEN_PERM:
    {
      StgPtr q, end;

      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
	(StgClosure *)*q = evacuate((StgClosure *)*q);
      }
      break;
    }

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    break;

  case THUNK_SELECTOR:
    {
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);
      break;
    }

  case AP_UPD: /* same as PAPs */
  case PAP:
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
    {
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
      break;
    }

  case IND_OLDGEN:
    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */
    break;

  default:
    barf("scavenge_one: strange object %d", (int)(info->type));
  }

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}

/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
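
/* The removal is done by rebuilding the list as it is walked
 * (illustrative skeleton, not compiled):
 */
#if 0
  new_list = END_MUT_LIST;
  for (; p != END_MUT_LIST; p = next) {
    next = p->mut_link;
    if (still_contains_old_to_new_pointers(p)) {	// hypothetical test
      p->mut_link = new_list;
      new_list = p;		// keep it
    }				// otherwise: drop it
  }
  gen->mut_once_list = new_list;
#endif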
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;
  next = p->mut_link;

  evac_gen = gen->no;
  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);

#ifdef DEBUG
      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted
       */
      {
	StgPtr start = gen->steps[0].scan;
	bdescr *start_bd = gen->steps[0].scan_bd;
	nat size = 0;
	scavenge(&gen->steps[0]);
	if (start_bd != gen->steps[0].scan_bd) {
	  size += (P_)BLOCK_ROUND_UP(start) - start;
	  start_bd = start_bd->link;
	  while (start_bd != gen->steps[0].scan_bd) {
	    size += BLOCK_SIZE_W;
	    start_bd = start_bd->link;
	  }
	  size += gen->steps[0].scan -
	    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
	} else {
	  size = gen->steps[0].scan - start;
	}
	fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
      }
#endif

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = new_list;
	new_list = p;
      } else {
	/* the mut_link field of an IND_STATIC is overloaded as the
	 * static link field too (it just so happens that we don't need
	 * both at the same time), so we need to NULL it out when
	 * removing this object from the mutable list because the static
	 * link fields are all assumed to be NULL before doing a major
	 * collection.
	 */
	p->mut_link = NULL;
      }
      continue;

    case MUT_VAR:
      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
	/* didn't manage to promote everything, so put the
	 * MUT_CONS back on the list.
	 */
	p->mut_link = new_list;
	new_list = p;
      }
      continue;

    case CAF_ENTERED:
      {
	StgCAF *caf = (StgCAF *)p;
	caf->body = evacuate(caf->body);
	caf->value = evacuate(caf->value);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = new_list;
	  new_list = p;
	}
      }
      continue;

    case CAF_UNENTERED:
      {
	StgCAF *caf = (StgCAF *)p;
	caf->body = evacuate(caf->body);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = new_list;
	  new_list = p;
	}
      }
      continue;

    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
    }
  }

  gen->mut_once_list = new_list;
}

//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
{
  const StgInfoTable *info;
  StgMutClosure *p, *next;
2475 p = gen->saved_mut_list;
2479 failed_to_evac = rtsFalse;
2481 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2483 /* make sure the info pointer is into text space */
2484 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2485 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2489 if (info->type==RBH)
2490 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2492 switch(info->type) {
2494 case MUT_ARR_PTRS_FROZEN:
2495 /* remove this guy from the mutable list, but follow the ptrs
2496 * anyway (and make sure they get promoted to this gen).
2501 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2503 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2504 (StgClosure *)*q = evacuate((StgClosure *)*q);
2508 if (failed_to_evac) {
2509 failed_to_evac = rtsFalse;
2510 p->mut_link = gen->mut_list;
2517 /* follow everything */
2518 p->mut_link = gen->mut_list;
2523 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2524 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2525 (StgClosure *)*q = evacuate((StgClosure *)*q);
2531 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2532 * it from the mutable list if possible by promoting whatever it
2535 ASSERT(p->header.info != &MUT_CONS_info);
2536 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2537 p->mut_link = gen->mut_list;
2543 StgMVar *mvar = (StgMVar *)p;
2544 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2545 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2546 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2547 p->mut_link = gen->mut_list;
    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	scavengeTSO(tso);

	/* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
	tso->mut_link = gen->mut_list;
	gen->mut_list = (StgMutClosure *)tso;
	continue;
      }
    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }
      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen->no;
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = gen->mut_once_list;
	gen->mut_once_list = p;
      } else {
	((StgIndOldGen *)p)->mut_link = NULL;
      }
      continue;
#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// char str[80];
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }
    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	p += sizeofW(StgBlockedFetch);
	break;
      }
    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif /* PAR */
    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
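
/* Note the division of labour between the two lists: objects that can
 * be mutated again (MUT_VAR, MVAR, MUT_ARR_PTRS, TSO, BLACKHOLE_BQ)
 * are put straight back on gen->mut_list above, whereas write-once
 * objects (MUT_CONS, the IND_OLDGEN family, CAFs) live on
 * gen->mut_once_list and are dropped as soon as everything they point
 * to has been promoted to this generation.
 */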
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list...
   */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);

#if defined(GRAN) || defined(PAR)
    if (info->type == RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
#endif

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;
    switch (info->type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list (and take it off the
	 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  scavenged_static_objects = STATIC_LINK(info,p);
	  ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
	  oldest_gen->mut_once_list = (StgMutClosure *)ind;
	}
	break;
      }
    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }
    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
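
/* The re-read of static_objects at the bottom of the loop is the
 * crucial bit: evacuate() pushes any static object it reaches onto the
 * front of static_objects, so the list doubles as a work queue.  A
 * stripped-down sketch of that discipline (hypothetical helper name,
 * illustrative only):
 */
#if 0
static void
drain_static_worklist( void )
{
  StgClosure *p;

  while ((p = static_objects) != END_OF_STATIC_LIST) {
    static_objects = STATIC_LINK(get_itbl(p),p);  /* pop the head...          */
    /* ...scavenging p may push new entries onto static_objects, which
     * the next iteration of the loop will pick up.
     */
  }
}
#endif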
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;

  //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(StgPtr *)p;
    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if ( !LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  /* Is it a static closure? */
	ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;

      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
	StgPtr old_p = p;
	p++; p++;
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
#else
	p++; /* what if FHS!=1 !? -- HWL */
#endif
	goto follow_srt;
      }
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);
	  continue;
	} else {
	  bdescr *bd = Bdescr((P_)frame->updatee);
	  step *step;
	  if (bd->gen->no > N) {
	    if (bd->gen->no < evac_gen) {
	      failed_to_evac = rtsTrue;
	    }
	    continue;
	  }

	  /* Don't promote blackholes */
	  step = bd->step;
	  if (!(step->gen->no == 0 &&
		step->no != 0 &&
		step->no == step->gen->n_steps-1)) {
	    step = step->to;
	  }

	  switch (type) {
	  case BLACKHOLE:
	  case CAF_BLACKHOLE:
	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), step);
	    frame->updatee = to;
	    continue;
	  case BLACKHOLE_BQ:
	    to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);
	    continue;
	  default:
	    /* will never be SE_{,CAF_}BLACKHOLE, since we
	       don't push an update frame for single-entry thunks.  KSW 1999-01. */
	    barf("scavenge_stack: UPDATE_FRAME updatee");
	  }
	}
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
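
      /* Worked example for the loop above: with bitmap 0b110 the first
       * payload word has a 0 bit and is evacuated as a pointer, the
       * next two have 1 bits and are skipped as non-pointers; the loop
       * then stops because the bitmap is exhausted, and the outer loop
       * resumes scanning at the following word.  Note that a 0 bit
       * means "pointer" here.
       */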
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
	StgLargeBitmap *large_bitmap;
	nat i;

	large_bitmap = info->layout.large_bitmap;
	p++;

	for (i = 0; i < large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	  }
	  if (i+1 < large_bitmap->size) {
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
	  }
	}

	/* and don't forget to follow the SRT */
	goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;			/* most objects are mutable */
  bd = step->new_large_objects;
  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);
    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	continue;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = 0;
	if (failed_to_evac) {
	  recordMutable((StgMutClosure *)start);
	}
	continue;
      }
    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;

	evac_gen = saved_evac_gen;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	evac_gen = 0;
	continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;
    case AP_UPD:
    case PAP:
      {
	StgPAP* pap = (StgPAP *)p;

	evac_gen = saved_evac_gen; /* not really mutable */
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	evac_gen = 0;
	continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
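
/* zero_mutable_list is intended for the mut_once lists: presumably the
 * collector calls it on each generation's mut_once_list just before a
 * major collection, so that the static link fields (which alias
 * mut_link for IND_STATICs) start out NULL as required above.
 */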
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
#ifdef INTERPRETER
   StgInt i;

   /* Deal with CAFs created by compiled code. */
   for (i = 0; i < usedECafTable; i++) {
      SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
      ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
   }

   /* Deal with CAFs created by the interpreter. */
   while (ecafList != END_ECAF_LIST) {
      StgCAF* caf = ecafList;
      ecafList = caf->link;
      ASSERT(get_itbl(caf)->type == CAF_ENTERED);
      SET_INFO(caf,&CAF_UNENTERED_info);
      caf->value = (StgClosure *)0xdeadbeef;
      caf->link  = (StgCAF *)0xdeadbeef;
   }

   /* Empty out both the table and the list. */
   usedECafTable = 0;
   ecafList = END_ECAF_LIST;
#endif
}
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#ifdef DEBUG

void gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /*  fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {
    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
	return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
	  bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadLazyBlackHole: strange frame type");
    }
  }
}
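
/* The early return when a BLACKHOLE is found relies on an invariant:
 * tso->su chains through every update/catch/seq frame down to the
 * STOP_FRAME at the stack bottom, and any pause black-holes the whole
 * chain.  So an already-black-holed updatee means every frame below
 * it was dealt with by an earlier pause, and the walk can stop.
 */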
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;	/* Temporally next */
  StgUpdateFrame *prev_frame;	/* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
                       if (frame->updatee->header.info == &BLACKHOLE_info)
			 bhs++;
                       break;
    case STOP_FRAME:   stop_frames++;
                       break;
    case CATCH_FRAME:  catch_frames++;
                       break;
    case SEQ_FRAME:    seq_frames++;
                       break;
    default:
      printObj((StgClosure *)prev_frame);
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	   frame, prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif
      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
# if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#  error Unimplemented lazy BH warning.  (KSW 1999-01)
# endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  StgTSO *keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  StgClosure *temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif
      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &BLACKHOLE_info &&
	    bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	  SET_INFO(bh,&BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

#if DEBUG
      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       (long)displacement));
#endif

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
#if DEBUG
  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames));
#endif
}
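
/* Worked example (illustrative): an empty update frame sits directly
 * on top of another update frame,
 *
 *      ...pending words...      <- tso->sp
 *      UPDATE_FRAME (U2)        <- frame (newer, squeezed out)
 *      UPDATE_FRAME (U1)        <- prev_frame (older, kept)
 *
 * The squeeze turns U2 into an indirection to U1 (UPD_IND_NOLOCK
 * above) and slides the words above down over the dead frame by
 * `displacement`, so both thunks receive the value when the surviving
 * frame finally fires, but only one update frame remains on the stack.
 */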
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
	  info->type == MUT_ARR_PTRS_FROZEN ||
	  info->type == TSO ||
	  info->type == ARR_WORDS ||
	  info->type == BCO);
}

#endif /* DEBUG */
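
/* Intended use of maybeLarge (illustrative sketch, DEBUG builds only):
 * sanity-checking that a closure found on a step's new_large_objects
 * list has a type that can legitimately be large, e.g.
 *
 *   ASSERT(maybeLarge((StgClosure *)bd->start));
 */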
//@node Index,  , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* maybeLarge::  @cindex\s-+maybeLarge
//* mkMutCons::  @cindex\s-+mkMutCons
//* printMutOnceList::  @cindex\s-+printMutOnceList
//* printMutableList::  @cindex\s-+printMutableList
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list
//@end index