/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.83 2000/05/26 08:42:59 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes
#include "StoragePriv.h"
#include "SchedAPI.h"		/* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
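
/* A minimal sketch of the list discipline described above - purely
 * illustrative, not part of the collector.  It assumes a hypothetical
 * static_link() accessor for the static link field; the real code uses
 * the STATIC_LINK()/THUNK_STATIC_LINK()/etc. macros seen in evacuate()
 * below.  Membership is "link field non-zero", hence the end marker 1:
 *
 *   #define END_OF_STATIC_LIST ((StgClosure *)1)
 *
 *   static void push_static (StgClosure *c) {
 *       if (static_link(c) == NULL) {        // not on the list yet
 *           static_link(c) = static_objects; // cons onto the front
 *           static_objects = c;
 *       }
 *   }
 */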
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */

/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;	   /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;	  /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */
//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate		    ( StgClosure *q );
static void	    zero_static_object_list ( StgClosure* first_static );
static void	    zero_mutable_list	    ( StgMutClosure *first );

static rtsBool	    traverse_weak_ptr_list  ( void );
static void	    cleanup_weak_ptr_list   ( StgWeak **list );

static void	    scavenge_stack	    ( StgPtr p, StgPtr stack_end );
static void	    scavenge_large	    ( step *step );
static void	    scavenge		    ( step *step );
static void	    scavenge_static	    ( void );
static void	    scavenge_mutable_list   ( generation *g );
static void	    scavenge_mut_once_list  ( generation *g );

static void	    gcCAFs		    ( void );
//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
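
/* Illustrative only: the destination rule above written out as a tiny
 * helper.  This is a sketch for exposition - evacuate() further down
 * implements the real policy (via the per-step "to" pointer, with the
 * evac_gen promotion logic layered on top).
 *
 *   static step * dest_step (bdescr *bd) {
 *       if (bd->step->no < bd->gen->n_steps - 1)
 *           return bd->step + 1;                          // next higher step
 *       if (bd->gen->no < RtsFlags.GcFlags.generations - 1)
 *           return &generations[bd->gen->no + 1].steps[0]; // next generation
 *       return bd->step;                                  // oldest generation
 *   }
 */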
//@cindex GarbageCollect

void GarbageCollect ( void (*get_roots)(void), rtsBool force_major_gc )
  lnat live, allocated, collected = 0, copied = 0;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",

  /* tell the stats department that we've started a GC */

  /* attribute any costs to CCS_GC */

  /* Approximate how much we allocated */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {
      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->evacuated = 1;	/* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->scan = bd->start;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;

      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
	bd->gen = &generations[g];
	bd->evacuated = 0;	/* *not* a to-space block */
	step->hp = bd->start;
	step->hpLim = step->hp + BLOCK_SIZE_W;

      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);

  /* follow all the roots that the application knows about.
   */

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */
  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   * the CAF document.
   */
  extern void markHugsObjects(void);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
		    checkStaticObjects());

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    /* scavenge each step in generations 0..maxgen */
    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
	if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	step = &generations[gen].steps[st];
	if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
	if (step->new_large_objects != NULL) {
	  scavenge_large(step);

    if (flag) { goto loop; }

    /* must be last... */
    if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
      oldest_gen->max_blocks =
	stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
		RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	if (((int)oldest_gen->max_blocks -
	     (int)oldest_gen->steps[0].to_blocks) <
	    (RtsFlags.GcFlags.pcFreeHeap *
	     RtsFlags.GcFlags.maxHeapSize / 200)) {

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    generations[g].collections++; /* for stats */

    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	/* Tidy the end of the to-space chains */
	step->hp_bd->free = step->hp;
	step->hp_bd->link = NULL;
	/* stats information: how much we copied */
	copied -= step->hp_bd->start + BLOCK_SIZE_W -

      /* for generations we collected... */
      collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

      /* free old memory and shift to-space into from-space for all
       * the collected steps (except the allocation area).  These
       * freed blocks will probably be quickly recycled.
       */
      if (!(g == 0 && s == 0)) {
	freeChain(step->blocks);
	step->blocks = step->to_space;
	step->n_blocks = step->to_blocks;
	step->to_space = NULL;
	for (bd = step->blocks; bd != NULL; bd = bd->link) {
	  bd->evacuated = 0;	/* now from-space */

	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on the
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = step->large_objects; bd != NULL; bd = next) {
	for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
	step->large_objects = step->scavenged_large_objects;

	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 *     max_blocks =    oldest_gen->max_blocks * G
	 *                     ---------------------------
	 *                     (number of generations - 1)
	 *
	 * where G is this generation's number.
	 */
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	       / (RtsFlags.GcFlags.generations-1);
	  generations[g].max_blocks = oldest_gen->max_blocks;
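
	/* Worked example of the interpolation above (illustrative
	 * numbers only): with 6 generations and
	 * oldest_gen->max_blocks = 1000, generation 2 gets
	 * (1000 * 2) / 5 = 400 blocks and generation 4 gets
	 * (1000 * 4) / 5 = 800 blocks, growing linearly up to the
	 * oldest generation. */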
      /* for older generations... */

      /* For older generations, we need to append the
       * scavenged_large_object list (i.e. large objects that have been
       * promoted during this GC) to the large_object list for that step.
       */
      for (bd = step->scavenged_large_objects; bd; bd = next) {
	dbl_link_onto(bd, &step->large_objects);

      /* add the new blocks we promoted during this GC */
      step->n_blocks += step->to_blocks;

  /* Guess the amount of live data for stats. */

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    if (old_to_space != NULL) {
      freeChain(old_to_space);
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
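
    /* Worked example of the 3L/2L argument above (illustrative numbers
     * only): with L = 100 blocks of live data and a nursery also of
     * 100 blocks, the heap needs roughly 3L = 300 blocks.  Shrinking
     * the nursery towards zero brings the requirement down towards
     * 2L = 200 blocks, at the cost of more frequent collections. */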
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;	/* signed on purpose */

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
      blocks = adjusted_blocks;

      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
    resizeNursery(blocks);

  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  if (RtsFlags.GcFlags.heapSizeSuggestion) {
    nat needed = calcNeeded();	/* approx blocks needed at next GC */

    /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
    g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;

    /* Estimate a size for the allocation area based on the
     * information available.  We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close on average.
     *
     * Formula:            suggested - needed
     *                ----------------------------
     *                    1 + g0s0_pcnt_kept/100
     *
     * where 'needed' is the amount of memory needed at the next
     * collection for collecting all steps except g0s0.
     */
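
    /* Worked example of the formula above (illustrative numbers only):
     * with heapSizeSuggestion = 1000 blocks, needed = 200 and
     * g0s0_pcnt_kept = 30, the code below computes
     * blocks = ((1000 - 200) * 100) / (100 + 30) = 615 blocks for
     * the allocation area. */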
    blocks =
	(((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
	(100 + (int)g0s0_pcnt_kept);

    if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
      blocks = RtsFlags.GcFlags.minAllocAreaSize;

    resizeNursery((nat)blocks);

  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  zero_static_object_list(scavenged_static_objects);

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
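
/* How this is driven (sketch only; the real loop lives in
 * GarbageCollect above, parts of which are elided here): the collector
 * keeps scavenging and re-traversing until traverse_weak_ptr_list
 * reports no new work, at which point the weak pointer state is
 * settled.  scavenge_everything is a hypothetical stand-in for the
 * scavenge loop over all steps:
 *
 *   do {
 *       scavenge_everything();
 *   } while (traverse_weak_ptr_list()); // rtsTrue => evacuated something
 */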
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
  StgWeak *w, **last_w, *next_w;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {

      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));

  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->global_link;

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
	(StgClosure *)tmp = isAlive((StgClosure *)t);
	  next = tmp->global_link;
	  tmp->global_link = all_threads;
	  prev = &(t->global_link);
	  next = t->global_link;

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);

    /* And resurrect any threads which were about to become garbage.
     */
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
  StgWeak *w, **last_w;

  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
  const StgInfoTable *info;

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {

  switch (info->type) {

  case IND_OLDGEN:		/* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;

    return ((StgEvacuated *)p)->evacuee;

    size = bco_sizeW((StgBCO*)p);

    size = arr_words_sizeW((StgArrWords *)p);

  case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);

    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
    size = tso_sizeW((StgTSO *)p);

  if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
      && Bdescr((P_)p)->evacuated)

StgClosure *
MarkRoot(StgClosure *root)
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));		// must be in to-space
  return evacuate(root);
static void addBlock(step *step)
  bdescr *bd = allocBlock();
  bd->gen = step->gen;

  if (step->gen->no <= N) {

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;

//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
    step = &generations[evac_gen].steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {

  for(to = step->hp, from = (P_)src; size>0; --size) {

  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
    step = &generations[evac_gen].steps[0];

  if (step->hp + size_to_reserve >= step->hpLim) {

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {

  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
  bdescr *bd = Bdescr(p);

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();

  /* remove from large_object list */
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
    bd->link->back = bd->back;

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
    step = &generations[evac_gen].steps[0];

  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;

    recordMutable((StgMutClosure *)p);

/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;

/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it was evacuated to:

   if  M >= evac_gen     do nothing
   if  M < evac_gen      set failed_to_evac flag to indicate that we
			 didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
  const StgInfoTable *info;

  if (HEAP_ALLOCED(q)) {
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
    step = bd->step->to;
  else step = NULL; /* make sure copy() will crash if HEAP_ALLOCED is wrong */

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
	     belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
		   q, info_type(q), info, info_type_by_ip(info)));

  switch (info -> type) {

      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);

      /* just copy the block */
      to = copy(q,size,step);

    ASSERT(q->header.info != &MUT_CONS_info);
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);

    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == bd->gen->n_steps-1) {
    return copy(q,sizeofW(StgHeader)+2,step);

    return copy(q,sizeofW(StgHeader)+2,step);
  case THUNK_SELECTOR:
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {

	  StgWord32 offset = info->layout.selector_offset;

	  /* check that the size is in range */
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  /* perform the selection! */
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->evacuated) {
	      if (bd->gen->no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */

      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;

	selectee = ((StgCAF *)selectee)->value;

	selectee = ((StgEvacuated *)selectee)->evacuee;

      case THUNK_SELECTOR:
	/* aargh - do recursively???? */

      case SE_CAF_BLACKHOLE:
	/* not evaluated yet */

	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));

    return copy(q,THUNK_SELECTOR_sizeW(),step);
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;

    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;

    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;

    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;

    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */

    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);

    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return copy(q,size,step);
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
    return ((StgEvacuated*)q)->evacuee;

      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);

      /* just copy the block */
      return copy(q,size,step);

  case MUT_ARR_PTRS_FROZEN:
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, info->type == MUT_ARR_PTRS);

      /* just copy the block */
      to = copy(q,size,step);
      if (info->type == MUT_ARR_PTRS) {
	recordMutable((StgMutClosure *)to);

      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsTrue);

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

	diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

	/* relocate the stack pointers... */
	new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
	new_tso->sp = (StgPtr)new_tso->sp + diff;
	new_tso->splim = (StgPtr)new_tso->splim + diff;

	relocate_TSO(tso, new_tso);

	recordMutable((StgMutClosure *)new_tso);
	return (StgClosure *)new_tso;
  case RBH: // cf. BLACKHOLE_BQ
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));

    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));

    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));

    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));

    barf("evacuate: strange closure type %d", (int)(info->type));
/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
//@cindex relocate_TSO

relocate_TSO(StgTSO *src, StgTSO *dest)
  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);

      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);

      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);

      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static void
scavenge_srt(const StgInfoTable *info)
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at them.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
    tso->block_info.closure = evacuate(tso->block_info.closure);
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *step)
  const StgInfoTable *info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects.
   */
  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {

	StgBCO* bco = (StgBCO *)p;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	p += bco_sizeW(bco);

      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
	StgMVar *mvar = ((StgMVar *)p);
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p += sizeofW(StgMVar);
	evac_gen = saved_evac_gen;

      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;

      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */

      p += sizeofW(StgHeader) + 1;

      p += sizeofW(StgHeader) + 2;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	p += info->layout.payload.nptrs;

      if (step->gen->no != 0) {
	SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      p += sizeofW(StgIndOldGen);

	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	caf->mut_link = NULL;
	p += sizeofW(StgCAF);

	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	caf->value = evacuate(caf->value);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	caf->mut_link = NULL;
	p += sizeofW(StgCAF);

      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
      p += sizeofW(StgMutVar);

    case SE_CAF_BLACKHOLE:
      p += BLACKHOLE_sizeW();

	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bh);
	p += BLACKHOLE_sizeW();

    case THUNK_SELECTOR:
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();

      barf("scavenge:IND???\n");
    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_NOCAF_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);

      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);

      /* follow everything */
	evac_gen = 0;		/* repeatedly mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	evac_gen = saved_evac_gen;

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
	StgPtr start = p, next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	if (failed_to_evac) {
	  /* we can do this easier... */
	  recordMutable((StgMutClosure *)start);
	  failed_to_evac = rtsFalse;

	StgTSO *tso = (StgTSO *)p;
	evac_gen = saved_evac_gen;
	p += tso_sizeW(tso);

    case RBH: // cf. BLACKHOLE_BQ
	// nat size, ptrs, nonptrs, vhs;
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();

	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);

	       belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
		     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);

      barf("scavenge: unimplemented/strange closure type %d @ %p",
      barf("scavenge: unimplemented/strange closure type %d @ %p",

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
//@cindex scavenge_one

static rtsBool
scavenge_one(StgClosure *p)
  const StgInfoTable *info;

  ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

  if (info->type==RBH)
    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

  switch (info -> type) {

  case FUN_1_0:			/* hardly worth specialising these guys */
  case IND_OLDGEN_PERM:
      end = (P_)p->payload + info->layout.payload.ptrs;
      for (q = (P_)p->payload; q < end; q++) {
	(StgClosure *)*q = evacuate((StgClosure *)*q);

  case SE_CAF_BLACKHOLE:

  case THUNK_SELECTOR:
      StgSelector *s = (StgSelector *)p;
      s->selectee = evacuate(s->selectee);

  case AP_UPD: /* same as PAPs */
    /* Treat a PAP just like a section of stack, not forgetting to
     * evacuate the function pointer too...
     */
      StgPAP* pap = (StgPAP *)p;

      pap->fun = evacuate(pap->fun);
      scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);

    /* This might happen if for instance a MUT_CONS was pointing to a
     * THUNK which has since been updated.  The IND_OLDGEN will
     * be on the mutable list anyway, so we don't need to do anything
     * here.
     */

    barf("scavenge_one: strange object %d", (int)(info->type));

  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
//@cindex scavenge_mut_once_list

static void
scavenge_mut_once_list(generation *gen)
  const StgInfoTable *info;
  StgMutClosure *p, *next, *new_list;

  p = gen->mut_once_list;
  new_list = END_MUT_LIST;

  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);

      if (RtsFlags.DebugFlags.gc)
      /* Debugging code to print out the size of the thing we just
       * promoted */
	StgPtr start = gen->steps[0].scan;
	bdescr *start_bd = gen->steps[0].scan_bd;
	scavenge(&gen->steps[0]);
	if (start_bd != gen->steps[0].scan_bd) {
	  size += (P_)BLOCK_ROUND_UP(start) - start;
	  start_bd = start_bd->link;
	  while (start_bd != gen->steps[0].scan_bd) {
	    size += BLOCK_SIZE_W;
	    start_bd = start_bd->link;
	  size += gen->steps[0].scan -
	    (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
	  size = gen->steps[0].scan - start;
	fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));

      /* failed_to_evac might happen if we've got more than two
       * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 0.
       */
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = new_list;

      /* the mut_link field of an IND_STATIC is overloaded as the
       * static link field too (it just so happens that we don't need
       * both at the same time), so we need to NULL it out when
       * removing this object from the mutable list because the static
       * link fields are all assumed to be NULL before doing a major
       * collection.
       */

      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info == &MUT_CONS_info);
      if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
	/* didn't manage to promote everything, so put the
	 * MUT_CONS back on the list.
	 */
	p->mut_link = new_list;

	StgCAF *caf = (StgCAF *)p;
	caf->body = evacuate(caf->body);
	caf->value = evacuate(caf->value);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = new_list;

	StgCAF *caf = (StgCAF *)p;
	caf->body = evacuate(caf->body);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = new_list;

      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));

  gen->mut_once_list = new_list;
//@cindex scavenge_mutable_list

static void
scavenge_mutable_list(generation *gen)
  const StgInfoTable *info;
  StgMutClosure *p, *next;

  p = gen->saved_mut_list;

  failed_to_evac = rtsFalse;

  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    switch(info->type) {

    case MUT_ARR_PTRS_FROZEN:
      /* remove this guy from the mutable list, but follow the ptrs
       * anyway (and make sure they get promoted to this gen).
       */
	end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);

	for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);

	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  p->mut_link = gen->mut_list;

      /* follow everything */
      p->mut_link = gen->mut_list;
	end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);

      /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
       * it from the mutable list if possible by promoting whatever it
       * points to.
       */
      ASSERT(p->header.info != &MUT_CONS_info);
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      p->mut_link = gen->mut_list;

	StgMVar *mvar = (StgMVar *)p;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p->mut_link = gen->mut_list;

	StgTSO *tso = (StgTSO *)p;

	/* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
	tso->mut_link = gen->mut_list;
	gen->mut_list = (StgMutClosure *)tso;

	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	p->mut_link = gen->mut_list;

      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);

      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = gen->mut_once_list;
	gen->mut_once_list = p;
#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
        // nat size, ptrs, nonptrs, vhs;
        // char str[80];
        // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
          evacuate((StgClosure *)rbh->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)rbh);
        }
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
      }

    case BLOCKED_FETCH:
      {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        /* follow the pointer to the node which is being demanded */
        (StgClosure *)bf->node =
          evacuate((StgClosure *)bf->node);
        /* follow the link to the rest of the blocking queue */
        (StgClosure *)bf->link =
          evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)bf);
        }
        p += sizeofW(StgBlockedFetch);
        break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
          evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          recordMutable((StgMutClosure *)fmbq);
        }
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
      }
#endif
    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
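
/* [Illustrative sketch, not part of the RTS.]  The MUT_ARR_PTRS cases above
 * all reduce to the same fenced walk over a contiguous vector of closure
 * pointers: compute one-past-the-end from the array's size, then evacuate
 * every word in between.  scavenge_ptr_array() is a hypothetical name; the
 * layout macros are the real ones used above.
 */
#if 0
static void
scavenge_ptr_array(StgMutArrPtrs *arr)
{
  StgPtr q, end;

  end = (P_)arr + mut_arr_ptrs_sizeW(arr);   /* one past the last word */
  for (q = (P_)arr->payload; q < end; q++) {
    (StgClosure *)*q = evacuate((StgClosure *)*q);
  }
}
#endif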
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    info = get_itbl(p);
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
                 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;
    switch (info -> type) {

    case IND_STATIC:
      {
        StgInd *ind = (StgInd *)p;
        ind->indirectee = evacuate(ind->indirectee);

        /* might fail to evacuate it, in which case we have to pop it
         * back on the mutable list (and take it off the
         * scavenged_static list because the static link and mut link
         * pointers are one and the same).
         */
        if (failed_to_evac) {
          failed_to_evac = rtsFalse;
          scavenged_static_objects = STATIC_LINK(info,p);
          ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
          oldest_gen->mut_once_list = (StgMutClosure *)ind;
        }
        break;
      }

    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
        StgPtr q, next;

        next = (P_)p->payload + info->layout.payload.ptrs;
        /* evacuate the pointers */
        for (q = (P_)p->payload; q < next; q++) {
          (StgClosure *)*q = evacuate((StgClosure *)*q);
        }
        break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(failed_to_evac == rtsFalse);
    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
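
/* [Illustrative sketch, not part of the RTS.]  scavenge_static is a
 * worklist loop: because evacuate() may cons newly-reached static closures
 * onto the front of static_objects while we work, the loop must re-read
 * the global head each time round rather than caching a "next" pointer.
 * The generic names below are hypothetical.
 */
#if 0
typedef struct Work_ { struct Work_ *link; } Work;

static Work *work_list;   /* cf. static_objects */
static Work *done_list;   /* cf. scavenged_static_objects */

static void
drain_work_list(void (*process)(Work *))
{
  Work *w = work_list;
  while (w != NULL) {
    work_list = w->link;   /* pop before processing */
    w->link = done_list;   /* move onto the done list */
    done_list = w;
    process(w);            /* may push more work onto work_list */
    w = work_list;         /* re-read the global head */
  }
}
#endif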
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord32 bitmap;

  //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */

  while (p < stack_end) {
    q = *(P_ *)p;
    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }

    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {

#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) {  /* Is it a static closure? */
        ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }

    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;
      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
        StgPtr old_p = p;
        p++; p++;
        IF_DEBUG(sanity,
                 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
                       old_p, p, old_p+1));
#else
        p++; /* what if FHS!=1 !? -- HWL */
#endif
        goto follow_srt;
      }
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
        StgUpdateFrame *frame = (StgUpdateFrame *)p;
        StgClosure *to;
        nat type = get_itbl(frame->updatee)->type;

        p += sizeofW(StgUpdateFrame);
        if (type == EVACUATED) {
          frame->updatee = evacuate(frame->updatee);
          continue;
        } else {
          bdescr *bd = Bdescr((P_)frame->updatee);
          step *step;
          if (bd->gen->no > N) {
            if (bd->gen->no < evac_gen) {
              failed_to_evac = rtsTrue;
            }
            continue;
          }

          /* Don't promote blackholes */
          step = bd->step;
          if (!(step->gen->no == 0 &&
                step->no != 0 &&
                step->no == step->gen->n_steps-1)) {
            step = step->to;
          }

          switch (type) {
          case BLACKHOLE:
          case CAF_BLACKHOLE:
            to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
                          sizeofW(StgHeader), step);
            frame->updatee = to;
            continue;
          case BLACKHOLE_BQ:
            to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
            frame->updatee = to;
            recordMutable((StgMutClosure *)to);
            continue;
          default:
            /* will never be SE_{,CAF_}BLACKHOLE, since we
               don't push an update frame for single-entry thunks.  KSW 1999-01. */
            barf("scavenge_stack: UPDATE_FRAME updatee");
          }
        }
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
        StgLargeBitmap *large_bitmap;
        nat i;

        large_bitmap = info->layout.large_bitmap;
        p++;

        for (i=0; i<large_bitmap->size; i++) {
          bitmap = large_bitmap->bitmap[i];
          q = p + sizeof(W_) * 8;
          while (bitmap != 0) {
            if ((bitmap & 1) == 0) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            p++;
            bitmap = bitmap >> 1;
          }
          if (i+1 < large_bitmap->size) {
            while (p < q) {
              (StgClosure *)*p = evacuate((StgClosure *)*p);
              p++;
            }
          }
        }

        /* and don't forget to follow the SRT */
        goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
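
/* [Illustrative sketch, not part of the RTS.]  The bitmap convention used
 * throughout scavenge_stack: bit i of the liveness mask describes payload
 * word i, and a *clear* bit marks a heap pointer (hence the
 * (bitmap & 1) == 0 tests above).  count_pointer_words() is a hypothetical
 * helper that just makes the bit-walk explicit; it counts the pointer
 * words covered by the non-zero prefix of a small bitmap.
 */
#if 0
static nat
count_pointer_words(StgWord32 bitmap)
{
  nat n = 0;
  while (bitmap != 0) {
    if ((bitmap & 1) == 0) n++;   /* clear bit == pointer word */
    bitmap = bitmap >> 1;
  }
  return n;
}
#endif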
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large

static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;                  /* most objects are mutable */
  bd = step->new_large_objects;

  for (; bd != NULL; bd = step->new_large_objects) {
    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        continue;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
        StgPtr start = p, next;

        evac_gen = saved_evac_gen; /* not really mutable */
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
          (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = 0;
        if (failed_to_evac) {
          recordMutable((StgMutClosure *)start);
        }
        continue;
      }

    case BCO:
      {
        StgBCO* bco = (StgBCO *)p;
        nat i;

        evac_gen = saved_evac_gen;
        for (i = 0; i < bco->n_ptrs; i++) {
          bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
        }
        evac_gen = 0;
        continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;

    case PAP:
      {
        StgPAP* pap = (StgPAP *)p;

        evac_gen = saved_evac_gen; /* not really mutable */
        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        evac_gen = 0;
        continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
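
/* [Illustrative sketch, not part of the RTS.]  The list discipline used by
 * scavenge_large above: new_large_objects is treated as a stack, and each
 * block is moved to the scavenged list *before* being scavenged, so that
 * any large objects evacuated in the process can safely be pushed back
 * onto the front.  drain_large_objects() is a hypothetical name.
 */
#if 0
static void
drain_large_objects(step *stp, void (*scavenge_obj)(StgPtr))
{
  while (stp->new_large_objects != NULL) {
    bdescr *bd = stp->new_large_objects;
    stp->new_large_objects = bd->link;                  /* pop */
    dbl_link_onto(bd, &stp->scavenged_large_objects);   /* move first... */
    scavenge_obj(bd->start);   /* ...then scavenge; may push new blocks */
  }
}
#endif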
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
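
/* [Illustrative sketch, not part of the RTS.]  The time-sharing described
 * above, written as an explicit union: the one link word plays two roles
 * that are never live at the same time, and a major GC expects the static
 * role to start out NULL.  The RTS does not actually use a union (both
 * roles go through the same header word), but this is the invariant the
 * zeroing maintains.  Toy type only.
 */
#if 0
typedef struct SharedLink_ {
  union {
    struct SharedLink_ *mut_link;     /* role 1: mutable-once list (minor GCs) */
    struct SharedLink_ *static_link;  /* role 2: static object list (major GC) */
  } u;
} SharedLink;
#endif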
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
  nat i;

  /* Deal with CAFs created by compiled code. */
  for (i = 0; i < usedECafTable; i++) {
    SET_INFO( (StgInd*)(ecafTable[i].closure), ecafTable[i].origItbl );
    ((StgInd*)(ecafTable[i].closure))->indirectee = 0;
  }

  /* Deal with CAFs created by the interpreter. */
  while (ecafList != END_ECAF_LIST) {
    StgCAF* caf = ecafList;
    ecafList = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = (StgClosure *)0xdeadbeef;
    caf->link  = (StgCAF *)0xdeadbeef;
  }

  /* Empty out both the table and the list. */
  clearECafTable();
  ecafList = END_ECAF_LIST;
}
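
/* [Illustrative sketch, not part of the RTS.]  The essence of reverting a
 * compiled-code CAF, as in the first loop above: put back the info table
 * that was saved when the CAF was entered, and clear the indirection.
 * ECafEntry and revert_one_caf() are hypothetical names; the field names
 * match the table accesses above.
 */
#if 0
typedef struct {
  StgClosure         *closure;    /* the CAF's closure */
  const StgInfoTable *origItbl;   /* info table saved on entry */
} ECafEntry;

static void
revert_one_caf(ECafEntry *e)
{
  SET_INFO((StgInd *)e->closure, e->origItbl);   /* back to unevaluated */
  ((StgInd *)e->closure)->indirectee = 0;        /* drop the cached value */
}
#endif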
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex gcCAFs

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /*  fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
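
/* [Illustrative sketch, not part of the RTS.]  gcCAFs uses the classic
 * pointer-to-pointer deletion idiom: pp always addresses the link field
 * that points at the current node, so dead nodes are spliced out without
 * a separate "previous" pointer or a special case for the list head.  Toy
 * types and unlink_dead() are hypothetical.
 */
#if 0
typedef struct N_ { struct N_ *link; rtsBool dead; } N;

static void
unlink_dead(N **head)
{
  N **pp = head;
  N *p;

  while ((p = *pp) != NULL) {
    if (p->dead) {
      *pp = p->link;    /* splice the node out */
    } else {
      pp = &p->link;    /* advance to the next link field */
    }
  }
}
#endif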
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
        return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
          bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
        fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
        SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;

    default:
      barf("threadPaused");
    }
  }
}
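
/* [Illustrative sketch, not part of the RTS.]  The chain this function
 * walks: tso->su points at the innermost frame, and each frame's link
 * field points at the frame below, so the traversal is a plain list walk
 * that dispatches on the frame's info type.  count_update_frames() is a
 * hypothetical helper using the same dispatch.
 */
#if 0
static nat
count_update_frames(StgTSO *tso)
{
  StgUpdateFrame *f = tso->su;
  nat n = 0;

  while (1) {
    switch (get_itbl(f)->type) {
    case UPDATE_FRAME: n++; f = f->link;                      break;
    case CATCH_FRAME:       f = ((StgCatchFrame *)f)->link;   break;
    case SEQ_FRAME:         f = ((StgSeqFrame *)f)->link;     break;
    case STOP_FRAME:        return n;   /* bottom of the stack */
    default:                barf("count_update_frames");
    }
  }
}
#endif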
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;   /* Temporally next */
  StgUpdateFrame *prev_frame;   /* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#ifdef DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);

  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#ifdef DEBUG
    IF_DEBUG(sanity,
             if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
               printObj((StgClosure *)prev_frame);
               barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
                    frame, prev_frame);
             })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
      if (frame->updatee->header.info == &BLACKHOLE_info)
        bhs++;
      break;
    case STOP_FRAME:  stop_frames++;
      break;
    case CATCH_FRAME: catch_frames++;
      break;
    case SEQ_FRAME:   seq_frames++;
      break;
    default:
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
           frame, prev_frame);
      printObj((StgClosure *)prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
        && frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
        (P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

#ifdef DEBUG
      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      squeezes++;
#endif

      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower --SDM
       */
#if 0 /* do it properly... */
# if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#  error Unimplemented lazy BH warning.  (KSW 1999-01)
# endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
          || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
          ) {
        /* Sigh.  It has one.  Don't lose those threads! */
        if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
          /* Urgh.  Two queues.  Merge them. */
          P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

          while (keep_tso->link != END_TSO_QUEUE) {
            keep_tso = keep_tso->link;
          }
          keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

        } else {
          /* For simplicity, just swap the BQ for the BH */
          P_ temp = updatee_keep;

          updatee_keep = updatee_bypass;
          updatee_bypass = temp;

          /* Record the swap in the kept frame (below) */
          prev_frame->updatee = updatee_keep;
        }
      }
#endif

      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;        /* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);
    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;     /* Keep the current frame */

      /* Do lazy black-holing.
       */
      if (is_update_frame) {
        StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
        if (bh->header.info != &BLACKHOLE_info &&
            bh->header.info != &BLACKHOLE_BQ_info &&
            bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
          fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
          SET_INFO(bh,&BLACKHOLE_info);
        }
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }

    /* Now slide all words from sp up to the next frame */
    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
        next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
        next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
               fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
                       displacement));

      while (sp >= next_frame_bottom) {
        sp[displacement] = *sp;
        sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;

  IF_DEBUG(gc,
           fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
                   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
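
/* [Illustrative sketch, not part of the RTS.]  The link-reversal trick the
 * first loop above uses: on the way down, each frame's link is turned
 * around to point at the frame *above*, so the squeezing pass can walk
 * back up without extra space; the second loop restores the links as it
 * goes.  Shown here on a toy list; reverse_links() is the downward pass.
 */
#if 0
typedef struct F_ { struct F_ *link; } F;

static F *
reverse_links(F *frame)        /* returns the last frame reached */
{
  F *next_frame = NULL, *prev_frame;

  while (frame != NULL) {
    prev_frame = frame->link;  /* remember the frame below */
    frame->link = next_frame;  /* point the link back up */
    next_frame = frame;
    frame = prev_frame;
  }
  return next_frame;
}
#endif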
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);    /* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#ifdef DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}

//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
            p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
          info->type == MUT_ARR_PTRS_FROZEN ||
          info->type == TSO ||
          info->type == ARR_WORDS ||
          info->type == BCO);
}
#endif /* DEBUG */

//@node Index, , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect:: @cindex\s-+GarbageCollect
//* MarkRoot:: @cindex\s-+MarkRoot
//* RevertCAFs:: @cindex\s-+RevertCAFs
//* addBlock:: @cindex\s-+addBlock
//* cleanup_weak_ptr_list:: @cindex\s-+cleanup_weak_ptr_list
//* copy:: @cindex\s-+copy
//* copyPart:: @cindex\s-+copyPart
//* evacuate:: @cindex\s-+evacuate
//* evacuate_large:: @cindex\s-+evacuate_large
//* gcCAFs:: @cindex\s-+gcCAFs
//* isAlive:: @cindex\s-+isAlive
//* maybeLarge:: @cindex\s-+maybeLarge
//* mkMutCons:: @cindex\s-+mkMutCons
//* printMutOnceList:: @cindex\s-+printMutOnceList
//* printMutableList:: @cindex\s-+printMutableList
//* relocate_TSO:: @cindex\s-+relocate_TSO
//* scavenge:: @cindex\s-+scavenge
//* scavenge_large:: @cindex\s-+scavenge_large
//* scavenge_mut_once_list:: @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list:: @cindex\s-+scavenge_mutable_list
//* scavenge_one:: @cindex\s-+scavenge_one
//* scavenge_srt:: @cindex\s-+scavenge_srt
//* scavenge_stack:: @cindex\s-+scavenge_stack
//* scavenge_static:: @cindex\s-+scavenge_static
//* threadLazyBlackHole:: @cindex\s-+threadLazyBlackHole
//* threadPaused:: @cindex\s-+threadPaused
//* threadSqueezeStack:: @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list:: @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee:: @cindex\s-+upd_evacuee
//* zero_mutable_list:: @cindex\s-+zero_mutable_list
//* zero_static_object_list:: @cindex\s-+zero_static_object_list
//@end index