/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.77 2000/03/31 03:09:36 hwloidl Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

//* STATIC OBJECT LIST::
//* Static function declarations::
//* Sanity code for CAF garbage collection::
//* Lazy black holing::
//* Pausing a thread::

//@node Includes, STATIC OBJECT LIST
//@subsection Includes

#include "StoragePriv.h"
#include "SchedAPI.h"		/* for RevertCAFs prototype */
#include "BlockAlloc.h"
#include "StablePriv.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif

//@node STATIC OBJECT LIST, Static function declarations, Includes
//@subsection STATIC OBJECT LIST
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      /* live static objects */
StgClosure* scavenged_static_objects; /* static objects scavenged so far */
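
/* Illustrative sketch (not compiled): the list discipline described
 * above, as used by scavenge_static() later in this file.  Objects are
 * popped off the front of static_objects, scavenged, and moved onto
 * scavenged_static_objects; anything consed onto static_objects in the
 * meantime is picked up by the same loop.
 */
#if 0
  while (static_objects != END_OF_STATIC_LIST) {
    StgClosure *p = static_objects;
    static_objects = STATIC_LINK(get_itbl(p),p);    /* pop the head */
    STATIC_LINK(get_itbl(p),p) = scavenged_static_objects;
    scavenged_static_objects = p;    /* remember it, to zero the mark later */
    /* ... scavenge the object p ... */
  }
#endif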
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

static StgWeak *old_weak_ptr_list; /* also pending finaliser list */
static rtsBool weak_done;          /* all done for this pass */

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_space;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          /* blocks allocated during this GC */
lnat g0s0_pcnt_kept = 30; /* percentage of g0s0 live at last minor GC */

//@node Static function declarations, Garbage Collect, STATIC OBJECT LIST
//@subsection Static function declarations

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );
static void         revert_dead_CAFs        ( void );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static void         scavenge_large          ( step *step );
static void         scavenge                ( step *step );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );

static void         gcCAFs                  ( void );

//@node Garbage Collect, Weak Pointers, Static function declarations
//@subsection Garbage Collect

/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
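
/* Illustrative sketch (not compiled): the destination rule above.  In
 * the real code this decision is precomputed into each step's "to"
 * pointer, so evacuate() simply uses bd->step->to; written out
 * explicitly it would look roughly like this:
 */
#if 0
  if (bd->step->no + 1 < bd->gen->n_steps) {
    dest = &bd->gen->steps[bd->step->no + 1];       /* next higher step */
  } else if (bd->gen->no + 1 < RtsFlags.GcFlags.generations) {
    dest = &generations[bd->gen->no + 1].steps[0];  /* next generation */
  } else {
    dest = bd->step;                                /* oldest generation: stay put */
  }
#endif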
//@cindex GarbageCollect

void GarbageCollect(void (*get_roots)(void))
{
  lnat live, allocated, collected = 0, copied = 0;
  bdescr *bd;
  step *step;
  nat g, s;
  int gen, st;
  rtsBool flag;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",

  /* tell the stats department that we've started a GC */

  /* attribute any costs to CCS_GC */

  /* Approximate how much we allocated */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  N = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
      N = g;
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

  /* check stack sanity *before* GC (ToDo: check all threads) */
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_space = g0s0->to_space;
    g0s0->to_space = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      /* generation 0, step 0 doesn't need to-space */
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      step = &generations[g].steps[s];
      ASSERT(step->gen->no == g);
      ASSERT(step->hp ? Bdescr(step->hp)->step == step : rtsTrue);
      bd->gen = &generations[g];
      bd->evacuated = 1;	/* it's a to-space block */
      step->hp = bd->start;
      step->hpLim = step->hp + BLOCK_SIZE_W;
      step->scan = bd->start;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
      /* mark the large objects as not evacuated yet */
      for (bd = step->large_objects; bd; bd = bd->link) {
	bd->evacuated = 0;
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      step = &generations[g].steps[s];
      if (step->hp_bd == NULL) {
	bd = allocBlock();
	bd->gen = &generations[g];
	bd->evacuated = 0;	/* *not* a to-space block */
	step->hp = bd->start;
	step->hpLim = step->hp + BLOCK_SIZE_W;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      step->scan = step->hp;
      step->scan_bd = step->hp_bd;
      step->to_space = NULL;
      step->new_large_objects = NULL;
      step->scavenged_large_objects = NULL;
    }
  }

  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  /* Do the mut-once lists first */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots();

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

  /* Mark the entries in the GALA table of the parallel system */
  markLocalGAs(major_gc);

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(major_gc);

  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   * the CAF document.
   */
  extern void markHugsObjects(void);

  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  /* scavenge static objects */
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity,
	     checkStaticObjects());
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  /* scavenge each step in generations 0..maxgen */
  for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
    for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
      if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }
      step = &generations[gen].steps[st];
      if (step->hp_bd != step->scan_bd || step->scan < step->hp) {
	scavenge(step);
	flag = rtsTrue;
      }
      if (step->new_large_objects != NULL) {
	scavenge_large(step);
	flag = rtsTrue;
      }
    }
  }
  if (flag) { goto loop; }

  /* must be last... */
  if (traverse_weak_ptr_list()) { /* returns rtsTrue if evaced something */
    goto loop;
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

  /* Now see which stable names are still alive.
   */
  gcStablePtrTable(major_gc);

  /* revert dead CAFs and update enteredCAFs list */
  revert_dead_CAFs();

  /* Reconstruct the Global Address tables used in GUM */
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkGlobalTSOList(rtsTrue/*check TSOs, too*/));
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));

  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (RtsFlags.GcFlags.generations > 1) {
    oldest_gen->max_blocks =
      stg_max(oldest_gen->steps[0].to_blocks * RtsFlags.GcFlags.oldGenFactor,
	      RtsFlags.GcFlags.minOldGenSize);
    if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
      oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
      if (((int)oldest_gen->max_blocks -
	   (int)oldest_gen->steps[0].to_blocks) <
	  (RtsFlags.GcFlags.pcFreeHeap *
	   RtsFlags.GcFlags.maxHeapSize / 200)) {
	heapOverflow();
      }
    }
  }

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; /* for stats */
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      step = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	/* Tidy the end of the to-space chains */
	step->hp_bd->free = step->hp;
	step->hp_bd->link = NULL;
	/* stats information: how much we copied */
	if (g <= N) {
	  copied -= step->hp_bd->start + BLOCK_SIZE_W -
	    step->hp;
	}
      }

      /* for generations we collected... */
      if (g <= N) {

	collected += step->n_blocks * BLOCK_SIZE_W; /* for stats */

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  freeChain(step->blocks);
	  step->blocks = step->to_space;
	  step->n_blocks = step->to_blocks;
	  step->to_space = NULL;
	  for (bd = step->blocks; bd != NULL; bd = bd->link) {
	    bd->evacuated = 0;	/* now from-space */
	  }
	}

	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on the
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = step->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}
	for (bd = step->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->evacuated = 0;
	}
	step->large_objects = step->scavenged_large_objects;

	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 * max_blocks =    oldgen_max_blocks * G
	 *                 ----------------------
	 *                      oldest_gen_no
	 */
	if (g != 0) {
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	    / (RtsFlags.GcFlags.generations-1);
	} else {
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}

      /* for older generations... */
      } else {

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = step->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->evacuated = 0;
	  dbl_link_onto(bd, &step->large_objects);
	}

	/* add the new blocks we promoted during this GC */
	step->n_blocks += step->to_blocks;
      }
    }
  }

  /* Guess the amount of live data for stats. */
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_space != NULL) {
      freeChain(old_to_space);
    }
    for (bd = g0s0->to_space; bd != NULL; bd = bd->link) {
      bd->evacuated = 0;	/* now from-space */
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  /* signed on purpose */
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  }

  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  if (RtsFlags.GcFlags.heapSizeSuggestion) {
    int blocks;
    nat needed = calcNeeded(); 	/* approx blocks needed at next GC */

    /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
    g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;

    /* Estimate a size for the allocation area based on the
     * information available.  We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close on average.
     *
     * Formula:            suggested - needed
     *                ----------------------------
     *                    1 + g0s0_pcnt_kept/100
     *
     * where 'needed' is the amount of memory needed at the next
     * collection for collecting all steps except g0s0.
     */
    blocks =
      (((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
      (100 + (int)g0s0_pcnt_kept);

    if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
      blocks = RtsFlags.GcFlags.minAllocAreaSize;
    }

    resizeNursery((nat)blocks);
  }

  /* mark the garbage collected CAFs as dead */
  if (major_gc) { gcCAFs(); }

  /* zero the scavenged static object list */
  zero_static_object_list(scavenged_static_objects);

  /* start any pending finalizers */
  scheduleFinalizers(old_weak_ptr_list);

  /* send exceptions to any threads which were about to die */
  resurrectThreads(resurrected_threads);

  /* check sanity after GC */
  IF_DEBUG(sanity, checkSanity(N));

  /* extra GC trace info */
  IF_DEBUG(gc, stat_describe_gens());

  /* symbol-table based profiling */
  /* heapCensus(to_space); */ /* ToDo */

  /* restore enclosing cost centre */
  CCCS = prev_CCS;

  /* check for memory leaks if sanity checking is on */
  IF_DEBUG(sanity, memInventory());

  /* ok, GC over: tell the stats department what happened. */
  stat_endGC(allocated, collected, live, copied, N);
}

//@node Weak Pointers, Evacuation, Garbage Collect
//@subsection Weak Pointers

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
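
/* Illustrative sketch (not compiled) of the caller-side fixpoint; the
 * name scavenge_everything is a placeholder for the scavenging loop in
 * GarbageCollect() above:
 */
#if 0
  do {
    scavenge_everything();		/* drain all to-space areas */
  } while (traverse_weak_ptr_list());	/* rtsTrue => it evacuated something */
#endif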
//@cindex traverse_weak_ptr_list

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      /* evacuate the value and finalizer */
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      /* remove this weak ptr from the old_weak_ptr list */
      *last_w = w->link;
      next_w = w->link;
      /* and put it on the new weak ptr list */
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }

  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      /* Threads which have finished or died get dropped from
       * the list.
       */
      switch (t->what_next) {
      case ThreadKilled:
      case ThreadComplete:
	next = t->global_link;
	*prev = next;
	continue;
      default:
	;
      }

      /* Threads which have already been determined to be alive are
       * moved onto the all_threads list.
       */
      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
	next = tmp->global_link;
	tmp->global_link = all_threads;
	all_threads = tmp;
	*prev = next;
      } else {
	prev = &(t->global_link);
	next = t->global_link;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}

/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

//@cindex cleanup_weak_ptr_list

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if (Bdescr((P_)w)->evacuated == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}

/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.
   -------------------------------------------------------------------------- */
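
/* Illustrative sketch (not compiled) of a typical isAlive() caller,
 * e.g. a table that drops entries whose closures died in this GC:
 */
#if 0
  StgClosure *new_addr;

  if ((new_addr = isAlive(p)) == NULL) {
    /* p is garbage: remove the entry, schedule finalization, etc. */
  } else {
    p = new_addr;	/* still live, but it may have moved */
  }
#endif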
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  nat size;

 loop:
  info = get_itbl(p);

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  /* ignore closures in generations that we're not collecting. */
  if (LOOKS_LIKE_STATIC(p) || Bdescr((P_)p)->gen->no > N) {
    return p;
  }

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:		/* rely on compatible layout with StgInd */
  case IND_OLDGEN_PERM:
    /* follow indirections */
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    /* alive! */
    return ((StgEvacuated *)p)->evacuee;

  case BCO:
    size = bco_sizeW((StgBCO*)p);
    goto large;

  case ARR_WORDS:
    size = arr_words_sizeW((StgArrWords *)p);
    goto large;

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)p);
    goto large;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }
    size = tso_sizeW((StgTSO *)p);
  large:
    if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)
	&& Bdescr((P_)p)->evacuated)
      return p;
    else
      return NULL;

  default:
    /* dead. */
    return NULL;
  }
}

StgClosure *
MarkRoot(StgClosure *root)
{
# if 0 && defined(PAR) && defined(DEBUG)
  StgClosure *foo = evacuate(root);
  // ASSERT(closure_STATIC(foo) || maybeLarge(foo) || Bdescr(foo)->evacuated);
  ASSERT(isAlive(foo));   // must be in to-space
  return foo;
# else
  return evacuate(root);
# endif
}

static void addBlock(step *step)
{
  bdescr *bd = allocBlock();
  bd->gen = step->gen;
  bd->step = step;

  if (step->gen->no <= N) {
    bd->evacuated = 1;
  } else {
    bd->evacuated = 0;
  }

  step->hp_bd->free = step->hp;
  step->hp_bd->link = bd;
  step->hp = bd->start;
  step->hpLim = step->hp + BLOCK_SIZE_W;
  step->hp_bd = bd;
  step->to_blocks++;
  new_blocks++;
}

//@cindex upd_evacuee

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + size >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *step)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  if (step->hp + size_to_reserve >= step->hpLim) {
    addBlock(step);
  }

  for(to = step->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = step->hp;
  step->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
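
/* For example, a BLACKHOLE is evacuated later in this file with
 *
 *	return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);
 *
 * copying only the header while reserving the blackhole's full size in
 * to-space, so the closure can still be updated in place afterwards.
 */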
//@node Evacuation, Scavenging, Weak Pointers
//@subsection Evacuation

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->evacuated is /= 0 for a large object that has been
   evacuated, or 0 otherwise.
   -------------------------------------------------------------------------- */

//@cindex evacuate_large

static void
evacuate_large(StgPtr p, rtsBool mutable)
{
  bdescr *bd = Bdescr(p);
  step *step;

  /* should point to the beginning of the block */
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  /* already evacuated? */
  if (bd->evacuated) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen->no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  step = bd->step;
  /* remove from large_object list */
  if (bd->back) {
    bd->back->link = bd->link;
  } else { /* first object in the list */
    step->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->back = bd->back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  step = bd->step->to;
  if (step->gen->no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    step = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = step;
  bd->gen = step->gen;
  bd->link = step->new_large_objects;
  step->new_large_objects = bd;
  bd->evacuated = 1;

  if (mutable) {
    recordMutable((StgMutClosure *)p);
  }
}

/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *step;

  step = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (step->hp + sizeofW(StgIndOldGen) >= step->hpLim) {
    addBlock(step);
  }

  q = (StgMutVar *)step->hp;
  step->hp += sizeofW(StgMutVar);

  SET_HDR(q,&MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
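
/* Usage sketch: when scavenging fails to promote everything an object
 * points to, the object is recorded with a MUT_CONS so that the
 * old-to-new pointer is revisited at the next GC (see scavenge()):
 *
 *	if (failed_to_evac) {
 *	  mkMutCons((StgClosure *)q, &generations[evac_gen]);
 *	  failed_to_evac = rtsFalse;
 *	}
 */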
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N:

     if  M >= evac_gen
         if  M > N     do nothing
         else          evac to step->to

     if  M < evac_gen  evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

     if  M >= evac_gen  do nothing
     if  M <  evac_gen  set failed_to_evac flag to indicate that we
                        didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
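
/* Illustrative sketch (not compiled): the conditions above as code,
 * where M stands for bd->gen->no:
 */
#if 0
  if (M >= evac_gen) {
    if (M > N) return q;			/* too old: leave it where it is */
    step = bd->step->to;			/* normal destination */
  } else {
    step = &generations[evac_gen].steps[0];	/* forced up to evac_gen */
  }
#endif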
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *step;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);
    if (bd->gen->no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to make an IND_OLDGEN object.
       */
      if (bd->gen->no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    step = bd->step->to;
  }
  else step = NULL;		/* make sure copy() will crash if HEAP_ALLOCED is wrong */

  /* make sure the info pointer is into text space */
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  if (info->type==RBH) {
    info = REVERT_INFOPTR(info);
    IF_DEBUG(gc,
	     belch("@_ Trying to evacuate an RBH %p (%s); reverting to IP %p (%s)",
		   q, info_type(q), info, info_type_by_ip(info)));
  }

  switch (info -> type) {

  case BCO:
    {
      nat size = bco_sizeW((StgBCO*)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
      }
      return to;
    }

  case MUT_VAR:
    ASSERT(q->header.info != &MUT_CONS_info);
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
  case CONSTR_0_1:
    return copy(q,sizeofW(StgHeader)+1,step);

  case THUNK_1_0:		/* here because of MIN_UPD_SIZE */
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen->no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == bd->gen->n_steps-1) {
      step = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,step);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,step);

  case IND_PERM:
  case IND_OLDGEN_PERM:
    return copy(q,sizeW_fromITBL(info),step);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),step);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),step);
    recordMutable((StgMutClosure *)to);
    return to;

  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord32 offset = info->layout.selector_offset;

	  /* check that the size is in range */
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  /* perform the selection! */
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->evacuated) {
	      if (bd->gen->no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case CAF_ENTERED:
	selectee = ((StgCAF *)selectee)->value;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case THUNK_STATIC:
      case THUNK_SELECTOR:
	/* aargh - do recursively???? */
      case CAF_UNENTERED:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	/* not evaluated yet */
	break;

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),step);

  case IND:
  case IND_OLDGEN:
    /* follow chains of indirections, don't evacuate them */
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    if (major_gc && IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    /* shouldn't see these */
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     *
     * They can be larger than a block in size.  Both are only
     * allocated via allocate(), so they should be chained on to the
     * large_object list.
     */
    {
      nat size = pap_sizeW((StgPAP*)q);
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	return copy(q,size,step);
      }
    }

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		/* optimisation */
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen->no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    {
      nat size = arr_words_sizeW((StgArrWords *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsFalse);
	return q;
      } else {
	/* just copy the block */
	return copy(q,size,step);
      }
    }

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    {
      nat size = mut_arr_ptrs_sizeW((StgMutArrPtrs *)q);

      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, info->type == MUT_ARR_PTRS);
	to = q;
      } else {
	/* just copy the block */
	to = copy(q,size,step);
	if (info->type == MUT_ARR_PTRS) {
	  recordMutable((StgMutClosure *)to);
	}
      }
      return to;
    }

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;
      nat size = tso_sizeW(tso);
      int diff;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* Large TSOs don't get moved, so no relocation is required.
       */
      if (size >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
	evacuate_large((P_)q, rtsTrue);
	return q;
      } else {
	/* To evacuate a small TSO, we need to relocate the update frame
	 * list it contains.
	 */
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),step);

	diff = (StgPtr)new_tso - (StgPtr)tso; /* In *words* */

	/* relocate the stack pointers... */
	new_tso->su = (StgUpdateFrame *) ((StgPtr)new_tso->su + diff);
	new_tso->sp = (StgPtr)new_tso->sp + diff;
	new_tso->splim = (StgPtr)new_tso->splim + diff;

	relocate_TSO(tso, new_tso);

	recordMutable((StgMutClosure *)new_tso);
	return (StgClosure *)new_tso;
      }
    }

  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),step);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,step);
      recordMutable((StgMutClosure *)to);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),step);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }
}

/* -----------------------------------------------------------------------------
   relocate_TSO is called just after a TSO has been copied from src to
   dest.  It adjusts the update frame list for the new location.
   -------------------------------------------------------------------------- */
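
/* Every stack-internal pointer in the copied TSO moves by the same
 * word offset, so relocation is a single addition per link field
 * (illustrative sketch, not compiled):
 */
#if 0
  diff = (StgPtr)dest->sp - (StgPtr)src->sp;	/* in words */
  su->link = (StgUpdateFrame *)((StgPtr)su->link + diff);
#endif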
//@cindex relocate_TSO

StgTSO *
relocate_TSO(StgTSO *src, StgTSO *dest)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;
  int diff;

  diff = (StgPtr)dest->sp - (StgPtr)src->sp; /* In *words* */

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      /* GCC actually manages to common up these three cases! */

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      /* all done! */
      break;

    default:
      barf("relocate_TSO %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}

//@node Scavenging, Reverting CAFs, Evacuation
//@subsection Scavenging

//@cindex scavenge_srt

static __inline__ void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
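
/* Round trip of the DLL tag described above (illustrative sketch, not
 * compiled; the store side lives in the code generator and is an
 * assumption here):
 */
#if 0
  *srt = (StgClosure *)((unsigned long)dll_indirection | 0x1);	  /* SRT generation */
  evacuate(*stgCast(StgClosure**,(stgCast(unsigned long,*srt) & ~0x1))); /* GC side */
#endif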
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  /* chase the link field for any TSOs on the same queue */
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  /* scavenge this thread's stack */
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
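
/* The temporary-demotion pattern used repeatedly below (illustrative
 * sketch, not compiled):
 */
#if 0
  evac_gen = 0;			/* object is mutable: don't promote eagerly */
  q->var = evacuate(q->var);	/* evacuate its pointer fields */
  evac_gen = saved_evac_gen;	/* restore the step's generation */
#endif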
static void
scavenge(step *step)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  p = step->scan;
  bd = step->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */

  while (bd != step->hp_bd || p < step->hp) {

    /* If we're at the end of this block, move on to the next block */
    if (bd != step->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    q = p;			/* save ptr to object */

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    if (info->type==RBH)
      info = REVERT_INFOPTR(info);

    switch (info -> type) {

    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	p += bco_sizeW(bco);
	break;
      }

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	p += sizeofW(StgMVar);
	evac_gen = saved_evac_gen;
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; /* MIN_UPD_SIZE */
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      /* fall through */

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }

    case IND_PERM:
      if (step->gen->no != 0) {
	SET_INFO(((StgClosure *)p), &IND_OLDGEN_PERM_info);
      }
      /* fall through */
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case CAF_UNENTERED:
      {
	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	} else {
	  caf->mut_link = NULL;
	}
	p += sizeofW(StgCAF);
	break;
      }

    case CAF_ENTERED:
      {
	StgCAF *caf = (StgCAF *)p;

	caf->body = evacuate(caf->body);
	caf->value = evacuate(caf->value);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	} else {
	  caf->mut_link = NULL;
	}
	p += sizeofW(StgCAF);
	break;
      }

    case MUT_VAR:
      /* ignore MUT_CONSs */
      if (((StgMutVar *)p)->header.info != &MUT_CONS_info) {
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
      }
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bh);
	}
	p += BLACKHOLE_sizeW();
	break;
      }

    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }

    case IND:
    case IND_OLDGEN:
      barf("scavenge:IND???\n");

    case CONSTR_INTLIKE:
    case CONSTR_CHARLIKE:
    case CONSTR_STATIC:
    case CONSTR_NOCAF_STATIC:
    case THUNK_STATIC:
    case FUN_STATIC:
    case IND_STATIC:
      /* Shouldn't see a static object here. */
      barf("scavenge: STATIC object\n");

    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
    case RET_BIG:
    case RET_VEC_BIG:
    case RET_DYN:
    case UPDATE_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
      /* Shouldn't see stack frames here. */
      barf("scavenge: stack frame\n");

    case AP_UPD: /* same as PAPs */
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }

    case ARR_WORDS:
      /* nothing to follow */
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	evac_gen = 0;		/* repeatedly mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	break;
      }

    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	if (failed_to_evac) {
	  /* we can do this easier... */
	  recordMutable((StgMutClosure *)start);
	  failed_to_evac = rtsFalse;
	}
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	p += tso_sizeW(tso);
	break;
      }

    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      IF_DEBUG(gc,
	       belch("@@ scavenge: HWL claims nothing to do for %p (%s)",
		     p, info_type((StgClosure *)p)));
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
      failed_to_evac = rtsFalse;
    }
  }

  step->scan_bd = bd;
  step->scan = p;
}

/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
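
/* Example caller: scavenge_mut_once_list() below applies scavenge_one()
 * to the target of a MUT_CONS and keeps the MUT_CONS on the mutable
 * list only if promotion failed:
 *
 *	if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
 *	  ...put the MUT_CONS back on the list...
 *	}
 */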
2223 //@cindex scavenge_one
2226 scavenge_one(StgClosure *p)
2228 const StgInfoTable *info;
2231 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2232 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2237 if (info->type==RBH)
2238 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2241 switch (info -> type) {
2244 case FUN_1_0: /* hardly worth specialising these guys */
2264 case IND_OLDGEN_PERM:
2269 end = (P_)p->payload + info->layout.payload.ptrs;
2270 for (q = (P_)p->payload; q < end; q++) {
2271 (StgClosure *)*q = evacuate((StgClosure *)*q);
2277 case SE_CAF_BLACKHOLE:
2282 case THUNK_SELECTOR:
2284 StgSelector *s = (StgSelector *)p;
2285 s->selectee = evacuate(s->selectee);
2289 case AP_UPD: /* same as PAPs */
2291 /* Treat a PAP just like a section of stack, not forgetting to
2292 * evacuate the function pointer too...
2295 StgPAP* pap = (StgPAP *)p;
2297 pap->fun = evacuate(pap->fun);
2298 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2303 /* This might happen if for instance a MUT_CONS was pointing to a
2304 * THUNK which has since been updated. The IND_OLDGEN will
2305 * be on the mutable list anyway, so we don't need to do anything
2311 barf("scavenge_one: strange object %d", (int)(info->type));
2314 no_luck = failed_to_evac;
2315 failed_to_evac = rtsFalse;
2320 /* -----------------------------------------------------------------------------
2321 Scavenging mutable lists.
2323 We treat the mutable list of each generation > N (i.e. all the
2324 generations older than the one being collected) as roots. We also
2325 remove non-mutable objects from the mutable list at this point.
2326 -------------------------------------------------------------------------- */
2327 //@cindex scavenge_mut_once_list
2330 scavenge_mut_once_list(generation *gen)
2332 const StgInfoTable *info;
2333 StgMutClosure *p, *next, *new_list;
2335 p = gen->mut_once_list;
2336 new_list = END_MUT_LIST;
2340 failed_to_evac = rtsFalse;
2342 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2344 /* make sure the info pointer is into text space */
2345 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2346 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2350 if (info->type==RBH)
2351 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2353 switch(info->type) {
2356 case IND_OLDGEN_PERM:
2358 /* Try to pull the indirectee into this generation, so we can
2359 * remove the indirection from the mutable list.
2361 ((StgIndOldGen *)p)->indirectee =
2362 evacuate(((StgIndOldGen *)p)->indirectee);
2365 if (RtsFlags.DebugFlags.gc)
2366 /* Debugging code to print out the size of the thing we just
2370 StgPtr start = gen->steps[0].scan;
2371 bdescr *start_bd = gen->steps[0].scan_bd;
2373 scavenge(&gen->steps[0]);
2374 if (start_bd != gen->steps[0].scan_bd) {
2375 size += (P_)BLOCK_ROUND_UP(start) - start;
2376 start_bd = start_bd->link;
2377 while (start_bd != gen->steps[0].scan_bd) {
2378 size += BLOCK_SIZE_W;
2379 start_bd = start_bd->link;
2381 size += gen->steps[0].scan -
2382 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2384 size = gen->steps[0].scan - start;
2386 fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
2390 /* failed_to_evac might happen if we've got more than two
2391 * generations, we're collecting only generation 0, the
2392 * indirection resides in generation 2 and the indirectee is
2395 if (failed_to_evac) {
2396 failed_to_evac = rtsFalse;
2397 p->mut_link = new_list;
2400 /* the mut_link field of an IND_STATIC is overloaded as the
2401 * static link field too (it just so happens that we don't need
2402 * both at the same time), so we need to NULL it out when
2403 * removing this object from the mutable list because the static
2404 * link fields are all assumed to be NULL before doing a major
2412 /* MUT_CONS is a kind of MUT_VAR, except it that we try to remove
2413 * it from the mutable list if possible by promoting whatever it
2416 ASSERT(p->header.info == &MUT_CONS_info);
2417 if (scavenge_one(((StgMutVar *)p)->var) == rtsTrue) {
2418 /* didn't manage to promote everything, so put the
2419 * MUT_CONS back on the list.
2421 p->mut_link = new_list;
2428 StgCAF *caf = (StgCAF *)p;
2429 caf->body = evacuate(caf->body);
2430 caf->value = evacuate(caf->value);
2431 if (failed_to_evac) {
2432 failed_to_evac = rtsFalse;
2433 p->mut_link = new_list;
2443 StgCAF *caf = (StgCAF *)p;
2444 caf->body = evacuate(caf->body);
2445 if (failed_to_evac) {
2446 failed_to_evac = rtsFalse;
2447 p->mut_link = new_list;
2456 /* shouldn't have anything else on the mutables list */
2457 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2461 gen->mut_once_list = new_list;
2464 //@cindex scavenge_mutable_list
2467 scavenge_mutable_list(generation *gen)
2469 const StgInfoTable *info;
2470 StgMutClosure *p, *next;
2472 p = gen->saved_mut_list;
2476 failed_to_evac = rtsFalse;
2478 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2480 /* make sure the info pointer is into text space */
2481 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2482 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2486 if (info->type==RBH)
2487 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2489 switch(info->type) {
2491 case MUT_ARR_PTRS_FROZEN:
2492 /* remove this guy from the mutable list, but follow the ptrs
2493 * anyway (and make sure they get promoted to this gen).
2498 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2500 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2501 (StgClosure *)*q = evacuate((StgClosure *)*q);
2505 if (failed_to_evac) {
2506 failed_to_evac = rtsFalse;
2507 p->mut_link = gen->mut_list;
2514 /* follow everything */
2515 p->mut_link = gen->mut_list;
2520 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2521 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2522 (StgClosure *)*q = evacuate((StgClosure *)*q);
2528 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2529 * it from the mutable list if possible by promoting whatever it
2532 ASSERT(p->header.info != &MUT_CONS_info);
2533 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2534 p->mut_link = gen->mut_list;
2540 StgMVar *mvar = (StgMVar *)p;
2541 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2542 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2543 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2544 p->mut_link = gen->mut_list;
    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	scavengeTSO(tso);

	/* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
	tso->mut_link = gen->mut_list;
	gen->mut_list = (StgMutClosure *)tso;
	continue;
      }
    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	p->mut_link = gen->mut_list;
	gen->mut_list = p;
	continue;
      }
      /* Happens if a BLACKHOLE_BQ in the old generation is updated:
       */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
      /* Try to pull the indirectee into this generation, so we can
       * remove the indirection from the mutable list.
       */
      evac_gen = gen->no;
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      evac_gen = 0;

      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	p->mut_link = gen->mut_once_list;
	gen->mut_once_list = p;
      } else {
	((StgIndOldGen *)p)->mut_link = NULL;
      }
      continue;
#if defined(PAR)
    // HWL: check whether all of these are necessary

    case RBH: // cf. BLACKHOLE_BQ
      {
	// nat size, ptrs, nonptrs, vhs;
	// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }
    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	/* follow the pointer to the node which is being demanded */
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	/* follow the link to the rest of the blocking queue */
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	p += sizeofW(StgBlockedFetch);
	break;
      }
    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case
    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif /* PAR */
    default:
      /* shouldn't have anything else on the mutables list */
      barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
    }
  }
}
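
/* Unlike scavenge_mut_once_list above, this routine leaves most of the
 * objects it visits on gen->mut_list: a MUT_VAR, MVAR, TSO or blocking
 * queue can be written to again after this collection, so it must be
 * rescanned at every GC until it is frozen or dies.
 */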
//@cindex scavenge_static

static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;
  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {
    info = get_itbl(p);
    if (info->type == RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

    /* make sure the info pointer is into text space */
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = STATIC_LINK(info,p);
    STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;
    switch (info->type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list (and take it off the
	 * scavenged_static list because the static link and mut link
	 * pointers are one and the same).
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  scavenged_static_objects = STATIC_LINK(info,p);
	  ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
	  oldest_gen->mut_once_list = (StgMutClosure *)ind;
	}
	break;
      }
    case THUNK_STATIC:
    case FUN_STATIC:
      scavenge_srt(info);
      /* fall through */

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	/* evacuate the pointers */
	for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }
    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
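
/* A small worked example of the list threading above: suppose
 * static_objects is  A -> B -> END.  We detach A (static_objects
 * becomes B -> END), push A onto scavenged_static_objects and scavenge
 * it.  If scavenging A evacuates another static object C, evacuate()
 * links C onto static_objects, so the next iteration picks up C before
 * B.  The loop only terminates once no unscavenged static object is
 * left.
 */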
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
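
/* Conventions used below: in an activation record's bitmap, a 0 bit
 * means the corresponding stack word is a live pointer and is
 * evacuated; a 1 bit means a non-pointer word, which is skipped.  In a
 * pending argument section, non-pointer words are preceded by an
 * argument tag (IS_ARG_TAG) giving their size, and untagged words are
 * closure pointers.
 */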
//@cindex scavenge_stack

static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;

  IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));
  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
  while (p < stack_end) {
    q = *(P_ *)p;
    /* If we've got a tag, skip over that many words on the stack */
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }
    /* Is q a pointer to a closure?
     */
    if (! LOOKS_LIKE_GHC_INFO(q) ) {

#ifdef DEBUG
      if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { /* Is it a static closure? */
	ASSERT(closure_STATIC((StgClosure *)q));
      }
      /* otherwise, must be a pointer into the allocation space. */
#endif

      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }
    /*
     * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
    info = get_itbl((StgClosure *)p);
    switch (info->type) {

      /* Dynamic bitmap: the mask is stored on the stack */
    case RET_DYN:
      bitmap = ((StgRetDyn *)p)->liveness;
      p      = (P_)&((StgRetDyn *)p)->payload[0];
      goto small_bitmap;
      /* probably a slow-entry point return address: */
    case FUN:
    case FUN_STATIC:
      {
#if 0
	StgPtr old_p = p;
	p++; p++;
	IF_DEBUG(sanity,
		 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
		       old_p, p, old_p+1));
#else
	p++; /* what if FHS!=1 !? -- HWL */
#endif
	goto follow_srt;
      }
      /* Specialised code for update frames, since they're so common.
       * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
	nat type = get_itbl(frame->updatee)->type;

	p += sizeofW(StgUpdateFrame);
	if (type == EVACUATED) {
	  frame->updatee = evacuate(frame->updatee);
	  continue;
	} else {
	  bdescr *bd = Bdescr((P_)frame->updatee);
	  step *step;
	  if (bd->gen->no > N) {
	    if (bd->gen->no < evac_gen) {
	      failed_to_evac = rtsTrue;
	    }
	    continue;
	  }

	  /* Don't promote blackholes */
	  step = bd->step;
	  if (!(step->gen->no == 0 &&
		step->no != 0 &&
		step->no == step->gen->n_steps-1)) {
	    step = step->to;
	  }

	  switch (type) {
	  case BLACKHOLE:
	  case CAF_BLACKHOLE:
	    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
			  sizeofW(StgHeader), step);
	    frame->updatee = to;
	    continue;
	  case BLACKHOLE_BQ:
	    to = copy(frame->updatee, BLACKHOLE_sizeW(), step);
	    frame->updatee = to;
	    recordMutable((StgMutClosure *)to);
	    continue;
	  default:
	    /* will never be SE_{,CAF_}BLACKHOLE, since we
	       don't push an update frame for single-entry thunks.  KSW 1999-01. */
	    barf("scavenge_stack: UPDATE_FRAME updatee");
	  }
	}
      }
      /* small bitmap (< 32 entries, or 64 on a 64-bit machine) */
    case STOP_FRAME:
    case CATCH_FRAME:
    case SEQ_FRAME:
    case RET_BCO:
    case RET_SMALL:
    case RET_VEC_SMALL:
      bitmap = info->layout.bitmap;
      p++;
      /* this assumes that the payload starts immediately after the info-ptr */
    small_bitmap:
      while (bitmap != 0) {
	if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
      /* large bitmap (> 32 entries) */
    case RET_BIG:
    case RET_VEC_BIG:
      {
	StgPtr q;
	StgLargeBitmap *large_bitmap;
	nat i;

	large_bitmap = info->layout.large_bitmap;
	p++;

	for (i=0; i<large_bitmap->size; i++) {
	  bitmap = large_bitmap->bitmap[i];
	  q = p + sizeof(W_) * 8;
	  while (bitmap != 0) {
	    if ((bitmap & 1) == 0) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	  }
	  if (i+1 < large_bitmap->size) {
	    while (p < q) {
	      (StgClosure *)*p = evacuate((StgClosure *)*p);
	      p++;
	    }
	  }
	}

	/* and don't forget to follow the SRT */
	goto follow_srt;
      }

    default:
      barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
//@cindex scavenge_large
static void
scavenge_large(step *step)
{
  bdescr *bd;
  StgPtr p;
  const StgInfoTable* info;
  nat saved_evac_gen = evac_gen; /* used for temporarily changing evac_gen */

  evac_gen = 0;			/* most objects are mutable */
  bd = step->new_large_objects;
  for (; bd != NULL; bd = step->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    step->new_large_objects = bd->link;
    dbl_link_onto(bd, &step->scavenged_large_objects);

    p = bd->start;
    info = get_itbl((StgClosure *)p);
    switch (info->type) {

    /* only certain objects can be "large"... */

    case ARR_WORDS:
      /* nothing to follow */
      continue;

    case MUT_ARR_PTRS:
      /* follow everything */
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	continue;
      }
    case MUT_ARR_PTRS_FROZEN:
      /* follow everything */
      {
	StgPtr start = p, next;

	evac_gen = saved_evac_gen; /* not really mutable */
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = 0;
	if (failed_to_evac) {
	  recordMutable((StgMutClosure *)start);
	}
	continue;
      }
    case BCO:
      {
	StgBCO* bco = (StgBCO *)p;
	nat i;

	evac_gen = saved_evac_gen;
	for (i = 0; i < bco->n_ptrs; i++) {
	  bcoConstCPtr(bco,i) = evacuate(bcoConstCPtr(bco,i));
	}
	evac_gen = 0;
	continue;
      }

    case TSO:
      scavengeTSO((StgTSO *)p);
      continue;
    case AP_UPD:
    case PAP:
      {
	StgPAP* pap = (StgPAP *)p;

	evac_gen = saved_evac_gen; /* not really mutable */
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	evac_gen = 0;
	continue;
      }

    default:
      barf("scavenge_large: unknown/strange object %d", (int)(info->type));
    }
  }
}
//@cindex zero_static_object_list

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
//@cindex zero_mutable_list

static void
zero_mutable_list( StgMutClosure *first )
{
  StgMutClosure *next, *c;

  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
//@node Reverting CAFs, Sanity code for CAF garbage collection, Scavenging
//@subsection Reverting CAFs

/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
//@cindex RevertCAFs

void RevertCAFs(void)
{
  while (enteredCAFs != END_CAF_LIST) {
    StgCAF* caf = enteredCAFs;

    enteredCAFs = caf->link;
    ASSERT(get_itbl(caf)->type == CAF_ENTERED);
    SET_INFO(caf,&CAF_UNENTERED_info);
    caf->value = (StgClosure *)0xdeadbeef;
    caf->link  = (StgCAF *)0xdeadbeef;
  }
  enteredCAFs = END_CAF_LIST;
}
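
/* The 0xdeadbeef scribbles above are deliberate: a reverted CAF's old
 * value and link fields must never be used again, and poisoning them
 * makes any stale reference fail recognisably instead of silently
 * corrupting the heap.
 */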
//@cindex revert_dead_CAFs

void revert_dead_CAFs(void)
{
    StgCAF* caf = enteredCAFs;
    enteredCAFs = END_CAF_LIST;
    while (caf != END_CAF_LIST) {
	StgCAF *next = caf->link;
	StgCAF *new;

	new = (StgCAF*)isAlive((StgClosure*)caf);
	if (new) {
	    /* still alive: keep it on the list of entered CAFs */
	    new->link = enteredCAFs;
	    enteredCAFs = new;
	} else {
	    /* dead: revert it, poisoning the old fields */
	    SET_INFO(caf,&CAF_UNENTERED_info);
	    caf->value = (StgClosure*)0xdeadbeef;
	    caf->link  = (StgCAF*)0xdeadbeef;
	}
	caf = next;
    }
}
//@node Sanity code for CAF garbage collection, Lazy black holing, Reverting CAFs
//@subsection Sanity code for CAF garbage collection

/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
//@cindex gcCAFs

#ifdef DEBUG
static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
      /* black hole it */
      SET_INFO(p,&BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  /*  fprintf(stderr, "%d CAFs live\n", i); */
}
#endif
//@node Lazy black holing, Stack squeezing, Sanity code for CAF garbage collection
//@subsection Lazy black holing

/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
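
/* Why lazily?  Blackholing a thunk eagerly on every entry would cost a
 * memory write per thunk entered.  Doing it only when the thread
 * pauses means we pay once per pause instead, and the stack walk below
 * can stop as soon as it meets an update frame whose updatee is
 * already a black hole: everything below that frame was blackholed on
 * an earlier pause.
 */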
//@cindex threadLazyBlackHole

static void
threadLazyBlackHole(StgTSO *tso)
{
  StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;

  stack_end = &tso->stack[tso->stack_size];
  update_frame = tso->su;

  while (1) {
    switch (get_itbl(update_frame)->type) {

    case CATCH_FRAME:
      update_frame = ((StgCatchFrame *)update_frame)->link;
      break;

    case UPDATE_FRAME:
      bh = (StgBlockingQueue *)update_frame->updatee;

      /* if the thunk is already blackholed, it means we've also
       * already blackholed the rest of the thunks on this stack,
       * so we can stop early.
       *
       * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
       * don't interfere with this optimisation.
       */
      if (bh->header.info == &BLACKHOLE_info) {
	return;
      }

      if (bh->header.info != &BLACKHOLE_BQ_info &&
	  bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	SET_INFO(bh,&BLACKHOLE_info);
      }

      update_frame = update_frame->link;
      break;

    case SEQ_FRAME:
      update_frame = ((StgSeqFrame *)update_frame)->link;
      break;

    case STOP_FRAME:
      return;
    default:
      barf("threadLazyBlackHole: weird activation record");
    }
  }
}
//@node Stack squeezing, Pausing a thread, Lazy black holing
//@subsection Stack squeezing

/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
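
/* The idea, roughly: two adjacent update frames will, on return,
 * update their updatees with the same value, so we can cancel the
 * inner frame now by making its updatee an indirection to the outer
 * frame's updatee (the UPD_IND_NOLOCK below) and sliding the
 * intervening stack words down over the dead frame:
 *
 *      ...                       ...
 *      update frame A            update frame A  (updates thunk tA)
 *      update frame B     =>     (squeezed out: tB is now an
 *      ...                        indirection to tA)
 *
 * Long chains of adjacent update frames are common in deeply recursive
 * code, so this can reclaim a lot of stack.
 */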
//@cindex threadSqueezeStack

static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;			/* Temporally next */
  StgUpdateFrame *prev_frame;			/* Temporally previous */
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */

  next_frame = NULL;
  /* bottom - sizeof(StgStopFrame) is the STOP_FRAME */
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME:
      upd_frames++;
      if (frame->updatee->header.info == &BLACKHOLE_info)
	bhs++;
      break;
    case STOP_FRAME:
      stop_frames++;
      break;
    case CATCH_FRAME:
      catch_frames++;
      break;
    case SEQ_FRAME:
      seq_frames++;
      break;
    default:
      printObj((StgClosure *)prev_frame);
      barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	   frame, prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &BLACKHOLE_info) {
      break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * to be moved.
   */

  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {

      /* Now squeeze out the current frame */
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
#if DEBUG
      squeezes++;
#endif
      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
#if 0 /* do it properly... */
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == CAF_BLACKHOLE_info
	  ) {
	/* Sigh.  It has one.  Don't lose those threads! */
	if (GET_INFO(updatee_keep) == BLACKHOLE_BQ_info) {
	  /* Urgh.  Two queues.  Merge them. */
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  /* For simplicity, just swap the BQ for the BH */
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  /* Record the swap in the kept frame (below) */
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif
      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       */
      UPD_IND_NOLOCK(updatee_bypass, updatee_keep); /* this wakes the threads up */

      sp = (P_)frame - 1;	/* sp = stuff to slide */
      displacement += sizeofW(StgUpdateFrame);

    } else {
      /* No squeeze for this frame */
      sp = frame_bottom - 1;	/* Keep the current frame */
      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &BLACKHOLE_info &&
	    bh->header.info != &BLACKHOLE_BQ_info &&
	    bh->header.info != &CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
	  SET_INFO(bh,&BLACKHOLE_info);
	}
      }

      /* Fix the link in the current frame (should point to the frame below) */
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    /* Now slide all words from sp up to the next frame */

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       displacement));

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    prev_frame = (StgUpdateFrame *)((P_)frame + displacement);
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;
  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames));
}
//@node Pausing a thread, Index, Stack squeezing
//@subsection Pausing a thread

/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
//@cindex threadPaused

void
threadPaused(StgTSO *tso)
{
  if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
    threadSqueezeStack(tso);	/* does black holing too */
  else
    threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
//@cindex printMutOnceList
void
printMutOnceList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_once_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex printMutableList
void
printMutableList(generation *gen)
{
  StgMutClosure *p, *next;

  p = gen->mut_list;
  next = p->mut_link;

  fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
  for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
    fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
//@cindex maybeLarge
static inline rtsBool
maybeLarge(StgClosure *closure)
{
  StgInfoTable *info = get_itbl(closure);

  /* closure types that may be found on the new_large_objects list;
     see scavenge_large */
  return (info->type == MUT_ARR_PTRS ||
	  info->type == MUT_ARR_PTRS_FROZEN ||
	  info->type == TSO ||
	  info->type == ARR_WORDS ||
	  info->type == BCO);
}

#endif /* DEBUG */
//@node Index,  , Pausing a thread
//@subsection Index

//@index
//* GarbageCollect::  @cindex\s-+GarbageCollect
//* MarkRoot::  @cindex\s-+MarkRoot
//* RevertCAFs::  @cindex\s-+RevertCAFs
//* addBlock::  @cindex\s-+addBlock
//* cleanup_weak_ptr_list::  @cindex\s-+cleanup_weak_ptr_list
//* copy::  @cindex\s-+copy
//* copyPart::  @cindex\s-+copyPart
//* evacuate::  @cindex\s-+evacuate
//* evacuate_large::  @cindex\s-+evacuate_large
//* gcCAFs::  @cindex\s-+gcCAFs
//* isAlive::  @cindex\s-+isAlive
//* maybeLarge::  @cindex\s-+maybeLarge
//* mkMutCons::  @cindex\s-+mkMutCons
//* printMutOnceList::  @cindex\s-+printMutOnceList
//* printMutableList::  @cindex\s-+printMutableList
//* relocate_TSO::  @cindex\s-+relocate_TSO
//* revert_dead_CAFs::  @cindex\s-+revert_dead_CAFs
//* scavenge::  @cindex\s-+scavenge
//* scavenge_large::  @cindex\s-+scavenge_large
//* scavenge_mut_once_list::  @cindex\s-+scavenge_mut_once_list
//* scavenge_mutable_list::  @cindex\s-+scavenge_mutable_list
//* scavenge_one::  @cindex\s-+scavenge_one
//* scavenge_srt::  @cindex\s-+scavenge_srt
//* scavenge_stack::  @cindex\s-+scavenge_stack
//* scavenge_static::  @cindex\s-+scavenge_static
//* threadLazyBlackHole::  @cindex\s-+threadLazyBlackHole
//* threadPaused::  @cindex\s-+threadPaused
//* threadSqueezeStack::  @cindex\s-+threadSqueezeStack
//* traverse_weak_ptr_list::  @cindex\s-+traverse_weak_ptr_list
//* upd_evacuee::  @cindex\s-+upd_evacuee
//* zero_mutable_list::  @cindex\s-+zero_mutable_list
//* zero_static_object_list::  @cindex\s-+zero_static_object_list
//@end index