/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.106 2001/07/24 06:31:36 ken Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;		// live static objects
StgClosure* scavenged_static_objects;	// static objects scavenged so far
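/* A minimal sketch (illustrative only, not part of the collector) of
 * how the static link field drives membership on these lists: an
 * object is consed onto static_objects only if its link field is
 * still NULL, and the end of the list is the non-NULL sentinel
 * END_OF_STATIC_LIST rather than NULL, so the link field doubles as
 * an "already on the list" flag.
 */
#if 0
static void cons_static_example (StgClosure *c)
{
    if (STATIC_LINK(get_itbl(c),c) == NULL) {	// not yet on the list
	STATIC_LINK(get_itbl(c),c) = static_objects;
	static_objects = c;
    }
}
#endif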
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */
static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgClosure *p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;		// the stack grows upwards
}
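/* pop_mark_stack is used by scavenge_mark_stack() below; its
 * definition is missing from this listing, so this is a sketch of the
 * obvious inverse of push_mark_stack() above (the stack grows
 * upwards, as the empty/full tests imply).
 */
static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}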
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
	N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }
      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);

      bd = allocBlock();
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }
      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	  nat bitmap_size; // in bytes
	  bdescr *bitmap_bdescr;
	  StgWord *bitmap;

	  bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	  if (bitmap_size > 0) {
	      bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
					 / BLOCK_SIZE);
	      stp->bitmap = bitmap_bdescr;
	      bitmap = bitmap_bdescr->start;

	      IF_DEBUG(gc, fprintf(stderr, "bitmap_size: %d, bitmap: %p\n",
				   bitmap_size, bitmap););

	      // don't forget to fill it with zeros!
	      memset(bitmap, 0, bitmap_size);

	      // for each block in this step, point to its bitmap from the
	      // block descriptor.
	      for (bd=stp->blocks; bd != NULL; bd = bd->link) {
		  bd->u.bitmap = bitmap;
		  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	      }
	  }
      }
    }
  }
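  /* Worked example of the bitmap sizing above (illustrative numbers
   * only): one mark bit is needed per word of heap, so with 4-byte
   * words (sizeof(W_) == 4, BITS_PER_BYTE == 8) a 4k block holds 1024
   * words and needs 1024 bits = 128 bytes of bitmap, i.e.
   * BLOCK_SIZE / 32 bytes of bitmap per block.
   */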
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	  ASSERT(stp->blocks == NULL);
	  bd = allocBlock();
	  bd->gen_no = g;
	  bd->step = stp;
	  bd->link = NULL;
	  bd->flags = 0;	// *not* a to-space block or a large object
	  stp->hp = bd->start;
	  stp->hpLim = stp->hp + BLOCK_SIZE_W;
	  stp->hp_bd = bd;
	  stp->blocks = bd;
	  stp->n_blocks = 1;
	  new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);
#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
	IF_DEBUG(sanity, checkStaticObjects(static_objects));
	scavenge_static();
    }

    // scavenge objects in compacted generation
    if (mark_stack_bdescr != NULL && !mark_stack_empty()) {
	scavenge_mark_stack();
	flag = rtsTrue;
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      int gen, st;
    loop2:
      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
	for (st = generations[gen].n_steps; --st >= 0; ) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  stp = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	    scavenge(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	  if (stp->new_large_objects != NULL) {
	    scavenge_large(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	}
      }
    }

    if (flag) { goto loop; }

    // must be last...
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }
  /* Final traversal of the weak pointer list (see comment by
   * cleanUpWeakPtrList below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);
#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable();
  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && RtsFlags.GcFlags.compact) {
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected...
      if (g <= N) {

	collected += stp->n_blocks * BLOCK_SIZE_W; // for stats
	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;
	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 *                oldgen_max_blocks * G
	 * max_blocks = ---------------------------
	 *              number of generations - 1
	 *
	 * where G is this generation's number.
	 */
	if (RtsFlags.GcFlags.generations > 1) {
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	       / (RtsFlags.GcFlags.generations-1);
	} else {
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}
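	/* Worked example (illustrative numbers only): with 6
	 * generations and oldest_gen->max_blocks = 1000, generation 2
	 * gets 1000 * 2 / 5 = 400 blocks and generation 5 (the
	 * oldest) the full 1000, so the limits grow linearly from
	 * youngest to oldest.
	 */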
      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
      }
    }
  }
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      oldest_gen->max_blocks =
	stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor,
		RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	if (((int)oldest_gen->max_blocks -
	     (int)oldest_gen->steps[0].n_blocks) <
	    (RtsFlags.GcFlags.pcFreeHeap *
	     RtsFlags.GcFlags.maxHeapSize / 200)) {
	  heapOverflow();
	}
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();
  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }
    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
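    /* To make the 2L/3L/4L point above concrete (illustrative numbers
     * only): with L = 100 blocks of live data and a nursery also of
     * 100 blocks, a GC needs the live data, the nursery, and a
     * to-space copy of the live data in flight: roughly 3L = 300
     * blocks.  Shrinking the nursery as we approach the maximum heap
     * size brings that bound down towards 2L.
     */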
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else { // Generational collector

  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */

  if (RtsFlags.GcFlags.heapSizeSuggestion) {
    long blocks;
    nat needed = calcNeeded(); 	// approx blocks needed at next GC

    /* Guess how much will be live in generation 0 step 0 next time.
     * A good approximation is obtained by finding the
     * percentage of g0s0 that was live at the last minor GC.
     */
    if (N == 0) {
      g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
    }
    /* Estimate a size for the allocation area based on the
     * information available.  We might end up going slightly under
     * or over the suggested heap size, but we should be pretty
     * close on average.
     *
     * Formula:            suggested - needed
     *                ----------------------------
     *                    1 + g0s0_pcnt_kept/100
     *
     * where 'needed' is the amount of memory needed at the next
     * collection for collecting all steps except g0s0.
     */
    blocks =
      (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
      (100 + (long)g0s0_pcnt_kept);
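    /* Worked example for the formula above (illustrative numbers
     * only): with a suggested heap of 4096 blocks, needed = 1536
     * blocks and g0s0_pcnt_kept = 30, we get
     *
     *     blocks = (4096 - 1536) * 100 / 130 ~= 1969
     *
     * so the nursery plus its ~30% survivors plus 'needed' roughly
     * fills the suggested heap size.
     */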
    if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
      blocks = RtsFlags.GcFlags.minAllocAreaSize;
    }

    resizeNursery((nat)blocks);
    }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }
  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());
  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif
  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);
    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      next_w = w->link;
      // and put it on the new weak ptr list
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	  t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	  next = t->link;
	  *prev = next;
	  continue;
      case ThreadKilled:
      case ThreadComplete:
	  // finished or died.  The thread might still be alive, but we
	  // don't keep it on the all_threads list.  Don't forget to
	  // stub out its global_link field.
	  next = t->global_link;
	  t->global_link = END_TSO_QUEUE;
	  *prev = next;
	  continue;
      default:
	  break;
      }

      if (tmp == NULL) {
	  // not alive (yet): leave this thread on the old_all_threads list.
	  prev = &(t->global_link);
	  next = t->global_link;
      }
      else {
	  // alive: move this thread onto the all_threads list.
	  next = t->global_link;
	  t->global_link = all_threads;
	  all_threads = t;
	  *prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    bd = Bdescr((P_)p);

    // ignore closures in generations that we're not collecting.
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
	return p;
    }
    // large objects have an evacuated flag
    if ((bd->flags & BF_LARGE) && (bd->flags & BF_EVACUATED)) {
	return p;
    }
    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
	return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		// rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
	p = (StgClosure *)((StgTSO *)p)->link;
	continue;
      }

    default:
      // dead.
      return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
         if  M > N     then not evacuated
         else          evac to step->to

   if  M < evac_gen    evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen   do nothing
   if  M <  evac_gen   set failed_to_evac flag to indicate that we
                       didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
	// nope
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
      info = get_itbl(q);
      if (info->type == TSO &&
	  ((StgTSO *)q)->what_next == ThreadRelocated) {
	q = (StgClosure *)((StgTSO *)q)->link;
	goto loop;
      }
      evacuate_large((P_)q);
      return q;
    }
    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		barf("ToDo: mark stack full");
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif
  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);
  switch (info -> type) {

  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	  (StgChar)w <= MAX_CHARLIKE) {
	  return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	  return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
      return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
	  bd->step->no != 0 &&
	  bd->step->no == generations[bd->gen_no].n_steps-1) {
	  stp = bd->step;
      }
#endif
      return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
      return copy(q,sizeofW(StgHeader)+2,stp);
  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
      return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
      to = copy(q,BLACKHOLE_sizeW(),stp);
      recordMutable((StgMutClosure *)to);
      return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord offset = info->layout.selector_offset;

	  // check that the size is in range
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			    selectee_info->layout.payload.nptrs));
	  // perform the selection!
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->flags & BF_EVACUATED) {
	      if (bd->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}
      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;
      case THUNK_SELECTOR:
#if 0
	  /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	     something) to go into an infinite loop when the nightly
	     stage2 compiles PrelTup.lhs. */

	  /* we can't recurse indefinitely in evacuate(), so set a
	   * limit on the number of times we can go around this
	   * loop.
	   */
	  if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	      bdescr *bd;
	      bd = Bdescr((P_)selectee);
	      if (!(bd->flags & BF_EVACUATED)) {
		  thunk_selector_depth++;
		  selectee = evacuate(selectee);
		  thunk_selector_depth--;
		  goto selector_loop;
	      }
	  }
#endif
	  // otherwise, fall through...
      case AP_UPD:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	// not evaluated yet
	break;

#if defined(PAR)
	// a copy of the top-level cases below
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp);
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

      case FETCH_ME:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;
#endif

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
      return copy(q,THUNK_SELECTOR_sizeW(),stp);
    }
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;
  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	  && ((StgIndStatic *)q)->saved_info == NULL
	  && IND_STATIC_LINK((StgClosure *)q) == NULL) {
	IND_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    // just copy the block
    return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    // just copy the block
    return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */
void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */
StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator knows
       which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
	/* treat MVars specially, because we don't want to evacuate the
	 * mut_link field in the middle of the closure.
	 */
	{
	    StgMVar *mvar = ((StgMVar *)p);
	    evac_gen = 0;
	    (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    evac_gen = saved_evac_gen;
	    recordMutable((StgMutClosure *)mvar);
	    failed_to_evac = rtsFalse; // mutable.
	    p += sizeofW(StgMVar);
	    break;
	}
    case THUNK_2_0:
    case FUN_2_0:
	scavenge_srt(info);
    case CONSTR_2_0:
	((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_0:
	scavenge_srt(info);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_1_0:
	scavenge_srt(info);
    case CONSTR_1_0:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_1:
	scavenge_srt(info);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_0_1:
	scavenge_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_2:
    case FUN_0_2:
	scavenge_srt(info);
    case CONSTR_0_2:
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_1:
    case FUN_1_1:
	scavenge_srt(info);
    case CONSTR_1_1:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
    case FUN:
    case THUNK:
	scavenge_srt(info);
	// fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
	{
	    StgPtr end;

	    end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    p += info->layout.payload.nptrs;
	    break;
	}
    case IND_PERM:
	if (stp->gen_no != 0) {
	    SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
	}
	// fall through
    case IND_OLDGEN_PERM:
	((StgIndOldGen *)p)->indirectee =
	    evacuate(((StgIndOldGen *)p)->indirectee);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordOldToNewPtrs((StgMutClosure *)p);
	}
	p += sizeofW(StgIndOldGen);
	break;
    case MUT_VAR:
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)p);
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case MUT_CONS:
	// ignore these
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	p += BLACKHOLE_sizeW();
	break;

    case BLACKHOLE_BQ:
	{
	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    recordMutable((StgMutClosure *)bh);
	    failed_to_evac = rtsFalse;
	    p += BLACKHOLE_sizeW();
	    break;
	}
    case THUNK_SELECTOR:
	{
	    StgSelector *s = (StgSelector *)p;
	    s->selectee = evacuate(s->selectee);
	    p += THUNK_SELECTOR_sizeW();
	    break;
	}

    case AP_UPD: // same as PAPs
    case PAP:
	/* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
	{
	    StgPAP* pap = (StgPAP *)p;

	    pap->fun = evacuate(pap->fun);
	    scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	    p += pap_sizeW(pap);
	    break;
	}
    case ARR_WORDS:
	// nothing to follow
	p += arr_words_sizeW((StgArrWords *)p);
	break;
    case MUT_ARR_PTRS:
	// follow everything
	{
	    StgPtr next;

	    evac_gen = 0;		// repeatedly mutable
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    evac_gen = saved_evac_gen;
	    recordMutable((StgMutClosure *)q);
	    failed_to_evac = rtsFalse; // mutable anyhow.
	    break;
	}
    case MUT_ARR_PTRS_FROZEN:
	// follow everything
	{
	    StgPtr next;

	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    // it's tempting to recordMutable() if failed_to_evac is
	    // false, but that breaks some assumptions (eg. every
	    // closure on the mutable list is supposed to have the MUT
	    // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	    break;
	}
    case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;
	    evac_gen = 0;
	    scavengeTSO(tso);
	    evac_gen = saved_evac_gen;
	    recordMutable((StgMutClosure *)tso);
	    failed_to_evac = rtsFalse; // mutable anyhow.
	    p += tso_sizeW(tso);
	    break;
	}
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
	{
	    nat size, ptrs, nonptrs, vhs;
	    char str[80];
	    StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	    StgRBH *rbh = (StgRBH *)p;
	    (StgClosure *)rbh->blocking_queue =
		evacuate((StgClosure *)rbh->blocking_queue);
	    recordMutable((StgMutClosure *)rbh);
	    failed_to_evac = rtsFalse; // mutable anyhow.
	    IF_DEBUG(gc,
		     belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
			   p, info_type(p), (StgClosure *)rbh->blocking_queue));
	    // ToDo: use size of reverted closure here!
	    p += BLACKHOLE_sizeW();
	    break;
	}
    case BLOCKED_FETCH:
	{
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
			   bf, info_type((StgClosure *)bf),
			   bf->node, info_type(bf->node)));
	    p += sizeofW(StgBlockedFetch);
	    break;
	}
    case FETCH_ME:
	p += sizeofW(StgFetchMe);
	break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s) exciting, isn't it",
			   p, info_type((StgClosure *)p)));
	    p += sizeofW(StgFetchMeBlockingQueue);
	    break;
	}
#endif
    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p",
	     info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, next;
    const StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

	switch (info->type) {
	case MVAR:
	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	     */
	    {
		StgMVar *mvar = ((StgMVar *)p);
		evac_gen = 0;
		(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
		(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
		(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse; // mutable.
		break;
	    }
	case FUN_2_0:
	case THUNK_2_0:
	    scavenge_srt(info);
	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_1_0:
	case FUN_1_1:
	case THUNK_1_0:
	case THUNK_1_1:
	    scavenge_srt(info);
	case CONSTR_1_0:
	case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_0_1:
	case FUN_0_2:
	case THUNK_0_1:
	case THUNK_0_2:
	    scavenge_srt(info);
	case CONSTR_0_1:
	case CONSTR_0_2:
	    break;

	case FUN:
	case THUNK:
	    scavenge_srt(info);
	    // fall through

	case CONSTR:
	case WEAK:
	case FOREIGN:
	case STABLE_NAME:
	case BCO:
	    {
		StgPtr end;

		end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
		for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		break;
	    }
	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    }
	    failed_to_evac = rtsFalse;
	    break;
	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    // ignore these
	    failed_to_evac = rtsFalse;
	    break;
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case ARR_WORDS:
	    break;

	case BLACKHOLE_BQ:
	    {
		StgBlockingQueue *bh = (StgBlockingQueue *)p;
		(StgClosure *)bh->blocking_queue =
		    evacuate((StgClosure *)bh->blocking_queue);
		failed_to_evac = rtsFalse;
		break;
	    }
	case THUNK_SELECTOR:
	    {
		StgSelector *s = (StgSelector *)p;
		s->selectee = evacuate(s->selectee);
		break;
	    }

	case AP_UPD: // same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	    {
		StgPAP* pap = (StgPAP *)p;

		pap->fun = evacuate(pap->fun);
		scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
		break;
	    }
	case MUT_ARR_PTRS:
	    // follow everything
	    {
		StgPtr next;

		evac_gen = 0;		// repeatedly mutable
		next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse; // mutable anyhow.
		break;
	    }
	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	    {
		StgPtr next;

		next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		break;
	    }
	case TSO:
	    {
		StgTSO *tso = (StgTSO *)p;
		evac_gen = 0;
		scavengeTSO(tso);
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse;
		break;
	    }
#if defined(PAR)
	case RBH: // cf. BLACKHOLE_BQ
	    {
		nat size, ptrs, nonptrs, vhs;
		char str[80];
		StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

		StgRBH *rbh = (StgRBH *)p;
		(StgClosure *)rbh->blocking_queue =
		    evacuate((StgClosure *)rbh->blocking_queue);
		recordMutable((StgMutClosure *)rbh);
		failed_to_evac = rtsFalse; // mutable anyhow.
		IF_DEBUG(gc,
			 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
			       p, info_type(p), (StgClosure *)rbh->blocking_queue));
		break;
	    }
	case BLOCKED_FETCH:
	    {
		StgBlockedFetch *bf = (StgBlockedFetch *)p;
		// follow the pointer to the node which is being demanded
		(StgClosure *)bf->node =
		    evacuate((StgClosure *)bf->node);
		// follow the link to the rest of the blocking queue
		(StgClosure *)bf->link =
		    evacuate((StgClosure *)bf->link);
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    recordMutable((StgMutClosure *)bf);
		}
		IF_DEBUG(gc,
			 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
			       bf, info_type((StgClosure *)bf),
			       bf->node, info_type(bf->node)));
		break;
	    }
	case FETCH_ME:
	    break; // nothing to do in this case

	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    }
	    IF_PAR_DEBUG(gc,
			 belch("@@ scavenge: %p (%s) exciting, isn't it",
			       p, info_type((StgClosure *)p)));
	    break;
	}
#endif // PAR
	default:
	    barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
		 info->type, p);
	}

	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    mkMutCons((StgClosure *)p, &generations[evac_gen]);
	}
    } // while (!mark_stack_empty())
}
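
/* Illustration (not RTS code): the loop above is the standard mark-stack
 * discipline: pop until empty, and let the per-object scavenging step push
 * any newly reached children back on.  A minimal self-contained sketch of
 * that control structure follows; the ex_* names and types are invented
 * for this sketch only, and it is compiled out in the same spirit as the
 * other #if 0 blocks in this file.
 */
#if 0
#include <stddef.h>

typedef struct ExObj { struct ExObj *child[2]; int marked; } ExObj;

static ExObj *ex_stack[1024];
static size_t ex_sp = 0;

static void ex_push(ExObj *o)
{
    if (o != NULL && !o->marked) {
	o->marked = 1;
	ex_stack[ex_sp++] = o;
    }
}

static void ex_drain(void)
{
    while (ex_sp > 0) {			// cf. !mark_stack_empty()
	ExObj *o = ex_stack[--ex_sp];	// cf. pop_mark_stack()
	ex_push(o->child[0]);		// scavenging a field may push
	ex_push(o->child[1]);		// more grey objects
    }
}
#endif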
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgClosure *p)
{
    const StgInfoTable *info;
    StgPtr q, next, end;
    rtsBool no_luck;

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    switch (info->type) {

    case FUN_1_0:		// hardly worth specialising these guys
    case CONSTR:
    case IND_OLDGEN_PERM:
	end = (P_)p->payload + info->layout.payload.ptrs;
	for (q = (P_)p->payload; q < end; q++) {
	    (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;

    case SE_CAF_BLACKHOLE:
    case BLACKHOLE:
	break;
    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	break;
    }

    case AP_UPD: /* same as PAPs */
    case PAP:
	/* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
    {
	StgPAP* pap = (StgPAP *)p;
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	break;
    }
    case IND_OLDGEN:
	/* This might happen if for instance a MUT_CONS was pointing to a
	 * THUNK which has since been updated.  The IND_OLDGEN will
	 * be on the mutable list anyway, so we don't need to do anything
	 * here.
	 */
	break;

    case MUT_ARR_PTRS_FROZEN:
	// follow everything
	next = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (q = (P_)((StgMutArrPtrs *)p)->payload; q < next; q++) {
	    (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;

    default:
	barf("scavenge_one: strange object %d", (int)(info->type));
    }
    no_luck = failed_to_evac;
    failed_to_evac = rtsFalse;
    return (no_luck);
}
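
/* Illustration (not RTS code): the failed_to_evac protocol used by
 * scavenge_one()'s callers.  If an old object turns out to still point
 * at a younger object, the old object itself gets recorded so later
 * minor collections treat it as a root.  Sketch with integer "gens";
 * the ex_* names are invented for illustration and compiled out.
 */
#if 0
typedef struct ExNode {
    int gen;			// which generation the node lives in
    struct ExNode *field;	// a single pointer field
    struct ExNode *rem_link;	// intrusive remembered-set link
} ExNode;

static ExNode *ex_remembered = 0;
static int ex_failed_to_evac = 0;

static void ex_evacuate(ExNode *target, int evac_gen)
{
    // the real evacuate() also copies; here we model only the check
    if (target != NULL && target->gen < evac_gen) {
	ex_failed_to_evac = 1;	// target ended up younger than wanted
    }
}

static void ex_scavenge_one(ExNode *p, int evac_gen)
{
    ex_failed_to_evac = 0;
    ex_evacuate(p->field, evac_gen);
    if (ex_failed_to_evac) {	// cf. recordOldToNewPtrs()/mkMutCons()
	p->rem_link = ex_remembered;
	ex_remembered = p;
    }
}
#endif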
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
static void
scavenge_mut_once_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next, *new_list;

    p = gen->mut_once_list;
    new_list = END_MUT_LIST;
    next = p->mut_link;

    evac_gen = gen->no;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	switch(info->type) {
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	case IND_STATIC:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	     */
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
#if 0 && defined(DEBUG)
	    if (RtsFlags.DebugFlags.gc)
	    /* Debugging code to print out the size of the thing we just
	     * promoted
	     */
	    {
		StgPtr start = gen->steps[0].scan;
		bdescr *start_bd = gen->steps[0].scan_bd;
		nat size = 0;
		scavenge(&gen->steps[0]);
		if (start_bd != gen->steps[0].scan_bd) {
		    size += (P_)BLOCK_ROUND_UP(start) - start;
		    start_bd = start_bd->link;
		    while (start_bd != gen->steps[0].scan_bd) {
			size += BLOCK_SIZE_W;
			start_bd = start_bd->link;
		    }
		    size += gen->steps[0].scan -
			(P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
		} else {
		    size = gen->steps[0].scan - start;
		}
		fprintf(stderr,"evac IND_OLDGEN: %ld bytes\n", size * sizeof(W_));
	    }
#endif
	    /* failed_to_evac might happen if we've got more than two
	     * generations, we're collecting only generation 0, the
	     * indirection resides in generation 2 and the indirectee is
	     * in generation 1.
	     */
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = new_list;
		new_list = p;
	    } else {
		/* the mut_link field of an IND_STATIC is overloaded as the
		 * static link field too (it just so happens that we don't need
		 * both at the same time), so we need to NULL it out when
		 * removing this object from the mutable list because the static
		 * link fields are all assumed to be NULL before doing a major
		 * collection.
		 */
		p->mut_link = NULL;
	    }
	    continue;

	case MUT_CONS:
	    /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
	     * it from the mutable list if possible by promoting whatever it
	     * points to.
	     */
	    if (scavenge_one((StgClosure *)((StgMutVar *)p)->var) == rtsTrue) {
		/* didn't manage to promote everything, so put the
		 * MUT_CONS back on the list.
		 */
		failed_to_evac = rtsFalse;
		p->mut_link = new_list;
		new_list = p;
	    }
	    continue;

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
	}
    }

    gen->mut_once_list = new_list;
}
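
/* Illustration (not RTS code): the loop above is a destructive filter
 * over an intrusive list.  Each node is either consed onto new_list
 * (still interesting) or dropped with its link NULLed.  The same shape
 * in miniature; ex_* names are invented and the block is compiled out.
 */
#if 0
typedef struct ExMut { struct ExMut *mut_link; int still_needed; } ExMut;
#define EX_END_MUT ((ExMut *)1)		// cf. END_MUT_LIST: non-NULL end marker

static ExMut *ex_filter(ExMut *list)
{
    ExMut *p, *next, *new_list = EX_END_MUT;

    for (p = list; p != EX_END_MUT; p = next) {
	next = p->mut_link;		// grab before we overwrite the link
	if (p->still_needed) {
	    p->mut_link = new_list;	// cons onto the list we keep
	    new_list = p;
	} else {
	    p->mut_link = NULL;		// cf. the IND_STATIC case above
	}
    }
    return new_list;
}
#endif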
static void
scavenge_mutable_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next;

    p = gen->saved_mut_list;
    next = p->mut_link;

    evac_gen = 0;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	switch(info->type) {
	case MUT_ARR_PTRS:
	    // follow everything
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	{
	    StgPtr end, q;

	    end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);
	    }
	    continue;
	}

	case MUT_VAR:
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;

	case MVAR:
	{
	    StgMVar *mvar = (StgMVar *)p;
	    (StgClosure *)mvar->head  = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail  = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;
	}
	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;

	    scavengeTSO(tso);

	    /* Don't take this TSO off the mutable list - it might still
	     * point to some younger objects (because we set evac_gen to 0
	     * above).
	     */
	    tso->mut_link = gen->mut_list;
	    gen->mut_list = (StgMutClosure *)tso;
	    continue;
	}
	case BLACKHOLE_BQ:
	{
	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;
	}
2876 case IND_OLDGEN_PERM:
2877 /* Try to pull the indirectee into this generation, so we can
2878 * remove the indirection from the mutable list.
2881 ((StgIndOldGen *)p)->indirectee =
2882 evacuate(((StgIndOldGen *)p)->indirectee);
2885 if (failed_to_evac) {
2886 failed_to_evac = rtsFalse;
2887 p->mut_link = gen->mut_once_list;
2888 gen->mut_once_list = p;
#if defined(PAR)
	// HWL: check whether all of these are necessary
	case RBH: // cf. BLACKHOLE_BQ
	{
	    // nat size, ptrs, nonptrs, vhs;
	    // char str[80];
	    // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
	    StgRBH *rbh = (StgRBH *)p;
	    (StgClosure *)rbh->blocking_queue =
		evacuate((StgClosure *)rbh->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)rbh);
	    }
	    // ToDo: use size of reverted closure here!
	    p += BLACKHOLE_sizeW();
	    break;
	}
	case BLOCKED_FETCH:
	{
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    }
	    p += sizeofW(StgBlockedFetch);
	    break;
	}

#ifdef DIST
	case REMOTE_REF:
	    barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
#endif
	case FETCH_ME:
	    p += sizeofW(StgFetchMe);
	    break; // nothing to do in this case
	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    }
	    p += sizeofW(StgFetchMeBlockingQueue);
	    break;
	}
#endif // PAR

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
	}
    }
}
static void
scavenge_static(void)
{
    StgClosure* p = static_objects;
    const StgInfoTable *info;

    /* Always evacuate straight to the oldest generation for static
     * objects */
    evac_gen = oldest_gen->no;

    /* keep going until we've scavenged all the objects on the linked
     * list... */
    while (p != END_OF_STATIC_LIST) {

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
	/* Take this object *off* the static_objects list,
	 * and put it on the scavenged_static_objects list.
	 */
	static_objects = STATIC_LINK(info,p);
	STATIC_LINK(info,p) = scavenged_static_objects;
	scavenged_static_objects = p;
	switch (info->type) {

	case IND_STATIC:
	{
	    StgInd *ind = (StgInd *)p;
	    ind->indirectee = evacuate(ind->indirectee);

	    /* might fail to evacuate it, in which case we have to pop it
	     * back on the mutable list (and take it off the
	     * scavenged_static list because the static link and mut link
	     * pointers are one and the same).
	     */
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		scavenged_static_objects = STATIC_LINK(info,p);
		((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
		oldest_gen->mut_once_list = (StgMutClosure *)ind;
	    }
	    break;
	}
	case CONSTR_STATIC:
	{
	    StgPtr q, next;

	    next = (P_)p->payload + info->layout.payload.ptrs;
	    // evacuate the pointers
	    for (q = (P_)p->payload; q < next; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);
	    }
	    break;
	}

	default:
	    barf("scavenge_static: strange closure %d", (int)(info->type));
	}

	ASSERT(failed_to_evac == rtsFalse);

	/* get the next static object from the list.  Remember, there might
	 * be more stuff on this list now that we've done some evacuating!
	 * (static_objects is a global)
	 */
	p = static_objects;
    }
}
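
/* Illustration (not RTS code): scavenge_static() works because the list
 * is treated as a work queue that evacuate() may cons onto while we
 * scan.  We always detach the head, move it to the "done" list, scan
 * it, then re-read the (possibly longer) head.  A toy version with
 * invented ex_* names, compiled out:
 */
#if 0
typedef struct ExStatic { struct ExStatic *link; } ExStatic;
#define EX_END_STATIC ((ExStatic *)1)	// cf. END_OF_STATIC_LIST

static ExStatic *ex_todo = EX_END_STATIC;	// cf. static_objects
static ExStatic *ex_done = EX_END_STATIC;	// cf. scavenged_static_objects

static void ex_scan(ExStatic *p)	// would evacuate p's fields,
{					// possibly consing onto ex_todo
    (void)p;
}

static void ex_scavenge_static(void)
{
    ExStatic *p = ex_todo;
    while (p != EX_END_STATIC) {
	ex_todo = p->link;		// unlink the head...
	p->link = ex_done;		// ...move it to the done list...
	ex_done = p;
	ex_scan(p);			// ...then scan; ex_todo may grow
	p = ex_todo;			// re-read the (maybe new) head
    }
}
#endif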
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    StgPtr q;
    const StgInfoTable* info;
    StgWord bitmap;

    //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

    /* Each time around this loop, we are looking at a chunk of stack
     * that starts with either a pending argument section or an
     * activation record.
     */
    while (p < stack_end) {
	q = *(P_ *)p;

	// If we've got a tag, skip over that many words on the stack
	if (IS_ARG_TAG((W_)q)) {
	    p += ARG_SIZE(q);
	    p++; continue;
	}

	/* Is q a pointer to a closure?
	 */
	if ( !LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
	    if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
		ASSERT(closure_STATIC((StgClosure *)q));
	    }
	    // otherwise, must be a pointer into the allocation space.
#endif
	    (StgClosure *)*p = evacuate((StgClosure *)q);
	    p++;
	    continue;
	}

	/* Otherwise, q must be the info pointer of an activation
	 * record.  All activation records have 'bitmap' style layout
	 * info.
	 */
	info = get_itbl((StgClosure *)p);
	switch (info->type) {

	// Dynamic bitmap: the mask is stored on the stack
	case RET_DYN:
	    bitmap = ((StgRetDyn *)p)->liveness;
	    p      = (P_)&((StgRetDyn *)p)->payload[0];
	    goto small_bitmap;

	// probably a slow-entry point return address:
	case FUN:
	case FUN_STATIC:
	{
#if 0
	    StgPtr old_p = p;
	    p++; p++;
	    IF_DEBUG(sanity,
		     belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
			   old_p, p, old_p+1));
#else
	    p++; // what if FHS!=1 !? -- HWL
#endif
	    goto follow_srt;
	}
	/* Specialised code for update frames, since they're so common.
	 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
	 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
	 */
	case UPDATE_FRAME:
	{
	    StgUpdateFrame *frame = (StgUpdateFrame *)p;

	    p += sizeofW(StgUpdateFrame);

#if 1
	    frame->updatee = evacuate(frame->updatee);
	    continue;
#else // specialised code for update frames, not sure if it's worth it.
	    StgClosure *to;
	    nat type = get_itbl(frame->updatee)->type;

	    if (type == EVACUATED) {
		frame->updatee = evacuate(frame->updatee);
		continue;
	    } else {
		bdescr *bd = Bdescr((P_)frame->updatee);
		step *stp;
		if (bd->gen_no > N) {
		    if (bd->gen_no < evac_gen) {
			failed_to_evac = rtsTrue;
		    }
		    continue;
		}

		// Don't promote blackholes
		stp = bd->step;
		if (!(stp->gen_no == 0 &&
		      stp->no != 0 &&
		      stp->no == stp->gen->n_steps-1)) {
		    stp = stp->to;
		}

		switch (type) {
		case BLACKHOLE:
		case CAF_BLACKHOLE:
		    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
				  sizeofW(StgHeader), stp);
		    frame->updatee = to;
		    continue;
		case BLACKHOLE_BQ:
		    to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
		    frame->updatee = to;
		    recordMutable((StgMutClosure *)to);
		    continue;
		default:
		    /* will never be SE_{,CAF_}BLACKHOLE, since we
		       don't push an update frame for single-entry thunks.  KSW 1999-01. */
		    barf("scavenge_stack: UPDATE_FRAME updatee");
		}
	    }
#endif
	}
	// small bitmap (< 32 entries, or 64 on a 64-bit machine)
	case CATCH_FRAME:
	case SEQ_FRAME:
	case STOP_FRAME:
	case RET_SMALL:
	case RET_VEC_SMALL:
	    bitmap = info->layout.bitmap;
	    p++;
	    // this assumes that the payload starts immediately after the info-ptr
	small_bitmap:
	    while (bitmap != 0) {
		if ((bitmap & 1) == 0) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		p++;
		bitmap = bitmap >> 1;
	    }

	follow_srt:
	    scavenge_srt(info);
	    continue;
	// large bitmap (> 32 entries, or > 64 on a 64-bit machine)
	case RET_BIG:
	case RET_VEC_BIG:
	{
	    StgLargeBitmap *large_bitmap;
	    nat i;

	    large_bitmap = info->layout.large_bitmap;
	    p++;

	    for (i=0; i<large_bitmap->size; i++) {
		bitmap = large_bitmap->bitmap[i];
		q = p + BITS_IN(W_);
		while (bitmap != 0) {
		    if ((bitmap & 1) == 0) {
			(StgClosure *)*p = evacuate((StgClosure *)*p);
		    }
		    p++;
		    bitmap = bitmap >> 1;
		}
		if (i+1 < large_bitmap->size) {
		    while (p < q) {
			(StgClosure *)*p = evacuate((StgClosure *)*p);
			p++;
		    }
		}
	    }

	    // and don't forget to follow the SRT
	    goto follow_srt;
	}

	default:
	    barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
	}
    }
}
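
/* Illustration (not RTS code): in the frame cases above, a zero bit in
 * the liveness bitmap means "this stack slot is a live pointer" and a
 * one bit means dead/non-pointer.  Self-contained sketch of that walk;
 * ex_* names are invented and the block is compiled out.
 */
#if 0
#include <stdint.h>

typedef uintptr_t ExWord;

static void ex_touch(ExWord **slot)	// stands in for evacuate()
{
    (void)slot;
}

static void ex_walk_small_bitmap(ExWord *p, ExWord bitmap)
{
    while (bitmap != 0) {
	if ((bitmap & 1) == 0) {	// a 0 bit marks a live pointer
	    ex_touch((ExWord **)p);
	}
	p++;
	bitmap >>= 1;			// move to the next slot's bit
    }
}
#endif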
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large(step *stp)
{
    bdescr *bd;
    StgPtr p, q;
    const StgInfoTable* info;
    nat saved_evac_gen = evac_gen; // used for temporarily changing evac_gen

    bd = stp->new_large_objects;

    for (; bd != NULL; bd = stp->new_large_objects) {

	/* take this object *off* the large objects list and put it on
	 * the scavenged large objects list.  This is so that we can
	 * treat new_large_objects as a stack and push new objects on
	 * the front when evacuating.
	 */
	stp->new_large_objects = bd->link;
	dbl_link_onto(bd, &stp->scavenged_large_objects);

	p = bd->start;
	q = p;
	info = get_itbl((StgClosure *)p);

	// only certain objects can be "large"...
	switch (info->type) {
	case ARR_WORDS:
	    // nothing to follow
	    break;

	case MUT_ARR_PTRS:
	    // follow everything
	{
	    StgPtr next;

	    evac_gen = 0;		// repeatedly mutable
	    recordMutable((StgMutClosure *)p);
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    continue;
	}
	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	{
	    StgPtr next;

	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}
	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;

	    evac_gen = 0;		// repeatedly mutable
	    scavengeTSO(tso);
	    recordMutable((StgMutClosure *)tso);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    continue;
	}

	case AP_UPD:
	case PAP:
	{
	    StgPAP* pap = (StgPAP *)p;
	    pap->fun = evacuate(pap->fun);
	    scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	    break;
	}

	default:
	    barf("scavenge_large: unknown/strange object %d", (int)(info->type));
	}

	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    mkMutCons((StgClosure *)q, &generations[evac_gen]);
	}
    }
}
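
/* Illustration (not RTS code): new_large_objects is used as a stack.
 * Evacuation pushes large objects on the front, and the loop above
 * keeps popping the head until the list stays empty, moving each block
 * onto the scavenged list first.  Miniature version; ex_* names are
 * invented (and the real lists are doubly linked via dbl_link_onto()).
 */
#if 0
typedef struct ExBd { struct ExBd *link; } ExBd;

static ExBd *ex_new_large = 0;		// cf. stp->new_large_objects
static ExBd *ex_scavenged_large = 0;	// cf. stp->scavenged_large_objects

static void ex_scavenge_block(ExBd *bd)	// would evacuate the block's
{					// contents, possibly pushing new
    (void)bd;				// blocks onto ex_new_large
}

static void ex_scavenge_large(void)
{
    ExBd *bd;
    for (bd = ex_new_large; bd != NULL; bd = ex_new_large) {
	ex_new_large = bd->link;	// pop first, so anything evacuated
	bd->link = ex_scavenged_large;	// during scanning lands on a
	ex_scavenged_large = bd;	// clean head of the pending list
	ex_scavenge_block(bd);
    }
}
#endif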
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */
void
zero_static_object_list(StgClosure* first_static)
{
    StgClosure* p;
    StgClosure* link;
    const StgInfoTable *info;

    for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
	info = get_itbl(p);
	link = STATIC_LINK(info, p);
	STATIC_LINK(info,p) = NULL;
    }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
void
zero_mutable_list( StgMutClosure *first )
{
    StgMutClosure *next, *c;

    for (c = first; c != END_MUT_LIST; c = next) {
	next = c->mut_link;
	c->mut_link = NULL;
    }
}
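
/* Illustration (not RTS code): why the zeroing above matters.  A single
 * intrusive link field is shared by two phases (mutable-list phase and
 * static-link phase), so each phase must find the field NULL when it
 * starts.  Tiny sketch with an invented ExShared type, compiled out:
 */
#if 0
typedef struct ExShared { struct ExShared *link; } ExShared; // one field, two uses

static void ex_end_phase(ExShared *list)
{
    // detach every node so the *other* phase sees a NULL link
    ExShared *next, *c;
    for (c = list; c != NULL; c = next) {
	next = c->link;
	c->link = NULL;
    }
}
#endif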
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */
void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	c->header.info = c->saved_info;
	c->saved_info = NULL;
	// could, but not necessary: c->static_link = NULL;
    }
    caf_list = NULL;
}
void
scavengeCAFs( void )
{
    StgIndStatic *c;

    evac_gen = 0;
    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	c->indirectee = evacuate(c->indirectee);
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#if 0 && defined(DEBUG)
static void
gcCAFs(void)
{
    StgClosure*  p;
    StgClosure** pp;
    const StgInfoTable *info;
    nat i;

    i = 0;
    p = caf_list;
    pp = &caf_list;

    while (p != NULL) {
	info = get_itbl(p);

	ASSERT(info->type == IND_STATIC);

	if (STATIC_LINK(info,p) == NULL) {
	    IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04lx\n", (long)p));
	    // black hole it
	    SET_INFO(p,&stg_BLACKHOLE_info);
	    p = STATIC_LINK2(info,p);
	    *pp = p;
	} else {
	    pp = &STATIC_LINK2(info,p);
	    p = *pp;
	    i++;
	}
    }

    // fprintf(stderr, "%d CAFs live\n", i);
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
static void
threadLazyBlackHole(StgTSO *tso)
{
    StgUpdateFrame *update_frame;
    StgBlockingQueue *bh;
    StgPtr stack_end;

    stack_end = &tso->stack[tso->stack_size];
    update_frame = tso->su;

    while (1) {
	switch (get_itbl(update_frame)->type) {

	case CATCH_FRAME:
	    update_frame = ((StgCatchFrame *)update_frame)->link;
	    break;

	case UPDATE_FRAME:
	    bh = (StgBlockingQueue *)update_frame->updatee;

	    /* if the thunk is already blackholed, it means we've also
	     * already blackholed the rest of the thunks on this stack,
	     * so we can stop early.
	     *
	     * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
	     * don't interfere with this optimisation.
	     */
	    if (bh->header.info == &stg_BLACKHOLE_info) {
		return;
	    }

	    if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
		bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
		SET_INFO(bh,&stg_BLACKHOLE_info);
	    }

	    update_frame = update_frame->link;
	    break;

	case SEQ_FRAME:
	    update_frame = ((StgSeqFrame *)update_frame)->link;
	    break;

	case STOP_FRAME:
	    return;
	default:
	    barf("threadPaused");
	}
    }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
static void
threadSqueezeStack(StgTSO *tso)
{
  lnat displacement = 0;
  StgUpdateFrame *frame;
  StgUpdateFrame *next_frame;		// Temporally next
  StgUpdateFrame *prev_frame;		// Temporally previous
  StgPtr bottom;
  rtsBool prev_was_update_frame;
#if DEBUG
  StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
  void printObj( StgClosure *obj ); // from Printer.c

  top_frame = tso->su;
#endif

  bottom = &(tso->stack[tso->stack_size]);
  frame  = tso->su;

  /* There must be at least one frame, namely the STOP_FRAME.
   */
  ASSERT((P_)frame < bottom);
  /* Walk down the stack, reversing the links between frames so that
   * we can walk back up as we squeeze from the bottom.  Note that
   * next_frame and prev_frame refer to next and previous as they were
   * added to the stack, rather than the way we see them in this
   * walk. (It makes the next loop less confusing.)
   *
   * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */
  next_frame = NULL;
  // bottom - sizeof(StgStopFrame) is the STOP_FRAME
  while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
    prev_frame = frame->link;
    frame->link = next_frame;
    next_frame = frame;
    frame = prev_frame;
#if DEBUG
    IF_DEBUG(sanity,
	     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
	       printObj((StgClosure *)prev_frame);
	       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
		    frame, prev_frame);
	     })
    switch (get_itbl(frame)->type) {
    case UPDATE_FRAME: upd_frames++;
	if (frame->updatee->header.info == &stg_BLACKHOLE_info)
	    bhs++;
	break;
    case STOP_FRAME:  stop_frames++;
	break;
    case CATCH_FRAME: catch_frames++;
	break;
    case SEQ_FRAME:   seq_frames++;
	break;
    default:
	printObj((StgClosure *)prev_frame);
	barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
	     frame, prev_frame);
    }
#endif
    if (get_itbl(frame)->type == UPDATE_FRAME
	&& frame->updatee->header.info == &stg_BLACKHOLE_info) {
	break;
    }
  }
  /* Now, we're at the bottom.  Frame points to the lowest update
   * frame on the stack, and its link actually points to the frame
   * above. We have to walk back up the stack, squeezing out empty
   * update frames and turning the pointers back around on the way
   * back up.
   *
   * The bottom-most frame (the STOP_FRAME) has not been altered, and
   * we never want to eliminate it anyway.  Just walk one step up
   * before starting to squeeze. When you get to the topmost frame,
   * remember that there are still some words above it that might have
   * been pushed on top of it.
   */
  prev_frame = frame;
  frame = next_frame;

  prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

  /*
   * Loop through all of the frames (everything except the very
   * bottom).  Things are complicated by the fact that we have
   * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
   * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
   */
  while (frame != NULL) {
    StgPtr sp;
    StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
    rtsBool is_update_frame;

    next_frame = frame->link;
    is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
    if (prev_was_update_frame && is_update_frame &&
	(P_)prev_frame == frame_bottom + displacement) {

      // Now squeeze out the current frame
      StgClosure *updatee_keep   = prev_frame->updatee;
      StgClosure *updatee_bypass = frame->updatee;

      IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
      IF_DEBUG(sanity, squeezes++);
      /* Deal with blocking queues.  If both updatees have blocked
       * threads, then we should merge the queues into the update
       * frame that we're keeping.
       *
       * Alternatively, we could just wake them up: they'll just go
       * straight to sleep on the proper blackhole!  This is less code
       * and probably less bug prone, although it's probably much
       * slower.
       */
#if 0 // do it properly...
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
      if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
	  ) {
	// Sigh.  It has one.  Don't lose those threads!
	if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
	  // Urgh.  Two queues.  Merge them.
	  P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

	  while (keep_tso->link != END_TSO_QUEUE) {
	    keep_tso = keep_tso->link;
	  }
	  keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

	} else {
	  // For simplicity, just swap the BQ for the BH
	  P_ temp = updatee_keep;

	  updatee_keep = updatee_bypass;
	  updatee_bypass = temp;

	  // Record the swap in the kept frame (below)
	  prev_frame->updatee = updatee_keep;
	}
      }
#endif
      TICK_UPD_SQUEEZED();
      /* wasn't there something about update squeezing and ticky to be
       * sorted out?  oh yes: we aren't counting each enter properly
       * in this case.  See the log somewhere.  KSW 1999-04-21
       *
       * Check two things: that the two update frames don't point to
       * the same object, and that the updatee_bypass isn't already an
       * indirection.  Both of these cases only happen when we're in a
       * black hole-style loop (and there are multiple update frames
       * on the stack pointing to the same closure), but they can both
       * screw us up if we don't check.
       */
      if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
	  // this wakes the threads up
	  UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
      }
      sp = (P_)frame - 1;	// sp = stuff to slide
      displacement += sizeofW(StgUpdateFrame);

    } else {
      // No squeeze for this frame
      sp = frame_bottom - 1;	// Keep the current frame
      /* Do lazy black-holing.
       */
      if (is_update_frame) {
	StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
	if (bh->header.info != &stg_BLACKHOLE_info &&
	    bh->header.info != &stg_BLACKHOLE_BQ_info &&
	    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
	  fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
#ifdef DEBUG
	  /* zero out the slop so that the sanity checker can tell
	   * where the next closure is.
	   */
	  {
	    StgInfoTable *info = get_itbl(bh);
	    nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
	    /* don't zero out slop for a THUNK_SELECTOR, because its layout
	     * info is used for a different purpose, and it's exactly the
	     * same size as a BLACKHOLE in any case.
	     */
	    if (info->type != THUNK_SELECTOR) {
	      for (i = np; i < np + nw; i++) {
		((StgClosure *)bh)->payload[i] = 0;
	      }
	    }
	  }
#endif
	  SET_INFO(bh,&stg_BLACKHOLE_info);
	}
      }
      // Fix the link in the current frame (should point to the frame below)
      frame->link = prev_frame;
      prev_was_update_frame = is_update_frame;
    }
    // Now slide all words from sp up to the next frame

    if (displacement > 0) {
      P_ next_frame_bottom;

      if (next_frame != NULL)
	next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
      else
	next_frame_bottom = tso->sp - 1;

      IF_DEBUG(gc,
	       fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
		       displacement))

      while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
    }
    (P_)prev_frame = (P_)frame + displacement;
    frame = next_frame;
  }

  tso->sp += displacement;
  tso->su = prev_frame;

  IF_DEBUG(gc,
	   fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
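
/* Illustration (not RTS code): the slide at the end of the loop is plain
 * word copying.  Everything between the squeezed-out frames moves up by
 * 'displacement' words, walking from the highest address downwards so
 * the overlapping ranges are safe (the highest source word is copied
 * before it can be overwritten).  Sketch with invented ex_* names:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

typedef uintptr_t ExWord;

/* Slide the words in [lo, hi] up by disp words (towards higher
 * addresses), cf. the "while (sp >= next_frame_bottom)" loop above.
 */
static void ex_slide(ExWord *lo, ExWord *hi, size_t disp)
{
    ExWord *sp;
    for (sp = hi; sp >= lo; sp--) {
	sp[disp] = *sp;
    }
}
#endif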
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */
void
threadPaused(StgTSO *tso)
{
    if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
	threadSqueezeStack(tso);	// does black holing too
    else
	threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */
void
printMutOnceList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_once_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}
void
printMutableList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}
static inline rtsBool
maybeLarge(StgClosure *closure)
{
    StgInfoTable *info = get_itbl(closure);

    /* closure types that may be found on the new_large_objects list;
       see scavenge_large */
    return (info->type == MUT_ARR_PTRS ||
	    info->type == MUT_ARR_PTRS_FROZEN ||
	    info->type == TSO ||
	    info->type == ARR_WORDS);
}