/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.104 2001/07/23 17:23:19 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
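
/* The consing convention above, in a minimal sketch (illustrative only,
 * not part of the collector; push_on_static_list is a hypothetical
 * helper -- the real code inlines this pattern in evacuate() below,
 * using the STATIC_LINK macro):
 */
#if 0
static void
push_on_static_list (StgClosure *q)
{
    if (STATIC_LINK(get_itbl(q),q) == NULL) { // not on a list yet
	STATIC_LINK(get_itbl(q),q) = static_objects;
	static_objects = q;		      // cons onto the front
    }
}
#endif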
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
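
/* (Example, for illustration only: a long chain of selector thunks
 * such as fst (fst (fst ...)) makes evacuate() recurse once per
 * selector when it tries to evaluate them eagerly; the depth bound
 * above cuts that recursion off at 256.)
 */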
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgClosure *p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
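
/* A usage sketch (illustrative only; the real driver is
 * scavenge_mark_stack() below).  evacuate() pushes objects that live
 * in a compacted step, and the scavenger pops them until the stack
 * drains:
 */
#if 0
    while (!mark_stack_empty()) {
	StgPtr p = pop_mark_stack();
	// ... scavenge the closure at p ...
    }
#endif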
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either

       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
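
/* The scavenge phase in miniature (an illustrative sketch of the
 * breadth-first loop, not the actual code below): evacuation copies an
 * object to to-space and leaves a forwarding pointer behind; the
 * scavenger then walks to-space linearly, evacuating everything each
 * copied object points at, until the scan pointer catches up with the
 * allocation pointer.
 */
#if 0
    while (scan < hp) {			// un-scavenged objects remain
	StgClosure *obj = (StgClosure *)scan;
	// evacuate each pointer field of obj (may advance hp) ...
	scan += closure_sizeW(obj);	// move past the scavenged object
    }
#endif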
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  nat g, s;
  rtsBool flag;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif
  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks >= generations[g].max_blocks) {
	N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	  nat bitmap_size; // in bytes
	  bdescr *bitmap_bdescr;
	  StgPtr bitmap;

	  bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	  if (bitmap_size > 0) {
	      bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
					 / BLOCK_SIZE);
	      stp->bitmap = bitmap_bdescr;
	      bitmap = bitmap_bdescr->start;

	      IF_DEBUG(gc, fprintf(stderr, "bitmap_size: %d, bitmap: %p\n",
				   bitmap_size, bitmap););

	      // don't forget to fill it with zeros!
	      memset(bitmap, 0, bitmap_size);

	      // for each block in this step, point to its bitmap from the
	      // block descriptor.
	      for (bd=stp->blocks; bd != NULL; bd = bd->link) {
		  bd->u.bitmap = bitmap;
		  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	      }
	  }
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	  ASSERT(stp->blocks == NULL);
	  bd = allocBlock();
	  bd->gen_no = g;
	  bd->step = stp;
	  bd->link = NULL;
	  bd->flags = 0;	// *not* a to-space block or a large object
	  stp->hp = bd->start;
	  stp->hpLim = stp->hp + BLOCK_SIZE_W;
	  stp->hp_bd = bd;
	  stp->blocks = bd;
	  stp->n_blocks = 1;
	  new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp   = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  // scavenge static objects
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity, checkStaticObjects(static_objects));
    scavenge_static();
  }

  // scavenge objects in compacted generation
  if (mark_stack_bdescr != NULL && !mark_stack_empty()) {
      scavenge_mark_stack();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  // scavenge each step in generations 0..maxgen
  {
    int gen, st;
  loop2:
    for (gen = RtsFlags.GcFlags.generations-1; gen >= 0; gen--) {
      for (st = generations[gen].n_steps-1; st >= 0 ; st--) {
	if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	  continue;
	}
	stp = &generations[gen].steps[st];
	evac_gen = gen;
	if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	  scavenge(stp);
	  flag = rtsTrue;
	  goto loop2;
	}
	if (stp->new_large_objects != NULL) {
	  scavenge_large(stp);
	  flag = rtsTrue;
	  goto loop2;
	}
      }
    }
  }

  if (flag) { goto loop; }
  // must be last...
  if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
    goto loop;
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanUpWeakPtrList below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  // Now see which stable names are still alive.
  gcStablePtrTable(major_gc);

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && RtsFlags.GcFlags.compact) {
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected...
      if (g <= N) {

	collected += stp->n_blocks * BLOCK_SIZE_W; // for stats

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	  bd = next;
	}
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;

	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 * max_blocks =       oldgen_max_blocks * G
	 *              ----------------------------------
	 *              RtsFlags.GcFlags.generations - 1
	 *
	 * where 'G' is the generation number (g).
	 */
	if (RtsFlags.GcFlags.generations > 1) {
	    generations[g].max_blocks = (oldest_gen->max_blocks * g)
		/ (RtsFlags.GcFlags.generations-1);
	} else {
	    generations[g].max_blocks = oldest_gen->max_blocks;
	}

      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
      }
    }
  }
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      oldest_gen->max_blocks =
	  stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor,
		  RtsFlags.GcFlags.minOldGenSize);
      if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	  oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	  if (((int)oldest_gen->max_blocks -
	       (int)oldest_gen->steps[0].n_blocks) <
	      (RtsFlags.GcFlags.pcFreeHeap *
	       RtsFlags.GcFlags.maxHeapSize / 200)) {
	      heapOverflow();
	  }
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	      stp->bitmap = NULL;
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
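    /* (Arithmetic, for illustration: with live data L and a nursery of
     * the same size, the peak footprint is from-space L + to-space L +
     * nursery L = 3L; shrinking the nursery as we approach the maximum
     * heap size brings the requirement down towards 2L.)
     */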
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      int adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, fprintf(stderr, "@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %d\n", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {

    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      int blocks;
      nat needed = calcNeeded(); 	// approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((int)RtsFlags.GcFlags.heapSizeSuggestion - (int)needed) * 100) /
	(100 + (int)g0s0_pcnt_kept);

      if (blocks < (int)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
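
  /* (Worked example with illustrative numbers: suggested = 4096 blocks,
   * needed = 1024 and g0s0_pcnt_kept = 30 give
   * (4096-1024)*100/130 = ~2363 blocks for the allocation area.)
   */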
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */
#endif

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
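
/* (Example of why this iterates to a fixpoint: suppose weak pointer w1
 * is keyed on k1, and w1's value is the only reference keeping k2
 * alive, where k2 is the key of w2.  The pass that finds k1 alive
 * evacuates w1's value, making k2 reachable, so only a later pass can
 * discover that w2 is live too.  Illustrative scenario, not from the
 * original comment.)
 */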
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, fprintf(stderr,"Weak pointer still alive at %p -> %p\n", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }

  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	  t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	  next = t->link;
	  *prev = next;
	  continue;
      case ThreadKilled:
      case ThreadComplete:
	  // finished or died.  The thread might still be alive, but we
	  // don't keep it on the all_threads list.  Don't forget to
	  // stub out its global_link field.
	  next = t->global_link;
	  t->global_link = END_TSO_QUEUE;
	  break;
      default:
	  ;
      }

      if (tmp == NULL) {
	  // not alive (yet): leave this thread on the old_all_threads list.
	  prev = &(t->global_link);
	  next = t->global_link;
      } else {
	  // alive: move this thread onto the all_threads list.
	  next = t->global_link;
	  t->global_link = all_threads;
	  all_threads = t;
	  *prev = next;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

      if (get_itbl(w)->type == EVACUATED) {
	  w = (StgWeak *)((StgEvacuated *)w)->evacuee;
	  *last_w = w;
      }

      if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) {
	  (StgClosure *)w = evacuate((StgClosure *)w);
	  *last_w = w;
      }
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    bd = Bdescr((P_)p);

    // ignore closures in generations that we're not collecting.
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
	return p;
    }
    // large objects have an evacuated flag
    if ((bd->flags & BF_LARGE) && (bd->flags & BF_EVACUATED)) {
	return p;
    }
    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
	return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		// rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
	p = (StgClosure *)((StgTSO *)p)->link;
	continue;
      }

    default:
      // dead.
      return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

// helper for copy()/copyPart() below: start a new to-space block for
// stp when the current one fills up (name reconstructed from the call
// sites).
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->to_blocks = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

static inline void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
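
/* (Illustrative example, not from the original comment: an immutable
 * constructor in an old generation can end up pointing at a young
 * object that we failed to promote; the MUT_CONS built here records
 * that reference on the old generation's mut-once list, so the next
 * GC treats it as a root.)
 */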
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen     evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen    do nothing
   if  M <  evac_gen    set failed_to_evac flag to indicate that we
			didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
	/* Can't evacuate this object, because it's in a generation
	 * older than the ones we're collecting.  Let's hope that it's
	 * in evac_gen or older, or we will have to arrange to track
	 * this pointer using the mutable list.
	 */
	if (bd->gen_no < evac_gen) {
	    // nope
	    failed_to_evac = rtsTrue;
	    TICK_GC_FAILED_PROMOTION();
	}
	return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
	info = get_itbl(q);
	if (info->type == TSO &&
	    ((StgTSO *)q)->what_next == ThreadRelocated) {
	    q = (StgClosure *)((StgTSO *)q)->link;
	    goto loop;
	}
	evacuate_large((P_)q);
	return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		barf("ToDo: mark stack full");
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  switch (info -> type) {

  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	  (StgChar)w <= MAX_CHARLIKE) {
	  return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	  return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
      return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
	  bd->step->no != 0 &&
	  bd->step->no == generations[bd->gen_no].n_steps-1) {
	  stp = bd->step;
      }
#endif
      return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
      return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
      return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
      to = copy(q,BLACKHOLE_sizeW(),stp);
      recordMutable((StgMutClosure *)to);
      return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord32 offset = info->layout.selector_offset;

	  // check that the size is in range
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  // perform the selection!
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->flags & BF_EVACUATED) {
	      if (bd->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK_SELECTOR:
#if 0
	  /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	     something) to go into an infinite loop when the nightly
	     stage2 compiles PrelTup.lhs. */

	  /* we can't recurse indefinitely in evacuate(), so set a
	   * limit on the number of times we can go around this
	   * loop.
	   */
	  if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	      bdescr *bd;
	      bd = Bdescr((P_)selectee);
	      if (!(bd->flags & BF_EVACUATED)) {
		  thunk_selector_depth++;
		  selectee = evacuate(selectee);
		  thunk_selector_depth--;
		  goto selector_loop;
	      }
	  }
	  // otherwise, fall through...
#endif

      case AP_UPD:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	// not evaluated yet
	break;

#if defined(PAR)
	// a copy of the top-level cases below
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp);
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

      case FETCH_ME:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;
#endif

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	  && ((StgIndStatic *)q)->saved_info == NULL
	  && IND_STATIC_LINK((StgClosure *)q) == NULL) {
	IND_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);
  case PAP:
  case AP_UPD:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	IF_DEBUG(gc, belch("@@ evacuate: evac of EVACUATED node %p failed!", p));
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
      // just copy the block
      return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }

#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    int diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}

/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */
void
relocate_stack(StgTSO *dest, int diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      return;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
  }
}
static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator knows
       which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   scavengeTSO
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse; // mutable.
	p += sizeofW(StgMVar);
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case FUN:
    case THUNK:
      scavenge_srt(info);
      // fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }

    case IND_PERM:
      if (stp->gen_no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      // fall through
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case MUT_VAR:
      evac_gen = 0;
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      evac_gen = saved_evac_gen;
      recordMutable((StgMutClosure *)p);
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case MUT_CONS:
      // ignore these
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	p += BLACKHOLE_sizeW();
	break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
      }

    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }

    case AP_UPD: // same as PAPs
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }
    case ARR_WORDS:
      // nothing to follow
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      // follow everything
      {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
      }

    case MUT_ARR_PTRS_FROZEN:
      // follow everything
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
      }

#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse; // mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }
    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      failed_to_evac = rtsFalse;
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p;
    const StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

	switch (info->type) {
	case MVAR:
	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	     */
	    {
		StgMVar *mvar = ((StgMVar *)p);
		evac_gen = 0;
		(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
		(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
		(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse; // mutable.
		break;
	    }

	case FUN_2_0:
	case THUNK_2_0:
	    scavenge_srt(info);
	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_1_0:
	case FUN_1_1:
	case THUNK_1_0:
	case THUNK_1_1:
	    scavenge_srt(info);
	case CONSTR_1_0:
	case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_0_1:
	case FUN_0_2:
	case THUNK_0_1:
	case THUNK_0_2:
	    scavenge_srt(info);
	case CONSTR_0_1:
	case CONSTR_0_2:
	    break;

	case FUN:
	case THUNK:
	    scavenge_srt(info);
	    // fall through

	case CONSTR:
	case WEAK:
	case FOREIGN:
	case STABLE_NAME:
	case BCO:
	    {
		StgPtr end;

		end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
		for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		break;
	    }

	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;

	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    }
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    // ignore these
	    failed_to_evac = rtsFalse;
	    break;

	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case ARR_WORDS:
	    break;

	case BLACKHOLE_BQ:
	    {
		StgBlockingQueue *bh = (StgBlockingQueue *)p;
		(StgClosure *)bh->blocking_queue =
		    evacuate((StgClosure *)bh->blocking_queue);
		failed_to_evac = rtsFalse;
		break;
	    }

	case THUNK_SELECTOR:
	    {
		StgSelector *s = (StgSelector *)p;
		s->selectee = evacuate(s->selectee);
		break;
	    }

	case AP_UPD: // same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	    {
		StgPAP* pap = (StgPAP *)p;

		pap->fun = evacuate(pap->fun);
		scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
		break;
	    }

	case MUT_ARR_PTRS:
	    // follow everything
	    {
		StgPtr next;

		evac_gen = 0;		// repeatedly mutable
		next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse; // mutable anyhow.
		break;
	    }

	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	    {
		StgPtr next;

		next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		break;
	    }

	case TSO:
	    {
		StgTSO *tso = (StgTSO *)p;
		evac_gen = 0;
		scavengeTSO(tso);
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse;
		break;
	    }
2492 case RBH: // cf. BLACKHOLE_BQ
2495 nat size, ptrs, nonptrs, vhs;
2497 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2499 StgRBH *rbh = (StgRBH *)p;
2500 (StgClosure *)rbh->blocking_queue =
2501 evacuate((StgClosure *)rbh->blocking_queue);
2502 recordMutable((StgMutClosure *)rbh);
2503 failed_to_evac = rtsFalse; // mutable anyhow.
2505 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2506 p, info_type(p), (StgClosure *)rbh->blocking_queue));
	case BLOCKED_FETCH:
	{
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
			   bf, info_type((StgClosure *)bf),
			   bf->node, info_type(bf->node)));
	    break;
	}
	case FETCH_ME:
	    break; // nothing to do in this case

	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s) exciting, isn't it",
			   p, info_type((StgClosure *)p)));
	    break;
	}
#endif // PAR
	default:
	    barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
		 info->type, p);
	}

	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    mkMutCons((StgClosure *)p, &generations[evac_gen]);
	}

    } // while (!mark_stack_empty())
}
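
/* Illustrative sketch (not part of the RTS; all "toy" names are invented
 * for this example): the mark-stack discipline used by
 * scavenge_mark_stack() above, modelled on a toy heap.  Objects are
 * marked when first discovered and pushed; the loop drains the stack,
 * so objects discovered while scanning get scanned in turn.
 */
#if 0
typedef struct ToyObj_ {
    int marked;
    struct ToyObj_ *fields[2];
} ToyObj;

#define TOY_STACK_SIZE 1024
static ToyObj *toy_stack[TOY_STACK_SIZE];
static unsigned int toy_sp = 0;

static void toy_push(ToyObj *o)
{
    // cf. evacuate(): mark on first visit, defer scanning the fields
    if (o != NULL && !o->marked && toy_sp < TOY_STACK_SIZE) {
	o->marked = 1;
	toy_stack[toy_sp++] = o;
    }
}

static void toy_scavenge_mark_stack(void)
{
    while (toy_sp > 0) {			// cf. while (!mark_stack_empty())
	ToyObj *o = toy_stack[--toy_sp];	// cf. pop_mark_stack()
	toy_push(o->fields[0]);
	toy_push(o->fields[1]);
    }
}
#endif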
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgClosure *p)
{
    const StgInfoTable *info;
    rtsBool no_luck;

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

    info = get_itbl(p);

    switch (info -> type) {

    case FUN:
    case FUN_1_0:			// hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case FOREIGN:
    case IND_PERM:
    case IND_OLDGEN_PERM:
    {
	StgPtr q, end;

	end = (P_)p->payload + info->layout.payload.ptrs;
	for (q = (P_)p->payload; q < end; q++) {
	    (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
    }
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	break;

    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	break;
    }
    case AP_UPD: /* same as PAPs */
    case PAP:
	/* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
    {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	break;
    }
    case IND_OLDGEN:
	/* This might happen if for instance a MUT_CONS was pointing to a
	 * THUNK which has since been updated.  The IND_OLDGEN will
	 * be on the mutable list anyway, so we don't need to do anything
	 * here.
	 */
	break;

    case MUT_ARR_PTRS_FROZEN:
    {
	// follow everything
	StgPtr q, next;

	q = (StgPtr)p;
	next = q + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (q = (P_)((StgMutArrPtrs *)p)->payload; q < next; q++) {
	    (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
    }
    default:
	barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = failed_to_evac;
    failed_to_evac = rtsFalse;
    return (no_luck);
}
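
/* Illustrative sketch (not part of the RTS; toy names invented for the
 * example): the failed_to_evac protocol that scavenge_one() relies on.
 * evacuate() sets a global flag when it cannot move an object into the
 * generation requested by evac_gen; the caller samples the flag and
 * resets it, exactly as scavenge_one() does above.
 */
#if 0
static int toy_failed_to_evac = 0;	// cf. failed_to_evac
static int toy_evac_gen = 0;		// cf. evac_gen

static void toy_evacuate(int obj_gen)
{
    // an object already living in a younger generation than the one
    // requested cannot be promoted in place: record the failure
    if (obj_gen < toy_evac_gen) {
	toy_failed_to_evac = 1;
    }
}

static int toy_scavenge_one(int obj_gen)
{
    int no_luck;
    toy_evacuate(obj_gen);
    no_luck = toy_failed_to_evac;	// sample the flag ...
    toy_failed_to_evac = 0;		// ... and reset it for the next caller
    return no_luck;			// non-zero: object must stay "mutable"
}
#endif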
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
static void
scavenge_mut_once_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next, *new_list;

    p = gen->mut_once_list;
    new_list = END_MUT_LIST;
    next = p->mut_link;

    evac_gen = gen->no;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	switch(info->type) {
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	case IND_STATIC:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	     */
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);

#if 0 && defined(DEBUG)
	    if (RtsFlags.DebugFlags.gc)
	    /* Debugging code to print out the size of the thing we just
	     * promoted
	     */
	    {
		StgPtr start = gen->steps[0].scan;
		bdescr *start_bd = gen->steps[0].scan_bd;
		nat size = 0;
		scavenge(&gen->steps[0]);
		if (start_bd != gen->steps[0].scan_bd) {
		    size += (P_)BLOCK_ROUND_UP(start) - start;
		    start_bd = start_bd->link;
		    while (start_bd != gen->steps[0].scan_bd) {
			size += BLOCK_SIZE_W;
			start_bd = start_bd->link;
		    }
		    size += gen->steps[0].scan -
			(P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
		} else {
		    size = gen->steps[0].scan - start;
		}
		fprintf(stderr,"evac IND_OLDGEN: %d bytes\n", size * sizeof(W_));
	    }
#endif
	    /* failed_to_evac might happen if we've got more than two
	     * generations, we're collecting only generation 0, the
	     * indirection resides in generation 2 and the indirectee is
	     * in generation 1.
	     */
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = new_list;
		new_list = p;
	    } else {
		/* the mut_link field of an IND_STATIC is overloaded as the
		 * static link field too (it just so happens that we don't need
		 * both at the same time), so we need to NULL it out when
		 * removing this object from the mutable list because the static
		 * link fields are all assumed to be NULL before doing a major
		 * collection.
		 */
		p->mut_link = NULL;
	    }
	    continue;
	case MUT_CONS:
	    /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
	     * it from the mutable list if possible by promoting whatever it
	     * points to.
	     */
	    if (scavenge_one((StgClosure *)((StgMutVar *)p)->var) == rtsTrue) {
		/* didn't manage to promote everything, so put the
		 * MUT_CONS back on the list.  (scavenge_one() has already
		 * reset failed_to_evac, so test its result rather than
		 * the flag.)
		 */
		p->mut_link = new_list;
		new_list = p;
	    }
	    continue;

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
	}
    }
    gen->mut_once_list = new_list;
}
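
/* Illustrative sketch (not part of the RTS; toy names invented for the
 * example): the list-rebuilding pattern used by scavenge_mut_once_list()
 * above.  Walk a singly linked list, cons the survivors onto new_list,
 * and install new_list as the whole list afterwards.  Cells that are
 * dropped get a NULL link, which doubles as clearing the shared static
 * link field of an IND_STATIC.
 */
#if 0
typedef struct ToyMut_ {
    struct ToyMut_ *mut_link;
    int still_mutable;
} ToyMut;

static ToyMut *toy_filter_mut_list(ToyMut *list)
{
    ToyMut *p, *next, *new_list = NULL;	// NULL plays END_MUT_LIST

    for (p = list; p != NULL; p = next) {
	next = p->mut_link;		// read the link before overwriting it
	if (p->still_mutable) {
	    p->mut_link = new_list;	// keep: cons onto the new list
	    new_list = p;
	} else {
	    p->mut_link = NULL;		// drop: clear the link field
	}
    }
    return new_list;
}
#endif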
static void
scavenge_mutable_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next;

    p = gen->saved_mut_list;
    next = p->mut_link;

    evac_gen = 0;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	switch(info->type) {
	case MUT_ARR_PTRS:
	    // follow everything
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	{
	    StgPtr end, q;

	    end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);
	    }
	    continue;
	}
	case MUT_VAR:
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;
	case MVAR:
	{
	    StgMVar *mvar = (StgMVar *)p;
	    (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;
	}
	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;

	    scavengeTSO(tso);

	    /* Don't take this TSO off the mutable list - it might still
	     * point to some younger objects (because we set evac_gen to 0
	     * above).
	     */
	    tso->mut_link = gen->mut_list;
	    gen->mut_list = (StgMutClosure *)tso;
	    continue;
	}
	case BLACKHOLE_BQ:
	{
	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;
	}
	    /* Happens if a BLACKHOLE_BQ in the old generation is updated:
	     */
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	     */
	    evac_gen = gen->no;
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    evac_gen = 0;

	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = gen->mut_once_list;
		gen->mut_once_list = p;
	    } else {
		p->mut_link = NULL;
	    }
	    continue;
#if defined(PAR)
	// HWL: check whether all of these are necessary

	case RBH: // cf. BLACKHOLE_BQ
	{
	    // nat size, ptrs, nonptrs, vhs;
	    // char str[GENERAL_INFO_PTR_BUF_SIZE];
	    // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	    StgRBH *rbh = (StgRBH *)p;
	    (StgClosure *)rbh->blocking_queue =
		evacuate((StgClosure *)rbh->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)rbh);
	    }
	    // ToDo: use size of reverted closure here!
	    p += BLACKHOLE_sizeW();
	    break;
	}
	case BLOCKED_FETCH:
	{
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    }
	    p += sizeofW(StgBlockedFetch);
	    break;
	}
	case REMOTE_REF:
	    barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));

	case FETCH_ME:
	    p += sizeofW(StgFetchMe);
	    break; // nothing to do in this case
	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    }
	    p += sizeofW(StgFetchMeBlockingQueue);
	    break;
	}
#endif // PAR

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
	}
    }
}
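
/* Illustrative sketch (not part of the RTS; toy names invented for the
 * example): the remembered-set invariant that both mutable-list
 * scavengers above maintain.  An old-generation object that may point
 * into a younger generation must sit on a mutable list, so that a minor
 * collection can find the young object without scanning the whole old
 * generation (cf. recordMutable()/recordOldToNewPtrs()).
 */
#if 0
typedef struct ToyGenObj_ {
    int gen_no;
    int on_mut_list;
    struct ToyGenObj_ *field;
    struct ToyGenObj_ *mut_link;
} ToyGenObj;

static ToyGenObj *toy_mut_list = NULL;	// per-generation in the real RTS

static void toy_write_field(ToyGenObj *obj, ToyGenObj *target)
{
    obj->field = target;
    // record an old-to-new pointer the first time we create one
    if (target != NULL && target->gen_no < obj->gen_no && !obj->on_mut_list) {
	obj->on_mut_list = 1;
	obj->mut_link = toy_mut_list;
	toy_mut_list = obj;
    }
}
#endif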
static void
scavenge_static(void)
{
    StgClosure* p = static_objects;
    const StgInfoTable *info;

    /* Always evacuate straight to the oldest generation for static
     * objects */
    evac_gen = oldest_gen->no;

    /* keep going until we've scavenged all the objects on the linked
       list... */
    while (p != END_OF_STATIC_LIST) {

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	/* Take this object *off* the static_objects list,
	 * and put it on the scavenged_static_objects list.
	 */
	static_objects = STATIC_LINK(info,p);
	STATIC_LINK(info,p) = scavenged_static_objects;
	scavenged_static_objects = p;

	switch (info -> type) {
	case IND_STATIC:
	{
	    StgInd *ind = (StgInd *)p;
	    ind->indirectee = evacuate(ind->indirectee);

	    /* might fail to evacuate it, in which case we have to pop it
	     * back on the mutable list (and take it off the
	     * scavenged_static list because the static link and mut link
	     * pointers are one and the same).
	     */
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		scavenged_static_objects = STATIC_LINK(info,p);
		((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
		oldest_gen->mut_once_list = (StgMutClosure *)ind;
	    }
	    break;
	}
	case THUNK_STATIC:
	case FUN_STATIC:
	    scavenge_srt(info);
	    // fall through

	case CONSTR_STATIC:
	{
	    StgPtr q, next;

	    next = (P_)p->payload + info->layout.payload.ptrs;
	    // evacuate the pointers
	    for (q = (P_)p->payload; q < next; q++) {
		(StgClosure *)*q = evacuate((StgClosure *)*q);
	    }
	    break;
	}

	default:
	    barf("scavenge_static: strange closure %d", (int)(info->type));
	}
	ASSERT(failed_to_evac == rtsFalse);

	/* get the next static object from the list.  Remember, there might
	 * be more stuff on this list now that we've done some evacuating!
	 * (static_objects is a global)
	 */
	p = static_objects;
    }
}
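
/* Illustrative sketch (not part of the RTS; toy names invented for the
 * example): why scavenge_static() re-reads the global static_objects
 * each time around its loop.  The list is a worklist: evacuating may
 * push further static objects onto it, so we keep popping from the
 * (possibly grown) head until the list is empty.
 */
#if 0
typedef struct ToyStatic_ {
    struct ToyStatic_ *static_link;
} ToyStatic;

static ToyStatic *toy_static_objects = NULL;	// worklist head
static ToyStatic *toy_scavenged_statics = NULL;	// done list

static void toy_scavenge_static(void)
{
    ToyStatic *p = toy_static_objects;
    while (p != NULL) {			// NULL plays END_OF_STATIC_LIST
	// unlink from the worklist, push onto the done list
	toy_static_objects = p->static_link;
	p->static_link = toy_scavenged_statics;
	toy_scavenged_statics = p;

	// ... scavenging p here may push new entries onto
	// toy_static_objects, just as evacuate() does above ...

	p = toy_static_objects;		// re-read: the list may have grown
    }
}
#endif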
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    StgPtr q;
    const StgInfoTable* info;
    StgWord bitmap;

    //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));

    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with either a pending argument section or an
     * activation record.
     */

    while (p < stack_end) {
	q = *(P_ *)p;

	// If we've got a tag, skip over that many words on the stack
	if (IS_ARG_TAG((W_)q)) {
	    p += ARG_SIZE(q);
	    p++; continue;
	}

	/* Is q a pointer to a closure?
	 */
	if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
	    if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
		ASSERT(closure_STATIC((StgClosure *)q));
	    }
	    // otherwise, must be a pointer into the allocation space.
#endif
	    (StgClosure *)*p = evacuate((StgClosure *)q);
	    p++;
	    continue;
	}
	/*
	 * Otherwise, q must be the info pointer of an activation
	 * record.  All activation records have 'bitmap' style layout
	 * info.
	 */
	info = get_itbl((StgClosure *)p);

	switch (info->type) {

	    // Dynamic bitmap: the mask is stored on the stack
	case RET_DYN:
	    bitmap = ((StgRetDyn *)p)->liveness;
	    p      = (P_)&((StgRetDyn *)p)->payload[0];
	    goto small_bitmap;

	    // probably a slow-entry point return address:
	case FUN:
	case FUN_STATIC:
	{
#if 0
	    StgPtr old_p = p;
	    p++; p++;
	    IF_DEBUG(sanity,
		     belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
			   old_p, p, old_p+1));
#else
	    p++; // what if FHS!=1 !? -- HWL
#endif
	    goto follow_srt;
	}
	    /* Specialised code for update frames, since they're so common.
	     * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
	     * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
	     */
	case UPDATE_FRAME:
	{
	    StgUpdateFrame *frame = (StgUpdateFrame *)p;

	    p += sizeofW(StgUpdateFrame);

#ifndef not_yet
	    frame->updatee = evacuate(frame->updatee);
	    continue;
#else // specialised code for update frames, not sure if it's worth it.
	    StgClosure *to;
	    nat type = get_itbl(frame->updatee)->type;

	    if (type == EVACUATED) {
		frame->updatee = evacuate(frame->updatee);
		continue;
	    } else {
		bdescr *bd = Bdescr((P_)frame->updatee);
		step *stp;
		if (bd->gen_no > N) {
		    if (bd->gen_no < evac_gen) {
			failed_to_evac = rtsTrue;
		    }
		    continue;
		}

		// Don't promote blackholes
		stp = bd->step;
		if (!(stp->gen_no == 0 &&
		      stp->no != 0 &&
		      stp->no == stp->gen->n_steps-1)) {
		    stp = stp->to;
		}

		switch (type) {
		case BLACKHOLE:
		case CAF_BLACKHOLE:
		    to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
				  sizeofW(StgHeader), stp);
		    frame->updatee = to;
		    continue;
		case BLACKHOLE_BQ:
		    to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
		    frame->updatee = to;
		    recordMutable((StgMutClosure *)to);
		    continue;
		default:
		    /* will never be SE_{,CAF_}BLACKHOLE, since we
		       don't push an update frame for single-entry thunks.  KSW 1999-01. */
		    barf("scavenge_stack: UPDATE_FRAME updatee");
		}
	    }
#endif
	}
	    // small bitmap (< 32 entries, or 64 on a 64-bit machine)
	case STOP_FRAME:
	case CATCH_FRAME:
	case SEQ_FRAME:
	case RET_BCO:
	case RET_SMALL:
	case RET_VEC_SMALL:
	    bitmap = info->layout.bitmap;
	    p++;
	    // this assumes that the payload starts immediately after the info-ptr
	small_bitmap:
	    while (bitmap != 0) {
		if ((bitmap & 1) == 0) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		p++;
		bitmap = bitmap >> 1;
	    }

	follow_srt:
	    scavenge_srt(info);
	    continue;
	    // large bitmap (> 32 entries)
	case RET_BIG:
	case RET_VEC_BIG:
	{
	    StgPtr q;
	    StgLargeBitmap *large_bitmap;
	    nat i;

	    large_bitmap = info->layout.large_bitmap;
	    p++;

	    for (i=0; i<large_bitmap->size; i++) {
		bitmap = large_bitmap->bitmap[i];
		q = p + sizeof(W_) * 8;
		while (bitmap != 0) {
		    if ((bitmap & 1) == 0) {
			(StgClosure *)*p = evacuate((StgClosure *)*p);
		    }
		    p++;
		    bitmap = bitmap >> 1;
		}
		if (i+1 < large_bitmap->size) {
		    while (p < q) {
			(StgClosure *)*p = evacuate((StgClosure *)*p);
			p++;
		    }
		}
	    }

	    // and don't forget to follow the SRT
	    goto follow_srt;
	}

	default:
	    barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
	}
    }
}
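
/* Illustrative sketch (not part of the RTS; toy names invented for the
 * example): bitmap-driven scanning as in the RET_SMALL case above.
 * Each bit of the bitmap describes one stack word; a *clear* bit means
 * "this word is a live pointer, follow it", a set bit means
 * "non-pointer, skip it".
 */
#if 0
typedef unsigned long ToyWord;

static void toy_scan_small_bitmap(ToyWord *payload, ToyWord bitmap,
				  void (*follow)(ToyWord *slot))
{
    while (bitmap != 0) {
	if ((bitmap & 1) == 0) {	// clear bit: live pointer slot
	    follow(payload);
	}
	payload++;
	bitmap >>= 1;			// move to the next slot's bit
    }
}
#endif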
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large(step *stp)
{
    bdescr *bd;
    StgPtr p, q;
    const StgInfoTable* info;
    nat saved_evac_gen = evac_gen; // used for temporarily changing evac_gen

    bd = stp->new_large_objects;

    for (; bd != NULL; bd = stp->new_large_objects) {

	/* take this object *off* the large objects list and put it on
	 * the scavenged large objects list.  This is so that we can
	 * treat new_large_objects as a stack and push new objects on
	 * the front when evacuating.
	 */
	stp->new_large_objects = bd->link;
	dbl_link_onto(bd, &stp->scavenged_large_objects);

	p = bd->start;
	q = p;
	info = get_itbl((StgClosure *)p);

	// only certain objects can be "large"...
	switch (info->type) {
	case ARR_WORDS:
	    // nothing to follow
	    break;

	case MUT_ARR_PTRS:
	{
	    // follow everything
	    StgPtr next;

	    evac_gen = 0;		// repeatedly mutable
	    recordMutable((StgMutClosure *)p);
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;
	}
	case MUT_ARR_PTRS_FROZEN:
	{
	    // follow everything
	    StgPtr next;

	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}
	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;

	    evac_gen = 0;		// repeatedly mutable
	    scavengeTSO(tso);
	    recordMutable((StgMutClosure *)tso);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;
	}
	case AP_UPD:
	case PAP:
	{
	    StgPAP* pap = (StgPAP *)p;

	    pap->fun = evacuate(pap->fun);
	    scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	    break;
	}

	default:
	    barf("scavenge_large: unknown/strange object %d", (int)(info->type));
	}

	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    mkMutCons((StgClosure *)q, &generations[evac_gen]);
	}
    }
}
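
/* Illustrative sketch (not part of the RTS; toy types invented for the
 * example, and simpler than the real bdescr): the shape of
 * dbl_link_onto() as used above - pushing a block descriptor onto the
 * front of a doubly linked list in O(1).
 */
#if 0
typedef struct ToyBd_ {
    struct ToyBd_ *link;	// forward link
    struct ToyBd_ *back;	// backward link
} ToyBd;

static void toy_dbl_link_onto(ToyBd *bd, ToyBd **list)
{
    bd->link = *list;
    bd->back = NULL;
    if (*list != NULL) {
	(*list)->back = bd;
    }
    *list = bd;
}
#endif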
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
    StgClosure* p;
    StgClosure* link;
    const StgInfoTable *info;

    for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
	info = get_itbl(p);
	link = STATIC_LINK(info, p);
	STATIC_LINK(info,p) = NULL;
    }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */

static void
zero_mutable_list( StgMutClosure *first )
{
    StgMutClosure *next, *c;

    for (c = first; c != END_MUT_LIST; c = next) {
	next = c->mut_link;
	c->mut_link = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void RevertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	c->header.info = c->saved_info;
	c->saved_info = NULL;
	// could, but not necessary: c->static_link = NULL;
    }
}
void scavengeCAFs( void )
{
    StgIndStatic *c;

    evac_gen = 0;
    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	c->indirectee = evacuate(c->indirectee);
    }
}
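
/* Illustrative sketch (not part of the RTS; toy names invented for the
 * example): the info-pointer swap behind the CAF code above.  Entering
 * a CAF saves its original info pointer in saved_info; reverting
 * restores it, turning the CAF back into an unevaluated thunk.
 */
#if 0
typedef struct ToyCaf_ {
    const void *info;		// current info pointer
    const void *saved_info;	// original info pointer, saved at entry
    struct ToyCaf_ *static_link;
} ToyCaf;

static void toy_revert_cafs(ToyCaf *list)
{
    ToyCaf *c;
    for (c = list; c != NULL; c = c->static_link) {
	c->info = c->saved_info;	// cf. c->header.info = c->saved_info
	c->saved_info = NULL;
    }
}
#endif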
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#if 0 && defined(DEBUG)

static void
gcCAFs(void)
{
    StgClosure*  p;
    StgClosure** pp;
    const StgInfoTable *info;
    nat i;

    i = 0;
    p = caf_list;
    pp = &caf_list;

    while (p != NULL) {

	info = get_itbl(p);

	ASSERT(info->type == IND_STATIC);

	if (STATIC_LINK(info,p) == NULL) {
	    IF_DEBUG(gccafs, fprintf(stderr, "CAF gc'd at 0x%04x\n", (int)p));
	    // black hole it
	    SET_INFO(p,&stg_BLACKHOLE_info);
	    p = STATIC_LINK2(info,p);
	    *pp = p;
	} else {
	    pp = &STATIC_LINK2(info,p);
	    p = *pp;
	    i++;
	}
    }

    // fprintf(stderr, "%d CAFs live\n", i);
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
static void
threadLazyBlackHole(StgTSO *tso)
{
    StgUpdateFrame *update_frame;
    StgBlockingQueue *bh;
    StgPtr stack_end;

    stack_end = &tso->stack[tso->stack_size];
    update_frame = tso->su;

    while (1) {
	switch (get_itbl(update_frame)->type) {

	case CATCH_FRAME:
	    update_frame = ((StgCatchFrame *)update_frame)->link;
	    break;

	case UPDATE_FRAME:
	    bh = (StgBlockingQueue *)update_frame->updatee;

	    /* if the thunk is already blackholed, it means we've also
	     * already blackholed the rest of the thunks on this stack,
	     * so we can stop early.
	     *
	     * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
	     * don't interfere with this optimisation.
	     */
	    if (bh->header.info == &stg_BLACKHOLE_info) {
		return;
	    }

	    if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
		bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
		SET_INFO(bh,&stg_BLACKHOLE_info);
	    }

	    update_frame = update_frame->link;
	    break;

	case SEQ_FRAME:
	    update_frame = ((StgSeqFrame *)update_frame)->link;
	    break;

	case STOP_FRAME:
	    return;
	default:
	    barf("threadLazyBlackHole");
	}
    }
}
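
/* Illustrative sketch (not part of the RTS; toy names invented for the
 * example): the early-exit walk in threadLazyBlackHole() above.  Update
 * frames form a chain through their link fields; once we meet an
 * updatee that is already blackholed, every older frame was handled on
 * an earlier pass, so we can stop.
 */
#if 0
typedef struct ToyFrame_ {
    struct ToyFrame_ *link;
    int *updatee;		// toy thunk: non-zero means "blackholed"
} ToyFrame;

static void toy_lazy_blackhole(ToyFrame *f)
{
    for (; f != NULL; f = f->link) {
	if (*f->updatee != 0) {
	    return;		// already blackholed: the rest was done earlier
	}
	*f->updatee = 1;	// cf. SET_INFO(bh, &stg_BLACKHOLE_info)
    }
}
#endif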
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */

static void
threadSqueezeStack(StgTSO *tso)
{
    lnat displacement = 0;
    StgUpdateFrame *frame;
    StgUpdateFrame *next_frame;			// Temporally next
    StgUpdateFrame *prev_frame;			// Temporally previous
    StgPtr bottom;
    rtsBool prev_was_update_frame;
#if DEBUG
    StgUpdateFrame *top_frame;
    nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
	bhs=0, squeezes=0;
    void printObj( StgClosure *obj ); // from Printer.c

    top_frame = tso->su;
#endif

    bottom = &(tso->stack[tso->stack_size]);
    frame = tso->su;

    /* There must be at least one frame, namely the STOP_FRAME.
     */
    ASSERT((P_)frame < bottom);
    /* Walk down the stack, reversing the links between frames so that
     * we can walk back up as we squeeze from the bottom.  Note that
     * next_frame and prev_frame refer to next and previous as they were
     * added to the stack, rather than the way we see them in this
     * walk. (It makes the next loop less confusing.)
     *
     * Stop if we find an update frame pointing to a black hole
     * (see comment in threadLazyBlackHole()).
     */

    next_frame = NULL;
    // bottom - sizeof(StgStopFrame) is the STOP_FRAME
    while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
	prev_frame = frame->link;
	frame->link = next_frame;
	next_frame = frame;
	frame = prev_frame;
#if DEBUG
	IF_DEBUG(sanity,
		 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
		     printObj((StgClosure *)prev_frame);
		     barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
			  frame, prev_frame);
		 })
	switch (get_itbl(frame)->type) {
	case UPDATE_FRAME:
	    upd_frames++;
	    if (frame->updatee->header.info == &stg_BLACKHOLE_info)
		bhs++;
	    break;
	case STOP_FRAME:
	    stop_frames++;
	    break;
	case CATCH_FRAME:
	    catch_frames++;
	    break;
	case SEQ_FRAME:
	    seq_frames++;
	    break;
	default:
	    printObj((StgClosure *)prev_frame);
	    barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
		 frame, prev_frame);
	}
#endif
	if (get_itbl(frame)->type == UPDATE_FRAME
	    && frame->updatee->header.info == &stg_BLACKHOLE_info) {
	    break;
	}
    }
    /* Now, we're at the bottom.  Frame points to the lowest update
     * frame on the stack, and its link actually points to the frame
     * above. We have to walk back up the stack, squeezing out empty
     * update frames and turning the pointers back around on the way
     * back up.
     *
     * The bottom-most frame (the STOP_FRAME) has not been altered, and
     * we never want to eliminate it anyway.  Just walk one step up
     * before starting to squeeze. When you get to the topmost frame,
     * remember that there are still some words above it that might have
     * to be moved.
     */

    prev_frame = frame;
    frame = next_frame;

    prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

    /*
     * Loop through all of the frames (everything except the very
     * bottom).  Things are complicated by the fact that we have
     * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
     * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
     */
    while (frame != NULL) {
	StgPtr sp;
	StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
	rtsBool is_update_frame;

	next_frame = frame->link;
	is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

	/* Check to see if
	 *   1. both the previous and current frame are update frames
	 *   2. the current frame is empty
	 */
	if (prev_was_update_frame && is_update_frame &&
	    (P_)prev_frame == frame_bottom + displacement) {

	    // Now squeeze out the current frame
	    StgClosure *updatee_keep   = prev_frame->updatee;
	    StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
	    IF_DEBUG(gc, fprintf(stderr, "@@ squeezing frame at %p\n", frame));
	    squeezes++;
#endif
	    /* Deal with blocking queues.  If both updatees have blocked
	     * threads, then we should merge the queues into the update
	     * frame that we're keeping.
	     *
	     * Alternatively, we could just wake them up: they'll just go
	     * straight to sleep on the proper blackhole!  This is less code
	     * and probably less bug prone, although it's probably much
	     * slower --SDM
	     */
#if 0 // do it properly...
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
	    if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
		|| GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
		) {
		// Sigh.  It has one.  Don't lose those threads!
		if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
		    // Urgh.  Two queues.  Merge them.
		    P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

		    while (keep_tso->link != END_TSO_QUEUE) {
			keep_tso = keep_tso->link;
		    }
		    keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

		} else {
		    // For simplicity, just swap the BQ for the BH
		    P_ temp = updatee_keep;

		    updatee_keep = updatee_bypass;
		    updatee_bypass = temp;

		    // Record the swap in the kept frame (below)
		    prev_frame->updatee = updatee_keep;
		}
	    }
#endif
	    TICK_UPD_SQUEEZED();
	    /* wasn't there something about update squeezing and ticky to be
	     * sorted out?  oh yes: we aren't counting each enter properly
	     * in this case.  See the log somewhere.  KSW 1999-04-21
	     *
	     * Check two things: that the two update frames don't point to
	     * the same object, and that the updatee_bypass isn't already an
	     * indirection.  Both of these cases only happen when we're in a
	     * black hole-style loop (and there are multiple update frames
	     * on the stack pointing to the same closure), but they can both
	     * screw us up if we don't check.
	     */
	    if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
		// this wakes the threads up
		UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
	    }

	    sp = (P_)frame - 1;		// sp = stuff to slide
	    displacement += sizeofW(StgUpdateFrame);
	} else {
	    // No squeeze for this frame
	    sp = frame_bottom - 1;	// Keep the current frame

	    /* Do lazy black-holing.
	     */
	    if (is_update_frame) {
		StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
		if (bh->header.info != &stg_BLACKHOLE_info &&
		    bh->header.info != &stg_BLACKHOLE_BQ_info &&
		    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		    fprintf(stderr,"Unexpected lazy BHing required at 0x%04x\n",(int)bh);
#endif
#ifdef DEBUG
		    /* zero out the slop so that the sanity checker can tell
		     * where the next closure is.
		     */
		    {
			StgInfoTable *info = get_itbl(bh);
			nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
			/* don't zero out slop for a THUNK_SELECTOR, because its layout
			 * info is used for a different purpose, and it's exactly the
			 * same size as a BLACKHOLE in any case.
			 */
			if (info->type != THUNK_SELECTOR) {
			    for (i = np; i < np + nw; i++) {
				((StgClosure *)bh)->payload[i] = 0;
			    }
			}
		    }
#endif
		    SET_INFO(bh,&stg_BLACKHOLE_info);
		}
	    }

	    // Fix the link in the current frame (should point to the frame below)
	    frame->link = prev_frame;
	    prev_was_update_frame = is_update_frame;
	}
	// Now slide all words from sp up to the next frame

	if (displacement > 0) {
	    P_ next_frame_bottom;

	    if (next_frame != NULL)
		next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
	    else
		next_frame_bottom = tso->sp - 1;

#if DEBUG
	    IF_DEBUG(gc,
		     fprintf(stderr, "sliding [%p, %p] by %ld\n", sp, next_frame_bottom,
			     displacement))
#endif

	    while (sp >= next_frame_bottom) {
		sp[displacement] = *sp;
		sp -= 1;
	    }
	}
	(P_)prev_frame = (P_)frame + displacement;
	frame = next_frame;
    }
    tso->sp += displacement;
    tso->su = prev_frame;
#if DEBUG
    IF_DEBUG(gc,
	     fprintf(stderr, "@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames\n",
		     squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
#endif
}
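
/* Illustrative sketch (not part of the RTS; toy code invented for the
 * example): the sliding step of threadSqueezeStack() above.  After a
 * frame is squeezed out we owe the stack `displacement` words of slack;
 * every remaining word between the frames is copied towards higher
 * addresses (stacks grow downwards), exactly the
 * sp[displacement] = *sp loop above.
 */
#if 0
typedef unsigned long ToyWord;

static void toy_slide_up(ToyWord *sp, ToyWord *low_bound,
			 unsigned long displacement)
{
    while (sp >= low_bound) {
	sp[displacement] = *sp;		// copy each word up by the slack
	sp--;
    }
}
#endif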
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
    if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
	threadSqueezeStack(tso);	// does black holing too
    else
	threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
void
printMutOnceList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_once_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}

void
printMutableList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	fprintf(stderr, "%p (%s), ",
		p, info_type((StgClosure *)p));
    }
    fputc('\n', stderr);
}
static inline rtsBool
maybeLarge(StgClosure *closure)
{
    StgInfoTable *info = get_itbl(closure);

    /* closure types that may be found on the new_large_objects list;
       see scavenge_large */
    return (info->type == MUT_ARR_PTRS ||
	    info->type == MUT_ARR_PTRS_FROZEN ||
	    info->type == TSO ||
	    info->type == ARR_WORDS);
}
#endif // DEBUG