/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.110 2001/07/26 14:29:26 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;		// live static objects
StgClosure* scavenged_static_objects;	// static objects scavenged so far
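
/* For illustration, a minimal sketch (not itself part of the code paths
 * below, which handle several flavours of static closure) of how a
 * static closure p is consed onto the front of static_objects during a
 * major GC, using its static link field; cf. the CONSTR_STATIC case in
 * evacuate():
 *
 *     if (major_gc && STATIC_LINK(info,p) == NULL) {  // not on the list yet
 *         STATIC_LINK(info,p) = static_objects;       // link to old head
 *         static_objects = p;                         // p is the new head
 *     }
 *
 * Because END_OF_STATIC_LIST is a non-zero sentinel, a NULL link field
 * reliably means "not on the list".
 */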
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
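
/* Illustrative use of the depth bound (this mirrors the guard in the
 * THUNK_SELECTOR case of evacuate() below, currently disabled there -
 * see the JRS note at that spot):
 *
 *     if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
 *         thunk_selector_depth++;
 *         selectee = evacuate(selectee);
 *         thunk_selector_depth--;
 *     }
 *
 * Beyond the bound we simply stop chasing selectees, trading a little
 * space for bounded C-stack usage.
 */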
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         cleanup_weak_ptr_list   ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif

/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
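
/* Sketch of the overflow protocol, pieced together from the code below:
 * when push_mark_stack() would run past mark_splim, evacuate() sets
 * mark_stack_overflowed instead of pushing, and scavenge_mark_stack()
 * later falls back to a linear sweep of the compacted old generation,
 * resuming from oldgen_scan_bd/oldgen_scan.
 */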
static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either

       + to-space in the next higher step in that generation, if one exists,

       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.

       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
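
/* In outline (a sketch only; the real function below interleaves stats,
 * sanity checks and parallel-RTS hooks with these steps):
 *
 *     evacuate the roots (mutable lists, get_roots(), stable table, ...);
 *     do {
 *         scavenge static objects, the mark stack, and every step's
 *         to-space, oldest generation first;
 *     } while (something was evacuated || traverse_weak_ptr_list());
 *     free from-space; to-space becomes from-space; resize the nursery;
 */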
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->flags = BF_EVACUATED;	// it's a to-space block
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	nat bitmap_size; // in bytes
	bdescr *bitmap_bdescr;
	StgPtr bitmap;

	bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	if (bitmap_size > 0) {
	  bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
				     / BLOCK_SIZE);
	  stp->bitmap = bitmap_bdescr;
	  bitmap = bitmap_bdescr->start;

	  IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
			     bitmap_size, bitmap););

	  // don't forget to fill it with zeros!
	  memset(bitmap, 0, bitmap_size);

	  // for each block in this step, point to its bitmap from the
	  // block descriptor.
	  for (bd=stp->blocks; bd != NULL; bd = bd->link) {
	    bd->u.bitmap = bitmap;
	    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
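	    /* Sizing example (assuming 4k blocks and 32-bit words, i.e.
	     * BLOCK_SIZE = 4096 and sizeof(W_) = 4): one mark bit per
	     * word means each block needs 4096/4 = 1024 bits = 128 bytes
	     * of bitmap, which is exactly
	     * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE); the increment
	     * above advances the per-block bitmap pointer by the same
	     * amount, expressed in words.
	     */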
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	ASSERT(stp->blocks == NULL);
	bd->flags = 0;	// *not* a to-space block or a large object
	stp->hpLim = stp->hp + BLOCK_SIZE_W;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc && oldest_gen->steps[0].is_compacted) {
    mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
    mark_stack = (StgPtr *)mark_stack_bdescr->start;
    mark_sp = mark_stack;
    mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
    mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  // Do the mut-once lists first
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
     CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   * the CAF document.
   */
  extern void markHugsObjects(void);

  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity, checkStaticObjects(static_objects));
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen

    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
	(mark_stack_bdescr != NULL && !mark_stack_empty())) {
      scavenge_mark_stack();
      flag = rtsTrue;
    }

    for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
      for (st = generations[gen].n_steps; --st >= 0; ) {
	if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	  continue;
	}
	stp = &generations[gen].steps[st];
	evac_gen = gen;
	if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	  scavenge(stp);
	  flag = rtsTrue;
	}
	if (stp->new_large_objects != NULL) {
	  scavenge_large(stp);
	  flag = rtsTrue;
	}
      }
    }

    if (flag) { goto loop; }

    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }

  /* Final traversal of the weak pointer list (see comment by
   * cleanup_weak_ptr_list below).
   */
  cleanup_weak_ptr_list(&weak_ptr_list);

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  // Now see which stable names are still alive.

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	stp->hp_bd->free = stp->hp;
	stp->hp_bd->link = NULL;
      }
    }
  }

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && RtsFlags.GcFlags.compact) {
    // save number of blocks for stats
    oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
    compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    generations[g].collections++; // for stats

    for (s = 0; s < generations[g].n_steps; s++) {

      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	  stp->hp;
      }

      // for generations we collected...

      // rough calculation of garbage collected, for stats output
      if (stp->is_compacted) {
	collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
      } else {
	collected += stp->n_blocks * BLOCK_SIZE_W;
      }

      /* free old memory and shift to-space into from-space for all
       * the collected steps (except the allocation area).  These
       * freed blocks will probably be quickly recycled.
       */
      if (!(g == 0 && s == 0)) {
	if (stp->is_compacted) {
	  // for a compacted step, just shift the new to-space
	  // onto the front of the now-compacted existing blocks.
	  for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
	    bd->flags &= ~BF_EVACUATED;	// now from-space
	  }
	  // tack the new blocks on the end of the existing blocks
	  if (stp->blocks == NULL) {
	    stp->blocks = stp->to_blocks;
	  } else {
	    for (bd = stp->blocks; bd != NULL; bd = next) {
	      next = bd->link;
	      if (next == NULL) {
		bd->link = stp->to_blocks;
	      }
	    }
	  }
	  // add the new blocks to the block tally
	  stp->n_blocks += stp->n_to_blocks;
	} else {
	  freeChain(stp->blocks);
	  stp->blocks = stp->to_blocks;
	  stp->n_blocks = stp->n_to_blocks;
	  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
	    bd->flags &= ~BF_EVACUATED;	// now from-space
	  }
	}
	stp->to_blocks = NULL;
	stp->n_to_blocks = 0;
      }

      /* LARGE OBJECTS.  The current live large objects are chained on
       * scavenged_large, having been moved during garbage
       * collection from large_objects.  Any objects left on the
       * large_objects list are therefore dead, so we free them here.
       */
      for (bd = stp->large_objects; bd != NULL; bd = next) {
	next = bd->link;
	freeGroup(bd);
      }

      // update the count of blocks used by large objects
      for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	bd->flags &= ~BF_EVACUATED;
      }
      stp->large_objects = stp->scavenged_large_objects;
      stp->n_large_blocks = stp->n_scavenged_large_blocks;
      /* Set the maximum blocks for this generation, interpolating
       * between the maximum size of the oldest and youngest
       * generations.
       *
       *                oldgen_max_blocks * G
       * max_blocks  =  ----------------------
       *                 num_generations - 1
       *
       * (where G is this generation's number; cf. the code below).
       */
      generations[g].max_blocks = (oldest_gen->max_blocks * g)
	/ (RtsFlags.GcFlags.generations-1);

      generations[g].max_blocks = oldest_gen->max_blocks;
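
      /* Worked example (assumed numbers): with 5 generations and an
       * oldest-generation maximum of 1000 blocks, the interpolation
       * gives gen 1: 1000*1/4 = 250 blocks, gen 2: 500, gen 3: 750,
       * while the oldest generation keeps its own maximum.
       */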
      // for older generations...
      if (g > N) {

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }

  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
    oldest_gen->max_blocks =
      stg_max(oldest_gen->steps[0].n_blocks * RtsFlags.GcFlags.oldGenFactor,
	      RtsFlags.GcFlags.minOldGenSize);
    if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
      oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
      if (((int)oldest_gen->max_blocks -
	   (int)oldest_gen->steps[0].n_blocks) <
	  (RtsFlags.GcFlags.pcFreeHeap *
	   RtsFlags.GcFlags.maxHeapSize / 200)) {
	heapOverflow();
      }
    }
  }

  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
    freeGroup(mark_stack_bdescr);
  }

  // Free the bitmaps of any compacted steps we collected.
  for (g = 0; g <= N; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->is_compacted && stp->bitmap != NULL) {
	freeGroup(stp->bitmap);
      }
    }
  }

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
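
    /* Worked example (assumed numbers, oldGenFactor = 2): if L = 100
     * blocks survive the collection, the nursery below is resized to
     * 2L = 200 blocks, so the steady-state footprint is about 3L
     * (nursery plus live copy); only near maxHeapSize is the nursery
     * squeezed back towards a 2L total.
     */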
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {

      long adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);

  } else {

    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); 	// approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:       suggested - needed
       *            ----------------------------
       *             1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);

      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
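
      /* Worked example for the formula above (assumed numbers): with a
       * suggested heap of 6000 blocks, needed = 2000 and
       * g0s0_pcnt_kept = 30, we get (6000-2000)*100/130 = ~3076 blocks
       * of allocation area; at the next GC roughly 30% of it (about
       * 923 blocks) survives, and 3076 + 923 + 2000 stays close to the
       * 6000-block suggestion.
       */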
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
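
/* The interplay with the main loop, in outline (a sketch; see the code
 * in GarbageCollect() above):
 *
 *     do {
 *         scavenge everything reachable so far;
 *     } while (traverse_weak_ptr_list());   // keys may become reachable
 *
 * Each pass evacuates the values/finalizers of weaks whose keys turned
 * out to be alive; only when a pass evacuates nothing can the remaining
 * weak pointers safely be declared dead.
 */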
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* First, this weak pointer might have been evacuated.  If so,
     * remove the forwarding pointer from the weak_ptr_list.
     */
    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      next_w = w->link;
      // and put it on the new weak ptr list
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }

  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->link;
	*prev = next;
	continue;
      case ThreadKilled:
      case ThreadComplete:
	// finished or died.  The thread might still be alive, but we
	// don't keep it on the all_threads list.  Don't forget to
	// stub out its global_link field.
	next = t->global_link;
	t->global_link = END_TSO_QUEUE;
	*prev = next;
	continue;
      default:
	;
      }

      if (tmp == NULL) {
	// not alive (yet): leave this thread on the old_all_threads list.
	prev = &(t->global_link);
	next = t->global_link;
      } else {
	// alive: move this thread onto the all_threads list.
	next = t->global_link;
	t->global_link = all_threads;
	all_threads = t;
	*prev = next;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    cleanup_weak_ptr_list(&old_weak_ptr_list);
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
cleanup_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {

    if (get_itbl(w)->type == EVACUATED) {
      w = (StgWeak *)((StgEvacuated *)w)->evacuee;
      *last_w = w;
    }

    if ((Bdescr((P_)w)->flags & BF_EVACUATED) == 0) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
    }
    last_w = &(w->link);
  }
}

/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)p);
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
      return p;
    }

    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
      if (bd->flags & BF_EVACUATED) {
	return p;
      } else {
	return NULL;
      }
    }

    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
      return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		// rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
	p = (StgClosure *)((StgTSO *)p)->link;
	continue;
      }

    default:
      // dead.
      return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

static void
addBlock(step *stp)	// NB: helper name is an assumption; the original
			// signature line is elided from this listing
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}

static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_objects list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}

/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}

/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
       if  M > N       then leave it alone (it can't be evacuated)
       else            evac to step->to

   if  M < evac_gen    evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it was evacuated to.

   if  M >= evac_gen   do nothing
   if  M <  evac_gen   set failed_to_evac flag to indicate that we
                       didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
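
/* A sketch of the common path (the real code below adds many special
 * cases: large objects, compacted steps, thunk selectors, static
 * closures, PAR types, and so on):
 *
 *     bd = Bdescr((P_)q);
 *     if (bd->gen_no > N)  { ... return q; }   // too old to move
 *     stp = bd->step->to;                      // default destination
 *     return copy(q, size, stp);               // copy() redirects to
 *                                              // evac_gen if required
 */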
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
      info = get_itbl(q);
      if (info->type == TSO &&
	  ((StgTSO *)q)->what_next == ThreadRelocated) {
	q = (StgClosure *)((StgTSO *)q)->link;
	goto loop;
      }
      evacuate_large((P_)q);
      return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
      if (!is_marked((P_)q,bd)) {
	mark((P_)q,bd);
	if (mark_stack_full()) {
	  mark_stack_overflowed = rtsTrue;
	  reset_mark_stack();
	}
	push_mark_stack((P_)q);
      }
      return q;
    }

    stp = bd->step->to;
  }
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  switch (info -> type) {

  case MUT_VAR:
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;

  case CONSTR_0_1:
  {
    StgWord w = (StgWord)q->payload[0];
    if (q->header.info == Czh_con_info &&
	// unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	(StgChar)w <= MAX_CHARLIKE) {
      return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
    }
    if (q->header.info == Izh_con_info &&
	(StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
      return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
    }
    // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case IND_OLDGEN_PERM:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {

	{
	  StgWord offset = info->layout.selector_offset;

	  // check that the size is in range
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  // perform the selection!
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->flags & BF_EVACUATED) {
	      if (bd->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK_SELECTOR:
#if 0
	/* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	   something) to go into an infinite loop when the nightly
	   stage2 compiles PrelTup.lhs. */

	/* we can't recurse indefinitely in evacuate(), so set a
	 * limit on the number of times we can go around this
	 * loop.
	 */
	if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	  bd = Bdescr((P_)selectee);
	  if (!(bd->flags & BF_EVACUATED)) {
	    thunk_selector_depth++;
	    selectee = evacuate(selectee);
	    thunk_selector_depth--;
	    goto selector_loop;
	  }
	}
#endif
	// otherwise, fall through...

      case THUNK:
      case BLACKHOLE:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
	// not evaluated yet
	break;

#if defined(PAR)
	// a copy of the top-level cases below
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp);
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

      case FETCH_ME:
	ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;
#endif

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
      return copy(q,THUNK_SELECTOR_sizeW(),stp);
    }
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	&& ((StgIndStatic *)q)->saved_info == NULL
	&& IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    // just copy the block
    return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    // just copy the block
    return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }

#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
  ptrdiff_t diff;

  // relocate the stack pointers...
  diff = (StgPtr)dest - (StgPtr)src; // In *words*
  dest->sp = (StgPtr)dest->sp + diff;
  dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

  relocate_stack(dest, diff);
}

/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   location to another.
   -------------------------------------------------------------------------- */

relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
  }
}

static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs,
       since double indirections are required to get at those.  The code
       generator knows which is which when generating the SRT, so it stores
       the (indirect) reference to the DLL closure in the table by first
       adding one to it.  We check for this here, and undo the addition
       before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}

/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse; // mutable.
	p += sizeofW(StgMVar);
	break;
      }

      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

      p += sizeofW(StgHeader) + 1;
      break;

      p += sizeofW(StgHeader) + 2;
      break;

      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }

    case IND_PERM:
      if (stp->gen_no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      // fall through
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case MUT_VAR:
      evac_gen = 0;
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      evac_gen = saved_evac_gen;
      recordMutable((StgMutClosure *)p);
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case MUT_CONS:
      // ignore these
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case SE_CAF_BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
      }

    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }

    case AP_UPD: // same as PAPs
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }

    case ARR_WORDS:
      // nothing to follow
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      // follow everything
      {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
      }

    case MUT_ARR_PTRS_FROZEN:
      // follow everything
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
      }

#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse; // mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      failed_to_evac = rtsFalse;
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
2338 /* -----------------------------------------------------------------------------
2339 Scavenge everything on the mark stack.
2341 This is slightly different from scavenge():
2342 - we don't walk linearly through the objects, so the scavenger
2343 doesn't need to advance the pointer on to the next object.
2344 -------------------------------------------------------------------------- */
2347 scavenge_mark_stack(void)
2353 evac_gen = oldest_gen->no;
2354 saved_evac_gen = evac_gen;
2357 while (!mark_stack_empty()) {
2358 p = pop_mark_stack();
2360 info = get_itbl((StgClosure *)p);
2361 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2364 switch (info->type) {
2367 /* treat MVars specially, because we don't want to evacuate the
2368 * mut_link field in the middle of the closure.
2371 StgMVar *mvar = ((StgMVar *)p);
2373 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2374 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2375 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2376 evac_gen = saved_evac_gen;
2377 failed_to_evac = rtsFalse; // mutable.
2385 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2386 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2396 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2421 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2422 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2423 (StgClosure *)*p = evacuate((StgClosure *)*p);
2429 // don't need to do anything here: the only possible case
2430 // is that we're in a 1-space compacting collector, with
2431 // no "old" generation.
2435 case IND_OLDGEN_PERM:
2436 ((StgIndOldGen *)p)->indirectee =
2437 evacuate(((StgIndOldGen *)p)->indirectee);
2438 if (failed_to_evac) {
2439 recordOldToNewPtrs((StgMutClosure *)p);
2441 failed_to_evac = rtsFalse;
2446 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2447 evac_gen = saved_evac_gen;
2448 failed_to_evac = rtsFalse;
2453 failed_to_evac = rtsFalse;
2457 case SE_CAF_BLACKHOLE:
2465 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2466 (StgClosure *)bh->blocking_queue =
2467 evacuate((StgClosure *)bh->blocking_queue);
2468 failed_to_evac = rtsFalse;
2472 case THUNK_SELECTOR:
2474 StgSelector *s = (StgSelector *)p;
2475 s->selectee = evacuate(s->selectee);
2479 case AP_UPD: // same as PAPs
2481 /* Treat a PAP just like a section of stack, not forgetting to
2482 * evacuate the function pointer too...
2485 StgPAP* pap = (StgPAP *)p;
2487 pap->fun = evacuate(pap->fun);
2488 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2493 // follow everything
2497 evac_gen = 0; // repeatedly mutable
2498 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2499 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2500 (StgClosure *)*p = evacuate((StgClosure *)*p);
2502 evac_gen = saved_evac_gen;
2503 failed_to_evac = rtsFalse; // mutable anyhow.
case MUT_ARR_PTRS_FROZEN:
// follow everything
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);

StgTSO *tso = (StgTSO *)p;
evac_gen = saved_evac_gen;
failed_to_evac = rtsFalse;

case RBH: // cf. BLACKHOLE_BQ
nat size, ptrs, nonptrs, vhs;
StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
StgRBH *rbh = (StgRBH *)p;
(StgClosure *)rbh->blocking_queue =
evacuate((StgClosure *)rbh->blocking_queue);
recordMutable((StgMutClosure *)rbh);
failed_to_evac = rtsFalse; // mutable anyhow.
belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
p, info_type(p), (StgClosure *)rbh->blocking_queue));

StgBlockedFetch *bf = (StgBlockedFetch *)p;
// follow the pointer to the node which is being demanded
(StgClosure *)bf->node =
evacuate((StgClosure *)bf->node);
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
if (failed_to_evac) {
failed_to_evac = rtsFalse;
recordMutable((StgMutClosure *)bf);
belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
bf, info_type((StgClosure *)bf),
bf->node, info_type(bf->node)));

break; // nothing to do in this case

case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
if (failed_to_evac) {
failed_to_evac = rtsFalse;
recordMutable((StgMutClosure *)fmbq);
belch("@@ scavenge: %p (%s) exciting, isn't it",
p, info_type((StgClosure *)p)));

barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",

if (failed_to_evac) {
failed_to_evac = rtsFalse;
mkMutCons((StgClosure *)q, &generations[evac_gen]);

// mark the next bit to indicate "scavenged"
mark(q+1, Bdescr(q));
} // while (!mark_stack_empty())

// start a new linear scan if the mark stack overflowed at some point
if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
mark_stack_overflowed = rtsFalse;
oldgen_scan_bd = oldest_gen->steps[0].blocks;
oldgen_scan = oldgen_scan_bd->start;

if (oldgen_scan_bd) {
// push a new thing on the mark stack

// find a closure that is marked but not scavenged, and start
// scavenging it.
while (oldgen_scan < oldgen_scan_bd->free
&& !is_marked(oldgen_scan,oldgen_scan_bd)) {

if (oldgen_scan < oldgen_scan_bd->free) {

// already scavenged?
if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;

push_mark_stack(oldgen_scan);
// ToDo: bump the linear scan by the actual size of the object
oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;

oldgen_scan_bd = oldgen_scan_bd->link;
if (oldgen_scan_bd != NULL) {
oldgen_scan = oldgen_scan_bd->start;
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
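
/* Typical use, as in scavenge_mut_once_list() and scavenge_large()
 * below: the result reports whether some pointee could *not* be
 * promoted, i.e. whether the object must stay on a mutable list:
 *
 *     if (scavenge_one((StgPtr)p)) {
 *         // failed to promote everything: keep p on the list
 *         mkMutCons((StgClosure *)p, stp->gen);
 *     }
 */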
scavenge_one(StgPtr p)

const StgInfoTable *info;
nat saved_evac_gen = evac_gen;

ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
|| IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

info = get_itbl((StgClosure *)p);

switch (info->type) {

case FUN_1_0: // hardly worth specialising these guys

case IND_OLDGEN_PERM:

end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
(StgClosure *)*q = evacuate((StgClosure *)*q);

case SE_CAF_BLACKHOLE:

case THUNK_SELECTOR:
StgSelector *s = (StgSelector *)p;
s->selectee = evacuate(s->selectee);

// nothing to follow

// follow everything
evac_gen = 0; // repeatedly mutable
recordMutable((StgMutClosure *)p);
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
evac_gen = saved_evac_gen;
failed_to_evac = rtsFalse;

case MUT_ARR_PTRS_FROZEN:
// follow everything
next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
(StgClosure *)*p = evacuate((StgClosure *)*p);

StgTSO *tso = (StgTSO *)p;
evac_gen = 0; // repeatedly mutable
recordMutable((StgMutClosure *)tso);
evac_gen = saved_evac_gen;
failed_to_evac = rtsFalse;

StgPAP* pap = (StgPAP *)p;
pap->fun = evacuate(pap->fun);
scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);

// This might happen if for instance a MUT_CONS was pointing to a
// THUNK which has since been updated.  The IND_OLDGEN will
// be on the mutable list anyway, so we don't need to do anything
// here.

barf("scavenge_one: strange object %d", (int)(info->type));

no_luck = failed_to_evac;
failed_to_evac = rtsFalse;
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */
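
/* Worked example: suppose N == 0 and a mutable cell in generation 1
 * has been written to point at a fresh generation-0 object:
 *
 *     gen 1:  MUT_VAR ---var---> gen 0:  CONSTR
 *
 * Generation 1 is not being collected, so the MUT_VAR's entry on gen
 * 1's mutable list is the only root keeping the CONSTR alive; we
 * evacuate the pointee from here like any other root.  mut_once_list
 * holds objects mutated at most once (old-generation indirections,
 * MUT_CONSs), which can be dropped once their pointee is promoted;
 * mut_list holds genuinely mutable objects, which must stay listed.
 */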
scavenge_mut_once_list(generation *gen)

const StgInfoTable *info;
StgMutClosure *p, *next, *new_list;

p = gen->mut_once_list;
new_list = END_MUT_LIST;

failed_to_evac = rtsFalse;

for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

// make sure the info pointer is into text space
ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
|| IS_HUGS_CONSTR_INFO(GET_INFO(p))));

if (info->type==RBH)
info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

switch(info->type) {

case IND_OLDGEN_PERM:
/* Try to pull the indirectee into this generation, so we can
 * remove the indirection from the mutable list.
 */
((StgIndOldGen *)p)->indirectee =
evacuate(((StgIndOldGen *)p)->indirectee);
#if 0 && defined(DEBUG)
if (RtsFlags.DebugFlags.gc)
/* Debugging code to print out the size of the thing we just
 * evacuated
 */

StgPtr start = gen->steps[0].scan;
bdescr *start_bd = gen->steps[0].scan_bd;

scavenge(&gen->steps[0]);
if (start_bd != gen->steps[0].scan_bd) {
size += (P_)BLOCK_ROUND_UP(start) - start;
start_bd = start_bd->link;
while (start_bd != gen->steps[0].scan_bd) {
size += BLOCK_SIZE_W;
start_bd = start_bd->link;
size += gen->steps[0].scan -
(P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);

size = gen->steps[0].scan - start;

belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
/* failed_to_evac might happen if we've got more than two
 * generations, we're collecting only generation 0, the
 * indirection resides in generation 2 and the indirectee is
 * in generation 1.
 */
if (failed_to_evac) {
failed_to_evac = rtsFalse;
p->mut_link = new_list;

/* the mut_link field of an IND_STATIC is overloaded as the
 * static link field too (it just so happens that we don't need
 * both at the same time), so we need to NULL it out when
 * removing this object from the mutable list because the static
 * link fields are all assumed to be NULL before doing a major
 * GC.
 */

/* MUT_CONS is a kind of MUT_VAR, except that we try to remove
 * it from the mutable list if possible by promoting whatever it
 * points to.
 */
if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
/* didn't manage to promote everything, so put the
 * MUT_CONS back on the list.
 */
p->mut_link = new_list;

// shouldn't have anything else on the mutables list
barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));

gen->mut_once_list = new_list;
scavenge_mutable_list(generation *gen)

const StgInfoTable *info;
StgMutClosure *p, *next;

p = gen->saved_mut_list;

failed_to_evac = rtsFalse;

for (; p != END_MUT_LIST; p = next, next = p->mut_link) {

// make sure the info pointer is into text space
ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
|| IS_HUGS_CONSTR_INFO(GET_INFO(p))));

if (info->type==RBH)
info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

switch(info->type) {

// follow everything
p->mut_link = gen->mut_list;

end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
(StgClosure *)*q = evacuate((StgClosure *)*q);

// Happens if a MUT_ARR_PTRS in the old generation is frozen
case MUT_ARR_PTRS_FROZEN:

end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
(StgClosure *)*q = evacuate((StgClosure *)*q);
if (failed_to_evac) {
failed_to_evac = rtsFalse;
mkMutCons((StgClosure *)p, gen);

((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
p->mut_link = gen->mut_list;

StgMVar *mvar = (StgMVar *)p;
(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
p->mut_link = gen->mut_list;

StgTSO *tso = (StgTSO *)p;

/* Don't take this TSO off the mutable list - it might still
 * point to some younger objects (because we set evac_gen to 0
 * above).
 */
tso->mut_link = gen->mut_list;
gen->mut_list = (StgMutClosure *)tso;

StgBlockingQueue *bh = (StgBlockingQueue *)p;
(StgClosure *)bh->blocking_queue =
evacuate((StgClosure *)bh->blocking_queue);
p->mut_link = gen->mut_list;

/* Happens if a BLACKHOLE_BQ in the old generation is updated:
 */
case IND_OLDGEN_PERM:
/* Try to pull the indirectee into this generation, so we can
 * remove the indirection from the mutable list.
 */
((StgIndOldGen *)p)->indirectee =
evacuate(((StgIndOldGen *)p)->indirectee);

if (failed_to_evac) {
failed_to_evac = rtsFalse;
p->mut_link = gen->mut_once_list;
gen->mut_once_list = p;
// HWL: check whether all of these are necessary

case RBH: // cf. BLACKHOLE_BQ
// nat size, ptrs, nonptrs, vhs;
// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
StgRBH *rbh = (StgRBH *)p;
(StgClosure *)rbh->blocking_queue =
evacuate((StgClosure *)rbh->blocking_queue);
if (failed_to_evac) {
failed_to_evac = rtsFalse;
recordMutable((StgMutClosure *)rbh);
// ToDo: use size of reverted closure here!
p += BLACKHOLE_sizeW();

StgBlockedFetch *bf = (StgBlockedFetch *)p;
// follow the pointer to the node which is being demanded
(StgClosure *)bf->node =
evacuate((StgClosure *)bf->node);
// follow the link to the rest of the blocking queue
(StgClosure *)bf->link =
evacuate((StgClosure *)bf->link);
if (failed_to_evac) {
failed_to_evac = rtsFalse;
recordMutable((StgMutClosure *)bf);
p += sizeofW(StgBlockedFetch);

barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));

p += sizeofW(StgFetchMe);
break; // nothing to do in this case

case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
(StgClosure *)fmbq->blocking_queue =
evacuate((StgClosure *)fmbq->blocking_queue);
if (failed_to_evac) {
failed_to_evac = rtsFalse;
recordMutable((StgMutClosure *)fmbq);
p += sizeofW(StgFetchMeBlockingQueue);

// shouldn't have anything else on the mutables list
barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
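
/* -----------------------------------------------------------------------------
   Scavenge the static object list.

   Objects are unlinked from static_objects and moved onto
   scavenged_static_objects as they are dealt with; evacuating can push
   new entries onto static_objects, so we keep going until the list is
   empty.
   -------------------------------------------------------------------------- */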
scavenge_static(void)

StgClosure* p = static_objects;
const StgInfoTable *info;

/* Always evacuate straight to the oldest generation for static
 * objects */
evac_gen = oldest_gen->no;

/* keep going until we've scavenged all the objects on the linked
 * list... */
while (p != END_OF_STATIC_LIST) {

if (info->type==RBH)
info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

// make sure the info pointer is into text space
ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
|| IS_HUGS_CONSTR_INFO(GET_INFO(p))));

/* Take this object *off* the static_objects list,
 * and put it on the scavenged_static_objects list.
 */
static_objects = STATIC_LINK(info,p);
STATIC_LINK(info,p) = scavenged_static_objects;
scavenged_static_objects = p;

switch (info->type) {

StgInd *ind = (StgInd *)p;
ind->indirectee = evacuate(ind->indirectee);

/* might fail to evacuate it, in which case we have to pop it
 * back on the mutable list (and take it off the
 * scavenged_static list because the static link and mut link
 * pointers are one and the same).
 */
if (failed_to_evac) {
failed_to_evac = rtsFalse;
scavenged_static_objects = IND_STATIC_LINK(p);
((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
oldest_gen->mut_once_list = (StgMutClosure *)ind;

next = (P_)p->payload + info->layout.payload.ptrs;
// evacuate the pointers
for (q = (P_)p->payload; q < next; q++) {
(StgClosure *)*q = evacuate((StgClosure *)*q);

barf("scavenge_static: strange closure %d", (int)(info->type));

ASSERT(failed_to_evac == rtsFalse);

/* get the next static object from the list.  Remember, there might
 * be more stuff on this list now that we've done some evacuating!
 * (static_objects is a global)
 */
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   PAPs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
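
/* A note on the bitmap convention used below: a *clear* bit means the
 * stack slot holds a pointer and must be evacuated; a *set* bit marks
 * a non-pointer word, which is skipped.  In sketch form:
 *
 *     for (bitmap = info->layout.bitmap; bitmap != 0; bitmap >>= 1, p++) {
 *         if ((bitmap & 1) == 0) {
 *             *(StgClosure **)p = evacuate(*(StgClosure **)p);
 *         }
 *     }
 */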
scavenge_stack(StgPtr p, StgPtr stack_end)

const StgInfoTable* info;

//IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));

/*
 * Each time around this loop, we are looking at a chunk of stack
 * that starts with either a pending argument section or an
 * activation record.
 */
while (p < stack_end) {

// If we've got a tag, skip over that many words on the stack
if (IS_ARG_TAG((W_)q)) {

/* Is q a pointer to a closure?
 */
if (! LOOKS_LIKE_GHC_INFO(q) ) {

if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
ASSERT(closure_STATIC((StgClosure *)q));

// otherwise, must be a pointer into the allocation space.

(StgClosure *)*p = evacuate((StgClosure *)q);

/*
 * Otherwise, q must be the info pointer of an activation
 * record.  All activation records have 'bitmap' style layout
 * info.
 */
info = get_itbl((StgClosure *)p);

switch (info->type) {

// Dynamic bitmap: the mask is stored on the stack
bitmap = ((StgRetDyn *)p)->liveness;
p = (P_)&((StgRetDyn *)p)->payload[0];

// probably a slow-entry point return address:

belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
old_p, p, old_p+1));

p++; // what if FHS!=1 !? -- HWL
/* Specialised code for update frames, since they're so common.
 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
 */

StgUpdateFrame *frame = (StgUpdateFrame *)p;

p += sizeofW(StgUpdateFrame);

frame->updatee = evacuate(frame->updatee);

#else // specialised code for update frames, not sure if it's worth it.

nat type = get_itbl(frame->updatee)->type;

if (type == EVACUATED) {
frame->updatee = evacuate(frame->updatee);

bdescr *bd = Bdescr((P_)frame->updatee);

if (bd->gen_no > N) {
if (bd->gen_no < evac_gen) {
failed_to_evac = rtsTrue;

// Don't promote blackholes

if (!(stp->gen_no == 0 &&
stp->no == stp->gen->n_steps-1)) {

to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
sizeofW(StgHeader), stp);
frame->updatee = to;

to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
frame->updatee = to;
recordMutable((StgMutClosure *)to);

/* will never be SE_{,CAF_}BLACKHOLE, since we
   don't push an update frame for single-entry thunks.  KSW 1999-01. */
barf("scavenge_stack: UPDATE_FRAME updatee");
// small bitmap (< 32 entries, or < 64 on a 64-bit machine)

bitmap = info->layout.bitmap;

// this assumes that the payload starts immediately after the info-ptr

while (bitmap != 0) {
if ((bitmap & 1) == 0) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
bitmap = bitmap >> 1;

// large bitmap (>= 32 entries, or >= 64 on a 64-bit machine)

StgLargeBitmap *large_bitmap;

large_bitmap = info->layout.large_bitmap;

for (i=0; i<large_bitmap->size; i++) {
bitmap = large_bitmap->bitmap[i];
q = p + BITS_IN(W_);
while (bitmap != 0) {
if ((bitmap & 1) == 0) {
(StgClosure *)*p = evacuate((StgClosure *)*p);
bitmap = bitmap >> 1;
if (i+1 < large_bitmap->size) {
(StgClosure *)*p = evacuate((StgClosure *)*p);

// and don't forget to follow the SRT

barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
/*-----------------------------------------------------------------------------
   scavenge the large object list.

   evac_gen set by caller; similar games played with evac_gen as with
   scavenge() - see comment at the top of scavenge().  Most large
   objects are (repeatedly) mutable, so most of the time evac_gen will
   be zero.
   --------------------------------------------------------------------------- */
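
/* Note that new_large_objects behaves as a stack: evacuate() pushes
 * freshly promoted large objects on the front, which is why the loop
 * below re-reads stp->new_large_objects each iteration instead of
 * following bd->link.
 */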
scavenge_large(step *stp)

bd = stp->new_large_objects;

for (; bd != NULL; bd = stp->new_large_objects) {

/* take this object *off* the large objects list and put it on
 * the scavenged large objects list.  This is so that we can
 * treat new_large_objects as a stack and push new objects on
 * the front when evacuating.
 */
stp->new_large_objects = bd->link;
dbl_link_onto(bd, &stp->scavenged_large_objects);

// update the block count in this step.
stp->n_scavenged_large_blocks += bd->blocks;

if (scavenge_one(p)) {
mkMutCons((StgClosure *)p, stp->gen);
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

zero_static_object_list(StgClosure* first_static)

const StgInfoTable *info;

for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
link = STATIC_LINK(info, p);
STATIC_LINK(info,p) = NULL;
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
zero_mutable_list( StgMutClosure *first )

StgMutClosure *next, *c;

for (c = first; c != END_MUT_LIST; c = next) {
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

for (c = (StgIndStatic *)caf_list; c != NULL;
c = (StgIndStatic *)c->static_link)

c->header.info = c->saved_info;
c->saved_info = NULL;
// could, but not necessary: c->static_link = NULL;

scavengeCAFs( void )

for (c = (StgIndStatic *)caf_list; c != NULL;
c = (StgIndStatic *)c->static_link)

c->indirectee = evacuate(c->indirectee);
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
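
/* A CAF counts as dead if nothing re-linked it onto the static list
 * during this GC, i.e. its static link field is still NULL (the
 * STATIC_LINK test below); dead CAFs are unlinked from the CAF list
 * and their info pointer is smashed to stg_BLACKHOLE_info, so a
 * subsequent enter fails loudly instead of reading garbage.
 */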
#if 0 && defined(DEBUG)

const StgInfoTable *info;

ASSERT(info->type == IND_STATIC);

if (STATIC_LINK(info,p) == NULL) {
IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
SET_INFO(p,&stg_BLACKHOLE_info);
p = STATIC_LINK2(info,p);

pp = &STATIC_LINK2(info,p);

// belch("%d CAFs live", i);
/* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
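
/* The essence of the walk below, as a sketch: follow the chain of
 * update frames from tso->su, claiming each updatee that is not yet
 * some flavour of black hole:
 *
 *     StgBlockingQueue *bh = (StgBlockingQueue *)update_frame->updatee;
 *     if (bh->header.info != &stg_BLACKHOLE_info &&
 *         bh->header.info != &stg_BLACKHOLE_BQ_info &&
 *         bh->header.info != &stg_CAF_BLACKHOLE_info) {
 *         SET_INFO(bh, &stg_BLACKHOLE_info);
 *     }
 *
 * Meeting a plain BLACKHOLE means an earlier pass already handled the
 * rest of the stack, so the walk can stop there.
 */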
threadLazyBlackHole(StgTSO *tso)

StgUpdateFrame *update_frame;
StgBlockingQueue *bh;

stack_end = &tso->stack[tso->stack_size];
update_frame = tso->su;

switch (get_itbl(update_frame)->type) {

update_frame = ((StgCatchFrame *)update_frame)->link;

bh = (StgBlockingQueue *)update_frame->updatee;

/* if the thunk is already blackholed, it means we've also
 * already blackholed the rest of the thunks on this stack,
 * so we can stop early.
 *
 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
 * don't interfere with this optimisation.
 */
if (bh->header.info == &stg_BLACKHOLE_info) {

if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
SET_INFO(bh,&stg_BLACKHOLE_info);

update_frame = update_frame->link;

update_frame = ((StgSeqFrame *)update_frame)->link;

barf("threadPaused");
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
threadSqueezeStack(StgTSO *tso)

lnat displacement = 0;
StgUpdateFrame *frame;
StgUpdateFrame *next_frame; // Temporally next
StgUpdateFrame *prev_frame; // Temporally previous

rtsBool prev_was_update_frame;

StgUpdateFrame *top_frame;
nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
    bhs=0, squeezes=0;

void printObj( StgClosure *obj ); // from Printer.c

top_frame = tso->su;

bottom = &(tso->stack[tso->stack_size]);

/* There must be at least one frame, namely the STOP_FRAME.
 */
ASSERT((P_)frame < bottom);

/* Walk down the stack, reversing the links between frames so that
 * we can walk back up as we squeeze from the bottom.  Note that
 * next_frame and prev_frame refer to next and previous as they were
 * added to the stack, rather than the way we see them in this
 * walk. (It makes the next loop less confusing.)
 *
 * Stop if we find an update frame pointing to a black hole
 * (see comment in threadLazyBlackHole()).
 */
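
/* Sketch of the pointer reversal performed below: on entry each
 * frame's link points towards the stack bottom (the next older frame);
 * after the loop every visited frame's link points towards the top,
 * ready for the second, upward pass:
 *
 *     prev_frame  = frame->link;    // old "down" pointer
 *     frame->link = next_frame;     // now points "up"
 *     next_frame  = frame;
 *     frame       = prev_frame;
 */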
// bottom - sizeof(StgStopFrame) is the STOP_FRAME
while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
prev_frame = frame->link;
frame->link = next_frame;

if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
printObj((StgClosure *)prev_frame);
barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",

switch (get_itbl(frame)->type) {

if (frame->updatee->header.info == &stg_BLACKHOLE_info)

barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
printObj((StgClosure *)prev_frame);

if (get_itbl(frame)->type == UPDATE_FRAME
&& frame->updatee->header.info == &stg_BLACKHOLE_info) {
/* Now, we're at the bottom.  Frame points to the lowest update
 * frame on the stack, and its link actually points to the frame
 * above.  We have to walk back up the stack, squeezing out empty
 * update frames and turning the pointers back around on the way
 * back up.
 *
 * The bottom-most frame (the STOP_FRAME) has not been altered, and
 * we never want to eliminate it anyway.  Just walk one step up
 * before starting to squeeze.  When you get to the topmost frame,
 * remember that there are still some words above it that might have
 * to be moved.
 */

prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);

/*
 * Loop through all of the frames (everything except the very
 * bottom).  Things are complicated by the fact that we have
 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
 */
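
/* Sketch of a single squeeze, matching the code below: the current
 * frame is dropped, its updatee (updatee_bypass) is overwritten with
 * an indirection to the survivor (updatee_keep), and the dead frame's
 * words are slid out of the stack later:
 *
 *     UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
 *     displacement += sizeofW(StgUpdateFrame);
 *
 * Both thunks thus end up updated with the same value, but only one
 * update remains to be performed.
 */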
while (frame != NULL) {

StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
rtsBool is_update_frame;

next_frame = frame->link;
is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);

/* We can squeeze iff:
 * 1. both the previous and current frame are update frames
 * 2. the current frame is empty
 */
if (prev_was_update_frame && is_update_frame &&
(P_)prev_frame == frame_bottom + displacement) {

// Now squeeze out the current frame
StgClosure *updatee_keep = prev_frame->updatee;
StgClosure *updatee_bypass = frame->updatee;

IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));

/* Deal with blocking queues.  If both updatees have blocked
 * threads, then we should merge the queues into the update
 * frame that we're keeping.
 *
 * Alternatively, we could just wake them up: they'll just go
 * straight to sleep on the proper blackhole!  This is less code
 * and probably less bug prone, although it's probably much
 * slower.
 */
#if 0 // do it properly...
# if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
# error Unimplemented lazy BH warning.  (KSW 1999-01)
# endif
if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
|| GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
) {
// Sigh.  It has one.  Don't lose those threads!
if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
// Urgh.  Two queues.  Merge them.
P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

while (keep_tso->link != END_TSO_QUEUE) {
keep_tso = keep_tso->link;
keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

// For simplicity, just swap the BQ for the BH
P_ temp = updatee_keep;

updatee_keep = updatee_bypass;
updatee_bypass = temp;

// Record the swap in the kept frame (below)
prev_frame->updatee = updatee_keep;
TICK_UPD_SQUEEZED();
/* wasn't there something about update squeezing and ticky to be
 * sorted out?  oh yes: we aren't counting each enter properly
 * in this case.  See the log somewhere.  KSW 1999-04-21
 *
 * Check two things: that the two update frames don't point to
 * the same object, and that the updatee_bypass isn't already an
 * indirection.  Both of these cases only happen when we're in a
 * black hole-style loop (and there are multiple update frames
 * on the stack pointing to the same closure), but they can both
 * screw us up if we don't check.
 */
if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
// this wakes the threads up
UPD_IND_NOLOCK(updatee_bypass, updatee_keep);

sp = (P_)frame - 1; // sp = stuff to slide
displacement += sizeofW(StgUpdateFrame);

// No squeeze for this frame
sp = frame_bottom - 1; // Keep the current frame
/* Do lazy black-holing.
 */
if (is_update_frame) {
StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
if (bh->header.info != &stg_BLACKHOLE_info &&
bh->header.info != &stg_BLACKHOLE_BQ_info &&
bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif

/* zero out the slop so that the sanity checker can tell
 * where the next closure is.
 */

StgInfoTable *info = get_itbl(bh);
nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
/* don't zero out slop for a THUNK_SELECTOR, because its layout
 * info is used for a different purpose, and it's exactly the
 * same size as a BLACKHOLE in any case.
 */
if (info->type != THUNK_SELECTOR) {
for (i = np; i < np + nw; i++) {
((StgClosure *)bh)->payload[i] = 0;

SET_INFO(bh,&stg_BLACKHOLE_info);

// Fix the link in the current frame (should point to the frame below)
frame->link = prev_frame;
prev_was_update_frame = is_update_frame;
// Now slide all words from sp up to the next frame

if (displacement > 0) {
P_ next_frame_bottom;

if (next_frame != NULL)
next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
else
next_frame_bottom = tso->sp - 1;

belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,

while (sp >= next_frame_bottom) {
sp[displacement] = *sp;

(P_)prev_frame = (P_)frame + displacement;

tso->sp += displacement;
tso->su = prev_frame;

belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

threadPaused(StgTSO *tso)

if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
threadSqueezeStack(tso); // does black holing too
else
threadLazyBlackHole(tso);

/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */
printMutOnceList(generation *gen)

StgMutClosure *p, *next;

p = gen->mut_once_list;

fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
fprintf(stderr, "%p (%s), ",
p, info_type((StgClosure *)p));

fputc('\n', stderr);

printMutableList(generation *gen)

StgMutClosure *p, *next;

fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
fprintf(stderr, "%p (%s), ",
p, info_type((StgClosure *)p));

fputc('\n', stderr);
static inline rtsBool
maybeLarge(StgClosure *closure)

StgInfoTable *info = get_itbl(closure);

/* closure types that may be found on the new_large_objects list;
   see scavenge_large */
return (info->type == MUT_ARR_PTRS ||
info->type == MUT_ARR_PTRS_FROZEN ||
info->type == TSO ||
info->type == ARR_WORDS);