/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.113 2001/08/04 06:07:22 ken Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "StoragePriv.h"
#include "SchedAPI.h"       // for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"       // ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;           // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
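
/* Illustrative sketch (added for this excerpt, not part of the
 * original file): how evacuate() below conses a static object onto
 * static_objects using its static link field.  STATIC_LINK and the
 * END_OF_STATIC_LIST sentinel are the RTS's own; the helper name is
 * hypothetical and the code is kept compiled-out.
 */
#if 0
static void
push_static_object_sketch( const StgInfoTable *info, StgClosure *q )
{
    if (STATIC_LINK(info,q) == NULL) {  // NULL link <=> not on the list yet
        STATIC_LINK(info,q) = static_objects;
        static_objects = q;
    }
}
#endif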
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
/* Weak pointers
 */
StgWeak *old_weak_ptr_list;   // also pending finaliser list
static rtsBool weak_done;     // all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;          // blocks allocated during this GC
lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;

static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}
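
// The matching pop operation was elided in this excerpt; this is the
// obvious counterpart (scavenge_mark_stack() below depends on it).
static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}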
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
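
/* Illustrative sketch (added for this excerpt, not part of the
 * original file): the destination rule above written out as a
 * function.  In the real collector each step's `to' pointer is set up
 * along these lines and then simply consulted by evacuate(); the
 * helper name and the exact step fields used here are assumptions,
 * and the code is kept compiled-out.
 */
#if 0
static step *
dest_step_sketch( step *stp )
{
    if (stp->no < stp->gen->n_steps - 1) {
        // to-space in the next higher step of the same generation
        return &stp->gen->steps[stp->no + 1];
    } else if (stp->gen_no < RtsFlags.GcFlags.generations - 1) {
        // top step: promote to the next generation, step 0
        return &generations[stp->gen_no + 1].steps[0];
    } else {
        // oldest generation: evacuate within it
        return stp;
    }
}
#endif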
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
                     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          >= generations[g].max_blocks) {
        N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }
  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;  // it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
        bd->flags = BF_LARGE;
      }
      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
          nat bitmap_size; // in bytes
          bdescr *bitmap_bdescr;
          StgWord *bitmap;

          bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

          if (bitmap_size > 0) {
              bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
                                         / BLOCK_SIZE);
              stp->bitmap = bitmap_bdescr;
              bitmap = bitmap_bdescr->start;

              IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
                                 bitmap_size, bitmap););

              // don't forget to fill it with zeros!
              memset(bitmap, 0, bitmap_size);

              // for each block in this step, point to its bitmap from the
              // block descriptor.
              for (bd=stp->blocks; bd != NULL; bd = bd->link) {
                  bd->u.bitmap = bitmap;
                  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
              }
          }
      }
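
      /* Worked example (added for this excerpt): with 4k blocks on a
       * 32-bit machine, BLOCK_SIZE/(sizeof(W_)*BITS_PER_BYTE) =
       * 4096/32 = 128 bytes of bitmap per block -- one mark bit per
       * word -- and the per-block slice advance above is
       * BLOCK_SIZE_W/32 = 32 StgWords, i.e. the same 128 bytes.
       */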
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
          ASSERT(stp->blocks == NULL);
          bd = allocBlock();
          bd->gen_no = g;
          bd->step = stp;
          bd->link = NULL;
          bd->flags = 0; // *not* a to-space block or a large object
          stp->hp = bd->start;
          stp->hpLim = stp->hp + BLOCK_SIZE_W;
          stp->hp_bd = bd;
          stp->blocks = bd;
          stp->n_blocks = 1;
          new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
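
  /* Example (added for this excerpt): with 4 generations and N = 1,
   * the generation-3 lists are scavenged before generation 2's, so an
   * object reachable from both is evacuated straight to generation 3
   * instead of first landing in generation 2 and being copied twice.
   */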
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
                   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
                   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(static_objects));
        scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      int gen, st;
    loop2:
      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
          (mark_stack_bdescr != NULL && !mark_stack_empty())) {
          scavenge_mark_stack();
          flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
        for (st = generations[gen].n_steps; --st >= 0; ) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
          }
          stp = &generations[gen].steps[st];
          evac_gen = gen;
          if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
            scavenge(stp);
            flag = rtsTrue;
            goto loop2;
          }
          if (stp->new_large_objects != NULL) {
            scavenge_large(stp);
            flag = rtsTrue;
            goto loop2;
          }
        }
      }
    }

    if (flag) { goto loop; }

    // must be last...
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
        goto loop;
    }
  }
#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              stp->hp_bd->free = stp->hp;
              stp->hp_bd->link = NULL;
          }
      }
  }
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && RtsFlags.GcFlags.compact) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat blocks = oldest_gen->steps[0].n_blocks +
                   oldest_gen->steps[0].n_large_blocks;

      oldest_gen->max_blocks =
          stg_max(blocks * RtsFlags.GcFlags.oldGenFactor,
                  RtsFlags.GcFlags.minOldGenSize);
      if (RtsFlags.GcFlags.compact) {
          if ( oldest_gen->max_blocks >
               RtsFlags.GcFlags.maxHeapSize *
               (100 - RtsFlags.GcFlags.pcFreeHeap) / 100 ) {
              oldest_gen->max_blocks =
                  RtsFlags.GcFlags.maxHeapSize *
                  (100 - RtsFlags.GcFlags.pcFreeHeap) / 100;
              if (oldest_gen->max_blocks < blocks) {
                  belch("max_blocks: %ld, blocks: %ld, maxHeapSize: %ld",
                        oldest_gen->max_blocks, blocks, RtsFlags.GcFlags.maxHeapSize);
                  exit(1);
              }
          }
      } else {
          if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
              oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
              if (((int)oldest_gen->max_blocks - (int)blocks) <
                  (RtsFlags.GcFlags.pcFreeHeap *
                   RtsFlags.GcFlags.maxHeapSize / 200)) {
                  heapOverflow();
              }
          }
      }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        // stats information: how much we copied
        if (g <= N) {
          copied -= stp->hp_bd->start + BLOCK_SIZE_W -
            stp->hp_bd->free;
        }
      }

      // for generations we collected...
      if (g <= N) {

        // rough calculation of garbage collected, for stats output
        if (stp->is_compacted) {
            collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
        } else {
            collected += stp->n_blocks * BLOCK_SIZE_W;
        }
        /* free old memory and shift to-space into from-space for all
         * the collected steps (except the allocation area).  These
         * freed blocks will probably be quickly recycled.
         */
        if (!(g == 0 && s == 0)) {
            if (stp->is_compacted) {
                // for a compacted step, just shift the new to-space
                // onto the front of the now-compacted existing blocks.
                for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
                    bd->flags &= ~BF_EVACUATED; // now from-space
                }
                // tack the new blocks on the end of the existing blocks
                if (stp->blocks == NULL) {
                    stp->blocks = stp->to_blocks;
                } else {
                    for (bd = stp->blocks; bd != NULL; bd = next) {
                        next = bd->link;
                        if (next == NULL) {
                            bd->link = stp->to_blocks;
                        }
                    }
                }
                // add the new blocks to the block tally
                stp->n_blocks += stp->n_to_blocks;
            } else {
                freeChain(stp->blocks);
                stp->blocks = stp->to_blocks;
                stp->n_blocks = stp->n_to_blocks;
                for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                    bd->flags &= ~BF_EVACUATED; // now from-space
                }
            }
            stp->to_blocks = NULL;
            stp->n_to_blocks = 0;
        }
        /* LARGE OBJECTS.  The current live large objects are chained on
         * scavenged_large, having been moved during garbage
         * collection from large_objects.  Any objects left on
         * large_objects list are therefore dead, so we free them here.
         */
        for (bd = stp->large_objects; bd != NULL; bd = next) {
            next = bd->link;
            freeGroup(bd);
        }

        // update the count of blocks used by large objects
        for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
            bd->flags &= ~BF_EVACUATED;
        }
        stp->large_objects = stp->scavenged_large_objects;
        stp->n_large_blocks = stp->n_scavenged_large_blocks;
        /* Set the maximum blocks for this generation, interpolating
         * between the maximum size of the oldest and youngest
         * generations.
         *
         * max_blocks =    oldgen_max_blocks * G
         *                 ----------------------
         *                    n_generations - 1
         */
        if (g != 0) {
            if (RtsFlags.GcFlags.generations > 1) {
                generations[g].max_blocks = (oldest_gen->max_blocks * g)
                    / (RtsFlags.GcFlags.generations-1);
            } else {
                generations[g].max_blocks = oldest_gen->max_blocks;
            }
        }
      // for older generations...
      } else {

        /* For older generations, we need to append the
         * scavenged_large_object list (i.e. large objects that have been
         * promoted during this GC) to the large_object list for that step.
         */
        for (bd = stp->scavenged_large_objects; bd; bd = next) {
            next = bd->link;
            bd->flags &= ~BF_EVACUATED;
            dbl_link_onto(bd, &stp->large_objects);
        }

        // add the new blocks we promoted during this GC
        stp->n_blocks += stp->n_to_blocks;
        stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }
  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->is_compacted && stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
          }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0; // now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
         RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks; // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
        heapOverflow();
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
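
    /* Worked example (added for this excerpt): maxHeapSize = 1000
     * blocks, oldGenFactor = 2 and blocks = 300 live: 300*2*2 > 1000,
     * so adjusted_blocks = 1000 - 2*300 = 400 and pc_free = 40.
     * Provided pcFreeHeap is below 40, we shrink the nursery to 400
     * blocks instead of overflowing the heap.
     */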
  } else {

    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */

    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); // approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
        g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
        (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
        (100 + (long)g0s0_pcnt_kept);
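
      /* Worked example (added for this excerpt): suggested = 1000
       * blocks, needed = 200 and g0s0_pcnt_kept = 25 gives
       * blocks = (1000-200)*100/125 = 640: a 640-block nursery of
       * which 25% (160 blocks) survives leaves 640+160+200 = 1000,
       * right on the suggestion.
       */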
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
        blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling
  /*  heapCensus(to_blocks); */ /* ToDo */
#endif

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
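
/* Example (added for this excerpt): given weak pointers w1 (key k1,
 * currently reachable) and w2 whose key k2 is reachable only via w1's
 * value, the first pass finds k1 alive and evacuates w1's value; that
 * makes k2 reachable, so a later pass finds w2 alive too.  Only when
 * a complete pass evacuates nothing can the remaining keys safely be
 * declared dead.
 */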
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
          t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
          next = t->link;
          *prev = next;
          continue;
      case ThreadKilled:
      case ThreadComplete:
          // finished or died.  The thread might still be alive, but we
          // don't keep it on the all_threads list.  Don't forget to
          // stub out its global_link field.
          next = t->global_link;
          t->global_link = END_TSO_QUEUE;
          *prev = next;
          continue;
      default: ;
      }

      if (tmp == NULL) {
          // not alive (yet): leave this thread on the old_all_threads list.
          prev = &(t->global_link);
          next = t->global_link;
      }
      else {
          // alive: move this thread onto the all_threads list.
          next = t->global_link;
          t->global_link = all_threads;
          all_threads = t;
          *prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
        next = t->global_link;
        (StgClosure *)tmp = evacuate((StgClosure *)t);
        tmp->global_link = resurrected_threads;
        resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
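
/* (Usage note, added for this excerpt: traverse_weak_ptr_list() above
 * is the main client -- it calls isAlive(w->key) after each scavenge
 * round and relies on getting the forwarded address of the key back.)
 */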
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)p);
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
        return p;
    }

    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
        if (bd->flags & BF_EVACUATED) {
            return p;
        } else {
            return NULL;
        }
    }

    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
        return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:        // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
        // follow indirections
        p = ((StgInd *)p)->indirectee;
        continue;

    case EVACUATED:
        // alive!
        return ((StgEvacuated *)p)->evacuee;

    case TSO:
        if (((StgTSO *)p)->what_next == ThreadRelocated) {
            p = (StgClosure *)((StgTSO *)p)->link;
            continue;
        }

    default:
        // dead.
        return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
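
/* (Note, added for this excerpt: evacuate() below uses
 * copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp) for empty
 * BLACKHOLEs -- only the header is live, so the payload words are
 * reserved in to-space but never copied.)
 */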
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
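
/* Example (added for this excerpt): suppose we're collecting up to
 * N = 1 with evac_gen = 2 (we're scavenging a generation-2 mutable
 * list).  An object in generation 0 is copied to generation 2, step 0;
 * an object already EVACUATED into generation 1 can't be moved again,
 * so failed_to_evac is set and the referring object stays on the
 * mutable list for next time.
 */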
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
        /* Can't evacuate this object, because it's in a generation
         * older than the ones we're collecting.  Let's hope that it's
         * in evac_gen or older, or we will have to arrange to track
         * this pointer using the mutable list.
         */
        if (bd->gen_no < evac_gen) {
            // nope
            failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
        return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
        info = get_itbl(q);
        if (info->type == TSO &&
            ((StgTSO *)q)->what_next == ThreadRelocated) {
            q = (StgClosure *)((StgTSO *)q)->link;
            goto loop;
        }
        evacuate_large((P_)q);
        return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
        if (!is_marked((P_)q,bd)) {
            mark((P_)q,bd);
            if (mark_stack_full()) {
                mark_stack_overflowed = rtsTrue;
                reset_mark_stack();
            }
            push_mark_stack((P_)q);
        }
        return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
               || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  switch (info -> type) {
  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:   // here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
        bd->step->no != 0 &&
        bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
        case CONSTR:
        case CONSTR_1_0:
        case CONSTR_0_1:
        case CONSTR_2_0:
        case CONSTR_1_1:
        case CONSTR_0_2:
        case CONSTR_STATIC:
          {
            StgWord offset = info->layout.selector_offset;

            // check that the size is in range
            ASSERT(offset <
                   (StgWord32)(selectee_info->layout.payload.ptrs +
                               selectee_info->layout.payload.nptrs));

            // perform the selection!
            q = selectee->payload[offset];

            /* if we're already in to-space, there's no need to continue
             * with the evacuation, just update the source address with
             * a pointer to the (evacuated) constructor field.
             */
            if (HEAP_ALLOCED(q)) {
              bdescr *bd = Bdescr((P_)q);
              if (bd->flags & BF_EVACUATED) {
                if (bd->gen_no < evac_gen) {
                  failed_to_evac = rtsTrue;
                  TICK_GC_FAILED_PROMOTION();
                }
                return q;
              }
            }

            /* otherwise, carry on and evacuate this constructor field,
             * (but not the constructor itself)
             */
            goto loop;
          }

        case IND:
        case IND_STATIC:
        case IND_PERM:
        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
          selectee = ((StgInd *)selectee)->indirectee;
          goto selector_loop;

        case EVACUATED:
          selectee = ((StgEvacuated *)selectee)->evacuee;
          goto selector_loop;
        case THUNK_SELECTOR:
#if 0
          /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
             something) to go into an infinite loop when the nightly
             stage2 compiles PrelTup.lhs. */

          /* we can't recurse indefinitely in evacuate(), so set a
           * limit on the number of times we can go around this
           * loop.
           */
          if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
              bdescr *bd;
              bd = Bdescr((P_)selectee);
              // note: parenthesised; the unparenthesised original
              // `!bd->flags & BF_EVACUATED' was a precedence bug
              if (!(bd->flags & BF_EVACUATED)) {
                  thunk_selector_depth++;
                  selectee = evacuate(selectee);
                  thunk_selector_depth--;
                  goto selector_loop;
              }
          }
          // otherwise, fall through...
#endif

        case AP_UPD:
        case THUNK:
        case THUNK_1_0:
        case THUNK_0_1:
        case THUNK_2_0:
        case THUNK_1_1:
        case THUNK_0_2:
        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case BLACKHOLE_BQ:
          // not evaluated yet
          break;

#if defined(PAR)
          // a copy of the top-level cases below
        case RBH: // cf. BLACKHOLE_BQ
          {
            //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
            to = copy(q,BLACKHOLE_sizeW(),stp);
            //ToDo: derive size etc from reverted IP
            //to = copy(q,size,stp);
            // recordMutable((StgMutClosure *)to);
            return to;
          }

        case BLOCKED_FETCH:
          ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
          to = copy(q,sizeofW(StgBlockedFetch),stp);
          return to;

        case FETCH_ME:
          ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
          to = copy(q,sizeofW(StgFetchMe),stp);
          return to;

        case FETCH_ME_BQ:
          ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
          to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
          return to;
#endif

        default:
          barf("evacuate: THUNK_SELECTOR: strange selectee %d",
               (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
        THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
        FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
          && ((StgIndStatic *)q)->saved_info == NULL
          && IND_STATIC_LINK((StgClosure *)q) == NULL) {
        IND_STATIC_LINK((StgClosure *)q) = static_objects;
        static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {  // optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
      // just copy the block
      return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
        goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
        StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
        move_TSO(tso, new_tso);
        return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
               belch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

# ifdef DIST
  case REMOTE_REF:
# endif
  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
             belch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections required to get at those. The code generator knows
       which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q;
  StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */

  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
        /* treat MVars specially, because we don't want to evacuate the
         * mut_link field in the middle of the closure.
         */
    {
        StgMVar *mvar = ((StgMVar *)p);
        evac_gen = 0;
        (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
        (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
        (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)mvar);
        failed_to_evac = rtsFalse; // mutable.
        p += sizeofW(StgMVar);
        break;
    }
    case THUNK_2_0:
    case FUN_2_0:
        scavenge_srt(info);
    case CONSTR_2_0:
        ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_0:
        scavenge_srt(info);
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
        break;

    case FUN_1_0:
        scavenge_srt(info);
    case CONSTR_1_0:
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_1:
        scavenge_srt(info);
        p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
        break;

    case FUN_0_1:
        scavenge_srt(info);
    case CONSTR_0_1:
        p += sizeofW(StgHeader) + 1;
        break;

    case THUNK_0_2:
    case FUN_0_2:
        scavenge_srt(info);
    case CONSTR_0_2:
        p += sizeofW(StgHeader) + 2;
        break;

    case THUNK_1_1:
    case FUN_1_1:
        scavenge_srt(info);
    case CONSTR_1_1:
        ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
        p += sizeofW(StgHeader) + 2;
        break;
    case FUN:
    case THUNK:
        scavenge_srt(info);
        // fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
    {
        StgPtr end;

        end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
        for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
            (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p += info->layout.payload.nptrs;
        break;
    }

    case IND_PERM:
        if (stp->gen_no != 0) {
            SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
        }
        // fall through
    case IND_OLDGEN_PERM:
        ((StgIndOldGen *)p)->indirectee =
            evacuate(((StgIndOldGen *)p)->indirectee);
        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            recordOldToNewPtrs((StgMutClosure *)p);
        }
        p += sizeofW(StgIndOldGen);
        break;
    case MUT_VAR:
        evac_gen = 0;
        ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)p);
        failed_to_evac = rtsFalse; // mutable anyhow
        p += sizeofW(StgMutVar);
        break;

    case MUT_CONS:
        // ignore these
        failed_to_evac = rtsFalse; // mutable anyhow
        p += sizeofW(StgMutVar);
        break;
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
        p += BLACKHOLE_sizeW();
        break;

    case BLACKHOLE_BQ:
    {
        StgBlockingQueue *bh = (StgBlockingQueue *)p;
        (StgClosure *)bh->blocking_queue =
            evacuate((StgClosure *)bh->blocking_queue);
        recordMutable((StgMutClosure *)bh);
        failed_to_evac = rtsFalse;
        p += BLACKHOLE_sizeW();
        break;
    }
    case THUNK_SELECTOR:
    {
        StgSelector *s = (StgSelector *)p;
        s->selectee = evacuate(s->selectee);
        p += THUNK_SELECTOR_sizeW();
        break;
    }

    case AP_UPD: // same as PAPs
    case PAP:
        /* Treat a PAP just like a section of stack, not forgetting to
         * evacuate the function pointer too...
         */
    {
        StgPAP* pap = (StgPAP *)p;

        pap->fun = evacuate(pap->fun);
        scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
        p += pap_sizeW(pap);
        break;
    }
    case ARR_WORDS:
        // nothing to follow
        p += arr_words_sizeW((StgArrWords *)p);
        break;

    case MUT_ARR_PTRS:
        // follow everything
    {
        StgPtr next;

        evac_gen = 0;  // repeatedly mutable
        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)q);
        failed_to_evac = rtsFalse; // mutable anyhow.
        break;
    }
    case MUT_ARR_PTRS_FROZEN:
        // follow everything
    {
        StgPtr next;

        next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
        for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
            (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        // it's tempting to recordMutable() if failed_to_evac is
        // false, but that breaks some assumptions (eg. every
        // closure on the mutable list is supposed to have the MUT
        // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
        break;
    }
    case TSO:
    {
        StgTSO *tso = (StgTSO *)p;
        evac_gen = 0;
        scavengeTSO(tso);
        evac_gen = saved_evac_gen;
        recordMutable((StgMutClosure *)tso);
        failed_to_evac = rtsFalse; // mutable anyhow.
        p += tso_sizeW(tso);
        break;
    }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
    {
        nat size, ptrs, nonptrs, vhs;
        char str[80];
        StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

        StgRBH *rbh = (StgRBH *)p;
        (StgClosure *)rbh->blocking_queue =
            evacuate((StgClosure *)rbh->blocking_queue);
        recordMutable((StgMutClosure *)rbh); // was `(StgMutClosure *)to',
                                             // but `to' is not in scope here
        failed_to_evac = rtsFalse; // mutable anyhow.
        IF_DEBUG(gc,
                 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
                       p, info_type(p), (StgClosure *)rbh->blocking_queue));
        // ToDo: use size of reverted closure here!
        p += BLACKHOLE_sizeW();
        break;
    }
    case BLOCKED_FETCH:
    {
        StgBlockedFetch *bf = (StgBlockedFetch *)p;
        // follow the pointer to the node which is being demanded
        (StgClosure *)bf->node =
            evacuate((StgClosure *)bf->node);
        // follow the link to the rest of the blocking queue
        (StgClosure *)bf->link =
            evacuate((StgClosure *)bf->link);
        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            recordMutable((StgMutClosure *)bf);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
                       bf, info_type((StgClosure *)bf),
                       bf->node, info_type(bf->node)));
        p += sizeofW(StgBlockedFetch);
        break;
    }
#ifdef DIST
    case REMOTE_REF:
#endif
    case FETCH_ME:
        p += sizeofW(StgFetchMe);
        break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
    {
        StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
        (StgClosure *)fmbq->blocking_queue =
            evacuate((StgClosure *)fmbq->blocking_queue);
        if (failed_to_evac) {
            failed_to_evac = rtsFalse;
            recordMutable((StgMutClosure *)fmbq);
        }
        IF_DEBUG(gc,
                 belch("@@ scavenge: %p (%s) exciting, isn't it",
                       p, info_type((StgClosure *)p)));
        p += sizeofW(StgFetchMeBlockingQueue);
        break;
    }
#endif
    default:
        barf("scavenge: unimplemented/strange closure type %d @ %p",
             info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
        failed_to_evac = rtsFalse;
        mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;
linear_scan:
    while (!mark_stack_empty()) {

        // pop the next object off the stack
        p = pop_mark_stack();

        info = get_itbl((StgClosure *)p);
        ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

        q = p;
        switch (info->type) {

        case MVAR:
            /* treat MVars specially, because we don't want to evacuate the
             * mut_link field in the middle of the closure.
             */
        {
            StgMVar *mvar = ((StgMVar *)p);
            evac_gen = 0;
            (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
            (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
            (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse; // mutable.
            break;
        }
        case FUN_2_0:
        case THUNK_2_0:
            scavenge_srt(info);
        case CONSTR_2_0:
            ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case FUN_1_0:
        case THUNK_1_0:
            scavenge_srt(info);
        case CONSTR_1_0:
            ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
            break;

        case FUN:
        case THUNK:
            scavenge_srt(info);
            // fall through

        case CONSTR:
        case WEAK:
        case FOREIGN:
        case STABLE_NAME:
        case BCO:
        {
            StgPtr end;

            end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
            for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
                (StgClosure *)*p = evacuate((StgClosure *)*p);
            }
            break;
        }

        case IND_PERM:
            // don't need to do anything here: the only possible case
            // is that we're in a 1-space compacting collector, with
            // no "old" generation.
            break;
        case IND_OLDGEN:
        case IND_OLDGEN_PERM:
            ((StgIndOldGen *)p)->indirectee =
                evacuate(((StgIndOldGen *)p)->indirectee);
            if (failed_to_evac) {
                recordOldToNewPtrs((StgMutClosure *)p);
            }
            failed_to_evac = rtsFalse;
            break;

        case MUT_VAR:
            evac_gen = 0;
            ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
            evac_gen = saved_evac_gen;
            failed_to_evac = rtsFalse;
            break;

        case MUT_CONS:
            // ignore these
            failed_to_evac = rtsFalse;
            break;
        case CAF_BLACKHOLE:
        case SE_CAF_BLACKHOLE:
        case SE_BLACKHOLE:
        case BLACKHOLE:
        case ARR_WORDS:
            break;

        case BLACKHOLE_BQ:
        {
            StgBlockingQueue *bh = (StgBlockingQueue *)p;
            (StgClosure *)bh->blocking_queue =
                evacuate((StgClosure *)bh->blocking_queue);
            failed_to_evac = rtsFalse;
            break;
        }
        case THUNK_SELECTOR:
        {
            StgSelector *s = (StgSelector *)p;
            s->selectee = evacuate(s->selectee);
            break;
        }

        case AP_UPD: // same as PAPs
        case PAP:
            /* Treat a PAP just like a section of stack, not forgetting to
             * evacuate the function pointer too...
             */
        {
            StgPAP* pap = (StgPAP *)p;

            pap->fun = evacuate(pap->fun);
            scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
            break;
        }
2488 // follow everything
2492 evac_gen = 0; // repeatedly mutable
2493 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2494 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2495 (StgClosure *)*p = evacuate((StgClosure *)*p);
2497 evac_gen = saved_evac_gen;
2498 failed_to_evac = rtsFalse; // mutable anyhow.
2502 case MUT_ARR_PTRS_FROZEN:
2503 // follow everything
2507 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2508 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2509 (StgClosure *)*p = evacuate((StgClosure *)*p);
2510 }
2511 break;
2512 }
2514 case TSO:
2515 {
2516 StgTSO *tso = (StgTSO *)p;
2517 evac_gen = 0;
2518 scavengeTSO(tso);
2519 evac_gen = saved_evac_gen;
2520 failed_to_evac = rtsFalse;
2521 break;
2522 }
2525 case RBH: // cf. BLACKHOLE_BQ
2526 {
2528 nat size, ptrs, nonptrs, vhs;
2529 char str[80];
2530 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2532 StgRBH *rbh = (StgRBH *)p;
2533 (StgClosure *)rbh->blocking_queue =
2534 evacuate((StgClosure *)rbh->blocking_queue);
2535 recordMutable((StgMutClosure *)rbh);
2536 failed_to_evac = rtsFalse; // mutable anyhow.
2538 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2539 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2540 break;
2541 }
2543 case BLOCKED_FETCH:
2544 {
2545 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2546 // follow the pointer to the node which is being demanded
2547 (StgClosure *)bf->node =
2548 evacuate((StgClosure *)bf->node);
2549 // follow the link to the rest of the blocking queue
2550 (StgClosure *)bf->link =
2551 evacuate((StgClosure *)bf->link);
2552 if (failed_to_evac) {
2553 failed_to_evac = rtsFalse;
2554 recordMutable((StgMutClosure *)bf);
2555 }
2557 belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
2558 bf, info_type((StgClosure *)bf),
2559 bf->node, info_type(bf->node)));
2561 break;
2562 }
2566 case FETCH_ME:
2567 break; // nothing to do in this case
2569 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2570 {
2571 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2572 (StgClosure *)fmbq->blocking_queue =
2573 evacuate((StgClosure *)fmbq->blocking_queue);
2574 if (failed_to_evac) {
2575 failed_to_evac = rtsFalse;
2576 recordMutable((StgMutClosure *)fmbq);
2577 }
2579 belch("@@ scavenge: %p (%s) exciting, isn't it",
2580 p, info_type((StgClosure *)p)));
2581 break;
2582 }
2585 default:
2586 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2587 (int)(info->type), p);
2588 }
2590 if (failed_to_evac) {
2591 failed_to_evac = rtsFalse;
2592 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2593 }
2595 // mark the next bit to indicate "scavenged"
2596 mark(q+1, Bdescr(q));
2598 } // while (!mark_stack_empty())
2600 // start a new linear scan if the mark stack overflowed at some point
2601 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2602 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2603 mark_stack_overflowed = rtsFalse;
2604 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2605 oldgen_scan = oldgen_scan_bd->start;
2608 if (oldgen_scan_bd) {
2609 // push a new thing on the mark stack
2610 loop:
2611 // find a closure that is marked but not scavenged, and start
2612 // scavenging from there.
2613 while (oldgen_scan < oldgen_scan_bd->free
2614 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2615 oldgen_scan++;
2616 }
2618 if (oldgen_scan < oldgen_scan_bd->free) {
2620 // already scavenged?
2621 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2622 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2623 goto loop;
2624 }
2625 push_mark_stack(oldgen_scan);
2626 // ToDo: bump the linear scan by the actual size of the object
2627 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2628 goto linear_scan;
2629 }
2631 oldgen_scan_bd = oldgen_scan_bd->link;
2632 if (oldgen_scan_bd != NULL) {
2633 oldgen_scan = oldgen_scan_bd->start;
2634 goto linear_scan;
2635 }
2636 }
2637 }
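/* The linear scan above relies on the compacting collector's mark
 * bitmap: the bit for q means "live", and the bit for q+1 (set at the
 * bottom of the main loop) means "already scavenged".  A sketch of one
 * plausible encoding, one mark bit per word of the block -- the bitmap
 * field name on the block descriptor is an assumption here:
 */
#if 0
static inline StgWord is_marked(StgPtr p, bdescr *bd) {
    nat offset = p - bd->start;            // word index into the block
    return (bd->u.bitmap[offset / BITS_IN(W_)]
            >> (offset & (BITS_IN(W_) - 1))) & 1;
}
static inline void mark(StgPtr p, bdescr *bd) {
    nat offset = p - bd->start;
    bd->u.bitmap[offset / BITS_IN(W_)] |=
        (StgWord)1 << (offset & (BITS_IN(W_) - 1));
}
#endif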
2639 /* -----------------------------------------------------------------------------
2640 Scavenge one object.
2642 This is used for objects that are temporarily marked as mutable
2643 because they contain old-to-new generation pointers. Only certain
2644 objects can have this property.
2645 -------------------------------------------------------------------------- */
2647 static rtsBool
2648 scavenge_one(StgPtr p)
2649 {
2650 const StgInfoTable *info;
2651 nat saved_evac_gen = evac_gen;
2652 rtsBool no_luck;
2654 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2655 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2657 info = get_itbl((StgClosure *)p);
2659 switch (info->type) {
2662 case FUN_1_0: // hardly worth specialising these guys
2682 case IND_OLDGEN_PERM:
2683 {
2684 StgPtr q, end;
2686 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2687 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2688 (StgClosure *)*q = evacuate((StgClosure *)*q);
2689 }
2690 break;
2691 }
2693 case CAF_BLACKHOLE:
2694 case SE_CAF_BLACKHOLE:
2695 case SE_BLACKHOLE:
2696 case BLACKHOLE:
2697 break;
2699 case THUNK_SELECTOR:
2700 {
2701 StgSelector *s = (StgSelector *)p;
2702 s->selectee = evacuate(s->selectee);
2703 break;
2704 }
2706 case ARR_WORDS:
2707 // nothing to follow
2708 break;
2710 case MUT_ARR_PTRS:
2711 {
2712 // follow everything
2713 StgPtr next;
2715 evac_gen = 0; // repeatedly mutable
2716 recordMutable((StgMutClosure *)p);
2717 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2718 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2719 (StgClosure *)*p = evacuate((StgClosure *)*p);
2721 evac_gen = saved_evac_gen;
2722 failed_to_evac = rtsFalse;
2723 break;
2724 }
2726 case MUT_ARR_PTRS_FROZEN:
2727 {
2728 // follow everything
2729 StgPtr next;
2731 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2732 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2733 (StgClosure *)*p = evacuate((StgClosure *)*p);
2734 }
2735 break;
2736 }
2738 case TSO:
2739 {
2740 StgTSO *tso = (StgTSO *)p;
2742 evac_gen = 0; // repeatedly mutable
2743 scavengeTSO(tso);
2744 recordMutable((StgMutClosure *)tso);
2745 evac_gen = saved_evac_gen;
2746 failed_to_evac = rtsFalse;
2747 break;
2748 }
2750 case AP_UPD:
2751 case PAP:
2752 {
2753 StgPAP* pap = (StgPAP *)p;
2754 pap->fun = evacuate(pap->fun);
2755 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2756 break;
2757 }
2759 case IND_OLDGEN:
2760 // This might happen if for instance a MUT_CONS was pointing to a
2761 // THUNK which has since been updated. The IND_OLDGEN will
2762 // be on the mutable list anyway, so we don't need to do anything
2763 // here.
2764 break;
2766 default:
2767 barf("scavenge_one: strange object %d", (int)(info->type));
2768 }
2770 no_luck = failed_to_evac;
2771 failed_to_evac = rtsFalse;
2772 return (no_luck);
2773 }
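/* Typical calling pattern for scavenge_one(): the caller re-records
 * the object as mutable iff some pointer could not be promoted.  This
 * is exactly what scavenge_large() does below:
 */
#if 0
if (scavenge_one(p)) {
    // at least one field still points into a younger generation
    mkMutCons((StgClosure *)p, stp->gen);
}
#endif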
2775 /* -----------------------------------------------------------------------------
2776 Scavenging mutable lists.
2778 We treat the mutable list of each generation > N (i.e. all the
2779 generations older than the one being collected) as roots. We also
2780 remove non-mutable objects from the mutable list at this point.
2781 -------------------------------------------------------------------------- */
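/* Reminder of the representation used below: every object on a
 * mutable list is an StgMutClosure whose mut_link field points at the
 * next entry, terminated by END_MUT_LIST.  Consing an object onto a
 * generation's mut_once_list is therefore just this (a sketch of what
 * mkMutCons amounts to, ignoring allocation of the MUT_CONS cell):
 */
#if 0
static void cons_onto_mut_once_list(StgMutClosure *c, generation *gen) {
    c->mut_link = gen->mut_once_list;
    gen->mut_once_list = c;
}
#endif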
2783 static void
2784 scavenge_mut_once_list(generation *gen)
2785 {
2786 const StgInfoTable *info;
2787 StgMutClosure *p, *next, *new_list;
2789 p = gen->mut_once_list;
2790 new_list = END_MUT_LIST;
2791 next = p->mut_link;
2793 evac_gen = gen->no;
2794 failed_to_evac = rtsFalse;
2796 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2798 // make sure the info pointer is into text space
2799 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2800 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2802 info = get_itbl((StgClosure *)p);
2804 if (info->type==RBH)
2805 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2807 switch(info->type) {
2809 case IND_OLDGEN:
2810 case IND_OLDGEN_PERM:
2811 case IND_STATIC:
2812 /* Try to pull the indirectee into this generation, so we can
2813 * remove the indirection from the mutable list.
2814 */
2815 ((StgIndOldGen *)p)->indirectee =
2816 evacuate(((StgIndOldGen *)p)->indirectee);
2818 #if 0 && defined(DEBUG)
2819 if (RtsFlags.DebugFlags.gc)
2820 /* Debugging code to print out the size of the thing we just
2821 * evacuated.
2822 */
2823 {
2824 StgPtr start = gen->steps[0].scan;
2825 bdescr *start_bd = gen->steps[0].scan_bd;
2826 nat size = 0;
2827 scavenge(&gen->steps[0]);
2828 if (start_bd != gen->steps[0].scan_bd) {
2829 size += (P_)BLOCK_ROUND_UP(start) - start;
2830 start_bd = start_bd->link;
2831 while (start_bd != gen->steps[0].scan_bd) {
2832 size += BLOCK_SIZE_W;
2833 start_bd = start_bd->link;
2834 }
2835 size += gen->steps[0].scan -
2836 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2837 } else {
2838 size = gen->steps[0].scan - start;
2839 }
2840 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2841 }
2842 #endif
2844 /* failed_to_evac might happen if we've got more than two
2845 * generations, we're collecting only generation 0, the
2846 * indirection resides in generation 2 and the indirectee is
2847 * in generation 1.
2848 */
2849 if (failed_to_evac) {
2850 failed_to_evac = rtsFalse;
2851 p->mut_link = new_list;
2852 new_list = p;
2853 } else {
2854 /* the mut_link field of an IND_STATIC is overloaded as the
2855 * static link field too (it just so happens that we don't need
2856 * both at the same time), so we need to NULL it out when
2857 * removing this object from the mutable list because the static
2858 * link fields are all assumed to be NULL before doing a major
2859 * collection.
2860 */
2861 p->mut_link = NULL;
2862 }
2863 continue;
2865 case MUT_CONS:
2866 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2867 * it from the mutable list if possible by promoting whatever it
2868 * points to.
2869 */
2870 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2871 /* didn't manage to promote everything, so put the
2872 * MUT_CONS back on the list.
2873 */
2874 p->mut_link = new_list;
2875 new_list = p;
2876 }
2877 continue;
2879 default:
2880 // shouldn't have anything else on the mutables list
2881 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2882 }
2883 }
2885 gen->mut_once_list = new_list;
2886 }
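/* Worked example of the failed_to_evac case above, assuming three
 * generations and a minor GC with N = 0 (so evac_gen == 2 while this
 * list is scavenged): an IND_OLDGEN in generation 2 points at a thunk
 * in generation 0.  evacuate() copies the thunk to generation 1, its
 * natural destination, which is younger than evac_gen -- so it sets
 * failed_to_evac and the indirection has to stay on the mutable list.
 */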
2889 static void
2890 scavenge_mutable_list(generation *gen)
2891 {
2892 const StgInfoTable *info;
2893 StgMutClosure *p, *next;
2895 p = gen->saved_mut_list;
2896 next = p->mut_link;
2898 evac_gen = 0;
2899 failed_to_evac = rtsFalse;
2901 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2903 // make sure the info pointer is into text space
2904 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2905 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2907 info = get_itbl((StgClosure *)p);
2909 if (info->type==RBH)
2910 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2912 switch(info->type) {
2914 case MUT_ARR_PTRS:
2915 // follow everything
2916 p->mut_link = gen->mut_list;
2917 gen->mut_list = p;
2918 {
2919 StgPtr end, q;
2921 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2922 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2923 (StgClosure *)*q = evacuate((StgClosure *)*q);
2924 }
2925 }
2926 continue;
2928 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2929 case MUT_ARR_PTRS_FROZEN:
2930 {
2931 StgPtr end, q;
2934 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2935 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2936 (StgClosure *)*q = evacuate((StgClosure *)*q);
2937 }
2940 if (failed_to_evac) {
2941 failed_to_evac = rtsFalse;
2942 mkMutCons((StgClosure *)p, gen);
2943 }
2944 }
2945 continue;
2947 case MUT_VAR:
2948 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2949 p->mut_link = gen->mut_list;
2950 gen->mut_list = p;
2951 continue;
2953 case MVAR:
2954 {
2955 StgMVar *mvar = (StgMVar *)p;
2956 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2957 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2958 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2959 p->mut_link = gen->mut_list;
2960 gen->mut_list = p;
2961 continue;
2962 }
2964 case TSO:
2965 {
2966 StgTSO *tso = (StgTSO *)p;
2968 scavengeTSO(tso);
2970 /* Don't take this TSO off the mutable list - it might still
2971 * point to some younger objects (because we set evac_gen to 0
2972 * above).
2973 */
2974 tso->mut_link = gen->mut_list;
2975 gen->mut_list = (StgMutClosure *)tso;
2976 continue;
2977 }
2979 case BLACKHOLE_BQ:
2980 {
2981 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2982 (StgClosure *)bh->blocking_queue =
2983 evacuate((StgClosure *)bh->blocking_queue);
2984 p->mut_link = gen->mut_list;
2985 gen->mut_list = p;
2986 continue;
2987 }
2989 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2990 */
2991 case IND_OLDGEN:
2992 case IND_OLDGEN_PERM:
2993 /* Try to pull the indirectee into this generation, so we can
2994 * remove the indirection from the mutable list.
2995 */
2997 ((StgIndOldGen *)p)->indirectee =
2998 evacuate(((StgIndOldGen *)p)->indirectee);
3001 if (failed_to_evac) {
3002 failed_to_evac = rtsFalse;
3003 p->mut_link = gen->mut_once_list;
3004 gen->mut_once_list = p;
3005 }
3006 continue;
3011 // HWL: check whether all of these are necessary
3013 case RBH: // cf. BLACKHOLE_BQ
3014 {
3015 // nat size, ptrs, nonptrs, vhs;
3017 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3018 StgRBH *rbh = (StgRBH *)p;
3019 (StgClosure *)rbh->blocking_queue =
3020 evacuate((StgClosure *)rbh->blocking_queue);
3021 if (failed_to_evac) {
3022 failed_to_evac = rtsFalse;
3023 recordMutable((StgMutClosure *)rbh);
3024 }
3025 // ToDo: use size of reverted closure here!
3026 p += BLACKHOLE_sizeW();
3027 break;
3028 }
3030 case BLOCKED_FETCH:
3031 {
3032 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3033 // follow the pointer to the node which is being demanded
3034 (StgClosure *)bf->node =
3035 evacuate((StgClosure *)bf->node);
3036 // follow the link to the rest of the blocking queue
3037 (StgClosure *)bf->link =
3038 evacuate((StgClosure *)bf->link);
3039 if (failed_to_evac) {
3040 failed_to_evac = rtsFalse;
3041 recordMutable((StgMutClosure *)bf);
3042 }
3043 p += sizeofW(StgBlockedFetch);
3044 break;
3045 }
3047 case REMOTE_REF:
3049 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3051 case FETCH_ME:
3052 p += sizeofW(StgFetchMe);
3053 break; // nothing to do in this case
3055 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3056 {
3057 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3058 (StgClosure *)fmbq->blocking_queue =
3059 evacuate((StgClosure *)fmbq->blocking_queue);
3060 if (failed_to_evac) {
3061 failed_to_evac = rtsFalse;
3062 recordMutable((StgMutClosure *)fmbq);
3063 }
3064 p += sizeofW(StgFetchMeBlockingQueue);
3065 break;
3066 }
3068 default:
3070 // shouldn't have anything else on the mutables list
3071 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3072 }
3073 }
3074 }
3077 static void
3078 scavenge_static(void)
3079 {
3080 StgClosure* p = static_objects;
3081 const StgInfoTable *info;
3083 /* Always evacuate straight to the oldest generation for static
3084 * objects */
3085 evac_gen = oldest_gen->no;
3087 /* keep going until we've scavenged all the objects on the linked
3088 * list... */
3089 while (p != END_OF_STATIC_LIST) {
3091 info = get_itbl(p);
3093 if (info->type==RBH)
3094 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3096 // make sure the info pointer is into text space
3097 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3098 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3100 /* Take this object *off* the static_objects list,
3101 * and put it on the scavenged_static_objects list.
3102 */
3103 static_objects = STATIC_LINK(info,p);
3104 STATIC_LINK(info,p) = scavenged_static_objects;
3105 scavenged_static_objects = p;
3107 switch (info->type) {
3109 case IND_STATIC:
3110 {
3111 StgInd *ind = (StgInd *)p;
3112 ind->indirectee = evacuate(ind->indirectee);
3114 /* might fail to evacuate it, in which case we have to pop it
3115 * back on the mutable list (and take it off the
3116 * scavenged_static list because the static link and mut link
3117 * pointers are one and the same).
3118 */
3119 if (failed_to_evac) {
3120 failed_to_evac = rtsFalse;
3121 scavenged_static_objects = IND_STATIC_LINK(p);
3122 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3123 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3124 }
3125 break;
3126 }
3128 case THUNK_STATIC:
3129 case FUN_STATIC:
3130 scavenge_srt(info);
3131 break;
3133 case CONSTR_STATIC:
3134 {
3135 StgPtr q, next;
3137 next = (P_)p->payload + info->layout.payload.ptrs;
3138 // evacuate the pointers
3139 for (q = (P_)p->payload; q < next; q++) {
3140 (StgClosure *)*q = evacuate((StgClosure *)*q);
3141 }
3142 break;
3143 }
3145 default:
3146 barf("scavenge_static: strange closure %d", (int)(info->type));
3147 }
3149 ASSERT(failed_to_evac == rtsFalse);
3151 /* get the next static object from the list. Remember, there might
3152 * be more stuff on this list now that we've done some evacuating!
3153 * (static_objects is a global)
3154 */
3155 p = static_objects;
3156 }
3157 }
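/* The protocol used by all the scavenging code above: evacuate() sets
 * the global failed_to_evac when it could not move its target as far
 * as evac_gen.  A minimal sketch of a scavenger for a hypothetical
 * closure type "Foo" with a single pointer field, following that
 * protocol (the type and function are illustrative only):
 */
#if 0
typedef struct { StgHeader header; StgClosure *field; } StgFoo;

static void scavenge_foo(StgFoo *foo) {
    foo->field = evacuate(foo->field);
    if (failed_to_evac) {           // field still points at a younger gen
        failed_to_evac = rtsFalse;
        recordOldToNewPtrs((StgMutClosure *)foo);
    }
}
#endif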
3159 /* -----------------------------------------------------------------------------
3160 scavenge_stack walks over a section of stack and evacuates all the
3161 objects pointed to by it. We can use the same code for walking
3162 PAPs, since these are just sections of copied stack.
3163 -------------------------------------------------------------------------- */
3165 static void
3166 scavenge_stack(StgPtr p, StgPtr stack_end)
3167 {
3168 StgPtr q;
3169 const StgInfoTable* info;
3170 StgWord bitmap;
3172 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3174 /*
3175 * Each time around this loop, we are looking at a chunk of stack
3176 * that starts with either a pending argument section or an
3177 * activation record.
3178 */
3180 while (p < stack_end) {
3181 q = *(StgPtr *)p;
3183 // If we've got a tag, skip over that many words on the stack
3184 if (IS_ARG_TAG((W_)q)) {
3185 p += ARG_SIZE(q);
3186 p++; continue;
3187 }
3189 /* Is q a pointer to a closure?
3190 */
3191 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3193 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3194 ASSERT(closure_STATIC((StgClosure *)q));
3195 }
3196 // otherwise, must be a pointer into the allocation space.
3199 (StgClosure *)*p = evacuate((StgClosure *)q);
3200 p++;
3201 continue;
3202 }
3204 /*
3205 * Otherwise, q must be the info pointer of an activation
3206 * record. All activation records have 'bitmap' style layout
3207 * info.
3208 */
3209 info = get_itbl((StgClosure *)p);
3211 switch (info->type) {
3213 // Dynamic bitmap: the mask is stored on the stack
3214 case RET_DYN:
3215 bitmap = ((StgRetDyn *)p)->liveness;
3216 p = (P_)&((StgRetDyn *)p)->payload[0];
3217 goto small_bitmap;
3219 // probably a slow-entry point return address:
3220 case FUN:
3221 case FUN_STATIC:
3222 {
3223 #if 0
3224 StgPtr old_p = p;
3225 p++; p++;
3226 IF_DEBUG(sanity,
3227 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3228 old_p, p, old_p+1));
3229 #else
3230 p++; // what if FHS!=1 !? -- HWL
3231 #endif
3232 goto follow_srt;
3233 }
3235 /* Specialised code for update frames, since they're so common.
3236 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3237 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3238 */
3239 case UPDATE_FRAME:
3240 {
3241 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3243 p += sizeofW(StgUpdateFrame);
3245 #ifdef DEBUG
3246 frame->updatee = evacuate(frame->updatee);
3247 continue;
3248 #else // specialised code for update frames, not sure if it's worth it.
3249 StgClosure *to;
3250 nat type = get_itbl(frame->updatee)->type;
3252 if (type == EVACUATED) {
3253 frame->updatee = evacuate(frame->updatee);
3254 continue;
3255 } else {
3256 bdescr *bd = Bdescr((P_)frame->updatee);
3257 step *stp;
3258 if (bd->gen_no > N) {
3259 if (bd->gen_no < evac_gen) {
3260 failed_to_evac = rtsTrue;
3261 }
3262 continue;
3263 }
3265 // Don't promote blackholes
3266 stp = bd->step;
3267 if (!(stp->gen_no == 0 &&
3268 stp->no != 0 &&
3269 stp->no == stp->gen->n_steps-1)) {
3270 stp = stp->to;
3271 }
3273 switch (type) {
3274 case BLACKHOLE:
3275 case CAF_BLACKHOLE:
3276 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3277 sizeofW(StgHeader), stp);
3278 frame->updatee = to;
3279 continue;
3280 case BLACKHOLE_BQ:
3281 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3282 frame->updatee = to;
3283 recordMutable((StgMutClosure *)to);
3284 continue;
3285 default:
3286 /* will never be SE_{,CAF_}BLACKHOLE, since we
3287 don't push an update frame for single-entry thunks. KSW 1999-01. */
3288 barf("scavenge_stack: UPDATE_FRAME updatee");
3289 }
3290 }
3291 #endif
3292 }
3294 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
3295 case STOP_FRAME:
3296 case CATCH_FRAME:
3297 case SEQ_FRAME:
3298 case RET_BCO:
3299 case RET_SMALL:
3300 case RET_VEC_SMALL:
3301 bitmap = info->layout.bitmap;
3302 p++;
3303 // this assumes that the payload starts immediately after the info-ptr
3304 small_bitmap:
3305 while (bitmap != 0) {
3306 if ((bitmap & 1) == 0) {
3307 (StgClosure *)*p = evacuate((StgClosure *)*p);
3308 }
3309 p++;
3310 bitmap = bitmap >> 1;
3311 }
3312 goto follow_srt;
3317 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3318 case RET_BIG:
3319 case RET_VEC_BIG:
3320 {
3322 StgLargeBitmap *large_bitmap;
3323 nat i;
3325 large_bitmap = info->layout.large_bitmap;
3326 p++;
3328 for (i=0; i<large_bitmap->size; i++) {
3329 bitmap = large_bitmap->bitmap[i];
3330 q = p + BITS_IN(W_);
3331 while (bitmap != 0) {
3332 if ((bitmap & 1) == 0) {
3333 (StgClosure *)*p = evacuate((StgClosure *)*p);
3334 }
3335 p++;
3336 bitmap = bitmap >> 1;
3337 }
3338 if (i+1 < large_bitmap->size) {
3339 while (p < q) {
3340 (StgClosure *)*p = evacuate((StgClosure *)*p);
3341 p++;
3342 }
3343 }
3344 }
3346 // and don't forget to follow the SRT
3347 follow_srt:
3348 scavenge_srt(info);
3349 continue;
3351 default:
3352 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3353 }
3354 }
3355 }
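/* Illustration of the bitmap convention used in the frame cases above:
 * a *clear* bit means "this slot is a live pointer, evacuate it"; a
 * set bit means a non-pointer (dead slot).  For instance, walking
 * payload words of which only the second is a non-pointer -- the value
 * 0x2 and the variable p here are hypothetical:
 */
#if 0
StgWord bitmap = 0x2;          // binary 010: slot 1 is a non-pointer
while (bitmap != 0) {
    if ((bitmap & 1) == 0) {
        (StgClosure *)*p = evacuate((StgClosure *)*p);
    }
    p++;
    bitmap = bitmap >> 1;
}
// slots beyond the last set bit are plain pointers; the enclosing
// scan deals with those
#endif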
3356 /*-----------------------------------------------------------------------------
3357 scavenge the large object list.
3359 evac_gen set by caller; similar games played with evac_gen as with
3360 scavenge() - see comment at the top of scavenge(). Most large
3361 objects are (repeatedly) mutable, so most of the time evac_gen will
3362 be zero.
3363 --------------------------------------------------------------------------- */
3365 static void
3366 scavenge_large(step *stp)
3367 {
3368 bdescr *bd;
3369 StgPtr p;
3371 bd = stp->new_large_objects;
3373 for (; bd != NULL; bd = stp->new_large_objects) {
3375 /* take this object *off* the large objects list and put it on
3376 * the scavenged large objects list. This is so that we can
3377 * treat new_large_objects as a stack and push new objects on
3378 * the front when evacuating.
3379 */
3380 stp->new_large_objects = bd->link;
3381 dbl_link_onto(bd, &stp->scavenged_large_objects);
3383 // update the block count in this step.
3384 stp->n_scavenged_large_blocks += bd->blocks;
3386 p = bd->start;
3387 if (scavenge_one(p)) {
3388 mkMutCons((StgClosure *)p, stp->gen);
3389 }
3390 }
3391 }
3393 /* -----------------------------------------------------------------------------
3394 Initialising the static object & mutable lists
3395 -------------------------------------------------------------------------- */
3397 static void
3398 zero_static_object_list(StgClosure* first_static)
3399 {
3400 StgClosure* p;
3401 StgClosure* link;
3402 const StgInfoTable *info;
3404 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3405 info = get_itbl(p);
3406 link = STATIC_LINK(info, p);
3407 STATIC_LINK(info,p) = NULL;
3408 }
3409 }
3411 /* This function is only needed because we share the mutable link
3412 * field with the static link field in an IND_STATIC, so we have to
3413 * zero the mut_link field before doing a major GC, which needs the
3414 * static link field.
3416 * It doesn't do any harm to zero all the mutable link fields on the
3417 * mutable list.
3418 */
3420 static void
3421 zero_mutable_list( StgMutClosure *first )
3422 {
3423 StgMutClosure *next, *c;
3425 for (c = first; c != END_MUT_LIST; c = next) {
3426 next = c->mut_link;
3427 c->mut_link = NULL;
3428 }
3429 }
3431 /* -----------------------------------------------------------------------------
3432 Reverting CAFs
3433 -------------------------------------------------------------------------- */
3435 void
3436 revertCAFs( void )
3437 {
3438 StgIndStatic *c;
3440 for (c = (StgIndStatic *)caf_list; c != NULL;
3441 c = (StgIndStatic *)c->static_link)
3442 {
3443 c->header.info = c->saved_info;
3444 c->saved_info = NULL;
3445 // could, but not necessary: c->static_link = NULL;
3446 }
3447 caf_list = NULL;
3448 }
3450 void
3451 scavengeCAFs( void )
3452 {
3453 StgIndStatic *c;
3455 evac_gen = 0;
3456 for (c = (StgIndStatic *)caf_list; c != NULL;
3457 c = (StgIndStatic *)c->static_link)
3458 {
3459 c->indirectee = evacuate(c->indirectee);
3460 }
3461 }
3463 /* -----------------------------------------------------------------------------
3464 Sanity code for CAF garbage collection.
3466 With DEBUG turned on, we manage a CAF list in addition to the SRT
3467 mechanism. After GC, we run down the CAF list and blackhole any
3468 CAFs which have been garbage collected. This means we get an error
3469 whenever the program tries to enter a garbage collected CAF.
3471 Any garbage collected CAFs are taken off the CAF list at the same
3472 time.
3473 -------------------------------------------------------------------------- */
3475 #if 0 && defined(DEBUG)
3477 static void
3478 gcCAFs(void)
3479 {
3480 StgClosure* p;
3481 StgClosure** pp;
3482 const StgInfoTable *info;
3483 nat i;
3485 i = 0;
3486 p = caf_list;
3487 pp = &caf_list;
3489 while (p != NULL) {
3490 info = get_itbl(p);
3493 ASSERT(info->type == IND_STATIC);
3495 if (STATIC_LINK(info,p) == NULL) {
3496 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3498 SET_INFO(p,&stg_BLACKHOLE_info);
3499 p = STATIC_LINK2(info,p);
3500 *pp = p;
3501 }
3502 else {
3503 pp = &STATIC_LINK2(info,p);
3504 p = *pp;
3505 i++;
3506 }
3507 }
3510 // belch("%d CAFs live", i);
3511 }
3512 #endif
3515 /* -----------------------------------------------------------------------------
3516 Lazy black holing
3518 Whenever a thread returns to the scheduler after possibly doing
3519 some work, we have to run down the stack and black-hole all the
3520 closures referred to by update frames.
3521 -------------------------------------------------------------------------- */
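/* The transformation performed below, in miniature: for each update
 * frame on the stack, the closure it will eventually update is
 * overwritten with a BLACKHOLE info pointer, so that any attempt to
 * re-enter it can be caught.  Sketch for a single frame:
 */
#if 0
StgClosure *updatee = tso->su->updatee;   // topmost update frame's target
SET_INFO(updatee, &stg_BLACKHOLE_info);   // catch re-entry from now on
#endif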
3523 void
3524 threadLazyBlackHole(StgTSO *tso)
3525 {
3526 StgUpdateFrame *update_frame;
3527 StgBlockingQueue *bh;
3528 StgPtr stack_end;
3530 stack_end = &tso->stack[tso->stack_size];
3531 update_frame = tso->su;
3533 while (1) {
3534 switch (get_itbl(update_frame)->type) {
3536 case CATCH_FRAME:
3537 update_frame = ((StgCatchFrame *)update_frame)->link;
3538 break;
3540 case UPDATE_FRAME:
3541 bh = (StgBlockingQueue *)update_frame->updatee;
3543 /* if the thunk is already blackholed, it means we've also
3544 * already blackholed the rest of the thunks on this stack,
3545 * so we can stop early.
3547 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3548 * don't interfere with this optimisation.
3549 */
3550 if (bh->header.info == &stg_BLACKHOLE_info) {
3551 return;
3552 }
3554 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3555 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3556 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3557 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3558 #endif
3559 SET_INFO(bh,&stg_BLACKHOLE_info);
3560 }
3562 update_frame = update_frame->link;
3563 break;
3565 case SEQ_FRAME:
3566 update_frame = ((StgSeqFrame *)update_frame)->link;
3567 break;
3569 case STOP_FRAME:
3570 return;
3571 default:
3572 barf("threadPaused");
3573 }
3574 }
3575 }
3578 /* -----------------------------------------------------------------------------
3579 * Stack squeezing
3580 *
3581 * Code largely pinched from old RTS, then hacked to bits. We also do
3582 * lazy black holing here.
3584 * -------------------------------------------------------------------------- */
3586 void
3587 threadSqueezeStack(StgTSO *tso)
3588 {
3589 lnat displacement = 0;
3590 StgUpdateFrame *frame;
3591 StgUpdateFrame *next_frame; // Temporally next
3592 StgUpdateFrame *prev_frame; // Temporally previous
3593 StgPtr bottom;
3594 rtsBool prev_was_update_frame;
3596 StgUpdateFrame *top_frame;
3597 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3598 bhs=0, squeezes=0;
3599 void printObj( StgClosure *obj ); // from Printer.c
3601 top_frame = tso->su;
3604 bottom = &(tso->stack[tso->stack_size]);
3605 frame = tso->su;
3607 /* There must be at least one frame, namely the STOP_FRAME.
3608 */
3609 ASSERT((P_)frame < bottom);
3611 /* Walk down the stack, reversing the links between frames so that
3612 * we can walk back up as we squeeze from the bottom. Note that
3613 * next_frame and prev_frame refer to next and previous as they were
3614 * added to the stack, rather than the way we see them in this
3615 * walk. (It makes the next loop less confusing.)
3617 * Stop if we find an update frame pointing to a black hole
3618 * (see comment in threadLazyBlackHole()).
3619 */
3621 next_frame = NULL;
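/* Illustration of the reversal (f1 is the topmost frame; links
 * originally point down towards the STOP_FRAME at the bottom):
 *
 *   before:  f1 -> f2 -> f3 -> STOP_FRAME
 *   after:   f1 <- f2 <- f3    (each link now points at the frame
 *                               *above*; frame is left at the bottom)
 */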
3622 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3623 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3624 prev_frame = frame->link;
3625 frame->link = next_frame;
3626 next_frame = frame;
3627 frame = prev_frame;
3630 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3631 printObj((StgClosure *)prev_frame);
3632 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3633 frame, prev_frame);
3634 }
3635 switch (get_itbl(frame)->type) {
3636 case UPDATE_FRAME:
3637 upd_frames++;
3638 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3639 bhs++;
3640 break;
3641 case STOP_FRAME:
3642 stop_frames++;
3643 break;
3644 case CATCH_FRAME:
3645 catch_frames++;
3646 break;
3647 case SEQ_FRAME:
3648 seq_frames++;
3649 break;
3650 default:
3651 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3652 frame, prev_frame);
3653 printObj((StgClosure *)prev_frame);
3654 }
3656 if (get_itbl(frame)->type == UPDATE_FRAME
3657 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3658 break;
3659 }
3660 }
3662 /* Now, we're at the bottom. Frame points to the lowest update
3663 * frame on the stack, and its link actually points to the frame
3664 * above. We have to walk back up the stack, squeezing out empty
3665 * update frames and turning the pointers back around on the way
3668 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3669 * we never want to eliminate it anyway. Just walk one step up
3670 * before starting to squeeze. When you get to the topmost frame,
3671 * remember that there are still some words above it that might have
3672 * been pushed as arguments.
3673 */
3674 prev_frame = frame;
3675 frame = next_frame;
3678 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3680 /*
3681 * Loop through all of the frames (everything except the very
3682 * bottom). Things are complicated by the fact that we have
3683 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3684 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3685 */
3686 while (frame != NULL) {
3687 StgPtr sp;
3688 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3689 rtsBool is_update_frame;
3691 next_frame = frame->link;
3692 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3694 /* Check two things:
3695 * 1. both the previous and current frame are update frames
3696 * 2. the current frame is empty
3697 */
3698 if (prev_was_update_frame && is_update_frame &&
3699 (P_)prev_frame == frame_bottom + displacement) {
3701 // Now squeeze out the current frame
3702 StgClosure *updatee_keep = prev_frame->updatee;
3703 StgClosure *updatee_bypass = frame->updatee;
3706 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3707 squeezes++;
3710 /* Deal with blocking queues. If both updatees have blocked
3711 * threads, then we should merge the queues into the update
3712 * frame that we're keeping.
3714 * Alternatively, we could just wake them up: they'll just go
3715 * straight to sleep on the proper blackhole! This is less code
3716 * and probably less bug prone, although it's probably much
3717 * slower.
3718 */
3719 #if 0 // do it properly...
3720 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3721 # error Unimplemented lazy BH warning. (KSW 1999-01)
3722 # endif
3723 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3724 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3725 ) {
3726 // Sigh. It has one. Don't lose those threads!
3727 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3728 // Urgh. Two queues. Merge them.
3729 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3731 while (keep_tso->link != END_TSO_QUEUE) {
3732 keep_tso = keep_tso->link;
3733 }
3734 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3735 }
3737 // For simplicity, just swap the BQ for the BH
3738 P_ temp = updatee_keep;
3740 updatee_keep = updatee_bypass;
3741 updatee_bypass = temp;
3743 // Record the swap in the kept frame (below)
3744 prev_frame->updatee = updatee_keep;
3745 }
3746 #endif
3749 TICK_UPD_SQUEEZED();
3750 /* wasn't there something about update squeezing and ticky to be
3751 * sorted out? oh yes: we aren't counting each enter properly
3752 * in this case. See the log somewhere. KSW 1999-04-21
3754 * Check two things: that the two update frames don't point to
3755 * the same object, and that the updatee_bypass isn't already an
3756 * indirection. Both of these cases only happen when we're in a
3757 * black hole-style loop (and there are multiple update frames
3758 * on the stack pointing to the same closure), but they can both
3759 * screw us up if we don't check.
3760 */
3761 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3762 // this wakes the threads up
3763 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3764 }
3766 sp = (P_)frame - 1; // sp = stuff to slide
3767 displacement += sizeofW(StgUpdateFrame);
3768 } else {
3770 // No squeeze for this frame
3771 sp = frame_bottom - 1; // Keep the current frame
3773 /* Do lazy black-holing.
3774 */
3775 if (is_update_frame) {
3776 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3777 if (bh->header.info != &stg_BLACKHOLE_info &&
3778 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3779 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3780 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3781 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3782 #endif
3784 /* zero out the slop so that the sanity checker can tell
3785 * where the next closure is.
3786 */
3787 {
3788 StgInfoTable *info = get_itbl(bh);
3789 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3790 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3791 * info is used for a different purpose, and it's exactly the
3792 * same size as a BLACKHOLE in any case.
3793 */
3794 if (info->type != THUNK_SELECTOR) {
3795 for (i = np; i < np + nw; i++) {
3796 ((StgClosure *)bh)->payload[i] = 0;
3797 }
3798 }
3799 }
3801 SET_INFO(bh,&stg_BLACKHOLE_info);
3802 }
3803 }
3805 // Fix the link in the current frame (should point to the frame below)
3806 frame->link = prev_frame;
3807 prev_was_update_frame = is_update_frame;
3808 }
3810 // Now slide all words from sp up to the next frame
3812 if (displacement > 0) {
3813 P_ next_frame_bottom;
3815 if (next_frame != NULL)
3816 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3817 else
3818 next_frame_bottom = tso->sp - 1;
3820 IF_DEBUG(gc,
3822 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3823 displacement));
3826 while (sp >= next_frame_bottom) {
3827 sp[displacement] = *sp;
3828 sp -= 1;
3829 }
3830 }
3831 (P_)prev_frame = (P_)frame + displacement;
3832 }
3835 tso->sp += displacement;
3836 tso->su = prev_frame;
3838 IF_DEBUG(squeeze,
3839 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3840 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames));
3841 }
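/* Net effect of one squeeze, in miniature: two adjacent update frames
 * updating u_keep (the frame below, which is kept) and u_bypass
 * collapse into a single frame, with u_bypass short-circuited to an
 * indirection:
 *
 *   stack: ... [upd u_bypass] [upd u_keep] ...
 *    ==>   ... [upd u_keep] ...        heap: u_bypass = IND -> u_keep
 *
 * The StgUpdateFrame-sized hole is accumulated in `displacement' and
 * slid out by the copy loop above.
 */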
3845 /* -----------------------------------------------------------------------------
3846 * Pausing a thread
3847 *
3848 * We have to prepare for GC - this means doing lazy black holing
3849 * here. We also take the opportunity to do stack squeezing if it's
3850 * worth it.
3851 * -------------------------------------------------------------------------- */
3852 void
3853 threadPaused(StgTSO *tso)
3854 {
3855 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3856 threadSqueezeStack(tso); // does black holing too
3857 else
3858 threadLazyBlackHole(tso);
3859 }
3861 /* -----------------------------------------------------------------------------
3862 * Debugging
3863 * -------------------------------------------------------------------------- */
3866 void
3867 printMutOnceList(generation *gen)
3868 {
3869 StgMutClosure *p, *next;
3871 p = gen->mut_once_list;
3872 next = p->mut_link;
3874 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3875 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3876 fprintf(stderr, "%p (%s), ",
3877 p, info_type((StgClosure *)p));
3879 fputc('\n', stderr);
3880 }
3882 void
3883 printMutableList(generation *gen)
3884 {
3885 StgMutClosure *p, *next;
3887 p = gen->mut_list;
3888 next = p->mut_link;
3890 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3891 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3892 fprintf(stderr, "%p (%s), ",
3893 p, info_type((StgClosure *)p));
3895 fputc('\n', stderr);
3896 }
3898 static inline rtsBool
3899 maybeLarge(StgClosure *closure)
3900 {
3901 StgInfoTable *info = get_itbl(closure);
3903 /* closure types that may be found on the new_large_objects list;
3904 see scavenge_large */
3905 return (info->type == MUT_ARR_PTRS ||
3906 info->type == MUT_ARR_PTRS_FROZEN ||
3907 info->type == TSO ||
3908 info->type == ARR_WORDS);
3909 }
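/* maybeLarge() is deliberately conservative: it only says which
 * closure types *may* live on the large-object lists.  A hypothetical
 * use in a sanity check (illustrative only):
 */
#if 0
ASSERT(maybeLarge((StgClosure *)bd->start));
#endif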