/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.117 2001/08/08 13:45:02 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"

#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif

#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      // live static objects
StgClosure* scavenged_static_objects;   // static objects scavenged so far
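
/* Illustrative sketch (not part of the original file): the list discipline
 * described above, modelled with a hypothetical SketchStatic type.  The
 * static link field doubles as the "already on a list" flag, which is why
 * the end-of-list sentinel must be non-NULL.  Gated with #if 0; it plays
 * no part in the collector.
 */
#if 0
typedef struct SketchStatic_ {
  struct SketchStatic_ *static_link;   // zero <=> on no list yet
} SketchStatic;

#define SKETCH_END_OF_LIST ((SketchStatic *)1)

static SketchStatic *sketch_static_list    = SKETCH_END_OF_LIST;
static SketchStatic *sketch_scavenged_list = SKETCH_END_OF_LIST;

static void
sketch_add_static(SketchStatic *obj)
{
  if (obj->static_link == NULL) {           // not yet on either list
    obj->static_link = sketch_static_list;  // cons onto the front
    sketch_static_list = obj;
  }
}

static void
sketch_scan_statics(void (*scav)(SketchStatic *))
{
  // The list may grow while we scan it, so keep taking from the front;
  // moving each object to the scavenged list first keeps its link field
  // non-zero, which is what marks it as "visited".
  while (sketch_static_list != SKETCH_END_OF_LIST) {
    SketchStatic *obj = sketch_static_list;
    sketch_static_list = obj->static_link;
    obj->static_link = sketch_scavenged_list;
    sketch_scavenged_list = obj;
    scav(obj);   // may call sketch_add_static() on new objects
  }
}
#endif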
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   Inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;

static inline rtsBool
mark_stack_empty(void)
{
  return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
  return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
  mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
  *mark_sp++ = p;
}

static inline StgPtr
pop_mark_stack(void)
{
  return *--mark_sp;
}
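
/* Illustrative sketch (not part of the original file): the push-side
 * protocol for the mark stack, modelled on the pattern used in evacuate()
 * below.  On overflow we only record the fact; the object is still marked
 * in the bitmap, and scavenge_mark_stack() recovers by linearly scanning
 * the old generation for marked-but-unscavenged objects.
 */
#if 0
static void
sketch_mark_and_push(StgPtr p)
{
  // assumes p has just had its mark bit set in the block's bitmap
  if (mark_stack_full()) {
    mark_stack_overflowed = rtsTrue;   // recover via linear scan later
    reset_mark_stack();                // recycle the stack space
  }
  push_mark_stack(p);
}
#endif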
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either

       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
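
/* Illustrative sketch (not part of the original file): the evacuate/
 * scavenge fixpoint described above in miniature, for a single to-space
 * whose scan pointer chases the allocation pointer (Cheney's algorithm).
 * The types and helpers are hypothetical stand-ins for step/bdescr and
 * the real evacuate()/scavenge() machinery.
 */
#if 0
typedef struct {
  StgPtr scan;   // next object still to be scavenged
  StgPtr hp;     // allocation pointer in to-space
} SketchToSpace;

static void sketch_evacuate_roots(SketchToSpace *to);
static nat  sketch_scavenge_one(SketchToSpace *to, StgPtr p); // size in words

static void
sketch_collect(SketchToSpace *to)
{
  // evacuating a root copies it to to-space, advancing to->hp;
  // scavenging a copied object may evacuate more objects, so iterate
  // until the scan pointer catches up with the allocation pointer.
  sketch_evacuate_roots(to);
  while (to->scan < to->hp) {
    to->scan += sketch_scavenge_one(to, to->scan);
  }
  // from-space now contains only garbage and can be freed wholesale
}
#endif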
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  // tell the stats department that we've started a GC 
  stat_startGC();

  // Init stats and print par specific (timing) info 
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC 
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats? 
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads) 
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space 
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);

      bd = allocBlock();
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block 
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet 
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	nat bitmap_size; // in bytes
	bdescr *bitmap_bdescr;
	StgWord *bitmap;

	bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	if (bitmap_size > 0) {
	  bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
				     / BLOCK_SIZE);
	  stp->bitmap = bitmap_bdescr;
	  bitmap = bitmap_bdescr->start;

	  IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
			     bitmap_size, bitmap););

	  // don't forget to fill it with zeros!
	  memset(bitmap, 0, bitmap_size);

	  // for each block in this step, point to its bitmap from the
	  // block descriptor.
	  for (bd=stp->blocks; bd != NULL; bd = bd->link) {
	    bd->u.bitmap = bitmap;
	    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	  }
	}
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	ASSERT(stp->blocks == NULL);
	bd = allocBlock();
	bd->gen_no = g;
	bd->step = stp;
	bd->link = NULL;
	bd->flags = 0;	// *not* a to-space block or a large object
	stp->hp = bd->start;
	stp->hpLim = stp->hp + BLOCK_SIZE_W;
	stp->hp_bd = bd;
	stp->blocks = bd;
	stp->n_blocks = 1;
	new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
    mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
    mark_stack = (StgPtr *)mark_stack_bdescr->start;
    mark_sp = mark_stack;
    mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
    mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
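
  /* Hypothetical example: with four generations and N = 1, gen 3's lists
   * are scavenged before gen 2's, so a gen-3 object pointing at a gen-0/1
   * object promotes its target straight to gen 3, instead of finding it
   * already evacuated to gen 2 (via gen 2's list) and having to keep a
   * mutable-list entry for it.
   */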
  {
    int st;

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first 
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
     if (CurrentTSO) {
       CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
     }
  */

  // Mark the entries in the GALA table of the parallel system 
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches 
  markPendingFetches(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
    /* ToDo: To fix the caf leak, we need to make the commented out
     * parts of this code do something sensible - as described in
     * the CAF document.
     */
    extern void markHugsObjects(void);
    markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects 
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
      IF_DEBUG(sanity, checkStaticObjects(static_objects));
      scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen 
    {
      int gen, st;

    loop2:
      // scavenge objects in compacted generation 
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
	  (mark_stack_bdescr != NULL && !mark_stack_empty())) {
	scavenge_mark_stack();
	flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
	for (st = generations[gen].n_steps; --st >= 0; ) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  stp = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	    scavenge(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	  if (stp->new_large_objects != NULL) {
	    scavenge_large(stp);
	    flag = rtsTrue;
	    goto loop2;
	  }
	}
      }
    }

    if (flag) { goto loop; }

    // must be last... 
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something 
      goto loop;
    }
  }
#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM 
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  // Now see which stable names are still alive.

  // Tidy the end of the to-space chains 
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	stp->hp_bd->free = stp->hp;
	stp->hp_bd->link = NULL;
      }
    }
  }

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
    // save number of blocks for stats 
    oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
    compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats 
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied 
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected... 
      if (g <= N) {

	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	  collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	  collected += stp->n_blocks * BLOCK_SIZE_W;
	}

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  if (stp->is_compacted) {
	    // for a compacted step, just shift the new to-space
	    // onto the front of the now-compacted existing blocks.
	    for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
	      bd->flags &= ~BF_EVACUATED;	// now from-space 
	    }
	    // tack the new blocks on the end of the existing blocks
	    if (stp->blocks == NULL) {
	      stp->blocks = stp->to_blocks;
	    } else {
	      for (bd = stp->blocks; bd != NULL; bd = next) {
		next = bd->link;
		if (next == NULL) {
		  bd->link = stp->to_blocks;
		}
	      }
	    }
	    // add the new blocks to the block tally
	    stp->n_blocks += stp->n_to_blocks;
	  } else {
	    freeChain(stp->blocks);
	    stp->blocks = stp->to_blocks;
	    stp->n_blocks = stp->n_to_blocks;
	    for (bd = stp->blocks; bd != NULL; bd = bd->link) {
	      bd->flags &= ~BF_EVACUATED;	// now from-space 
	    }
	  }
	  stp->to_blocks = NULL;
	  stp->n_to_blocks = 0;
	}

	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else {

	// for older generations... 

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC 
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
    nat live, size, min_alloc;
    nat max  = RtsFlags.GcFlags.maxHeapSize;
    nat gens = RtsFlags.GcFlags.generations;

    // live in the oldest generations 
    live = oldest_gen->steps[0].n_blocks +
	   oldest_gen->steps[0].n_large_blocks;

    // default max size for all generations except zero 
    size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		   RtsFlags.GcFlags.minOldGenSize);

    // minimum size for generation zero 
    min_alloc = (RtsFlags.GcFlags.pcFreeHeap * max) / 200;

    // if we're going to go over the maximum heap size, reduce the
    // size of the generations accordingly.  The calculation is
    // different if compaction is turned on, because we don't need
    // to double the space required to collect the old generation.
    if (RtsFlags.GcFlags.compact) {
      if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
	size = (max - min_alloc) / ((gens - 1) * 2 - 1);
      }
    } else {
      if ( (size * (gens - 1) * 2) + min_alloc > max ) {
	size = (max - min_alloc) / ((gens - 1) * 2);
      }
    }
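
    /* Worked example (hypothetical numbers): gens = 2, max = 1000,
     * min_alloc = 100, live = 600 and oldGenFactor 2 give size = 1200;
     * since 1200 * (gens-1) * 2 + 100 = 2500 > 1000 we clamp to
     * size = (1000 - 100) / 2 = 450, so that copying the old generation
     * (which needs twice its size) plus the allocation area just fits.
     */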
#if 0
    fprintf(stderr,"live: %d, min_alloc: %d, size: %d, max = %d\n", live,
	    min_alloc, size, max);
#endif

    for (g = 0; g < gens; g++) {
      generations[g].max_blocks = size;
    }

    // Auto-enable compaction when the residency reaches a
    // certain percentage of the maximum heap size (default: 30%).
    if (RtsFlags.GcFlags.compact &&
	oldest_gen->steps[0].n_blocks >
	(RtsFlags.GcFlags.compactThreshold * max) / 100) {
      oldest_gen->steps[0].is_compacted = 1;
//    fprintf(stderr,"compaction: on\n", live);
    } else {
      oldest_gen->steps[0].is_compacted = 0;
//    fprintf(stderr,"compaction: off\n", live);
    }
  }

  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
    freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->is_compacted && stp->bitmap != NULL) {
	freeGroup(stp->bitmap);
      }
    }
  }

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space 
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2)
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
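
    /* Hypothetical example: with L = 100 blocks of live data and a nursery
     * of the same size, a collection needs from-space (L) + to-space (L) +
     * nursery (L) = 3L; shrinking the nursery as we approach the maximum
     * heap size brings the requirement down toward 2L.
     */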
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose 
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);

  } else {

    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); 	// approx blocks needed at next GC 

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);
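
      /* Worked example (hypothetical numbers): suggested = 1000 blocks,
       * needed = 400 and g0s0_pcnt_kept = 25 give
       * blocks = (1000 - 400) * 100 / 125 = 480: at the next GC we expect
       * roughly needed (400) + the nursery (480) + the 25% of it that
       * survives (120) = 1000 blocks, i.e. the suggested heap size.
       */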
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }

  // mark the garbage collected CAFs as dead 
#if 0 && defined(DEBUG) // doesn't work at the moment 
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list 
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // start any pending finalizers 
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die 
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC 
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info 
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling 
  /* heapCensus(to_blocks); */ /* ToDo */

  // restore enclosing cost centre 
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on 
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
    updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened. 
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    new = isAlive(w->key);
    if (new != NULL) {
      w->key = new;
      // evacuate the value and finalizer 
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list 
      *last_w = w->link;
      // and put it on the new weak ptr list 
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->link;
	*prev = next;
	continue;
      case ThreadKilled:
      case ThreadComplete:
	// finished or died.  The thread might still be alive, but we
	// don't keep it on the all_threads list.  Don't forget to
	// stub out its global_link field.
	next = t->global_link;
	t->global_link = END_TSO_QUEUE;
	*prev = next;
	continue;
      default:
	;
      }

      if (tmp == NULL) {
	// not alive (yet): leave this thread on the old_all_threads list.
	prev = &(t->global_link);
	next = t->global_link;
      } else {
	// alive: move this thread onto the all_threads list.
	next = t->global_link;
	t->global_link = all_threads;
	all_threads  = t;
	*prev = next;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
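
/* Illustrative sketch (not part of the original file): the shape of the
 * weak-pointer fixpoint described above, with hypothetical helpers.
 * Evacuating the value of one live weak pointer can make another key
 * reachable, so the traversal is repeated until a whole pass does no
 * work; only then are the remaining weak pointers known to be dead.
 */
#if 0
static rtsBool sketch_traverse_weak_ptrs(void);   // rtsTrue if it evacuated
static void    sketch_scavenge_everything(void);  // drain pending scavenging

static void
sketch_weak_fixpoint(void)
{
  rtsBool progress;
  do {
    progress = sketch_traverse_weak_ptrs();
    sketch_scavenge_everything();  // restore the "no pending work" invariant
  } while (progress);
  // anything still on old_weak_ptr_list is dead: schedule its finalizers
}
#endif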
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
    (StgClosure *)w = evacuate((StgClosure *)w);
    *last_w = w;
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    // ignore closures in generations that we're not collecting. 
    bd = Bdescr((P_)p);
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
      return p;
    }
    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
      if (bd->flags & BF_EVACUATED) {
	return p;
      } else {
	return NULL;
      }
    }
    // check the mark bit for compacted steps 
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
      return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		// rely on compatible layout with StgInd 
    case IND_OLDGEN_PERM:
      // follow indirections 
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive! 
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
	p = (StgClosure *)((StgTSO *)p)->link;
	continue;
      }

    default:
      // dead. 
      return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;
  bd->link = NULL;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
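
/* Illustrative sketch (not part of the original file): the forwarding
 * discipline implemented by copy()/upd_evacuee() above.  Once an object
 * has been copied, its old header is overwritten with stg_EVACUATED_info,
 * so any later pointer to the old copy is redirected to the new one
 * rather than copied again.
 */
#if 0
static StgClosure *
sketch_evacuate_once(StgClosure *p, step *stp)
{
  if (p->header.info == &stg_EVACUATED_info) {
    return ((StgEvacuated *)p)->evacuee;   // already moved: follow it
  }
  return copy(p, sizeW_fromITBL(get_itbl(p)), stp);  // copies and forwards
}
#endif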
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block 
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated? 
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list 
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list 
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	 if  M > N     do nothing
	 else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
			 didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable* info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
	// nope 
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
      info = get_itbl(q);
      if (info->type == TSO &&
	  ((StgTSO *)q)->what_next == ThreadRelocated) {
	q = (StgClosure *)((StgTSO *)q)->link;
	goto loop;
      }
      evacuate_large((P_)q);
      return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
      if (!is_marked((P_)q,bd)) {
	mark((P_)q,bd);
	if (mark_stack_full()) {
	  mark_stack_overflowed = rtsTrue;
	  reset_mark_stack();
	}
	push_mark_stack((P_)q);
      }
      return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong 
#endif

  // make sure the info pointer is into text space 
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  switch (info->type) {

  case MUT_VAR:
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;

  case CONSTR_0_1:
    {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&  
	  (StgChar)w <= MAX_CHARLIKE) {
	return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ... 
    }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:		// here because of MIN_UPD_SIZE 
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord offset = info->layout.selector_offset;

	  // check that the size is in range 
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  // perform the selection! 
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->flags & BF_EVACUATED) {
	      if (bd->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}

      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK_SELECTOR:
#if 0
	/* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	   something) to go into an infinite loop when the nightly
	   stage2 compiles PrelTup.lhs. */

	/* we can't recurse indefinitely in evacuate(), so set a
	 * limit on the number of times we can go around this
	 * loop.
	 */
	if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	  bdescr *bd;
	  bd = Bdescr((P_)selectee);
	  if (!(bd->flags & BF_EVACUATED)) {
	    thunk_selector_depth++;
	    selectee = evacuate(selectee);
	    thunk_selector_depth--;
	    goto selector_loop;
	  }
	}
#endif
	// otherwise, fall through... 

      case AP_UPD:
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case CAF_BLACKHOLE:
	// not evaluated yet 
	break;

#if defined(PAR)
	// a copy of the top-level cases below 
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp);
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

      case FETCH_ME:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;
#endif

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them 
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	&& ((StgIndStatic *)q)->saved_info == NULL
	&& IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these 
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation 
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    // just copy the block 
    return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
    // just copy the block 
    to = copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
    recordMutable((StgMutClosure *)to);
    return to;

  case MUT_ARR_PTRS_FROZEN:
    // just copy the block 
    return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }

#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
  ptrdiff_t diff;

  // relocate the stack pointers... 
  diff = (StgPtr)dest - (StgPtr)src; // In *words* 
  dest->sp = (StgPtr)dest->sp + diff;
  dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

  relocate_stack(dest, diff);
}

/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases! 

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done! 
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
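
/* Illustrative sketch (not part of the original file): the bit-0 tagging
 * trick used above, in isolation.  Closure pointers are word-aligned, so
 * bit 0 is free to mean "this SRT entry is an extra indirection into a
 * DLL import table"; untagging is a mask followed by one dereference.
 */
#if 0
static StgClosure *
sketch_follow_srt_entry(StgClosure *e)
{
  if ((unsigned long)e & 0x1) {
    // tagged: e (minus the tag) points into the import table,
    // which holds the real closure address
    return *(StgClosure **)((unsigned long)e & ~0x1);
  }
  return e;   // untagged: fixed at link time, points at the closure
}
#endif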
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue 
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack 
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects 
   */

  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block 
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {

    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse; // mutable. 
	p += sizeofW(StgMVar);
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE 
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE 
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);
      // fall through 

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }

    case IND_PERM:
      if (stp->gen_no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      // fall through 
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case MUT_VAR:
      evac_gen = 0;
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      evac_gen = saved_evac_gen;
      recordMutable((StgMutClosure *)p);
      failed_to_evac = rtsFalse; // mutable anyhow 
      p += sizeofW(StgMutVar);
      break;

    case MUT_CONS:
      // ignore these 
      failed_to_evac = rtsFalse; // mutable anyhow 
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
      }

    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }

    case AP_UPD: // same as PAPs 
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }

    case ARR_WORDS:
      // nothing to follow 
      p += arr_words_sizeW((StgArrWords *)p);
      break;

    case MUT_ARR_PTRS:
      // follow everything 
      {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable 
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
      }

    case MUT_ARR_PTRS_FROZEN:
      // follow everything 
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse; // mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded 
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue 
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case 

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      failed_to_evac = rtsFalse;
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
  StgPtr p, q;
  const StgInfoTable *info;
  nat saved_evac_gen;

  evac_gen = oldest_gen->no;
  saved_evac_gen = evac_gen;

linear_scan:
  while (!mark_stack_empty()) {
    p = pop_mark_stack();

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
2373 /* treat MVars specially, because we don't want to evacuate the
2374 * mut_link field in the middle of the closure.
2377 StgMVar *mvar = ((StgMVar *)p);
2379 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2380 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2381 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2382 evac_gen = saved_evac_gen;
2383 failed_to_evac = rtsFalse; // mutable.
2391 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2392 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2402 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2427 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2428 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2429 (StgClosure *)*p = evacuate((StgClosure *)*p);
2435 // don't need to do anything here: the only possible case
2436 // is that we're in a 1-space compacting collector, with
2437 // no "old" generation.
2441 case IND_OLDGEN_PERM:
2442 ((StgIndOldGen *)p)->indirectee =
2443 evacuate(((StgIndOldGen *)p)->indirectee);
2444 if (failed_to_evac) {
2445 recordOldToNewPtrs((StgMutClosure *)p);
2447 failed_to_evac = rtsFalse;
2452 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2453 evac_gen = saved_evac_gen;
2454 failed_to_evac = rtsFalse;
2459 failed_to_evac = rtsFalse;
2463 case SE_CAF_BLACKHOLE:
2471 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2472 (StgClosure *)bh->blocking_queue =
2473 evacuate((StgClosure *)bh->blocking_queue);
2474 failed_to_evac = rtsFalse;
2478 case THUNK_SELECTOR:
2480 StgSelector *s = (StgSelector *)p;
2481 s->selectee = evacuate(s->selectee);
2485 case AP_UPD: // same as PAPs
2487 /* Treat a PAP just like a section of stack, not forgetting to
2488 * evacuate the function pointer too...
2491 StgPAP* pap = (StgPAP *)p;
2493 pap->fun = evacuate(pap->fun);
2494 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2499 // follow everything
2503 evac_gen = 0; // repeatedly mutable
2504 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2505 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
2508 evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
2513 case MUT_ARR_PTRS_FROZEN:
2514 // follow everything
2518 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2519 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	break;
    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;
      }
      break;
2536 case RBH: // cf. BLACKHOLE_BQ
2539 nat size, ptrs, nonptrs, vhs;
2541 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2543 StgRBH *rbh = (StgRBH *)p;
2544 (StgClosure *)rbh->blocking_queue =
2545 evacuate((StgClosure *)rbh->blocking_queue);
2546 recordMutable((StgMutClosure *)rbh);
2547 failed_to_evac = rtsFalse; // mutable anyhow.
2549 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2550 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2556 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2557 // follow the pointer to the node which is being demanded
2558 (StgClosure *)bf->node =
2559 evacuate((StgClosure *)bf->node);
2560 // follow the link to the rest of the blocking queue
2561 (StgClosure *)bf->link =
2562 evacuate((StgClosure *)bf->link);
2563 if (failed_to_evac) {
2564 failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
2568 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2569 bf, info_type((StgClosure *)bf),
2570 bf->node, info_type(bf->node)));
2578 break; // nothing to do in this case
2580 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2583 (StgClosure *)fmbq->blocking_queue =
2584 evacuate((StgClosure *)fmbq->blocking_queue);
2585 if (failed_to_evac) {
2586 failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
2590 belch("@@ scavenge: %p (%s) exciting, isn't it",
	      p, info_type((StgClosure *)p)));
      }
      break;
2597 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2601 if (failed_to_evac) {
2602 failed_to_evac = rtsFalse;
2603 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2606 // mark the next bit to indicate "scavenged"
2607 mark(q+1, Bdescr(q));
2609 } // while (!mark_stack_empty())
2611 // start a new linear scan if the mark stack overflowed at some point
2612 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2613 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2614 mark_stack_overflowed = rtsFalse;
2615 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2616 oldgen_scan = oldgen_scan_bd->start;
2619 if (oldgen_scan_bd) {
2620 // push a new thing on the mark stack
	// find a closure that is marked but not scavenged, and start
	// scavenging it.
2624 while (oldgen_scan < oldgen_scan_bd->free
2625 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2629 if (oldgen_scan < oldgen_scan_bd->free) {
2631 // already scavenged?
2632 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2633 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2636 push_mark_stack(oldgen_scan);
2637 // ToDo: bump the linear scan by the actual size of the object
2638 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2642 oldgen_scan_bd = oldgen_scan_bd->link;
2643 if (oldgen_scan_bd != NULL) {
2644 oldgen_scan = oldgen_scan_bd->start;
2650 /* -----------------------------------------------------------------------------
2651 Scavenge one object.
2653 This is used for objects that are temporarily marked as mutable
2654 because they contain old-to-new generation pointers. Only certain
2655 objects can have this property.
2656 -------------------------------------------------------------------------- */
static rtsBool
scavenge_one(StgPtr p)
{
  const StgInfoTable *info;
  StgPtr q, next, end;
  nat saved_evac_gen = evac_gen;
  rtsBool no_luck;
2665 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2666 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2668 info = get_itbl((StgClosure *)p);
2670 switch (info->type) {
2673 case FUN_1_0: // hardly worth specialising these guys
2693 case IND_OLDGEN_PERM:
2697 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2698 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2699 (StgClosure *)*q = evacuate((StgClosure *)*q);
2705 case SE_CAF_BLACKHOLE:
2710 case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
2713 s->selectee = evacuate(s->selectee);
2718 // nothing to follow
    case MUT_ARR_PTRS:
      // follow everything
2726 evac_gen = 0; // repeatedly mutable
2727 recordMutable((StgMutClosure *)p);
2728 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2729 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2730 (StgClosure *)*p = evacuate((StgClosure *)*p);
2732 evac_gen = saved_evac_gen;
      failed_to_evac = rtsFalse;
      break;
2737 case MUT_ARR_PTRS_FROZEN:
2739 // follow everything
2742 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2743 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	(StgClosure *)*p = evacuate((StgClosure *)*p);
      }
      break;
    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;
2753 evac_gen = 0; // repeatedly mutable
	scavengeTSO(tso);
	recordMutable((StgMutClosure *)tso);
2756 evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;
      }
      break;
    case AP_UPD:
    case PAP:
      {
	StgPAP* pap = (StgPAP *)p;
2765 pap->fun = evacuate(pap->fun);
2766 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
    case IND_OLDGEN:
      // This might happen if for instance a MUT_CONS was pointing to a
      // THUNK which has since been updated.  The IND_OLDGEN will
      // be on the mutable list anyway, so we don't need to do anything
      // here.
      break;
2778 barf("scavenge_one: strange object %d", (int)(info->type));
  no_luck = failed_to_evac;
  failed_to_evac = rtsFalse;
  return (no_luck);
}
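/* How callers are expected to use the result (a sketch; cf. the real
 * call sites in scavenge_large() and scavenge_mut_once_list() below):
 * rtsTrue means some pointee could not be promoted far enough, so the
 * object must be remembered on a mutable list.
 */
#if 0
    if (scavenge_one(p)) {
	// still holds old-to-new pointers: keep it on a mutable list
	mkMutCons((StgClosure *)p, stp->gen);
    }
#endif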
2786 /* -----------------------------------------------------------------------------
2787 Scavenging mutable lists.
2789 We treat the mutable list of each generation > N (i.e. all the
2790 generations older than the one being collected) as roots. We also
2791 remove non-mutable objects from the mutable list at this point.
2792 -------------------------------------------------------------------------- */
static void
scavenge_mut_once_list(generation *gen)
{
2797 const StgInfoTable *info;
2798 StgMutClosure *p, *next, *new_list;
2800 p = gen->mut_once_list;
2801 new_list = END_MUT_LIST;
2805 failed_to_evac = rtsFalse;
2807 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2809 // make sure the info pointer is into text space
2810 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2811 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2815 if (info->type==RBH)
2816 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2818 switch(info->type) {
2821 case IND_OLDGEN_PERM:
2823 /* Try to pull the indirectee into this generation, so we can
2824 * remove the indirection from the mutable list.
2826 ((StgIndOldGen *)p)->indirectee =
2827 evacuate(((StgIndOldGen *)p)->indirectee);
2829 #if 0 && defined(DEBUG)
2830 if (RtsFlags.DebugFlags.gc)
	/* Debugging code to print out the size of the thing we just
	 * evacuated
	 */
      {
	nat size = 0;
2835 StgPtr start = gen->steps[0].scan;
2836 bdescr *start_bd = gen->steps[0].scan_bd;
2838 scavenge(&gen->steps[0]);
2839 if (start_bd != gen->steps[0].scan_bd) {
2840 size += (P_)BLOCK_ROUND_UP(start) - start;
2841 start_bd = start_bd->link;
2842 while (start_bd != gen->steps[0].scan_bd) {
2843 size += BLOCK_SIZE_W;
2844 start_bd = start_bd->link;
2846 size += gen->steps[0].scan -
2847 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2849 size = gen->steps[0].scan - start;
2851 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2855 /* failed_to_evac might happen if we've got more than two
2856 * generations, we're collecting only generation 0, the
       * indirection resides in generation 2 and the indirectee is
       * in generation 1.
       */
2860 if (failed_to_evac) {
2861 failed_to_evac = rtsFalse;
2862 p->mut_link = new_list;
2865 /* the mut_link field of an IND_STATIC is overloaded as the
2866 * static link field too (it just so happens that we don't need
2867 * both at the same time), so we need to NULL it out when
2868 * removing this object from the mutable list because the static
2869 * link fields are all assumed to be NULL before doing a major
    case MUT_CONS:
	/* MUT_CONS is a kind of MUT_VAR, except that we try to remove
	 * it from the mutable list if possible by promoting whatever it
	 * points to.
	 */
2881 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2882 /* didn't manage to promote everything, so put the
	     * MUT_CONS back on the list.
	     */
	    p->mut_link = new_list;
	    new_list = p;
	}
	continue;
2891 // shouldn't have anything else on the mutables list
2892 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2896 gen->mut_once_list = new_list;
static void
scavenge_mutable_list(generation *gen)
{
2903 const StgInfoTable *info;
  StgMutClosure *p, *next;
  StgPtr end, q;
2906 p = gen->saved_mut_list;
2910 failed_to_evac = rtsFalse;
2912 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2914 // make sure the info pointer is into text space
2915 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2916 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2920 if (info->type==RBH)
2921 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2923 switch(info->type) {
    case MUT_ARR_PTRS:
      // follow everything
      p->mut_link = gen->mut_list;
      gen->mut_list = p;
2932 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2933 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2934 (StgClosure *)*q = evacuate((StgClosure *)*q);
2939 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2940 case MUT_ARR_PTRS_FROZEN:
2945 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2946 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2947 (StgClosure *)*q = evacuate((StgClosure *)*q);
2951 if (failed_to_evac) {
2952 failed_to_evac = rtsFalse;
	mkMutCons((StgClosure *)p, gen);
      }
      continue;
2959 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2960 p->mut_link = gen->mut_list;
    case MVAR:
      {
	StgMVar *mvar = (StgMVar *)p;
2967 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2968 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2969 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2970 p->mut_link = gen->mut_list;
	StgTSO *tso = (StgTSO *)p;

	scavengeTSO(tso);
2981 /* Don't take this TSO off the mutable list - it might still
	 * point to some younger objects (because we set evac_gen to 0
	 * above).
	 */
2985 tso->mut_link = gen->mut_list;
2986 gen->mut_list = (StgMutClosure *)tso;
    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
2993 (StgClosure *)bh->blocking_queue =
2994 evacuate((StgClosure *)bh->blocking_queue);
2995 p->mut_link = gen->mut_list;
    /* Happens if a BLACKHOLE_BQ in the old generation is updated:
     */
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
3004 /* Try to pull the indirectee into this generation, so we can
3005 * remove the indirection from the mutable list.
3008 ((StgIndOldGen *)p)->indirectee =
3009 evacuate(((StgIndOldGen *)p)->indirectee);
3012 if (failed_to_evac) {
3013 failed_to_evac = rtsFalse;
3014 p->mut_link = gen->mut_once_list;
3015 gen->mut_once_list = p;
3022 // HWL: check whether all of these are necessary
3024 case RBH: // cf. BLACKHOLE_BQ
3026 // nat size, ptrs, nonptrs, vhs;
3028 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3029 StgRBH *rbh = (StgRBH *)p;
3030 (StgClosure *)rbh->blocking_queue =
3031 evacuate((StgClosure *)rbh->blocking_queue);
3032 if (failed_to_evac) {
3033 failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)rbh);
	}
3036 // ToDo: use size of reverted closure here!
3037 p += BLACKHOLE_sizeW();
3043 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3044 // follow the pointer to the node which is being demanded
3045 (StgClosure *)bf->node =
3046 evacuate((StgClosure *)bf->node);
3047 // follow the link to the rest of the blocking queue
3048 (StgClosure *)bf->link =
3049 evacuate((StgClosure *)bf->link);
3050 if (failed_to_evac) {
3051 failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
3054 p += sizeofW(StgBlockedFetch);
3060 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3063 p += sizeofW(StgFetchMe);
3064 break; // nothing to do in this case
3066 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3069 (StgClosure *)fmbq->blocking_queue =
3070 evacuate((StgClosure *)fmbq->blocking_queue);
3071 if (failed_to_evac) {
3072 failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
3081 // shouldn't have anything else on the mutables list
3082 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
static void
scavenge_static(void)
{
3091 StgClosure* p = static_objects;
3092 const StgInfoTable *info;
  /* Always evacuate straight to the oldest generation for static
   * objects */
3096 evac_gen = oldest_gen->no;
  /* keep going until we've scavenged all the objects on the linked
     list... */
3100 while (p != END_OF_STATIC_LIST) {
3104 if (info->type==RBH)
3105 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3107 // make sure the info pointer is into text space
3108 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3109 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3111 /* Take this object *off* the static_objects list,
3112 * and put it on the scavenged_static_objects list.
3114 static_objects = STATIC_LINK(info,p);
3115 STATIC_LINK(info,p) = scavenged_static_objects;
3116 scavenged_static_objects = p;
3118 switch (info -> type) {
    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
3123 ind->indirectee = evacuate(ind->indirectee);
3125 /* might fail to evacuate it, in which case we have to pop it
3126 * back on the mutable list (and take it off the
3127 * scavenged_static list because the static link and mut link
3128 * pointers are one and the same).
3130 if (failed_to_evac) {
3131 failed_to_evac = rtsFalse;
3132 scavenged_static_objects = IND_STATIC_LINK(p);
3133 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3134 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3148 next = (P_)p->payload + info->layout.payload.ptrs;
3149 // evacuate the pointers
3150 for (q = (P_)p->payload; q < next; q++) {
	  (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
      }
3157 barf("scavenge_static: strange closure %d", (int)(info->type));
3160 ASSERT(failed_to_evac == rtsFalse);
3162 /* get the next static object from the list. Remember, there might
3163 * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
3170 /* -----------------------------------------------------------------------------
3171 scavenge_stack walks over a section of stack and evacuates all the
3172 objects pointed to by it. We can use the same code for walking
3173 PAPs, since these are just sections of copied stack.
3174 -------------------------------------------------------------------------- */
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  StgPtr q;
  const StgInfoTable* info;
  StgWord bitmap;
3183 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with either a pending argument section or an
   * activation record.
   */
3191 while (p < stack_end) {
3194 // If we've got a tag, skip over that many words on the stack
    if (IS_ARG_TAG((W_)q)) {
      p += ARG_SIZE(q);
      p++; continue;
    }
    /* Is q a pointer to a closure?
     */
3202 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3204 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3205 ASSERT(closure_STATIC((StgClosure *)q));
3207 // otherwise, must be a pointer into the allocation space.
      (StgClosure *)*p = evacuate((StgClosure *)q);
      p++;
      continue;
    }
3216 * Otherwise, q must be the info pointer of an activation
     * record.  All activation records have 'bitmap' style layout
     * info.
     */
3220 info = get_itbl((StgClosure *)p);
3222 switch (info->type) {
      // Dynamic bitmap: the mask is stored on the stack
    case RET_DYN:
3226 bitmap = ((StgRetDyn *)p)->liveness;
3227 p = (P_)&((StgRetDyn *)p)->payload[0];
      // probably a slow-entry point return address:
    case FUN:
    case FUN_STATIC:
3238 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3239 old_p, p, old_p+1));
      p++; // what if FHS!=1 !? -- HWL
      goto follow_srt;
3246 /* Specialised code for update frames, since they're so common.
3247 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
       * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
       */
    case UPDATE_FRAME:
      {
	StgUpdateFrame *frame = (StgUpdateFrame *)p;
	StgClosure *to;
3254 p += sizeofW(StgUpdateFrame);
3257 frame->updatee = evacuate(frame->updatee);
3259 #else // specialised code for update frames, not sure if it's worth it.
3261 nat type = get_itbl(frame->updatee)->type;
3263 if (type == EVACUATED) {
3264 frame->updatee = evacuate(frame->updatee);
3267 bdescr *bd = Bdescr((P_)frame->updatee);
3269 if (bd->gen_no > N) {
3270 if (bd->gen_no < evac_gen) {
3271 failed_to_evac = rtsTrue;
3276 // Don't promote blackholes
3278 if (!(stp->gen_no == 0 &&
3280 stp->no == stp->gen->n_steps-1)) {
3287 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3288 sizeofW(StgHeader), stp);
3289 frame->updatee = to;
3292 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3293 frame->updatee = to;
3294 recordMutable((StgMutClosure *)to);
3297 /* will never be SE_{,CAF_}BLACKHOLE, since we
3298 don't push an update frame for single-entry thunks. KSW 1999-01. */
3299 barf("scavenge_stack: UPDATE_FRAME updatee");
3305 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
3312 bitmap = info->layout.bitmap;
      p++;
      // this assumes that the payload starts immediately after the info-ptr
    small_bitmap:
3316 while (bitmap != 0) {
3317 if ((bitmap & 1) == 0) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
      }

    follow_srt:
      scavenge_srt(info);
      continue;
3328 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3333 StgLargeBitmap *large_bitmap;
3336 large_bitmap = info->layout.large_bitmap;
3339 for (i=0; i<large_bitmap->size; i++) {
3340 bitmap = large_bitmap->bitmap[i];
3341 q = p + BITS_IN(W_);
3342 while (bitmap != 0) {
3343 if ((bitmap & 1) == 0) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	  }
	  p++;
	  bitmap = bitmap >> 1;
	}
3349 if (i+1 < large_bitmap->size) {
3351 (StgClosure *)*p = evacuate((StgClosure *)*p);
	// and don't forget to follow the SRT
	goto follow_srt;
      }
3362 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3367 /*-----------------------------------------------------------------------------
3368 scavenge the large object list.
3370 evac_gen set by caller; similar games played with evac_gen as with
3371 scavenge() - see comment at the top of scavenge(). Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
static void
scavenge_large(step *stp)
{
  bdescr *bd;
  StgPtr p;
3382 bd = stp->new_large_objects;
3384 for (; bd != NULL; bd = stp->new_large_objects) {
3386 /* take this object *off* the large objects list and put it on
3387 * the scavenged large objects list. This is so that we can
3388 * treat new_large_objects as a stack and push new objects on
3389 * the front when evacuating.
3391 stp->new_large_objects = bd->link;
3392 dbl_link_onto(bd, &stp->scavenged_large_objects);
3394 // update the block count in this step.
3395 stp->n_scavenged_large_blocks += bd->blocks;
    p = bd->start;
    if (scavenge_one(p)) {
3399 mkMutCons((StgClosure *)p, stp->gen);
3404 /* -----------------------------------------------------------------------------
3405 Initialising the static object & mutable lists
3406 -------------------------------------------------------------------------- */
static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;
3415 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = STATIC_LINK(info, p);
    STATIC_LINK(info,p) = NULL;
  }
}
3422 /* This function is only needed because we share the mutable link
3423 * field with the static link field in an IND_STATIC, so we have to
3424 * zero the mut_link field before doing a major GC, which needs the
3425 * static link field.
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */
static void
zero_mutable_list( StgMutClosure *first )
{
3434 StgMutClosure *next, *c;
  for (c = first; c != END_MUT_LIST; c = next) {
    next = c->mut_link;
    c->mut_link = NULL;
  }
}
3442 /* -----------------------------------------------------------------------------
3444 -------------------------------------------------------------------------- */
void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
3452 c = (StgIndStatic *)c->static_link)
3454 c->header.info = c->saved_info;
3455 c->saved_info = NULL;
3456 // could, but not necessary: c->static_link = NULL;
void
scavengeCAFs( void )
{
    StgIndStatic *c;

    evac_gen = 0;
3467 for (c = (StgIndStatic *)caf_list; c != NULL;
3468 c = (StgIndStatic *)c->static_link)
3470 c->indirectee = evacuate(c->indirectee);
3474 /* -----------------------------------------------------------------------------
3475 Sanity code for CAF garbage collection.
3477 With DEBUG turned on, we manage a CAF list in addition to the SRT
3478 mechanism. After GC, we run down the CAF list and blackhole any
3479 CAFs which have been garbage collected. This means we get an error
3480 whenever the program tries to enter a garbage collected CAF.
   Any garbage collected CAFs are taken off the CAF list at the same
   time.
3484 -------------------------------------------------------------------------- */
3486 #if 0 && defined(DEBUG)
void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;
3504 ASSERT(info->type == IND_STATIC);
3506 if (STATIC_LINK(info,p) == NULL) {
3507 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3509 SET_INFO(p,&stg_BLACKHOLE_info);
3510 p = STATIC_LINK2(info,p);
3514 pp = &STATIC_LINK2(info,p);
3521 // belch("%d CAFs live", i);
3526 /* -----------------------------------------------------------------------------
   Lazy black holing.

   Whenever a thread returns to the scheduler after possibly doing
3530 some work, we have to run down the stack and black-hole all the
3531 closures referred to by update frames.
3532 -------------------------------------------------------------------------- */
3535 threadLazyBlackHole(StgTSO *tso)
3537 StgUpdateFrame *update_frame;
  StgBlockingQueue *bh;
  StgPtr stack_end;
3541 stack_end = &tso->stack[tso->stack_size];
3542 update_frame = tso->su;
3545 switch (get_itbl(update_frame)->type) {
3548 update_frame = ((StgCatchFrame *)update_frame)->link;
3552 bh = (StgBlockingQueue *)update_frame->updatee;
3554 /* if the thunk is already blackholed, it means we've also
3555 * already blackholed the rest of the thunks on this stack,
3556 * so we can stop early.
3558 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3559 * don't interfere with this optimisation.
3561 if (bh->header.info == &stg_BLACKHOLE_info) {
3565 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3566 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3567 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3568 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3570 SET_INFO(bh,&stg_BLACKHOLE_info);
3573 update_frame = update_frame->link;
3577 update_frame = ((StgSeqFrame *)update_frame)->link;
3583 barf("threadPaused");
3589 /* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
3593 * lazy black holing here.
3595 * -------------------------------------------------------------------------- */
3598 threadSqueezeStack(StgTSO *tso)
3600 lnat displacement = 0;
3601 StgUpdateFrame *frame;
3602 StgUpdateFrame *next_frame; // Temporally next
3603 StgUpdateFrame *prev_frame; // Temporally previous
  StgPtr sp;
  StgPtr bottom;
  rtsBool prev_was_update_frame;
3607 StgUpdateFrame *top_frame;
  nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
      bhs=0, squeezes=0;
3610 void printObj( StgClosure *obj ); // from Printer.c
3612 top_frame = tso->su;
3615 bottom = &(tso->stack[tso->stack_size]);
  /* There must be at least one frame, namely the STOP_FRAME.
   */
3620 ASSERT((P_)frame < bottom);
3622 /* Walk down the stack, reversing the links between frames so that
3623 * we can walk back up as we squeeze from the bottom. Note that
3624 * next_frame and prev_frame refer to next and previous as they were
3625 * added to the stack, rather than the way we see them in this
3626 * walk. (It makes the next loop less confusing.)
3628 * Stop if we find an update frame pointing to a black hole
   * (see comment in threadLazyBlackHole()).
   */
  next_frame = NULL;
3633 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3634 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3635 prev_frame = frame->link;
3636 frame->link = next_frame;
3641 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3642 printObj((StgClosure *)prev_frame);
3643 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3646 switch (get_itbl(frame)->type) {
3649 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3662 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3664 printObj((StgClosure *)prev_frame);
3667 if (get_itbl(frame)->type == UPDATE_FRAME
3668 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3673 /* Now, we're at the bottom. Frame points to the lowest update
3674 * frame on the stack, and its link actually points to the frame
3675 * above. We have to walk back up the stack, squeezing out empty
3676 * update frames and turning the pointers back around on the way
3679 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3680 * we never want to eliminate it anyway. Just walk one step up
3681 * before starting to squeeze. When you get to the topmost frame,
3682 * remember that there are still some words above it that might have
3689 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3692 * Loop through all of the frames (everything except the very
3693 * bottom). Things are complicated by the fact that we have
3694 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3695 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3697 while (frame != NULL) {
3699 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3700 rtsBool is_update_frame;
3702 next_frame = frame->link;
3703 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
    /* Check to see if
     *   1. both the previous and current frame are update frames
     *   2. the current frame is empty
     */
3709 if (prev_was_update_frame && is_update_frame &&
3710 (P_)prev_frame == frame_bottom + displacement) {
3712 // Now squeeze out the current frame
3713 StgClosure *updatee_keep = prev_frame->updatee;
3714 StgClosure *updatee_bypass = frame->updatee;
3717 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3721 /* Deal with blocking queues. If both updatees have blocked
3722 * threads, then we should merge the queues into the update
3723 * frame that we're keeping.
3725 * Alternatively, we could just wake them up: they'll just go
3726 * straight to sleep on the proper blackhole! This is less code
3727 * and probably less bug prone, although it's probably much
3730 #if 0 // do it properly...
3731 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
# error Unimplemented lazy BH warning. (KSW 1999-01)
# endif
3734 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
	  || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info) {
3737 // Sigh. It has one. Don't lose those threads!
3738 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3739 // Urgh. Two queues. Merge them.
3740 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3742 while (keep_tso->link != END_TSO_QUEUE) {
3743 keep_tso = keep_tso->link;
3745 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3748 // For simplicity, just swap the BQ for the BH
3749 P_ temp = updatee_keep;
3751 updatee_keep = updatee_bypass;
3752 updatee_bypass = temp;
3754 // Record the swap in the kept frame (below)
3755 prev_frame->updatee = updatee_keep;
3760 TICK_UPD_SQUEEZED();
3761 /* wasn't there something about update squeezing and ticky to be
3762 * sorted out? oh yes: we aren't counting each enter properly
3763 * in this case. See the log somewhere. KSW 1999-04-21
3765 * Check two things: that the two update frames don't point to
3766 * the same object, and that the updatee_bypass isn't already an
3767 * indirection. Both of these cases only happen when we're in a
       * black hole-style loop (and there are multiple update frames
3769 * on the stack pointing to the same closure), but they can both
       * screw us up if we don't check.
       */
3772 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3773 // this wakes the threads up
	UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
      }
3777 sp = (P_)frame - 1; // sp = stuff to slide
3778 displacement += sizeofW(StgUpdateFrame);
    } else {
      // No squeeze for this frame
3782 sp = frame_bottom - 1; // Keep the current frame
      /* Do lazy black-holing.
       */
3786 if (is_update_frame) {
3787 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3788 if (bh->header.info != &stg_BLACKHOLE_info &&
3789 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3790 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3791 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3792 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3795 /* zero out the slop so that the sanity checker can tell
	   * where the next closure is.
	   */
	  {
3799 StgInfoTable *info = get_itbl(bh);
3800 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3801 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3802 * info is used for a different purpose, and it's exactly the
3803 * same size as a BLACKHOLE in any case.
3805 if (info->type != THUNK_SELECTOR) {
3806 for (i = np; i < np + nw; i++) {
3807 ((StgClosure *)bh)->payload[i] = 0;
3812 SET_INFO(bh,&stg_BLACKHOLE_info);
3816 // Fix the link in the current frame (should point to the frame below)
3817 frame->link = prev_frame;
3818 prev_was_update_frame = is_update_frame;
3821 // Now slide all words from sp up to the next frame
3823 if (displacement > 0) {
3824 P_ next_frame_bottom;
3826 if (next_frame != NULL)
3827 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3829 next_frame_bottom = tso->sp - 1;
3833 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3837 while (sp >= next_frame_bottom) {
	sp[displacement] = *sp;
	sp -= 1;
      }
3842 (P_)prev_frame = (P_)frame + displacement;
3846 tso->sp += displacement;
3847 tso->su = prev_frame;
3850 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3851 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
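/* One squeeze, in miniature (a sketch): when two adjacent update
 * frames are found, the pair collapses into a single frame, the
 * bypassed updatee is overwritten with an indirection to the kept
 * updatee, and everything below slides up by one frame's worth.  The
 * core step, repeated from the loop above for emphasis:
 */
#if 0
    if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
	UPD_IND_NOLOCK(updatee_bypass, updatee_keep);  // wakes threads too
    }
    displacement += sizeofW(StgUpdateFrame);           // one frame gone
#endif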
3856 /* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
void
threadPaused(StgTSO *tso)
{
3866 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3867 threadSqueezeStack(tso); // does black holing too
  else
    threadLazyBlackHole(tso);
}
3872 /* -----------------------------------------------------------------------------
3874 * -------------------------------------------------------------------------- */
void
printMutOnceList(generation *gen)
{
3880 StgMutClosure *p, *next;
3882 p = gen->mut_once_list;
3885 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3886 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3887 fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
void
printMutableList(generation *gen)
{
3896 StgMutClosure *p, *next;
3901 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3902 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3903 fprintf(stderr, "%p (%s), ",
	    p, info_type((StgClosure *)p));
  }
  fputc('\n', stderr);
}
3909 static inline rtsBool
3910 maybeLarge(StgClosure *closure)
3912 StgInfoTable *info = get_itbl(closure);
3914 /* closure types that may be found on the new_large_objects list;
3915 see scavenge_large */
3916 return (info->type == MUT_ARR_PTRS ||
3917 info->type == MUT_ARR_PTRS_FROZEN ||
3918 info->type == TSO ||
3919 info->type == ARR_WORDS);