1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.119 2001/08/10 10:52:12 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
14 #include "StoragePriv.h"
17 #include "SchedAPI.h" // for ReverCAFs prototype
19 #include "BlockAlloc.h"
25 #include "StablePriv.h"
27 #include "ParTicky.h" // ToDo: move into Rts.h
28 #include "GCCompact.h"
29 #if defined(GRAN) || defined(PAR)
30 # include "GranSimRts.h"
31 # include "ParallelRts.h"
35 # include "ParallelDebug.h"
40 #if defined(RTS_GTK_FRONTPANEL)
41 #include "FrontPanel.h"
44 /* STATIC OBJECT LIST.
47 * We maintain a linked list of static objects that are still live.
48 * The requirements for this list are:
50 * - we need to scan the list while adding to it, in order to
51 * scavenge all the static objects (in the same way that
52 * breadth-first scavenging works for dynamic objects).
54 * - we need to be able to tell whether an object is already on
55 * the list, to break loops.
57 * Each static object has a "static link field", which we use for
58 * linking objects on to the list. We use a stack-type list, consing
59 * objects on the front as they are added (this means that the
60 * scavenge phase is depth-first, not breadth-first, but that
63 * A separate list is kept for objects that have been scavenged
64 * already - this is so that we can zero all the marks afterwards.
66 * An object is on the list if its static link field is non-zero; this
67 * means that we have to mark the end of the list with '1', not NULL.
69 * Extra notes for generational GC:
71 * Each generation has a static object list associated with it. When
72 * collecting generations up to N, we treat the static object lists
73 * from generations > N as roots.
75 * We build up a static object list while collecting generations 0..N,
76 * which is then appended to the static object list of generation N+1.
78 StgClosure* static_objects; // live static objects
79 StgClosure* scavenged_static_objects; // static objects scavenged so far
81 /* N is the oldest generation being collected, where the generations
82 * are numbered starting at 0. A major GC (indicated by the major_gc
83 * flag) is when we're collecting all generations. We only attempt to
84 * deal with static objects and GC CAFs when doing a major GC.
87 static rtsBool major_gc;
89 /* Youngest generation that objects should be evacuated to in
90 * evacuate(). (Logically an argument to evacuate, but it's static
91 * a lot of the time so we optimise it into a global variable).
97 StgWeak *old_weak_ptr_list; // also pending finaliser list
98 static rtsBool weak_done; // all done for this pass
100 /* List of all threads during GC
102 static StgTSO *old_all_threads;
103 static StgTSO *resurrected_threads;
105 /* Flag indicating failure to evacuate an object to the desired
108 static rtsBool failed_to_evac;
110 /* Old to-space (used for two-space collector only)
112 bdescr *old_to_blocks;
114 /* Data used for allocation area sizing.
116 lnat new_blocks; // blocks allocated during this GC
117 lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
119 /* Used to avoid long recursion due to selector thunks
121 lnat thunk_selector_depth = 0;
122 #define MAX_THUNK_SELECTOR_DEPTH 256
124 /* -----------------------------------------------------------------------------
125 Static function declarations
126 -------------------------------------------------------------------------- */
128 static void mark_root ( StgClosure **root );
129 static StgClosure * evacuate ( StgClosure *q );
130 static void zero_static_object_list ( StgClosure* first_static );
131 static void zero_mutable_list ( StgMutClosure *first );
133 static rtsBool traverse_weak_ptr_list ( void );
134 static void mark_weak_ptr_list ( StgWeak **list );
136 static void scavenge ( step * );
137 static void scavenge_mark_stack ( void );
138 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
139 static rtsBool scavenge_one ( StgPtr p );
140 static void scavenge_large ( step * );
141 static void scavenge_static ( void );
142 static void scavenge_mutable_list ( generation *g );
143 static void scavenge_mut_once_list ( generation *g );
144 static void scavengeCAFs ( void );
146 #if 0 && defined(DEBUG)
147 static void gcCAFs ( void );
150 /* -----------------------------------------------------------------------------
151 inline functions etc. for dealing with the mark bitmap & stack.
152 -------------------------------------------------------------------------- */
154 #define MARK_STACK_BLOCKS 4
156 static bdescr *mark_stack_bdescr;
157 static StgPtr *mark_stack;
158 static StgPtr *mark_sp;
159 static StgPtr *mark_splim;
161 // Flag and pointers used for falling back to a linear scan when the
162 // mark stack overflows.
163 static rtsBool mark_stack_overflowed;
164 static bdescr *oldgen_scan_bd;
165 static StgPtr oldgen_scan;
167 static inline rtsBool
168 mark_stack_empty(void)
170 return mark_sp == mark_stack;
173 static inline rtsBool
174 mark_stack_full(void)
176 return mark_sp >= mark_splim;
180 reset_mark_stack(void)
182 mark_sp = mark_stack;
186 push_mark_stack(StgPtr p)
197 /* -----------------------------------------------------------------------------
200 For garbage collecting generation N (and all younger generations):
202 - follow all pointers in the root set. the root set includes all
203 mutable objects in all steps in all generations.
205 - for each pointer, evacuate the object it points to into either
206 + to-space in the next higher step in that generation, if one exists,
207 + if the object's generation == N, then evacuate it to the next
208 generation if one exists, or else to-space in the current
210 + if the object's generation < N, then evacuate it to to-space
211 in the next generation.
213 - repeatedly scavenge to-space from each step in each generation
214 being collected until no more objects can be evacuated.
216 - free from-space in each step, and set from-space = to-space.
218 -------------------------------------------------------------------------- */
221 GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
225 lnat live, allocated, collected = 0, copied = 0;
226 lnat oldgen_saved_blocks = 0;
230 CostCentreStack *prev_CCS;
233 #if defined(DEBUG) && defined(GRAN)
234 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
238 // tell the stats department that we've started a GC
241 // Init stats and print par specific (timing) info
242 PAR_TICKY_PAR_START();
244 // attribute any costs to CCS_GC
250 /* Approximate how much we allocated.
251 * Todo: only when generating stats?
253 allocated = calcAllocated();
255 /* Figure out which generation to collect
257 if (force_major_gc) {
258 N = RtsFlags.GcFlags.generations - 1;
262 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
263 if (generations[g].steps[0].n_blocks +
264 generations[g].steps[0].n_large_blocks
265 >= generations[g].max_blocks) {
269 major_gc = (N == RtsFlags.GcFlags.generations-1);
272 #ifdef RTS_GTK_FRONTPANEL
273 if (RtsFlags.GcFlags.frontpanel) {
274 updateFrontPanelBeforeGC(N);
278 // check stack sanity *before* GC (ToDo: check all threads)
280 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
282 IF_DEBUG(sanity, checkFreeListSanity());
284 /* Initialise the static object lists
286 static_objects = END_OF_STATIC_LIST;
287 scavenged_static_objects = END_OF_STATIC_LIST;
289 /* zero the mutable list for the oldest generation (see comment by
290 * zero_mutable_list below).
293 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
296 /* Save the old to-space if we're doing a two-space collection
298 if (RtsFlags.GcFlags.generations == 1) {
299 old_to_blocks = g0s0->to_blocks;
300 g0s0->to_blocks = NULL;
303 /* Keep a count of how many new blocks we allocated during this GC
304 * (used for resizing the allocation area, later).
308 /* Initialise to-space in all the generations/steps that we're
311 for (g = 0; g <= N; g++) {
312 generations[g].mut_once_list = END_MUT_LIST;
313 generations[g].mut_list = END_MUT_LIST;
315 for (s = 0; s < generations[g].n_steps; s++) {
317 // generation 0, step 0 doesn't need to-space
318 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
322 /* Get a free block for to-space. Extra blocks will be chained on
326 stp = &generations[g].steps[s];
327 ASSERT(stp->gen_no == g);
328 ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
332 bd->flags = BF_EVACUATED; // it's a to-space block
334 stp->hpLim = stp->hp + BLOCK_SIZE_W;
337 stp->n_to_blocks = 1;
338 stp->scan = bd->start;
340 stp->new_large_objects = NULL;
341 stp->scavenged_large_objects = NULL;
342 stp->n_scavenged_large_blocks = 0;
344 // mark the large objects as not evacuated yet
345 for (bd = stp->large_objects; bd; bd = bd->link) {
346 bd->flags = BF_LARGE;
349 // for a compacted step, we need to allocate the bitmap
350 if (stp->is_compacted) {
351 nat bitmap_size; // in bytes
352 bdescr *bitmap_bdescr;
355 bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
357 if (bitmap_size > 0) {
358 bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
360 stp->bitmap = bitmap_bdescr;
361 bitmap = bitmap_bdescr->start;
363 IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
364 bitmap_size, bitmap););
366 // don't forget to fill it with zeros!
367 memset(bitmap, 0, bitmap_size);
369 // for each block in this step, point to its bitmap from the
371 for (bd=stp->blocks; bd != NULL; bd = bd->link) {
372 bd->u.bitmap = bitmap;
373 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
380 /* make sure the older generations have at least one block to
381 * allocate into (this makes things easier for copy(), see below.
383 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
384 for (s = 0; s < generations[g].n_steps; s++) {
385 stp = &generations[g].steps[s];
386 if (stp->hp_bd == NULL) {
387 ASSERT(stp->blocks == NULL);
392 bd->flags = 0; // *not* a to-space block or a large object
394 stp->hpLim = stp->hp + BLOCK_SIZE_W;
400 /* Set the scan pointer for older generations: remember we
401 * still have to scavenge objects that have been promoted. */
403 stp->scan_bd = stp->hp_bd;
404 stp->to_blocks = NULL;
405 stp->n_to_blocks = 0;
406 stp->new_large_objects = NULL;
407 stp->scavenged_large_objects = NULL;
408 stp->n_scavenged_large_blocks = 0;
412 /* Allocate a mark stack if we're doing a major collection.
415 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
416 mark_stack = (StgPtr *)mark_stack_bdescr->start;
417 mark_sp = mark_stack;
418 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
420 mark_stack_bdescr = NULL;
423 /* -----------------------------------------------------------------------
424 * follow all the roots that we know about:
425 * - mutable lists from each generation > N
426 * we want to *scavenge* these roots, not evacuate them: they're not
427 * going to move in this GC.
428 * Also: do them in reverse generation order. This is because we
429 * often want to promote objects that are pointed to by older
430 * generations early, so we don't have to repeatedly copy them.
431 * Doing the generations in reverse order ensures that we don't end
432 * up in the situation where we want to evac an object to gen 3 and
433 * it has already been evaced to gen 2.
437 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
438 generations[g].saved_mut_list = generations[g].mut_list;
439 generations[g].mut_list = END_MUT_LIST;
442 // Do the mut-once lists first
443 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
444 IF_PAR_DEBUG(verbose,
445 printMutOnceList(&generations[g]));
446 scavenge_mut_once_list(&generations[g]);
448 for (st = generations[g].n_steps-1; st >= 0; st--) {
449 scavenge(&generations[g].steps[st]);
453 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
454 IF_PAR_DEBUG(verbose,
455 printMutableList(&generations[g]));
456 scavenge_mutable_list(&generations[g]);
458 for (st = generations[g].n_steps-1; st >= 0; st--) {
459 scavenge(&generations[g].steps[st]);
466 /* follow all the roots that the application knows about.
469 get_roots(mark_root);
472 /* And don't forget to mark the TSO if we got here direct from
474 /* Not needed in a seq version?
476 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
480 // Mark the entries in the GALA table of the parallel system
481 markLocalGAs(major_gc);
482 // Mark all entries on the list of pending fetches
483 markPendingFetches(major_gc);
486 /* Mark the weak pointer list, and prepare to detect dead weak
489 mark_weak_ptr_list(&weak_ptr_list);
490 old_weak_ptr_list = weak_ptr_list;
491 weak_ptr_list = NULL;
492 weak_done = rtsFalse;
494 /* The all_threads list is like the weak_ptr_list.
495 * See traverse_weak_ptr_list() for the details.
497 old_all_threads = all_threads;
498 all_threads = END_TSO_QUEUE;
499 resurrected_threads = END_TSO_QUEUE;
501 /* Mark the stable pointer table.
503 markStablePtrTable(mark_root);
507 /* ToDo: To fix the caf leak, we need to make the commented out
508 * parts of this code do something sensible - as described in
511 extern void markHugsObjects(void);
516 /* -------------------------------------------------------------------------
517 * Repeatedly scavenge all the areas we know about until there's no
518 * more scavenging to be done.
525 // scavenge static objects
526 if (major_gc && static_objects != END_OF_STATIC_LIST) {
527 IF_DEBUG(sanity, checkStaticObjects(static_objects));
531 /* When scavenging the older generations: Objects may have been
532 * evacuated from generations <= N into older generations, and we
533 * need to scavenge these objects. We're going to try to ensure that
534 * any evacuations that occur move the objects into at least the
535 * same generation as the object being scavenged, otherwise we
536 * have to create new entries on the mutable list for the older
540 // scavenge each step in generations 0..maxgen
546 // scavenge objects in compacted generation
547 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
548 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
549 scavenge_mark_stack();
553 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
554 for (st = generations[gen].n_steps; --st >= 0; ) {
555 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
558 stp = &generations[gen].steps[st];
560 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
565 if (stp->new_large_objects != NULL) {
574 if (flag) { goto loop; }
577 if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
583 // Reconstruct the Global Address tables used in GUM
584 rebuildGAtables(major_gc);
585 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
588 // Now see which stable names are still alive.
591 // Tidy the end of the to-space chains
592 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
593 for (s = 0; s < generations[g].n_steps; s++) {
594 stp = &generations[g].steps[s];
595 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
596 stp->hp_bd->free = stp->hp;
597 stp->hp_bd->link = NULL;
602 // NO MORE EVACUATION AFTER THIS POINT!
603 // Finally: compaction of the oldest generation.
604 if (major_gc && oldest_gen->steps[0].is_compacted) {
605 // save number of blocks for stats
606 oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
610 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
612 /* run through all the generations/steps and tidy up
614 copied = new_blocks * BLOCK_SIZE_W;
615 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
618 generations[g].collections++; // for stats
621 for (s = 0; s < generations[g].n_steps; s++) {
623 stp = &generations[g].steps[s];
625 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
626 // stats information: how much we copied
628 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
633 // for generations we collected...
636 // rough calculation of garbage collected, for stats output
637 if (stp->is_compacted) {
638 collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
640 collected += stp->n_blocks * BLOCK_SIZE_W;
643 /* free old memory and shift to-space into from-space for all
644 * the collected steps (except the allocation area). These
645 * freed blocks will probaby be quickly recycled.
647 if (!(g == 0 && s == 0)) {
648 if (stp->is_compacted) {
649 // for a compacted step, just shift the new to-space
650 // onto the front of the now-compacted existing blocks.
651 for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
652 bd->flags &= ~BF_EVACUATED; // now from-space
654 // tack the new blocks on the end of the existing blocks
655 if (stp->blocks == NULL) {
656 stp->blocks = stp->to_blocks;
658 for (bd = stp->blocks; bd != NULL; bd = next) {
661 bd->link = stp->to_blocks;
665 // add the new blocks to the block tally
666 stp->n_blocks += stp->n_to_blocks;
668 freeChain(stp->blocks);
669 stp->blocks = stp->to_blocks;
670 stp->n_blocks = stp->n_to_blocks;
671 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
672 bd->flags &= ~BF_EVACUATED; // now from-space
675 stp->to_blocks = NULL;
676 stp->n_to_blocks = 0;
679 /* LARGE OBJECTS. The current live large objects are chained on
680 * scavenged_large, having been moved during garbage
681 * collection from large_objects. Any objects left on
682 * large_objects list are therefore dead, so we free them here.
684 for (bd = stp->large_objects; bd != NULL; bd = next) {
690 // update the count of blocks used by large objects
691 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
692 bd->flags &= ~BF_EVACUATED;
694 stp->large_objects = stp->scavenged_large_objects;
695 stp->n_large_blocks = stp->n_scavenged_large_blocks;
698 // for older generations...
700 /* For older generations, we need to append the
701 * scavenged_large_object list (i.e. large objects that have been
702 * promoted during this GC) to the large_object list for that step.
704 for (bd = stp->scavenged_large_objects; bd; bd = next) {
706 bd->flags &= ~BF_EVACUATED;
707 dbl_link_onto(bd, &stp->large_objects);
710 // add the new blocks we promoted during this GC
711 stp->n_blocks += stp->n_to_blocks;
712 stp->n_large_blocks += stp->n_scavenged_large_blocks;
717 /* Reset the sizes of the older generations when we do a major
720 * CURRENT STRATEGY: make all generations except zero the same size.
721 * We have to stay within the maximum heap size, and leave a certain
722 * percentage of the maximum heap size available to allocate into.
724 if (major_gc && RtsFlags.GcFlags.generations > 1) {
725 nat live, size, min_alloc;
726 nat max = RtsFlags.GcFlags.maxHeapSize;
727 nat gens = RtsFlags.GcFlags.generations;
729 // live in the oldest generations
730 live = oldest_gen->steps[0].n_blocks +
731 oldest_gen->steps[0].n_large_blocks;
733 // default max size for all generations except zero
734 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
735 RtsFlags.GcFlags.minOldGenSize);
737 // minimum size for generation zero
738 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
739 RtsFlags.GcFlags.minAllocAreaSize);
741 // Auto-enable compaction when the residency reaches a
742 // certain percentage of the maximum heap size (default: 30%).
743 if (RtsFlags.GcFlags.compact ||
745 oldest_gen->steps[0].n_blocks >
746 (RtsFlags.GcFlags.compactThreshold * max) / 100)) {
747 oldest_gen->steps[0].is_compacted = 1;
748 // fprintf(stderr,"compaction: on\n", live);
750 oldest_gen->steps[0].is_compacted = 0;
751 // fprintf(stderr,"compaction: off\n", live);
754 // if we're going to go over the maximum heap size, reduce the
755 // size of the generations accordingly. The calculation is
756 // different if compaction is turned on, because we don't need
757 // to double the space required to collect the old generation.
759 if (oldest_gen->steps[0].is_compacted) {
760 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
761 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
764 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
765 size = (max - min_alloc) / ((gens - 1) * 2);
775 fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
776 min_alloc, size, max);
779 for (g = 0; g < gens; g++) {
780 generations[g].max_blocks = size;
784 // Guess the amount of live data for stats.
787 /* Free the small objects allocated via allocate(), since this will
788 * all have been copied into G0S1 now.
790 if (small_alloc_list != NULL) {
791 freeChain(small_alloc_list);
793 small_alloc_list = NULL;
797 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
799 // Start a new pinned_object_block
800 pinned_object_block = NULL;
802 /* Free the mark stack.
804 if (mark_stack_bdescr != NULL) {
805 freeGroup(mark_stack_bdescr);
810 for (g = 0; g <= N; g++) {
811 for (s = 0; s < generations[g].n_steps; s++) {
812 stp = &generations[g].steps[s];
813 if (stp->is_compacted && stp->bitmap != NULL) {
814 freeGroup(stp->bitmap);
819 /* Two-space collector:
820 * Free the old to-space, and estimate the amount of live data.
822 if (RtsFlags.GcFlags.generations == 1) {
825 if (old_to_blocks != NULL) {
826 freeChain(old_to_blocks);
828 for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
829 bd->flags = 0; // now from-space
832 /* For a two-space collector, we need to resize the nursery. */
834 /* set up a new nursery. Allocate a nursery size based on a
835 * function of the amount of live data (by default a factor of 2)
836 * Use the blocks from the old nursery if possible, freeing up any
839 * If we get near the maximum heap size, then adjust our nursery
840 * size accordingly. If the nursery is the same size as the live
841 * data (L), then we need 3L bytes. We can reduce the size of the
842 * nursery to bring the required memory down near 2L bytes.
844 * A normal 2-space collector would need 4L bytes to give the same
845 * performance we get from 3L bytes, reducing to the same
846 * performance at 2L bytes.
848 blocks = g0s0->n_to_blocks;
850 if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
851 RtsFlags.GcFlags.maxHeapSize ) {
852 long adjusted_blocks; // signed on purpose
855 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
856 IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
857 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
858 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
861 blocks = adjusted_blocks;
864 blocks *= RtsFlags.GcFlags.oldGenFactor;
865 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
866 blocks = RtsFlags.GcFlags.minAllocAreaSize;
869 resizeNursery(blocks);
872 /* Generational collector:
873 * If the user has given us a suggested heap size, adjust our
874 * allocation area to make best use of the memory available.
877 if (RtsFlags.GcFlags.heapSizeSuggestion) {
879 nat needed = calcNeeded(); // approx blocks needed at next GC
881 /* Guess how much will be live in generation 0 step 0 next time.
882 * A good approximation is obtained by finding the
883 * percentage of g0s0 that was live at the last minor GC.
886 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
889 /* Estimate a size for the allocation area based on the
890 * information available. We might end up going slightly under
891 * or over the suggested heap size, but we should be pretty
894 * Formula: suggested - needed
895 * ----------------------------
896 * 1 + g0s0_pcnt_kept/100
898 * where 'needed' is the amount of memory needed at the next
899 * collection for collecting all steps except g0s0.
902 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
903 (100 + (long)g0s0_pcnt_kept);
905 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
906 blocks = RtsFlags.GcFlags.minAllocAreaSize;
909 resizeNursery((nat)blocks);
913 // mark the garbage collected CAFs as dead
914 #if 0 && defined(DEBUG) // doesn't work at the moment
915 if (major_gc) { gcCAFs(); }
918 // zero the scavenged static object list
920 zero_static_object_list(scavenged_static_objects);
926 // start any pending finalizers
927 scheduleFinalizers(old_weak_ptr_list);
929 // send exceptions to any threads which were about to die
930 resurrectThreads(resurrected_threads);
932 // Update the stable pointer hash table.
933 updateStablePtrTable(major_gc);
935 // check sanity after GC
936 IF_DEBUG(sanity, checkSanity());
938 // extra GC trace info
939 IF_DEBUG(gc, statDescribeGens());
942 // symbol-table based profiling
943 /* heapCensus(to_blocks); */ /* ToDo */
946 // restore enclosing cost centre
952 // check for memory leaks if sanity checking is on
953 IF_DEBUG(sanity, memInventory());
955 #ifdef RTS_GTK_FRONTPANEL
956 if (RtsFlags.GcFlags.frontpanel) {
957 updateFrontPanelAfterGC( N, live );
961 // ok, GC over: tell the stats department what happened.
962 stat_endGC(allocated, collected, live, copied, N);
968 /* -----------------------------------------------------------------------------
971 traverse_weak_ptr_list is called possibly many times during garbage
972 collection. It returns a flag indicating whether it did any work
973 (i.e. called evacuate on any live pointers).
975 Invariant: traverse_weak_ptr_list is called when the heap is in an
976 idempotent state. That means that there are no pending
977 evacuate/scavenge operations. This invariant helps the weak
978 pointer code decide which weak pointers are dead - if there are no
979 new live weak pointers, then all the currently unreachable ones are
982 For generational GC: we just don't try to finalize weak pointers in
983 older generations than the one we're collecting. This could
984 probably be optimised by keeping per-generation lists of weak
985 pointers, but for a few weak pointers this scheme will work.
986 -------------------------------------------------------------------------- */
989 traverse_weak_ptr_list(void)
991 StgWeak *w, **last_w, *next_w;
993 rtsBool flag = rtsFalse;
995 if (weak_done) { return rtsFalse; }
997 /* doesn't matter where we evacuate values/finalizers to, since
998 * these pointers are treated as roots (iff the keys are alive).
1002 last_w = &old_weak_ptr_list;
1003 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
1005 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
1006 * called on a live weak pointer object. Just remove it.
1008 if (w->header.info == &stg_DEAD_WEAK_info) {
1009 next_w = ((StgDeadWeak *)w)->link;
1014 ASSERT(get_itbl(w)->type == WEAK);
1016 /* Now, check whether the key is reachable.
1018 new = isAlive(w->key);
1021 // evacuate the value and finalizer
1022 w->value = evacuate(w->value);
1023 w->finalizer = evacuate(w->finalizer);
1024 // remove this weak ptr from the old_weak_ptr list
1026 // and put it on the new weak ptr list
1028 w->link = weak_ptr_list;
1031 IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
1035 last_w = &(w->link);
1041 /* Now deal with the all_threads list, which behaves somewhat like
1042 * the weak ptr list. If we discover any threads that are about to
1043 * become garbage, we wake them up and administer an exception.
1046 StgTSO *t, *tmp, *next, **prev;
1048 prev = &old_all_threads;
1049 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1051 (StgClosure *)tmp = isAlive((StgClosure *)t);
1057 ASSERT(get_itbl(t)->type == TSO);
1058 switch (t->what_next) {
1059 case ThreadRelocated:
1064 case ThreadComplete:
1065 // finshed or died. The thread might still be alive, but we
1066 // don't keep it on the all_threads list. Don't forget to
1067 // stub out its global_link field.
1068 next = t->global_link;
1069 t->global_link = END_TSO_QUEUE;
1077 // not alive (yet): leave this thread on the old_all_threads list.
1078 prev = &(t->global_link);
1079 next = t->global_link;
1082 // alive: move this thread onto the all_threads list.
1083 next = t->global_link;
1084 t->global_link = all_threads;
1091 /* If we didn't make any changes, then we can go round and kill all
1092 * the dead weak pointers. The old_weak_ptr list is used as a list
1093 * of pending finalizers later on.
1095 if (flag == rtsFalse) {
1096 for (w = old_weak_ptr_list; w; w = w->link) {
1097 w->finalizer = evacuate(w->finalizer);
1100 /* And resurrect any threads which were about to become garbage.
1103 StgTSO *t, *tmp, *next;
1104 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1105 next = t->global_link;
1106 (StgClosure *)tmp = evacuate((StgClosure *)t);
1107 tmp->global_link = resurrected_threads;
1108 resurrected_threads = tmp;
1112 weak_done = rtsTrue;
1118 /* -----------------------------------------------------------------------------
1119 After GC, the live weak pointer list may have forwarding pointers
1120 on it, because a weak pointer object was evacuated after being
1121 moved to the live weak pointer list. We remove those forwarding
1124 Also, we don't consider weak pointer objects to be reachable, but
1125 we must nevertheless consider them to be "live" and retain them.
1126 Therefore any weak pointer objects which haven't as yet been
1127 evacuated need to be evacuated now.
1128 -------------------------------------------------------------------------- */
1132 mark_weak_ptr_list ( StgWeak **list )
1134 StgWeak *w, **last_w;
1137 for (w = *list; w; w = w->link) {
1138 (StgClosure *)w = evacuate((StgClosure *)w);
1140 last_w = &(w->link);
1144 /* -----------------------------------------------------------------------------
1145 isAlive determines whether the given closure is still alive (after
1146 a garbage collection) or not. It returns the new address of the
1147 closure if it is alive, or NULL otherwise.
1149 NOTE: Use it before compaction only!
1150 -------------------------------------------------------------------------- */
1154 isAlive(StgClosure *p)
1156 const StgInfoTable *info;
1163 /* ToDo: for static closures, check the static link field.
1164 * Problem here is that we sometimes don't set the link field, eg.
1165 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1170 // ignore closures in generations that we're not collecting.
1171 if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
1174 // large objects have an evacuated flag
1175 if (bd->flags & BF_LARGE) {
1176 if (bd->flags & BF_EVACUATED) {
1182 // check the mark bit for compacted steps
1183 if (bd->step->is_compacted && is_marked((P_)p,bd)) {
1187 switch (info->type) {
1192 case IND_OLDGEN: // rely on compatible layout with StgInd
1193 case IND_OLDGEN_PERM:
1194 // follow indirections
1195 p = ((StgInd *)p)->indirectee;
1200 return ((StgEvacuated *)p)->evacuee;
1203 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1204 p = (StgClosure *)((StgTSO *)p)->link;
1216 mark_root(StgClosure **root)
1218 *root = evacuate(*root);
1224 bdescr *bd = allocBlock();
1225 bd->gen_no = stp->gen_no;
1228 if (stp->gen_no <= N) {
1229 bd->flags = BF_EVACUATED;
1234 stp->hp_bd->free = stp->hp;
1235 stp->hp_bd->link = bd;
1236 stp->hp = bd->start;
1237 stp->hpLim = stp->hp + BLOCK_SIZE_W;
1244 static __inline__ void
1245 upd_evacuee(StgClosure *p, StgClosure *dest)
1247 p->header.info = &stg_EVACUATED_info;
1248 ((StgEvacuated *)p)->evacuee = dest;
1252 static __inline__ StgClosure *
1253 copy(StgClosure *src, nat size, step *stp)
1257 TICK_GC_WORDS_COPIED(size);
1258 /* Find out where we're going, using the handy "to" pointer in
1259 * the step of the source object. If it turns out we need to
1260 * evacuate to an older generation, adjust it here (see comment
1263 if (stp->gen_no < evac_gen) {
1264 #ifdef NO_EAGER_PROMOTION
1265 failed_to_evac = rtsTrue;
1267 stp = &generations[evac_gen].steps[0];
1271 /* chain a new block onto the to-space for the destination step if
1274 if (stp->hp + size >= stp->hpLim) {
1278 for(to = stp->hp, from = (P_)src; size>0; --size) {
1284 upd_evacuee(src,(StgClosure *)dest);
1285 return (StgClosure *)dest;
1288 /* Special version of copy() for when we only want to copy the info
1289 * pointer of an object, but reserve some padding after it. This is
1290 * used to optimise evacuation of BLACKHOLEs.
1294 static __inline__ StgClosure *
1295 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1299 TICK_GC_WORDS_COPIED(size_to_copy);
1300 if (stp->gen_no < evac_gen) {
1301 #ifdef NO_EAGER_PROMOTION
1302 failed_to_evac = rtsTrue;
1304 stp = &generations[evac_gen].steps[0];
1308 if (stp->hp + size_to_reserve >= stp->hpLim) {
1312 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1317 stp->hp += size_to_reserve;
1318 upd_evacuee(src,(StgClosure *)dest);
1319 return (StgClosure *)dest;
1323 /* -----------------------------------------------------------------------------
1324 Evacuate a large object
1326 This just consists of removing the object from the (doubly-linked)
1327 large_alloc_list, and linking it on to the (singly-linked)
1328 new_large_objects list, from where it will be scavenged later.
1330 Convention: bd->flags has BF_EVACUATED set for a large object
1331 that has been evacuated, or unset otherwise.
1332 -------------------------------------------------------------------------- */
1336 evacuate_large(StgPtr p)
1338 bdescr *bd = Bdescr(p);
1341 // object must be at the beginning of the block (or be a ByteArray)
1342 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
1343 (((W_)p & BLOCK_MASK) == 0));
1345 // already evacuated?
1346 if (bd->flags & BF_EVACUATED) {
1347 /* Don't forget to set the failed_to_evac flag if we didn't get
1348 * the desired destination (see comments in evacuate()).
1350 if (bd->gen_no < evac_gen) {
1351 failed_to_evac = rtsTrue;
1352 TICK_GC_FAILED_PROMOTION();
1358 // remove from large_object list
1360 bd->u.back->link = bd->link;
1361 } else { // first object in the list
1362 stp->large_objects = bd->link;
1365 bd->link->u.back = bd->u.back;
1368 /* link it on to the evacuated large object list of the destination step
1371 if (stp->gen_no < evac_gen) {
1372 #ifdef NO_EAGER_PROMOTION
1373 failed_to_evac = rtsTrue;
1375 stp = &generations[evac_gen].steps[0];
1380 bd->gen_no = stp->gen_no;
1381 bd->link = stp->new_large_objects;
1382 stp->new_large_objects = bd;
1383 bd->flags |= BF_EVACUATED;
1386 /* -----------------------------------------------------------------------------
1387 Adding a MUT_CONS to an older generation.
1389 This is necessary from time to time when we end up with an
1390 old-to-new generation pointer in a non-mutable object. We defer
1391 the promotion until the next GC.
1392 -------------------------------------------------------------------------- */
1396 mkMutCons(StgClosure *ptr, generation *gen)
1401 stp = &gen->steps[0];
1403 /* chain a new block onto the to-space for the destination step if
1406 if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
1410 q = (StgMutVar *)stp->hp;
1411 stp->hp += sizeofW(StgMutVar);
1413 SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
1415 recordOldToNewPtrs((StgMutClosure *)q);
1417 return (StgClosure *)q;
1420 /* -----------------------------------------------------------------------------
1423 This is called (eventually) for every live object in the system.
1425 The caller to evacuate specifies a desired generation in the
1426 evac_gen global variable. The following conditions apply to
1427 evacuating an object which resides in generation M when we're
1428 collecting up to generation N
1432 else evac to step->to
1434 if M < evac_gen evac to evac_gen, step 0
1436 if the object is already evacuated, then we check which generation
1439 if M >= evac_gen do nothing
1440 if M < evac_gen set failed_to_evac flag to indicate that we
1441 didn't manage to evacuate this object into evac_gen.
1443 -------------------------------------------------------------------------- */
1446 evacuate(StgClosure *q)
1451 const StgInfoTable *info;
1454 if (HEAP_ALLOCED(q)) {
1457 if (bd->gen_no > N) {
1458 /* Can't evacuate this object, because it's in a generation
1459 * older than the ones we're collecting. Let's hope that it's
1460 * in evac_gen or older, or we will have to arrange to track
1461 * this pointer using the mutable list.
1463 if (bd->gen_no < evac_gen) {
1465 failed_to_evac = rtsTrue;
1466 TICK_GC_FAILED_PROMOTION();
1471 /* evacuate large objects by re-linking them onto a different list.
1473 if (bd->flags & BF_LARGE) {
1475 if (info->type == TSO &&
1476 ((StgTSO *)q)->what_next == ThreadRelocated) {
1477 q = (StgClosure *)((StgTSO *)q)->link;
1480 evacuate_large((P_)q);
1484 /* If the object is in a step that we're compacting, then we
1485 * need to use an alternative evacuate procedure.
1487 if (bd->step->is_compacted) {
1488 if (!is_marked((P_)q,bd)) {
1490 if (mark_stack_full()) {
1491 mark_stack_overflowed = rtsTrue;
1494 push_mark_stack((P_)q);
1502 else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
1505 // make sure the info pointer is into text space
1506 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1507 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1510 switch (info -> type) {
1514 to = copy(q,sizeW_fromITBL(info),stp);
1519 StgWord w = (StgWord)q->payload[0];
1520 if (q->header.info == Czh_con_info &&
1521 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
1522 (StgChar)w <= MAX_CHARLIKE) {
1523 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1525 if (q->header.info == Izh_con_info &&
1526 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1527 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1529 // else, fall through ...
1535 return copy(q,sizeofW(StgHeader)+1,stp);
1537 case THUNK_1_0: // here because of MIN_UPD_SIZE
1542 #ifdef NO_PROMOTE_THUNKS
1543 if (bd->gen_no == 0 &&
1544 bd->step->no != 0 &&
1545 bd->step->no == generations[bd->gen_no].n_steps-1) {
1549 return copy(q,sizeofW(StgHeader)+2,stp);
1557 return copy(q,sizeofW(StgHeader)+2,stp);
1563 case IND_OLDGEN_PERM:
1568 return copy(q,sizeW_fromITBL(info),stp);
1571 case SE_CAF_BLACKHOLE:
1574 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
1577 to = copy(q,BLACKHOLE_sizeW(),stp);
1580 case THUNK_SELECTOR:
1582 const StgInfoTable* selectee_info;
1583 StgClosure* selectee = ((StgSelector*)q)->selectee;
1586 selectee_info = get_itbl(selectee);
1587 switch (selectee_info->type) {
1596 StgWord offset = info->layout.selector_offset;
1598 // check that the size is in range
1600 (StgWord32)(selectee_info->layout.payload.ptrs +
1601 selectee_info->layout.payload.nptrs));
1603 // perform the selection!
1604 q = selectee->payload[offset];
1606 /* if we're already in to-space, there's no need to continue
1607 * with the evacuation, just update the source address with
1608 * a pointer to the (evacuated) constructor field.
1610 if (HEAP_ALLOCED(q)) {
1611 bdescr *bd = Bdescr((P_)q);
1612 if (bd->flags & BF_EVACUATED) {
1613 if (bd->gen_no < evac_gen) {
1614 failed_to_evac = rtsTrue;
1615 TICK_GC_FAILED_PROMOTION();
1621 /* otherwise, carry on and evacuate this constructor field,
1622 * (but not the constructor itself)
1631 case IND_OLDGEN_PERM:
1632 selectee = ((StgInd *)selectee)->indirectee;
1636 selectee = ((StgEvacuated *)selectee)->evacuee;
1639 case THUNK_SELECTOR:
1641 /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
1642 something) to go into an infinite loop when the nightly
1643 stage2 compiles PrelTup.lhs. */
1645 /* we can't recurse indefinitely in evacuate(), so set a
1646 * limit on the number of times we can go around this
1649 if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
1651 bd = Bdescr((P_)selectee);
1652 if (!bd->flags & BF_EVACUATED) {
1653 thunk_selector_depth++;
1654 selectee = evacuate(selectee);
1655 thunk_selector_depth--;
1659 // otherwise, fall through...
1671 case SE_CAF_BLACKHOLE:
1675 // not evaluated yet
1679 // a copy of the top-level cases below
1680 case RBH: // cf. BLACKHOLE_BQ
1682 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1683 to = copy(q,BLACKHOLE_sizeW(),stp);
1684 //ToDo: derive size etc from reverted IP
1685 //to = copy(q,size,stp);
1686 // recordMutable((StgMutClosure *)to);
1691 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1692 to = copy(q,sizeofW(StgBlockedFetch),stp);
1699 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1700 to = copy(q,sizeofW(StgFetchMe),stp);
1704 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1705 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1710 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1711 (int)(selectee_info->type));
1714 return copy(q,THUNK_SELECTOR_sizeW(),stp);
1718 // follow chains of indirections, don't evacuate them
1719 q = ((StgInd*)q)->indirectee;
1723 if (info->srt_len > 0 && major_gc &&
1724 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1725 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1726 static_objects = (StgClosure *)q;
1731 if (info->srt_len > 0 && major_gc &&
1732 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1733 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1734 static_objects = (StgClosure *)q;
1739 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1740 * on the CAF list, so don't do anything with it here (we'll
1741 * scavenge it later).
1744 && ((StgIndStatic *)q)->saved_info == NULL
1745 && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1746 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1747 static_objects = (StgClosure *)q;
1752 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1753 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1754 static_objects = (StgClosure *)q;
1758 case CONSTR_INTLIKE:
1759 case CONSTR_CHARLIKE:
1760 case CONSTR_NOCAF_STATIC:
1761 /* no need to put these on the static linked list, they don't need
1776 // shouldn't see these
1777 barf("evacuate: stack frame at %p\n", q);
1781 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1782 * of stack, tagging and all.
1784 return copy(q,pap_sizeW((StgPAP*)q),stp);
1787 /* Already evacuated, just return the forwarding address.
1788 * HOWEVER: if the requested destination generation (evac_gen) is
1789 * older than the actual generation (because the object was
1790 * already evacuated to a younger generation) then we have to
1791 * set the failed_to_evac flag to indicate that we couldn't
1792 * manage to promote the object to the desired generation.
1794 if (evac_gen > 0) { // optimisation
1795 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1796 if (Bdescr((P_)p)->gen_no < evac_gen) {
1797 failed_to_evac = rtsTrue;
1798 TICK_GC_FAILED_PROMOTION();
1801 return ((StgEvacuated*)q)->evacuee;
1804 // just copy the block
1805 return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
1808 case MUT_ARR_PTRS_FROZEN:
1809 // just copy the block
1810 return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
1814 StgTSO *tso = (StgTSO *)q;
1816 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
1818 if (tso->what_next == ThreadRelocated) {
1819 q = (StgClosure *)tso->link;
1823 /* To evacuate a small TSO, we need to relocate the update frame
1827 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
1828 move_TSO(tso, new_tso);
1829 return (StgClosure *)new_tso;
1834 case RBH: // cf. BLACKHOLE_BQ
1836 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1837 to = copy(q,BLACKHOLE_sizeW(),stp);
1838 //ToDo: derive size etc from reverted IP
1839 //to = copy(q,size,stp);
1841 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1842 q, info_type(q), to, info_type(to)));
1847 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1848 to = copy(q,sizeofW(StgBlockedFetch),stp);
1850 belch("@@ evacuate: %p (%s) to %p (%s)",
1851 q, info_type(q), to, info_type(to)));
1858 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1859 to = copy(q,sizeofW(StgFetchMe),stp);
1861 belch("@@ evacuate: %p (%s) to %p (%s)",
1862 q, info_type(q), to, info_type(to)));
1866 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1867 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1869 belch("@@ evacuate: %p (%s) to %p (%s)",
1870 q, info_type(q), to, info_type(to)));
1875 barf("evacuate: strange closure type %d", (int)(info->type));
1881 /* -----------------------------------------------------------------------------
1882 move_TSO is called to update the TSO structure after it has been
1883 moved from one place to another.
1884 -------------------------------------------------------------------------- */
1887 move_TSO(StgTSO *src, StgTSO *dest)
1891 // relocate the stack pointers...
1892 diff = (StgPtr)dest - (StgPtr)src; // In *words*
1893 dest->sp = (StgPtr)dest->sp + diff;
1894 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1896 relocate_stack(dest, diff);
1899 /* -----------------------------------------------------------------------------
1900 relocate_stack is called to update the linkage between
1901 UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
1903 -------------------------------------------------------------------------- */
1906 relocate_stack(StgTSO *dest, ptrdiff_t diff)
1914 while ((P_)su < dest->stack + dest->stack_size) {
1915 switch (get_itbl(su)->type) {
1917 // GCC actually manages to common up these three cases!
1920 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1925 cf = (StgCatchFrame *)su;
1926 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1931 sf = (StgSeqFrame *)su;
1932 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1941 barf("relocate_stack %d", (int)(get_itbl(su)->type));
1952 scavenge_srt(const StgInfoTable *info)
1954 StgClosure **srt, **srt_end;
1956 /* evacuate the SRT. If srt_len is zero, then there isn't an
1957 * srt field in the info table. That's ok, because we'll
1958 * never dereference it.
1960 srt = (StgClosure **)(info->srt);
1961 srt_end = srt + info->srt_len;
1962 for (; srt < srt_end; srt++) {
1963 /* Special-case to handle references to closures hiding out in DLLs, since
1964 double indirections required to get at those. The code generator knows
1965 which is which when generating the SRT, so it stores the (indirect)
1966 reference to the DLL closure in the table by first adding one to it.
1967 We check for this here, and undo the addition before evacuating it.
1969 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1970 closure that's fixed at link-time, and no extra magic is required.
1972 #ifdef ENABLE_WIN32_DLL_SUPPORT
1973 if ( (unsigned long)(*srt) & 0x1 ) {
1974 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
1984 /* -----------------------------------------------------------------------------
1986 -------------------------------------------------------------------------- */
1989 scavengeTSO (StgTSO *tso)
1991 // chase the link field for any TSOs on the same queue
1992 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
1993 if ( tso->why_blocked == BlockedOnMVar
1994 || tso->why_blocked == BlockedOnBlackHole
1995 || tso->why_blocked == BlockedOnException
1997 || tso->why_blocked == BlockedOnGA
1998 || tso->why_blocked == BlockedOnGA_NoSend
2001 tso->block_info.closure = evacuate(tso->block_info.closure);
2003 if ( tso->blocked_exceptions != NULL ) {
2004 tso->blocked_exceptions =
2005 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2007 // scavenge this thread's stack
2008 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2011 /* -----------------------------------------------------------------------------
2012 Scavenge a given step until there are no more objects in this step
2015 evac_gen is set by the caller to be either zero (for a step in a
2016 generation < N) or G where G is the generation of the step being
2019 We sometimes temporarily change evac_gen back to zero if we're
2020 scavenging a mutable object where early promotion isn't such a good
2022 -------------------------------------------------------------------------- */
2030 nat saved_evac_gen = evac_gen;
2035 failed_to_evac = rtsFalse;
2037 /* scavenge phase - standard breadth-first scavenging of the
2041 while (bd != stp->hp_bd || p < stp->hp) {
2043 // If we're at the end of this block, move on to the next block
2044 if (bd != stp->hp_bd && p == bd->free) {
2050 info = get_itbl((StgClosure *)p);
2051 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2054 switch (info->type) {
2057 /* treat MVars specially, because we don't want to evacuate the
2058 * mut_link field in the middle of the closure.
2061 StgMVar *mvar = ((StgMVar *)p);
2063 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2064 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2065 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2066 evac_gen = saved_evac_gen;
2067 recordMutable((StgMutClosure *)mvar);
2068 failed_to_evac = rtsFalse; // mutable.
2069 p += sizeofW(StgMVar);
2077 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2078 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2079 p += sizeofW(StgHeader) + 2;
2084 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2085 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2091 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2092 p += sizeofW(StgHeader) + 1;
2097 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2103 p += sizeofW(StgHeader) + 1;
2110 p += sizeofW(StgHeader) + 2;
2117 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2118 p += sizeofW(StgHeader) + 2;
2134 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2135 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2136 (StgClosure *)*p = evacuate((StgClosure *)*p);
2138 p += info->layout.payload.nptrs;
2143 if (stp->gen_no != 0) {
2144 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2147 case IND_OLDGEN_PERM:
2148 ((StgIndOldGen *)p)->indirectee =
2149 evacuate(((StgIndOldGen *)p)->indirectee);
2150 if (failed_to_evac) {
2151 failed_to_evac = rtsFalse;
2152 recordOldToNewPtrs((StgMutClosure *)p);
2154 p += sizeofW(StgIndOldGen);
2159 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2160 evac_gen = saved_evac_gen;
2161 recordMutable((StgMutClosure *)p);
2162 failed_to_evac = rtsFalse; // mutable anyhow
2163 p += sizeofW(StgMutVar);
2168 failed_to_evac = rtsFalse; // mutable anyhow
2169 p += sizeofW(StgMutVar);
2173 case SE_CAF_BLACKHOLE:
2176 p += BLACKHOLE_sizeW();
2181 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2182 (StgClosure *)bh->blocking_queue =
2183 evacuate((StgClosure *)bh->blocking_queue);
2184 recordMutable((StgMutClosure *)bh);
2185 failed_to_evac = rtsFalse;
2186 p += BLACKHOLE_sizeW();
2190 case THUNK_SELECTOR:
2192 StgSelector *s = (StgSelector *)p;
2193 s->selectee = evacuate(s->selectee);
2194 p += THUNK_SELECTOR_sizeW();
2198 case AP_UPD: // same as PAPs
2200 /* Treat a PAP just like a section of stack, not forgetting to
2201 * evacuate the function pointer too...
2204 StgPAP* pap = (StgPAP *)p;
2206 pap->fun = evacuate(pap->fun);
2207 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2208 p += pap_sizeW(pap);
2213 // nothing to follow
2214 p += arr_words_sizeW((StgArrWords *)p);
2218 // follow everything
2222 evac_gen = 0; // repeatedly mutable
2223 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2224 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2225 (StgClosure *)*p = evacuate((StgClosure *)*p);
2227 evac_gen = saved_evac_gen;
2228 recordMutable((StgMutClosure *)q);
2229 failed_to_evac = rtsFalse; // mutable anyhow.
2233 case MUT_ARR_PTRS_FROZEN:
2234 // follow everything
2238 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2239 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2240 (StgClosure *)*p = evacuate((StgClosure *)*p);
2242 // it's tempting to recordMutable() if failed_to_evac is
2243 // false, but that breaks some assumptions (eg. every
2244 // closure on the mutable list is supposed to have the MUT
2245 // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
2251 StgTSO *tso = (StgTSO *)p;
2254 evac_gen = saved_evac_gen;
2255 recordMutable((StgMutClosure *)tso);
2256 failed_to_evac = rtsFalse; // mutable anyhow.
2257 p += tso_sizeW(tso);
2262 case RBH: // cf. BLACKHOLE_BQ
2265 nat size, ptrs, nonptrs, vhs;
2267 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2269 StgRBH *rbh = (StgRBH *)p;
2270 (StgClosure *)rbh->blocking_queue =
2271 evacuate((StgClosure *)rbh->blocking_queue);
2272 recordMutable((StgMutClosure *)to);
2273 failed_to_evac = rtsFalse; // mutable anyhow.
2275 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2276 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2277 // ToDo: use size of reverted closure here!
2278 p += BLACKHOLE_sizeW();
2284 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2285 // follow the pointer to the node which is being demanded
2286 (StgClosure *)bf->node =
2287 evacuate((StgClosure *)bf->node);
2288 // follow the link to the rest of the blocking queue
2289 (StgClosure *)bf->link =
2290 evacuate((StgClosure *)bf->link);
2291 if (failed_to_evac) {
2292 failed_to_evac = rtsFalse;
2293 recordMutable((StgMutClosure *)bf);
2296 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2297 bf, info_type((StgClosure *)bf),
2298 bf->node, info_type(bf->node)));
2299 p += sizeofW(StgBlockedFetch);
2307 p += sizeofW(StgFetchMe);
2308 break; // nothing to do in this case
2310 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2312 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2313 (StgClosure *)fmbq->blocking_queue =
2314 evacuate((StgClosure *)fmbq->blocking_queue);
2315 if (failed_to_evac) {
2316 failed_to_evac = rtsFalse;
2317 recordMutable((StgMutClosure *)fmbq);
2320 belch("@@ scavenge: %p (%s) exciting, isn't it",
2321 p, info_type((StgClosure *)p)));
2322 p += sizeofW(StgFetchMeBlockingQueue);
2328 barf("scavenge: unimplemented/strange closure type %d @ %p",
2332 /* If we didn't manage to promote all the objects pointed to by
2333 * the current object, then we have to designate this object as
2334 * mutable (because it contains old-to-new generation pointers).
2336 if (failed_to_evac) {
2337 failed_to_evac = rtsFalse;
2338 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2346 /* -----------------------------------------------------------------------------
2347 Scavenge everything on the mark stack.
2349 This is slightly different from scavenge():
2350 - we don't walk linearly through the objects, so the scavenger
2351 doesn't need to advance the pointer on to the next object.
2352 -------------------------------------------------------------------------- */
2355 scavenge_mark_stack(void)
2361 evac_gen = oldest_gen->no;
2362 saved_evac_gen = evac_gen;
2365 while (!mark_stack_empty()) {
2366 p = pop_mark_stack();
2368 info = get_itbl((StgClosure *)p);
2369 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2372 switch (info->type) {
2375 /* treat MVars specially, because we don't want to evacuate the
2376 * mut_link field in the middle of the closure.
2379 StgMVar *mvar = ((StgMVar *)p);
2381 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2382 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2383 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2384 evac_gen = saved_evac_gen;
2385 failed_to_evac = rtsFalse; // mutable.
2393 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2394 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2404 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2429 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2430 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2431 (StgClosure *)*p = evacuate((StgClosure *)*p);
2437 // don't need to do anything here: the only possible case
2438 // is that we're in a 1-space compacting collector, with
2439 // no "old" generation.
2443 case IND_OLDGEN_PERM:
2444 ((StgIndOldGen *)p)->indirectee =
2445 evacuate(((StgIndOldGen *)p)->indirectee);
2446 if (failed_to_evac) {
2447 recordOldToNewPtrs((StgMutClosure *)p);
2449 failed_to_evac = rtsFalse;
2454 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2455 evac_gen = saved_evac_gen;
2456 failed_to_evac = rtsFalse;
2461 failed_to_evac = rtsFalse;
2465 case SE_CAF_BLACKHOLE:
2473 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2474 (StgClosure *)bh->blocking_queue =
2475 evacuate((StgClosure *)bh->blocking_queue);
2476 failed_to_evac = rtsFalse;
2480 case THUNK_SELECTOR:
2482 StgSelector *s = (StgSelector *)p;
2483 s->selectee = evacuate(s->selectee);
2487 case AP_UPD: // same as PAPs
2489 /* Treat a PAP just like a section of stack, not forgetting to
2490 * evacuate the function pointer too...
2493 StgPAP* pap = (StgPAP *)p;
2495 pap->fun = evacuate(pap->fun);
2496 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2501 // follow everything
2505 evac_gen = 0; // repeatedly mutable
2506 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2507 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2508 (StgClosure *)*p = evacuate((StgClosure *)*p);
2510 evac_gen = saved_evac_gen;
2511 failed_to_evac = rtsFalse; // mutable anyhow.
2515 case MUT_ARR_PTRS_FROZEN:
2516 // follow everything
2520 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2521 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2522 (StgClosure *)*p = evacuate((StgClosure *)*p);
2529 StgTSO *tso = (StgTSO *)p;
2532 evac_gen = saved_evac_gen;
2533 failed_to_evac = rtsFalse;
2538 case RBH: // cf. BLACKHOLE_BQ
2541 nat size, ptrs, nonptrs, vhs;
2543 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2545 StgRBH *rbh = (StgRBH *)p;
2546 (StgClosure *)rbh->blocking_queue =
2547 evacuate((StgClosure *)rbh->blocking_queue);
2548 recordMutable((StgMutClosure *)rbh);
2549 failed_to_evac = rtsFalse; // mutable anyhow.
2551 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2552 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2558 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2559 // follow the pointer to the node which is being demanded
2560 (StgClosure *)bf->node =
2561 evacuate((StgClosure *)bf->node);
2562 // follow the link to the rest of the blocking queue
2563 (StgClosure *)bf->link =
2564 evacuate((StgClosure *)bf->link);
2565 if (failed_to_evac) {
2566 failed_to_evac = rtsFalse;
2567 recordMutable((StgMutClosure *)bf);
2570 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2571 bf, info_type((StgClosure *)bf),
2572 bf->node, info_type(bf->node)));
2580 break; // nothing to do in this case
2582 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2584 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2585 (StgClosure *)fmbq->blocking_queue =
2586 evacuate((StgClosure *)fmbq->blocking_queue);
2587 if (failed_to_evac) {
2588 failed_to_evac = rtsFalse;
2589 recordMutable((StgMutClosure *)fmbq);
2592 belch("@@ scavenge: %p (%s) exciting, isn't it",
2593 p, info_type((StgClosure *)p)));
2599 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2603 if (failed_to_evac) {
2604 failed_to_evac = rtsFalse;
2605 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2608 // mark the next bit to indicate "scavenged"
2609 mark(q+1, Bdescr(q));
2611 } // while (!mark_stack_empty())
2613 // start a new linear scan if the mark stack overflowed at some point
2614 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2615 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2616 mark_stack_overflowed = rtsFalse;
2617 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2618 oldgen_scan = oldgen_scan_bd->start;
2621 if (oldgen_scan_bd) {
2622 // push a new thing on the mark stack
2624 // find a closure that is marked but not scavenged, and start
// scavenging it.
2626 while (oldgen_scan < oldgen_scan_bd->free
2627 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2631 if (oldgen_scan < oldgen_scan_bd->free) {
2633 // already scavenged?
2634 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2635 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2638 push_mark_stack(oldgen_scan);
2639 // ToDo: bump the linear scan by the actual size of the object
2640 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2644 oldgen_scan_bd = oldgen_scan_bd->link;
2645 if (oldgen_scan_bd != NULL) {
2646 oldgen_scan = oldgen_scan_bd->start;
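/* Sketch of the two-bit marking convention relied on above (editorial
 * illustration; "q" is a hypothetical closure address in the old
 * generation): the bit for the object's first word says "live", and
 * the bit for its second word says "already scavenged".
 */
#if 0
    StgPtr q;                   // hypothetical old-generation closure address
    bdescr *bd = Bdescr(q);
    if (is_marked(q, bd) && !is_marked(q+1, bd)) {
        push_mark_stack(q);     // live but not yet scavenged: revisit it
    }
#endif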
2652 /* -----------------------------------------------------------------------------
2653 Scavenge one object.
2655 This is used for objects that are temporarily marked as mutable
2656 because they contain old-to-new generation pointers. Only certain
2657 objects can have this property.
2658 -------------------------------------------------------------------------- */
2661 scavenge_one(StgPtr p)
2663 const StgInfoTable *info;
2664 nat saved_evac_gen = evac_gen;
2667 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2668 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2670 info = get_itbl((StgClosure *)p);
2672 switch (info->type) {
2675 case FUN_1_0: // hardly worth specialising these guys
2695 case IND_OLDGEN_PERM:
2699 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2700 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2701 (StgClosure *)*q = evacuate((StgClosure *)*q);
2707 case SE_CAF_BLACKHOLE:
2712 case THUNK_SELECTOR:
2714 StgSelector *s = (StgSelector *)p;
2715 s->selectee = evacuate(s->selectee);
2720 // nothing to follow
2725 // follow everything
2728 evac_gen = 0; // repeatedly mutable
2729 recordMutable((StgMutClosure *)p);
2730 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2731 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2732 (StgClosure *)*p = evacuate((StgClosure *)*p);
2734 evac_gen = saved_evac_gen;
2735 failed_to_evac = rtsFalse;
2739 case MUT_ARR_PTRS_FROZEN:
2741 // follow everything
2744 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2745 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2746 (StgClosure *)*p = evacuate((StgClosure *)*p);
2753 StgTSO *tso = (StgTSO *)p;
2755 evac_gen = 0; // repeatedly mutable
2757 recordMutable((StgMutClosure *)tso);
2758 evac_gen = saved_evac_gen;
2759 failed_to_evac = rtsFalse;
2766 StgPAP* pap = (StgPAP *)p;
2767 pap->fun = evacuate(pap->fun);
2768 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2773 // This might happen if for instance a MUT_CONS was pointing to a
2774 // THUNK which has since been updated. The IND_OLDGEN will
2775 * be on the mutable list anyway, so we don't need to do anything
* here.
*/
2780 barf("scavenge_one: strange object %d", (int)(info->type));
2783 no_luck = failed_to_evac;
2784 failed_to_evac = rtsFalse;
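/* Sketch of the caller-side protocol for scavenge_one() (cf. the
 * MUT_CONS case in scavenge_mut_once_list() below, whose locals p and
 * new_list are borrowed here): a non-zero result means some pointee
 * could not be promoted, so the object must stay in the remembered set.
 */
#if 0
    if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
        p->mut_link = new_list;         // keep the MUT_CONS on the list
        new_list = p;
    }
#endif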
2788 /* -----------------------------------------------------------------------------
2789 Scavenging mutable lists.
2791 We treat the mutable list of each generation > N (i.e. all the
2792 generations older than the one being collected) as roots. We also
2793 remove non-mutable objects from the mutable list at this point.
2794 -------------------------------------------------------------------------- */
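/* The general shape of both walks below (an illustrative sketch):
 * each list is threaded through the mut_link fields and ends at the
 * END_MUT_LIST sentinel; next is read before p is processed, because
 * processing may relink p onto a different list.
 */
#if 0
    StgMutClosure *p, *next;
    for (p = gen->mut_once_list, next = p->mut_link;
         p != END_MUT_LIST;
         p = next, next = p->mut_link) {
        /* ... dispatch on get_itbl(p)->type ... */
    }
#endif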
2797 scavenge_mut_once_list(generation *gen)
2799 const StgInfoTable *info;
2800 StgMutClosure *p, *next, *new_list;
2802 p = gen->mut_once_list;
2803 new_list = END_MUT_LIST;
2807 failed_to_evac = rtsFalse;
2809 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2811 // make sure the info pointer is into text space
2812 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2813 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2817 if (info->type==RBH)
2818 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2820 switch(info->type) {
2823 case IND_OLDGEN_PERM:
2825 /* Try to pull the indirectee into this generation, so we can
2826 * remove the indirection from the mutable list.
*/
2828 ((StgIndOldGen *)p)->indirectee =
2829 evacuate(((StgIndOldGen *)p)->indirectee);
2831 #if 0 && defined(DEBUG)
2832 if (RtsFlags.DebugFlags.gc)
2833 /* Debugging code to print out the size of the thing we just
* promoted */
2837 StgPtr start = gen->steps[0].scan;
2838 bdescr *start_bd = gen->steps[0].scan_bd;
nat size = 0;
2840 scavenge(&gen->steps[0]);
2841 if (start_bd != gen->steps[0].scan_bd) {
2842 size += (P_)BLOCK_ROUND_UP(start) - start;
2843 start_bd = start_bd->link;
2844 while (start_bd != gen->steps[0].scan_bd) {
2845 size += BLOCK_SIZE_W;
2846 start_bd = start_bd->link;
2848 size += gen->steps[0].scan -
2849 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2851 size = gen->steps[0].scan - start;
2853 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2857 /* failed_to_evac might happen if we've got more than two
2858 * generations, we're collecting only generation 0, the
2859 * indirection resides in generation 2 and the indirectee is
* in generation 1.
*/
2862 if (failed_to_evac) {
2863 failed_to_evac = rtsFalse;
2864 p->mut_link = new_list;
2867 /* the mut_link field of an IND_STATIC is overloaded as the
2868 * static link field too (it just so happens that we don't need
2869 * both at the same time), so we need to NULL it out when
2870 * removing this object from the mutable list because the static
2871 * link fields are all assumed to be NULL before doing a major
* collection.
*/
2879 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2880 * it from the mutable list if possible by promoting whatever it
* points to.
*/
2883 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2884 /* didn't manage to promote everything, so put the
2885 * MUT_CONS back on the list.
*/
2887 p->mut_link = new_list;
2893 // shouldn't have anything else on the mutables list
2894 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2898 gen->mut_once_list = new_list;
2903 scavenge_mutable_list(generation *gen)
2905 const StgInfoTable *info;
2906 StgMutClosure *p, *next;
2908 p = gen->saved_mut_list;
2912 failed_to_evac = rtsFalse;
2914 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2916 // make sure the info pointer is into text space
2917 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2918 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2922 if (info->type==RBH)
2923 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2925 switch(info->type) {
2928 // follow everything
2929 p->mut_link = gen->mut_list;
2934 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2935 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2936 (StgClosure *)*q = evacuate((StgClosure *)*q);
2941 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2942 case MUT_ARR_PTRS_FROZEN:
2947 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2948 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2949 (StgClosure *)*q = evacuate((StgClosure *)*q);
2953 if (failed_to_evac) {
2954 failed_to_evac = rtsFalse;
2955 mkMutCons((StgClosure *)p, gen);
2961 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2962 p->mut_link = gen->mut_list;
2968 StgMVar *mvar = (StgMVar *)p;
2969 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2970 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2971 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2972 p->mut_link = gen->mut_list;
2979 StgTSO *tso = (StgTSO *)p;
2983 /* Don't take this TSO off the mutable list - it might still
2984 * point to some younger objects (because we set evac_gen to 0
* above).
*/
2987 tso->mut_link = gen->mut_list;
2988 gen->mut_list = (StgMutClosure *)tso;
2994 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2995 (StgClosure *)bh->blocking_queue =
2996 evacuate((StgClosure *)bh->blocking_queue);
2997 p->mut_link = gen->mut_list;
3002 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
* the update overwrites it with an indirection.
*/
3005 case IND_OLDGEN_PERM:
3006 /* Try to pull the indirectee into this generation, so we can
3007 * remove the indirection from the mutable list.
*/
3010 ((StgIndOldGen *)p)->indirectee =
3011 evacuate(((StgIndOldGen *)p)->indirectee);
3014 if (failed_to_evac) {
3015 failed_to_evac = rtsFalse;
3016 p->mut_link = gen->mut_once_list;
3017 gen->mut_once_list = p;
3024 // HWL: check whether all of these are necessary
3026 case RBH: // cf. BLACKHOLE_BQ
3028 // nat size, ptrs, nonptrs, vhs;
3030 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3031 StgRBH *rbh = (StgRBH *)p;
3032 (StgClosure *)rbh->blocking_queue =
3033 evacuate((StgClosure *)rbh->blocking_queue);
3034 if (failed_to_evac) {
3035 failed_to_evac = rtsFalse;
3036 recordMutable((StgMutClosure *)rbh);
3038 // ToDo: use size of reverted closure here!
3039 p += BLACKHOLE_sizeW();
3045 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3046 // follow the pointer to the node which is being demanded
3047 (StgClosure *)bf->node =
3048 evacuate((StgClosure *)bf->node);
3049 // follow the link to the rest of the blocking queue
3050 (StgClosure *)bf->link =
3051 evacuate((StgClosure *)bf->link);
3052 if (failed_to_evac) {
3053 failed_to_evac = rtsFalse;
3054 recordMutable((StgMutClosure *)bf);
3056 p += sizeofW(StgBlockedFetch);
3062 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3065 p += sizeofW(StgFetchMe);
3066 break; // nothing to do in this case
3068 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3070 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3071 (StgClosure *)fmbq->blocking_queue =
3072 evacuate((StgClosure *)fmbq->blocking_queue);
3073 if (failed_to_evac) {
3074 failed_to_evac = rtsFalse;
3075 recordMutable((StgMutClosure *)fmbq);
3077 p += sizeofW(StgFetchMeBlockingQueue);
3083 // shouldn't have anything else on the mutables list
3084 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3091 scavenge_static(void)
3093 StgClosure* p = static_objects;
3094 const StgInfoTable *info;
3096 /* Always evacuate straight to the oldest generation for static
* objects.
*/
3098 evac_gen = oldest_gen->no;
3100 /* keep going until we've scavenged all the objects on the linked
* list...
*/
3102 while (p != END_OF_STATIC_LIST) {
3106 if (info->type==RBH)
3107 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3109 // make sure the info pointer is into text space
3110 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3111 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3113 /* Take this object *off* the static_objects list,
3114 * and put it on the scavenged_static_objects list.
*/
3116 static_objects = STATIC_LINK(info,p);
3117 STATIC_LINK(info,p) = scavenged_static_objects;
3118 scavenged_static_objects = p;
3120 switch (info -> type) {
3124 StgInd *ind = (StgInd *)p;
3125 ind->indirectee = evacuate(ind->indirectee);
3127 /* might fail to evacuate it, in which case we have to pop it
3128 * back on the mutable list (and take it off the
3129 * scavenged_static list because the static link and mut link
3130 * pointers are one and the same).
*/
3132 if (failed_to_evac) {
3133 failed_to_evac = rtsFalse;
3134 scavenged_static_objects = IND_STATIC_LINK(p);
3135 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3136 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3150 next = (P_)p->payload + info->layout.payload.ptrs;
3151 // evacuate the pointers
3152 for (q = (P_)p->payload; q < next; q++) {
3153 (StgClosure *)*q = evacuate((StgClosure *)*q);
3159 barf("scavenge_static: strange closure %d", (int)(info->type));
3162 ASSERT(failed_to_evac == rtsFalse);
3164 /* get the next static object from the list. Remember, there might
3165 * be more stuff on this list now that we've done some evacuating!
3166 * (static_objects is a global)
*/
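/* Sketch of the work-list property noted above: evacuate() can push
 * newly-reached static closures onto the global static_objects list
 * while this loop runs, so the head is re-read on every iteration
 * rather than cached:
 */
#if 0
    while (static_objects != END_OF_STATIC_LIST) {
        StgClosure *p = static_objects;
        const StgInfoTable *info = get_itbl(p);
        static_objects = STATIC_LINK(info, p);  // pop the head
        /* ... scavenge p, then push it on scavenged_static_objects ... */
    }
#endif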
3172 /* -----------------------------------------------------------------------------
3173 scavenge_stack walks over a section of stack and evacuates all the
3174 objects pointed to by it. We can use the same code for walking
3175 PAPs, since these are just sections of copied stack.
3176 -------------------------------------------------------------------------- */
3179 scavenge_stack(StgPtr p, StgPtr stack_end)
3182 const StgInfoTable* info;
3185 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
/*
3188 * Each time around this loop, we are looking at a chunk of stack
3189 * that starts with either a pending argument section or an
3190 * activation record.
*/
3193 while (p < stack_end) {
3196 // If we've got a tag, skip over that many words on the stack
3197 if (IS_ARG_TAG((W_)q)) {
3202 /* Is q a pointer to a closure?
*/
3204 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3206 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3207 ASSERT(closure_STATIC((StgClosure *)q));
3209 // otherwise, must be a pointer into the allocation space.
3212 (StgClosure *)*p = evacuate((StgClosure *)q);
/*
3218 * Otherwise, q must be the info pointer of an activation
3219 * record. All activation records have 'bitmap' style layout
* info.
*/
3222 info = get_itbl((StgClosure *)p);
3224 switch (info->type) {
3226 // Dynamic bitmap: the mask is stored on the stack
3228 bitmap = ((StgRetDyn *)p)->liveness;
3229 p = (P_)&((StgRetDyn *)p)->payload[0];
3232 // probably a slow-entry point return address:
3240 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3241 old_p, p, old_p+1));
3243 p++; // what if FHS!=1 !? -- HWL
3248 /* Specialised code for update frames, since they're so common.
3249 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3250 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
*/
3254 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3256 p += sizeofW(StgUpdateFrame);
3259 frame->updatee = evacuate(frame->updatee);
3261 #else // specialised code for update frames, not sure if it's worth it.
3263 nat type = get_itbl(frame->updatee)->type;
3265 if (type == EVACUATED) {
3266 frame->updatee = evacuate(frame->updatee);
3269 bdescr *bd = Bdescr((P_)frame->updatee);
3271 if (bd->gen_no > N) {
3272 if (bd->gen_no < evac_gen) {
3273 failed_to_evac = rtsTrue;
3278 // Don't promote blackholes
3280 if (!(stp->gen_no == 0 &&
3282 stp->no == stp->gen->n_steps-1)) {
3289 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3290 sizeofW(StgHeader), stp);
3291 frame->updatee = to;
3294 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3295 frame->updatee = to;
3296 recordMutable((StgMutClosure *)to);
3299 /* will never be SE_{,CAF_}BLACKHOLE, since we
3300 don't push an update frame for single-entry thunks. KSW 1999-01. */
3301 barf("scavenge_stack: UPDATE_FRAME updatee");
3307 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
3314 bitmap = info->layout.bitmap;
3316 // this assumes that the payload starts immediately after the info-ptr
3318 while (bitmap != 0) {
3319 if ((bitmap & 1) == 0) {
3320 (StgClosure *)*p = evacuate((StgClosure *)*p);
3323 bitmap = bitmap >> 1;
3330 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3335 StgLargeBitmap *large_bitmap;
3338 large_bitmap = info->layout.large_bitmap;
3341 for (i=0; i<large_bitmap->size; i++) {
3342 bitmap = large_bitmap->bitmap[i];
3343 q = p + BITS_IN(W_);
3344 while (bitmap != 0) {
3345 if ((bitmap & 1) == 0) {
3346 (StgClosure *)*p = evacuate((StgClosure *)*p);
3349 bitmap = bitmap >> 1;
3351 if (i+1 < large_bitmap->size) {
3353 (StgClosure *)*p = evacuate((StgClosure *)*p);
3359 // and don't forget to follow the SRT
3364 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3369 /*-----------------------------------------------------------------------------
3370 scavenge the large object list.
3372 evac_gen set by caller; similar games played with evac_gen as with
3373 scavenge() - see comment at the top of scavenge(). Most large
3374 objects are (repeatedly) mutable, so most of the time evac_gen will
be zero.
3376 --------------------------------------------------------------------------- */
3379 scavenge_large(step *stp)
3384 bd = stp->new_large_objects;
3386 for (; bd != NULL; bd = stp->new_large_objects) {
3388 /* take this object *off* the large objects list and put it on
3389 * the scavenged large objects list. This is so that we can
3390 * treat new_large_objects as a stack and push new objects on
3391 * the front when evacuating.
*/
3393 stp->new_large_objects = bd->link;
3394 dbl_link_onto(bd, &stp->scavenged_large_objects);
3396 // update the block count in this step.
3397 stp->n_scavenged_large_blocks += bd->blocks;
3400 if (scavenge_one(p)) {
3401 mkMutCons((StgClosure *)p, stp->gen);
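/* Sketch of the stack discipline noted above: evacuating a large
 * object (elsewhere in this file) pushes its block descriptor on the
 * front of stp->new_large_objects, so the loop above keeps re-reading
 * the list head until it is empty ("bd" here is a hypothetical block
 * being pushed during evacuation):
 */
#if 0
    bd->link = stp->new_large_objects;
    stp->new_large_objects = bd;
#endif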
3406 /* -----------------------------------------------------------------------------
3407 Initialising the static object & mutable lists
3408 -------------------------------------------------------------------------- */
3411 zero_static_object_list(StgClosure* first_static)
3415 const StgInfoTable *info;
3417 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3419 link = STATIC_LINK(info, p);
3420 STATIC_LINK(info,p) = NULL;
3424 /* This function is only needed because we share the mutable link
3425 * field with the static link field in an IND_STATIC, so we have to
3426 * zero the mut_link field before doing a major GC, which needs the
3427 * static link field.
3429 * It doesn't do any harm to zero all the mutable link fields on the
* mutable list.
*/
3434 zero_mutable_list( StgMutClosure *first )
3436 StgMutClosure *next, *c;
3438 for (c = first; c != END_MUT_LIST; c = next) {
3444 /* -----------------------------------------------------------------------------
Reverting CAFs
3446 -------------------------------------------------------------------------- */
3453 for (c = (StgIndStatic *)caf_list; c != NULL;
3454 c = (StgIndStatic *)c->static_link)
3456 c->header.info = c->saved_info;
3457 c->saved_info = NULL;
3458 // could, but not necessary: c->static_link = NULL;
3464 scavengeCAFs( void )
3469 for (c = (StgIndStatic *)caf_list; c != NULL;
3470 c = (StgIndStatic *)c->static_link)
3472 c->indirectee = evacuate(c->indirectee);
3476 /* -----------------------------------------------------------------------------
3477 Sanity code for CAF garbage collection.
3479 With DEBUG turned on, we manage a CAF list in addition to the SRT
3480 mechanism. After GC, we run down the CAF list and blackhole any
3481 CAFs which have been garbage collected. This means we get an error
3482 whenever the program tries to enter a garbage collected CAF.
3484 Any garbage collected CAFs are taken off the CAF list at the same
time.
3486 -------------------------------------------------------------------------- */
3488 #if 0 && defined(DEBUG)
3495 const StgInfoTable *info;
3506 ASSERT(info->type == IND_STATIC);
3508 if (STATIC_LINK(info,p) == NULL) {
3509 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3511 SET_INFO(p,&stg_BLACKHOLE_info);
3512 p = STATIC_LINK2(info,p);
3516 pp = &STATIC_LINK2(info,p);
3523 // belch("%d CAFs live", i);
3528 /* -----------------------------------------------------------------------------
Lazy black holing.
3531 Whenever a thread returns to the scheduler after possibly doing
3532 some work, we have to run down the stack and black-hole all the
3533 closures referred to by update frames.
3534 -------------------------------------------------------------------------- */
3537 threadLazyBlackHole(StgTSO *tso)
3539 StgUpdateFrame *update_frame;
3540 StgBlockingQueue *bh;
3543 stack_end = &tso->stack[tso->stack_size];
3544 update_frame = tso->su;
3547 switch (get_itbl(update_frame)->type) {
3550 update_frame = ((StgCatchFrame *)update_frame)->link;
3554 bh = (StgBlockingQueue *)update_frame->updatee;
3556 /* if the thunk is already blackholed, it means we've also
3557 * already blackholed the rest of the thunks on this stack,
3558 * so we can stop early.
3560 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3561 * don't interfere with this optimisation.
*/
3563 if (bh->header.info == &stg_BLACKHOLE_info) {
3567 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3568 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3569 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3570 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3572 SET_INFO(bh,&stg_BLACKHOLE_info);
3575 update_frame = update_frame->link;
3579 update_frame = ((StgSeqFrame *)update_frame)->link;
3585 barf("threadPaused");
3591 /* -----------------------------------------------------------------------------
* Stack squeezing
3594 * Code largely pinched from old RTS, then hacked to bits. We also do
3595 * lazy black holing here.
3597 * -------------------------------------------------------------------------- */
3600 threadSqueezeStack(StgTSO *tso)
3602 lnat displacement = 0;
3603 StgUpdateFrame *frame;
3604 StgUpdateFrame *next_frame; // Temporally next
3605 StgUpdateFrame *prev_frame; // Temporally previous
3607 rtsBool prev_was_update_frame;
3609 StgUpdateFrame *top_frame;
3610 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
bhs=0, squeezes=0;
3612 void printObj( StgClosure *obj ); // from Printer.c
3614 top_frame = tso->su;
3617 bottom = &(tso->stack[tso->stack_size]);
3620 /* There must be at least one frame, namely the STOP_FRAME.
*/
3622 ASSERT((P_)frame < bottom);
3624 /* Walk down the stack, reversing the links between frames so that
3625 * we can walk back up as we squeeze from the bottom. Note that
3626 * next_frame and prev_frame refer to next and previous as they were
3627 * added to the stack, rather than the way we see them in this
3628 * walk. (It makes the next loop less confusing.)
3630 * Stop if we find an update frame pointing to a black hole
3631 * (see comment in threadLazyBlackHole()).
*/
3635 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3636 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3637 prev_frame = frame->link;
3638 frame->link = next_frame;
3643 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3644 printObj((StgClosure *)prev_frame);
3645 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3648 switch (get_itbl(frame)->type) {
3651 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3664 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3666 printObj((StgClosure *)prev_frame);
3669 if (get_itbl(frame)->type == UPDATE_FRAME
3670 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3675 /* Now, we're at the bottom. Frame points to the lowest update
3676 * frame on the stack, and its link actually points to the frame
3677 * above. We have to walk back up the stack, squeezing out empty
3678 * update frames and turning the pointers back around on the way
3681 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3682 * we never want to eliminate it anyway. Just walk one step up
3683 * before starting to squeeze. When you get to the topmost frame,
3684 * remember that there are still some words above it that might have
* to be moved as well.
*/
3691 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
/*
3694 * Loop through all of the frames (everything except the very
3695 * bottom). Things are complicated by the fact that we have
3696 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3697 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
*/
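/* Worked sketch of a squeeze (illustrative; u1, u2, t1 and t2 are
 * hypothetical): two adjacent update frames u1 (older) and u2
 * (younger, "empty": no pending arguments between them) promise to
 * update thunks t1 and t2 respectively.  Squeezing drops u2 and makes
 * t2 an indirection to t1, so a single update of t1 does the work of
 * both:
 *
 *     before                    after
 *     ... | u1 -> t1 |          ... | u1 -> t1 |
 *         | u2 -> t2 |              (u2 gone; t2 = IND -> t1)
 */
#if 0
    StgClosure *t1, *t2;        // hypothetical updatees of u1 (kept) and u2
    if (t2 != t1 && !closure_IND(t2)) {
        UPD_IND_NOLOCK(t2, t1); // t2 becomes an indirection to t1
    }
#endif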
3699 while (frame != NULL) {
3701 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3702 rtsBool is_update_frame;
3704 next_frame = frame->link;
3705 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
/* We can squeeze out the current frame if:
3708 * 1. both the previous and current frame are update frames
3709 * 2. the current frame is empty
*/
3711 if (prev_was_update_frame && is_update_frame &&
3712 (P_)prev_frame == frame_bottom + displacement) {
3714 // Now squeeze out the current frame
3715 StgClosure *updatee_keep = prev_frame->updatee;
3716 StgClosure *updatee_bypass = frame->updatee;
3719 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3723 /* Deal with blocking queues. If both updatees have blocked
3724 * threads, then we should merge the queues into the update
3725 * frame that we're keeping.
3727 * Alternatively, we could just wake them up: they'll just go
3728 * straight to sleep on the proper blackhole! This is less code
3729 * and probably less bug prone, although it's probably much
* slower.
*/
3732 #if 0 // do it properly...
3733 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3734 # error Unimplemented lazy BH warning. (KSW 1999-01)
3736 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3737 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3739 // Sigh. It has one. Don't lose those threads!
3740 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3741 // Urgh. Two queues. Merge them.
3742 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3744 while (keep_tso->link != END_TSO_QUEUE) {
3745 keep_tso = keep_tso->link;
3747 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3750 // For simplicity, just swap the BQ for the BH
3751 P_ temp = updatee_keep;
3753 updatee_keep = updatee_bypass;
3754 updatee_bypass = temp;
3756 // Record the swap in the kept frame (below)
3757 prev_frame->updatee = updatee_keep;
3762 TICK_UPD_SQUEEZED();
3763 /* wasn't there something about update squeezing and ticky to be
3764 * sorted out? oh yes: we aren't counting each enter properly
3765 * in this case. See the log somewhere. KSW 1999-04-21
3767 * Check two things: that the two update frames don't point to
3768 * the same object, and that the updatee_bypass isn't already an
3769 * indirection. Both of these cases only happen when we're in a
3770 * black hole-style loop (and there are multiple update frames
3771 * on the stack pointing to the same closure), but they can both
3772 * screw us up if we don't check.
*/
3774 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3775 // this wakes the threads up
3776 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3779 sp = (P_)frame - 1; // sp = stuff to slide
3780 displacement += sizeofW(StgUpdateFrame);
3783 // No squeeze for this frame
3784 sp = frame_bottom - 1; // Keep the current frame
3786 /* Do lazy black-holing.
*/
3788 if (is_update_frame) {
3789 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3790 if (bh->header.info != &stg_BLACKHOLE_info &&
3791 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3792 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3793 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3794 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3797 /* zero out the slop so that the sanity checker can tell
3798 * where the next closure is.
*/
3801 StgInfoTable *info = get_itbl(bh);
3802 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3803 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3804 * info is used for a different purpose, and it's exactly the
3805 * same size as a BLACKHOLE in any case.
*/
3807 if (info->type != THUNK_SELECTOR) {
3808 for (i = np; i < np + nw; i++) {
3809 ((StgClosure *)bh)->payload[i] = 0;
3814 SET_INFO(bh,&stg_BLACKHOLE_info);
3818 // Fix the link in the current frame (should point to the frame below)
3819 frame->link = prev_frame;
3820 prev_was_update_frame = is_update_frame;
3823 // Now slide all words from sp up to the next frame
3825 if (displacement > 0) {
3826 P_ next_frame_bottom;
3828 if (next_frame != NULL)
3829 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3831 next_frame_bottom = tso->sp - 1;
3835 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3839 while (sp >= next_frame_bottom) {
3840 sp[displacement] = *sp;
3844 (P_)prev_frame = (P_)frame + displacement;
3848 tso->sp += displacement;
3849 tso->su = prev_frame;
3852 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3853 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
3858 /* -----------------------------------------------------------------------------
* Pausing a thread
3861 * We have to prepare for GC - this means doing lazy black holing
3862 * here. We also take the opportunity to do stack squeezing if it's
* turned on.
3864 * -------------------------------------------------------------------------- */
3866 threadPaused(StgTSO *tso)
3868 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3869 threadSqueezeStack(tso); // does black holing too
3871 threadLazyBlackHole(tso);
3874 /* -----------------------------------------------------------------------------
* Debugging
3876 * -------------------------------------------------------------------------- */
3880 printMutOnceList(generation *gen)
3882 StgMutClosure *p, *next;
3884 p = gen->mut_once_list;
3887 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3888 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3889 fprintf(stderr, "%p (%s), ",
3890 p, info_type((StgClosure *)p));
3892 fputc('\n', stderr);
3896 printMutableList(generation *gen)
3898 StgMutClosure *p, *next;
3903 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3904 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3905 fprintf(stderr, "%p (%s), ",
3906 p, info_type((StgClosure *)p));
3908 fputc('\n', stderr);
3911 static inline rtsBool
3912 maybeLarge(StgClosure *closure)
3914 StgInfoTable *info = get_itbl(closure);
3916 /* closure types that may be found on the new_large_objects list;
3917 see scavenge_large */
3918 return (info->type == MUT_ARR_PTRS ||
3919 info->type == MUT_ARR_PTRS_FROZEN ||
3920 info->type == TSO ||
3921 info->type == ARR_WORDS);