1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.123 2001/10/01 10:52:36 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
10 #include "PosixSource.h"
15 #include "StoragePriv.h"
18 #include "SchedAPI.h" // for RevertCAFs prototype
20 #include "BlockAlloc.h"
26 #include "StablePriv.h"
28 #include "ParTicky.h" // ToDo: move into Rts.h
29 #include "GCCompact.h"
30 #if defined(GRAN) || defined(PAR)
31 # include "GranSimRts.h"
32 # include "ParallelRts.h"
36 # include "ParallelDebug.h"
41 #if defined(RTS_GTK_FRONTPANEL)
42 #include "FrontPanel.h"
45 /* STATIC OBJECT LIST.
48 * We maintain a linked list of static objects that are still live.
49 * The requirements for this list are:
51 * - we need to scan the list while adding to it, in order to
52 * scavenge all the static objects (in the same way that
53 * breadth-first scavenging works for dynamic objects).
55 * - we need to be able to tell whether an object is already on
56 * the list, to break loops.
58 * Each static object has a "static link field", which we use for
59 * linking objects on to the list. We use a stack-type list, consing
60 * objects on the front as they are added (this means that the
61 * scavenge phase is depth-first, not breadth-first, but that
64 * A separate list is kept for objects that have been scavenged
65 * already - this is so that we can zero all the marks afterwards.
67 * An object is on the list if its static link field is non-zero; this
68 * means that we have to mark the end of the list with '1', not NULL.
70 * Extra notes for generational GC:
72 * Each generation has a static object list associated with it. When
73 * collecting generations up to N, we treat the static object lists
74 * from generations > N as roots.
76 * We build up a static object list while collecting generations 0..N,
77 * which is then appended to the static object list of generation N+1.
79 StgClosure* static_objects; // live static objects
80 StgClosure* scavenged_static_objects; // static objects scavenged so far
82 /* N is the oldest generation being collected, where the generations
83 * are numbered starting at 0. A major GC (indicated by the major_gc
84 * flag) is when we're collecting all generations. We only attempt to
85 * deal with static objects and GC CAFs when doing a major GC.
88 static rtsBool major_gc;
90 /* Youngest generation that objects should be evacuated to in
91 * evacuate(). (Logically an argument to evacuate, but it's static
92 * a lot of the time so we optimise it into a global variable).
98 StgWeak *old_weak_ptr_list; // also pending finaliser list
99 static rtsBool weak_done; // all done for this pass
101 /* List of all threads during GC
103 static StgTSO *old_all_threads;
104 static StgTSO *resurrected_threads;
106 /* Flag indicating failure to evacuate an object to the desired
109 static rtsBool failed_to_evac;
111 /* Old to-space (used for two-space collector only)
113 bdescr *old_to_blocks;
115 /* Data used for allocation area sizing.
117 lnat new_blocks; // blocks allocated during this GC
118 lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
120 /* Used to avoid long recursion due to selector thunks
122 lnat thunk_selector_depth = 0;
123 #define MAX_THUNK_SELECTOR_DEPTH 256
125 /* -----------------------------------------------------------------------------
126 Static function declarations
127 -------------------------------------------------------------------------- */
129 static void mark_root ( StgClosure **root );
130 static StgClosure * evacuate ( StgClosure *q );
131 static void zero_static_object_list ( StgClosure* first_static );
132 static void zero_mutable_list ( StgMutClosure *first );
134 static rtsBool traverse_weak_ptr_list ( void );
135 static void mark_weak_ptr_list ( StgWeak **list );
137 static void scavenge ( step * );
138 static void scavenge_mark_stack ( void );
139 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
140 static rtsBool scavenge_one ( StgPtr p );
141 static void scavenge_large ( step * );
142 static void scavenge_static ( void );
143 static void scavenge_mutable_list ( generation *g );
144 static void scavenge_mut_once_list ( generation *g );
145 static void scavengeCAFs ( void );
147 #if 0 && defined(DEBUG)
148 static void gcCAFs ( void );
151 /* -----------------------------------------------------------------------------
152 inline functions etc. for dealing with the mark bitmap & stack.
153 -------------------------------------------------------------------------- */
155 #define MARK_STACK_BLOCKS 4
157 static bdescr *mark_stack_bdescr;
158 static StgPtr *mark_stack;
159 static StgPtr *mark_sp;
160 static StgPtr *mark_splim;
162 // Flag and pointers used for falling back to a linear scan when the
163 // mark stack overflows.
164 static rtsBool mark_stack_overflowed;
165 static bdescr *oldgen_scan_bd;
166 static StgPtr oldgen_scan;
168 static inline rtsBool
169 mark_stack_empty(void)
171 return mark_sp == mark_stack;
174 static inline rtsBool
175 mark_stack_full(void)
177 return mark_sp >= mark_splim;
181 reset_mark_stack(void)
183 mark_sp = mark_stack;
187 push_mark_stack(StgPtr p)
198 /* -----------------------------------------------------------------------------
201 For garbage collecting generation N (and all younger generations):
203 - follow all pointers in the root set. the root set includes all
204 mutable objects in all steps in all generations.
206 - for each pointer, evacuate the object it points to into either
207 + to-space in the next higher step in that generation, if one exists,
208 + if the object's generation == N, then evacuate it to the next
209 generation if one exists, or else to-space in the current
211 + if the object's generation < N, then evacuate it to to-space
212 in the next generation.
214 - repeatedly scavenge to-space from each step in each generation
215 being collected until no more objects can be evacuated.
217 - free from-space in each step, and set from-space = to-space.
219 -------------------------------------------------------------------------- */
222 GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
226 lnat live, allocated, collected = 0, copied = 0;
227 lnat oldgen_saved_blocks = 0;
231 CostCentreStack *prev_CCS;
234 #if defined(DEBUG) && defined(GRAN)
235 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
239 // tell the stats department that we've started a GC
242 // Init stats and print par specific (timing) info
243 PAR_TICKY_PAR_START();
245 // attribute any costs to CCS_GC
251 /* Approximate how much we allocated.
252 * Todo: only when generating stats?
254 allocated = calcAllocated();
256 /* Figure out which generation to collect
258 if (force_major_gc) {
259 N = RtsFlags.GcFlags.generations - 1;
263 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
264 if (generations[g].steps[0].n_blocks +
265 generations[g].steps[0].n_large_blocks
266 >= generations[g].max_blocks) {
270 major_gc = (N == RtsFlags.GcFlags.generations-1);
273 #ifdef RTS_GTK_FRONTPANEL
274 if (RtsFlags.GcFlags.frontpanel) {
275 updateFrontPanelBeforeGC(N);
279 // check stack sanity *before* GC (ToDo: check all threads)
281 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
283 IF_DEBUG(sanity, checkFreeListSanity());
285 /* Initialise the static object lists
287 static_objects = END_OF_STATIC_LIST;
288 scavenged_static_objects = END_OF_STATIC_LIST;
290 /* zero the mutable list for the oldest generation (see comment by
291 * zero_mutable_list below).
294 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
297 /* Save the old to-space if we're doing a two-space collection
299 if (RtsFlags.GcFlags.generations == 1) {
300 old_to_blocks = g0s0->to_blocks;
301 g0s0->to_blocks = NULL;
304 /* Keep a count of how many new blocks we allocated during this GC
305 * (used for resizing the allocation area, later).
309 /* Initialise to-space in all the generations/steps that we're
312 for (g = 0; g <= N; g++) {
313 generations[g].mut_once_list = END_MUT_LIST;
314 generations[g].mut_list = END_MUT_LIST;
316 for (s = 0; s < generations[g].n_steps; s++) {
318 // generation 0, step 0 doesn't need to-space
319 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
323 /* Get a free block for to-space. Extra blocks will be chained on
327 stp = &generations[g].steps[s];
328 ASSERT(stp->gen_no == g);
329 ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
333 bd->flags = BF_EVACUATED; // it's a to-space block
335 stp->hpLim = stp->hp + BLOCK_SIZE_W;
338 stp->n_to_blocks = 1;
339 stp->scan = bd->start;
341 stp->new_large_objects = NULL;
342 stp->scavenged_large_objects = NULL;
343 stp->n_scavenged_large_blocks = 0;
345 // mark the large objects as not evacuated yet
346 for (bd = stp->large_objects; bd; bd = bd->link) {
347 bd->flags = BF_LARGE;
350 // for a compacted step, we need to allocate the bitmap
351 if (stp->is_compacted) {
352 nat bitmap_size; // in bytes
353 bdescr *bitmap_bdescr;
356 bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
358 if (bitmap_size > 0) {
359 bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
361 stp->bitmap = bitmap_bdescr;
362 bitmap = bitmap_bdescr->start;
364 IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
365 bitmap_size, bitmap););
367 // don't forget to fill it with zeros!
368 memset(bitmap, 0, bitmap_size);
370 // for each block in this step, point to its bitmap from the
372 for (bd=stp->blocks; bd != NULL; bd = bd->link) {
373 bd->u.bitmap = bitmap;
374 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
381 /* make sure the older generations have at least one block to
382 * allocate into (this makes things easier for copy(), see below.
384 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
385 for (s = 0; s < generations[g].n_steps; s++) {
386 stp = &generations[g].steps[s];
387 if (stp->hp_bd == NULL) {
388 ASSERT(stp->blocks == NULL);
393 bd->flags = 0; // *not* a to-space block or a large object
395 stp->hpLim = stp->hp + BLOCK_SIZE_W;
401 /* Set the scan pointer for older generations: remember we
402 * still have to scavenge objects that have been promoted. */
404 stp->scan_bd = stp->hp_bd;
405 stp->to_blocks = NULL;
406 stp->n_to_blocks = 0;
407 stp->new_large_objects = NULL;
408 stp->scavenged_large_objects = NULL;
409 stp->n_scavenged_large_blocks = 0;
413 /* Allocate a mark stack if we're doing a major collection.
416 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
417 mark_stack = (StgPtr *)mark_stack_bdescr->start;
418 mark_sp = mark_stack;
419 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
421 mark_stack_bdescr = NULL;
424 /* -----------------------------------------------------------------------
425 * follow all the roots that we know about:
426 * - mutable lists from each generation > N
427 * we want to *scavenge* these roots, not evacuate them: they're not
428 * going to move in this GC.
429 * Also: do them in reverse generation order. This is because we
430 * often want to promote objects that are pointed to by older
431 * generations early, so we don't have to repeatedly copy them.
432 * Doing the generations in reverse order ensures that we don't end
433 * up in the situation where we want to evac an object to gen 3 and
434 * it has already been evaced to gen 2.
438 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
439 generations[g].saved_mut_list = generations[g].mut_list;
440 generations[g].mut_list = END_MUT_LIST;
443 // Do the mut-once lists first
444 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
445 IF_PAR_DEBUG(verbose,
446 printMutOnceList(&generations[g]));
447 scavenge_mut_once_list(&generations[g]);
449 for (st = generations[g].n_steps-1; st >= 0; st--) {
450 scavenge(&generations[g].steps[st]);
454 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
455 IF_PAR_DEBUG(verbose,
456 printMutableList(&generations[g]));
457 scavenge_mutable_list(&generations[g]);
459 for (st = generations[g].n_steps-1; st >= 0; st--) {
460 scavenge(&generations[g].steps[st]);
467 /* follow all the roots that the application knows about.
470 get_roots(mark_root);
473 /* And don't forget to mark the TSO if we got here direct from
475 /* Not needed in a seq version?
477 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
481 // Mark the entries in the GALA table of the parallel system
482 markLocalGAs(major_gc);
483 // Mark all entries on the list of pending fetches
484 markPendingFetches(major_gc);
487 /* Mark the weak pointer list, and prepare to detect dead weak
490 mark_weak_ptr_list(&weak_ptr_list);
491 old_weak_ptr_list = weak_ptr_list;
492 weak_ptr_list = NULL;
493 weak_done = rtsFalse;
495 /* The all_threads list is like the weak_ptr_list.
496 * See traverse_weak_ptr_list() for the details.
498 old_all_threads = all_threads;
499 all_threads = END_TSO_QUEUE;
500 resurrected_threads = END_TSO_QUEUE;
502 /* Mark the stable pointer table.
504 markStablePtrTable(mark_root);
508 /* ToDo: To fix the caf leak, we need to make the commented out
509 * parts of this code do something sensible - as described in
512 extern void markHugsObjects(void);
517 /* -------------------------------------------------------------------------
518 * Repeatedly scavenge all the areas we know about until there's no
519 * more scavenging to be done.
526 // scavenge static objects
527 if (major_gc && static_objects != END_OF_STATIC_LIST) {
528 IF_DEBUG(sanity, checkStaticObjects(static_objects));
532 /* When scavenging the older generations: Objects may have been
533 * evacuated from generations <= N into older generations, and we
534 * need to scavenge these objects. We're going to try to ensure that
535 * any evacuations that occur move the objects into at least the
536 * same generation as the object being scavenged, otherwise we
537 * have to create new entries on the mutable list for the older
541 // scavenge each step in generations 0..maxgen
547 // scavenge objects in compacted generation
548 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
549 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
550 scavenge_mark_stack();
554 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
555 for (st = generations[gen].n_steps; --st >= 0; ) {
556 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
559 stp = &generations[gen].steps[st];
561 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
566 if (stp->new_large_objects != NULL) {
575 if (flag) { goto loop; }
578 if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
584 // Reconstruct the Global Address tables used in GUM
585 rebuildGAtables(major_gc);
586 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
589 // Now see which stable names are still alive.
592 // Tidy the end of the to-space chains
593 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
594 for (s = 0; s < generations[g].n_steps; s++) {
595 stp = &generations[g].steps[s];
596 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
597 stp->hp_bd->free = stp->hp;
598 stp->hp_bd->link = NULL;
603 // NO MORE EVACUATION AFTER THIS POINT!
604 // Finally: compaction of the oldest generation.
605 if (major_gc && oldest_gen->steps[0].is_compacted) {
606 // save number of blocks for stats
607 oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
611 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
613 /* run through all the generations/steps and tidy up
615 copied = new_blocks * BLOCK_SIZE_W;
616 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
619 generations[g].collections++; // for stats
622 for (s = 0; s < generations[g].n_steps; s++) {
624 stp = &generations[g].steps[s];
626 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
627 // stats information: how much we copied
629 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
634 // for generations we collected...
637 // rough calculation of garbage collected, for stats output
638 if (stp->is_compacted) {
639 collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
641 collected += stp->n_blocks * BLOCK_SIZE_W;
644 /* free old memory and shift to-space into from-space for all
645 * the collected steps (except the allocation area). These
646 * freed blocks will probaby be quickly recycled.
648 if (!(g == 0 && s == 0)) {
649 if (stp->is_compacted) {
650 // for a compacted step, just shift the new to-space
651 // onto the front of the now-compacted existing blocks.
652 for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
653 bd->flags &= ~BF_EVACUATED; // now from-space
655 // tack the new blocks on the end of the existing blocks
656 if (stp->blocks == NULL) {
657 stp->blocks = stp->to_blocks;
659 for (bd = stp->blocks; bd != NULL; bd = next) {
662 bd->link = stp->to_blocks;
666 // add the new blocks to the block tally
667 stp->n_blocks += stp->n_to_blocks;
669 freeChain(stp->blocks);
670 stp->blocks = stp->to_blocks;
671 stp->n_blocks = stp->n_to_blocks;
672 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
673 bd->flags &= ~BF_EVACUATED; // now from-space
676 stp->to_blocks = NULL;
677 stp->n_to_blocks = 0;
680 /* LARGE OBJECTS. The current live large objects are chained on
681 * scavenged_large, having been moved during garbage
682 * collection from large_objects. Any objects left on
683 * large_objects list are therefore dead, so we free them here.
685 for (bd = stp->large_objects; bd != NULL; bd = next) {
691 // update the count of blocks used by large objects
692 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
693 bd->flags &= ~BF_EVACUATED;
695 stp->large_objects = stp->scavenged_large_objects;
696 stp->n_large_blocks = stp->n_scavenged_large_blocks;
699 // for older generations...
701 /* For older generations, we need to append the
702 * scavenged_large_object list (i.e. large objects that have been
703 * promoted during this GC) to the large_object list for that step.
705 for (bd = stp->scavenged_large_objects; bd; bd = next) {
707 bd->flags &= ~BF_EVACUATED;
708 dbl_link_onto(bd, &stp->large_objects);
711 // add the new blocks we promoted during this GC
712 stp->n_blocks += stp->n_to_blocks;
713 stp->n_large_blocks += stp->n_scavenged_large_blocks;
718 /* Reset the sizes of the older generations when we do a major
721 * CURRENT STRATEGY: make all generations except zero the same size.
722 * We have to stay within the maximum heap size, and leave a certain
723 * percentage of the maximum heap size available to allocate into.
725 if (major_gc && RtsFlags.GcFlags.generations > 1) {
726 nat live, size, min_alloc;
727 nat max = RtsFlags.GcFlags.maxHeapSize;
728 nat gens = RtsFlags.GcFlags.generations;
730 // live in the oldest generations
731 live = oldest_gen->steps[0].n_blocks +
732 oldest_gen->steps[0].n_large_blocks;
734 // default max size for all generations except zero
735 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
736 RtsFlags.GcFlags.minOldGenSize);
738 // minimum size for generation zero
739 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
740 RtsFlags.GcFlags.minAllocAreaSize);
742 // Auto-enable compaction when the residency reaches a
743 // certain percentage of the maximum heap size (default: 30%).
744 if (RtsFlags.GcFlags.generations > 1 &&
745 (RtsFlags.GcFlags.compact ||
747 oldest_gen->steps[0].n_blocks >
748 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
749 oldest_gen->steps[0].is_compacted = 1;
750 // fprintf(stderr,"compaction: on\n", live);
752 oldest_gen->steps[0].is_compacted = 0;
753 // fprintf(stderr,"compaction: off\n", live);
756 // if we're going to go over the maximum heap size, reduce the
757 // size of the generations accordingly. The calculation is
758 // different if compaction is turned on, because we don't need
759 // to double the space required to collect the old generation.
762 // this test is necessary to ensure that the calculations
763 // below don't have any negative results - we're working
764 // with unsigned values here.
765 if (max < min_alloc) {
769 if (oldest_gen->steps[0].is_compacted) {
770 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
771 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
774 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
775 size = (max - min_alloc) / ((gens - 1) * 2);
785 fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
786 min_alloc, size, max);
789 for (g = 0; g < gens; g++) {
790 generations[g].max_blocks = size;
794 // Guess the amount of live data for stats.
797 /* Free the small objects allocated via allocate(), since this will
798 * all have been copied into G0S1 now.
800 if (small_alloc_list != NULL) {
801 freeChain(small_alloc_list);
803 small_alloc_list = NULL;
807 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
809 // Start a new pinned_object_block
810 pinned_object_block = NULL;
812 /* Free the mark stack.
814 if (mark_stack_bdescr != NULL) {
815 freeGroup(mark_stack_bdescr);
820 for (g = 0; g <= N; g++) {
821 for (s = 0; s < generations[g].n_steps; s++) {
822 stp = &generations[g].steps[s];
823 if (stp->is_compacted && stp->bitmap != NULL) {
824 freeGroup(stp->bitmap);
829 /* Two-space collector:
830 * Free the old to-space, and estimate the amount of live data.
832 if (RtsFlags.GcFlags.generations == 1) {
835 if (old_to_blocks != NULL) {
836 freeChain(old_to_blocks);
838 for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
839 bd->flags = 0; // now from-space
842 /* For a two-space collector, we need to resize the nursery. */
844 /* set up a new nursery. Allocate a nursery size based on a
845 * function of the amount of live data (by default a factor of 2)
846 * Use the blocks from the old nursery if possible, freeing up any
849 * If we get near the maximum heap size, then adjust our nursery
850 * size accordingly. If the nursery is the same size as the live
851 * data (L), then we need 3L bytes. We can reduce the size of the
852 * nursery to bring the required memory down near 2L bytes.
854 * A normal 2-space collector would need 4L bytes to give the same
855 * performance we get from 3L bytes, reducing to the same
856 * performance at 2L bytes.
858 blocks = g0s0->n_to_blocks;
860 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
861 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
862 RtsFlags.GcFlags.maxHeapSize ) {
863 long adjusted_blocks; // signed on purpose
866 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
867 IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
868 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
869 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
872 blocks = adjusted_blocks;
875 blocks *= RtsFlags.GcFlags.oldGenFactor;
876 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
877 blocks = RtsFlags.GcFlags.minAllocAreaSize;
880 resizeNursery(blocks);
883 /* Generational collector:
884 * If the user has given us a suggested heap size, adjust our
885 * allocation area to make best use of the memory available.
888 if (RtsFlags.GcFlags.heapSizeSuggestion) {
890 nat needed = calcNeeded(); // approx blocks needed at next GC
892 /* Guess how much will be live in generation 0 step 0 next time.
893 * A good approximation is obtained by finding the
894 * percentage of g0s0 that was live at the last minor GC.
897 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
900 /* Estimate a size for the allocation area based on the
901 * information available. We might end up going slightly under
902 * or over the suggested heap size, but we should be pretty
905 * Formula: suggested - needed
906 * ----------------------------
907 * 1 + g0s0_pcnt_kept/100
909 * where 'needed' is the amount of memory needed at the next
910 * collection for collecting all steps except g0s0.
913 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
914 (100 + (long)g0s0_pcnt_kept);
916 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
917 blocks = RtsFlags.GcFlags.minAllocAreaSize;
920 resizeNursery((nat)blocks);
924 // mark the garbage collected CAFs as dead
925 #if 0 && defined(DEBUG) // doesn't work at the moment
926 if (major_gc) { gcCAFs(); }
929 // zero the scavenged static object list
931 zero_static_object_list(scavenged_static_objects);
937 // start any pending finalizers
938 scheduleFinalizers(old_weak_ptr_list);
940 // send exceptions to any threads which were about to die
941 resurrectThreads(resurrected_threads);
943 // Update the stable pointer hash table.
944 updateStablePtrTable(major_gc);
946 // check sanity after GC
947 IF_DEBUG(sanity, checkSanity());
949 // extra GC trace info
950 IF_DEBUG(gc, statDescribeGens());
953 // symbol-table based profiling
954 /* heapCensus(to_blocks); */ /* ToDo */
957 // restore enclosing cost centre
963 // check for memory leaks if sanity checking is on
964 IF_DEBUG(sanity, memInventory());
966 #ifdef RTS_GTK_FRONTPANEL
967 if (RtsFlags.GcFlags.frontpanel) {
968 updateFrontPanelAfterGC( N, live );
972 // ok, GC over: tell the stats department what happened.
973 stat_endGC(allocated, collected, live, copied, N);
979 /* -----------------------------------------------------------------------------
982 traverse_weak_ptr_list is called possibly many times during garbage
983 collection. It returns a flag indicating whether it did any work
984 (i.e. called evacuate on any live pointers).
986 Invariant: traverse_weak_ptr_list is called when the heap is in an
987 idempotent state. That means that there are no pending
988 evacuate/scavenge operations. This invariant helps the weak
989 pointer code decide which weak pointers are dead - if there are no
990 new live weak pointers, then all the currently unreachable ones are
993 For generational GC: we just don't try to finalize weak pointers in
994 older generations than the one we're collecting. This could
995 probably be optimised by keeping per-generation lists of weak
996 pointers, but for a few weak pointers this scheme will work.
997 -------------------------------------------------------------------------- */
1000 traverse_weak_ptr_list(void)
1002 StgWeak *w, **last_w, *next_w;
1004 rtsBool flag = rtsFalse;
1006 if (weak_done) { return rtsFalse; }
1008 /* doesn't matter where we evacuate values/finalizers to, since
1009 * these pointers are treated as roots (iff the keys are alive).
1013 last_w = &old_weak_ptr_list;
1014 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
1016 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
1017 * called on a live weak pointer object. Just remove it.
1019 if (w->header.info == &stg_DEAD_WEAK_info) {
1020 next_w = ((StgDeadWeak *)w)->link;
1025 ASSERT(get_itbl(w)->type == WEAK);
1027 /* Now, check whether the key is reachable.
1029 new = isAlive(w->key);
1032 // evacuate the value and finalizer
1033 w->value = evacuate(w->value);
1034 w->finalizer = evacuate(w->finalizer);
1035 // remove this weak ptr from the old_weak_ptr list
1037 // and put it on the new weak ptr list
1039 w->link = weak_ptr_list;
1042 IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
1046 last_w = &(w->link);
1052 /* Now deal with the all_threads list, which behaves somewhat like
1053 * the weak ptr list. If we discover any threads that are about to
1054 * become garbage, we wake them up and administer an exception.
1057 StgTSO *t, *tmp, *next, **prev;
1059 prev = &old_all_threads;
1060 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1062 (StgClosure *)tmp = isAlive((StgClosure *)t);
1068 ASSERT(get_itbl(t)->type == TSO);
1069 switch (t->what_next) {
1070 case ThreadRelocated:
1075 case ThreadComplete:
1076 // finshed or died. The thread might still be alive, but we
1077 // don't keep it on the all_threads list. Don't forget to
1078 // stub out its global_link field.
1079 next = t->global_link;
1080 t->global_link = END_TSO_QUEUE;
1088 // not alive (yet): leave this thread on the old_all_threads list.
1089 prev = &(t->global_link);
1090 next = t->global_link;
1093 // alive: move this thread onto the all_threads list.
1094 next = t->global_link;
1095 t->global_link = all_threads;
1102 /* If we didn't make any changes, then we can go round and kill all
1103 * the dead weak pointers. The old_weak_ptr list is used as a list
1104 * of pending finalizers later on.
1106 if (flag == rtsFalse) {
1107 for (w = old_weak_ptr_list; w; w = w->link) {
1108 w->finalizer = evacuate(w->finalizer);
1111 /* And resurrect any threads which were about to become garbage.
1114 StgTSO *t, *tmp, *next;
1115 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1116 next = t->global_link;
1117 (StgClosure *)tmp = evacuate((StgClosure *)t);
1118 tmp->global_link = resurrected_threads;
1119 resurrected_threads = tmp;
1123 weak_done = rtsTrue;
1129 /* -----------------------------------------------------------------------------
1130 After GC, the live weak pointer list may have forwarding pointers
1131 on it, because a weak pointer object was evacuated after being
1132 moved to the live weak pointer list. We remove those forwarding
1135 Also, we don't consider weak pointer objects to be reachable, but
1136 we must nevertheless consider them to be "live" and retain them.
1137 Therefore any weak pointer objects which haven't as yet been
1138 evacuated need to be evacuated now.
1139 -------------------------------------------------------------------------- */
1143 mark_weak_ptr_list ( StgWeak **list )
1145 StgWeak *w, **last_w;
1148 for (w = *list; w; w = w->link) {
1149 (StgClosure *)w = evacuate((StgClosure *)w);
1151 last_w = &(w->link);
1155 /* -----------------------------------------------------------------------------
1156 isAlive determines whether the given closure is still alive (after
1157 a garbage collection) or not. It returns the new address of the
1158 closure if it is alive, or NULL otherwise.
1160 NOTE: Use it before compaction only!
1161 -------------------------------------------------------------------------- */
1165 isAlive(StgClosure *p)
1167 const StgInfoTable *info;
1174 /* ToDo: for static closures, check the static link field.
1175 * Problem here is that we sometimes don't set the link field, eg.
1176 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1181 // ignore closures in generations that we're not collecting.
1182 if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
1185 // large objects have an evacuated flag
1186 if (bd->flags & BF_LARGE) {
1187 if (bd->flags & BF_EVACUATED) {
1193 // check the mark bit for compacted steps
1194 if (bd->step->is_compacted && is_marked((P_)p,bd)) {
1198 switch (info->type) {
1203 case IND_OLDGEN: // rely on compatible layout with StgInd
1204 case IND_OLDGEN_PERM:
1205 // follow indirections
1206 p = ((StgInd *)p)->indirectee;
1211 return ((StgEvacuated *)p)->evacuee;
1214 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1215 p = (StgClosure *)((StgTSO *)p)->link;
1227 mark_root(StgClosure **root)
1229 *root = evacuate(*root);
1235 bdescr *bd = allocBlock();
1236 bd->gen_no = stp->gen_no;
1239 if (stp->gen_no <= N) {
1240 bd->flags = BF_EVACUATED;
1245 stp->hp_bd->free = stp->hp;
1246 stp->hp_bd->link = bd;
1247 stp->hp = bd->start;
1248 stp->hpLim = stp->hp + BLOCK_SIZE_W;
1255 static __inline__ void
1256 upd_evacuee(StgClosure *p, StgClosure *dest)
1258 p->header.info = &stg_EVACUATED_info;
1259 ((StgEvacuated *)p)->evacuee = dest;
/* copy: move a closure of 'size' words into the to-space of step 'stp',
 * leave a forwarding (EVACUATED) pointer behind via upd_evacuee(), and
 * return the new address.  If the destination step is younger than
 * evac_gen, the object is redirected to generation evac_gen step 0
 * (unless NO_EAGER_PROMOTION, in which case failed_to_evac is set).
 * NOTE(review): fragmentary listing -- some lines missing below.
 */
1263 static __inline__ StgClosure *
1264 copy(StgClosure *src, nat size, step *stp)
1268 TICK_GC_WORDS_COPIED(size);
1269 /* Find out where we're going, using the handy "to" pointer in
1270 * the step of the source object. If it turns out we need to
1271 * evacuate to an older generation, adjust it here (see comment
1274 if (stp->gen_no < evac_gen) {
1275 #ifdef NO_EAGER_PROMOTION
1276 failed_to_evac = rtsTrue;
1278 stp = &generations[evac_gen].steps[0];
1282 /* chain a new block onto the to-space for the destination step if
1285 if (stp->hp + size >= stp->hpLim) {
// Word-by-word copy of the closure into to-space.
1289 for(to = stp->hp, from = (P_)src; size>0; --size) {
1295 upd_evacuee(src,(StgClosure *)dest);
1296 return (StgClosure *)dest;
1299 /* Special version of copy() for when we only want to copy the info
1300 * pointer of an object, but reserve some padding after it. This is
1301 * used to optimise evacuation of BLACKHOLEs.
/* copyPart: like copy(), but copies only the first 'size_to_copy'
 * words of the closure while reserving 'size_to_reserve' words in
 * to-space.  Used (per the preceding comment) to optimise evacuation
 * of BLACKHOLEs, whose payload need not be copied. */
1305 static __inline__ StgClosure *
1306 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1310 TICK_GC_WORDS_COPIED(size_to_copy);
// Same promotion adjustment as in copy().
1311 if (stp->gen_no < evac_gen) {
1312 #ifdef NO_EAGER_PROMOTION
1313 failed_to_evac = rtsTrue;
1315 stp = &generations[evac_gen].steps[0];
// Need room for the full reserved size, not just the copied prefix.
1319 if (stp->hp + size_to_reserve >= stp->hpLim) {
1323 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
// Advance hp past the reserved (possibly uncopied) area.
1328 stp->hp += size_to_reserve;
1329 upd_evacuee(src,(StgClosure *)dest);
1330 return (StgClosure *)dest;
1334 /* -----------------------------------------------------------------------------
1335 Evacuate a large object
1337 This just consists of removing the object from the (doubly-linked)
1338 large_alloc_list, and linking it on to the (singly-linked)
1339 new_large_objects list, from where it will be scavenged later.
1341 Convention: bd->flags has BF_EVACUATED set for a large object
1342 that has been evacuated, or unset otherwise.
1343 -------------------------------------------------------------------------- */
/* evacuate_large: evacuate a large object by unlinking its block from
 * the step's large_objects list (doubly linked) and pushing it onto the
 * destination step's new_large_objects list (singly linked), marking
 * the block BF_EVACUATED.  No data is copied. */
1347 evacuate_large(StgPtr p)
1349 bdescr *bd = Bdescr(p);
1352 // object must be at the beginning of the block (or be a ByteArray)
1353 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
1354 (((W_)p & BLOCK_MASK) == 0));
1356 // already evacuated?
1357 if (bd->flags & BF_EVACUATED) {
1358 /* Don't forget to set the failed_to_evac flag if we didn't get
1359 * the desired destination (see comments in evacuate()).
1361 if (bd->gen_no < evac_gen) {
1362 failed_to_evac = rtsTrue;
1363 TICK_GC_FAILED_PROMOTION();
1369 // remove from large_object list
1371 bd->u.back->link = bd->link;
1372 } else { // first object in the list
1373 stp->large_objects = bd->link;
1376 bd->link->u.back = bd->u.back;
1379 /* link it on to the evacuated large object list of the destination step
// Same promotion adjustment as in copy()/copyPart().
1382 if (stp->gen_no < evac_gen) {
1383 #ifdef NO_EAGER_PROMOTION
1384 failed_to_evac = rtsTrue;
1386 stp = &generations[evac_gen].steps[0];
1391 bd->gen_no = stp->gen_no;
1392 bd->link = stp->new_large_objects;
1393 stp->new_large_objects = bd;
1394 bd->flags |= BF_EVACUATED;
1397 /* -----------------------------------------------------------------------------
1398 Adding a MUT_CONS to an older generation.
1400 This is necessary from time to time when we end up with an
1401 old-to-new generation pointer in a non-mutable object. We defer
1402 the promotion until the next GC.
1403 -------------------------------------------------------------------------- */
/* mkMutCons: allocate a MUT_CONS in step 0 of 'gen', pointing at 'ptr',
 * and record it on the old-to-new pointer list.  Used (per the header
 * comment) to defer promotion of an old-to-new pointer until next GC. */
1407 mkMutCons(StgClosure *ptr, generation *gen)
1412 stp = &gen->steps[0];
1414 /* chain a new block onto the to-space for the destination step if
1417 if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
// Bump-allocate the MUT_CONS from the step's current block.
1421 q = (StgMutVar *)stp->hp;
1422 stp->hp += sizeofW(StgMutVar);
1424 SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
1426 recordOldToNewPtrs((StgMutClosure *)q);
1428 return (StgClosure *)q;
1431 /* -----------------------------------------------------------------------------
1434 This is called (eventually) for every live object in the system.
1436 The caller to evacuate specifies a desired generation in the
1437 evac_gen global variable. The following conditions apply to
1438 evacuating an object which resides in generation M when we're
1439 collecting up to generation N
1443 else evac to step->to
1445 if M < evac_gen evac to evac_gen, step 0
1447 if the object is already evacuated, then we check which generation
1450 if M >= evac_gen do nothing
1451 if M < evac_gen set failed_to_evac flag to indicate that we
1452 didn't manage to evacuate this object into evac_gen.
1454 -------------------------------------------------------------------------- */
/* evacuate: copy a live closure to to-space and return its new address.
 * Called (eventually) for every live object.  evac_gen (global) names
 * the generation the caller wants the object promoted to; when that is
 * impossible, failed_to_evac is set instead (see the header comment).
 *
 * REVIEW FIX: in the disabled THUNK_SELECTOR recursion code, the test
 * `!bd->flags & BF_EVACUATED` applied `!` before `&` (C precedence), so
 * it computed `(!bd->flags) & BF_EVACUATED` rather than testing the
 * flag.  Corrected to `!(bd->flags & BF_EVACUATED)`, matching the same
 * check elsewhere in this function.
 *
 * NOTE(review): fragmentary listing -- many lines are missing between
 * the numbered lines below.
 */
1457 evacuate(StgClosure *q)
1462 const StgInfoTable *info;
1465 if (HEAP_ALLOCED(q)) {
1468 if (bd->gen_no > N) {
1469 /* Can't evacuate this object, because it's in a generation
1470 * older than the ones we're collecting. Let's hope that it's
1471 * in evac_gen or older, or we will have to arrange to track
1472 * this pointer using the mutable list.
1474 if (bd->gen_no < evac_gen) {
1476 failed_to_evac = rtsTrue;
1477 TICK_GC_FAILED_PROMOTION();
1482 /* evacuate large objects by re-linking them onto a different list.
1484 if (bd->flags & BF_LARGE) {
1486 if (info->type == TSO &&
1487 ((StgTSO *)q)->what_next == ThreadRelocated) {
1488 q = (StgClosure *)((StgTSO *)q)->link;
1491 evacuate_large((P_)q);
1495 /* If the object is in a step that we're compacting, then we
1496 * need to use an alternative evacuate procedure.
1498 if (bd->step->is_compacted) {
1499 if (!is_marked((P_)q,bd)) {
1501 if (mark_stack_full()) {
1502 mark_stack_overflowed = rtsTrue;
1505 push_mark_stack((P_)q);
1513 else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
1516 // make sure the info pointer is into text space
1517 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1518 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1521 switch (info -> type) {
1525 to = copy(q,sizeW_fromITBL(info),stp);
// Small Char#/Int# constructors are replaced by shared static closures.
1530 StgWord w = (StgWord)q->payload[0];
1531 if (q->header.info == Czh_con_info &&
1532 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
1533 (StgChar)w <= MAX_CHARLIKE) {
1534 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1536 if (q->header.info == Izh_con_info &&
1537 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1538 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1540 // else, fall through ...
1546 return copy(q,sizeofW(StgHeader)+1,stp);
1548 case THUNK_1_0: // here because of MIN_UPD_SIZE
1553 #ifdef NO_PROMOTE_THUNKS
1554 if (bd->gen_no == 0 &&
1555 bd->step->no != 0 &&
1556 bd->step->no == generations[bd->gen_no].n_steps-1) {
1560 return copy(q,sizeofW(StgHeader)+2,stp);
1568 return copy(q,sizeofW(StgHeader)+2,stp);
1574 case IND_OLDGEN_PERM:
1579 return copy(q,sizeW_fromITBL(info),stp);
1582 case SE_CAF_BLACKHOLE:
// BLACKHOLEs: only the header need be copied (see copyPart()).
1585 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
1588 to = copy(q,BLACKHOLE_sizeW(),stp);
1591 case THUNK_SELECTOR:
1593 const StgInfoTable* selectee_info;
1594 StgClosure* selectee = ((StgSelector*)q)->selectee;
1597 selectee_info = get_itbl(selectee);
1598 switch (selectee_info->type) {
// Selectee is evaluated: short-circuit by selecting the field now.
1607 StgWord offset = info->layout.selector_offset;
1609 // check that the size is in range
1611 (StgWord32)(selectee_info->layout.payload.ptrs +
1612 selectee_info->layout.payload.nptrs));
1614 // perform the selection!
1615 q = selectee->payload[offset];
1617 /* if we're already in to-space, there's no need to continue
1618 * with the evacuation, just update the source address with
1619 * a pointer to the (evacuated) constructor field.
1621 if (HEAP_ALLOCED(q)) {
1622 bdescr *bd = Bdescr((P_)q);
1623 if (bd->flags & BF_EVACUATED) {
1624 if (bd->gen_no < evac_gen) {
1625 failed_to_evac = rtsTrue;
1626 TICK_GC_FAILED_PROMOTION();
1632 /* otherwise, carry on and evacuate this constructor field,
1633 * (but not the constructor itself)
1642 case IND_OLDGEN_PERM:
1643 selectee = ((StgInd *)selectee)->indirectee;
1647 selectee = ((StgEvacuated *)selectee)->evacuee;
1650 case THUNK_SELECTOR:
1652 /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
1653 something) to go into an infinite loop when the nightly
1654 stage2 compiles PrelTup.lhs. */
1656 /* we can't recurse indefinitely in evacuate(), so set a
1657 * limit on the number of times we can go around this
1660 if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
1662 bd = Bdescr((P_)selectee);
// FIX: was `!bd->flags & BF_EVACUATED`, which tests
// `(!bd->flags) & BF_EVACUATED` due to precedence.
1663 if (!(bd->flags & BF_EVACUATED)) {
1664 thunk_selector_depth++;
1665 selectee = evacuate(selectee);
1666 thunk_selector_depth--;
1670 // otherwise, fall through...
1682 case SE_CAF_BLACKHOLE:
1686 // not evaluated yet
1690 // a copy of the top-level cases below
1691 case RBH: // cf. BLACKHOLE_BQ
1693 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1694 to = copy(q,BLACKHOLE_sizeW(),stp);
1695 //ToDo: derive size etc from reverted IP
1696 //to = copy(q,size,stp);
1697 // recordMutable((StgMutClosure *)to);
1702 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1703 to = copy(q,sizeofW(StgBlockedFetch),stp);
// NOTE(review): the two ASSERTs below check sizeofW(StgBlockedFetch)
// but the copies use StgFetchMe/StgFetchMeBlockingQueue -- looks like
// a copy-paste; confirm the intended type before changing.
1710 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1711 to = copy(q,sizeofW(StgFetchMe),stp);
1715 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1716 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1721 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1722 (int)(selectee_info->type));
1725 return copy(q,THUNK_SELECTOR_sizeW(),stp);
1729 // follow chains of indirections, don't evacuate them
1730 q = ((StgInd*)q)->indirectee;
// Static closures: link onto static_objects for later scavenging.
1734 if (info->srt_len > 0 && major_gc &&
1735 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1736 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1737 static_objects = (StgClosure *)q;
1742 if (info->srt_len > 0 && major_gc &&
1743 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1744 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1745 static_objects = (StgClosure *)q;
1750 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1751 * on the CAF list, so don't do anything with it here (we'll
1752 * scavenge it later).
1755 && ((StgIndStatic *)q)->saved_info == NULL
1756 && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1757 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1758 static_objects = (StgClosure *)q;
1763 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1764 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1765 static_objects = (StgClosure *)q;
1769 case CONSTR_INTLIKE:
1770 case CONSTR_CHARLIKE:
1771 case CONSTR_NOCAF_STATIC:
1772 /* no need to put these on the static linked list, they don't need
1787 // shouldn't see these
1788 barf("evacuate: stack frame at %p\n", q);
1792 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1793 * of stack, tagging and all.
1795 return copy(q,pap_sizeW((StgPAP*)q),stp);
1798 /* Already evacuated, just return the forwarding address.
1799 * HOWEVER: if the requested destination generation (evac_gen) is
1800 * older than the actual generation (because the object was
1801 * already evacuated to a younger generation) then we have to
1802 * set the failed_to_evac flag to indicate that we couldn't
1803 * manage to promote the object to the desired generation.
1805 if (evac_gen > 0) { // optimisation
1806 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1807 if (Bdescr((P_)p)->gen_no < evac_gen) {
1808 failed_to_evac = rtsTrue;
1809 TICK_GC_FAILED_PROMOTION();
1812 return ((StgEvacuated*)q)->evacuee;
1815 // just copy the block
1816 return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
1819 case MUT_ARR_PTRS_FROZEN:
1820 // just copy the block
1821 return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
1825 StgTSO *tso = (StgTSO *)q;
1827 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
1829 if (tso->what_next == ThreadRelocated) {
1830 q = (StgClosure *)tso->link;
1834 /* To evacuate a small TSO, we need to relocate the update frame
1838 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
1839 move_TSO(tso, new_tso);
1840 return (StgClosure *)new_tso;
1845 case RBH: // cf. BLACKHOLE_BQ
1847 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1848 to = copy(q,BLACKHOLE_sizeW(),stp);
1849 //ToDo: derive size etc from reverted IP
1850 //to = copy(q,size,stp);
1852 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1853 q, info_type(q), to, info_type(to)));
1858 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1859 to = copy(q,sizeofW(StgBlockedFetch),stp);
1861 belch("@@ evacuate: %p (%s) to %p (%s)",
1862 q, info_type(q), to, info_type(to)));
1869 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1870 to = copy(q,sizeofW(StgFetchMe),stp);
1872 belch("@@ evacuate: %p (%s) to %p (%s)",
1873 q, info_type(q), to, info_type(to)));
1877 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1878 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1880 belch("@@ evacuate: %p (%s) to %p (%s)",
1881 q, info_type(q), to, info_type(to)));
1886 barf("evacuate: strange closure type %d", (int)(info->type));
1892 /* -----------------------------------------------------------------------------
1893 move_TSO is called to update the TSO structure after it has been
1894 moved from one place to another.
1895 -------------------------------------------------------------------------- */
/* move_TSO: fix up a TSO's internal pointers (sp, su) after the TSO has
 * been copied to a new address, then relocate its stack frames. */
1898 move_TSO(StgTSO *src, StgTSO *dest)
1902 // relocate the stack pointers...
1903 diff = (StgPtr)dest - (StgPtr)src; // In *words*
1904 dest->sp = (StgPtr)dest->sp + diff;
1905 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1907 relocate_stack(dest, diff);
1910 /* -----------------------------------------------------------------------------
1911 relocate_stack is called to update the linkage between
1912 UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
1914 -------------------------------------------------------------------------- */
/* relocate_stack: walk the frame chain of a moved TSO's stack and add
 * 'diff' (in words) to each frame's link pointer so the chain points
 * into the new copy of the stack. */
1917 relocate_stack(StgTSO *dest, ptrdiff_t diff)
1925 while ((P_)su < dest->stack + dest->stack_size) {
1926 switch (get_itbl(su)->type) {
1928 // GCC actually manages to common up these three cases!
1931 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1936 cf = (StgCatchFrame *)su;
1937 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1942 sf = (StgSeqFrame *)su;
1943 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1952 barf("relocate_stack %d", (int)(get_itbl(su)->type));
/* scavenge_srt: evacuate every closure referenced from an info table's
 * SRT (static reference table).  Handles the Win32-DLL double
 * indirection encoding where bit 0 of an entry is set. */
1963 scavenge_srt(const StgInfoTable *info)
1965 StgClosure **srt, **srt_end;
1967 /* evacuate the SRT. If srt_len is zero, then there isn't an
1968 * srt field in the info table. That's ok, because we'll
1969 * never dereference it.
1971 srt = (StgClosure **)(info->srt)
1972 srt_end = srt + info->srt_len;
1973 for (; srt < srt_end; srt++) {
1974 /* Special-case to handle references to closures hiding out in DLLs, since
1975 double indirections required to get at those. The code generator knows
1976 which is which when generating the SRT, so it stores the (indirect)
1977 reference to the DLL closure in the table by first adding one to it.
1978 We check for this here, and undo the addition before evacuating it.
1980 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1981 closure that's fixed at link-time, and no extra magic is required.
1983 #ifdef ENABLE_WIN32_DLL_SUPPORT
1984 if ( (unsigned long)(*srt) & 0x1 ) {
1985 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
1995 /* -----------------------------------------------------------------------------
1997 -------------------------------------------------------------------------- */
/* scavengeTSO: evacuate everything a TSO points at -- its queue link,
 * what it is blocked on (where applicable), its pending exceptions,
 * and every pointer on its stack. */
2000 scavengeTSO (StgTSO *tso)
2002 // chase the link field for any TSOs on the same queue
2003 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
// block_info.closure is only a closure for these blocking reasons
// (the GA cases are PAR-only).
2004 if ( tso->why_blocked == BlockedOnMVar
2005 || tso->why_blocked == BlockedOnBlackHole
2006 || tso->why_blocked == BlockedOnException
2008 || tso->why_blocked == BlockedOnGA
2009 || tso->why_blocked == BlockedOnGA_NoSend
2012 tso->block_info.closure = evacuate(tso->block_info.closure);
2014 if ( tso->blocked_exceptions != NULL ) {
2015 tso->blocked_exceptions =
2016 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2018 // scavenge this thread's stack
2019 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2022 /* -----------------------------------------------------------------------------
2023 Scavenge a given step until there are no more objects in this step
2026 evac_gen is set by the caller to be either zero (for a step in a
2027 generation < N) or G where G is the generation of the step being
2030 We sometimes temporarily change evac_gen back to zero if we're
2031 scavenging a mutable object where early promotion isn't such a good
2033 -------------------------------------------------------------------------- */
/* (Function header not visible in this fragment -- presumably
 * scavenge(step *stp); confirm in full file.)
 * Standard breadth-first scavenge of a step's to-space: walk every
 * object copied so far, evacuating everything it points to, until the
 * scan pointer catches up with the allocation pointer.  Objects that
 * retain old-to-new pointers (failed_to_evac) get a MUT_CONS recorded
 * for them.  NOTE(review): fragmentary listing -- lines are missing
 * between the numbered lines below.
 */
2041 nat saved_evac_gen = evac_gen;
2046 failed_to_evac = rtsFalse;
2048 /* scavenge phase - standard breadth-first scavenging of the
2052 while (bd != stp->hp_bd || p < stp->hp) {
2054 // If we're at the end of this block, move on to the next block
2055 if (bd != stp->hp_bd && p == bd->free) {
2061 info = get_itbl((StgClosure *)p);
2062 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2065 switch (info->type) {
2068 /* treat MVars specially, because we don't want to evacuate the
2069 * mut_link field in the middle of the closure.
2072 StgMVar *mvar = ((StgMVar *)p);
2074 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2075 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2076 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2077 evac_gen = saved_evac_gen;
2078 recordMutable((StgMutClosure *)mvar);
2079 failed_to_evac = rtsFalse; // mutable.
2080 p += sizeofW(StgMVar);
// Small specialised layouts: evacuate the pointer fields and advance
// p by the closure's known size.
2088 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2089 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2090 p += sizeofW(StgHeader) + 2;
2095 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2096 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2102 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2103 p += sizeofW(StgHeader) + 1;
2108 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2114 p += sizeofW(StgHeader) + 1;
2121 p += sizeofW(StgHeader) + 2;
2128 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2129 p += sizeofW(StgHeader) + 2;
// Generic case: layout described by the info table (ptrs then nptrs).
2145 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2146 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2147 (StgClosure *)*p = evacuate((StgClosure *)*p);
2149 p += info->layout.payload.nptrs;
2154 if (stp->gen_no != 0) {
2155 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2158 case IND_OLDGEN_PERM:
2159 ((StgIndOldGen *)p)->indirectee =
2160 evacuate(((StgIndOldGen *)p)->indirectee);
2161 if (failed_to_evac) {
2162 failed_to_evac = rtsFalse;
2163 recordOldToNewPtrs((StgMutClosure *)p);
2165 p += sizeofW(StgIndOldGen);
2170 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2171 evac_gen = saved_evac_gen;
2172 recordMutable((StgMutClosure *)p);
2173 failed_to_evac = rtsFalse; // mutable anyhow
2174 p += sizeofW(StgMutVar);
2179 failed_to_evac = rtsFalse; // mutable anyhow
2180 p += sizeofW(StgMutVar);
2184 case SE_CAF_BLACKHOLE:
2187 p += BLACKHOLE_sizeW();
2192 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2193 (StgClosure *)bh->blocking_queue =
2194 evacuate((StgClosure *)bh->blocking_queue);
2195 recordMutable((StgMutClosure *)bh);
2196 failed_to_evac = rtsFalse;
2197 p += BLACKHOLE_sizeW();
2201 case THUNK_SELECTOR:
2203 StgSelector *s = (StgSelector *)p;
2204 s->selectee = evacuate(s->selectee);
2205 p += THUNK_SELECTOR_sizeW();
2209 case AP_UPD: // same as PAPs
2211 /* Treat a PAP just like a section of stack, not forgetting to
2212 * evacuate the function pointer too...
2215 StgPAP* pap = (StgPAP *)p;
2217 pap->fun = evacuate(pap->fun);
2218 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2219 p += pap_sizeW(pap);
2224 // nothing to follow
2225 p += arr_words_sizeW((StgArrWords *)p);
2229 // follow everything
2233 evac_gen = 0; // repeatedly mutable
2234 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2235 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2236 (StgClosure *)*p = evacuate((StgClosure *)*p);
2238 evac_gen = saved_evac_gen;
2239 recordMutable((StgMutClosure *)q);
2240 failed_to_evac = rtsFalse; // mutable anyhow.
2244 case MUT_ARR_PTRS_FROZEN:
2245 // follow everything
2249 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2250 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2251 (StgClosure *)*p = evacuate((StgClosure *)*p);
2253 // it's tempting to recordMutable() if failed_to_evac is
2254 // false, but that breaks some assumptions (eg. every
2255 // closure on the mutable list is supposed to have the MUT
2256 // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
2262 StgTSO *tso = (StgTSO *)p;
2265 evac_gen = saved_evac_gen;
2266 recordMutable((StgMutClosure *)tso);
2267 failed_to_evac = rtsFalse; // mutable anyhow.
2268 p += tso_sizeW(tso);
// PAR-only closure types below (RBH, BLOCKED_FETCH, FETCH_ME*).
2273 case RBH: // cf. BLACKHOLE_BQ
2276 nat size, ptrs, nonptrs, vhs;
2278 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2280 StgRBH *rbh = (StgRBH *)p;
2281 (StgClosure *)rbh->blocking_queue =
2282 evacuate((StgClosure *)rbh->blocking_queue);
2283 recordMutable((StgMutClosure *)to);
2284 failed_to_evac = rtsFalse; // mutable anyhow.
2286 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2287 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2288 // ToDo: use size of reverted closure here!
2289 p += BLACKHOLE_sizeW();
2295 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2296 // follow the pointer to the node which is being demanded
2297 (StgClosure *)bf->node =
2298 evacuate((StgClosure *)bf->node);
2299 // follow the link to the rest of the blocking queue
2300 (StgClosure *)bf->link =
2301 evacuate((StgClosure *)bf->link);
2302 if (failed_to_evac) {
2303 failed_to_evac = rtsFalse;
2304 recordMutable((StgMutClosure *)bf);
2307 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2308 bf, info_type((StgClosure *)bf),
2309 bf->node, info_type(bf->node)));
2310 p += sizeofW(StgBlockedFetch);
2318 p += sizeofW(StgFetchMe);
2319 break; // nothing to do in this case
2321 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2323 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2324 (StgClosure *)fmbq->blocking_queue =
2325 evacuate((StgClosure *)fmbq->blocking_queue);
2326 if (failed_to_evac) {
2327 failed_to_evac = rtsFalse;
2328 recordMutable((StgMutClosure *)fmbq);
2331 belch("@@ scavenge: %p (%s) exciting, isn't it",
2332 p, info_type((StgClosure *)p)));
2333 p += sizeofW(StgFetchMeBlockingQueue);
2339 barf("scavenge: unimplemented/strange closure type %d @ %p",
2343 /* If we didn't manage to promote all the objects pointed to by
2344 * the current object, then we have to designate this object as
2345 * mutable (because it contains old-to-new generation pointers).
2347 if (failed_to_evac) {
2348 failed_to_evac = rtsFalse;
2349 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2357 /* -----------------------------------------------------------------------------
2358 Scavenge everything on the mark stack.
2360 This is slightly different from scavenge():
2361 - we don't walk linearly through the objects, so the scavenger
2362 doesn't need to advance the pointer on to the next object.
2363 -------------------------------------------------------------------------- */
/* scavenge_mark_stack: scavenge everything pushed on the mark stack
 * during compacting collection.  Unlike scavenge(), objects arrive one
 * at a time via pop_mark_stack(), so no size-based pointer advance is
 * needed; each object's "scavenged" mark bit (q+1) is set when done.
 * If the mark stack overflowed, falls back to a linear scan of the
 * oldest generation's blocks.  NOTE(review): fragmentary listing --
 * lines are missing between the numbered lines below.
 */
2366 scavenge_mark_stack(void)
2372 evac_gen = oldest_gen->no;
2373 saved_evac_gen = evac_gen;
2376 while (!mark_stack_empty()) {
2377 p = pop_mark_stack();
2379 info = get_itbl((StgClosure *)p);
2380 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2383 switch (info->type) {
2386 /* treat MVars specially, because we don't want to evacuate the
2387 * mut_link field in the middle of the closure.
2390 StgMVar *mvar = ((StgMVar *)p);
2392 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2393 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2394 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2395 evac_gen = saved_evac_gen;
2396 failed_to_evac = rtsFalse; // mutable.
2404 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2405 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2415 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
// Generic case: evacuate the info-table-described pointer fields.
2440 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2441 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2442 (StgClosure *)*p = evacuate((StgClosure *)*p);
2448 // don't need to do anything here: the only possible case
2449 // is that we're in a 1-space compacting collector, with
2450 // no "old" generation.
2454 case IND_OLDGEN_PERM:
2455 ((StgIndOldGen *)p)->indirectee =
2456 evacuate(((StgIndOldGen *)p)->indirectee);
2457 if (failed_to_evac) {
2458 recordOldToNewPtrs((StgMutClosure *)p);
2460 failed_to_evac = rtsFalse;
2465 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2466 evac_gen = saved_evac_gen;
2467 failed_to_evac = rtsFalse;
2472 failed_to_evac = rtsFalse;
2476 case SE_CAF_BLACKHOLE:
2484 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2485 (StgClosure *)bh->blocking_queue =
2486 evacuate((StgClosure *)bh->blocking_queue);
2487 failed_to_evac = rtsFalse;
2491 case THUNK_SELECTOR:
2493 StgSelector *s = (StgSelector *)p;
2494 s->selectee = evacuate(s->selectee);
2498 case AP_UPD: // same as PAPs
2500 /* Treat a PAP just like a section of stack, not forgetting to
2501 * evacuate the function pointer too...
2504 StgPAP* pap = (StgPAP *)p;
2506 pap->fun = evacuate(pap->fun);
2507 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2512 // follow everything
2516 evac_gen = 0; // repeatedly mutable
2517 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2518 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2519 (StgClosure *)*p = evacuate((StgClosure *)*p);
2521 evac_gen = saved_evac_gen;
2522 failed_to_evac = rtsFalse; // mutable anyhow.
2526 case MUT_ARR_PTRS_FROZEN:
2527 // follow everything
2531 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2532 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2533 (StgClosure *)*p = evacuate((StgClosure *)*p);
2540 StgTSO *tso = (StgTSO *)p;
2543 evac_gen = saved_evac_gen;
2544 failed_to_evac = rtsFalse;
// PAR-only closure types below (RBH, BLOCKED_FETCH, FETCH_ME*).
2549 case RBH: // cf. BLACKHOLE_BQ
2552 nat size, ptrs, nonptrs, vhs;
2554 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2556 StgRBH *rbh = (StgRBH *)p;
2557 (StgClosure *)rbh->blocking_queue =
2558 evacuate((StgClosure *)rbh->blocking_queue);
2559 recordMutable((StgMutClosure *)rbh);
2560 failed_to_evac = rtsFalse; // mutable anyhow.
2562 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2563 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2569 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2570 // follow the pointer to the node which is being demanded
2571 (StgClosure *)bf->node =
2572 evacuate((StgClosure *)bf->node);
2573 // follow the link to the rest of the blocking queue
2574 (StgClosure *)bf->link =
2575 evacuate((StgClosure *)bf->link);
2576 if (failed_to_evac) {
2577 failed_to_evac = rtsFalse;
2578 recordMutable((StgMutClosure *)bf);
2581 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2582 bf, info_type((StgClosure *)bf),
2583 bf->node, info_type(bf->node)));
2591 break; // nothing to do in this case
2593 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2595 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2596 (StgClosure *)fmbq->blocking_queue =
2597 evacuate((StgClosure *)fmbq->blocking_queue);
2598 if (failed_to_evac) {
2599 failed_to_evac = rtsFalse;
2600 recordMutable((StgMutClosure *)fmbq);
2603 belch("@@ scavenge: %p (%s) exciting, isn't it",
2604 p, info_type((StgClosure *)p)));
2610 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
// Object retained old-to-new pointers: defer with a MUT_CONS.
2614 if (failed_to_evac) {
2615 failed_to_evac = rtsFalse;
2616 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2619 // mark the next bit to indicate "scavenged"
2620 mark(q+1, Bdescr(q));
2622 } // while (!mark_stack_empty())
2624 // start a new linear scan if the mark stack overflowed at some point
2625 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2626 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2627 mark_stack_overflowed = rtsFalse;
2628 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2629 oldgen_scan = oldgen_scan_bd->start;
2632 if (oldgen_scan_bd) {
2633 // push a new thing on the mark stack
2635 // find a closure that is marked but not scavenged, and start
2637 while (oldgen_scan < oldgen_scan_bd->free
2638 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2642 if (oldgen_scan < oldgen_scan_bd->free) {
2644 // already scavenged?
2645 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2646 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2649 push_mark_stack(oldgen_scan);
2650 // ToDo: bump the linear scan by the actual size of the object
2651 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2655 oldgen_scan_bd = oldgen_scan_bd->link;
2656 if (oldgen_scan_bd != NULL) {
2657 oldgen_scan = oldgen_scan_bd->start;
2663 /* -----------------------------------------------------------------------------
2664 Scavenge one object.
2666 This is used for objects that are temporarily marked as mutable
2667 because they contain old-to-new generation pointers. Only certain
2668 objects can have this property.
2669 -------------------------------------------------------------------------- */
/* scavenge_one: scavenge a single object (used for objects temporarily
 * on the mutable list because they hold old-to-new pointers).  Returns
 * rtsTrue ("no luck") if some pointee could not be promoted to
 * evac_gen, so the caller should keep the object on the mutable list.
 * NOTE(review): fragmentary listing -- lines missing below.
 */
2672 scavenge_one(StgPtr p)
2674 const StgInfoTable *info;
2675 nat saved_evac_gen = evac_gen;
2678 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2679 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2681 info = get_itbl((StgClosure *)p);
2683 switch (info->type) {
2686 case FUN_1_0: // hardly worth specialising these guys
2706 case IND_OLDGEN_PERM:
// Generic case: evacuate the info-table-described pointer fields.
2710 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2711 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2712 (StgClosure *)*q = evacuate((StgClosure *)*q);
2718 case SE_CAF_BLACKHOLE:
2723 case THUNK_SELECTOR:
2725 StgSelector *s = (StgSelector *)p;
2726 s->selectee = evacuate(s->selectee);
2731 // nothing to follow
2736 // follow everything
2739 evac_gen = 0; // repeatedly mutable
2740 recordMutable((StgMutClosure *)p);
2741 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2742 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2743 (StgClosure *)*p = evacuate((StgClosure *)*p);
2745 evac_gen = saved_evac_gen;
2746 failed_to_evac = rtsFalse;
2750 case MUT_ARR_PTRS_FROZEN:
2752 // follow everything
2755 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2756 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2757 (StgClosure *)*p = evacuate((StgClosure *)*p);
2764 StgTSO *tso = (StgTSO *)p;
2766 evac_gen = 0; // repeatedly mutable
2768 recordMutable((StgMutClosure *)tso);
2769 evac_gen = saved_evac_gen;
2770 failed_to_evac = rtsFalse;
2777 StgPAP* pap = (StgPAP *)p;
2778 pap->fun = evacuate(pap->fun);
2779 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2784 // This might happen if for instance a MUT_CONS was pointing to a
2785 // THUNK which has since been updated. The IND_OLDGEN will
2786 // be on the mutable list anyway, so we don't need to do anything
2791 barf("scavenge_one: strange object %d", (int)(info->type));
// Report and reset the sticky failed_to_evac flag.
2794 no_luck = failed_to_evac;
2795 failed_to_evac = rtsFalse;
2799 /* -----------------------------------------------------------------------------
2800 Scavenging mutable lists.
2802 We treat the mutable list of each generation > N (i.e. all the
2803 generations older than the one being collected) as roots. We also
2804 remove non-mutable objects from the mutable list at this point.
2805 -------------------------------------------------------------------------- */
/* Scavenge a generation's mut_once list: closures (IND_OLDGEN,
 * IND_OLDGEN_PERM, IND_STATIC, MUT_CONS) recorded as holding a single
 * old-to-young pointer.  Entries whose referent is successfully pulled
 * up into this generation are dropped; the rest are consed onto
 * new_list, which replaces gen->mut_once_list at the end.
 *
 * NOTE(review): the loop increment reads `next` before assigning it on
 * the first iteration; `next` must be initialised to p->mut_link before
 * the loop — that line is not visible in this excerpt, confirm against
 * the full source.
 */
2808 scavenge_mut_once_list(generation *gen)
2810   const StgInfoTable *info;
2811   StgMutClosure *p, *next, *new_list;
2813   p = gen->mut_once_list;
2814   new_list = END_MUT_LIST;
2818   failed_to_evac = rtsFalse;
2820   for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2822     // make sure the info pointer is into text space
2823     ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2824 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2828     if (info->type==RBH)
2829       info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2831     switch(info->type) {
2834     case IND_OLDGEN_PERM:
2836       /* Try to pull the indirectee into this generation, so we can
2837        * remove the indirection from the mutable list.
2839       ((StgIndOldGen *)p)->indirectee =
2840 evacuate(((StgIndOldGen *)p)->indirectee);
/* Disabled (#if 0) debug code: measures how much was evacuated for this
 * IND_OLDGEN by scavenging step 0 and counting blocks traversed. */
2842 #if 0 && defined(DEBUG)
2843       if (RtsFlags.DebugFlags.gc)
2844       /* Debugging code to print out the size of the thing we just
2848 StgPtr start = gen->steps[0].scan;
2849 bdescr *start_bd = gen->steps[0].scan_bd;
2851 scavenge(&gen->steps[0]);
2852 if (start_bd != gen->steps[0].scan_bd) {
2853   size += (P_)BLOCK_ROUND_UP(start) - start;
2854   start_bd = start_bd->link;
2855   while (start_bd != gen->steps[0].scan_bd) {
2856     size += BLOCK_SIZE_W;
2857     start_bd = start_bd->link;
2859   size += gen->steps[0].scan -
2860 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2862 size = gen->steps[0].scan - start;
2864 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2868       /* failed_to_evac might happen if we've got more than two
2869        * generations, we're collecting only generation 0, the
2870        * indirection resides in generation 2 and the indirectee is
2873       if (failed_to_evac) {
2874 failed_to_evac = rtsFalse;
2875 p->mut_link = new_list;     // keep the indirection on the new list
2878 /* the mut_link field of an IND_STATIC is overloaded as the
2879  * static link field too (it just so happens that we don't need
2880  * both at the same time), so we need to NULL it out when
2881  * removing this object from the mutable list because the static
2882  * link fields are all assumed to be NULL before doing a major
2890       /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2891        * it from the mutable list if possible by promoting whatever it
2894       if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2895 /* didn't manage to promote everything, so put the
2896  * MUT_CONS back on the list.
2898 p->mut_link = new_list;
2904       // shouldn't have anything else on the mutables list
2905       barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
// Install the surviving entries as the generation's new mut_once list.
2909   gen->mut_once_list = new_list;
/* Scavenge a generation's (repeatedly-)mutable list, taken from
 * gen->saved_mut_list.  Each still-mutable object is evacuated/scavenged
 * and re-linked onto gen->mut_list; objects that turn out to no longer
 * need it are demoted (e.g. frozen arrays via mkMutCons, updated
 * indirections onto mut_once_list).  The GRAN/PAR cases (RBH,
 * BLOCKED_FETCH, FETCH_ME*) are compiled only for the parallel RTS.
 *
 * NOTE(review): as in scavenge_mut_once_list, `next` must be
 * initialised before the first loop increment; that line is outside
 * this excerpt — confirm against the full source.
 */
2914 scavenge_mutable_list(generation *gen)
2916   const StgInfoTable *info;
2917   StgMutClosure *p, *next;
2919   p = gen->saved_mut_list;
2923   failed_to_evac = rtsFalse;
2925   for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2927     // make sure the info pointer is into text space
2928     ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2929 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2933     if (info->type==RBH)
2934       info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2936     switch(info->type) {
2939       // follow everything
2940       p->mut_link = gen->mut_list;    // stays mutable: keep on mut_list
2945       end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2946       for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2947 (StgClosure *)*q = evacuate((StgClosure *)*q);
2952       // Happens if a MUT_ARR_PTRS in the old generation is frozen
2953     case MUT_ARR_PTRS_FROZEN:
2958       end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2959       for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2960 (StgClosure *)*q = evacuate((StgClosure *)*q);
/* Frozen array: no longer mutable, but if something failed to evacuate
 * we still need a MUT_CONS entry so the old-to-young pointer is seen. */
2964       if (failed_to_evac) {
2965 failed_to_evac = rtsFalse;
2966 mkMutCons((StgClosure *)p, gen);
2972       ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2973       p->mut_link = gen->mut_list;
/* MVar: evacuate the blocked-thread queue ends and the value. */
2979 StgMVar *mvar = (StgMVar *)p;
2980 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2981 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2982 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2983 p->mut_link = gen->mut_list;
2990 StgTSO *tso = (StgTSO *)p;
2994 /* Don't take this TSO off the mutable list - it might still
2995  * point to some younger objects (because we set evac_gen to 0
2998 tso->mut_link = gen->mut_list;
2999 gen->mut_list = (StgMutClosure *)tso;
3005 StgBlockingQueue *bh = (StgBlockingQueue *)p;
3006 (StgClosure *)bh->blocking_queue =
3007   evacuate((StgClosure *)bh->blocking_queue);
3008 p->mut_link = gen->mut_list;
3013       /* Happens if a BLACKHOLE_BQ in the old generation is updated:
3016     case IND_OLDGEN_PERM:
3017       /* Try to pull the indirectee into this generation, so we can
3018        * remove the indirection from the mutable list.
3021       ((StgIndOldGen *)p)->indirectee =
3022 evacuate(((StgIndOldGen *)p)->indirectee);
/* Indirection still points young: it only needs the once-mutable list. */
3025       if (failed_to_evac) {
3026 failed_to_evac = rtsFalse;
3027 p->mut_link = gen->mut_once_list;
3028 gen->mut_once_list = p;
3035     // HWL: check whether all of these are necessary
3037     case RBH: // cf. BLACKHOLE_BQ
3039       // nat size, ptrs, nonptrs, vhs;
3041       // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3042       StgRBH *rbh = (StgRBH *)p;
3043       (StgClosure *)rbh->blocking_queue =
3044 evacuate((StgClosure *)rbh->blocking_queue);
3045       if (failed_to_evac) {
3046 failed_to_evac = rtsFalse;
3047 recordMutable((StgMutClosure *)rbh);
3049       // ToDo: use size of reverted closure here!
3050       p += BLACKHOLE_sizeW();
3056 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3057 // follow the pointer to the node which is being demanded
3058 (StgClosure *)bf->node =
3059   evacuate((StgClosure *)bf->node);
3060 // follow the link to the rest of the blocking queue
3061 (StgClosure *)bf->link =
3062   evacuate((StgClosure *)bf->link);
3063 if (failed_to_evac) {
3064   failed_to_evac = rtsFalse;
3065   recordMutable((StgMutClosure *)bf);
3067 p += sizeofW(StgBlockedFetch);
3073       barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3076       p += sizeofW(StgFetchMe);
3077       break; // nothing to do in this case
3079     case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3081 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3082 (StgClosure *)fmbq->blocking_queue =
3083   evacuate((StgClosure *)fmbq->blocking_queue);
3084 if (failed_to_evac) {
3085   failed_to_evac = rtsFalse;
3086   recordMutable((StgMutClosure *)fmbq);
3088 p += sizeofW(StgFetchMeBlockingQueue);
3094       // shouldn't have anything else on the mutables list
3095       barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
/* Scavenge the global static_objects list.  Each object is moved onto
 * scavenged_static_objects *before* being scavenged (evacuation may
 * push more objects onto static_objects — see the comment at the end of
 * the loop).  Static objects are always evacuated to the oldest
 * generation, so evac_gen is set accordingly for the duration.
 */
3102 scavenge_static(void)
3104   StgClosure* p = static_objects;
3105   const StgInfoTable *info;
3107   /* Always evacuate straight to the oldest generation for static
3109   evac_gen = oldest_gen->no;
3111   /* keep going until we've scavenged all the objects on the linked
3113   while (p != END_OF_STATIC_LIST) {
3117     if (info->type==RBH)
3118       info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3120     // make sure the info pointer is into text space
3121     ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3122 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3124     /* Take this object *off* the static_objects list,
3125      * and put it on the scavenged_static_objects list.
3127     static_objects = STATIC_LINK(info,p);
3128     STATIC_LINK(info,p) = scavenged_static_objects;
3129     scavenged_static_objects = p;
3131     switch (info -> type) {
3135 StgInd *ind = (StgInd *)p;
3136 ind->indirectee = evacuate(ind->indirectee);
3138 /* might fail to evacuate it, in which case we have to pop it
3139  * back on the mutable list (and take it off the
3140  * scavenged_static list because the static link and mut link
3141  * pointers are one and the same).
3143 if (failed_to_evac) {
3144   failed_to_evac = rtsFalse;
3145   scavenged_static_objects = IND_STATIC_LINK(p);
3146   ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3147   oldest_gen->mut_once_list = (StgMutClosure *)ind;
/* Constructor-like static closure: evacuate each pointer field in
 * the payload (layout.payload.ptrs pointers first). */
3161 next = (P_)p->payload + info->layout.payload.ptrs;
3162 // evacuate the pointers
3163 for (q = (P_)p->payload; q < next; q++) {
3164   (StgClosure *)*q = evacuate((StgClosure *)*q);
3170       barf("scavenge_static: strange closure %d", (int)(info->type));
3173     ASSERT(failed_to_evac == rtsFalse);
3175     /* get the next static object from the list.  Remember, there might
3176      * be more stuff on this list now that we've done some evacuating!
3177      * (static_objects is a global)
3183 /* -----------------------------------------------------------------------------
3184 scavenge_stack walks over a section of stack and evacuates all the
3185 objects pointed to by it. We can use the same code for walking
3186 PAPs, since these are just sections of copied stack.
3187 -------------------------------------------------------------------------- */
/* Walk the stack words in [p, stack_end), evacuating every closure
 * pointer found.  The stack alternates between pending-argument
 * sections (tagged words / raw closure pointers, distinguished by
 * LOOKS_LIKE_GHC_INFO) and activation records described by small or
 * large bitmaps; update frames get specialised treatment.  Also used
 * for PAP payloads, which are copied stack sections.
 */
3190 scavenge_stack(StgPtr p, StgPtr stack_end)
3193   const StgInfoTable* info;
3196   //IF_DEBUG(sanity, belch("  scavenging stack between %p and %p", p, stack_end));
3199    * Each time around this loop, we are looking at a chunk of stack
3200    * that starts with either a pending argument section or an
3201    * activation record.
3204   while (p < stack_end) {
3207     // If we've got a tag, skip over that many words on the stack
3208     if (IS_ARG_TAG((W_)q)) {
3213     /* Is q a pointer to a closure?
3215     if (! LOOKS_LIKE_GHC_INFO(q) ) {
3217       if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3218 ASSERT(closure_STATIC((StgClosure *)q));
3220       // otherwise, must be a pointer into the allocation space.
3223       (StgClosure *)*p = evacuate((StgClosure *)q);
3229      * Otherwise, q must be the info pointer of an activation
3230      * record.  All activation records have 'bitmap' style layout
3233     info  = get_itbl((StgClosure *)p);
3235     switch (info->type) {
3237       // Dynamic bitmap: the mask is stored on the stack
3239       bitmap = ((StgRetDyn *)p)->liveness;
3240       p      = (P_)&((StgRetDyn *)p)->payload[0];
3243       // probably a slow-entry point return address:
3251 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3252 old_p, p, old_p+1));
3254       p++; // what if FHS!=1 !? -- HWL
3259       /* Specialised code for update frames, since they're so common.
3260        * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3261        * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3265 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3267 p += sizeofW(StgUpdateFrame);
3270 frame->updatee = evacuate(frame->updatee);
3272 #else // specialised code for update frames, not sure if it's worth it.
3274 nat type = get_itbl(frame->updatee)->type;
3276 if (type == EVACUATED) {
3277   frame->updatee = evacuate(frame->updatee);
3280   bdescr *bd = Bdescr((P_)frame->updatee);
3282   if (bd->gen_no > N) {
3283     if (bd->gen_no < evac_gen) {
3284       failed_to_evac = rtsTrue;
3289   // Don't promote blackholes
3291   if (!(stp->gen_no == 0 &&
3293 stp->no == stp->gen->n_steps-1)) {
/* BLACKHOLE_BQ keeps its queue: copy header only, else whole closure. */
3300     to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3301   sizeofW(StgHeader), stp);
3302     frame->updatee = to;
3305     to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3306     frame->updatee = to;
3307     recordMutable((StgMutClosure *)to);
3310   /* will never be SE_{,CAF_}BLACKHOLE, since we
3311      don't push an update frame for single-entry thunks.  KSW 1999-01. */
3312   barf("scavenge_stack: UPDATE_FRAME updatee");
3318       // small bitmap (< 32 entries, or 64 on a 64-bit machine)
3325       bitmap = info->layout.bitmap;
3327       // this assumes that the payload starts immediately after the info-ptr
/* A clear bit means "live pointer": evacuate that stack slot. */
3329       while (bitmap != 0) {
3330 if ((bitmap & 1) == 0) {
3331   (StgClosure *)*p = evacuate((StgClosure *)*p);
3334 bitmap = bitmap >> 1;
3341       // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3346 StgLargeBitmap *large_bitmap;
3349 large_bitmap = info->layout.large_bitmap;
3352 for (i=0; i<large_bitmap->size; i++) {
3353   bitmap = large_bitmap->bitmap[i];
3354   q = p + BITS_IN(W_);
3355   while (bitmap != 0) {
3356     if ((bitmap & 1) == 0) {
3357       (StgClosure *)*p = evacuate((StgClosure *)*p);
3360     bitmap = bitmap >> 1;
3362   if (i+1 < large_bitmap->size) {
3364       (StgClosure *)*p = evacuate((StgClosure *)*p);
3370       // and don't forget to follow the SRT
3375       barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3380 /*-----------------------------------------------------------------------------
3381 scavenge the large object list.
3383 evac_gen set by caller; similar games played with evac_gen as with
3384 scavenge() - see comment at the top of scavenge(). Most large
3385 objects are (repeatedly) mutable, so most of the time evac_gen will
3387 --------------------------------------------------------------------------- */
/* Scavenge the new_large_objects list of a step.  The list is treated
 * as a stack (evacuation may push more blocks onto it), so each block
 * is popped, moved to scavenged_large_objects, counted, and scavenged
 * via scavenge_one(); if that fails to promote everything, the object
 * is recorded with mkMutCons so the old-to-young pointer survives.
 */
3390 scavenge_large(step *stp)
3395   bd = stp->new_large_objects;
3397   for (; bd != NULL; bd = stp->new_large_objects) {
3399     /* take this object *off* the large objects list and put it on
3400      * the scavenged large objects list.  This is so that we can
3401      * treat new_large_objects as a stack and push new objects on
3402      * the front when evacuating.
3404     stp->new_large_objects = bd->link;
3405     dbl_link_onto(bd, &stp->scavenged_large_objects);
3407     // update the block count in this step.
3408     stp->n_scavenged_large_blocks += bd->blocks;
3411     if (scavenge_one(p)) {
3412       mkMutCons((StgClosure *)p, stp->gen);
3417 /* -----------------------------------------------------------------------------
3418 Initialising the static object & mutable lists
3419 -------------------------------------------------------------------------- */
/* Walk a static-object list and reset every static link field to NULL,
 * so the list is in the expected "unmarked" state before the next
 * (major) GC.  The link is saved before being zeroed so traversal can
 * continue.
 */
3422 zero_static_object_list(StgClosure* first_static)
3426   const StgInfoTable *info;
3428   for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3430     link = STATIC_LINK(info, p);
3431     STATIC_LINK(info,p) = NULL;
3435 /* This function is only needed because we share the mutable link
3436 * field with the static link field in an IND_STATIC, so we have to
3437 * zero the mut_link field before doing a major GC, which needs the
3438 * static link field.
3440 * It doesn't do any harm to zero all the mutable link fields on the
/* Zero the mut_link field of every closure on a mutable list (see the
 * IND_STATIC shared-field comment above for why this is needed before
 * a major GC).
 */
3445 zero_mutable_list( StgMutClosure *first )
3447   StgMutClosure *next, *c;
3449   for (c = first; c != END_MUT_LIST; c = next) {
3455 /* -----------------------------------------------------------------------------
3457 -------------------------------------------------------------------------- */
/* Walk caf_list restoring each CAF's original (saved) info table, so
 * previously-entered CAFs revert to their unevaluated state.
 * NOTE(review): the enclosing function header is not visible in this
 * excerpt — presumably this is the CAF-reverting routine; confirm
 * against the full source.
 */
3464   for (c = (StgIndStatic *)caf_list; c != NULL;
3465        c = (StgIndStatic *)c->static_link)
3467     c->header.info = c->saved_info;
3468     c->saved_info = NULL;
3469     // could, but not necessary: c->static_link = NULL;
/* Evacuate the indirectee of every CAF on caf_list, keeping the values
 * that dynamically-loaded CAFs point to alive across a GC.
 */
3475 scavengeCAFs( void )
3480   for (c = (StgIndStatic *)caf_list; c != NULL;
3481        c = (StgIndStatic *)c->static_link)
3483     c->indirectee = evacuate(c->indirectee);
3487 /* -----------------------------------------------------------------------------
3488 Sanity code for CAF garbage collection.
3490 With DEBUG turned on, we manage a CAF list in addition to the SRT
3491 mechanism. After GC, we run down the CAF list and blackhole any
3492 CAFs which have been garbage collected. This means we get an error
3493 whenever the program tries to enter a garbage collected CAF.
3495 Any garbage collected CAFs are taken off the CAF list at the same
3497 -------------------------------------------------------------------------- */
/* Disabled (#if 0) CAF sanity-checking code: after GC, blackhole any
 * CAF whose static link is NULL (i.e. it was not reached), and unlink
 * it from the CAF list — see the banner comment above.
 */
3499 #if 0 && defined(DEBUG)
3506   const StgInfoTable *info;
3517     ASSERT(info->type == IND_STATIC);
3519     if (STATIC_LINK(info,p) == NULL) {
3520       IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
// Entering a collected CAF now hits a BLACKHOLE — deliberate error trap.
3522       SET_INFO(p,&stg_BLACKHOLE_info);
3523       p = STATIC_LINK2(info,p);
3527       pp = &STATIC_LINK2(info,p);
3534   //  belch("%d CAFs live", i);
3539 /* -----------------------------------------------------------------------------
3542 Whenever a thread returns to the scheduler after possibly doing
3543 some work, we have to run down the stack and black-hole all the
3544 closures referred to by update frames.
3545 -------------------------------------------------------------------------- */
/* Lazy black-holing: walk a TSO's update-frame chain (tso->su) and
 * overwrite each updatee with a BLACKHOLE.  Stops early on the first
 * plain BLACKHOLE, since everything below it was black-holed on an
 * earlier pass; CATCH/SEQ frames are simply skipped via their links.
 */
3548 threadLazyBlackHole(StgTSO *tso)
3550   StgUpdateFrame *update_frame;
3551   StgBlockingQueue *bh;
3554   stack_end = &tso->stack[tso->stack_size];
3555   update_frame = tso->su;
3558     switch (get_itbl(update_frame)->type) {
3561       update_frame = ((StgCatchFrame *)update_frame)->link;
3565       bh = (StgBlockingQueue *)update_frame->updatee;
3567       /* if the thunk is already blackholed, it means we've also
3568        * already blackholed the rest of the thunks on this stack,
3569        * so we can stop early.
3571        * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3572        * don't interfere with this optimisation.
3574       if (bh->header.info == &stg_BLACKHOLE_info) {
/* Don't clobber BLACKHOLE_BQ (it has a blocked-thread queue) or
 * CAF_BLACKHOLE; anything else gets turned into a BLACKHOLE. */
3578       if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3579   bh->header.info != &stg_CAF_BLACKHOLE_info) {
3580 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3581 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3583 SET_INFO(bh,&stg_BLACKHOLE_info);
3586       update_frame = update_frame->link;
3590       update_frame = ((StgSeqFrame *)update_frame)->link;
3596       barf("threadPaused");
3602 /* -----------------------------------------------------------------------------
3605 * Code largely pinched from old RTS, then hacked to bits. We also do
3606 * lazy black holing here.
3608 * -------------------------------------------------------------------------- */
/* Stack squeezing (plus lazy black-holing): walk down the update-frame
 * chain reversing the links, then walk back up merging adjacent update
 * frames that update the same chunk of stack, sliding the intervening
 * stack words up by `displacement` and redirecting the bypassed updatee
 * with an indirection.  See the inline comments; the algorithm is
 * described in the "Code largely pinched from old RTS" banner above.
 */
3611 threadSqueezeStack(StgTSO *tso)
3613   lnat displacement = 0;
3614   StgUpdateFrame *frame;
3615   StgUpdateFrame *next_frame;                 // Temporally next
3616   StgUpdateFrame *prev_frame;                 // Temporally previous
3618   rtsBool prev_was_update_frame;
3620   StgUpdateFrame *top_frame;
3621   nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3623   void printObj( StgClosure *obj ); // from Printer.c
3625   top_frame  = tso->su;
3628   bottom = &(tso->stack[tso->stack_size]);
3631   /* There must be at least one frame, namely the STOP_FRAME.
3633   ASSERT((P_)frame < bottom);
3635   /* Walk down the stack, reversing the links between frames so that
3636    * we can walk back up as we squeeze from the bottom.  Note that
3637    * next_frame and prev_frame refer to next and previous as they were
3638    * added to the stack, rather than the way we see them in this
3639    * walk. (It makes the next loop less confusing.)
3641    * Stop if we find an update frame pointing to a black hole
3642    * (see comment in threadLazyBlackHole()).
3646   // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3647   while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3648     prev_frame = frame->link;
3649     frame->link = next_frame;
/* Debug sanity: the reversed-link walk must stay within the stack. */
3654     if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3655       printObj((StgClosure *)prev_frame);
3656       barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3659     switch (get_itbl(frame)->type) {
3662       if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3675       barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3677       printObj((StgClosure *)prev_frame);
3680   if (get_itbl(frame)->type == UPDATE_FRAME
3681       && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3686   /* Now, we're at the bottom.  Frame points to the lowest update
3687    * frame on the stack, and its link actually points to the frame
3688    * above. We have to walk back up the stack, squeezing out empty
3689    * update frames and turning the pointers back around on the way
3692    * The bottom-most frame (the STOP_FRAME) has not been altered, and
3693    * we never want to eliminate it anyway.  Just walk one step up
3694    * before starting to squeeze. When you get to the topmost frame,
3695    * remember that there are still some words above it that might have
3702   prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3705    * Loop through all of the frames (everything except the very
3706    * bottom).  Things are complicated by the fact that we have
3707    * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3708    * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3710   while (frame != NULL) {
3712     StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3713     rtsBool is_update_frame;
3715     next_frame = frame->link;
3716     is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3719      * 1. both the previous and current frame are update frames
3720      * 2. the current frame is empty
3722     if (prev_was_update_frame && is_update_frame &&
3723 (P_)prev_frame == frame_bottom + displacement) {
3725       // Now squeeze out the current frame
3726       StgClosure *updatee_keep   = prev_frame->updatee;
3727       StgClosure *updatee_bypass = frame->updatee;
3730       IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3734       /* Deal with blocking queues.  If both updatees have blocked
3735        * threads, then we should merge the queues into the update
3736        * frame that we're keeping.
3738        * Alternatively, we could just wake them up: they'll just go
3739        * straight to sleep on the proper blackhole!  This is less code
3740        * and probably less bug prone, although it's probably much
3743 #if 0 // do it properly...
3744 #  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3745 #    error Unimplemented lazy BH warning.  (KSW 1999-01)
3747       if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3748   || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3750 // Sigh.  It has one.  Don't lose those threads!
3751   if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3752   // Urgh.  Two queues.  Merge them.
3753   P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3755   while (keep_tso->link != END_TSO_QUEUE) {
3756     keep_tso = keep_tso->link;
3758   keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3761   // For simplicity, just swap the BQ for the BH
3762   P_ temp = updatee_keep;
3764   updatee_keep = updatee_bypass;
3765   updatee_bypass = temp;
3767   // Record the swap in the kept frame (below)
3768   prev_frame->updatee = updatee_keep;
3773       TICK_UPD_SQUEEZED();
3774       /* wasn't there something about update squeezing and ticky to be
3775        * sorted out?  oh yes: we aren't counting each enter properly
3776        * in this case.  See the log somewhere.  KSW 1999-04-21
3778        * Check two things: that the two update frames don't point to
3779        * the same object, and that the updatee_bypass isn't already an
3780        * indirection.  Both of these cases only happen when we're in a
3781        * black hole-style loop (and there are multiple update frames
3782        * on the stack pointing to the same closure), but they can both
3783        * screw us up if we don't check.
3785       if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3786 // this wakes the threads up
3787 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3790       sp = (P_)frame - 1; // sp = stuff to slide
3791       displacement += sizeofW(StgUpdateFrame);
3794       // No squeeze for this frame
3795       sp = frame_bottom - 1;    // Keep the current frame
3797       /* Do lazy black-holing.
3799       if (is_update_frame) {
3800 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3801 if (bh->header.info != &stg_BLACKHOLE_info &&
3802     bh->header.info != &stg_BLACKHOLE_BQ_info &&
3803     bh->header.info != &stg_CAF_BLACKHOLE_info) {
3804 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3805   belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3808   /* zero out the slop so that the sanity checker can tell
3809    * where the next closure is.
3812     StgInfoTable *info = get_itbl(bh);
3813     nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3814     /* don't zero out slop for a THUNK_SELECTOR, because its layout
3815      * info is used for a different purpose, and it's exactly the
3816      * same size as a BLACKHOLE in any case.
3818     if (info->type != THUNK_SELECTOR) {
3819       for (i = np; i < np + nw; i++) {
3820 ((StgClosure *)bh)->payload[i] = 0;
3825   SET_INFO(bh,&stg_BLACKHOLE_info);
3829       // Fix the link in the current frame (should point to the frame below)
3830       frame->link = prev_frame;
3831       prev_was_update_frame = is_update_frame;
3834     // Now slide all words from sp up to the next frame
3836     if (displacement > 0) {
3837       P_ next_frame_bottom;
3839       if (next_frame != NULL)
3840 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3842 next_frame_bottom = tso->sp - 1;
3846       belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3850       while (sp >= next_frame_bottom) {
3851 sp[displacement] = *sp;
3855       (P_)prev_frame = (P_)frame + displacement;
/* Finally adjust the TSO's stack pointer and su chain for the slide. */
3859   tso->sp += displacement;
3860   tso->su = prev_frame;
3863      belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3864    squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
3869 /* -----------------------------------------------------------------------------
3872 * We have to prepare for GC - this means doing lazy black holing
3873 * here. We also take the opportunity to do stack squeezing if it's
3875 * -------------------------------------------------------------------------- */
/* Prepare a paused thread for possible GC: squeeze the stack (which
 * also black-holes) when the squeezeUpdFrames flag is on, otherwise
 * just do lazy black-holing.
 * NOTE(review): the `else` joining the two calls is outside this
 * excerpt — confirm against the full source.
 */
3877 threadPaused(StgTSO *tso)
3879   if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3880     threadSqueezeStack(tso);    // does black holing too
3882     threadLazyBlackHole(tso);
3885 /* -----------------------------------------------------------------------------
3887 * -------------------------------------------------------------------------- */
/* Debugging aid: print every closure address and type on a generation's
 * mut_once list to stderr.
 */
3891 printMutOnceList(generation *gen)
3893   StgMutClosure *p, *next;
3895   p = gen->mut_once_list;
3898   fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3899   for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3900     fprintf(stderr, "%p (%s), ",
3901     p, info_type((StgClosure *)p));
3903   fputc('\n', stderr);
/* Debugging aid: print every closure address and type on a generation's
 * mutable list to stderr.
 */
3907 printMutableList(generation *gen)
3909   StgMutClosure *p, *next;
3914   fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3915   for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3916     fprintf(stderr, "%p (%s), ",
3917     p, info_type((StgClosure *)p));
3919   fputc('\n', stderr);
/* Return rtsTrue iff the closure's type is one that can legitimately
 * appear on a step's new_large_objects list (see scavenge_large).
 */
static inline rtsBool
3923 maybeLarge(StgClosure *closure)
3925   StgInfoTable *info = get_itbl(closure);
3927   /* closure types that may be found on the new_large_objects list;
3928      see scavenge_large */
3929   return (info->type == MUT_ARR_PTRS ||
3930   info->type == MUT_ARR_PTRS_FROZEN ||
3931   info->type == TSO ||
3932   info->type == ARR_WORDS);