1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.124 2001/10/17 15:19:24 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
10 #include "PosixSource.h"
15 #include "StoragePriv.h"
#include "SchedAPI.h" // for RevertCAFs prototype
20 #include "BlockAlloc.h"
26 #include "StablePriv.h"
28 #include "ParTicky.h" // ToDo: move into Rts.h
29 #include "GCCompact.h"
30 #if defined(GRAN) || defined(PAR)
31 # include "GranSimRts.h"
32 # include "ParallelRts.h"
36 # include "ParallelDebug.h"
41 #if defined(RTS_GTK_FRONTPANEL)
42 #include "FrontPanel.h"
45 /* STATIC OBJECT LIST.
48 * We maintain a linked list of static objects that are still live.
49 * The requirements for this list are:
51 * - we need to scan the list while adding to it, in order to
52 * scavenge all the static objects (in the same way that
53 * breadth-first scavenging works for dynamic objects).
55 * - we need to be able to tell whether an object is already on
56 * the list, to break loops.
58 * Each static object has a "static link field", which we use for
59 * linking objects on to the list. We use a stack-type list, consing
60 * objects on the front as they are added (this means that the
* scavenge phase is depth-first, not breadth-first, but that
* shouldn't matter).
*
64 * A separate list is kept for objects that have been scavenged
65 * already - this is so that we can zero all the marks afterwards.
67 * An object is on the list if its static link field is non-zero; this
68 * means that we have to mark the end of the list with '1', not NULL.
70 * Extra notes for generational GC:
72 * Each generation has a static object list associated with it. When
73 * collecting generations up to N, we treat the static object lists
74 * from generations > N as roots.
76 * We build up a static object list while collecting generations 0..N,
77 * which is then appended to the static object list of generation N+1.
79 StgClosure* static_objects; // live static objects
80 StgClosure* scavenged_static_objects; // static objects scavenged so far
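/* An illustrative sketch (not part of the build) of the list discipline
 * described above; the real code is in evacuate() and scavenge_static()
 * further down.  The sentinel is END_OF_STATIC_LIST, as set up at the
 * start of GarbageCollect().
 */
#if 0
   // adding a static closure q during a major GC:
   if (STATIC_LINK(info,q) == NULL) {          // not on the list yet
       STATIC_LINK(info,q) = static_objects;   // cons onto the front
       static_objects = q;
   }
   // scavenge_static() later unlinks objects from static_objects and
   // moves them onto scavenged_static_objects, so that all the link
   // fields can be zeroed again at the end of GC.
#endif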
82 /* N is the oldest generation being collected, where the generations
83 * are numbered starting at 0. A major GC (indicated by the major_gc
84 * flag) is when we're collecting all generations. We only attempt to
85 * deal with static objects and GC CAFs when doing a major GC.
88 static rtsBool major_gc;
90 /* Youngest generation that objects should be evacuated to in
91 * evacuate(). (Logically an argument to evacuate, but it's static
92 * a lot of the time so we optimise it into a global variable).
98 StgWeak *old_weak_ptr_list; // also pending finaliser list
99 static rtsBool weak_done; // all done for this pass
101 /* List of all threads during GC
103 static StgTSO *old_all_threads;
104 static StgTSO *resurrected_threads;
106 /* Flag indicating failure to evacuate an object to the desired
109 static rtsBool failed_to_evac;
111 /* Old to-space (used for two-space collector only)
113 bdescr *old_to_blocks;
115 /* Data used for allocation area sizing.
117 lnat new_blocks; // blocks allocated during this GC
118 lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
120 /* Used to avoid long recursion due to selector thunks
122 lnat thunk_selector_depth = 0;
123 #define MAX_THUNK_SELECTOR_DEPTH 256
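/* The depth counter above bounds the recursion in the THUNK_SELECTOR case
 * of evacuate(): performing the selection may require evacuating the
 * selectee first, and chains of selector thunks could otherwise recurse
 * without limit.  Past MAX_THUNK_SELECTOR_DEPTH nested calls the selector
 * is simply copied unevaluated.
 */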
125 /* -----------------------------------------------------------------------------
126 Static function declarations
127 -------------------------------------------------------------------------- */
129 static void mark_root ( StgClosure **root );
130 static StgClosure * evacuate ( StgClosure *q );
131 static void zero_static_object_list ( StgClosure* first_static );
132 static void zero_mutable_list ( StgMutClosure *first );
134 static rtsBool traverse_weak_ptr_list ( void );
135 static void mark_weak_ptr_list ( StgWeak **list );
137 static void scavenge ( step * );
138 static void scavenge_mark_stack ( void );
139 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
140 static rtsBool scavenge_one ( StgPtr p );
141 static void scavenge_large ( step * );
142 static void scavenge_static ( void );
143 static void scavenge_mutable_list ( generation *g );
144 static void scavenge_mut_once_list ( generation *g );
145 static void scavengeCAFs ( void );
147 #if 0 && defined(DEBUG)
148 static void gcCAFs ( void );
151 /* -----------------------------------------------------------------------------
152 inline functions etc. for dealing with the mark bitmap & stack.
153 -------------------------------------------------------------------------- */
155 #define MARK_STACK_BLOCKS 4
157 static bdescr *mark_stack_bdescr;
158 static StgPtr *mark_stack;
159 static StgPtr *mark_sp;
160 static StgPtr *mark_splim;
162 // Flag and pointers used for falling back to a linear scan when the
163 // mark stack overflows.
164 static rtsBool mark_stack_overflowed;
165 static bdescr *oldgen_scan_bd;
166 static StgPtr oldgen_scan;
168 static inline rtsBool
169 mark_stack_empty(void)
171 return mark_sp == mark_stack;
174 static inline rtsBool
175 mark_stack_full(void)
177 return mark_sp >= mark_splim;
181 reset_mark_stack(void)
183 mark_sp = mark_stack;
187 push_mark_stack(StgPtr p)
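/* (The push/pop bodies are one-liners over mark_sp; a plausible sketch,
 * consistent with the comparisons above, is *mark_sp++ = p for push and
 * *--mark_sp for pop.)  The mark stack is only used when the oldest
 * generation is being compacted: evacuate() pushes live closures onto it,
 * scavenge_mark_stack() pops and scavenges them, and if the four-block
 * stack overflows we fall back to a linear scan of the old generation
 * (see mark_stack_overflowed and oldgen_scan above).
 */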
198 /* -----------------------------------------------------------------------------
201 For garbage collecting generation N (and all younger generations):
203 - follow all pointers in the root set. the root set includes all
204 mutable objects in all steps in all generations.
206 - for each pointer, evacuate the object it points to into either
207 + to-space in the next higher step in that generation, if one exists,
208 + if the object's generation == N, then evacuate it to the next
209 generation if one exists, or else to-space in the current
211 + if the object's generation < N, then evacuate it to to-space
212 in the next generation.
214 - repeatedly scavenge to-space from each step in each generation
215 being collected until no more objects can be evacuated.
217 - free from-space in each step, and set from-space = to-space.
219 -------------------------------------------------------------------------- */
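/* Restating the destination rules above (illustrative only; the real
 * decision is taken in evacuate()/copy() below via each step's to-space):
 *
 *   - prefer to-space in the next higher step of the object's own
 *     generation, if there is one;
 *   - an object already in generation N moves on to generation N+1 if
 *     one exists, otherwise into to-space of its current generation;
 *   - an object in a generation younger than N moves into to-space in
 *     the next generation;
 *   - objects in generations older than N are not touched at all.
 */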
222 GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
226 lnat live, allocated, collected = 0, copied = 0;
227 lnat oldgen_saved_blocks = 0;
231 CostCentreStack *prev_CCS;
234 #if defined(DEBUG) && defined(GRAN)
235 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
239 // tell the stats department that we've started a GC
242 // Init stats and print par specific (timing) info
243 PAR_TICKY_PAR_START();
245 // attribute any costs to CCS_GC
251 /* Approximate how much we allocated.
252 * Todo: only when generating stats?
254 allocated = calcAllocated();
256 /* Figure out which generation to collect
258 if (force_major_gc) {
259 N = RtsFlags.GcFlags.generations - 1;
263 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
264 if (generations[g].steps[0].n_blocks +
265 generations[g].steps[0].n_large_blocks
266 >= generations[g].max_blocks) {
270 major_gc = (N == RtsFlags.GcFlags.generations-1);
273 #ifdef RTS_GTK_FRONTPANEL
274 if (RtsFlags.GcFlags.frontpanel) {
275 updateFrontPanelBeforeGC(N);
279 // check stack sanity *before* GC (ToDo: check all threads)
281 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
283 IF_DEBUG(sanity, checkFreeListSanity());
285 /* Initialise the static object lists
287 static_objects = END_OF_STATIC_LIST;
288 scavenged_static_objects = END_OF_STATIC_LIST;
290 /* zero the mutable list for the oldest generation (see comment by
291 * zero_mutable_list below).
294 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
297 /* Save the old to-space if we're doing a two-space collection
299 if (RtsFlags.GcFlags.generations == 1) {
300 old_to_blocks = g0s0->to_blocks;
301 g0s0->to_blocks = NULL;
304 /* Keep a count of how many new blocks we allocated during this GC
305 * (used for resizing the allocation area, later).
309 /* Initialise to-space in all the generations/steps that we're
312 for (g = 0; g <= N; g++) {
313 generations[g].mut_once_list = END_MUT_LIST;
314 generations[g].mut_list = END_MUT_LIST;
316 for (s = 0; s < generations[g].n_steps; s++) {
318 // generation 0, step 0 doesn't need to-space
319 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
323 /* Get a free block for to-space. Extra blocks will be chained on
327 stp = &generations[g].steps[s];
328 ASSERT(stp->gen_no == g);
329 ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
333 bd->flags = BF_EVACUATED; // it's a to-space block
335 stp->hpLim = stp->hp + BLOCK_SIZE_W;
338 stp->n_to_blocks = 1;
339 stp->scan = bd->start;
341 stp->new_large_objects = NULL;
342 stp->scavenged_large_objects = NULL;
343 stp->n_scavenged_large_blocks = 0;
345 // mark the large objects as not evacuated yet
346 for (bd = stp->large_objects; bd; bd = bd->link) {
347 bd->flags = BF_LARGE;
350 // for a compacted step, we need to allocate the bitmap
351 if (stp->is_compacted) {
352 nat bitmap_size; // in bytes
353 bdescr *bitmap_bdescr;
356 bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
358 if (bitmap_size > 0) {
359 bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
361 stp->bitmap = bitmap_bdescr;
362 bitmap = bitmap_bdescr->start;
364 IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
365 bitmap_size, bitmap););
367 // don't forget to fill it with zeros!
368 memset(bitmap, 0, bitmap_size);
370 // for each block in this step, point to its bitmap from the
372 for (bd=stp->blocks; bd != NULL; bd = bd->link) {
373 bd->u.bitmap = bitmap;
374 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
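/* One mark bit per word of heap.  For example, assuming 4-kbyte blocks
 * and 32-bit words, each block holds 1024 words and therefore needs
 * 1024 bits = 128 bytes = 32 words of bitmap, which is exactly the
 * per-block stride added to 'bitmap' above.
 */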
381 /* make sure the older generations have at least one block to
* allocate into (this makes things easier for copy(), see below).
*/
384 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
385 for (s = 0; s < generations[g].n_steps; s++) {
386 stp = &generations[g].steps[s];
387 if (stp->hp_bd == NULL) {
388 ASSERT(stp->blocks == NULL);
393 bd->flags = 0; // *not* a to-space block or a large object
395 stp->hpLim = stp->hp + BLOCK_SIZE_W;
401 /* Set the scan pointer for older generations: remember we
402 * still have to scavenge objects that have been promoted. */
404 stp->scan_bd = stp->hp_bd;
405 stp->to_blocks = NULL;
406 stp->n_to_blocks = 0;
407 stp->new_large_objects = NULL;
408 stp->scavenged_large_objects = NULL;
409 stp->n_scavenged_large_blocks = 0;
413 /* Allocate a mark stack if we're doing a major collection.
416 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
417 mark_stack = (StgPtr *)mark_stack_bdescr->start;
418 mark_sp = mark_stack;
419 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
421 mark_stack_bdescr = NULL;
424 /* -----------------------------------------------------------------------
425 * follow all the roots that we know about:
426 * - mutable lists from each generation > N
427 * we want to *scavenge* these roots, not evacuate them: they're not
428 * going to move in this GC.
429 * Also: do them in reverse generation order. This is because we
430 * often want to promote objects that are pointed to by older
431 * generations early, so we don't have to repeatedly copy them.
432 * Doing the generations in reverse order ensures that we don't end
433 * up in the situation where we want to evac an object to gen 3 and
434 * it has already been evaced to gen 2.
438 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
439 generations[g].saved_mut_list = generations[g].mut_list;
440 generations[g].mut_list = END_MUT_LIST;
443 // Do the mut-once lists first
444 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
445 IF_PAR_DEBUG(verbose,
446 printMutOnceList(&generations[g]));
447 scavenge_mut_once_list(&generations[g]);
449 for (st = generations[g].n_steps-1; st >= 0; st--) {
450 scavenge(&generations[g].steps[st]);
454 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
455 IF_PAR_DEBUG(verbose,
456 printMutableList(&generations[g]));
457 scavenge_mutable_list(&generations[g]);
459 for (st = generations[g].n_steps-1; st >= 0; st--) {
460 scavenge(&generations[g].steps[st]);
467 /* follow all the roots that the application knows about.
470 get_roots(mark_root);
473 /* And don't forget to mark the TSO if we got here direct from
475 /* Not needed in a seq version?
477 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
481 // Mark the entries in the GALA table of the parallel system
482 markLocalGAs(major_gc);
483 // Mark all entries on the list of pending fetches
484 markPendingFetches(major_gc);
487 /* Mark the weak pointer list, and prepare to detect dead weak
490 mark_weak_ptr_list(&weak_ptr_list);
491 old_weak_ptr_list = weak_ptr_list;
492 weak_ptr_list = NULL;
493 weak_done = rtsFalse;
495 /* The all_threads list is like the weak_ptr_list.
496 * See traverse_weak_ptr_list() for the details.
498 old_all_threads = all_threads;
499 all_threads = END_TSO_QUEUE;
500 resurrected_threads = END_TSO_QUEUE;
502 /* Mark the stable pointer table.
504 markStablePtrTable(mark_root);
508 /* ToDo: To fix the caf leak, we need to make the commented out
509 * parts of this code do something sensible - as described in
512 extern void markHugsObjects(void);
517 /* -------------------------------------------------------------------------
518 * Repeatedly scavenge all the areas we know about until there's no
519 * more scavenging to be done.
526 // scavenge static objects
527 if (major_gc && static_objects != END_OF_STATIC_LIST) {
528 IF_DEBUG(sanity, checkStaticObjects(static_objects));
532 /* When scavenging the older generations: Objects may have been
533 * evacuated from generations <= N into older generations, and we
534 * need to scavenge these objects. We're going to try to ensure that
535 * any evacuations that occur move the objects into at least the
536 * same generation as the object being scavenged, otherwise we
537 * have to create new entries on the mutable list for the older
541 // scavenge each step in generations 0..maxgen
547 // scavenge objects in compacted generation
548 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
549 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
550 scavenge_mark_stack();
554 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
555 for (st = generations[gen].n_steps; --st >= 0; ) {
556 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
559 stp = &generations[gen].steps[st];
561 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
566 if (stp->new_large_objects != NULL) {
575 if (flag) { goto loop; }
578 if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
584 // Reconstruct the Global Address tables used in GUM
585 rebuildGAtables(major_gc);
586 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
589 // Now see which stable names are still alive.
592 // Tidy the end of the to-space chains
593 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
594 for (s = 0; s < generations[g].n_steps; s++) {
595 stp = &generations[g].steps[s];
596 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
597 stp->hp_bd->free = stp->hp;
598 stp->hp_bd->link = NULL;
603 // NO MORE EVACUATION AFTER THIS POINT!
604 // Finally: compaction of the oldest generation.
605 if (major_gc && oldest_gen->steps[0].is_compacted) {
606 // save number of blocks for stats
607 oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
611 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
613 /* run through all the generations/steps and tidy up
615 copied = new_blocks * BLOCK_SIZE_W;
616 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
619 generations[g].collections++; // for stats
622 for (s = 0; s < generations[g].n_steps; s++) {
624 stp = &generations[g].steps[s];
626 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
627 // stats information: how much we copied
629 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
634 // for generations we collected...
637 // rough calculation of garbage collected, for stats output
638 if (stp->is_compacted) {
639 collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
641 collected += stp->n_blocks * BLOCK_SIZE_W;
644 /* free old memory and shift to-space into from-space for all
645 * the collected steps (except the allocation area). These
* freed blocks will probably be quickly recycled.
648 if (!(g == 0 && s == 0)) {
649 if (stp->is_compacted) {
650 // for a compacted step, just shift the new to-space
651 // onto the front of the now-compacted existing blocks.
652 for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
653 bd->flags &= ~BF_EVACUATED; // now from-space
655 // tack the new blocks on the end of the existing blocks
656 if (stp->blocks == NULL) {
657 stp->blocks = stp->to_blocks;
659 for (bd = stp->blocks; bd != NULL; bd = next) {
662 bd->link = stp->to_blocks;
666 // add the new blocks to the block tally
667 stp->n_blocks += stp->n_to_blocks;
669 freeChain(stp->blocks);
670 stp->blocks = stp->to_blocks;
671 stp->n_blocks = stp->n_to_blocks;
672 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
673 bd->flags &= ~BF_EVACUATED; // now from-space
676 stp->to_blocks = NULL;
677 stp->n_to_blocks = 0;
680 /* LARGE OBJECTS. The current live large objects are chained on
681 * scavenged_large, having been moved during garbage
682 * collection from large_objects. Any objects left on
683 * large_objects list are therefore dead, so we free them here.
685 for (bd = stp->large_objects; bd != NULL; bd = next) {
691 // update the count of blocks used by large objects
692 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
693 bd->flags &= ~BF_EVACUATED;
695 stp->large_objects = stp->scavenged_large_objects;
696 stp->n_large_blocks = stp->n_scavenged_large_blocks;
699 // for older generations...
701 /* For older generations, we need to append the
702 * scavenged_large_object list (i.e. large objects that have been
703 * promoted during this GC) to the large_object list for that step.
705 for (bd = stp->scavenged_large_objects; bd; bd = next) {
707 bd->flags &= ~BF_EVACUATED;
708 dbl_link_onto(bd, &stp->large_objects);
711 // add the new blocks we promoted during this GC
712 stp->n_blocks += stp->n_to_blocks;
713 stp->n_large_blocks += stp->n_scavenged_large_blocks;
718 /* Reset the sizes of the older generations when we do a major
721 * CURRENT STRATEGY: make all generations except zero the same size.
722 * We have to stay within the maximum heap size, and leave a certain
723 * percentage of the maximum heap size available to allocate into.
725 if (major_gc && RtsFlags.GcFlags.generations > 1) {
726 nat live, size, min_alloc;
727 nat max = RtsFlags.GcFlags.maxHeapSize;
728 nat gens = RtsFlags.GcFlags.generations;
730 // live in the oldest generations
731 live = oldest_gen->steps[0].n_blocks +
732 oldest_gen->steps[0].n_large_blocks;
734 // default max size for all generations except zero
735 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
736 RtsFlags.GcFlags.minOldGenSize);
738 // minimum size for generation zero
739 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
740 RtsFlags.GcFlags.minAllocAreaSize);
742 // Auto-enable compaction when the residency reaches a
743 // certain percentage of the maximum heap size (default: 30%).
744 if (RtsFlags.GcFlags.generations > 1 &&
745 (RtsFlags.GcFlags.compact ||
747 oldest_gen->steps[0].n_blocks >
748 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
749 oldest_gen->steps[0].is_compacted = 1;
750 // fprintf(stderr,"compaction: on\n", live);
752 oldest_gen->steps[0].is_compacted = 0;
753 // fprintf(stderr,"compaction: off\n", live);
756 // if we're going to go over the maximum heap size, reduce the
757 // size of the generations accordingly. The calculation is
758 // different if compaction is turned on, because we don't need
759 // to double the space required to collect the old generation.
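// (Sketch of the arithmetic for the plain copying case: each of the
// (gens-1) older generations may need up to 2*size blocks while it is
// being collected (from-space plus to-space), and generation zero needs
// min_alloc blocks, so we want
//         size * (gens-1) * 2 + min_alloc <= max
// which gives the  size = (max - min_alloc) / ((gens-1) * 2)  used below.
// With compaction the oldest generation doesn't need a separate to-space,
// hence the slightly different divisor in the compacted branch.)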
762 // this test is necessary to ensure that the calculations
763 // below don't have any negative results - we're working
764 // with unsigned values here.
765 if (max < min_alloc) {
769 if (oldest_gen->steps[0].is_compacted) {
770 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
771 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
774 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
775 size = (max - min_alloc) / ((gens - 1) * 2);
785 fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
786 min_alloc, size, max);
789 for (g = 0; g < gens; g++) {
790 generations[g].max_blocks = size;
794 // Guess the amount of live data for stats.
797 /* Free the small objects allocated via allocate(), since this will
798 * all have been copied into G0S1 now.
800 if (small_alloc_list != NULL) {
801 freeChain(small_alloc_list);
803 small_alloc_list = NULL;
807 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
809 // Start a new pinned_object_block
810 pinned_object_block = NULL;
812 /* Free the mark stack.
814 if (mark_stack_bdescr != NULL) {
815 freeGroup(mark_stack_bdescr);
820 for (g = 0; g <= N; g++) {
821 for (s = 0; s < generations[g].n_steps; s++) {
822 stp = &generations[g].steps[s];
823 if (stp->is_compacted && stp->bitmap != NULL) {
824 freeGroup(stp->bitmap);
829 /* Two-space collector:
830 * Free the old to-space, and estimate the amount of live data.
832 if (RtsFlags.GcFlags.generations == 1) {
835 if (old_to_blocks != NULL) {
836 freeChain(old_to_blocks);
838 for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
839 bd->flags = 0; // now from-space
842 /* For a two-space collector, we need to resize the nursery. */
844 /* set up a new nursery. Allocate a nursery size based on a
845 * function of the amount of live data (by default a factor of 2)
846 * Use the blocks from the old nursery if possible, freeing up any
849 * If we get near the maximum heap size, then adjust our nursery
850 * size accordingly. If the nursery is the same size as the live
851 * data (L), then we need 3L bytes. We can reduce the size of the
852 * nursery to bring the required memory down near 2L bytes.
854 * A normal 2-space collector would need 4L bytes to give the same
855 * performance we get from 3L bytes, reducing to the same
856 * performance at 2L bytes.
858 blocks = g0s0->n_to_blocks;
860 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
861 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
862 RtsFlags.GcFlags.maxHeapSize ) {
863 long adjusted_blocks; // signed on purpose
866 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
867 IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
868 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
869 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
872 blocks = adjusted_blocks;
875 blocks *= RtsFlags.GcFlags.oldGenFactor;
876 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
877 blocks = RtsFlags.GcFlags.minAllocAreaSize;
880 resizeNursery(blocks);
883 /* Generational collector:
884 * If the user has given us a suggested heap size, adjust our
885 * allocation area to make best use of the memory available.
888 if (RtsFlags.GcFlags.heapSizeSuggestion) {
890 nat needed = calcNeeded(); // approx blocks needed at next GC
892 /* Guess how much will be live in generation 0 step 0 next time.
893 * A good approximation is obtained by finding the
894 * percentage of g0s0 that was live at the last minor GC.
897 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
900 /* Estimate a size for the allocation area based on the
901 * information available. We might end up going slightly under
902 * or over the suggested heap size, but we should be pretty
905 * Formula: suggested - needed
906 * ----------------------------
907 * 1 + g0s0_pcnt_kept/100
909 * where 'needed' is the amount of memory needed at the next
910 * collection for collecting all steps except g0s0.
913 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
914 (100 + (long)g0s0_pcnt_kept);
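/* Worked example (numbers purely illustrative): with a suggested heap
 * of 1000 blocks, 200 blocks needed for the other steps and
 * g0s0_pcnt_kept = 30, this gives (1000-200)*100/130 ~= 615 blocks of
 * allocation area; adding the ~30% of it expected to survive the next
 * minor GC (~185 blocks) and the 200 blocks needed elsewhere brings us
 * back to roughly the suggested 1000 blocks.
 */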
916 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
917 blocks = RtsFlags.GcFlags.minAllocAreaSize;
920 resizeNursery((nat)blocks);
924 // mark the garbage collected CAFs as dead
925 #if 0 && defined(DEBUG) // doesn't work at the moment
926 if (major_gc) { gcCAFs(); }
929 // zero the scavenged static object list
931 zero_static_object_list(scavenged_static_objects);
937 // start any pending finalizers
938 scheduleFinalizers(old_weak_ptr_list);
940 // send exceptions to any threads which were about to die
941 resurrectThreads(resurrected_threads);
943 // Update the stable pointer hash table.
944 updateStablePtrTable(major_gc);
946 // check sanity after GC
947 IF_DEBUG(sanity, checkSanity());
949 // extra GC trace info
950 IF_DEBUG(gc, statDescribeGens());
953 // symbol-table based profiling
954 /* heapCensus(to_blocks); */ /* ToDo */
957 // restore enclosing cost centre
963 // check for memory leaks if sanity checking is on
964 IF_DEBUG(sanity, memInventory());
966 #ifdef RTS_GTK_FRONTPANEL
967 if (RtsFlags.GcFlags.frontpanel) {
968 updateFrontPanelAfterGC( N, live );
972 // ok, GC over: tell the stats department what happened.
973 stat_endGC(allocated, collected, live, copied, N);
979 /* -----------------------------------------------------------------------------
982 traverse_weak_ptr_list is called possibly many times during garbage
983 collection. It returns a flag indicating whether it did any work
984 (i.e. called evacuate on any live pointers).
986 Invariant: traverse_weak_ptr_list is called when the heap is in an
987 idempotent state. That means that there are no pending
988 evacuate/scavenge operations. This invariant helps the weak
989 pointer code decide which weak pointers are dead - if there are no
990 new live weak pointers, then all the currently unreachable ones are
993 For generational GC: we just don't try to finalize weak pointers in
994 older generations than the one we're collecting. This could
995 probably be optimised by keeping per-generation lists of weak
996 pointers, but for a few weak pointers this scheme will work.
997 -------------------------------------------------------------------------- */
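/* In outline (the code below implements this): repeatedly walk
 * old_weak_ptr_list; for every weak pointer whose key isAlive(), evacuate
 * its value and finalizer and move it onto the new weak_ptr_list.  Keep
 * iterating as long as anything changes, since evacuating one value can
 * make another key reachable.  Once a pass makes no progress, whatever is
 * left on old_weak_ptr_list is dead: its finalizers are still evacuated
 * (they have to run), and the list later becomes the pending-finalizer
 * list passed to scheduleFinalizers().
 */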
1000 traverse_weak_ptr_list(void)
1002 StgWeak *w, **last_w, *next_w;
1004 rtsBool flag = rtsFalse;
1006 if (weak_done) { return rtsFalse; }
1008 /* doesn't matter where we evacuate values/finalizers to, since
1009 * these pointers are treated as roots (iff the keys are alive).
1013 last_w = &old_weak_ptr_list;
1014 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
1016 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
1017 * called on a live weak pointer object. Just remove it.
1019 if (w->header.info == &stg_DEAD_WEAK_info) {
1020 next_w = ((StgDeadWeak *)w)->link;
1025 ASSERT(get_itbl(w)->type == WEAK);
1027 /* Now, check whether the key is reachable.
1029 new = isAlive(w->key);
1032 // evacuate the value and finalizer
1033 w->value = evacuate(w->value);
1034 w->finalizer = evacuate(w->finalizer);
1035 // remove this weak ptr from the old_weak_ptr list
1037 // and put it on the new weak ptr list
1039 w->link = weak_ptr_list;
1042 IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
1046 last_w = &(w->link);
1052 /* Now deal with the all_threads list, which behaves somewhat like
1053 * the weak ptr list. If we discover any threads that are about to
1054 * become garbage, we wake them up and administer an exception.
1057 StgTSO *t, *tmp, *next, **prev;
1059 prev = &old_all_threads;
1060 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1062 (StgClosure *)tmp = isAlive((StgClosure *)t);
1068 ASSERT(get_itbl(t)->type == TSO);
1069 switch (t->what_next) {
1070 case ThreadRelocated:
1075 case ThreadComplete:
// finished or died.  The thread might still be alive, but we
1077 // don't keep it on the all_threads list. Don't forget to
1078 // stub out its global_link field.
1079 next = t->global_link;
1080 t->global_link = END_TSO_QUEUE;
1088 // not alive (yet): leave this thread on the old_all_threads list.
1089 prev = &(t->global_link);
1090 next = t->global_link;
1093 // alive: move this thread onto the all_threads list.
1094 next = t->global_link;
1095 t->global_link = all_threads;
1102 /* If we didn't make any changes, then we can go round and kill all
1103 * the dead weak pointers. The old_weak_ptr list is used as a list
1104 * of pending finalizers later on.
1106 if (flag == rtsFalse) {
1107 for (w = old_weak_ptr_list; w; w = w->link) {
1108 w->finalizer = evacuate(w->finalizer);
1111 /* And resurrect any threads which were about to become garbage.
1114 StgTSO *t, *tmp, *next;
1115 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1116 next = t->global_link;
1117 (StgClosure *)tmp = evacuate((StgClosure *)t);
1118 tmp->global_link = resurrected_threads;
1119 resurrected_threads = tmp;
1123 weak_done = rtsTrue;
1129 /* -----------------------------------------------------------------------------
1130 After GC, the live weak pointer list may have forwarding pointers
1131 on it, because a weak pointer object was evacuated after being
1132 moved to the live weak pointer list. We remove those forwarding
1135 Also, we don't consider weak pointer objects to be reachable, but
1136 we must nevertheless consider them to be "live" and retain them.
1137 Therefore any weak pointer objects which haven't as yet been
1138 evacuated need to be evacuated now.
1139 -------------------------------------------------------------------------- */
1143 mark_weak_ptr_list ( StgWeak **list )
1145 StgWeak *w, **last_w;
1148 for (w = *list; w; w = w->link) {
1149 (StgClosure *)w = evacuate((StgClosure *)w);
1151 last_w = &(w->link);
1155 /* -----------------------------------------------------------------------------
1156 isAlive determines whether the given closure is still alive (after
1157 a garbage collection) or not. It returns the new address of the
1158 closure if it is alive, or NULL otherwise.
1160 NOTE: Use it before compaction only!
1161 -------------------------------------------------------------------------- */
1165 isAlive(StgClosure *p)
1167 const StgInfoTable *info;
1174 /* ToDo: for static closures, check the static link field.
1175 * Problem here is that we sometimes don't set the link field, eg.
1176 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1181 // ignore closures in generations that we're not collecting.
1182 if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
1185 // large objects have an evacuated flag
1186 if (bd->flags & BF_LARGE) {
1187 if (bd->flags & BF_EVACUATED) {
1193 // check the mark bit for compacted steps
1194 if (bd->step->is_compacted && is_marked((P_)p,bd)) {
1198 switch (info->type) {
1203 case IND_OLDGEN: // rely on compatible layout with StgInd
1204 case IND_OLDGEN_PERM:
1205 // follow indirections
1206 p = ((StgInd *)p)->indirectee;
1211 return ((StgEvacuated *)p)->evacuee;
1214 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1215 p = (StgClosure *)((StgTSO *)p)->link;
1227 mark_root(StgClosure **root)
1229 *root = evacuate(*root);
1235 bdescr *bd = allocBlock();
1236 bd->gen_no = stp->gen_no;
1239 if (stp->gen_no <= N) {
1240 bd->flags = BF_EVACUATED;
1245 stp->hp_bd->free = stp->hp;
1246 stp->hp_bd->link = bd;
1247 stp->hp = bd->start;
1248 stp->hpLim = stp->hp + BLOCK_SIZE_W;
1255 static __inline__ void
1256 upd_evacuee(StgClosure *p, StgClosure *dest)
1258 p->header.info = &stg_EVACUATED_info;
1259 ((StgEvacuated *)p)->evacuee = dest;
1263 static __inline__ StgClosure *
1264 copy(StgClosure *src, nat size, step *stp)
1268 TICK_GC_WORDS_COPIED(size);
1269 /* Find out where we're going, using the handy "to" pointer in
1270 * the step of the source object. If it turns out we need to
1271 * evacuate to an older generation, adjust it here (see comment
1274 if (stp->gen_no < evac_gen) {
1275 #ifdef NO_EAGER_PROMOTION
1276 failed_to_evac = rtsTrue;
1278 stp = &generations[evac_gen].steps[0];
1282 /* chain a new block onto the to-space for the destination step if
1285 if (stp->hp + size >= stp->hpLim) {
1289 for(to = stp->hp, from = (P_)src; size>0; --size) {
1295 upd_evacuee(src,(StgClosure *)dest);
1296 return (StgClosure *)dest;
1299 /* Special version of copy() for when we only want to copy the info
1300 * pointer of an object, but reserve some padding after it. This is
1301 * used to optimise evacuation of BLACKHOLEs.
1305 static __inline__ StgClosure *
1306 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1310 TICK_GC_WORDS_COPIED(size_to_copy);
1311 if (stp->gen_no < evac_gen) {
1312 #ifdef NO_EAGER_PROMOTION
1313 failed_to_evac = rtsTrue;
1315 stp = &generations[evac_gen].steps[0];
1319 if (stp->hp + size_to_reserve >= stp->hpLim) {
1323 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1328 stp->hp += size_to_reserve;
1329 upd_evacuee(src,(StgClosure *)dest);
1330 return (StgClosure *)dest;
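/* For example, the (SE_)CAF_BLACKHOLE and BLACKHOLE cases in evacuate()
 * below call copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp): only
 * the header is actually copied, but the full black-hole size is reserved
 * in to-space, presumably so the closure can still be updated in place
 * later.
 */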
1334 /* -----------------------------------------------------------------------------
1335 Evacuate a large object
1337 This just consists of removing the object from the (doubly-linked)
1338 large_alloc_list, and linking it on to the (singly-linked)
1339 new_large_objects list, from where it will be scavenged later.
1341 Convention: bd->flags has BF_EVACUATED set for a large object
1342 that has been evacuated, or unset otherwise.
1343 -------------------------------------------------------------------------- */
1347 evacuate_large(StgPtr p)
1349 bdescr *bd = Bdescr(p);
1352 // object must be at the beginning of the block (or be a ByteArray)
1353 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
1354 (((W_)p & BLOCK_MASK) == 0));
1356 // already evacuated?
1357 if (bd->flags & BF_EVACUATED) {
1358 /* Don't forget to set the failed_to_evac flag if we didn't get
1359 * the desired destination (see comments in evacuate()).
1361 if (bd->gen_no < evac_gen) {
1362 failed_to_evac = rtsTrue;
1363 TICK_GC_FAILED_PROMOTION();
1369 // remove from large_object list
1371 bd->u.back->link = bd->link;
1372 } else { // first object in the list
1373 stp->large_objects = bd->link;
1376 bd->link->u.back = bd->u.back;
1379 /* link it on to the evacuated large object list of the destination step
1382 if (stp->gen_no < evac_gen) {
1383 #ifdef NO_EAGER_PROMOTION
1384 failed_to_evac = rtsTrue;
1386 stp = &generations[evac_gen].steps[0];
1391 bd->gen_no = stp->gen_no;
1392 bd->link = stp->new_large_objects;
1393 stp->new_large_objects = bd;
1394 bd->flags |= BF_EVACUATED;
1397 /* -----------------------------------------------------------------------------
1398 Adding a MUT_CONS to an older generation.
1400 This is necessary from time to time when we end up with an
1401 old-to-new generation pointer in a non-mutable object. We defer
1402 the promotion until the next GC.
1403 -------------------------------------------------------------------------- */
1407 mkMutCons(StgClosure *ptr, generation *gen)
1412 stp = &gen->steps[0];
1414 /* chain a new block onto the to-space for the destination step if
1417 if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
1421 q = (StgMutVar *)stp->hp;
1422 stp->hp += sizeofW(StgMutVar);
1424 SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
1426 recordOldToNewPtrs((StgMutClosure *)q);
1428 return (StgClosure *)q;
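/* mkMutCons is used by the scavenging code when failed_to_evac is set:
 * the object just scavenged still points into a younger generation, so a
 * MUT_CONS cell referring to it is allocated in the old generation and
 * recorded (via recordOldToNewPtrs above) so that the pointer is traced
 * again at the next GC.
 */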
1431 /* -----------------------------------------------------------------------------
1434 This is called (eventually) for every live object in the system.
1436 The caller to evacuate specifies a desired generation in the
1437 evac_gen global variable. The following conditions apply to
1438 evacuating an object which resides in generation M when we're
collecting up to generation N

if  M >= evac_gen
        if  M > N     do nothing
        else          evac to step->to

if  M < evac_gen      evac to evac_gen, step 0

if the object is already evacuated, then we check which generation
it now resides in.
1450 if M >= evac_gen do nothing
1451 if M < evac_gen set failed_to_evac flag to indicate that we
1452 didn't manage to evacuate this object into evac_gen.
1454 -------------------------------------------------------------------------- */
1457 evacuate(StgClosure *q)
1462 const StgInfoTable *info;
1465 if (HEAP_ALLOCED(q)) {
1468 if (bd->gen_no > N) {
1469 /* Can't evacuate this object, because it's in a generation
1470 * older than the ones we're collecting. Let's hope that it's
1471 * in evac_gen or older, or we will have to arrange to track
1472 * this pointer using the mutable list.
1474 if (bd->gen_no < evac_gen) {
1476 failed_to_evac = rtsTrue;
1477 TICK_GC_FAILED_PROMOTION();
1482 /* evacuate large objects by re-linking them onto a different list.
1484 if (bd->flags & BF_LARGE) {
1486 if (info->type == TSO &&
1487 ((StgTSO *)q)->what_next == ThreadRelocated) {
1488 q = (StgClosure *)((StgTSO *)q)->link;
1491 evacuate_large((P_)q);
1495 /* If the object is in a step that we're compacting, then we
1496 * need to use an alternative evacuate procedure.
1498 if (bd->step->is_compacted) {
1499 if (!is_marked((P_)q,bd)) {
1501 if (mark_stack_full()) {
1502 mark_stack_overflowed = rtsTrue;
1505 push_mark_stack((P_)q);
1513 else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
1516 // make sure the info pointer is into text space
1517 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1518 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1521 switch (info -> type) {
1525 to = copy(q,sizeW_fromITBL(info),stp);
1530 StgWord w = (StgWord)q->payload[0];
1531 if (q->header.info == Czh_con_info &&
1532 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
1533 (StgChar)w <= MAX_CHARLIKE) {
1534 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1536 if (q->header.info == Izh_con_info &&
1537 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1538 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1540 // else, fall through ...
1546 return copy(q,sizeofW(StgHeader)+1,stp);
1548 case THUNK_1_0: // here because of MIN_UPD_SIZE
1553 #ifdef NO_PROMOTE_THUNKS
1554 if (bd->gen_no == 0 &&
1555 bd->step->no != 0 &&
1556 bd->step->no == generations[bd->gen_no].n_steps-1) {
1560 return copy(q,sizeofW(StgHeader)+2,stp);
1568 return copy(q,sizeofW(StgHeader)+2,stp);
1574 case IND_OLDGEN_PERM:
1579 return copy(q,sizeW_fromITBL(info),stp);
1582 case SE_CAF_BLACKHOLE:
1585 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
1588 to = copy(q,BLACKHOLE_sizeW(),stp);
1591 case THUNK_SELECTOR:
1593 const StgInfoTable* selectee_info;
1594 StgClosure* selectee = ((StgSelector*)q)->selectee;
1597 selectee_info = get_itbl(selectee);
1598 switch (selectee_info->type) {
1606 case CONSTR_NOCAF_STATIC:
1608 StgWord offset = info->layout.selector_offset;
1610 // check that the size is in range
1612 (StgWord32)(selectee_info->layout.payload.ptrs +
1613 selectee_info->layout.payload.nptrs));
1615 // perform the selection!
1616 q = selectee->payload[offset];
1618 /* if we're already in to-space, there's no need to continue
1619 * with the evacuation, just update the source address with
1620 * a pointer to the (evacuated) constructor field.
1622 if (HEAP_ALLOCED(q)) {
1623 bdescr *bd = Bdescr((P_)q);
1624 if (bd->flags & BF_EVACUATED) {
1625 if (bd->gen_no < evac_gen) {
1626 failed_to_evac = rtsTrue;
1627 TICK_GC_FAILED_PROMOTION();
1633 /* otherwise, carry on and evacuate this constructor field,
1634 * (but not the constructor itself)
1643 case IND_OLDGEN_PERM:
1644 selectee = ((StgInd *)selectee)->indirectee;
1648 selectee = ((StgEvacuated *)selectee)->evacuee;
1651 case THUNK_SELECTOR:
1653 /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
1654 something) to go into an infinite loop when the nightly
1655 stage2 compiles PrelTup.lhs. */
1657 /* we can't recurse indefinitely in evacuate(), so set a
1658 * limit on the number of times we can go around this
1661 if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
1663 bd = Bdescr((P_)selectee);
if (!(bd->flags & BF_EVACUATED)) {   // only recurse into selectees not yet evacuated
1665 thunk_selector_depth++;
1666 selectee = evacuate(selectee);
1667 thunk_selector_depth--;
1671 // otherwise, fall through...
1683 case SE_CAF_BLACKHOLE:
1687 // not evaluated yet
1691 // a copy of the top-level cases below
1692 case RBH: // cf. BLACKHOLE_BQ
1694 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1695 to = copy(q,BLACKHOLE_sizeW(),stp);
1696 //ToDo: derive size etc from reverted IP
1697 //to = copy(q,size,stp);
1698 // recordMutable((StgMutClosure *)to);
1703 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1704 to = copy(q,sizeofW(StgBlockedFetch),stp);
ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
to = copy(q,sizeofW(StgFetchMe),stp);
ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
1717 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1722 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1723 (int)(selectee_info->type));
1726 return copy(q,THUNK_SELECTOR_sizeW(),stp);
1730 // follow chains of indirections, don't evacuate them
1731 q = ((StgInd*)q)->indirectee;
1735 if (info->srt_len > 0 && major_gc &&
1736 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1737 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1738 static_objects = (StgClosure *)q;
1743 if (info->srt_len > 0 && major_gc &&
1744 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1745 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1746 static_objects = (StgClosure *)q;
1751 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1752 * on the CAF list, so don't do anything with it here (we'll
1753 * scavenge it later).
1756 && ((StgIndStatic *)q)->saved_info == NULL
1757 && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1758 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1759 static_objects = (StgClosure *)q;
1764 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1765 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1766 static_objects = (StgClosure *)q;
1770 case CONSTR_INTLIKE:
1771 case CONSTR_CHARLIKE:
1772 case CONSTR_NOCAF_STATIC:
/* no need to put these on the static linked list, they don't need
* to be scavenged.
*/
1788 // shouldn't see these
1789 barf("evacuate: stack frame at %p\n", q);
1793 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1794 * of stack, tagging and all.
1796 return copy(q,pap_sizeW((StgPAP*)q),stp);
1799 /* Already evacuated, just return the forwarding address.
1800 * HOWEVER: if the requested destination generation (evac_gen) is
1801 * older than the actual generation (because the object was
1802 * already evacuated to a younger generation) then we have to
1803 * set the failed_to_evac flag to indicate that we couldn't
1804 * manage to promote the object to the desired generation.
1806 if (evac_gen > 0) { // optimisation
1807 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1808 if (Bdescr((P_)p)->gen_no < evac_gen) {
1809 failed_to_evac = rtsTrue;
1810 TICK_GC_FAILED_PROMOTION();
1813 return ((StgEvacuated*)q)->evacuee;
1816 // just copy the block
1817 return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
1820 case MUT_ARR_PTRS_FROZEN:
1821 // just copy the block
1822 return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
1826 StgTSO *tso = (StgTSO *)q;
1828 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
1830 if (tso->what_next == ThreadRelocated) {
1831 q = (StgClosure *)tso->link;
1835 /* To evacuate a small TSO, we need to relocate the update frame
1839 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
1840 move_TSO(tso, new_tso);
1841 return (StgClosure *)new_tso;
1846 case RBH: // cf. BLACKHOLE_BQ
1848 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1849 to = copy(q,BLACKHOLE_sizeW(),stp);
1850 //ToDo: derive size etc from reverted IP
1851 //to = copy(q,size,stp);
1853 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1854 q, info_type(q), to, info_type(to)));
1859 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1860 to = copy(q,sizeofW(StgBlockedFetch),stp);
1862 belch("@@ evacuate: %p (%s) to %p (%s)",
1863 q, info_type(q), to, info_type(to)));
ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
1871 to = copy(q,sizeofW(StgFetchMe),stp);
1873 belch("@@ evacuate: %p (%s) to %p (%s)",
1874 q, info_type(q), to, info_type(to)));
ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
1879 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1881 belch("@@ evacuate: %p (%s) to %p (%s)",
1882 q, info_type(q), to, info_type(to)));
1887 barf("evacuate: strange closure type %d", (int)(info->type));
1893 /* -----------------------------------------------------------------------------
1894 move_TSO is called to update the TSO structure after it has been
1895 moved from one place to another.
1896 -------------------------------------------------------------------------- */
1899 move_TSO(StgTSO *src, StgTSO *dest)
1903 // relocate the stack pointers...
1904 diff = (StgPtr)dest - (StgPtr)src; // In *words*
1905 dest->sp = (StgPtr)dest->sp + diff;
1906 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1908 relocate_stack(dest, diff);
1911 /* -----------------------------------------------------------------------------
1912 relocate_stack is called to update the linkage between
1913 UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
1915 -------------------------------------------------------------------------- */
1918 relocate_stack(StgTSO *dest, ptrdiff_t diff)
1926 while ((P_)su < dest->stack + dest->stack_size) {
1927 switch (get_itbl(su)->type) {
1929 // GCC actually manages to common up these three cases!
1932 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1937 cf = (StgCatchFrame *)su;
1938 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1943 sf = (StgSeqFrame *)su;
1944 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1953 barf("relocate_stack %d", (int)(get_itbl(su)->type));
1964 scavenge_srt(const StgInfoTable *info)
1966 StgClosure **srt, **srt_end;
1968 /* evacuate the SRT. If srt_len is zero, then there isn't an
1969 * srt field in the info table. That's ok, because we'll
1970 * never dereference it.
1972 srt = (StgClosure **)(info->srt);
1973 srt_end = srt + info->srt_len;
1974 for (; srt < srt_end; srt++) {
1975 /* Special-case to handle references to closures hiding out in DLLs, since
double indirections are required to get at those. The code generator knows
1977 which is which when generating the SRT, so it stores the (indirect)
1978 reference to the DLL closure in the table by first adding one to it.
1979 We check for this here, and undo the addition before evacuating it.
1981 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1982 closure that's fixed at link-time, and no extra magic is required.
1984 #ifdef ENABLE_WIN32_DLL_SUPPORT
1985 if ( (unsigned long)(*srt) & 0x1 ) {
1986 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
1996 /* -----------------------------------------------------------------------------
1998 -------------------------------------------------------------------------- */
2001 scavengeTSO (StgTSO *tso)
2003 // chase the link field for any TSOs on the same queue
2004 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2005 if ( tso->why_blocked == BlockedOnMVar
2006 || tso->why_blocked == BlockedOnBlackHole
2007 || tso->why_blocked == BlockedOnException
2009 || tso->why_blocked == BlockedOnGA
2010 || tso->why_blocked == BlockedOnGA_NoSend
2013 tso->block_info.closure = evacuate(tso->block_info.closure);
2015 if ( tso->blocked_exceptions != NULL ) {
2016 tso->blocked_exceptions =
2017 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2019 // scavenge this thread's stack
2020 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2023 /* -----------------------------------------------------------------------------
2024 Scavenge a given step until there are no more objects in this step
2027 evac_gen is set by the caller to be either zero (for a step in a
2028 generation < N) or G where G is the generation of the step being
2031 We sometimes temporarily change evac_gen back to zero if we're
2032 scavenging a mutable object where early promotion isn't such a good
2034 -------------------------------------------------------------------------- */
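/* A pattern worth noting in the cases below: mutable objects temporarily
 * set evac_gen = 0 while their fields are evacuated (early promotion is
 * pointless for something that may be mutated again), restore
 * saved_evac_gen afterwards, and clear failed_to_evac because they sit on
 * a mutable list anyway.  For other objects, a failed_to_evac left set at
 * the bottom of the loop turns into a mkMutCons() entry on the old
 * generation's mutable list.
 */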
2042 nat saved_evac_gen = evac_gen;
2047 failed_to_evac = rtsFalse;
2049 /* scavenge phase - standard breadth-first scavenging of the
2053 while (bd != stp->hp_bd || p < stp->hp) {
2055 // If we're at the end of this block, move on to the next block
2056 if (bd != stp->hp_bd && p == bd->free) {
2062 info = get_itbl((StgClosure *)p);
2063 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2066 switch (info->type) {
2069 /* treat MVars specially, because we don't want to evacuate the
2070 * mut_link field in the middle of the closure.
2073 StgMVar *mvar = ((StgMVar *)p);
2075 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2076 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2077 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2078 evac_gen = saved_evac_gen;
2079 recordMutable((StgMutClosure *)mvar);
2080 failed_to_evac = rtsFalse; // mutable.
2081 p += sizeofW(StgMVar);
2089 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2090 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2091 p += sizeofW(StgHeader) + 2;
2096 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2097 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2103 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2104 p += sizeofW(StgHeader) + 1;
2109 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2115 p += sizeofW(StgHeader) + 1;
2122 p += sizeofW(StgHeader) + 2;
2129 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2130 p += sizeofW(StgHeader) + 2;
2146 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2147 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2148 (StgClosure *)*p = evacuate((StgClosure *)*p);
2150 p += info->layout.payload.nptrs;
2155 if (stp->gen_no != 0) {
2156 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2159 case IND_OLDGEN_PERM:
2160 ((StgIndOldGen *)p)->indirectee =
2161 evacuate(((StgIndOldGen *)p)->indirectee);
2162 if (failed_to_evac) {
2163 failed_to_evac = rtsFalse;
2164 recordOldToNewPtrs((StgMutClosure *)p);
2166 p += sizeofW(StgIndOldGen);
2171 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2172 evac_gen = saved_evac_gen;
2173 recordMutable((StgMutClosure *)p);
2174 failed_to_evac = rtsFalse; // mutable anyhow
2175 p += sizeofW(StgMutVar);
2180 failed_to_evac = rtsFalse; // mutable anyhow
2181 p += sizeofW(StgMutVar);
2185 case SE_CAF_BLACKHOLE:
2188 p += BLACKHOLE_sizeW();
2193 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2194 (StgClosure *)bh->blocking_queue =
2195 evacuate((StgClosure *)bh->blocking_queue);
2196 recordMutable((StgMutClosure *)bh);
2197 failed_to_evac = rtsFalse;
2198 p += BLACKHOLE_sizeW();
2202 case THUNK_SELECTOR:
2204 StgSelector *s = (StgSelector *)p;
2205 s->selectee = evacuate(s->selectee);
2206 p += THUNK_SELECTOR_sizeW();
2210 case AP_UPD: // same as PAPs
2212 /* Treat a PAP just like a section of stack, not forgetting to
2213 * evacuate the function pointer too...
2216 StgPAP* pap = (StgPAP *)p;
2218 pap->fun = evacuate(pap->fun);
2219 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2220 p += pap_sizeW(pap);
2225 // nothing to follow
2226 p += arr_words_sizeW((StgArrWords *)p);
2230 // follow everything
2234 evac_gen = 0; // repeatedly mutable
2235 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2236 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2237 (StgClosure *)*p = evacuate((StgClosure *)*p);
2239 evac_gen = saved_evac_gen;
2240 recordMutable((StgMutClosure *)q);
2241 failed_to_evac = rtsFalse; // mutable anyhow.
2245 case MUT_ARR_PTRS_FROZEN:
2246 // follow everything
2250 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2251 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2252 (StgClosure *)*p = evacuate((StgClosure *)*p);
2254 // it's tempting to recordMutable() if failed_to_evac is
2255 // false, but that breaks some assumptions (eg. every
2256 // closure on the mutable list is supposed to have the MUT
2257 // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
2263 StgTSO *tso = (StgTSO *)p;
2266 evac_gen = saved_evac_gen;
2267 recordMutable((StgMutClosure *)tso);
2268 failed_to_evac = rtsFalse; // mutable anyhow.
2269 p += tso_sizeW(tso);
2274 case RBH: // cf. BLACKHOLE_BQ
2277 nat size, ptrs, nonptrs, vhs;
2279 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2281 StgRBH *rbh = (StgRBH *)p;
2282 (StgClosure *)rbh->blocking_queue =
2283 evacuate((StgClosure *)rbh->blocking_queue);
recordMutable((StgMutClosure *)rbh);
2285 failed_to_evac = rtsFalse; // mutable anyhow.
2287 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2288 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2289 // ToDo: use size of reverted closure here!
2290 p += BLACKHOLE_sizeW();
2296 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2297 // follow the pointer to the node which is being demanded
2298 (StgClosure *)bf->node =
2299 evacuate((StgClosure *)bf->node);
2300 // follow the link to the rest of the blocking queue
2301 (StgClosure *)bf->link =
2302 evacuate((StgClosure *)bf->link);
2303 if (failed_to_evac) {
2304 failed_to_evac = rtsFalse;
2305 recordMutable((StgMutClosure *)bf);
2308 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2309 bf, info_type((StgClosure *)bf),
2310 bf->node, info_type(bf->node)));
2311 p += sizeofW(StgBlockedFetch);
2319 p += sizeofW(StgFetchMe);
2320 break; // nothing to do in this case
2322 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2324 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2325 (StgClosure *)fmbq->blocking_queue =
2326 evacuate((StgClosure *)fmbq->blocking_queue);
2327 if (failed_to_evac) {
2328 failed_to_evac = rtsFalse;
2329 recordMutable((StgMutClosure *)fmbq);
2332 belch("@@ scavenge: %p (%s) exciting, isn't it",
2333 p, info_type((StgClosure *)p)));
2334 p += sizeofW(StgFetchMeBlockingQueue);
2340 barf("scavenge: unimplemented/strange closure type %d @ %p",
2344 /* If we didn't manage to promote all the objects pointed to by
2345 * the current object, then we have to designate this object as
2346 * mutable (because it contains old-to-new generation pointers).
2348 if (failed_to_evac) {
2349 failed_to_evac = rtsFalse;
2350 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2358 /* -----------------------------------------------------------------------------
2359 Scavenge everything on the mark stack.
2361 This is slightly different from scavenge():
2362 - we don't walk linearly through the objects, so the scavenger
2363 doesn't need to advance the pointer on to the next object.
2364 -------------------------------------------------------------------------- */
2367 scavenge_mark_stack(void)
2373 evac_gen = oldest_gen->no;
2374 saved_evac_gen = evac_gen;
2377 while (!mark_stack_empty()) {
2378 p = pop_mark_stack();
2380 info = get_itbl((StgClosure *)p);
2381 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2384 switch (info->type) {
2387 /* treat MVars specially, because we don't want to evacuate the
2388 * mut_link field in the middle of the closure.
2391 StgMVar *mvar = ((StgMVar *)p);
2393 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2394 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2395 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2396 evac_gen = saved_evac_gen;
2397 failed_to_evac = rtsFalse; // mutable.
2405 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2406 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2416 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2441 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2442 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2443 (StgClosure *)*p = evacuate((StgClosure *)*p);
2449 // don't need to do anything here: the only possible case
2450 // is that we're in a 1-space compacting collector, with
2451 // no "old" generation.
2455 case IND_OLDGEN_PERM:
2456 ((StgIndOldGen *)p)->indirectee =
2457 evacuate(((StgIndOldGen *)p)->indirectee);
2458 if (failed_to_evac) {
2459 recordOldToNewPtrs((StgMutClosure *)p);
2461 failed_to_evac = rtsFalse;
2466 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2467 evac_gen = saved_evac_gen;
2468 failed_to_evac = rtsFalse;
2473 failed_to_evac = rtsFalse;
2477 case SE_CAF_BLACKHOLE:
2485 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2486 (StgClosure *)bh->blocking_queue =
2487 evacuate((StgClosure *)bh->blocking_queue);
2488 failed_to_evac = rtsFalse;
2492 case THUNK_SELECTOR:
2494 StgSelector *s = (StgSelector *)p;
2495 s->selectee = evacuate(s->selectee);
2499 case AP_UPD: // same as PAPs
2501 /* Treat a PAP just like a section of stack, not forgetting to
2502 * evacuate the function pointer too...
2505 StgPAP* pap = (StgPAP *)p;
2507 pap->fun = evacuate(pap->fun);
2508 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2513 // follow everything
2517 evac_gen = 0; // repeatedly mutable
2518 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2519 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2520 (StgClosure *)*p = evacuate((StgClosure *)*p);
2522 evac_gen = saved_evac_gen;
2523 failed_to_evac = rtsFalse; // mutable anyhow.
2527 case MUT_ARR_PTRS_FROZEN:
2528 // follow everything
2532 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2533 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2534 (StgClosure *)*p = evacuate((StgClosure *)*p);
2541 StgTSO *tso = (StgTSO *)p;
2544 evac_gen = saved_evac_gen;
2545 failed_to_evac = rtsFalse;
2550 case RBH: // cf. BLACKHOLE_BQ
2553 nat size, ptrs, nonptrs, vhs;
2555 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2557 StgRBH *rbh = (StgRBH *)p;
2558 (StgClosure *)rbh->blocking_queue =
2559 evacuate((StgClosure *)rbh->blocking_queue);
2560 recordMutable((StgMutClosure *)rbh);
2561 failed_to_evac = rtsFalse; // mutable anyhow.
2563 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2564 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2570 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2571 // follow the pointer to the node which is being demanded
2572 (StgClosure *)bf->node =
2573 evacuate((StgClosure *)bf->node);
2574 // follow the link to the rest of the blocking queue
2575 (StgClosure *)bf->link =
2576 evacuate((StgClosure *)bf->link);
2577 if (failed_to_evac) {
2578 failed_to_evac = rtsFalse;
2579 recordMutable((StgMutClosure *)bf);
2583 	  belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
2583 bf, info_type((StgClosure *)bf),
2584 bf->node, info_type(bf->node)));
2592 break; // nothing to do in this case
2594 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2596 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2597 (StgClosure *)fmbq->blocking_queue =
2598 evacuate((StgClosure *)fmbq->blocking_queue);
2599 if (failed_to_evac) {
2600 failed_to_evac = rtsFalse;
2601 recordMutable((StgMutClosure *)fmbq);
2604 belch("@@ scavenge: %p (%s) exciting, isn't it",
2605 p, info_type((StgClosure *)p)));
2611 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2615 if (failed_to_evac) {
2616 failed_to_evac = rtsFalse;
2617 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2620 // mark the next bit to indicate "scavenged"
2621 mark(q+1, Bdescr(q));
2623 } // while (!mark_stack_empty())
2625 // start a new linear scan if the mark stack overflowed at some point
2626 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2627 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2628 mark_stack_overflowed = rtsFalse;
2629 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2630 oldgen_scan = oldgen_scan_bd->start;
2633 if (oldgen_scan_bd) {
2634 // push a new thing on the mark stack
2636 // find a closure that is marked but not scavenged, and start
2638 while (oldgen_scan < oldgen_scan_bd->free
2639 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2643 if (oldgen_scan < oldgen_scan_bd->free) {
2645 // already scavenged?
2646 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2647 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2650 push_mark_stack(oldgen_scan);
2651 // ToDo: bump the linear scan by the actual size of the object
2652 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2656 oldgen_scan_bd = oldgen_scan_bd->link;
2657 if (oldgen_scan_bd != NULL) {
2658 oldgen_scan = oldgen_scan_bd->start;
2664 /* -----------------------------------------------------------------------------
2665 Scavenge one object.
2667 This is used for objects that are temporarily marked as mutable
2668 because they contain old-to-new generation pointers. Only certain
2669 objects can have this property.
2670 -------------------------------------------------------------------------- */
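/* Note: scavenge_one() returns rtsTrue if it failed to promote everything
 * the object points to into the desired generation; the caller is then
 * responsible for keeping the object on a mutable list.  For example,
 * scavenge_large() below does, in essence:
 *
 *     if (scavenge_one(p)) {
 *         mkMutCons((StgClosure *)p, stp->gen);   // keep it as a root
 *     }
 */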
2673 scavenge_one(StgPtr p)
2675 const StgInfoTable *info;
2676 nat saved_evac_gen = evac_gen;
2679 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2680 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2682 info = get_itbl((StgClosure *)p);
2684 switch (info->type) {
2687 case FUN_1_0: // hardly worth specialising these guys
2707 case IND_OLDGEN_PERM:
2711 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2712 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2713 (StgClosure *)*q = evacuate((StgClosure *)*q);
2719 case SE_CAF_BLACKHOLE:
2724 case THUNK_SELECTOR:
2726 StgSelector *s = (StgSelector *)p;
2727 s->selectee = evacuate(s->selectee);
2732 // nothing to follow
2737 // follow everything
2740 evac_gen = 0; // repeatedly mutable
2741 recordMutable((StgMutClosure *)p);
2742 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2743 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2744 (StgClosure *)*p = evacuate((StgClosure *)*p);
2746 evac_gen = saved_evac_gen;
2747 failed_to_evac = rtsFalse;
2751 case MUT_ARR_PTRS_FROZEN:
2753 // follow everything
2756 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2757 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2758 (StgClosure *)*p = evacuate((StgClosure *)*p);
2765 StgTSO *tso = (StgTSO *)p;
2767 evac_gen = 0; // repeatedly mutable
2769 recordMutable((StgMutClosure *)tso);
2770 evac_gen = saved_evac_gen;
2771 failed_to_evac = rtsFalse;
2778 StgPAP* pap = (StgPAP *)p;
2779 pap->fun = evacuate(pap->fun);
2780 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2785       // This might happen if, for instance, a MUT_CONS was pointing to a
2786 // THUNK which has since been updated. The IND_OLDGEN will
2787 // be on the mutable list anyway, so we don't need to do anything
2792 barf("scavenge_one: strange object %d", (int)(info->type));
2795 no_luck = failed_to_evac;
2796 failed_to_evac = rtsFalse;
2800 /* -----------------------------------------------------------------------------
2801 Scavenging mutable lists.
2803 We treat the mutable list of each generation > N (i.e. all the
2804 generations older than the one being collected) as roots. We also
2805 remove non-mutable objects from the mutable list at this point.
2806 -------------------------------------------------------------------------- */
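/* Roughly, GarbageCollect() earlier in this file treats the mutable
 * lists of every generation older than N as roots (a sketch, assuming
 * the usual generation numbering):
 *
 *     for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
 *         scavenge_mut_once_list(&generations[g]);
 *         scavenge_mutable_list(&generations[g]);
 *     }
 */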
2809 scavenge_mut_once_list(generation *gen)
2811 const StgInfoTable *info;
2812 StgMutClosure *p, *next, *new_list;
2814 p = gen->mut_once_list;
2815 new_list = END_MUT_LIST;
2819 failed_to_evac = rtsFalse;
2821 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2823 // make sure the info pointer is into text space
2824 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2825 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2829 if (info->type==RBH)
2830 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2832 switch(info->type) {
2835 case IND_OLDGEN_PERM:
2837 /* Try to pull the indirectee into this generation, so we can
2838 * remove the indirection from the mutable list.
2840 ((StgIndOldGen *)p)->indirectee =
2841 evacuate(((StgIndOldGen *)p)->indirectee);
2843 #if 0 && defined(DEBUG)
2844 if (RtsFlags.DebugFlags.gc)
2845 /* Debugging code to print out the size of the thing we just
2849 StgPtr start = gen->steps[0].scan;
2850 bdescr *start_bd = gen->steps[0].scan_bd;
2852 scavenge(&gen->steps[0]);
2853 if (start_bd != gen->steps[0].scan_bd) {
2854 size += (P_)BLOCK_ROUND_UP(start) - start;
2855 start_bd = start_bd->link;
2856 while (start_bd != gen->steps[0].scan_bd) {
2857 size += BLOCK_SIZE_W;
2858 start_bd = start_bd->link;
2860 size += gen->steps[0].scan -
2861 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2863 size = gen->steps[0].scan - start;
2865 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2869 /* failed_to_evac might happen if we've got more than two
2870        * generations: we're collecting only generation 0, the
2871 * indirection resides in generation 2 and the indirectee is
2874 if (failed_to_evac) {
2875 failed_to_evac = rtsFalse;
2876 p->mut_link = new_list;
2879 /* the mut_link field of an IND_STATIC is overloaded as the
2880 * static link field too (it just so happens that we don't need
2881 * both at the same time), so we need to NULL it out when
2882 * removing this object from the mutable list because the static
2883 * link fields are all assumed to be NULL before doing a major
2891       /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2892 * it from the mutable list if possible by promoting whatever it
2895 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2896 /* didn't manage to promote everything, so put the
2897 * MUT_CONS back on the list.
2899 p->mut_link = new_list;
2905 // shouldn't have anything else on the mutables list
2906 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2910 gen->mut_once_list = new_list;
2915 scavenge_mutable_list(generation *gen)
2917 const StgInfoTable *info;
2918 StgMutClosure *p, *next;
2920 p = gen->saved_mut_list;
2924 failed_to_evac = rtsFalse;
2926 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2928 // make sure the info pointer is into text space
2929 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2930 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2934 if (info->type==RBH)
2935 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2937 switch(info->type) {
2940 // follow everything
2941 p->mut_link = gen->mut_list;
2946 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2947 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2948 (StgClosure *)*q = evacuate((StgClosure *)*q);
2953 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2954 case MUT_ARR_PTRS_FROZEN:
2959 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2960 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2961 (StgClosure *)*q = evacuate((StgClosure *)*q);
2965 if (failed_to_evac) {
2966 failed_to_evac = rtsFalse;
2967 mkMutCons((StgClosure *)p, gen);
2973 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2974 p->mut_link = gen->mut_list;
2980 StgMVar *mvar = (StgMVar *)p;
2981 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2982 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2983 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2984 p->mut_link = gen->mut_list;
2991 StgTSO *tso = (StgTSO *)p;
2995 /* Don't take this TSO off the mutable list - it might still
2996 * point to some younger objects (because we set evac_gen to 0
2999 tso->mut_link = gen->mut_list;
3000 gen->mut_list = (StgMutClosure *)tso;
3006 StgBlockingQueue *bh = (StgBlockingQueue *)p;
3007 (StgClosure *)bh->blocking_queue =
3008 evacuate((StgClosure *)bh->blocking_queue);
3009 p->mut_link = gen->mut_list;
3014 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
3017 case IND_OLDGEN_PERM:
3018 /* Try to pull the indirectee into this generation, so we can
3019 * remove the indirection from the mutable list.
3022 ((StgIndOldGen *)p)->indirectee =
3023 evacuate(((StgIndOldGen *)p)->indirectee);
3026 if (failed_to_evac) {
3027 failed_to_evac = rtsFalse;
3028 p->mut_link = gen->mut_once_list;
3029 gen->mut_once_list = p;
3036 // HWL: check whether all of these are necessary
3038 case RBH: // cf. BLACKHOLE_BQ
3040 // nat size, ptrs, nonptrs, vhs;
3042 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3043 StgRBH *rbh = (StgRBH *)p;
3044 (StgClosure *)rbh->blocking_queue =
3045 evacuate((StgClosure *)rbh->blocking_queue);
3046 if (failed_to_evac) {
3047 failed_to_evac = rtsFalse;
3048 recordMutable((StgMutClosure *)rbh);
3050 // ToDo: use size of reverted closure here!
3051 p += BLACKHOLE_sizeW();
3057 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3058 // follow the pointer to the node which is being demanded
3059 (StgClosure *)bf->node =
3060 evacuate((StgClosure *)bf->node);
3061 // follow the link to the rest of the blocking queue
3062 (StgClosure *)bf->link =
3063 evacuate((StgClosure *)bf->link);
3064 if (failed_to_evac) {
3065 failed_to_evac = rtsFalse;
3066 recordMutable((StgMutClosure *)bf);
3068 p += sizeofW(StgBlockedFetch);
3074 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3077 p += sizeofW(StgFetchMe);
3078 break; // nothing to do in this case
3080 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3082 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3083 (StgClosure *)fmbq->blocking_queue =
3084 evacuate((StgClosure *)fmbq->blocking_queue);
3085 if (failed_to_evac) {
3086 failed_to_evac = rtsFalse;
3087 recordMutable((StgMutClosure *)fmbq);
3089 p += sizeofW(StgFetchMeBlockingQueue);
3095 // shouldn't have anything else on the mutables list
3096 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3103 scavenge_static(void)
3105 StgClosure* p = static_objects;
3106 const StgInfoTable *info;
3108 /* Always evacuate straight to the oldest generation for static
3110 evac_gen = oldest_gen->no;
3112 /* keep going until we've scavenged all the objects on the linked
3114 while (p != END_OF_STATIC_LIST) {
3118 if (info->type==RBH)
3119 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3121 // make sure the info pointer is into text space
3122 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3123 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3125 /* Take this object *off* the static_objects list,
3126 * and put it on the scavenged_static_objects list.
3128 static_objects = STATIC_LINK(info,p);
3129 STATIC_LINK(info,p) = scavenged_static_objects;
3130 scavenged_static_objects = p;
3132 switch (info -> type) {
3136 StgInd *ind = (StgInd *)p;
3137 ind->indirectee = evacuate(ind->indirectee);
3139 	/* might fail to evacuate it, in which case we have to put it
3140 * back on the mutable list (and take it off the
3141 * scavenged_static list because the static link and mut link
3142 * pointers are one and the same).
3144 if (failed_to_evac) {
3145 failed_to_evac = rtsFalse;
3146 scavenged_static_objects = IND_STATIC_LINK(p);
3147 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3148 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3162 next = (P_)p->payload + info->layout.payload.ptrs;
3163 // evacuate the pointers
3164 for (q = (P_)p->payload; q < next; q++) {
3165 (StgClosure *)*q = evacuate((StgClosure *)*q);
3171 barf("scavenge_static: strange closure %d", (int)(info->type));
3174 ASSERT(failed_to_evac == rtsFalse);
3176 /* get the next static object from the list. Remember, there might
3177 * be more stuff on this list now that we've done some evacuating!
3178 * (static_objects is a global)
3184 /* -----------------------------------------------------------------------------
3185 scavenge_stack walks over a section of stack and evacuates all the
3186 objects pointed to by it. We can use the same code for walking
3187 PAPs, since these are just sections of copied stack.
3188 -------------------------------------------------------------------------- */
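/* Note: activation records are walked using their liveness bitmap.  A 0
 * bit means the corresponding stack word is a live pointer and must be
 * evacuated; a 1 bit means it is a non-pointer word and is skipped.
 * The small-bitmap case below boils down to:
 *
 *     while (bitmap != 0) {
 *         if ((bitmap & 1) == 0) {
 *             (StgClosure *)*p = evacuate((StgClosure *)*p);
 *         }
 *         p++;
 *         bitmap = bitmap >> 1;
 *     }
 */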
3191 scavenge_stack(StgPtr p, StgPtr stack_end)
3194 const StgInfoTable* info;
3197 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3200 * Each time around this loop, we are looking at a chunk of stack
3201 * that starts with either a pending argument section or an
3202 * activation record.
3205 while (p < stack_end) {
3208 // If we've got a tag, skip over that many words on the stack
3209 if (IS_ARG_TAG((W_)q)) {
3214 /* Is q a pointer to a closure?
3216 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3218 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3219 ASSERT(closure_STATIC((StgClosure *)q));
3221 // otherwise, must be a pointer into the allocation space.
3224 (StgClosure *)*p = evacuate((StgClosure *)q);
3230 * Otherwise, q must be the info pointer of an activation
3231 * record. All activation records have 'bitmap' style layout
3234 info = get_itbl((StgClosure *)p);
3236 switch (info->type) {
3238 // Dynamic bitmap: the mask is stored on the stack
3240 bitmap = ((StgRetDyn *)p)->liveness;
3241 p = (P_)&((StgRetDyn *)p)->payload[0];
3244 // probably a slow-entry point return address:
3252 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3253 old_p, p, old_p+1));
3255 p++; // what if FHS!=1 !? -- HWL
3260 /* Specialised code for update frames, since they're so common.
3261 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3262 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3266 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3268 p += sizeofW(StgUpdateFrame);
3271 frame->updatee = evacuate(frame->updatee);
3273 #else // specialised code for update frames, not sure if it's worth it.
3275 nat type = get_itbl(frame->updatee)->type;
3277 if (type == EVACUATED) {
3278 frame->updatee = evacuate(frame->updatee);
3281 bdescr *bd = Bdescr((P_)frame->updatee);
3283 if (bd->gen_no > N) {
3284 if (bd->gen_no < evac_gen) {
3285 failed_to_evac = rtsTrue;
3290 // Don't promote blackholes
3292 if (!(stp->gen_no == 0 &&
3294 stp->no == stp->gen->n_steps-1)) {
3301 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3302 sizeofW(StgHeader), stp);
3303 frame->updatee = to;
3306 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3307 frame->updatee = to;
3308 recordMutable((StgMutClosure *)to);
3311 /* will never be SE_{,CAF_}BLACKHOLE, since we
3312 don't push an update frame for single-entry thunks. KSW 1999-01. */
3313 barf("scavenge_stack: UPDATE_FRAME updatee");
3319       // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
3326 bitmap = info->layout.bitmap;
3328 // this assumes that the payload starts immediately after the info-ptr
3330 while (bitmap != 0) {
3331 if ((bitmap & 1) == 0) {
3332 (StgClosure *)*p = evacuate((StgClosure *)*p);
3335 bitmap = bitmap >> 1;
3342 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3347 StgLargeBitmap *large_bitmap;
3350 large_bitmap = info->layout.large_bitmap;
3353 for (i=0; i<large_bitmap->size; i++) {
3354 bitmap = large_bitmap->bitmap[i];
3355 q = p + BITS_IN(W_);
3356 while (bitmap != 0) {
3357 if ((bitmap & 1) == 0) {
3358 (StgClosure *)*p = evacuate((StgClosure *)*p);
3361 bitmap = bitmap >> 1;
3363 if (i+1 < large_bitmap->size) {
3365 (StgClosure *)*p = evacuate((StgClosure *)*p);
3371 // and don't forget to follow the SRT
3376 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3381 /*-----------------------------------------------------------------------------
3382 scavenge the large object list.
3384 evac_gen set by caller; similar games played with evac_gen as with
3385 scavenge() - see comment at the top of scavenge(). Most large
3386 objects are (repeatedly) mutable, so most of the time evac_gen will
3388 --------------------------------------------------------------------------- */
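/* Note: large objects are scavenged in place.  Each block is unlinked
 * from new_large_objects and moved onto scavenged_large_objects, and the
 * object itself is handed to scavenge_one(); if that fails to promote
 * everything, the object is pinned on the generation's mutable list via
 * mkMutCons() so it remains a root.
 */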
3391 scavenge_large(step *stp)
3396 bd = stp->new_large_objects;
3398 for (; bd != NULL; bd = stp->new_large_objects) {
3400 /* take this object *off* the large objects list and put it on
3401 * the scavenged large objects list. This is so that we can
3402 * treat new_large_objects as a stack and push new objects on
3403 * the front when evacuating.
3405 stp->new_large_objects = bd->link;
3406 dbl_link_onto(bd, &stp->scavenged_large_objects);
3408 // update the block count in this step.
3409 stp->n_scavenged_large_blocks += bd->blocks;
3412 if (scavenge_one(p)) {
3413 mkMutCons((StgClosure *)p, stp->gen);
3418 /* -----------------------------------------------------------------------------
3419 Initialising the static object & mutable lists
3420 -------------------------------------------------------------------------- */
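/* Note (assumed usage): zero_static_object_list() is run over the list
 * of scavenged static objects after a major GC, resetting each
 * STATIC_LINK field to NULL so that the next major GC can rebuild the
 * list from scratch.  zero_mutable_list() below does the analogous job
 * for mut_link fields.
 */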
3423 zero_static_object_list(StgClosure* first_static)
3427 const StgInfoTable *info;
3429 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3431 link = STATIC_LINK(info, p);
3432 STATIC_LINK(info,p) = NULL;
3436 /* This function is only needed because we share the mutable link
3437 * field with the static link field in an IND_STATIC, so we have to
3438 * zero the mut_link field before doing a major GC, which needs the
3439 * static link field.
3441 * It doesn't do any harm to zero all the mutable link fields on the
3446 zero_mutable_list( StgMutClosure *first )
3448 StgMutClosure *next, *c;
3450 for (c = first; c != END_MUT_LIST; c = next) {
3456 /* -----------------------------------------------------------------------------
3458 -------------------------------------------------------------------------- */
3465 for (c = (StgIndStatic *)caf_list; c != NULL;
3466 c = (StgIndStatic *)c->static_link)
3468 c->header.info = c->saved_info;
3469 c->saved_info = NULL;
3470 // could, but not necessary: c->static_link = NULL;
3476 scavengeCAFs( void )
3481 for (c = (StgIndStatic *)caf_list; c != NULL;
3482 c = (StgIndStatic *)c->static_link)
3484 c->indirectee = evacuate(c->indirectee);
3488 /* -----------------------------------------------------------------------------
3489 Sanity code for CAF garbage collection.
3491 With DEBUG turned on, we manage a CAF list in addition to the SRT
3492 mechanism. After GC, we run down the CAF list and blackhole any
3493 CAFs which have been garbage collected. This means we get an error
3494 whenever the program tries to enter a garbage collected CAF.
3496 Any garbage collected CAFs are taken off the CAF list at the same
3498 -------------------------------------------------------------------------- */
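/* Note: the (currently disabled) check below walks caf_list after GC.
 * A CAF whose STATIC_LINK field is still NULL was never reached, so it
 * is overwritten with stg_BLACKHOLE_info and unlinked from the list;
 * entering such a CAF afterwards then fails immediately, which is the
 * error we want for a prematurely collected CAF.
 */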
3500 #if 0 && defined(DEBUG)
3507 const StgInfoTable *info;
3518 ASSERT(info->type == IND_STATIC);
3520 if (STATIC_LINK(info,p) == NULL) {
3521 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3523 SET_INFO(p,&stg_BLACKHOLE_info);
3524 p = STATIC_LINK2(info,p);
3528 pp = &STATIC_LINK2(info,p);
3535 // belch("%d CAFs live", i);
3540 /* -----------------------------------------------------------------------------
3543 Whenever a thread returns to the scheduler after possibly doing
3544 some work, we have to run down the stack and black-hole all the
3545 closures referred to by update frames.
3546 -------------------------------------------------------------------------- */
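/* Note: an updatee that is already a BLACKHOLE_BQ or a CAF_BLACKHOLE is
 * left alone below: a BLACKHOLE_BQ carries a blocking queue that must
 * not be clobbered, and a CAF_BLACKHOLE is already a black hole of the
 * right kind.  Only plain (unblackholed) thunks are overwritten with
 * stg_BLACKHOLE_info.
 */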
3549 threadLazyBlackHole(StgTSO *tso)
3551 StgUpdateFrame *update_frame;
3552 StgBlockingQueue *bh;
3555 stack_end = &tso->stack[tso->stack_size];
3556 update_frame = tso->su;
3559 switch (get_itbl(update_frame)->type) {
3562 update_frame = ((StgCatchFrame *)update_frame)->link;
3566 bh = (StgBlockingQueue *)update_frame->updatee;
3568 /* if the thunk is already blackholed, it means we've also
3569 * already blackholed the rest of the thunks on this stack,
3570 * so we can stop early.
3572 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3573 * don't interfere with this optimisation.
3575 if (bh->header.info == &stg_BLACKHOLE_info) {
3579 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3580 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3581 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3582 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3584 SET_INFO(bh,&stg_BLACKHOLE_info);
3587 update_frame = update_frame->link;
3591 update_frame = ((StgSeqFrame *)update_frame)->link;
3597 barf("threadPaused");
3603 /* -----------------------------------------------------------------------------
3606 * Code largely pinched from old RTS, then hacked to bits. We also do
3607 * lazy black holing here.
3609 * -------------------------------------------------------------------------- */
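/* In outline: squeezing is a two-pass walk over the chain of update
 * frames.  Pass 1 walks from tso->su down towards the stack bottom,
 * reversing the frame->link pointers as it goes.  Pass 2 walks back up:
 * whenever two UPDATE_FRAMEs are adjacent, the lower one is eliminated
 * by turning its updatee into an indirection to the surviving updatee
 * (UPD_IND_NOLOCK(updatee_bypass, updatee_keep)), and the stack words in
 * between are slid down by the accumulated displacement.  Lazy black
 * holing is done on the frames we keep, as in threadLazyBlackHole().
 */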
3612 threadSqueezeStack(StgTSO *tso)
3614 lnat displacement = 0;
3615 StgUpdateFrame *frame;
3616 StgUpdateFrame *next_frame; // Temporally next
3617 StgUpdateFrame *prev_frame; // Temporally previous
3619 rtsBool prev_was_update_frame;
3621 StgUpdateFrame *top_frame;
3622 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3624 void printObj( StgClosure *obj ); // from Printer.c
3626 top_frame = tso->su;
3629 bottom = &(tso->stack[tso->stack_size]);
3632 /* There must be at least one frame, namely the STOP_FRAME.
3634 ASSERT((P_)frame < bottom);
3636 /* Walk down the stack, reversing the links between frames so that
3637 * we can walk back up as we squeeze from the bottom. Note that
3638 * next_frame and prev_frame refer to next and previous as they were
3639 * added to the stack, rather than the way we see them in this
3640 * walk. (It makes the next loop less confusing.)
3642 * Stop if we find an update frame pointing to a black hole
3643 * (see comment in threadLazyBlackHole()).
3647 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3648 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3649 prev_frame = frame->link;
3650 frame->link = next_frame;
3655 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3656 printObj((StgClosure *)prev_frame);
3657 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3660 switch (get_itbl(frame)->type) {
3663 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3676 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3678 printObj((StgClosure *)prev_frame);
3681 if (get_itbl(frame)->type == UPDATE_FRAME
3682 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3687 /* Now, we're at the bottom. Frame points to the lowest update
3688 * frame on the stack, and its link actually points to the frame
3689 * above. We have to walk back up the stack, squeezing out empty
3690 * update frames and turning the pointers back around on the way
3693 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3694 * we never want to eliminate it anyway. Just walk one step up
3695 * before starting to squeeze. When you get to the topmost frame,
3696 * remember that there are still some words above it that might have
3703 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3706 * Loop through all of the frames (everything except the very
3707 * bottom). Things are complicated by the fact that we have
3708 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3709 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
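 * ("Empty" means there are no stack words between the current frame and
 * the temporally previous one: after accounting for words already slid
 * by `displacement', the previous frame begins exactly where the current
 * frame ends, which is the test prev_frame == frame_bottom + displacement
 * used below.)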
3711 while (frame != NULL) {
3713 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3714 rtsBool is_update_frame;
3716 next_frame = frame->link;
3717 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3720 * 1. both the previous and current frame are update frames
3721 * 2. the current frame is empty
3723 if (prev_was_update_frame && is_update_frame &&
3724 (P_)prev_frame == frame_bottom + displacement) {
3726 // Now squeeze out the current frame
3727 StgClosure *updatee_keep = prev_frame->updatee;
3728 StgClosure *updatee_bypass = frame->updatee;
3731 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3735 /* Deal with blocking queues. If both updatees have blocked
3736 * threads, then we should merge the queues into the update
3737 * frame that we're keeping.
3739 * Alternatively, we could just wake them up: they'll just go
3740 * straight to sleep on the proper blackhole! This is less code
3741 * and probably less bug prone, although it's probably much
3744 #if 0 // do it properly...
3745 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3746 # error Unimplemented lazy BH warning. (KSW 1999-01)
3748 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3749 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3751 // Sigh. It has one. Don't lose those threads!
3752 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3753 // Urgh. Two queues. Merge them.
3754 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3756 while (keep_tso->link != END_TSO_QUEUE) {
3757 keep_tso = keep_tso->link;
3759 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3762 // For simplicity, just swap the BQ for the BH
3763 P_ temp = updatee_keep;
3765 updatee_keep = updatee_bypass;
3766 updatee_bypass = temp;
3768 // Record the swap in the kept frame (below)
3769 prev_frame->updatee = updatee_keep;
3774 TICK_UPD_SQUEEZED();
3775 /* wasn't there something about update squeezing and ticky to be
3776 * sorted out? oh yes: we aren't counting each enter properly
3777 * in this case. See the log somewhere. KSW 1999-04-21
3779 * Check two things: that the two update frames don't point to
3780 * the same object, and that the updatee_bypass isn't already an
3781 * indirection. Both of these cases only happen when we're in a
3782        * black hole-style loop (and there are multiple update frames
3783 * on the stack pointing to the same closure), but they can both
3784 * screw us up if we don't check.
3786 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3787 // this wakes the threads up
3788 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3791 sp = (P_)frame - 1; // sp = stuff to slide
3792 displacement += sizeofW(StgUpdateFrame);
3795 // No squeeze for this frame
3796 sp = frame_bottom - 1; // Keep the current frame
3798 /* Do lazy black-holing.
3800 if (is_update_frame) {
3801 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3802 if (bh->header.info != &stg_BLACKHOLE_info &&
3803 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3804 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3805 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3806 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3809 /* zero out the slop so that the sanity checker can tell
3810 * where the next closure is.
3813 StgInfoTable *info = get_itbl(bh);
3814 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3815 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3816 * info is used for a different purpose, and it's exactly the
3817 * same size as a BLACKHOLE in any case.
3819 if (info->type != THUNK_SELECTOR) {
3820 for (i = np; i < np + nw; i++) {
3821 ((StgClosure *)bh)->payload[i] = 0;
3826 SET_INFO(bh,&stg_BLACKHOLE_info);
3830 // Fix the link in the current frame (should point to the frame below)
3831 frame->link = prev_frame;
3832 prev_was_update_frame = is_update_frame;
3835 // Now slide all words from sp up to the next frame
3837 if (displacement > 0) {
3838 P_ next_frame_bottom;
3840 if (next_frame != NULL)
3841 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3843 next_frame_bottom = tso->sp - 1;
3847 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3851 while (sp >= next_frame_bottom) {
3852 sp[displacement] = *sp;
3856 (P_)prev_frame = (P_)frame + displacement;
3860 tso->sp += displacement;
3861 tso->su = prev_frame;
3864 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3865 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
3870 /* -----------------------------------------------------------------------------
3873 * We have to prepare for GC - this means doing lazy black holing
3874 * here. We also take the opportunity to do stack squeezing if it's
3876 * -------------------------------------------------------------------------- */
3878 threadPaused(StgTSO *tso)
3880 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3881 threadSqueezeStack(tso); // does black holing too
3883 threadLazyBlackHole(tso);
3886 /* -----------------------------------------------------------------------------
3888 * -------------------------------------------------------------------------- */
3892 printMutOnceList(generation *gen)
3894 StgMutClosure *p, *next;
3896 p = gen->mut_once_list;
3899 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3900 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3901 fprintf(stderr, "%p (%s), ",
3902 p, info_type((StgClosure *)p));
3904 fputc('\n', stderr);
3908 printMutableList(generation *gen)
3910 StgMutClosure *p, *next;
3915 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3916 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3917 fprintf(stderr, "%p (%s), ",
3918 p, info_type((StgClosure *)p));
3920 fputc('\n', stderr);
3923 static inline rtsBool
3924 maybeLarge(StgClosure *closure)
3926 StgInfoTable *info = get_itbl(closure);
3928 /* closure types that may be found on the new_large_objects list;
3929 see scavenge_large */
3930 return (info->type == MUT_ARR_PTRS ||
3931 info->type == MUT_ARR_PTRS_FROZEN ||
3932 info->type == TSO ||
3933 info->type == ARR_WORDS);