/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.122 2001/08/30 10:22:52 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;		// live static objects
StgClosure* scavenged_static_objects;	// static objects scavenged so far
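
/* A minimal illustrative sketch (not part of the collector): consing a
 * static closure onto the front of static_objects, as described above.
 * STATIC_LINK is the macro used later in this file to access a static
 * closure's link field; the helper itself is hypothetical.
 */
#if 0
static void
push_static_object (const StgInfoTable *info, StgClosure *p)
{
    if (STATIC_LINK(info,p) == NULL) {	// not already on the list
	STATIC_LINK(info,p) = static_objects;
	static_objects = p;
    }
}
#endif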
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass
/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
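
/* A sketch of how the depth bound is meant to be used (it mirrors the
 * THUNK_SELECTOR case in evacuate() below): recurse into the selectee
 * only while the counter is below the limit.
 */
#if 0
    if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	thunk_selector_depth++;
	selectee = evacuate(selectee);
	thunk_selector_depth--;
    }
#endif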
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;

static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;		// caller checks mark_stack_full() first
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
	 generation if one exists, or else to-space in the current
	 generation.
       + if the object's generation < N, then evacuate it to to-space
	 in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;
  int st;
  CostCentreStack *prev_CCS;

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif
  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }
  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd = allocBlock();
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	nat bitmap_size;	// in bytes
	bdescr *bitmap_bdescr;
	StgPtr bitmap;

	bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	if (bitmap_size > 0) {
	  bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
				     / BLOCK_SIZE);
	  stp->bitmap = bitmap_bdescr;
	  bitmap = bitmap_bdescr->start;

	  IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
			     bitmap_size, bitmap););

	  // don't forget to fill it with zeros!
	  memset(bitmap, 0, bitmap_size);

	  // for each block in this step, point to its bitmap from the
	  // block descriptor.
	  for (bd=stp->blocks; bd != NULL; bd = bd->link) {
	    bd->u.bitmap = bitmap;
	    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	  }
	}
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	ASSERT(stp->blocks == NULL);
	bd = allocBlock();
	bd->gen_no = g;
	bd->step = stp;
	bd->link = NULL;
	bd->flags = 0;		// *not* a to-space block or a large object
	stp->hp = bd->start;
	stp->hpLim = stp->hp + BLOCK_SIZE_W;
	stp->hp_bd = bd;
	stp->blocks = bd;
	stp->n_blocks = 1;
	new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  // Do the mut-once lists first
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

  /* ToDo: To fix the caf leak, we need to make the commented out
   * parts of this code do something sensible - as described in
   * the CAF document.
   */
  extern void markHugsObjects(void);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   * ------------------------------------------------------------------------- */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
	IF_DEBUG(sanity, checkStaticObjects(static_objects));
	scavenge_static();
    }

    /* When scavenging the older generations: Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      int gen, st;

      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
	  (mark_stack_bdescr != NULL && !mark_stack_empty())) {
	  scavenge_mark_stack();
	  flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
	for (st = generations[gen].n_steps; --st >= 0; ) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  stp = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	    scavenge(stp);
	    flag = rtsTrue;
	  }
	  if (stp->new_large_objects != NULL) {
	    scavenge_large(stp);
	    flag = rtsTrue;
	  }
	}
      }
    }

    if (flag) { goto loop; }
    // must be last...
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++;	// for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected...
      if (g <= N) {

	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	    collected += stp->n_blocks * BLOCK_SIZE_W;
	}
	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	  if (stp->is_compacted) {
	      // for a compacted step, just shift the new to-space
	      // onto the front of the now-compacted existing blocks.
	      for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		  bd->flags &= ~BF_EVACUATED;	// now from-space
	      }
	      // tack the new blocks on the end of the existing blocks
	      if (stp->blocks == NULL) {
		  stp->blocks = stp->to_blocks;
	      } else {
		  for (bd = stp->blocks; bd != NULL; bd = next) {
		      next = bd->link;
		      if (next == NULL) {
			  bd->link = stp->to_blocks;
		      }
		  }
	      }
	      // add the new blocks to the block tally
	      stp->n_blocks += stp->n_to_blocks;
	  } else {
	      freeChain(stp->blocks);
	      stp->blocks = stp->to_blocks;
	      stp->n_blocks = stp->n_to_blocks;
	      for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		  bd->flags &= ~BF_EVACUATED;	// now from-space
	      }
	  }
	  stp->to_blocks = NULL;
	  stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on the
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
	     oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
			  RtsFlags.GcFlags.minAllocAreaSize);

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
	  (RtsFlags.GcFlags.compact ||
	   (max > 0 &&
	    oldest_gen->steps[0].n_blocks >
	    (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
	  oldest_gen->steps[0].is_compacted = 1;
//	  fprintf(stderr,"compaction: on\n", live);
      } else {
	  oldest_gen->steps[0].is_compacted = 0;
//	  fprintf(stderr,"compaction: off\n", live);
      }

      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {
	  if (oldest_gen->steps[0].is_compacted) {
	      if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
	      }
	  } else {
	      if ( (size * (gens - 1) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2);
	      }
	  }
      }

#if 0
      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
	      min_alloc, size, max);
#endif
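
      /* Worked example (illustrative numbers only): with gens = 2 and
       * compaction off, the constraint is size*(gens-1)*2 + min_alloc <= max.
       * For size = 100 blocks and min_alloc = 50 blocks that needs
       * 100*1*2 + 50 = 250 blocks; if max were only 200 blocks, size
       * would be reduced to (200-50) / ((2-1)*2) = 75 blocks.
       */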
      for (g = 0; g < gens; g++) {
	  generations[g].max_blocks = size;
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  // free the bitmaps allocated for any compacted steps
  for (g = 0; g <= N; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->is_compacted && stp->bitmap != NULL) {
	freeGroup(stp->bitmap);
      }
    }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2).
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
	 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;	// signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded();	// approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);
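
      /* Worked example (illustrative numbers only): with a suggested
       * heap of 4096 blocks, needed = 1024 and g0s0_pcnt_kept = 30,
       * this gives blocks = (4096 - 1024) * 100 / 130 = 2363 blocks
       * for the allocation area.
       */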
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }

  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG)	// doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif
  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());
#ifdef PROFILING
  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */

  // restore enclosing cost centre
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
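
/* Illustrative call sites (a sketch; GetRoots is assumed to be the
 * root-marking function the scheduler passes in from Schedule.c):
 */
#if 0
    GarbageCollect(GetRoots, rtsFalse);	// ordinary, possibly minor GC
    GarbageCollect(GetRoots, rtsTrue);	// forced major GC (e.g. performGC)
#endif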
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    new = isAlive(w->key);
    if (new != NULL) {
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	  t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	  next = t->link;
	  *prev = next;
	  continue;
      case ThreadKilled:
      case ThreadComplete:
	  // finished or died.  The thread might still be alive, but we
	  // don't keep it on the all_threads list.  Don't forget to
	  // stub out its global_link field.
	  next = t->global_link;
	  t->global_link = END_TSO_QUEUE;
	  *prev = next;
	  continue;
      default: ;
      }

      if (tmp == NULL) {
	  // not alive (yet): leave this thread on the old_all_threads list.
	  prev = &(t->global_link);
	  next = t->global_link;
      } else {
	  // alive: move this thread onto the all_threads list.
	  next = t->global_link;
	  t->global_link = all_threads;
	  all_threads = t;
	  *prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
    (StgClosure *)w = evacuate((StgClosure *)w);
    *last_w = w;
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  info = get_itbl(p);

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  bd = Bdescr((P_)p);

  // ignore closures in generations that we're not collecting.
  if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
      return p;
  }

  // large objects have an evacuated flag
  if (bd->flags & BF_LARGE) {
      if (bd->flags & BF_EVACUATED) {
	  return p;
      } else {
	  return NULL;
      }
  }

  // check the mark bit for compacted steps
  if (bd->step->is_compacted && is_marked((P_)p,bd)) {
      return p;
  }

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:		// rely on compatible layout with StgInd
  case IND_OLDGEN_PERM:
    // follow indirections
    p = ((StgInd *)p)->indirectee;
    return isAlive(p);

  case EVACUATED:
    // alive!
    return ((StgEvacuated *)p)->evacuee;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      return isAlive(p);
    }

  default:
    // dead
    return NULL;
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
/* Chain a new block onto the to-space of a step when the current block
 * fills up.  (The name of this helper was elided in this excerpt;
 * "addBlock" is assumed to match the calls in copy()/copyPart() below.)
 */
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;
  bd->link = NULL;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  // chain the block on and reset the allocation pointers
  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
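
/* Example use (as in evacuate() below): a BLACKHOLE can be evacuated by
 * copying just its header and reserving the remainder of its space:
 */
#if 0
    to = copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
#endif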
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
	 (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
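
/* Typical use (this pattern appears in the scavenging code below): when
 * an object's pointees could not all be promoted into evac_gen, defer
 * the promotion by recording a MUT_CONS for it.
 */
#if 0
    if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
#endif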
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen     evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen    do nothing
   if  M <  evac_gen    set failed_to_evac flag to indicate that we
			didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
	/* Can't evacuate this object, because it's in a generation
	 * older than the ones we're collecting.  Let's hope that it's
	 * in evac_gen or older, or we will have to arrange to track
	 * this pointer using the mutable list.
	 */
	if (bd->gen_no < evac_gen) {
	    // nope
	    failed_to_evac = rtsTrue;
	    TICK_GC_FAILED_PROMOTION();
	}
	return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
	info = get_itbl(q);
	if (info->type == TSO &&
	    ((StgTSO *)q)->what_next == ThreadRelocated) {
	    q = (StgClosure *)((StgTSO *)q)->link;
	    goto loop;
	}
	evacuate_large((P_)q);
	return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		mark_stack_overflowed = rtsTrue;
		reset_mark_stack();
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
  info = get_itbl(q);

  switch (info->type) {
  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	  (StgChar)w <= MAX_CHARLIKE) {
	  return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	  return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
      return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
	  bd->step->no != 0 &&
	  bd->step->no == generations[bd->gen_no].n_steps-1) {
	  stp = bd->step;
      }
#endif
      return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
      return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
      return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
      to = copy(q,BLACKHOLE_sizeW(),stp);
      recordMutable((StgMutClosure *)to);
      return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
	case CONSTR:
	case CONSTR_1_0:
	case CONSTR_0_1:
	case CONSTR_2_0:
	case CONSTR_1_1:
	case CONSTR_0_2:
	case CONSTR_STATIC:
	  {
	    StgWord offset = info->layout.selector_offset;

	    // check that the size is in range
	    ASSERT(offset <
		   (StgWord32)(selectee_info->layout.payload.ptrs +
			       selectee_info->layout.payload.nptrs));

	    // perform the selection!
	    q = selectee->payload[offset];

	    /* if we're already in to-space, there's no need to continue
	     * with the evacuation, just update the source address with
	     * a pointer to the (evacuated) constructor field.
	     */
	    if (HEAP_ALLOCED(q)) {
	      bdescr *bd = Bdescr((P_)q);
	      if (bd->flags & BF_EVACUATED) {
		if (bd->gen_no < evac_gen) {
		  failed_to_evac = rtsTrue;
		  TICK_GC_FAILED_PROMOTION();
		}
		return q;
	      }
	    }

	    /* otherwise, carry on and evacuate this constructor field,
	     * (but not the constructor itself)
	     */
	    goto loop;
	  }

	case IND:
	case IND_STATIC:
	case IND_PERM:
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	  selectee = ((StgInd *)selectee)->indirectee;
	  goto selector_loop;

	case EVACUATED:
	  selectee = ((StgEvacuated *)selectee)->evacuee;
	  goto selector_loop;

	case THUNK_SELECTOR:
#if 0
	  /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	     something) to go into an infinite loop when the nightly
	     stage2 compiles PrelTup.lhs. */

	  /* we can't recurse indefinitely in evacuate(), so set a
	   * limit on the number of times we can go around this
	   * loop.
	   */
	  if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	      bd = Bdescr((P_)selectee);
	      if (!(bd->flags & BF_EVACUATED)) {
		  thunk_selector_depth++;
		  selectee = evacuate(selectee);
		  thunk_selector_depth--;
		  goto selector_loop;
	      }
	  }
#endif
	  // otherwise, fall through...

	case AP_UPD:
	case THUNK:
	case THUNK_1_0:
	case THUNK_0_1:
	case THUNK_2_0:
	case THUNK_1_1:
	case THUNK_0_2:
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case BLACKHOLE_BQ:
	  // not evaluated yet
	  break;

#if defined(PAR)
	  // a copy of the top-level cases below
	case RBH: // cf. BLACKHOLE_BQ
	  {
	    //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	    to = copy(q,BLACKHOLE_sizeW(),stp);
	    //ToDo: derive size etc from reverted IP
	    //to = copy(q,size,stp);
	    // recordMutable((StgMutClosure *)to);
	    return to;
	  }

	case BLOCKED_FETCH:
	  ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	  to = copy(q,sizeofW(StgBlockedFetch),stp);
	  return to;

	case FETCH_ME:
	  ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
	  to = copy(q,sizeofW(StgFetchMe),stp);
	  return to;

	case FETCH_ME_BQ:
	  ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
	  to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	  return to;
#endif

	default:
	  barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	       (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	&& ((StgIndStatic *)q)->saved_info == NULL
	&& IND_STATIC_LINK((StgClosure *)q) == NULL) {
	IND_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
      // just copy the block
      return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}

/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */
StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

    // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      break;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      break;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      break;

    case STOP_FRAME:
      // all done!
      return dest;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
  }

  return dest;
}
/* -----------------------------------------------------------------------------
   Scavenging
   -------------------------------------------------------------------------- */

static void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
	/* treat MVars specially, because we don't want to evacuate the
	 * mut_link field in the middle of the closure.
	 */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse;	// mutable.
	p += sizeofW(StgMVar);
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
	scavenge_srt(info);
    case CONSTR_2_0:
	((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_0:
	scavenge_srt(info);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;	// MIN_UPD_SIZE
	break;

    case FUN_1_0:
	scavenge_srt(info);
    case CONSTR_1_0:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_1:
	scavenge_srt(info);
	p += sizeofW(StgHeader) + 2;	// MIN_UPD_SIZE
	break;

    case FUN_0_1:
	scavenge_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;

    case FUN_0_2:
    case THUNK_0_2:
	scavenge_srt(info);
	p += sizeofW(StgHeader) + 2;
	break;

    case FUN_1_1:
    case THUNK_1_1:
	scavenge_srt(info);
    case CONSTR_1_1:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case FUN:
    case THUNK:
	scavenge_srt(info);
	// fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }
    case IND_PERM:
      if (stp->gen_no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      // fall through
    case IND_OLDGEN_PERM:
	((StgIndOldGen *)p)->indirectee =
	  evacuate(((StgIndOldGen *)p)->indirectee);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordOldToNewPtrs((StgMutClosure *)p);
	}
	p += sizeofW(StgIndOldGen);
	break;

    case MUT_VAR:
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)p);
	failed_to_evac = rtsFalse;	// mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case MUT_CONS:
	// ignore these
	failed_to_evac = rtsFalse;	// mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	p += BLACKHOLE_sizeW();
	break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
      }
    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }

    case AP_UPD: // same as PAPs
    case PAP:
	/* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }

    case ARR_WORDS:
	// nothing to follow
	p += arr_words_sizeW((StgArrWords *)p);
	break;

    case MUT_ARR_PTRS:
	// follow everything
      {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse;	// mutable anyhow.
	break;
      }
    case MUT_ARR_PTRS_FROZEN:
	// follow everything
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse;	// mutable anyhow.
	p += tso_sizeW(tso);
	break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse;	// mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }

    case FETCH_ME:
	p += sizeofW(StgFetchMe);
	break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif
    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p",
	     info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      failed_to_evac = rtsFalse;
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
	doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    const StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

	q = p;
	switch (info->type) {
	case MVAR:
	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	     */
	{
	    StgMVar *mvar = ((StgMVar *)p);
	    evac_gen = 0;
	    (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;	// mutable.
	    break;
	}

	case FUN_2_0:
	case THUNK_2_0:
	    scavenge_srt(info);
	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_1_0:
	case FUN_1_1:
	case THUNK_1_0:
	case THUNK_1_1:
	    scavenge_srt(info);
	case CONSTR_1_0:
	case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_0_1:
	case FUN_0_2:
	case THUNK_0_1:
	case THUNK_0_2:
	    scavenge_srt(info);
	case CONSTR_0_1:
	case CONSTR_0_2:
	    break;

	case FUN:
	case THUNK:
	    scavenge_srt(info);
	    // fall through

	case CONSTR:
	case WEAK:
	case FOREIGN:
	case STABLE_NAME:
	case BCO:
	{
	    StgPtr end;

	    end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}

	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;

	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    }
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    // ignore these
	    failed_to_evac = rtsFalse;
	    break;
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case ARR_WORDS:
	    break;

	case BLACKHOLE_BQ:
	{
	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    failed_to_evac = rtsFalse;
	    break;
	}

	case THUNK_SELECTOR:
	{
	    StgSelector *s = (StgSelector *)p;
	    s->selectee = evacuate(s->selectee);
	    break;
	}

	case AP_UPD: // same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	{
	    StgPAP* pap = (StgPAP *)p;

	    pap->fun = evacuate(pap->fun);
	    scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	    break;
	}

	case MUT_ARR_PTRS:
	    // follow everything
	{
	    StgPtr next;

	    evac_gen = 0;	// repeatedly mutable
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;	// mutable anyhow.
	    break;
	}

	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	{
	    StgPtr next;

	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}

	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;
	    evac_gen = 0;
	    scavengeTSO(tso);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;
	}
2541 case RBH: // cf. BLACKHOLE_BQ
2544 nat size, ptrs, nonptrs, vhs;
2546 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2548 StgRBH *rbh = (StgRBH *)p;
2549 (StgClosure *)rbh->blocking_queue =
2550 evacuate((StgClosure *)rbh->blocking_queue);
2551 recordMutable((StgMutClosure *)rbh);
2552 failed_to_evac = rtsFalse; // mutable anyhow.
2554 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2555 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2561 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2562 // follow the pointer to the node which is being demanded
2563 (StgClosure *)bf->node =
2564 evacuate((StgClosure *)bf->node);
2565 // follow the link to the rest of the blocking queue
2566 (StgClosure *)bf->link =
2567 evacuate((StgClosure *)bf->link);
2568 if (failed_to_evac) {
2569 failed_to_evac = rtsFalse;
2570 recordMutable((StgMutClosure *)bf);
2573 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2574 bf, info_type((StgClosure *)bf),
2575 bf->node, info_type(bf->node)));
2583 break; // nothing to do in this case
2585 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2587 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2588 (StgClosure *)fmbq->blocking_queue =
2589 evacuate((StgClosure *)fmbq->blocking_queue);
2590 if (failed_to_evac) {
2591 failed_to_evac = rtsFalse;
2592 recordMutable((StgMutClosure *)fmbq);
2595 belch("@@ scavenge: %p (%s) exciting, isn't it",
2596 p, info_type((StgClosure *)p)));
2602 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2606 if (failed_to_evac) {
2607 failed_to_evac = rtsFalse;
2608 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2611 // mark the next bit to indicate "scavenged"
2612 mark(q+1, Bdescr(q));
2614 } // while (!mark_stack_empty())
2616 // start a new linear scan if the mark stack overflowed at some point
2617 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2618 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2619 mark_stack_overflowed = rtsFalse;
2620 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2621 oldgen_scan = oldgen_scan_bd->start;
2624 if (oldgen_scan_bd) {
2625 // push a new thing on the mark stack
2627 // find a closure that is marked but not scavenged, and start scavenging from there.
2629 while (oldgen_scan < oldgen_scan_bd->free
2630 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2634 if (oldgen_scan < oldgen_scan_bd->free) {
2636 // already scavenged?
2637 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2638 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2641 push_mark_stack(oldgen_scan);
2642 // ToDo: bump the linear scan by the actual size of the object
2643 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2647 oldgen_scan_bd = oldgen_scan_bd->link;
2648 if (oldgen_scan_bd != NULL) {
2649 oldgen_scan = oldgen_scan_bd->start;
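/* A minimal sketch of the two-bit marking convention relied on above:
 * in the compacting collector the bit at a closure's address means
 * "marked/live" and the bit at address+1 means "already scavenged"
 * (see mark(q+1, ...) above).  After a mark-stack overflow, the linear
 * scan re-pushes exactly the closures for which the predicate below
 * holds.  is_marked() is the real primitive; the wrapper is
 * illustrative only.
 */
#if 0
static rtsBool
needs_scavenging( StgPtr q, bdescr *bd )
{
    // marked but not yet scavenged?
    return is_marked(q, bd) && !is_marked(q+1, bd);
}
#endif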
2655 /* -----------------------------------------------------------------------------
2656 Scavenge one object.
2658 This is used for objects that are temporarily marked as mutable
2659 because they contain old-to-new generation pointers. Only certain
2660 objects can have this property.
2661 -------------------------------------------------------------------------- */
2663 static rtsBool
2664 scavenge_one(StgPtr p)
2665 {
2666 const StgInfoTable *info;
2667 nat saved_evac_gen = evac_gen;
2668 rtsBool no_luck;
2670 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2671 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2673 info = get_itbl((StgClosure *)p);
2675 switch (info->type) {
2678 case FUN_1_0: // hardly worth specialising these guys
2698 case IND_OLDGEN_PERM:
2702 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2703 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2704 (StgClosure *)*q = evacuate((StgClosure *)*q);
2710 case SE_CAF_BLACKHOLE:
2715 case THUNK_SELECTOR:
2717 StgSelector *s = (StgSelector *)p;
2718 s->selectee = evacuate(s->selectee);
2723 // nothing to follow
2728 // follow everything
2731 evac_gen = 0; // repeatedly mutable
2732 recordMutable((StgMutClosure *)p);
2733 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2734 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2735 (StgClosure *)*p = evacuate((StgClosure *)*p);
2737 evac_gen = saved_evac_gen;
2738 failed_to_evac = rtsFalse;
2742 case MUT_ARR_PTRS_FROZEN:
2744 // follow everything
2747 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2748 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2749 (StgClosure *)*p = evacuate((StgClosure *)*p);
2756 StgTSO *tso = (StgTSO *)p;
2758 evac_gen = 0; // repeatedly mutable
2760 recordMutable((StgMutClosure *)tso);
2761 evac_gen = saved_evac_gen;
2762 failed_to_evac = rtsFalse;
2769 StgPAP* pap = (StgPAP *)p;
2770 pap->fun = evacuate(pap->fun);
2771 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2776 // This might happen if, for instance, a MUT_CONS was pointing to a
2777 // THUNK which has since been updated. The IND_OLDGEN will
2778 // be on the mutable list anyway, so we don't need to do anything here.
2783 barf("scavenge_one: strange object %d", (int)(info->type));
2786 no_luck = failed_to_evac;
2787 failed_to_evac = rtsFalse;
2788 return no_luck;
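/* scavenge_one() answers rtsTrue when it failed to promote everything
 * the object points to; callers use that to decide whether the object
 * must stay registered as a root.  A sketch of the caller-side idiom
 * (see scavenge_mut_once_list() and scavenge_large() below);
 * keep_as_root() is a hypothetical stand-in for "put it back on the
 * appropriate mutable list".
 */
#if 0
if (scavenge_one(p)) {
    keep_as_root(p);	// object still holds old-to-new pointers
}
#endif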
2791 /* -----------------------------------------------------------------------------
2792 Scavenging mutable lists.
2794 We treat the mutable list of each generation > N (i.e. of every
2795 generation older than the ones being collected) as a set of roots.
2796 We also remove non-mutable objects from the mutable lists at this point.
2797 -------------------------------------------------------------------------- */
2800 scavenge_mut_once_list(generation *gen)
2802 const StgInfoTable *info;
2803 StgMutClosure *p, *next, *new_list;
2805 p = gen->mut_once_list;
2806 new_list = END_MUT_LIST;
2807 next = p->mut_link;
2810 failed_to_evac = rtsFalse;
2812 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2814 // make sure the info pointer points into text space
2815 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2816 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2818 info = get_itbl(p);
2820 if (info->type==RBH)
2821 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2823 switch(info->type) {
2826 case IND_OLDGEN_PERM:
2828 /* Try to pull the indirectee into this generation, so we can
2829 * remove the indirection from the mutable list.
2831 ((StgIndOldGen *)p)->indirectee =
2832 evacuate(((StgIndOldGen *)p)->indirectee);
2834 #if 0 && defined(DEBUG)
2835 if (RtsFlags.DebugFlags.gc)
2836 /* Debugging code to print out the size of the thing we just evacuated. */
2840 StgPtr start = gen->steps[0].scan;
2841 bdescr *start_bd = gen->steps[0].scan_bd;
2843 scavenge(&gen->steps[0]);
2844 if (start_bd != gen->steps[0].scan_bd) {
2845 size += (P_)BLOCK_ROUND_UP(start) - start;
2846 start_bd = start_bd->link;
2847 while (start_bd != gen->steps[0].scan_bd) {
2848 size += BLOCK_SIZE_W;
2849 start_bd = start_bd->link;
2851 size += gen->steps[0].scan -
2852 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2854 size = gen->steps[0].scan - start;
2856 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2860 /* failed_to_evac might happen if we've got more than two
2861 * generations, we're collecting only generation 0, the
2862 * indirection resides in generation 2 and the indirectee is in generation 1. */
2865 if (failed_to_evac) {
2866 failed_to_evac = rtsFalse;
2867 p->mut_link = new_list;
2870 /* the mut_link field of an IND_STATIC is overloaded as the
2871 * static link field too (it just so happens that we don't need
2872 * both at the same time), so we need to NULL it out when
2873 * removing this object from the mutable list because the static
2874 * link fields are all assumed to be NULL before doing a major GC. */
2882 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2883 * it from the mutable list if possible by promoting whatever it points to. */
2886 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2887 /* didn't manage to promote everything, so put the
2888 * MUT_CONS back on the list.
2890 p->mut_link = new_list;
2896 // shouldn't have anything else on the mutables list
2897 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2901 gen->mut_once_list = new_list;
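/* The loop above is a destructive filter over a list threaded through
 * the mut_link fields: nodes that still carry old-to-new pointers are
 * consed onto new_list, everything else simply drops off.  A minimal
 * sketch of the idiom, with a hypothetical still_has_old_to_new()
 * standing in for the per-closure-type logic in the switch:
 */
#if 0
StgMutClosure *p, *next, *new_list = END_MUT_LIST;
for (p = gen->mut_once_list; p != END_MUT_LIST; p = next) {
    next = p->mut_link;
    if (still_has_old_to_new(p)) {	// hypothetical predicate
	p->mut_link = new_list;		// keep: cons onto the new list
	new_list = p;
    }					// otherwise: node just falls off
}
gen->mut_once_list = new_list;
#endif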
2906 scavenge_mutable_list(generation *gen)
2908 const StgInfoTable *info;
2909 StgMutClosure *p, *next;
2911 p = gen->saved_mut_list;
2912 next = p->mut_link;
2915 failed_to_evac = rtsFalse;
2917 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2919 // make sure the info pointer points into text space
2920 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2921 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2923 info = get_itbl(p);
2925 if (info->type==RBH)
2926 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2928 switch(info->type) {
2931 // follow everything
2932 p->mut_link = gen->mut_list;
2937 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2938 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2939 (StgClosure *)*q = evacuate((StgClosure *)*q);
2944 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2945 case MUT_ARR_PTRS_FROZEN:
2950 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2951 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2952 (StgClosure *)*q = evacuate((StgClosure *)*q);
2956 if (failed_to_evac) {
2957 failed_to_evac = rtsFalse;
2958 mkMutCons((StgClosure *)p, gen);
2964 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2965 p->mut_link = gen->mut_list;
2971 StgMVar *mvar = (StgMVar *)p;
2972 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2973 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2974 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2975 p->mut_link = gen->mut_list;
2982 StgTSO *tso = (StgTSO *)p;
2986 /* Don't take this TSO off the mutable list - it might still
2987 * point to some younger objects (because we set evac_gen to 0 above). */
2990 tso->mut_link = gen->mut_list;
2991 gen->mut_list = (StgMutClosure *)tso;
2997 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2998 (StgClosure *)bh->blocking_queue =
2999 evacuate((StgClosure *)bh->blocking_queue);
3000 p->mut_link = gen->mut_list;
3005 /* Happens if a BLACKHOLE_BQ in the old generation is updated: the closure is overwritten with an IND_OLDGEN, which we handle here. */
3008 case IND_OLDGEN_PERM:
3009 /* Try to pull the indirectee into this generation, so we can
3010 * remove the indirection from the mutable list.
3013 ((StgIndOldGen *)p)->indirectee =
3014 evacuate(((StgIndOldGen *)p)->indirectee);
3017 if (failed_to_evac) {
3018 failed_to_evac = rtsFalse;
3019 p->mut_link = gen->mut_once_list;
3020 gen->mut_once_list = p;
3027 // HWL: check whether all of these are necessary
3029 case RBH: // cf. BLACKHOLE_BQ
3031 // nat size, ptrs, nonptrs, vhs;
3033 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3034 StgRBH *rbh = (StgRBH *)p;
3035 (StgClosure *)rbh->blocking_queue =
3036 evacuate((StgClosure *)rbh->blocking_queue);
3037 if (failed_to_evac) {
3038 failed_to_evac = rtsFalse;
3039 recordMutable((StgMutClosure *)rbh);
3041 // ToDo: use size of reverted closure here!
3042 p += BLACKHOLE_sizeW();
3048 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3049 // follow the pointer to the node which is being demanded
3050 (StgClosure *)bf->node =
3051 evacuate((StgClosure *)bf->node);
3052 // follow the link to the rest of the blocking queue
3053 (StgClosure *)bf->link =
3054 evacuate((StgClosure *)bf->link);
3055 if (failed_to_evac) {
3056 failed_to_evac = rtsFalse;
3057 recordMutable((StgMutClosure *)bf);
3059 p += sizeofW(StgBlockedFetch);
3065 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3068 p += sizeofW(StgFetchMe);
3069 break; // nothing to do in this case
3071 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3073 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3074 (StgClosure *)fmbq->blocking_queue =
3075 evacuate((StgClosure *)fmbq->blocking_queue);
3076 if (failed_to_evac) {
3077 failed_to_evac = rtsFalse;
3078 recordMutable((StgMutClosure *)fmbq);
3080 p += sizeofW(StgFetchMeBlockingQueue);
3086 // shouldn't have anything else on the mutables list
3087 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3094 scavenge_static(void)
3096 StgClosure* p = static_objects;
3097 const StgInfoTable *info;
3099 /* Always evacuate straight to the oldest generation for static objects. */
3101 evac_gen = oldest_gen->no;
3103 /* keep going until we've scavenged all the objects on the linked list. */
3105 while (p != END_OF_STATIC_LIST) {
3107 info = get_itbl(p);
3109 if (info->type==RBH)
3110 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3112 // make sure the info pointer points into text space
3113 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3114 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3116 /* Take this object *off* the static_objects list,
3117 * and put it on the scavenged_static_objects list.
3119 static_objects = STATIC_LINK(info,p);
3120 STATIC_LINK(info,p) = scavenged_static_objects;
3121 scavenged_static_objects = p;
3123 switch (info -> type) {
3127 StgInd *ind = (StgInd *)p;
3128 ind->indirectee = evacuate(ind->indirectee);
3130 /* might fail to evacuate it, in which case we have to pop it
3131 * back on the mutable list (and take it off the
3132 * scavenged_static list because the static link and mut link
3133 * pointers are one and the same).
3135 if (failed_to_evac) {
3136 failed_to_evac = rtsFalse;
3137 scavenged_static_objects = IND_STATIC_LINK(p);
3138 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3139 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3153 next = (P_)p->payload + info->layout.payload.ptrs;
3154 // evacuate the pointers
3155 for (q = (P_)p->payload; q < next; q++) {
3156 (StgClosure *)*q = evacuate((StgClosure *)*q);
3162 barf("scavenge_static: strange closure %d", (int)(info->type));
3165 ASSERT(failed_to_evac == rtsFalse);
3167 /* get the next static object from the list. Remember, there might
3168 * be more stuff on this list now that we've done some evacuating!
3169 * (static_objects is a global) */
3171 p = static_objects;
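/* The walk above uses a "pop from one list, push onto another" idiom,
 * threaded through each closure's static link field.  A condensed
 * sketch (STATIC_LINK and the list heads are the real names; the
 * scavenging of p's fields is elided):
 */
#if 0
while (p != END_OF_STATIC_LIST) {
    info = get_itbl(p);
    static_objects = STATIC_LINK(info,p);		// pop p
    STATIC_LINK(info,p) = scavenged_static_objects;	// push p onto
    scavenged_static_objects = p;			// the "done" list
    /* ... evacuate p's pointer fields, which may cons new
     * objects onto static_objects ... */
    p = static_objects;		// so pick up any new arrivals
}
#endif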
3175 /* -----------------------------------------------------------------------------
3176 scavenge_stack walks over a section of stack and evacuates all the
3177 objects pointed to by it. We can use the same code for walking
3178 PAPs, since these are just sections of copied stack.
3179 -------------------------------------------------------------------------- */
3182 scavenge_stack(StgPtr p, StgPtr stack_end)
3183 {
3184 StgPtr q;
3185 const StgInfoTable* info;
3186 StgWord bitmap;
3188 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3191 * Each time around this loop, we are looking at a chunk of stack
3192 * that starts with either a pending argument section or an
3193 * activation record.
3196 while (p < stack_end) {
3199 // If we've got a tag, skip over that many words on the stack
3200 if (IS_ARG_TAG((W_)q)) {
3205 /* Is q a pointer to a closure?
3207 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3209 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3210 ASSERT(closure_STATIC((StgClosure *)q));
3212 // otherwise, must be a pointer into the allocation space.
3215 (StgClosure *)*p = evacuate((StgClosure *)q);
3221 * Otherwise, q must be the info pointer of an activation
3222 * record. All activation records have 'bitmap' style layout info. */
3225 info = get_itbl((StgClosure *)p);
3227 switch (info->type) {
3229 // Dynamic bitmap: the mask is stored on the stack
3231 bitmap = ((StgRetDyn *)p)->liveness;
3232 p = (P_)&((StgRetDyn *)p)->payload[0];
3235 // probably a slow-entry point return address:
3243 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3244 old_p, p, old_p+1));
3246 p++; // what if FHS!=1 !? -- HWL
3251 /* Specialised code for update frames, since they're so common.
3252 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3253 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3257 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3259 p += sizeofW(StgUpdateFrame);
3262 frame->updatee = evacuate(frame->updatee);
3264 #else // specialised code for update frames, not sure if it's worth it.
3266 nat type = get_itbl(frame->updatee)->type;
3268 if (type == EVACUATED) {
3269 frame->updatee = evacuate(frame->updatee);
3272 bdescr *bd = Bdescr((P_)frame->updatee);
3274 if (bd->gen_no > N) {
3275 if (bd->gen_no < evac_gen) {
3276 failed_to_evac = rtsTrue;
3281 // Don't promote blackholes
3283 if (!(stp->gen_no == 0 &&
3285 stp->no == stp->gen->n_steps-1)) {
3292 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3293 sizeofW(StgHeader), stp);
3294 frame->updatee = to;
3297 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3298 frame->updatee = to;
3299 recordMutable((StgMutClosure *)to);
3302 /* will never be SE_{,CAF_}BLACKHOLE, since we
3303 don't push an update frame for single-entry thunks. KSW 1999-01. */
3304 barf("scavenge_stack: UPDATE_FRAME updatee");
3310 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
3317 bitmap = info->layout.bitmap;
3319 // this assumes that the payload starts immediately after the info-ptr
3321 while (bitmap != 0) {
3322 if ((bitmap & 1) == 0) {
3323 (StgClosure *)*p = evacuate((StgClosure *)*p);
3326 bitmap = bitmap >> 1;
3333 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3338 StgLargeBitmap *large_bitmap;
3341 large_bitmap = info->layout.large_bitmap;
3344 for (i=0; i<large_bitmap->size; i++) {
3345 bitmap = large_bitmap->bitmap[i];
3346 q = p + BITS_IN(W_);
3347 while (bitmap != 0) {
3348 if ((bitmap & 1) == 0) {
3349 (StgClosure *)*p = evacuate((StgClosure *)*p);
3352 bitmap = bitmap >> 1;
3354 if (i+1 < large_bitmap->size) {
3356 (StgClosure *)*p = evacuate((StgClosure *)*p);
3362 // and don't forget to follow the SRT
3367 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3372 /*-----------------------------------------------------------------------------
3373 Scavenge the large object list.
3375 evac_gen set by caller; similar games played with evac_gen as with
3376 scavenge() - see comment at the top of scavenge(). Most large
3377 objects are (repeatedly) mutable, so most of the time evac_gen will be zero.
3379 --------------------------------------------------------------------------- */
3382 scavenge_large(step *stp)
3387 bd = stp->new_large_objects;
3389 for (; bd != NULL; bd = stp->new_large_objects) {
3391 /* take this object *off* the large objects list and put it on
3392 * the scavenged large objects list. This is so that we can
3393 * treat new_large_objects as a stack and push new objects on
3394 * the front when evacuating.
3396 stp->new_large_objects = bd->link;
3397 dbl_link_onto(bd, &stp->scavenged_large_objects);
3399 // update the block count in this step.
3400 stp->n_scavenged_large_blocks += bd->blocks;
3403 if (scavenge_one(p)) {
3404 mkMutCons((StgClosure *)p, stp->gen);
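/* new_large_objects really is used as a stack: evacuate() pushes
 * freshly evacuated large objects on the front, and the loop above
 * repeatedly pops the head, so objects that arrive *during* scavenging
 * are not missed.  Condensed sketch of one iteration (the closure
 * sits at bd->start, the start of the block):
 */
#if 0
while ((bd = stp->new_large_objects) != NULL) {
    stp->new_large_objects = bd->link;			// pop
    dbl_link_onto(bd, &stp->scavenged_large_objects);	// file as done
    stp->n_scavenged_large_blocks += bd->blocks;
    if (scavenge_one(bd->start)) {	// still has old-to-new ptrs?
	mkMutCons((StgClosure *)bd->start, stp->gen);
    }
}
#endif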
3409 /* -----------------------------------------------------------------------------
3410 Initialising the static object & mutable lists
3411 -------------------------------------------------------------------------- */
3414 zero_static_object_list(StgClosure* first_static)
3415 {
3416 StgClosure *p, *link;
3418 const StgInfoTable *info;
3420 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3421 info = get_itbl(p);
3422 link = STATIC_LINK(info, p);
3423 STATIC_LINK(info,p) = NULL;
3427 /* This function is only needed because we share the mutable link
3428 * field with the static link field in an IND_STATIC, so we have to
3429 * zero the mut_link field before doing a major GC, which needs the
3430 * static link field.
3432 * It doesn't do any harm to zero all the mutable link fields on the mutable list. */
3437 zero_mutable_list( StgMutClosure *first )
3439 StgMutClosure *next, *c;
3441 for (c = first; c != END_MUT_LIST; c = next) {
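/* Sketch of the zeroing walk used by both functions above: the link
 * must be saved before it is cleared, or the rest of the list is
 * lost.  (zero_list() is an illustrative name; the real loop bodies
 * are abbreviated in this listing.)
 */
#if 0
static void
zero_list( StgMutClosure *first )
{
    StgMutClosure *c, *next;
    for (c = first; c != END_MUT_LIST; c = next) {
	next = c->mut_link;	// save the link first...
	c->mut_link = NULL;	// ...then clear it
    }
}
#endif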
3447 /* -----------------------------------------------------------------------------
3448 Reverting CAFs
3449 -------------------------------------------------------------------------- */
3456 for (c = (StgIndStatic *)caf_list; c != NULL;
3457 c = (StgIndStatic *)c->static_link)
3459 c->header.info = c->saved_info;
3460 c->saved_info = NULL;
3461 // could, but not necessary: c->static_link = NULL;
3467 scavengeCAFs( void )
3472 for (c = (StgIndStatic *)caf_list; c != NULL;
3473 c = (StgIndStatic *)c->static_link)
3475 c->indirectee = evacuate(c->indirectee);
3479 /* -----------------------------------------------------------------------------
3480 Sanity code for CAF garbage collection.
3482 With DEBUG turned on, we manage a CAF list in addition to the SRT
3483 mechanism. After GC, we run down the CAF list and blackhole any
3484 CAFs which have been garbage collected. This means we get an error
3485 whenever the program tries to enter a garbage collected CAF.
3487 Any garbage collected CAFs are taken off the CAF list at the same time.
3489 -------------------------------------------------------------------------- */
3491 #if 0 && defined(DEBUG)
3498 const StgInfoTable *info;
3509 ASSERT(info->type == IND_STATIC);
3511 if (STATIC_LINK(info,p) == NULL) {
3512 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3514 SET_INFO(p,&stg_BLACKHOLE_info);
3515 p = STATIC_LINK2(info,p);
3519 pp = &STATIC_LINK2(info,p);
3526 // belch("%d CAFs live", i);
3531 /* -----------------------------------------------------------------------------
3532 Lazy black holing
3534 Whenever a thread returns to the scheduler after possibly doing
3535 some work, we have to run down the stack and black-hole all the
3536 closures referred to by update frames.
3537 -------------------------------------------------------------------------- */
3540 threadLazyBlackHole(StgTSO *tso)
3542 StgUpdateFrame *update_frame;
3543 StgBlockingQueue *bh;
3546 stack_end = &tso->stack[tso->stack_size];
3547 update_frame = tso->su;
3550 switch (get_itbl(update_frame)->type) {
3553 update_frame = ((StgCatchFrame *)update_frame)->link;
3557 bh = (StgBlockingQueue *)update_frame->updatee;
3559 /* if the thunk is already blackholed, it means we've also
3560 * already blackholed the rest of the thunks on this stack,
3561 * so we can stop early.
3563 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3564 * don't interfere with this optimisation.
3566 if (bh->header.info == &stg_BLACKHOLE_info) {
3570 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3571 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3572 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3573 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3575 SET_INFO(bh,&stg_BLACKHOLE_info);
3578 update_frame = update_frame->link;
3582 update_frame = ((StgSeqFrame *)update_frame)->link;
3588 barf("threadPaused");
3594 /* -----------------------------------------------------------------------------
3595 * Stack squeezing
3597 * Code largely pinched from old RTS, then hacked to bits. We also do
3598 * lazy black holing here.
3600 * -------------------------------------------------------------------------- */
3603 threadSqueezeStack(StgTSO *tso)
3605 lnat displacement = 0;
3606 StgUpdateFrame *frame;
3607 StgUpdateFrame *next_frame; // Temporally next
3608 StgUpdateFrame *prev_frame; // Temporally previous
3609 StgPtr bottom, sp;
3610 rtsBool prev_was_update_frame;
3612 StgUpdateFrame *top_frame;
3613 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0, bhs=0, squeezes=0;
3615 void printObj( StgClosure *obj ); // from Printer.c
3617 top_frame = tso->su;
3620 bottom = &(tso->stack[tso->stack_size]);
3623 /* There must be at least one frame, namely the STOP_FRAME.
3625 ASSERT((P_)frame < bottom);
3627 /* Walk down the stack, reversing the links between frames so that
3628 * we can walk back up as we squeeze from the bottom. Note that
3629 * next_frame and prev_frame refer to next and previous as they were
3630 * added to the stack, rather than the way we see them in this
3631 * walk. (It makes the next loop less confusing.)
3633 * Stop if we find an update frame pointing to a black hole
3634 * (see comment in threadLazyBlackHole()).
3637 next_frame = NULL;
3638 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3639 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3640 prev_frame = frame->link;
3641 frame->link = next_frame;
3646 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3647 printObj((StgClosure *)prev_frame);
3648 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3651 switch (get_itbl(frame)->type) {
3654 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3667 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3669 printObj((StgClosure *)prev_frame);
3672 if (get_itbl(frame)->type == UPDATE_FRAME
3673 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3678 /* Now, we're at the bottom. Frame points to the lowest update
3679 * frame on the stack, and its link actually points to the frame
3680 * above. We have to walk back up the stack, squeezing out empty
3681 * update frames and turning the pointers back around on the way
3684 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3685 * we never want to eliminate it anyway. Just walk one step up
3686 * before starting to squeeze. When you get to the topmost frame,
3687 * remember that there are still some words above it that might have to be moved. */
3694 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3697 * Loop through all of the frames (everything except the very
3698 * bottom). Things are complicated by the fact that we have
3699 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3700 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3702 while (frame != NULL) {
3704 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3705 rtsBool is_update_frame;
3707 next_frame = frame->link;
3708 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3710 /* We can squeeze out the current frame when:
3711 * 1. both the previous and current frame are update frames
3712 * 2. the current frame is empty. */
3714 if (prev_was_update_frame && is_update_frame &&
3715 (P_)prev_frame == frame_bottom + displacement) {
3717 // Now squeeze out the current frame
3718 StgClosure *updatee_keep = prev_frame->updatee;
3719 StgClosure *updatee_bypass = frame->updatee;
3722 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3726 /* Deal with blocking queues. If both updatees have blocked
3727 * threads, then we should merge the queues into the update
3728 * frame that we're keeping.
3730 * Alternatively, we could just wake them up: they'll just go
3731 * straight to sleep on the proper blackhole! This is less code
3732 * and probably less bug prone, although it's probably much slower. */
3735 #if 0 // do it properly...
3736 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3737 # error Unimplemented lazy BH warning. (KSW 1999-01)
3739 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3740 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3742 // Sigh. It has one. Don't lose those threads!
3743 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3744 // Urgh. Two queues. Merge them.
3745 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3747 while (keep_tso->link != END_TSO_QUEUE) {
3748 keep_tso = keep_tso->link;
3750 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3753 // For simplicity, just swap the BQ for the BH
3754 P_ temp = updatee_keep;
3756 updatee_keep = updatee_bypass;
3757 updatee_bypass = temp;
3759 // Record the swap in the kept frame (below)
3760 prev_frame->updatee = updatee_keep;
3765 TICK_UPD_SQUEEZED();
3766 /* wasn't there something about update squeezing and ticky to be
3767 * sorted out? oh yes: we aren't counting each enter properly
3768 * in this case. See the log somewhere. KSW 1999-04-21
3770 * Check two things: that the two update frames don't point to
3771 * the same object, and that the updatee_bypass isn't already an
3772 * indirection. Both of these cases only happen when we're in a
3773 * block hole-style loop (and there are multiple update frames
3774 * on the stack pointing to the same closure), but they can both
3775 * screw us up if we don't check.
3777 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3778 // this wakes the threads up
3779 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3782 sp = (P_)frame - 1; // sp = stuff to slide
3783 displacement += sizeofW(StgUpdateFrame);
3786 // No squeeze for this frame
3787 sp = frame_bottom - 1; // Keep the current frame
3789 /* Do lazy black-holing.
3791 if (is_update_frame) {
3792 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3793 if (bh->header.info != &stg_BLACKHOLE_info &&
3794 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3795 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3796 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3797 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3800 /* zero out the slop so that the sanity checker can tell
3801 * where the next closure is.
3804 StgInfoTable *info = get_itbl(bh);
3805 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3806 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3807 * info is used for a different purpose, and it's exactly the
3808 * same size as a BLACKHOLE in any case.
3810 if (info->type != THUNK_SELECTOR) {
3811 for (i = np; i < np + nw; i++) {
3812 ((StgClosure *)bh)->payload[i] = 0;
3817 SET_INFO(bh,&stg_BLACKHOLE_info);
3821 // Fix the link in the current frame (should point to the frame below)
3822 frame->link = prev_frame;
3823 prev_was_update_frame = is_update_frame;
3826 // Now slide all words from sp up to the next frame
3828 if (displacement > 0) {
3829 P_ next_frame_bottom;
3831 if (next_frame != NULL)
3832 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3834 next_frame_bottom = tso->sp - 1;
3838 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3842 while (sp >= next_frame_bottom) {
3843 sp[displacement] = *sp;
3847 (P_)prev_frame = (P_)frame + displacement;
3851 tso->sp += displacement;
3852 tso->su = prev_frame;
3855 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3856 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
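/* The slide near the end of the function is an overlapping copy done
 * highest-address first: every word between the next frame and the
 * squeezed region moves towards the stack bottom (higher addresses,
 * since the stack grows downwards) by `displacement' words, and the
 * stack top then rises by the same amount.  The core of it:
 */
#if 0
while (sp >= next_frame_bottom) {	// highest word first
    sp[displacement] = *sp;		// copy up by the squeezed amount
    sp--;
}
tso->sp += displacement;	// displacement words were freed
#endif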
3861 /* -----------------------------------------------------------------------------
3862 * Pausing a thread
3864 * We have to prepare for GC - this means doing lazy black holing
3865 * here. We also take the opportunity to do stack squeezing if it's worthwhile.
3867 * -------------------------------------------------------------------------- */
3869 threadPaused(StgTSO *tso)
3871 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3872 threadSqueezeStack(tso); // does black holing too
3874 threadLazyBlackHole(tso);
3877 /* -----------------------------------------------------------------------------
3878 * Debugging
3879 * -------------------------------------------------------------------------- */
3883 printMutOnceList(generation *gen)
3885 StgMutClosure *p, *next;
3887 p = gen->mut_once_list;
3888 next = p->mut_link;
3890 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3891 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3892 fprintf(stderr, "%p (%s), ",
3893 p, info_type((StgClosure *)p));
3895 fputc('\n', stderr);
3899 printMutableList(generation *gen)
3901 StgMutClosure *p, *next;
3903 p = gen->mut_list;
3904 next = p->mut_link;
3906 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3907 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3908 fprintf(stderr, "%p (%s), ",
3909 p, info_type((StgClosure *)p));
3911 fputc('\n', stderr);
3914 static inline rtsBool
3915 maybeLarge(StgClosure *closure)
3917 StgInfoTable *info = get_itbl(closure);
3919 /* closure types that may be found on the new_large_objects list;
3920 see scavenge_large */
3921 return (info->type == MUT_ARR_PTRS ||
3922 info->type == MUT_ARR_PTRS_FROZEN ||
3923 info->type == TSO ||
3924 info->type == ARR_WORDS);