/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.111 2001/07/30 12:54:12 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;		// live static objects
StgClosure* scavenged_static_objects;	// static objects scavenged so far
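/* For illustration, consing an object onto the list and the membership
 * test look like this (a sketch only; the real code is inlined at the
 * *_STATIC cases of evacuate() below):
 */
#if 0
static void
add_static_object( StgClosure *q, const StgInfoTable *info )
{
    if (STATIC_LINK(info,q) == NULL) {	 // link == NULL: not on the list yet
	STATIC_LINK(info,q) = static_objects;	// cons onto the front
	static_objects = q;
    }
}
#endif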
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
StgWeak *old_weak_ptr_list;		// also pending finaliser list
static rtsBool weak_done;		// all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;
/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
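// e.g. a deeply nested chain of selections such as  fst (fst (fst ...))
// would otherwise make evacuate() recurse once per selector thunk; the
// depth cap above bounds that recursion.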
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );
#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}
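// The matching pop operation (counterpart to push_mark_stack; used by
// scavenge_mark_stack() below; mark_sp points one past the top):
static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}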
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
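/* A condensed sketch of the scavenging fixpoint described above (for
 * illustration only; scavenge_until_done is a hypothetical name, and the
 * real loop in GarbageCollect() below also handles static objects, large
 * objects, the mark stack and weak pointers):
 */
#if 0
static void
scavenge_until_done( void )
{
    rtsBool work;
    nat g, s;
    do {
	work = rtsFalse;
	for (g = 0; g <= N; g++) {
	    for (s = 0; s < generations[g].n_steps; s++) {
		step *stp = &generations[g].steps[s];
		// more to-space appeared since the last scan?
		if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
		    scavenge(stp);
		    work = rtsTrue;
		}
	    }
	}
    } while (work);
}
#endif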
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;
  int gen, st;
  rtsBool flag;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif
#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif
  /* Approximate how much we allocated.
   * ToDo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif
  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }
  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;

  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }
      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd = allocBlock();
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }
      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	nat bitmap_size;	// in bytes
	bdescr *bitmap_bdescr;
	StgPtr bitmap;

	bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
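	// e.g. with 4Kbyte blocks and 32-bit words: each block holds
	// 1024 words, needing 1024 mark bits = 128 bytes of bitmap.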
	if (bitmap_size > 0) {
	  bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
				     / BLOCK_SIZE);
	  stp->bitmap = bitmap_bdescr;
	  bitmap = bitmap_bdescr->start;

	  IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
			     bitmap_size, bitmap););

	  // don't forget to fill it with zeros!
	  memset(bitmap, 0, bitmap_size);

	  // for each block in this step, point to its bitmap from the
	  // block descriptor.
	  for (bd=stp->blocks; bd != NULL; bd = bd->link) {
	    bd->u.bitmap = bitmap;
	    bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	  }
	}
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	ASSERT(stp->blocks == NULL);
	bd = allocBlock();
	bd->gen_no = g;
	bd->step = stp;
	bd->link = NULL;
	bd->flags = 0;		// *not* a to-space block or a large object
	stp->hp = bd->start;
	stp->hpLim = stp->hp + BLOCK_SIZE_W;
	stp->hp_bd = bd;
	stp->blocks = bd;
	stp->n_blocks = 1;
	new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evacuated to gen 2.
   */
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }
  // Do the mut-once lists first
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);
#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;
  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;
  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);
#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   * ---------------------------------------------------------------------- */
 loop:
  flag = rtsFalse;

  // scavenge static objects
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity, checkStaticObjects(static_objects));
    scavenge_static();
  }
  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  // scavenge each step in generations 0..maxgen
  {
    // scavenge objects in compacted generation
    if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
	(mark_stack_bdescr != NULL && !mark_stack_empty())) {
	scavenge_mark_stack();
	flag = rtsTrue;
    }
    for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
      for (st = generations[gen].n_steps; --st >= 0; ) {
	if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	  continue;
	}
	stp = &generations[gen].steps[st];
	evac_gen = gen;
	if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	  scavenge(stp);
	  flag = rtsTrue;
	}
	if (stp->new_large_objects != NULL) {
	  scavenge_large(stp);
	  flag = rtsTrue;
	}
      }
    }

    if (flag) { goto loop; }
  }
  if (traverse_weak_ptr_list()) {	// returns rtsTrue if evacuated something
    goto loop;
  }
#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable(major_gc);

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && RtsFlags.GcFlags.compact) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* Set the maximum blocks for the oldest generation, based on twice
   * the amount of live data now, adjusted to fit the maximum heap
   * size if necessary.
   *
   * This is an approximation, since in the worst case we'll need
   * twice the amount of live data plus whatever space the other
   * generations need.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat blocks = oldest_gen->steps[0].n_blocks +
		   oldest_gen->steps[0].n_large_blocks;

      oldest_gen->max_blocks =
	  stg_max(blocks * RtsFlags.GcFlags.oldGenFactor,
		  RtsFlags.GcFlags.minOldGenSize);
      if (RtsFlags.GcFlags.compact) {
	  if ( oldest_gen->max_blocks >
	       RtsFlags.GcFlags.maxHeapSize *
	       (100 - RtsFlags.GcFlags.pcFreeHeap) / 100 ) {
	      oldest_gen->max_blocks =
		  RtsFlags.GcFlags.maxHeapSize *
		  (100 - RtsFlags.GcFlags.pcFreeHeap) / 100;
	      if (oldest_gen->max_blocks < blocks) {
		  belch("max_blocks: %ld, blocks: %ld, maxHeapSize: %ld",
			oldest_gen->max_blocks, blocks, RtsFlags.GcFlags.maxHeapSize);
		  heapOverflow();
	      }
	  }
      } else {
	  if (oldest_gen->max_blocks > RtsFlags.GcFlags.maxHeapSize / 2) {
	      oldest_gen->max_blocks = RtsFlags.GcFlags.maxHeapSize / 2;
	      if (((int)oldest_gen->max_blocks - (int)blocks) <
		  (RtsFlags.GcFlags.pcFreeHeap *
		   RtsFlags.GcFlags.maxHeapSize / 200)) {
		  heapOverflow();
	      }
	  }
      }
  }
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++;	// for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];
      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }
      // for generations we collected...
      if (g <= N) {

	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	    collected += stp->n_blocks * BLOCK_SIZE_W;
	}
	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large_objects, having been moved during garbage
	 * collection from large_objects.  Any objects left on the
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;
	/* Set the maximum blocks for this generation, interpolating
	 * between the maximum size of the oldest and youngest
	 * generations.
	 *
	 *   max_blocks = oldgen_max_blocks * g / (n_generations - 1)
	 */
	if (g < RtsFlags.GcFlags.generations-1) {
	  generations[g].max_blocks = (oldest_gen->max_blocks * g)
	       / (RtsFlags.GcFlags.generations-1);
	} else {
	  generations[g].max_blocks = oldest_gen->max_blocks;
	}
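	// e.g. with 5 generations and oldest_gen->max_blocks = 1000,
	// generation 2 would get a maximum of 1000 * 2 / 4 = 500 blocks.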
      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  // Guess the amount of live data for stats.
  live = calcLive();
  /* Free the small objects allocated via allocate(), since they will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }
  // Free any bitmaps.
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }
    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (currently a factor of 2,
     * should be configurable (ToDo)).  Use the blocks from the old
     * nursery if possible, freeing up any left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;	// signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
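    /* Illustrative numbers for the adjustment above: with maxHeapSize =
     * 1000 blocks and 400 live blocks, adjusted_blocks = 1000 - 2*400 =
     * 200, i.e. pc_free = 20; a pcFreeHeap setting of 30 would then
     * trigger heapOverflow().
     */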
  } else {

    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded();	// approx blocks needed at next GC
      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }
      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:  blocks = (suggested - needed) / (1 + g0s0_pcnt_kept/100)
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
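      // e.g. with heapSizeSuggestion = 1000 blocks, needed = 200 and
      // g0s0_pcnt_kept = 25:  blocks = (1000 - 200) * 100 / 125 = 640.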
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }
  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */
  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);
    /* Now, check whether the key is reachable.
     */
    if ((new = isAlive(w->key))) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      next_w = w->link;
      // and put it on the new weak ptr list
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    } else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	next = t->link;
	*prev = next;
	continue;
      case ThreadKilled:
      case ThreadComplete:
	// finished or died.  The thread might still be alive, but we
	// don't keep it on the all_threads list.  Don't forget to
	// stub out its global_link field.
	next = t->global_link;
	t->global_link = END_TSO_QUEUE;
	*prev = next;
	continue;
      default:
	;
      }

      if (tmp == NULL) {
	// not alive (yet): leave this thread on the old_all_threads list.
	prev = &(t->global_link);
	next = t->global_link;
      } else {
	// alive: move this thread onto the all_threads list.
	next = t->global_link;
	t->global_link = all_threads;
	all_threads = t;
	*prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }
    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
    (StgClosure *)w = evacuate((StgClosure *)w);
    *last_w = w;
    last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

 loop:
  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */
  bd = Bdescr((P_)p);

  // ignore closures in generations that we're not collecting.
  if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
    return p;
  }

  // large objects have an evacuated flag
  if (bd->flags & BF_LARGE) {
    if (bd->flags & BF_EVACUATED) {
      return p;
    } else {
      return NULL;
    }
  }

  // check the mark bit for compacted steps
  if (bd->step->is_compacted && is_marked((P_)p,bd)) {
    return p;
  }
  info = get_itbl(p);

  switch (info->type) {

  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:		// rely on compatible layout with StgInd
  case IND_OLDGEN_PERM:
    // follow indirections
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    // alive!
    return ((StgEvacuated *)p)->evacuee;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }
    // fall through

  default:
    // dead.
    return NULL;
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size>0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_objects list of its step, and linking it on to the (singly-linked)
   new_large_objects list of the destination step, from where it will be
   scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }
  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else {	// first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }
  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
	   if  M > N     do nothing
	   else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
			 didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }
    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
      info = get_itbl(q);
      if (info->type == TSO &&
	  ((StgTSO *)q)->what_next == ThreadRelocated) {
	q = (StgClosure *)((StgTSO *)q)->link;
	goto loop;
      }
      evacuate_large((P_)q);
      return q;
    }
    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
      if (!is_marked((P_)q,bd)) {
	mark((P_)q,bd);
	if (mark_stack_full()) {
	  mark_stack_overflowed = rtsTrue;
	  reset_mark_stack();
	}
	push_mark_stack((P_)q);
      }
      return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL;	// make sure copy() will crash if HEAP_ALLOCED is wrong
#endif
  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  switch (info->type) {

  case MUT_VAR:
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case CONSTR_0_1:
  {
    StgWord w = (StgWord)q->payload[0];
    if (q->header.info == Czh_con_info &&
	// unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	(StgChar)w <= MAX_CHARLIKE) {
      return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
    }
    if (q->header.info == Izh_con_info &&
	(StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
      return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
    }
    // else, fall through ...
  }
  return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);
  case BCO:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord offset = info->layout.selector_offset;

	  // check that the size is in range
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  // perform the selection!
	  q = selectee->payload[offset];

	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->flags & BF_EVACUATED) {
	      if (bd->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}
      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;
      case THUNK_SELECTOR:
#if 0
	/* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	   something) to go into an infinite loop when the nightly
	   stage2 compiles PrelTup.lhs. */

	/* we can't recurse indefinitely in evacuate(), so set a
	 * limit on the number of times we can go around this
	 * loop.
	 */
	if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	  bdescr *bd;
	  bd = Bdescr((P_)selectee);
	  if (!(bd->flags & BF_EVACUATED)) {
	    thunk_selector_depth++;
	    selectee = evacuate(selectee);
	    thunk_selector_depth--;
	    goto selector_loop;
	  }
	}
#endif
	// otherwise, fall through...
      case AP_UPD:
      case THUNK:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
      case SE_BLACKHOLE:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
	// not evaluated yet
	break;

#if defined(PAR)
	// a copy of the top-level cases below
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp);
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

      case FETCH_ME:
	ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;
#endif

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;
  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	&& ((StgIndStatic *)q)->saved_info == NULL
	&& IND_STATIC_LINK((StgClosure *)q) == NULL) {
      IND_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case PAP:
  case AP_UPD:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
    // just copy the block
    return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    // just copy the block
    return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif
1863 barf("evacuate: strange closure type %d", (int)(info->type));
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src;	// In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
      evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}

/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
      || tso->why_blocked == BlockedOnBlackHole
      || tso->why_blocked == BlockedOnException
#if defined(PAR)
      || tso->why_blocked == BlockedOnGA
      || tso->why_blocked == BlockedOnGA_NoSend
#endif
      ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q, next;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {
    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse;	// mutable.
	p += sizeofW(StgMVar);
	break;
      }
    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;	// MIN_UPD_SIZE
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2;	// MIN_UPD_SIZE
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);
      // fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }
    case IND_PERM:
      if (stp->gen_no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      // fall through
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;
    case MUT_VAR:
      evac_gen = 0;
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      evac_gen = saved_evac_gen;
      recordMutable((StgMutClosure *)p);
      failed_to_evac = rtsFalse;	// mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case MUT_CONS:
      // ignore these
      failed_to_evac = rtsFalse;	// mutable anyhow
      p += sizeofW(StgMutVar);
      break;
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;
    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
      }
    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }
    case AP_UPD:	// same as PAPs
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }
    case ARR_WORDS:
      // nothing to follow
      p += arr_words_sizeW((StgArrWords *)p);
      break;
    case MUT_ARR_PTRS:
      // follow everything
      {
	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse;	// mutable anyhow.
	break;
      }
    case MUT_ARR_PTRS_FROZEN:
      // follow everything
      {
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
      }
    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse;	// mutable anyhow.
	p += tso_sizeW(tso);
	break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
#if 0
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse;	// mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }
    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }
    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break;	// nothing to do in this case
    case FETCH_ME_BQ:	// cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }
    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      failed_to_evac = rtsFalse;
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q, next;
    StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

	q = p;
	switch (info->type) {
	case MVAR:
	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	     */
	    {
		StgMVar *mvar = ((StgMVar *)p);
		evac_gen = 0;
		(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
		(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
		(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse;	// mutable.
		break;
	    }
	case FUN_2_0:
	case THUNK_2_0:
	    scavenge_srt(info);
	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_1_0:
	case FUN_1_1:
	case THUNK_1_0:
	case THUNK_1_1:
	    scavenge_srt(info);
	case CONSTR_1_0:
	case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;
	case FUN:
	case THUNK:
	    scavenge_srt(info);
	    // fall through

	case CONSTR:
	case WEAK:
	case FOREIGN:
	case STABLE_NAME:
	case BCO:
	    {
		StgPtr end;

		end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
		for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		break;
	    }
	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    }
	    failed_to_evac = rtsFalse;
	    break;
	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    // ignore these
	    failed_to_evac = rtsFalse;
	    break;
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case ARR_WORDS:
	    break;
	case BLACKHOLE_BQ:
	    {
		StgBlockingQueue *bh = (StgBlockingQueue *)p;
		(StgClosure *)bh->blocking_queue =
		    evacuate((StgClosure *)bh->blocking_queue);
		recordMutable((StgMutClosure *)bh);
		failed_to_evac = rtsFalse;
		break;
	    }
	case THUNK_SELECTOR:
	    {
		StgSelector *s = (StgSelector *)p;
		s->selectee = evacuate(s->selectee);
		break;
	    }
	case AP_UPD:	// same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	    {
		StgPAP* pap = (StgPAP *)p;

		pap->fun = evacuate(pap->fun);
		scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
		break;
	    }
	case MUT_ARR_PTRS:
	    // follow everything
	    {
		evac_gen = 0;		// repeatedly mutable
		next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse;	// mutable anyhow.
		break;
	    }
	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	    {
		next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		break;
	    }
2517 StgTSO *tso = (StgTSO *)p;
2518 evac_gen = 0;
2519 scavengeTSO(tso);
2520 evac_gen = saved_evac_gen;
2521 failed_to_evac = rtsFalse;
2526 case RBH: // cf. BLACKHOLE_BQ
2529 nat size, ptrs, nonptrs, vhs;
2530 char str[80];
2531 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2533 StgRBH *rbh = (StgRBH *)p;
2534 (StgClosure *)rbh->blocking_queue =
2535 evacuate((StgClosure *)rbh->blocking_queue);
2536 recordMutable((StgMutClosure *)rbh);
2537 failed_to_evac = rtsFalse; // mutable anyhow.
2539 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2540 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2546 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2547 // follow the pointer to the node which is being demanded
2548 (StgClosure *)bf->node =
2549 evacuate((StgClosure *)bf->node);
2550 // follow the link to the rest of the blocking queue
2551 (StgClosure *)bf->link =
2552 evacuate((StgClosure *)bf->link);
2553 if (failed_to_evac) {
2554 failed_to_evac = rtsFalse;
2555 recordMutable((StgMutClosure *)bf);
2558 belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
2559 bf, info_type((StgClosure *)bf),
2560 bf->node, info_type(bf->node)));
2568 break; // nothing to do in this case
2570 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2572 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2573 (StgClosure *)fmbq->blocking_queue =
2574 evacuate((StgClosure *)fmbq->blocking_queue);
2575 if (failed_to_evac) {
2576 failed_to_evac = rtsFalse;
2577 recordMutable((StgMutClosure *)fmbq);
2580 belch("@@ scavenge: %p (%s) exciting, isn't it",
2581 p, info_type((StgClosure *)p)));
2587 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2588 (int)(info->type), p);
2591 if (failed_to_evac) {
2592 failed_to_evac = rtsFalse;
2593 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2596 // mark the next bit to indicate "scavenged"
2597 mark(q+1, Bdescr(q));
2599 } // while (!mark_stack_empty())
2601 // start a new linear scan if the mark stack overflowed at some point
2602 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2603 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2604 mark_stack_overflowed = rtsFalse;
2605 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2606 oldgen_scan = oldgen_scan_bd->start;
2609 if (oldgen_scan_bd) {
2610 // push a new thing on the mark stack
2612 // find a closure that is marked but not scavenged, and start
2613 // scavenging it.
2614 while (oldgen_scan < oldgen_scan_bd->free
2615 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2619 if (oldgen_scan < oldgen_scan_bd->free) {
2621 // already scavenged?
2622 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2623 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2626 push_mark_stack(oldgen_scan);
2627 // ToDo: bump the linear scan by the actual size of the object
2628 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2632 oldgen_scan_bd = oldgen_scan_bd->link;
2633 if (oldgen_scan_bd != NULL) {
2634 oldgen_scan = oldgen_scan_bd->start;
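/* A minimal sketch (illustration only, not collector code) of the
 * bitmap convention the linear scan above relies on, assuming the
 * is_marked()/mark() primitives from GCCompact.h: the bit for a
 * closure's first word means "live", and the bit for the word after
 * it is overloaded to mean "already scavenged" (hence the
 * mark(q+1, Bdescr(q)) above).
 */
#if 0 /* illustrative sketch */
static rtsBool
needs_scavenging( StgPtr q, bdescr *bd )
{
    // live, but the "scavenged" bit hasn't been set yet
    return (is_marked(q, bd) && !is_marked(q+1, bd));
}
#endif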
2640 /* -----------------------------------------------------------------------------
2641 Scavenge one object.
2643 This is used for objects that are temporarily marked as mutable
2644 because they contain old-to-new generation pointers. Only certain
2645 objects can have this property.
2646 -------------------------------------------------------------------------- */
2649 scavenge_one(StgPtr p)
2651 const StgInfoTable *info;
2652 nat saved_evac_gen = evac_gen;
2653 rtsBool no_luck;
2655 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2656 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2658 info = get_itbl((StgClosure *)p);
2660 switch (info->type) {
2663 case FUN_1_0: // hardly worth specialising these guys
2683 case IND_OLDGEN_PERM:
2687 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2688 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2689 (StgClosure *)*q = evacuate((StgClosure *)*q);
2695 case SE_CAF_BLACKHOLE:
2700 case THUNK_SELECTOR:
2702 StgSelector *s = (StgSelector *)p;
2703 s->selectee = evacuate(s->selectee);
2708 // nothing to follow
2713 // follow everything
2716 evac_gen = 0; // repeatedly mutable
2717 recordMutable((StgMutClosure *)p);
2718 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2719 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2720 (StgClosure *)*p = evacuate((StgClosure *)*p);
2722 evac_gen = saved_evac_gen;
2723 failed_to_evac = rtsFalse;
2727 case MUT_ARR_PTRS_FROZEN:
2729 // follow everything
2732 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2733 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2734 (StgClosure *)*p = evacuate((StgClosure *)*p);
2741 StgTSO *tso = (StgTSO *)p;
2743 evac_gen = 0; // repeatedly mutable
2744 scavengeTSO(tso);
2745 recordMutable((StgMutClosure *)tso);
2746 evac_gen = saved_evac_gen;
2747 failed_to_evac = rtsFalse;
2754 StgPAP* pap = (StgPAP *)p;
2755 pap->fun = evacuate(pap->fun);
2756 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2761 // This might happen if for instance a MUT_CONS was pointing to a
2762 // THUNK which has since been updated. The IND_OLDGEN will
2763 // be on the mutable list anyway, so we don't need to do anything here.
2768 barf("scavenge_one: strange object %d", (int)(info->type));
2771 no_luck = failed_to_evac;
2772 failed_to_evac = rtsFalse;
2773 return no_luck;
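/* How a caller typically consumes scavenge_one()'s result (a sketch;
 * the real instance is in scavenge_large() below): rtsTrue means some
 * pointee could not be promoted, so the object has to stay in the
 * remembered set of its generation.
 */
#if 0 /* illustrative sketch */
    if (scavenge_one(p)) {
        // p still carries old-to-new pointers: remember it with a MUT_CONS
        mkMutCons((StgClosure *)p, stp->gen);
    }
#endif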
2776 /* -----------------------------------------------------------------------------
2777 Scavenging mutable lists.
2779 We treat the mutable list of each generation > N (i.e. all the
2780 generations older than the one being collected) as roots. We also
2781 remove non-mutable objects from the mutable list at this point.
2782 -------------------------------------------------------------------------- */
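/* A sketch of how the collector drives the two routines below
 * (assumption: modelled loosely on the caller in GarbageCollect() and
 * simplified here): each generation older than N first has its
 * mutable list detached, and then both of its lists are scavenged as
 * roots.
 */
#if 0 /* illustrative sketch */
  { nat g;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        generations[g].saved_mut_list = generations[g].mut_list;
        generations[g].mut_list = END_MUT_LIST;
    }
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mut_once_list(&generations[g]);
        scavenge_mutable_list(&generations[g]);
    }
  }
#endif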
2785 scavenge_mut_once_list(generation *gen)
2787 const StgInfoTable *info;
2788 StgMutClosure *p, *next, *new_list;
2790 p = gen->mut_once_list;
2791 new_list = END_MUT_LIST;
2795 failed_to_evac = rtsFalse;
2797 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2799 // make sure the info pointer is into text space
2800 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2801 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2803 info = get_itbl(p);
2805 if (info->type==RBH)
2806 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2808 switch(info->type) {
2811 case IND_OLDGEN_PERM:
2813 /* Try to pull the indirectee into this generation, so we can
2814 * remove the indirection from the mutable list.
2816 ((StgIndOldGen *)p)->indirectee =
2817 evacuate(((StgIndOldGen *)p)->indirectee);
2819 #if 0 && defined(DEBUG)
2820 if (RtsFlags.DebugFlags.gc)
2821 /* Debugging code to print out the size of the thing we just
2822 * evacuated.
2825 StgPtr start = gen->steps[0].scan;
2826 bdescr *start_bd = gen->steps[0].scan_bd;
2827 nat size = 0;
2828 scavenge(&gen->steps[0]);
2829 if (start_bd != gen->steps[0].scan_bd) {
2830 size += (P_)BLOCK_ROUND_UP(start) - start;
2831 start_bd = start_bd->link;
2832 while (start_bd != gen->steps[0].scan_bd) {
2833 size += BLOCK_SIZE_W;
2834 start_bd = start_bd->link;
2836 size += gen->steps[0].scan -
2837 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2839 size = gen->steps[0].scan - start;
2841 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2845 /* failed_to_evac might happen if we've got more than two
2846 * generations, we're collecting only generation 0, the
2847 * indirection resides in generation 2 and the indirectee is
2848 * in generation 1.
2850 if (failed_to_evac) {
2851 failed_to_evac = rtsFalse;
2852 p->mut_link = new_list;
2855 /* the mut_link field of an IND_STATIC is overloaded as the
2856 * static link field too (it just so happens that we don't need
2857 * both at the same time), so we need to NULL it out when
2858 * removing this object from the mutable list because the static
2859 * link fields are all assumed to be NULL before doing a major
2867 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2868 * it from the mutable list if possible by promoting whatever it
2869 * points to.
2871 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2872 /* didn't manage to promote everything, so put the
2873 * MUT_CONS back on the list.
2875 p->mut_link = new_list;
2881 // shouldn't have anything else on the mutables list
2882 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2886 gen->mut_once_list = new_list;
2891 scavenge_mutable_list(generation *gen)
2893 const StgInfoTable *info;
2894 StgMutClosure *p, *next;
2896 p = gen->saved_mut_list;
2900 failed_to_evac = rtsFalse;
2902 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2904 // make sure the info pointer is into text space
2905 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2906 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2908 info = get_itbl(p);
2910 if (info->type==RBH)
2911 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2913 switch(info->type) {
2916 // follow everything
2917 p->mut_link = gen->mut_list;
2922 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2923 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2924 (StgClosure *)*q = evacuate((StgClosure *)*q);
2929 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2930 case MUT_ARR_PTRS_FROZEN:
2935 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2936 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2937 (StgClosure *)*q = evacuate((StgClosure *)*q);
2941 if (failed_to_evac) {
2942 failed_to_evac = rtsFalse;
2943 mkMutCons((StgClosure *)p, gen);
2949 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2950 p->mut_link = gen->mut_list;
2956 StgMVar *mvar = (StgMVar *)p;
2957 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2958 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2959 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2960 p->mut_link = gen->mut_list;
2967 StgTSO *tso = (StgTSO *)p;
2968 scavengeTSO(tso);
2971 /* Don't take this TSO off the mutable list - it might still
2972 * point to some younger objects (because we set evac_gen to 0
2973 * above).
2975 tso->mut_link = gen->mut_list;
2976 gen->mut_list = (StgMutClosure *)tso;
2982 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2983 (StgClosure *)bh->blocking_queue =
2984 evacuate((StgClosure *)bh->blocking_queue);
2985 p->mut_link = gen->mut_list;
2990 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
2993 case IND_OLDGEN_PERM:
2994 /* Try to pull the indirectee into this generation, so we can
2995 * remove the indirection from the mutable list.
2998 ((StgIndOldGen *)p)->indirectee =
2999 evacuate(((StgIndOldGen *)p)->indirectee);
3002 if (failed_to_evac) {
3003 failed_to_evac = rtsFalse;
3004 p->mut_link = gen->mut_once_list;
3005 gen->mut_once_list = p;
3012 // HWL: check whether all of these are necessary
3014 case RBH: // cf. BLACKHOLE_BQ
3016 // nat size, ptrs, nonptrs, vhs;
3018 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3019 StgRBH *rbh = (StgRBH *)p;
3020 (StgClosure *)rbh->blocking_queue =
3021 evacuate((StgClosure *)rbh->blocking_queue);
3022 if (failed_to_evac) {
3023 failed_to_evac = rtsFalse;
3024 recordMutable((StgMutClosure *)rbh);
3026 // ToDo: use size of reverted closure here!
3027 p += BLACKHOLE_sizeW();
3033 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3034 // follow the pointer to the node which is being demanded
3035 (StgClosure *)bf->node =
3036 evacuate((StgClosure *)bf->node);
3037 // follow the link to the rest of the blocking queue
3038 (StgClosure *)bf->link =
3039 evacuate((StgClosure *)bf->link);
3040 if (failed_to_evac) {
3041 failed_to_evac = rtsFalse;
3042 recordMutable((StgMutClosure *)bf);
3044 p += sizeofW(StgBlockedFetch);
3050 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3053 p += sizeofW(StgFetchMe);
3054 break; // nothing to do in this case
3056 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3058 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3059 (StgClosure *)fmbq->blocking_queue =
3060 evacuate((StgClosure *)fmbq->blocking_queue);
3061 if (failed_to_evac) {
3062 failed_to_evac = rtsFalse;
3063 recordMutable((StgMutClosure *)fmbq);
3065 p += sizeofW(StgFetchMeBlockingQueue);
3071 // shouldn't have anything else on the mutables list
3072 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
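/* The two lists in brief (restating the code above): mut_once_list
 * holds objects that contain old-to-new pointers but will not be
 * mutated again (IND_OLDGEN, MUT_CONS), so an entry can be dropped as
 * soon as its pointee has been promoted.  mut_list holds genuinely
 * mutable objects (MUT_ARR_PTRS, MUT_VAR, MVAR, TSO, ...), which stay
 * remembered for as long as they may be written to again.
 */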
3079 scavenge_static(void)
3081 StgClosure* p = static_objects;
3082 const StgInfoTable *info;
3084 /* Always evacuate straight to the oldest generation for static
3085 * objects.
3086 evac_gen = oldest_gen->no;
3088 /* keep going until we've scavenged all the objects on the linked
3089 * list...
3090 while (p != END_OF_STATIC_LIST) {
3093 info = get_itbl(p);
3094 if (info->type==RBH)
3095 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3097 // make sure the info pointer is into text space
3098 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3099 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3101 /* Take this object *off* the static_objects list,
3102 * and put it on the scavenged_static_objects list.
3104 static_objects = STATIC_LINK(info,p);
3105 STATIC_LINK(info,p) = scavenged_static_objects;
3106 scavenged_static_objects = p;
3108 switch (info -> type) {
3112 StgInd *ind = (StgInd *)p;
3113 ind->indirectee = evacuate(ind->indirectee);
3115 /* might fail to evacuate it, in which case we have to pop it
3116 * back on the mutable list (and take it off the
3117 * scavenged_static list because the static link and mut link
3118 * pointers are one and the same).
3120 if (failed_to_evac) {
3121 failed_to_evac = rtsFalse;
3122 scavenged_static_objects = IND_STATIC_LINK(p);
3123 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3124 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3138 next = (P_)p->payload + info->layout.payload.ptrs;
3139 // evacuate the pointers
3140 for (q = (P_)p->payload; q < next; q++) {
3141 (StgClosure *)*q = evacuate((StgClosure *)*q);
3147 barf("scavenge_static: strange closure %d", (int)(info->type));
3150 ASSERT(failed_to_evac == rtsFalse);
3152 /* get the next static object from the list. Remember, there might
3153 * be more stuff on this list now that we've done some evacuating!
3154 * (static_objects is a global)
3160 /* -----------------------------------------------------------------------------
3161 scavenge_stack walks over a section of stack and evacuates all the
3162 objects pointed to by it. We can use the same code for walking
3163 PAPs, since these are just sections of copied stack.
3164 -------------------------------------------------------------------------- */
3167 scavenge_stack(StgPtr p, StgPtr stack_end)
3170 const StgInfoTable* info;
3173 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3176 * Each time around this loop, we are looking at a chunk of stack
3177 * that starts with either a pending argument section or an
3178 * activation record.
3181 while (p < stack_end) {
3184 // If we've got a tag, skip over that many words on the stack
3185 if (IS_ARG_TAG((W_)q)) {
3190 /* Is q a pointer to a closure?
3192 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3194 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3195 ASSERT(closure_STATIC((StgClosure *)q));
3197 // otherwise, must be a pointer into the allocation space.
3200 (StgClosure *)*p = evacuate((StgClosure *)q);
3206 * Otherwise, q must be the info pointer of an activation
3207 * record. All activation records have 'bitmap' style layout
3208 * info.
3210 info = get_itbl((StgClosure *)p);
3212 switch (info->type) {
3214 // Dynamic bitmap: the mask is stored on the stack
3216 bitmap = ((StgRetDyn *)p)->liveness;
3217 p = (P_)&((StgRetDyn *)p)->payload[0];
3220 // probably a slow-entry point return address:
3228 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3229 old_p, p, old_p+1));
3231 p++; // what if FHS!=1 !? -- HWL
3236 /* Specialised code for update frames, since they're so common.
3237 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3238 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3242 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3244 p += sizeofW(StgUpdateFrame);
3247 frame->updatee = evacuate(frame->updatee);
3249 #else // specialised code for update frames, not sure if it's worth it.
3251 nat type = get_itbl(frame->updatee)->type;
3253 if (type == EVACUATED) {
3254 frame->updatee = evacuate(frame->updatee);
3257 bdescr *bd = Bdescr((P_)frame->updatee);
3259 if (bd->gen_no > N) {
3260 if (bd->gen_no < evac_gen) {
3261 failed_to_evac = rtsTrue;
3266 // Don't promote blackholes
3267 stp = bd->step;
3268 if (!(stp->gen_no == 0 &&
3270 stp->no == stp->gen->n_steps-1)) {
3277 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3278 sizeofW(StgHeader), stp);
3279 frame->updatee = to;
3282 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3283 frame->updatee = to;
3284 recordMutable((StgMutClosure *)to);
3287 /* will never be SE_{,CAF_}BLACKHOLE, since we
3288 don't push an update frame for single-entry thunks. KSW 1999-01. */
3289 barf("scavenge_stack: UPDATE_FRAME updatee");
3295 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
3302 bitmap = info->layout.bitmap;
3304 // this assumes that the payload starts immediately after the info-ptr
3306 while (bitmap != 0) {
3307 if ((bitmap & 1) == 0) {
3308 (StgClosure *)*p = evacuate((StgClosure *)*p);
3311 bitmap = bitmap >> 1;
3318 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3323 StgLargeBitmap *large_bitmap;
3326 large_bitmap = info->layout.large_bitmap;
3329 for (i=0; i<large_bitmap->size; i++) {
3330 bitmap = large_bitmap->bitmap[i];
3331 q = p + BITS_IN(W_);
3332 while (bitmap != 0) {
3333 if ((bitmap & 1) == 0) {
3334 (StgClosure *)*p = evacuate((StgClosure *)*p);
3337 bitmap = bitmap >> 1;
3339 if (i+1 < large_bitmap->size) {
3341 (StgClosure *)*p = evacuate((StgClosure *)*p);
3347 // and don't forget to follow the SRT
3352 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
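/* A sketch (illustration only) of the small-bitmap convention used in
 * the small-bitmap cases above: a *clear* bit means the corresponding
 * stack word is a heap pointer and must be evacuated; a *set* bit
 * means a non-pointer.  Bits are consumed from the least significant
 * end as p advances.
 */
#if 0 /* illustrative sketch */
static StgPtr
scavenge_small_bitmap_sketch( StgPtr p, StgWord bitmap )
{
    while (bitmap != 0) {
        if ((bitmap & 1) == 0) {
            (StgClosure *)*p = evacuate((StgClosure *)*p);
        }
        p++;
        bitmap = bitmap >> 1;
    }
    return p;
}
#endif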
3357 /*-----------------------------------------------------------------------------
3358 scavenge the large object list.
3360 evac_gen set by caller; similar games played with evac_gen as with
3361 scavenge() - see comment at the top of scavenge(). Most large
3362 objects are (repeatedly) mutable, so most of the time evac_gen will
3363 be zero.
3364 --------------------------------------------------------------------------- */
3367 scavenge_large(step *stp)
3372 bd = stp->new_large_objects;
3374 for (; bd != NULL; bd = stp->new_large_objects) {
3376 /* take this object *off* the large objects list and put it on
3377 * the scavenged large objects list. This is so that we can
3378 * treat new_large_objects as a stack and push new objects on
3379 * the front when evacuating.
3381 stp->new_large_objects = bd->link;
3382 dbl_link_onto(bd, &stp->scavenged_large_objects);
3384 // update the block count in this step.
3385 stp->n_scavenged_large_blocks += bd->blocks;
3387 p = bd->start;
3388 if (scavenge_one(p)) {
3389 mkMutCons((StgClosure *)p, stp->gen);
3394 /* -----------------------------------------------------------------------------
3395 Initialising the static object & mutable lists
3396 -------------------------------------------------------------------------- */
3399 zero_static_object_list(StgClosure* first_static)
3403 const StgInfoTable *info;
3405 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3406 info = get_itbl(p);
3407 link = STATIC_LINK(info, p);
3408 STATIC_LINK(info,p) = NULL;
3412 /* This function is only needed because we share the mutable link
3413 * field with the static link field in an IND_STATIC, so we have to
3414 * zero the mut_link field before doing a major GC, which needs the
3415 * static link field.
3417 * It doesn't do any harm to zero all the mutable link fields on the
3418 * mutable list.
3422 zero_mutable_list( StgMutClosure *first )
3424 StgMutClosure *next, *c;
3426 for (c = first; c != END_MUT_LIST; c = next) {
3427 next = c->mut_link;
3428 c->mut_link = NULL;
3432 /* -----------------------------------------------------------------------------
3433    Reverting CAFs
3434    -------------------------------------------------------------------------- */
3437 revertCAFs( void )
3441 for (c = (StgIndStatic *)caf_list; c != NULL;
3442 c = (StgIndStatic *)c->static_link)
3444 c->header.info = c->saved_info;
3445 c->saved_info = NULL;
3446 // could, but not necessary: c->static_link = NULL;
3452 scavengeCAFs( void )
3457 for (c = (StgIndStatic *)caf_list; c != NULL;
3458 c = (StgIndStatic *)c->static_link)
3460 c->indirectee = evacuate(c->indirectee);
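/* To recap the division of labour above: revertCAFs() restores each
 * CAF's original info pointer from saved_info (undoing the
 * indirection), while scavengeCAFs() leaves the indirections intact
 * and merely evacuates the indirectees so that they survive the
 * collection.
 */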
3464 /* -----------------------------------------------------------------------------
3465 Sanity code for CAF garbage collection.
3467 With DEBUG turned on, we manage a CAF list in addition to the SRT
3468 mechanism. After GC, we run down the CAF list and blackhole any
3469 CAFs which have been garbage collected. This means we get an error
3470 whenever the program tries to enter a garbage collected CAF.
3472 Any garbage collected CAFs are taken off the CAF list at the same
3473 time.
3474 -------------------------------------------------------------------------- */
3476 #if 0 && defined(DEBUG)
3483 const StgInfoTable *info;
3494 ASSERT(info->type == IND_STATIC);
3496 if (STATIC_LINK(info,p) == NULL) {
3497 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3499 SET_INFO(p,&stg_BLACKHOLE_info);
3500 p = STATIC_LINK2(info,p);
3504 pp = &STATIC_LINK2(info,p);
3511 // belch("%d CAFs live", i);
3516 /* -----------------------------------------------------------------------------
3517    Lazy black holing
3519 Whenever a thread returns to the scheduler after possibly doing
3520 some work, we have to run down the stack and black-hole all the
3521 closures referred to by update frames.
3522 -------------------------------------------------------------------------- */
3525 threadLazyBlackHole(StgTSO *tso)
3527 StgUpdateFrame *update_frame;
3528 StgBlockingQueue *bh;
3531 stack_end = &tso->stack[tso->stack_size];
3532 update_frame = tso->su;
3535 switch (get_itbl(update_frame)->type) {
3538 update_frame = ((StgCatchFrame *)update_frame)->link;
3542 bh = (StgBlockingQueue *)update_frame->updatee;
3544 /* if the thunk is already blackholed, it means we've also
3545 * already blackholed the rest of the thunks on this stack,
3546 * so we can stop early.
3548 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3549 * don't interfere with this optimisation.
3551 if (bh->header.info == &stg_BLACKHOLE_info) {
3555 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3556 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3557 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3558 belch("Unexpected lazy BHing required at %p",bh);
3560 SET_INFO(bh,&stg_BLACKHOLE_info);
3563 update_frame = update_frame->link;
3567 update_frame = ((StgSeqFrame *)update_frame)->link;
3573 barf("threadLazyBlackHole: strange update frame");
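/* What "black-holing" one updatee amounts to (a sketch, not the code
 * used above, which additionally stops early on an existing
 * BLACKHOLE): overwrite the thunk's info pointer so that a later
 * re-entry is trapped; the closure's size is unchanged.
 */
#if 0 /* illustrative sketch */
static void
blackhole_updatee_sketch( StgClosure *bh )
{
    if (bh->header.info != &stg_BLACKHOLE_info &&
        bh->header.info != &stg_BLACKHOLE_BQ_info &&
        bh->header.info != &stg_CAF_BLACKHOLE_info) {
        SET_INFO(bh, &stg_BLACKHOLE_info);
    }
}
#endif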
3579 /* -----------------------------------------------------------------------------
3580 * Stack squeezing
3581 *
3582 * Code largely pinched from old RTS, then hacked to bits. We also do
3583 * lazy black holing here.
3585 * -------------------------------------------------------------------------- */
3588 threadSqueezeStack(StgTSO *tso)
3590 lnat displacement = 0;
3591 StgUpdateFrame *frame;
3592 StgUpdateFrame *next_frame; // Temporally next
3593 StgUpdateFrame *prev_frame; // Temporally previous
3595 rtsBool prev_was_update_frame;
3597 StgUpdateFrame *top_frame;
3598 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3599 bhs=0, squeezes=0;
3600 void printObj( StgClosure *obj ); // from Printer.c
3602 top_frame = tso->su;
3605 bottom = &(tso->stack[tso->stack_size]);
3606 frame = top_frame;
3608 /* There must be at least one frame, namely the STOP_FRAME.
3610 ASSERT((P_)frame < bottom);
3612 /* Walk down the stack, reversing the links between frames so that
3613 * we can walk back up as we squeeze from the bottom. Note that
3614 * next_frame and prev_frame refer to next and previous as they were
3615 * added to the stack, rather than the way we see them in this
3616 * walk. (It makes the next loop less confusing.)
3618 * Stop if we find an update frame pointing to a black hole
3619 * (see comment in threadLazyBlackHole()).
3621 next_frame = NULL;
3623 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3624 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3625 prev_frame = frame->link;
3626 frame->link = next_frame;
3627 next_frame = frame;
3628 frame = prev_frame;
3631 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3632 printObj((StgClosure *)prev_frame);
3633 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3636 switch (get_itbl(frame)->type) {
3639 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3652 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3654 printObj((StgClosure *)prev_frame);
3657 if (get_itbl(frame)->type == UPDATE_FRAME
3658 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3663 /* Now, we're at the bottom. Frame points to the lowest update
3664 * frame on the stack, and its link actually points to the frame
3665 * above. We have to walk back up the stack, squeezing out empty
3666 * update frames and turning the pointers back around on the way
3669 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3670 * we never want to eliminate it anyway. Just walk one step up
3671 * before starting to squeeze. When you get to the topmost frame,
3672 * remember that there are still some words above it that might have
3673 * to be moved.
3676 prev_frame = frame;
3677 frame = next_frame;
3679 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3682 * Loop through all of the frames (everything except the very
3683 * bottom). Things are complicated by the fact that we have
3684 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3685 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3687 while (frame != NULL) {
3689 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3690 rtsBool is_update_frame;
3692 next_frame = frame->link;
3693 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3695 /* We can squeeze out the current frame if:
3696 * 1. both the previous and current frame are update frames
3697 * 2. the current frame is empty
3699 if (prev_was_update_frame && is_update_frame &&
3700 (P_)prev_frame == frame_bottom + displacement) {
3702 // Now squeeze out the current frame
3703 StgClosure *updatee_keep = prev_frame->updatee;
3704 StgClosure *updatee_bypass = frame->updatee;
3707 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3711 /* Deal with blocking queues. If both updatees have blocked
3712 * threads, then we should merge the queues into the update
3713 * frame that we're keeping.
3715 * Alternatively, we could just wake them up: they'll just go
3716 * straight to sleep on the proper blackhole! This is less code
3717 * and probably less bug prone, although it's probably much
3720 #if 0 // do it properly...
3721 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3722 # error Unimplemented lazy BH warning. (KSW 1999-01)
3724 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3725 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3727 // Sigh. It has one. Don't lose those threads!
3728 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3729 // Urgh. Two queues. Merge them.
3730 StgTSO *keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3732 while (keep_tso->link != END_TSO_QUEUE) {
3733 keep_tso = keep_tso->link;
3735 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3738 // For simplicity, just swap the BQ for the BH
3739 StgClosure *temp = updatee_keep;
3741 updatee_keep = updatee_bypass;
3742 updatee_bypass = temp;
3744 // Record the swap in the kept frame (below)
3745 prev_frame->updatee = updatee_keep;
3750 TICK_UPD_SQUEEZED();
3751 /* wasn't there something about update squeezing and ticky to be
3752 * sorted out? oh yes: we aren't counting each enter properly
3753 * in this case. See the log somewhere. KSW 1999-04-21
3755 * Check two things: that the two update frames don't point to
3756 * the same object, and that the updatee_bypass isn't already an
3757 * indirection. Both of these cases only happen when we're in a
3758 * black hole-style loop (and there are multiple update frames
3759 * on the stack pointing to the same closure), but they can both
3760 * screw us up if we don't check.
3762 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3763 // this wakes the threads up
3764 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3767 sp = (P_)frame - 1; // sp = stuff to slide
3768 displacement += sizeofW(StgUpdateFrame);
3771 // No squeeze for this frame
3772 sp = frame_bottom - 1; // Keep the current frame
3774 /* Do lazy black-holing.
3776 if (is_update_frame) {
3777 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3778 if (bh->header.info != &stg_BLACKHOLE_info &&
3779 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3780 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3781 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3782 belch("Unexpected lazy BHing required at %p",bh);
3785 /* zero out the slop so that the sanity checker can tell
3786 * where the next closure is.
3789 StgInfoTable *info = get_itbl(bh);
3790 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3791 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3792 * info is used for a different purpose, and it's exactly the
3793 * same size as a BLACKHOLE in any case.
3795 if (info->type != THUNK_SELECTOR) {
3796 for (i = np; i < np + nw; i++) {
3797 ((StgClosure *)bh)->payload[i] = 0;
3802 SET_INFO(bh,&stg_BLACKHOLE_info);
3806 // Fix the link in the current frame (should point to the frame below)
3807 frame->link = prev_frame;
3808 prev_was_update_frame = is_update_frame;
3811 // Now slide all words from sp up to the next frame
3813 if (displacement > 0) {
3814 P_ next_frame_bottom;
3816 if (next_frame != NULL)
3817 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3819 next_frame_bottom = tso->sp - 1;
3823 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3824 displacement))
3827 while (sp >= next_frame_bottom) {
3828 sp[displacement] = *sp;
3832 (P_)prev_frame = (P_)frame + displacement;
3836 tso->sp += displacement;
3837 tso->su = prev_frame;
3840 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch-, %d seq-frames",
3841 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
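/* The link-reversal trick used by the first loop above, shown in
 * isolation (a sketch, not RTS code): walk a singly-linked chain of
 * frames while turning each link around to point at the *previous*
 * frame, so the chain can then be re-walked bottom-to-top without any
 * extra space.
 */
#if 0 /* illustrative sketch */
static StgUpdateFrame *
reverse_frame_links_sketch( StgUpdateFrame *frame )
{
    StgUpdateFrame *next, *prev = NULL;
    while (frame != NULL) {
        next = frame->link;     // remember the onward link
        frame->link = prev;     // turn it around
        prev = frame;
        frame = next;
    }
    return prev;                // the far end of the chain
}
#endif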
3846 /* -----------------------------------------------------------------------------
3847 * Pausing a thread
3848 *
3849 * We have to prepare for GC - this means doing lazy black holing
3850 * here. We also take the opportunity to do stack squeezing if it's
3852 * -------------------------------------------------------------------------- */
3854 threadPaused(StgTSO *tso)
3856 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3857 threadSqueezeStack(tso); // does black holing too
3859 threadLazyBlackHole(tso);
3862 /* -----------------------------------------------------------------------------
3863 * Debugging
3864 * -------------------------------------------------------------------------- */
3868 printMutOnceList(generation *gen)
3870 StgMutClosure *p, *next;
3872 p = gen->mut_once_list;
3875 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3876 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3877 fprintf(stderr, "%p (%s), ",
3878 p, info_type((StgClosure *)p));
3880 fputc('\n', stderr);
3884 printMutableList(generation *gen)
3886 StgMutClosure *p, *next;
3891 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3892 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3893 fprintf(stderr, "%p (%s), ",
3894 p, info_type((StgClosure *)p));
3896 fputc('\n', stderr);
3899 static inline rtsBool
3900 maybeLarge(StgClosure *closure)
3902 StgInfoTable *info = get_itbl(closure);
3904 /* closure types that may be found on the new_large_objects list;
3905 see scavenge_large */
3906 return (info->type == MUT_ARR_PTRS ||
3907 info->type == MUT_ARR_PTRS_FROZEN ||
3908 info->type == TSO ||
3909 info->type == ARR_WORDS);