/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.121 2001/08/17 15:46:54 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "PosixSource.h"
#include "StoragePriv.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "BlockAlloc.h"
#include "StablePriv.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
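
/* A minimal sketch (illustrative only) of how a live static object gets
 * consed onto the front of the list via its static link field; this is the
 * same pattern as the CONSTR_STATIC case in evacuate() below:
 *
 *     if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
 *         STATIC_LINK(info,(StgClosure *)q) = static_objects;
 *         static_objects = (StgClosure *)q;
 *     }
 *
 * A NULL link field means "not yet on the list", which is what makes the
 * membership test (and hence loop-breaking) cheap.
 */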
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;
/* Weak pointers
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;

/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
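
// Illustrative example: a lazily-built chain of selections such as
// fst (fst (fst ...)) would otherwise make evacuate() recurse once per
// selector thunk; the depth counter bounds that recursion (see the
// THUNK_SELECTOR case in evacuate() below, currently disabled).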
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );
#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif

/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
static inline rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

static inline rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

static inline void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

static inline void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}

static inline StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either
       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
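
/* The body of GarbageCollect below implements this as a fixpoint
   computation; in outline (illustrative only):

       loop:
         flag = rtsFalse;
         scavenge static objects, the mark stack, and the to-space of
           every step being collected, setting flag if anything moved;
         if (flag) goto loop;
         if (traverse_weak_ptr_list()) goto loop;  // weak ptrs may revive objects
*/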
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;
  int st, gen;
  rtsBool flag;
#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif

  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }

  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      bd = allocBlock();
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	  nat bitmap_size; // in bytes
	  bdescr *bitmap_bdescr;
	  StgPtr bitmap;

	  bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	  if (bitmap_size > 0) {
	      bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
					 / BLOCK_SIZE);
	      stp->bitmap = bitmap_bdescr;
	      bitmap = bitmap_bdescr->start;

	      IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
				 bitmap_size, bitmap););

	      // don't forget to fill it with zeros!
	      memset(bitmap, 0, bitmap_size);

	      // for each block in this step, point to its bitmap from the
	      // block descriptor.
	      for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		  bd->u.bitmap = bitmap;
		  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	      }
	  }
      }
    }
  }
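
  /* Bitmap sizing, worked through (illustrative): the bitmap just allocated
   * uses one bit per word of step memory.  With 4k blocks and 32-bit words,
   * BLOCK_SIZE_W is 1024, so each block needs 1024 bits = 32 words = 128
   * bytes of bitmap -- exactly the per-block increment added to 'bitmap'
   * in the loop above.
   */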
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	  ASSERT(stp->blocks == NULL);
	  bd = allocBlock();
	  bd->gen_no = g;
	  bd->step = stp;
	  bd->link = NULL;
	  bd->flags = 0;	// *not* a to-space block or a large object
	  stp->hp = bd->start;
	  stp->hpLim = stp->hp + BLOCK_SIZE_W;
	  stp->hp_bd = bd;
	  stp->blocks = bd;
	  stp->n_blocks = 1;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp    = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evacuated to gen 2.
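   *
   * For example: if an object in gen 0 is reachable from both gen 2's
   * and gen 3's mutable lists, scavenging gen 3's list first evacuates
   * it straight to gen 3; the opposite order would first copy it to
   * gen 2, and gen 3's reference would then find it already evacuated
   * to a generation that is too young.
   * ----------------------------------------------------------------------- */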
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    generations[g].saved_mut_list = generations[g].mut_list;
    generations[g].mut_list = END_MUT_LIST;
  }

  // Do the mut-once lists first
  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutOnceList(&generations[g]));
    scavenge_mut_once_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
    IF_PAR_DEBUG(verbose,
		 printMutableList(&generations[g]));
    scavenge_mutable_list(&generations[g]);
    evac_gen = g;
    for (st = generations[g].n_steps-1; st >= 0; st--) {
      scavenge(&generations[g].steps[st]);
    }
  }

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(PAR)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */

  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
 loop:
  flag = rtsFalse;

  // scavenge static objects
  if (major_gc && static_objects != END_OF_STATIC_LIST) {
    IF_DEBUG(sanity, checkStaticObjects(static_objects));
    scavenge_static();
  }

  /* When scavenging the older generations:  Objects may have been
   * evacuated from generations <= N into older generations, and we
   * need to scavenge these objects.  We're going to try to ensure that
   * any evacuations that occur move the objects into at least the
   * same generation as the object being scavenged, otherwise we
   * have to create new entries on the mutable list for the older
   * generation.
   */

  // scavenge each step in generations 0..maxgen

  // scavenge objects in compacted generation
  if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
      (mark_stack_bdescr != NULL && !mark_stack_empty())) {
      scavenge_mark_stack();
      flag = rtsTrue;
  }

  for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
    for (st = generations[gen].n_steps; --st >= 0; ) {
      if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }
      stp = &generations[gen].steps[st];
      evac_gen = gen;
      if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	scavenge(stp);
	flag = rtsTrue;
      }
      if (stp->new_large_objects != NULL) {
	scavenge_large(stp);
	flag = rtsTrue;
      }
    }
  }

  if (flag) { goto loop; }

  if (traverse_weak_ptr_list()) { // returns rtsTrue if evacuated something
    goto loop;
  }

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable(major_gc);
  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected...
      if (g <= N) {

	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	    collected += stp->n_blocks * BLOCK_SIZE_W;
	}

	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}

	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else { // for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
	     oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
			  RtsFlags.GcFlags.minAllocAreaSize);

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.compact ||
	  (max > 0 &&
	   oldest_gen->steps[0].n_blocks >
	   (RtsFlags.GcFlags.compactThreshold * max) / 100)) {
	  oldest_gen->steps[0].is_compacted = 1;
//	  fprintf(stderr,"compaction: on\n", live);
      } else {
	  oldest_gen->steps[0].is_compacted = 0;
//	  fprintf(stderr,"compaction: off\n", live);
      }
      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {
	  if (oldest_gen->steps[0].is_compacted) {
	      if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
	      }
	  } else {
	      if ( (size * (gens - 1) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2);
	      }
	  }
      }
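
      /* Worked example (illustrative): with gens = 2, max = 1000 blocks
       * and min_alloc = 200, the non-compacting branch solves
       * size*(gens-1)*2 + min_alloc <= max to give size = 800/2 = 400
       * blocks, while the compacting branch needs no doubling for the
       * oldest generation and gets size = 800/1 = 800 blocks.
       */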
#if 0
      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
	      min_alloc, size, max);
#endif
      for (g = 0; g < gens; g++) {
	  generations[g].max_blocks = size;
      }
  }
  // Guess the amount of live data for stats.
  live = calcLive();
  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }

    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2)
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
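
    /* Worked example (illustrative): with oldGenFactor = 2 and a maximum
     * heap of 1000 blocks, a nursery of L live blocks trips the adjustment
     * below once L * 2 * 2 exceeds 1000, i.e. once L > 250 blocks;
     * adjusted_blocks = maxHeapSize - 2*L then shrinks the nursery so the
     * total footprint heads back towards the ~2L figure quoted above.
     */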
    blocks = g0s0->n_to_blocks;

    if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
	 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	adjusted_blocks = (RtsFlags.GcFlags.pcFreeHeap * RtsFlags.GcFlags.maxHeapSize)/100;
      }
      blocks = adjusted_blocks;

    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  /* Generational collector:
   * If the user has given us a suggested heap size, adjust our
   * allocation area to make best use of the memory available.
   */
  } else {

    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded(); 	// approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);
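
      /* Worked example (illustrative): suggested = 1000 blocks,
       * needed = 200 and g0s0_pcnt_kept = 30 give
       * blocks = (800 * 100) / 130 = 615 blocks for the allocation area.
       */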
      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }

  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }
  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);
  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */
  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
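
/* Illustrative only -- the call site in GarbageCollect() drives this
   function to a fixpoint:

       if (traverse_weak_ptr_list()) { // evacuated something?
	   goto loop;                  // scavenge again, then re-test
       }

   Each round may make more keys reachable, so we keep going until a
   round does no work.
*/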
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);

    /* Now, check whether the key is reachable.
     */
    new = isAlive(w->key);
    if (new != NULL) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
      continue;
    }
    else {
      last_w = &(w->link);
      next_w = w->link;
      continue;
    }
  }

  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);

      if (tmp != NULL) {
	  t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	  next = t->link;
	  *prev = next;
	  continue;
      case ThreadKilled:
      case ThreadComplete:
	  // finished or died.  The thread might still be alive, but we
	  // don't keep it on the all_threads list.  Don't forget to
	  // stub out its global_link field.
	  next = t->global_link;
	  t->global_link = END_TSO_QUEUE;
	  *prev = next;
	  continue;
      default:
	  ;
      }

      if (tmp == NULL) {
	  // not alive (yet): leave this thread on the old_all_threads list.
	  prev = &(t->global_link);
	  next = t->global_link;
      }
      else {
	  // alive: move this thread onto the all_threads list.
	  next = t->global_link;
	  t->global_link = all_threads;
	  all_threads = t;
	  *prev = next;
      }
    }
  }

  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

 loop:
  info = get_itbl(p);

  /* ToDo: for static closures, check the static link field.
   * Problem here is that we sometimes don't set the link field, eg.
   * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
   */

  bd = Bdescr((P_)p);
  // ignore closures in generations that we're not collecting.
  if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
      return p;
  }
  // large objects have an evacuated flag
  if (bd->flags & BF_LARGE) {
      return (bd->flags & BF_EVACUATED) ? p : NULL;
  }
  // check the mark bit for compacted steps
  if (bd->step->is_compacted && is_marked((P_)p,bd)) {
      return p;
  }

  switch (info->type) {
  case IND:
  case IND_STATIC:
  case IND_PERM:
  case IND_OLDGEN:		// rely on compatible layout with StgInd
  case IND_OLDGEN_PERM:
    // follow indirections
    p = ((StgInd *)p)->indirectee;
    goto loop;

  case EVACUATED:
    // alive!
    return ((StgEvacuated *)p)->evacuee;

  case TSO:
    if (((StgTSO *)p)->what_next == ThreadRelocated) {
      p = (StgClosure *)((StgTSO *)p)->link;
      goto loop;
    }

  default:
    // dead.
    return NULL;
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}
static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;
  bd->link = NULL;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}
static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }
  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }
  for (to = stp->hp, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }
  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }
  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }
  for (to = stp->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }
  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
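
/* For example, the BLACKHOLE case in evacuate() below calls
 *     copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
 * only the header is physically copied; the remaining words of the
 * BLACKHOLE are dead, so to-space merely reserves room for them.
 */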
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_alloc_list, and linking it on to the (singly-linked)
   new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
	 (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */

static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
         if  M > N     then (no-op)
         else evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
      return q;
    }

    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
      info = get_itbl(q);
      if (info->type == TSO &&
	  ((StgTSO *)q)->what_next == ThreadRelocated) {
	q = (StgClosure *)((StgTSO *)q)->link;
	goto loop;
      }
      evacuate_large((P_)q);
      return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		mark_stack_overflowed = rtsTrue;
		reset_mark_stack();
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif

  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);
  switch (info -> type) {

  case MUT_VAR:
  case MVAR:
    to = copy(q,sizeW_fromITBL(info),stp);
    recordMutable((StgMutClosure *)to);
    return to;

  case CONSTR_0_1:
  {
    StgWord w = (StgWord)q->payload[0];
    if (q->header.info == Czh_con_info &&
	// unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	(StgChar)w <= MAX_CHARLIKE) {
      return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
    }
    if (q->header.info == Izh_con_info &&
	(StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
      return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
    }
    // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
    return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
    if (bd->gen_no == 0 &&
	bd->step->no != 0 &&
	bd->step->no == generations[bd->gen_no].n_steps-1) {
      stp = bd->step;
    }
#endif
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
    return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN:
  case THUNK:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case FOREIGN:
  case STABLE_NAME:
  case BCO:
    return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
    return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
    to = copy(q,BLACKHOLE_sizeW(),stp);
    recordMutable((StgMutClosure *)to);
    return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
	case CONSTR:
	case CONSTR_1_0:
	case CONSTR_0_1:
	case CONSTR_2_0:
	case CONSTR_1_1:
	case CONSTR_0_2:
	case CONSTR_STATIC:
	  {
	    StgWord offset = info->layout.selector_offset;

	    // check that the size is in range
	    ASSERT(offset <
		   (StgWord32)(selectee_info->layout.payload.ptrs +
			       selectee_info->layout.payload.nptrs));

	    // perform the selection!
	    q = selectee->payload[offset];

	    /* if we're already in to-space, there's no need to continue
	     * with the evacuation, just update the source address with
	     * a pointer to the (evacuated) constructor field.
	     */
	    if (HEAP_ALLOCED(q)) {
	      bdescr *bd = Bdescr((P_)q);
	      if (bd->flags & BF_EVACUATED) {
		if (bd->gen_no < evac_gen) {
		  failed_to_evac = rtsTrue;
		  TICK_GC_FAILED_PROMOTION();
		}
		return q;
	      }
	    }

	    /* otherwise, carry on and evacuate this constructor field,
	     * (but not the constructor itself)
	     */
	    goto loop;
	  }

	case IND:
	case IND_STATIC:
	case IND_PERM:
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	  selectee = ((StgInd *)selectee)->indirectee;
	  goto selector_loop;

	case EVACUATED:
	  selectee = ((StgEvacuated *)selectee)->evacuee;
	  goto selector_loop;

	case THUNK_SELECTOR:
#         if 0
	  /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	     something) to go into an infinite loop when the nightly
	     stage2 compiles PrelTup.lhs. */

	  /* we can't recurse indefinitely in evacuate(), so set a
	   * limit on the number of times we can go around this
	   * loop.
	   */
	  if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	      bdescr *bd;
	      bd = Bdescr((P_)selectee);
	      if (!(bd->flags & BF_EVACUATED)) {
		  thunk_selector_depth++;
		  selectee = evacuate(selectee);
		  thunk_selector_depth--;
		  goto selector_loop;
	      }
	  }
	  // otherwise, fall through...
#         endif
	case THUNK:
	case THUNK_1_0:
	case THUNK_0_1:
	case THUNK_2_0:
	case THUNK_1_1:
	case THUNK_0_2:
	case AP_UPD:
	case BLACKHOLE:
	case SE_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case CAF_BLACKHOLE:
	case BLACKHOLE_BQ:
	  // not evaluated yet
	  break;

#if defined(PAR)
	  // a copy of the top-level cases below
	case RBH: // cf. BLACKHOLE_BQ
	  {
	    //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	    to = copy(q,BLACKHOLE_sizeW(),stp);
	    //ToDo: derive size etc from reverted IP
	    //to = copy(q,size,stp);
	    // recordMutable((StgMutClosure *)to);
	    return to;
	  }

	case BLOCKED_FETCH:
	  ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	  to = copy(q,sizeofW(StgBlockedFetch),stp);
	  return to;

	case FETCH_ME:
	  ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
	  to = copy(q,sizeofW(StgFetchMe),stp);
	  return to;

	case FETCH_ME_BQ:
	  ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
	  to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	  return to;
#endif

	default:
	  barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	       (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	  && ((StgIndStatic *)q)->saved_info == NULL
	  && IND_STATIC_LINK((StgClosure *)q) == NULL) {
	IND_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;
  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) { // optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
    // just copy the block
    return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
    // just copy the block
    return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

      // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those. The code generator
       knows which is which when generating the SRT, so it stores the
       (indirect) reference to the DLL closure in the table by first adding
       one to it.  We check for this here, and undo the addition before
       evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */

static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
      /* treat MVars specially, because we don't want to evacuate the
       * mut_link field in the middle of the closure.
       */
      {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse; // mutable.
	p += sizeofW(StgMVar);
	break;
      }

    case THUNK_2_0:
    case FUN_2_0:
      scavenge_srt(info);
    case CONSTR_2_0:
      ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_0:
      scavenge_srt(info);
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

    case FUN_1_0:
      scavenge_srt(info);
    case CONSTR_1_0:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_1:
      scavenge_srt(info);
      p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
      break;

    case FUN_0_1:
      scavenge_srt(info);
    case CONSTR_0_1:
      p += sizeofW(StgHeader) + 1;
      break;

    case THUNK_0_2:
    case FUN_0_2:
      scavenge_srt(info);
    case CONSTR_0_2:
      p += sizeofW(StgHeader) + 2;
      break;

    case THUNK_1_1:
    case FUN_1_1:
      scavenge_srt(info);
    case CONSTR_1_1:
      ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
      p += sizeofW(StgHeader) + 2;
      break;
    case FUN:
    case THUNK:
      scavenge_srt(info);
      // fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
      {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
      }
    case IND_PERM:
      if (stp->gen_no != 0) {
	SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
      }
      // fall through
    case IND_OLDGEN_PERM:
      ((StgIndOldGen *)p)->indirectee =
	evacuate(((StgIndOldGen *)p)->indirectee);
      if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	recordOldToNewPtrs((StgMutClosure *)p);
      }
      p += sizeofW(StgIndOldGen);
      break;

    case MUT_VAR:
      evac_gen = 0;
      ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
      evac_gen = saved_evac_gen;
      recordMutable((StgMutClosure *)p);
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case MUT_CONS:
      // ignore these
      failed_to_evac = rtsFalse; // mutable anyhow
      p += sizeofW(StgMutVar);
      break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
      p += BLACKHOLE_sizeW();
      break;

    case BLACKHOLE_BQ:
      {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	  evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
      }
    case THUNK_SELECTOR:
      {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
      }

    case AP_UPD: // same as PAPs
    case PAP:
      /* Treat a PAP just like a section of stack, not forgetting to
       * evacuate the function pointer too...
       */
      {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
      }

    case ARR_WORDS:
      // nothing to follow
      p += arr_words_sizeW((StgArrWords *)p);
      break;
    case MUT_ARR_PTRS:
      // follow everything
      {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
      }

    case MUT_ARR_PTRS_FROZEN:
      // follow everything
      {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	  (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
      }

    case TSO:
      {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
      }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
      {
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	  evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse; // mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
      }

    case BLOCKED_FETCH:
      {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	  evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	  evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
      }
    case FETCH_ME:
      p += sizeofW(StgFetchMe);
      break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
      {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	  evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
      }
#endif

    default:
      barf("scavenge: unimplemented/strange closure type %d @ %p",
	   info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
      failed_to_evac = rtsFalse;
      mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */

static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

	q = p;
	switch (info->type) {
	case MVAR:
	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	     */
	    {
		StgMVar *mvar = ((StgMVar *)p);
		evac_gen = 0;
		(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
		(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
		(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
		evac_gen = saved_evac_gen;
		failed_to_evac = rtsFalse; // mutable.
		break;
	    }
	case THUNK_2_0:
	case FUN_2_0:
	    scavenge_srt(info);
	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case THUNK_1_0:
	case THUNK_1_1:
	case FUN_1_0:
	case FUN_1_1:
	    scavenge_srt(info);
	case CONSTR_1_0:
	case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case THUNK_0_1:
	case THUNK_0_2:
	case FUN_0_1:
	case FUN_0_2:
	    scavenge_srt(info);
	case CONSTR_0_1:
	case CONSTR_0_2:
	    break;

	case FUN:
	case THUNK:
	    scavenge_srt(info);
	    // fall through

	case CONSTR:
	case WEAK:
	case FOREIGN:
	case STABLE_NAME:
	case BCO:
	    {
		StgPtr end;

		end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
		for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		break;
	    }
	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;

	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    }
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    // ignore these
	    failed_to_evac = rtsFalse;
	    break;

	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case ARR_WORDS:
	    break;

	case BLACKHOLE_BQ:
	    {
		StgBlockingQueue *bh = (StgBlockingQueue *)p;
		(StgClosure *)bh->blocking_queue =
		    evacuate((StgClosure *)bh->blocking_queue);
		failed_to_evac = rtsFalse;
		break;
	    }

	case THUNK_SELECTOR:
	    {
		StgSelector *s = (StgSelector *)p;
		s->selectee = evacuate(s->selectee);
		break;
	    }

	case AP_UPD: // same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	    {
		StgPAP* pap = (StgPAP *)p;

		pap->fun = evacuate(pap->fun);
		scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
		break;
	    }
2503 // follow everything
2507 evac_gen = 0; // repeatedly mutable
2508 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2509 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2510 (StgClosure *)*p = evacuate((StgClosure *)*p);
2512 evac_gen = saved_evac_gen;
2513 failed_to_evac = rtsFalse; // mutable anyhow.
2517 case MUT_ARR_PTRS_FROZEN:
2518 // follow everything
2522 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2523 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2524 (StgClosure *)*p = evacuate((StgClosure *)*p);
2531 StgTSO *tso = (StgTSO *)p;
2534 evac_gen = saved_evac_gen;
2535 failed_to_evac = rtsFalse;
2540 case RBH: // cf. BLACKHOLE_BQ
2543 nat size, ptrs, nonptrs, vhs;
2545 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2547 StgRBH *rbh = (StgRBH *)p;
2548 (StgClosure *)rbh->blocking_queue =
2549 evacuate((StgClosure *)rbh->blocking_queue);
2550 recordMutable((StgMutClosure *)rbh);
2551 failed_to_evac = rtsFalse; // mutable anyhow.
2553 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2554 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2560 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2561 // follow the pointer to the node which is being demanded
2562 (StgClosure *)bf->node =
2563 evacuate((StgClosure *)bf->node);
2564 // follow the link to the rest of the blocking queue
2565 (StgClosure *)bf->link =
2566 evacuate((StgClosure *)bf->link);
2567 if (failed_to_evac) {
2568 failed_to_evac = rtsFalse;
2569 recordMutable((StgMutClosure *)bf);
2572 belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
2573 bf, info_type((StgClosure *)bf),
2574 bf->node, info_type(bf->node)));
2582 break; // nothing to do in this case
2584 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2586 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2587 (StgClosure *)fmbq->blocking_queue =
2588 evacuate((StgClosure *)fmbq->blocking_queue);
2589 if (failed_to_evac) {
2590 failed_to_evac = rtsFalse;
2591 recordMutable((StgMutClosure *)fmbq);
2594 belch("@@ scavenge: %p (%s) exciting, isn't it",
2595 p, info_type((StgClosure *)p)));
2601 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2605 if (failed_to_evac) {
2606 failed_to_evac = rtsFalse;
2607 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2610 // mark the next bit to indicate "scavenged"
2611 mark(q+1, Bdescr(q));
2613 } // while (!mark_stack_empty())
2615 // start a new linear scan if the mark stack overflowed at some point
2616 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2617 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2618 mark_stack_overflowed = rtsFalse;
2619 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2620 oldgen_scan = oldgen_scan_bd->start;
2623 if (oldgen_scan_bd) {
2624 // push a new thing on the mark stack
2626 // find a closure that is marked but not scavenged, and start scavenging it.
2628 while (oldgen_scan < oldgen_scan_bd->free
2629 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2633 if (oldgen_scan < oldgen_scan_bd->free) {
2635 // already scavenged?
2636 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2637 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2640 push_mark_stack(oldgen_scan);
2641 // ToDo: bump the linear scan by the actual size of the object
2642 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
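#if 0
		/* Sketch only (not compiled), one way to do the ToDo above:
		 * advance the linear scan by the closure's actual size
		 * rather than the minimum, assuming a closure_sizeW()
		 * helper that returns the size in words of an arbitrary
		 * closure.
		 */
		oldgen_scan += closure_sizeW((StgClosure *)oldgen_scan);
#endif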
2646 oldgen_scan_bd = oldgen_scan_bd->link;
2647 if (oldgen_scan_bd != NULL) {
2648 oldgen_scan = oldgen_scan_bd->start;
2654 /* -----------------------------------------------------------------------------
2655 Scavenge one object.
2657 This is used for objects that are temporarily marked as mutable
2658 because they contain old-to-new generation pointers. Only certain
2659 objects can have this property.
2660 -------------------------------------------------------------------------- */
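/* A minimal usage sketch (mirroring the callers later in this file):
 * scavenge_one() returns rtsTrue if it failed to promote everything the
 * object points to, in which case the caller keeps the object mutable:
 *
 *	if (scavenge_one(p)) {
 *	    mkMutCons((StgClosure *)p, stp->gen);  // still has old-to-new ptrs
 *	}
 */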
2663 scavenge_one(StgPtr p)
2665 const StgInfoTable *info;
2666 nat saved_evac_gen = evac_gen;
2669 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2670 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2672 info = get_itbl((StgClosure *)p);
2674 switch (info->type) {
2677 case FUN_1_0: // hardly worth specialising these guys
2697 case IND_OLDGEN_PERM:
2701 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2702 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2703 (StgClosure *)*q = evacuate((StgClosure *)*q);
2709 case SE_CAF_BLACKHOLE:
2714 case THUNK_SELECTOR:
2716 StgSelector *s = (StgSelector *)p;
2717 s->selectee = evacuate(s->selectee);
2722 // nothing to follow
2727 // follow everything
2730 evac_gen = 0; // repeatedly mutable
2731 recordMutable((StgMutClosure *)p);
2732 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2733 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2734 (StgClosure *)*p = evacuate((StgClosure *)*p);
2736 evac_gen = saved_evac_gen;
2737 failed_to_evac = rtsFalse;
2741 case MUT_ARR_PTRS_FROZEN:
2743 // follow everything
2746 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2747 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2748 (StgClosure *)*p = evacuate((StgClosure *)*p);
2755 StgTSO *tso = (StgTSO *)p;
2757 evac_gen = 0; // repeatedly mutable
2759 recordMutable((StgMutClosure *)tso);
2760 evac_gen = saved_evac_gen;
2761 failed_to_evac = rtsFalse;
2768 StgPAP* pap = (StgPAP *)p;
2769 pap->fun = evacuate(pap->fun);
2770 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2775 // This might happen if for instance a MUT_CONS was pointing to a
2776 // THUNK which has since been updated. The IND_OLDGEN will
2777 // be on the mutable list anyway, so we don't need to do anything here.
2782 barf("scavenge_one: strange object %d", (int)(info->type));
2785 no_luck = failed_to_evac;
2786 failed_to_evac = rtsFalse;
2790 /* -----------------------------------------------------------------------------
2791 Scavenging mutable lists.
2793 We treat the mutable list of each generation > N (i.e. all the
2794 generations older than the one being collected) as roots. We also
2795 remove non-mutable objects from the mutable list at this point.
2796 -------------------------------------------------------------------------- */
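/* Illustrative sketch (an assumption about the caller, not code from this
 * function): every generation older than N contributes its mutable lists
 * as roots, roughly:
 *
 *	for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
 *	    scavenge_mut_once_list(&generations[g]);
 *	    scavenge_mutable_list(&generations[g]);
 *	}
 */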
2799 scavenge_mut_once_list(generation *gen)
2801 const StgInfoTable *info;
2802 StgMutClosure *p, *next, *new_list;
2804 p = gen->mut_once_list;
2805 new_list = END_MUT_LIST;
2809 failed_to_evac = rtsFalse;
2811 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2813 // make sure the info pointer is into text space
2814 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2815 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2819 if (info->type==RBH)
2820 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2822 switch(info->type) {
2825 case IND_OLDGEN_PERM:
2827 /* Try to pull the indirectee into this generation, so we can
2828 * remove the indirection from the mutable list.
2830 ((StgIndOldGen *)p)->indirectee =
2831 evacuate(((StgIndOldGen *)p)->indirectee);
2833 #if 0 && defined(DEBUG)
2834 if (RtsFlags.DebugFlags.gc)
2835 /* Debugging code to print out the size of the thing we just promoted */
2839 StgPtr start = gen->steps[0].scan;
2840 bdescr *start_bd = gen->steps[0].scan_bd;
2842 scavenge(&gen->steps[0]);
2843 if (start_bd != gen->steps[0].scan_bd) {
2844 size += (P_)BLOCK_ROUND_UP(start) - start;
2845 start_bd = start_bd->link;
2846 while (start_bd != gen->steps[0].scan_bd) {
2847 size += BLOCK_SIZE_W;
2848 start_bd = start_bd->link;
2850 size += gen->steps[0].scan -
2851 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2853 size = gen->steps[0].scan - start;
2855 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2859 /* failed_to_evac might happen if we've got more than two
2860 * generations, we're collecting only generation 0, the
2861 * indirection resides in generation 2 and the indirectee is in generation 1. */
2864 if (failed_to_evac) {
2865 failed_to_evac = rtsFalse;
2866 p->mut_link = new_list;
2869 /* the mut_link field of an IND_STATIC is overloaded as the
2870 * static link field too (it just so happens that we don't need
2871 * both at the same time), so we need to NULL it out when
2872 * removing this object from the mutable list because the static
2873 * link fields are all assumed to be NULL before doing a major collection. */
2881 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2882 * it from the mutable list if possible by promoting whatever it
2885 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2886 /* didn't manage to promote everything, so put the
2887 * MUT_CONS back on the list.
2889 p->mut_link = new_list;
2895 // shouldn't have anything else on the mutables list
2896 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2900 gen->mut_once_list = new_list;
2905 scavenge_mutable_list(generation *gen)
2907 const StgInfoTable *info;
2908 StgMutClosure *p, *next;
2910 p = gen->saved_mut_list;
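  /* Note (an assumption about the caller): gen->mut_list has already been
   * moved aside into gen->saved_mut_list and reset, so objects that must
   * stay mutable can be re-linked onto the fresh gen->mut_list while we
   * walk the saved copy.
   */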
2914 failed_to_evac = rtsFalse;
2916 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2918 // make sure the info pointer is into text space
2919 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2920 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2924 if (info->type==RBH)
2925 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2927 switch(info->type) {
2930 // follow everything
2931 p->mut_link = gen->mut_list;
2936 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2937 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2938 (StgClosure *)*q = evacuate((StgClosure *)*q);
2943 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2944 case MUT_ARR_PTRS_FROZEN:
2949 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2950 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2951 (StgClosure *)*q = evacuate((StgClosure *)*q);
2955 if (failed_to_evac) {
2956 failed_to_evac = rtsFalse;
2957 mkMutCons((StgClosure *)p, gen);
2963 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2964 p->mut_link = gen->mut_list;
2970 StgMVar *mvar = (StgMVar *)p;
2971 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2972 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2973 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2974 p->mut_link = gen->mut_list;
2981 StgTSO *tso = (StgTSO *)p;
2985 /* Don't take this TSO off the mutable list - it might still
2986 * point to some younger objects (because we set evac_gen to 0 above). */
2989 tso->mut_link = gen->mut_list;
2990 gen->mut_list = (StgMutClosure *)tso;
2996 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2997 (StgClosure *)bh->blocking_queue =
2998 evacuate((StgClosure *)bh->blocking_queue);
2999 p->mut_link = gen->mut_list;
3004 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
3007 case IND_OLDGEN_PERM:
3008 /* Try to pull the indirectee into this generation, so we can
3009 * remove the indirection from the mutable list.
3012 ((StgIndOldGen *)p)->indirectee =
3013 evacuate(((StgIndOldGen *)p)->indirectee);
3016 if (failed_to_evac) {
3017 failed_to_evac = rtsFalse;
3018 p->mut_link = gen->mut_once_list;
3019 gen->mut_once_list = p;
3026 // HWL: check whether all of these are necessary
3028 case RBH: // cf. BLACKHOLE_BQ
3030 // nat size, ptrs, nonptrs, vhs;
3032 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3033 StgRBH *rbh = (StgRBH *)p;
3034 (StgClosure *)rbh->blocking_queue =
3035 evacuate((StgClosure *)rbh->blocking_queue);
3036 if (failed_to_evac) {
3037 failed_to_evac = rtsFalse;
3038 recordMutable((StgMutClosure *)rbh);
3040 // ToDo: use size of reverted closure here!
3041 p += BLACKHOLE_sizeW();
3047 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3048 // follow the pointer to the node which is being demanded
3049 (StgClosure *)bf->node =
3050 evacuate((StgClosure *)bf->node);
3051 // follow the link to the rest of the blocking queue
3052 (StgClosure *)bf->link =
3053 evacuate((StgClosure *)bf->link);
3054 if (failed_to_evac) {
3055 failed_to_evac = rtsFalse;
3056 recordMutable((StgMutClosure *)bf);
3058 p += sizeofW(StgBlockedFetch);
3064 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3067 p += sizeofW(StgFetchMe);
3068 break; // nothing to do in this case
3070 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3072 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3073 (StgClosure *)fmbq->blocking_queue =
3074 evacuate((StgClosure *)fmbq->blocking_queue);
3075 if (failed_to_evac) {
3076 failed_to_evac = rtsFalse;
3077 recordMutable((StgMutClosure *)fmbq);
3079 p += sizeofW(StgFetchMeBlockingQueue);
3085 // shouldn't have anything else on the mutables list
3086 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
3093 scavenge_static(void)
3095 StgClosure* p = static_objects;
3096 const StgInfoTable *info;
3098 /* Always evacuate straight to the oldest generation for static objects. */
3100 evac_gen = oldest_gen->no;
3102 /* keep going until we've scavenged all the objects on the linked list... */
3104 while (p != END_OF_STATIC_LIST) {
3108 if (info->type==RBH)
3109 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3111 // make sure the info pointer is into text space
3112 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3113 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3115 /* Take this object *off* the static_objects list,
3116 * and put it on the scavenged_static_objects list.
3118 static_objects = STATIC_LINK(info,p);
3119 STATIC_LINK(info,p) = scavenged_static_objects;
3120 scavenged_static_objects = p;
3122 switch (info -> type) {
3126 StgInd *ind = (StgInd *)p;
3127 ind->indirectee = evacuate(ind->indirectee);
3129 /* might fail to evacuate it, in which case we have to pop it
3130 * back on the mutable list (and take it off the
3131 * scavenged_static list because the static link and mut link
3132 * pointers are one and the same).
3134 if (failed_to_evac) {
3135 failed_to_evac = rtsFalse;
3136 scavenged_static_objects = IND_STATIC_LINK(p);
3137 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3138 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3152 next = (P_)p->payload + info->layout.payload.ptrs;
3153 // evacuate the pointers
3154 for (q = (P_)p->payload; q < next; q++) {
3155 (StgClosure *)*q = evacuate((StgClosure *)*q);
3161 barf("scavenge_static: strange closure %d", (int)(info->type));
3164 ASSERT(failed_to_evac == rtsFalse);
3166 /* get the next static object from the list. Remember, there might
3167 * be more stuff on this list now that we've done some evacuating!
3168 * (static_objects is a global)
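/* Illustrative sketch (an assumed shape for the driver loop): evacuation
 * elsewhere can add to static_objects, so the collector keeps coming back
 * until the list is empty, roughly:
 *
 *	while (static_objects != END_OF_STATIC_LIST) {
 *	    scavenge_static();
 *	    // ... scavenge to-space, large objects, etc. ...
 *	}
 */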
3174 /* -----------------------------------------------------------------------------
3175 scavenge_stack walks over a section of stack and evacuates all the
3176 objects pointed to by it. We can use the same code for walking
3177 PAPs, since these are just sections of copied stack.
3178 -------------------------------------------------------------------------- */
3181 scavenge_stack(StgPtr p, StgPtr stack_end)
3184 const StgInfoTable* info;
3187 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3190 * Each time around this loop, we are looking at a chunk of stack
3191 * that starts with either a pending argument section or an
3192 * activation record.
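  /* Illustrative chunk (assumed layout, not real data): a pending
   * argument section containing two non-pointer words looks like
   *
   *	... | ARG_TAG(2) | w1 | w2 | info-ptr | ... |
   *
   * The tag makes us skip w1 and w2; a word that looks like an info
   * pointer is then treated as an activation record and scanned
   * according to its bitmap.
   */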
3195 while (p < stack_end) {
3198 // If we've got a tag, skip over that many words on the stack
3199 if (IS_ARG_TAG((W_)q)) {
3204 /* Is q a pointer to a closure?
3206 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3208 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3209 ASSERT(closure_STATIC((StgClosure *)q));
3211 // otherwise, must be a pointer into the allocation space.
3214 (StgClosure *)*p = evacuate((StgClosure *)q);
3220 * Otherwise, q must be the info pointer of an activation
3221 * record. All activation records have 'bitmap' style layout info. */
3224 info = get_itbl((StgClosure *)p);
3226 switch (info->type) {
3228 // Dynamic bitmap: the mask is stored on the stack
3230 bitmap = ((StgRetDyn *)p)->liveness;
3231 p = (P_)&((StgRetDyn *)p)->payload[0];
3234 // probably a slow-entry point return address:
3242 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3243 old_p, p, old_p+1));
3245 p++; // what if FHS!=1 !? -- HWL
3250 /* Specialised code for update frames, since they're so common.
3251 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3252 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3256 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3258 p += sizeofW(StgUpdateFrame);
3261 frame->updatee = evacuate(frame->updatee);
3263 #else // specialised code for update frames, not sure if it's worth it.
3265 nat type = get_itbl(frame->updatee)->type;
3267 if (type == EVACUATED) {
3268 frame->updatee = evacuate(frame->updatee);
3271 bdescr *bd = Bdescr((P_)frame->updatee);
3273 if (bd->gen_no > N) {
3274 if (bd->gen_no < evac_gen) {
3275 failed_to_evac = rtsTrue;
3280 // Don't promote blackholes
3282 if (!(stp->gen_no == 0 &&
3284 stp->no == stp->gen->n_steps-1)) {
3291 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3292 sizeofW(StgHeader), stp);
3293 frame->updatee = to;
3296 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3297 frame->updatee = to;
3298 recordMutable((StgMutClosure *)to);
3301 /* will never be SE_{,CAF_}BLACKHOLE, since we
3302 don't push an update frame for single-entry thunks. KSW 1999-01. */
3303 barf("scavenge_stack: UPDATE_FRAME updatee");
3309 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
3316 bitmap = info->layout.bitmap;
3318 // this assumes that the payload starts immediately after the info-ptr
3320 while (bitmap != 0) {
3321 if ((bitmap & 1) == 0) {
3322 (StgClosure *)*p = evacuate((StgClosure *)*p);
3325 bitmap = bitmap >> 1;
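      /* Worked example (illustrative): with bitmap 0x5 (binary 101) the
       * loop above skips word 0 (bit set => non-pointer), evacuates
       * word 1 (bit clear => pointer), skips word 2, then exits; any
       * frame words beyond the bitmap are picked up again by the outer
       * loop, which evacuates anything that looks like a closure pointer.
       */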
3332 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3337 StgLargeBitmap *large_bitmap;
3340 large_bitmap = info->layout.large_bitmap;
3343 for (i=0; i<large_bitmap->size; i++) {
3344 bitmap = large_bitmap->bitmap[i];
3345 q = p + BITS_IN(W_);
3346 while (bitmap != 0) {
3347 if ((bitmap & 1) == 0) {
3348 (StgClosure *)*p = evacuate((StgClosure *)*p);
3351 bitmap = bitmap >> 1;
3353 if (i+1 < large_bitmap->size) {
3355 (StgClosure *)*p = evacuate((StgClosure *)*p);
3361 // and don't forget to follow the SRT
3366 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3371 /*-----------------------------------------------------------------------------
3372 scavenge the large object list.
3374 evac_gen set by caller; similar games played with evac_gen as with
3375 scavenge() - see comment at the top of scavenge(). Most large
3376 objects are (repeatedly) mutable, so most of the time evac_gen will be zero.
3378 --------------------------------------------------------------------------- */
3381 scavenge_large(step *stp)
3386 bd = stp->new_large_objects;
3388 for (; bd != NULL; bd = stp->new_large_objects) {
3390 /* take this object *off* the large objects list and put it on
3391 * the scavenged large objects list. This is so that we can
3392 * treat new_large_objects as a stack and push new objects on
3393 * the front when evacuating.
3395 stp->new_large_objects = bd->link;
3396 dbl_link_onto(bd, &stp->scavenged_large_objects);
3398 // update the block count in this step.
3399 stp->n_scavenged_large_blocks += bd->blocks;
3402 if (scavenge_one(p)) {
3403 mkMutCons((StgClosure *)p, stp->gen);
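    /* Note: evacuating a large object pushes it onto the front of
     * stp->new_large_objects (see evacuate_large()), which is why the
     * loop above re-reads the list head on each iteration rather than
     * caching bd->link.
     */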
3408 /* -----------------------------------------------------------------------------
3409 Initialising the static object & mutable lists
3410 -------------------------------------------------------------------------- */
3413 zero_static_object_list(StgClosure* first_static)
3417 const StgInfoTable *info;
3419 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3421 link = STATIC_LINK(info, p);
3422 STATIC_LINK(info,p) = NULL;
3426 /* This function is only needed because we share the mutable link
3427 * field with the static link field in an IND_STATIC, so we have to
3428 * zero the mut_link field before doing a major GC, which needs the
3429 * static link field.
3431 * It doesn't do any harm to zero all the mutable link fields on the mutable list. */
3436 zero_mutable_list( StgMutClosure *first )
3438 StgMutClosure *next, *c;
3440 for (c = first; c != END_MUT_LIST; c = next) {
3446 /* -----------------------------------------------------------------------------
3447 Reverting CAFs
3448 -------------------------------------------------------------------------- */
3455 for (c = (StgIndStatic *)caf_list; c != NULL;
3456 c = (StgIndStatic *)c->static_link)
3458 c->header.info = c->saved_info;
3459 c->saved_info = NULL;
3460 // could, but not necessary: c->static_link = NULL;
3466 scavengeCAFs( void )
3471 for (c = (StgIndStatic *)caf_list; c != NULL;
3472 c = (StgIndStatic *)c->static_link)
3474 c->indirectee = evacuate(c->indirectee);
3478 /* -----------------------------------------------------------------------------
3479 Sanity code for CAF garbage collection.
3481 With DEBUG turned on, we manage a CAF list in addition to the SRT
3482 mechanism. After GC, we run down the CAF list and blackhole any
3483 CAFs which have been garbage collected. This means we get an error
3484 whenever the program tries to enter a garbage collected CAF.
3486 Any garbage collected CAFs are taken off the CAF list at the same time.
3488 -------------------------------------------------------------------------- */
3490 #if 0 && defined(DEBUG)
3497 const StgInfoTable *info;
3508 ASSERT(info->type == IND_STATIC);
3510 if (STATIC_LINK(info,p) == NULL) {
3511 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3513 SET_INFO(p,&stg_BLACKHOLE_info);
3514 p = STATIC_LINK2(info,p);
3518 pp = &STATIC_LINK2(info,p);
3525 // belch("%d CAFs live", i);
3530 /* -----------------------------------------------------------------------------
3533 Whenever a thread returns to the scheduler after possibly doing
3534 some work, we have to run down the stack and black-hole all the
3535 closures referred to by update frames.
3536 -------------------------------------------------------------------------- */
3539 threadLazyBlackHole(StgTSO *tso)
3541 StgUpdateFrame *update_frame;
3542 StgBlockingQueue *bh;
3545 stack_end = &tso->stack[tso->stack_size];
3546 update_frame = tso->su;
3549 switch (get_itbl(update_frame)->type) {
3552 update_frame = ((StgCatchFrame *)update_frame)->link;
3556 bh = (StgBlockingQueue *)update_frame->updatee;
3558 /* if the thunk is already blackholed, it means we've also
3559 * already blackholed the rest of the thunks on this stack,
3560 * so we can stop early.
3562 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3563 * don't interfere with this optimisation.
3565 if (bh->header.info == &stg_BLACKHOLE_info) {
3569 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3570 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3571 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3572 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3574 SET_INFO(bh,&stg_BLACKHOLE_info);
3577 update_frame = update_frame->link;
3581 update_frame = ((StgSeqFrame *)update_frame)->link;
3587 barf("threadPaused");
3593 /* -----------------------------------------------------------------------------
3596 * Code largely pinched from old RTS, then hacked to bits. We also do
3597 * lazy black holing here.
3599 * -------------------------------------------------------------------------- */
3602 threadSqueezeStack(StgTSO *tso)
3604 lnat displacement = 0;
3605 StgUpdateFrame *frame;
3606 StgUpdateFrame *next_frame; // Temporally next
3607 StgUpdateFrame *prev_frame; // Temporally previous
3609 rtsBool prev_was_update_frame;
3611 StgUpdateFrame *top_frame;
3612 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3614 void printObj( StgClosure *obj ); // from Printer.c
3616 top_frame = tso->su;
3619 bottom = &(tso->stack[tso->stack_size]);
3622 /* There must be at least one frame, namely the STOP_FRAME.
3624 ASSERT((P_)frame < bottom);
3626 /* Walk down the stack, reversing the links between frames so that
3627 * we can walk back up as we squeeze from the bottom. Note that
3628 * next_frame and prev_frame refer to next and previous as they were
3629 * added to the stack, rather than the way we see them in this
3630 * walk. (It makes the next loop less confusing.)
3632 * Stop if we find an update frame pointing to a black hole
3633 * (see comment in threadLazyBlackHole()).
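  /* Worked example of the reversal (illustrative): with frames
   * F3 (youngest, == tso->su) -> F2 -> F1 -> STOP before the walk,
   * the links afterwards run F1 -> F2 -> F3 -> NULL, so the squeezing
   * pass below can start at the bottom-most frame and follow the links
   * back up towards the top of the stack.
   */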
3637 // bottom - sizeof(StgStopFrame) is the STOP_FRAME
3638 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3639 prev_frame = frame->link;
3640 frame->link = next_frame;
3645 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3646 printObj((StgClosure *)prev_frame);
3647 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3650 switch (get_itbl(frame)->type) {
3653 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3666 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3668 printObj((StgClosure *)prev_frame);
3671 if (get_itbl(frame)->type == UPDATE_FRAME
3672 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3677 /* Now, we're at the bottom. Frame points to the lowest update
3678 * frame on the stack, and its link actually points to the frame
3679 * above. We have to walk back up the stack, squeezing out empty
3680 * update frames and turning the pointers back around on the way back up.
3683 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3684 * we never want to eliminate it anyway. Just walk one step up
3685 * before starting to squeeze. When you get to the topmost frame,
3686 * remember that there are still some words above it that might have to be moved. */
3693 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3696 * Loop through all of the frames (everything except the very
3697 * bottom). Things are complicated by the fact that we have
3698 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3699 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3701 while (frame != NULL) {
3703 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3704 rtsBool is_update_frame;
3706 next_frame = frame->link;
3707 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3710 /* Check whether: 1. both the previous and current frame are update frames,
3711 * and 2. the current frame is empty (the previous frame sits immediately on top of it, allowing for the displacement accumulated so far). */
3713 if (prev_was_update_frame && is_update_frame &&
3714 (P_)prev_frame == frame_bottom + displacement) {
3716 // Now squeeze out the current frame
3717 StgClosure *updatee_keep = prev_frame->updatee;
3718 StgClosure *updatee_bypass = frame->updatee;
3721 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3725 /* Deal with blocking queues. If both updatees have blocked
3726 * threads, then we should merge the queues into the update
3727 * frame that we're keeping.
3729 * Alternatively, we could just wake them up: they'll just go
3730 * straight to sleep on the proper blackhole! This is less code
3731 * and probably less bug prone, although it's probably much slower. */
3734 #if 0 // do it properly...
3735 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3736 # error Unimplemented lazy BH warning. (KSW 1999-01)
3738 if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
3739 || GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
3741 // Sigh. It has one. Don't lose those threads!
3742 if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
3743 // Urgh. Two queues. Merge them.
3744 StgTSO *keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3746 while (keep_tso->link != END_TSO_QUEUE) {
3747 keep_tso = keep_tso->link;
3749 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3752 // For simplicity, just swap the BQ for the BH
3753 StgClosure *temp = updatee_keep;
3755 updatee_keep = updatee_bypass;
3756 updatee_bypass = temp;
3758 // Record the swap in the kept frame (below)
3759 prev_frame->updatee = updatee_keep;
3764 TICK_UPD_SQUEEZED();
3765 /* wasn't there something about update squeezing and ticky to be
3766 * sorted out? oh yes: we aren't counting each enter properly
3767 * in this case. See the log somewhere. KSW 1999-04-21
3769 * Check two things: that the two update frames don't point to
3770 * the same object, and that the updatee_bypass isn't already an
3771 * indirection. Both of these cases only happen when we're in a
3772 * black hole-style loop (and there are multiple update frames
3773 * on the stack pointing to the same closure), but they can both
3774 * screw us up if we don't check.
3776 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3777 // this wakes the threads up
3778 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3781 sp = (P_)frame - 1; // sp = stuff to slide
3782 displacement += sizeofW(StgUpdateFrame);
3785 // No squeeze for this frame
3786 sp = frame_bottom - 1; // Keep the current frame
3788 /* Do lazy black-holing.
3790 if (is_update_frame) {
3791 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3792 if (bh->header.info != &stg_BLACKHOLE_info &&
3793 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3794 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3795 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3796 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3799 /* zero out the slop so that the sanity checker can tell
3800 * where the next closure is.
3803 StgInfoTable *info = get_itbl(bh);
3804 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3805 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3806 * info is used for a different purpose, and it's exactly the
3807 * same size as a BLACKHOLE in any case.
3809 if (info->type != THUNK_SELECTOR) {
3810 for (i = np; i < np + nw; i++) {
3811 ((StgClosure *)bh)->payload[i] = 0;
3816 SET_INFO(bh,&stg_BLACKHOLE_info);
3820 // Fix the link in the current frame (should point to the frame below)
3821 frame->link = prev_frame;
3822 prev_was_update_frame = is_update_frame;
3825 // Now slide all words from sp up to the next frame
3827 if (displacement > 0) {
3828 P_ next_frame_bottom;
3830 if (next_frame != NULL)
3831 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3833 next_frame_bottom = tso->sp - 1;
3837 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3841 while (sp >= next_frame_bottom) {
3842 sp[displacement] = *sp;
3846 (P_)prev_frame = (P_)frame + displacement;
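	/* Note on the copy above (illustrative): sp runs from high
	 * addresses down to next_frame_bottom while each word moves
	 * 'displacement' words towards the bottom of the stack, so the
	 * overlapping source and destination regions are copied safely
	 * (highest source word first).
	 */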
3850 tso->sp += displacement;
3851 tso->su = prev_frame;
3854 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3855 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
3860 /* -----------------------------------------------------------------------------
3863 * We have to prepare for GC - this means doing lazy black holing
3864 * here. We also take the opportunity to do stack squeezing if it's turned on.
3866 * -------------------------------------------------------------------------- */
3868 threadPaused(StgTSO *tso)
3870 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3871 threadSqueezeStack(tso); // does black holing too
3873 threadLazyBlackHole(tso);
3876 /* -----------------------------------------------------------------------------
3878 * -------------------------------------------------------------------------- */
3882 printMutOnceList(generation *gen)
3884 StgMutClosure *p, *next;
3886 p = gen->mut_once_list;
3889 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3890 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3891 fprintf(stderr, "%p (%s), ",
3892 p, info_type((StgClosure *)p));
3894 fputc('\n', stderr);
3898 printMutableList(generation *gen)
3900 StgMutClosure *p, *next;
3905 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3906 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3907 fprintf(stderr, "%p (%s), ",
3908 p, info_type((StgClosure *)p));
3910 fputc('\n', stderr);
3913 static inline rtsBool
3914 maybeLarge(StgClosure *closure)
3916 StgInfoTable *info = get_itbl(closure);
3918 /* closure types that may be found on the new_large_objects list;
3919 see scavenge_large */
3920 return (info->type == MUT_ARR_PTRS ||
3921 info->type == MUT_ARR_PTRS_FROZEN ||
3922 info->type == TSO ||
3923 info->type == ARR_WORDS);