1 /* -----------------------------------------------------------------------------
2 * $Id: GC.c,v 1.126 2001/11/08 12:46:31 simonmar Exp $
4 * (c) The GHC Team 1998-1999
6 * Generational garbage collector
8 * ---------------------------------------------------------------------------*/
10 #include "PosixSource.h"
15 #include "StoragePriv.h"
18 #include "SchedAPI.h" // for ReverCAFs prototype
20 #include "BlockAlloc.h"
26 #include "StablePriv.h"
28 #include "ParTicky.h" // ToDo: move into Rts.h
29 #include "GCCompact.h"
30 #if defined(GRAN) || defined(PAR)
31 # include "GranSimRts.h"
32 # include "ParallelRts.h"
36 # include "ParallelDebug.h"
41 #if defined(RTS_GTK_FRONTPANEL)
42 #include "FrontPanel.h"
45 /* STATIC OBJECT LIST.
48 * We maintain a linked list of static objects that are still live.
49 * The requirements for this list are:
51 * - we need to scan the list while adding to it, in order to
52 * scavenge all the static objects (in the same way that
53 * breadth-first scavenging works for dynamic objects).
55 * - we need to be able to tell whether an object is already on
56 * the list, to break loops.
58 * Each static object has a "static link field", which we use for
59 * linking objects on to the list. We use a stack-type list, consing
60 * objects on the front as they are added (this means that the
61 * scavenge phase is depth-first, not breadth-first, but that
64 * A separate list is kept for objects that have been scavenged
65 * already - this is so that we can zero all the marks afterwards.
67 * An object is on the list if its static link field is non-zero; this
68 * means that we have to mark the end of the list with '1', not NULL.
70 * Extra notes for generational GC:
72 * Each generation has a static object list associated with it. When
73 * collecting generations up to N, we treat the static object lists
74 * from generations > N as roots.
76 * We build up a static object list while collecting generations 0..N,
77 * which is then appended to the static object list of generation N+1.
79 StgClosure* static_objects; // live static objects
80 StgClosure* scavenged_static_objects; // static objects scavenged so far
82 /* N is the oldest generation being collected, where the generations
83 * are numbered starting at 0. A major GC (indicated by the major_gc
84 * flag) is when we're collecting all generations. We only attempt to
85 * deal with static objects and GC CAFs when doing a major GC.
88 static rtsBool major_gc;
90 /* Youngest generation that objects should be evacuated to in
91 * evacuate(). (Logically an argument to evacuate, but it's static
92 * a lot of the time so we optimise it into a global variable).
98 StgWeak *old_weak_ptr_list; // also pending finaliser list
99 static rtsBool weak_done; // all done for this pass
101 /* List of all threads during GC
103 static StgTSO *old_all_threads;
104 static StgTSO *resurrected_threads;
106 /* Flag indicating failure to evacuate an object to the desired
109 static rtsBool failed_to_evac;
111 /* Old to-space (used for two-space collector only)
113 bdescr *old_to_blocks;
115 /* Data used for allocation area sizing.
117 lnat new_blocks; // blocks allocated during this GC
118 lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
120 /* Used to avoid long recursion due to selector thunks
122 lnat thunk_selector_depth = 0;
123 #define MAX_THUNK_SELECTOR_DEPTH 256
125 /* -----------------------------------------------------------------------------
126 Static function declarations
127 -------------------------------------------------------------------------- */
129 static void mark_root ( StgClosure **root );
130 static StgClosure * evacuate ( StgClosure *q );
131 static void zero_static_object_list ( StgClosure* first_static );
132 static void zero_mutable_list ( StgMutClosure *first );
134 static rtsBool traverse_weak_ptr_list ( void );
135 static void mark_weak_ptr_list ( StgWeak **list );
137 static void scavenge ( step * );
138 static void scavenge_mark_stack ( void );
139 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
140 static rtsBool scavenge_one ( StgPtr p );
141 static void scavenge_large ( step * );
142 static void scavenge_static ( void );
143 static void scavenge_mutable_list ( generation *g );
144 static void scavenge_mut_once_list ( generation *g );
146 #if 0 && defined(DEBUG)
147 static void gcCAFs ( void );
150 /* -----------------------------------------------------------------------------
151 inline functions etc. for dealing with the mark bitmap & stack.
152 -------------------------------------------------------------------------- */
154 #define MARK_STACK_BLOCKS 4
156 static bdescr *mark_stack_bdescr;
157 static StgPtr *mark_stack;
158 static StgPtr *mark_sp;
159 static StgPtr *mark_splim;
161 // Flag and pointers used for falling back to a linear scan when the
162 // mark stack overflows.
163 static rtsBool mark_stack_overflowed;
164 static bdescr *oldgen_scan_bd;
165 static StgPtr oldgen_scan;
167 static inline rtsBool
168 mark_stack_empty(void)
170 return mark_sp == mark_stack;
173 static inline rtsBool
174 mark_stack_full(void)
176 return mark_sp >= mark_splim;
180 reset_mark_stack(void)
182 mark_sp = mark_stack;
186 push_mark_stack(StgPtr p)
197 /* -----------------------------------------------------------------------------
200 For garbage collecting generation N (and all younger generations):
202 - follow all pointers in the root set. the root set includes all
203 mutable objects in all steps in all generations.
205 - for each pointer, evacuate the object it points to into either
206 + to-space in the next higher step in that generation, if one exists,
207 + if the object's generation == N, then evacuate it to the next
208 generation if one exists, or else to-space in the current
210 + if the object's generation < N, then evacuate it to to-space
211 in the next generation.
213 - repeatedly scavenge to-space from each step in each generation
214 being collected until no more objects can be evacuated.
216 - free from-space in each step, and set from-space = to-space.
218 -------------------------------------------------------------------------- */
221 GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
225 lnat live, allocated, collected = 0, copied = 0;
226 lnat oldgen_saved_blocks = 0;
230 CostCentreStack *prev_CCS;
233 #if defined(DEBUG) && defined(GRAN)
234 IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
238 // tell the stats department that we've started a GC
241 // Init stats and print par specific (timing) info
242 PAR_TICKY_PAR_START();
244 // attribute any costs to CCS_GC
250 /* Approximate how much we allocated.
251 * Todo: only when generating stats?
253 allocated = calcAllocated();
255 /* Figure out which generation to collect
257 if (force_major_gc) {
258 N = RtsFlags.GcFlags.generations - 1;
262 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
263 if (generations[g].steps[0].n_blocks +
264 generations[g].steps[0].n_large_blocks
265 >= generations[g].max_blocks) {
269 major_gc = (N == RtsFlags.GcFlags.generations-1);
272 #ifdef RTS_GTK_FRONTPANEL
273 if (RtsFlags.GcFlags.frontpanel) {
274 updateFrontPanelBeforeGC(N);
278 // check stack sanity *before* GC (ToDo: check all threads)
280 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
282 IF_DEBUG(sanity, checkFreeListSanity());
284 /* Initialise the static object lists
286 static_objects = END_OF_STATIC_LIST;
287 scavenged_static_objects = END_OF_STATIC_LIST;
289 /* zero the mutable list for the oldest generation (see comment by
290 * zero_mutable_list below).
293 zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
296 /* Save the old to-space if we're doing a two-space collection
298 if (RtsFlags.GcFlags.generations == 1) {
299 old_to_blocks = g0s0->to_blocks;
300 g0s0->to_blocks = NULL;
303 /* Keep a count of how many new blocks we allocated during this GC
304 * (used for resizing the allocation area, later).
308 /* Initialise to-space in all the generations/steps that we're
311 for (g = 0; g <= N; g++) {
312 generations[g].mut_once_list = END_MUT_LIST;
313 generations[g].mut_list = END_MUT_LIST;
315 for (s = 0; s < generations[g].n_steps; s++) {
317 // generation 0, step 0 doesn't need to-space
318 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
322 /* Get a free block for to-space. Extra blocks will be chained on
326 stp = &generations[g].steps[s];
327 ASSERT(stp->gen_no == g);
328 ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
332 bd->flags = BF_EVACUATED; // it's a to-space block
334 stp->hpLim = stp->hp + BLOCK_SIZE_W;
337 stp->n_to_blocks = 1;
338 stp->scan = bd->start;
340 stp->new_large_objects = NULL;
341 stp->scavenged_large_objects = NULL;
342 stp->n_scavenged_large_blocks = 0;
344 // mark the large objects as not evacuated yet
345 for (bd = stp->large_objects; bd; bd = bd->link) {
346 bd->flags = BF_LARGE;
349 // for a compacted step, we need to allocate the bitmap
350 if (stp->is_compacted) {
351 nat bitmap_size; // in bytes
352 bdescr *bitmap_bdescr;
355 bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
357 if (bitmap_size > 0) {
358 bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
360 stp->bitmap = bitmap_bdescr;
361 bitmap = bitmap_bdescr->start;
363 IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
364 bitmap_size, bitmap););
366 // don't forget to fill it with zeros!
367 memset(bitmap, 0, bitmap_size);
369 // for each block in this step, point to its bitmap from the
371 for (bd=stp->blocks; bd != NULL; bd = bd->link) {
372 bd->u.bitmap = bitmap;
373 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
380 /* make sure the older generations have at least one block to
381 * allocate into (this makes things easier for copy(), see below.
383 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
384 for (s = 0; s < generations[g].n_steps; s++) {
385 stp = &generations[g].steps[s];
386 if (stp->hp_bd == NULL) {
387 ASSERT(stp->blocks == NULL);
392 bd->flags = 0; // *not* a to-space block or a large object
394 stp->hpLim = stp->hp + BLOCK_SIZE_W;
400 /* Set the scan pointer for older generations: remember we
401 * still have to scavenge objects that have been promoted. */
403 stp->scan_bd = stp->hp_bd;
404 stp->to_blocks = NULL;
405 stp->n_to_blocks = 0;
406 stp->new_large_objects = NULL;
407 stp->scavenged_large_objects = NULL;
408 stp->n_scavenged_large_blocks = 0;
412 /* Allocate a mark stack if we're doing a major collection.
415 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
416 mark_stack = (StgPtr *)mark_stack_bdescr->start;
417 mark_sp = mark_stack;
418 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
420 mark_stack_bdescr = NULL;
423 /* -----------------------------------------------------------------------
424 * follow all the roots that we know about:
425 * - mutable lists from each generation > N
426 * we want to *scavenge* these roots, not evacuate them: they're not
427 * going to move in this GC.
428 * Also: do them in reverse generation order. This is because we
429 * often want to promote objects that are pointed to by older
430 * generations early, so we don't have to repeatedly copy them.
431 * Doing the generations in reverse order ensures that we don't end
432 * up in the situation where we want to evac an object to gen 3 and
433 * it has already been evaced to gen 2.
437 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
438 generations[g].saved_mut_list = generations[g].mut_list;
439 generations[g].mut_list = END_MUT_LIST;
442 // Do the mut-once lists first
443 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
444 IF_PAR_DEBUG(verbose,
445 printMutOnceList(&generations[g]));
446 scavenge_mut_once_list(&generations[g]);
448 for (st = generations[g].n_steps-1; st >= 0; st--) {
449 scavenge(&generations[g].steps[st]);
453 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
454 IF_PAR_DEBUG(verbose,
455 printMutableList(&generations[g]));
456 scavenge_mutable_list(&generations[g]);
458 for (st = generations[g].n_steps-1; st >= 0; st--) {
459 scavenge(&generations[g].steps[st]);
464 /* follow roots from the CAF list (used by GHCi)
469 /* follow all the roots that the application knows about.
472 get_roots(mark_root);
475 /* And don't forget to mark the TSO if we got here direct from
477 /* Not needed in a seq version?
479 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
483 // Mark the entries in the GALA table of the parallel system
484 markLocalGAs(major_gc);
485 // Mark all entries on the list of pending fetches
486 markPendingFetches(major_gc);
489 /* Mark the weak pointer list, and prepare to detect dead weak
492 mark_weak_ptr_list(&weak_ptr_list);
493 old_weak_ptr_list = weak_ptr_list;
494 weak_ptr_list = NULL;
495 weak_done = rtsFalse;
497 /* The all_threads list is like the weak_ptr_list.
498 * See traverse_weak_ptr_list() for the details.
500 old_all_threads = all_threads;
501 all_threads = END_TSO_QUEUE;
502 resurrected_threads = END_TSO_QUEUE;
504 /* Mark the stable pointer table.
506 markStablePtrTable(mark_root);
510 /* ToDo: To fix the caf leak, we need to make the commented out
511 * parts of this code do something sensible - as described in
514 extern void markHugsObjects(void);
519 /* -------------------------------------------------------------------------
520 * Repeatedly scavenge all the areas we know about until there's no
521 * more scavenging to be done.
528 // scavenge static objects
529 if (major_gc && static_objects != END_OF_STATIC_LIST) {
530 IF_DEBUG(sanity, checkStaticObjects(static_objects));
534 /* When scavenging the older generations: Objects may have been
535 * evacuated from generations <= N into older generations, and we
536 * need to scavenge these objects. We're going to try to ensure that
537 * any evacuations that occur move the objects into at least the
538 * same generation as the object being scavenged, otherwise we
539 * have to create new entries on the mutable list for the older
543 // scavenge each step in generations 0..maxgen
549 // scavenge objects in compacted generation
550 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
551 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
552 scavenge_mark_stack();
556 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
557 for (st = generations[gen].n_steps; --st >= 0; ) {
558 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
561 stp = &generations[gen].steps[st];
563 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
568 if (stp->new_large_objects != NULL) {
577 if (flag) { goto loop; }
580 if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
586 // Reconstruct the Global Address tables used in GUM
587 rebuildGAtables(major_gc);
588 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
591 // Now see which stable names are still alive.
594 // Tidy the end of the to-space chains
595 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
596 for (s = 0; s < generations[g].n_steps; s++) {
597 stp = &generations[g].steps[s];
598 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
599 stp->hp_bd->free = stp->hp;
600 stp->hp_bd->link = NULL;
605 // NO MORE EVACUATION AFTER THIS POINT!
606 // Finally: compaction of the oldest generation.
607 if (major_gc && oldest_gen->steps[0].is_compacted) {
608 // save number of blocks for stats
609 oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
613 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
615 /* run through all the generations/steps and tidy up
617 copied = new_blocks * BLOCK_SIZE_W;
618 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
621 generations[g].collections++; // for stats
624 for (s = 0; s < generations[g].n_steps; s++) {
626 stp = &generations[g].steps[s];
628 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
629 // stats information: how much we copied
631 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
636 // for generations we collected...
639 // rough calculation of garbage collected, for stats output
640 if (stp->is_compacted) {
641 collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
643 collected += stp->n_blocks * BLOCK_SIZE_W;
646 /* free old memory and shift to-space into from-space for all
647 * the collected steps (except the allocation area). These
648 * freed blocks will probaby be quickly recycled.
650 if (!(g == 0 && s == 0)) {
651 if (stp->is_compacted) {
652 // for a compacted step, just shift the new to-space
653 // onto the front of the now-compacted existing blocks.
654 for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
655 bd->flags &= ~BF_EVACUATED; // now from-space
657 // tack the new blocks on the end of the existing blocks
658 if (stp->blocks == NULL) {
659 stp->blocks = stp->to_blocks;
661 for (bd = stp->blocks; bd != NULL; bd = next) {
664 bd->link = stp->to_blocks;
668 // add the new blocks to the block tally
669 stp->n_blocks += stp->n_to_blocks;
671 freeChain(stp->blocks);
672 stp->blocks = stp->to_blocks;
673 stp->n_blocks = stp->n_to_blocks;
674 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
675 bd->flags &= ~BF_EVACUATED; // now from-space
678 stp->to_blocks = NULL;
679 stp->n_to_blocks = 0;
682 /* LARGE OBJECTS. The current live large objects are chained on
683 * scavenged_large, having been moved during garbage
684 * collection from large_objects. Any objects left on
685 * large_objects list are therefore dead, so we free them here.
687 for (bd = stp->large_objects; bd != NULL; bd = next) {
693 // update the count of blocks used by large objects
694 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
695 bd->flags &= ~BF_EVACUATED;
697 stp->large_objects = stp->scavenged_large_objects;
698 stp->n_large_blocks = stp->n_scavenged_large_blocks;
701 // for older generations...
703 /* For older generations, we need to append the
704 * scavenged_large_object list (i.e. large objects that have been
705 * promoted during this GC) to the large_object list for that step.
707 for (bd = stp->scavenged_large_objects; bd; bd = next) {
709 bd->flags &= ~BF_EVACUATED;
710 dbl_link_onto(bd, &stp->large_objects);
713 // add the new blocks we promoted during this GC
714 stp->n_blocks += stp->n_to_blocks;
715 stp->n_large_blocks += stp->n_scavenged_large_blocks;
720 /* Reset the sizes of the older generations when we do a major
723 * CURRENT STRATEGY: make all generations except zero the same size.
724 * We have to stay within the maximum heap size, and leave a certain
725 * percentage of the maximum heap size available to allocate into.
727 if (major_gc && RtsFlags.GcFlags.generations > 1) {
728 nat live, size, min_alloc;
729 nat max = RtsFlags.GcFlags.maxHeapSize;
730 nat gens = RtsFlags.GcFlags.generations;
732 // live in the oldest generations
733 live = oldest_gen->steps[0].n_blocks +
734 oldest_gen->steps[0].n_large_blocks;
736 // default max size for all generations except zero
737 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
738 RtsFlags.GcFlags.minOldGenSize);
740 // minimum size for generation zero
741 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
742 RtsFlags.GcFlags.minAllocAreaSize);
744 // Auto-enable compaction when the residency reaches a
745 // certain percentage of the maximum heap size (default: 30%).
746 if (RtsFlags.GcFlags.generations > 1 &&
747 (RtsFlags.GcFlags.compact ||
749 oldest_gen->steps[0].n_blocks >
750 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
751 oldest_gen->steps[0].is_compacted = 1;
752 // fprintf(stderr,"compaction: on\n", live);
754 oldest_gen->steps[0].is_compacted = 0;
755 // fprintf(stderr,"compaction: off\n", live);
758 // if we're going to go over the maximum heap size, reduce the
759 // size of the generations accordingly. The calculation is
760 // different if compaction is turned on, because we don't need
761 // to double the space required to collect the old generation.
764 // this test is necessary to ensure that the calculations
765 // below don't have any negative results - we're working
766 // with unsigned values here.
767 if (max < min_alloc) {
771 if (oldest_gen->steps[0].is_compacted) {
772 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
773 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
776 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
777 size = (max - min_alloc) / ((gens - 1) * 2);
787 fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
788 min_alloc, size, max);
791 for (g = 0; g < gens; g++) {
792 generations[g].max_blocks = size;
796 // Guess the amount of live data for stats.
799 /* Free the small objects allocated via allocate(), since this will
800 * all have been copied into G0S1 now.
802 if (small_alloc_list != NULL) {
803 freeChain(small_alloc_list);
805 small_alloc_list = NULL;
809 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
811 // Start a new pinned_object_block
812 pinned_object_block = NULL;
814 /* Free the mark stack.
816 if (mark_stack_bdescr != NULL) {
817 freeGroup(mark_stack_bdescr);
822 for (g = 0; g <= N; g++) {
823 for (s = 0; s < generations[g].n_steps; s++) {
824 stp = &generations[g].steps[s];
825 if (stp->is_compacted && stp->bitmap != NULL) {
826 freeGroup(stp->bitmap);
831 /* Two-space collector:
832 * Free the old to-space, and estimate the amount of live data.
834 if (RtsFlags.GcFlags.generations == 1) {
837 if (old_to_blocks != NULL) {
838 freeChain(old_to_blocks);
840 for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
841 bd->flags = 0; // now from-space
844 /* For a two-space collector, we need to resize the nursery. */
846 /* set up a new nursery. Allocate a nursery size based on a
847 * function of the amount of live data (by default a factor of 2)
848 * Use the blocks from the old nursery if possible, freeing up any
851 * If we get near the maximum heap size, then adjust our nursery
852 * size accordingly. If the nursery is the same size as the live
853 * data (L), then we need 3L bytes. We can reduce the size of the
854 * nursery to bring the required memory down near 2L bytes.
856 * A normal 2-space collector would need 4L bytes to give the same
857 * performance we get from 3L bytes, reducing to the same
858 * performance at 2L bytes.
860 blocks = g0s0->n_to_blocks;
862 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
863 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
864 RtsFlags.GcFlags.maxHeapSize ) {
865 long adjusted_blocks; // signed on purpose
868 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
869 IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
870 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
871 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
874 blocks = adjusted_blocks;
877 blocks *= RtsFlags.GcFlags.oldGenFactor;
878 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
879 blocks = RtsFlags.GcFlags.minAllocAreaSize;
882 resizeNursery(blocks);
885 /* Generational collector:
886 * If the user has given us a suggested heap size, adjust our
887 * allocation area to make best use of the memory available.
890 if (RtsFlags.GcFlags.heapSizeSuggestion) {
892 nat needed = calcNeeded(); // approx blocks needed at next GC
894 /* Guess how much will be live in generation 0 step 0 next time.
895 * A good approximation is obtained by finding the
896 * percentage of g0s0 that was live at the last minor GC.
899 g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
902 /* Estimate a size for the allocation area based on the
903 * information available. We might end up going slightly under
904 * or over the suggested heap size, but we should be pretty
907 * Formula: suggested - needed
908 * ----------------------------
909 * 1 + g0s0_pcnt_kept/100
911 * where 'needed' is the amount of memory needed at the next
912 * collection for collecting all steps except g0s0.
915 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
916 (100 + (long)g0s0_pcnt_kept);
918 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
919 blocks = RtsFlags.GcFlags.minAllocAreaSize;
922 resizeNursery((nat)blocks);
925 // we might have added extra large blocks to the nursery, so
926 // resize back to minAllocAreaSize again.
927 resizeNursery(RtsFlags.GcFlags.minAllocAreaSize);
931 // mark the garbage collected CAFs as dead
932 #if 0 && defined(DEBUG) // doesn't work at the moment
933 if (major_gc) { gcCAFs(); }
936 // zero the scavenged static object list
938 zero_static_object_list(scavenged_static_objects);
944 // start any pending finalizers
945 scheduleFinalizers(old_weak_ptr_list);
947 // send exceptions to any threads which were about to die
948 resurrectThreads(resurrected_threads);
950 // Update the stable pointer hash table.
951 updateStablePtrTable(major_gc);
953 // check sanity after GC
954 IF_DEBUG(sanity, checkSanity());
956 // extra GC trace info
957 IF_DEBUG(gc, statDescribeGens());
960 // symbol-table based profiling
961 /* heapCensus(to_blocks); */ /* ToDo */
964 // restore enclosing cost centre
970 // check for memory leaks if sanity checking is on
971 IF_DEBUG(sanity, memInventory());
973 #ifdef RTS_GTK_FRONTPANEL
974 if (RtsFlags.GcFlags.frontpanel) {
975 updateFrontPanelAfterGC( N, live );
979 // ok, GC over: tell the stats department what happened.
980 stat_endGC(allocated, collected, live, copied, N);
986 /* -----------------------------------------------------------------------------
989 traverse_weak_ptr_list is called possibly many times during garbage
990 collection. It returns a flag indicating whether it did any work
991 (i.e. called evacuate on any live pointers).
993 Invariant: traverse_weak_ptr_list is called when the heap is in an
994 idempotent state. That means that there are no pending
995 evacuate/scavenge operations. This invariant helps the weak
996 pointer code decide which weak pointers are dead - if there are no
997 new live weak pointers, then all the currently unreachable ones are
1000 For generational GC: we just don't try to finalize weak pointers in
1001 older generations than the one we're collecting. This could
1002 probably be optimised by keeping per-generation lists of weak
1003 pointers, but for a few weak pointers this scheme will work.
1004 -------------------------------------------------------------------------- */
1007 traverse_weak_ptr_list(void)
1009 StgWeak *w, **last_w, *next_w;
1011 rtsBool flag = rtsFalse;
1013 if (weak_done) { return rtsFalse; }
1015 /* doesn't matter where we evacuate values/finalizers to, since
1016 * these pointers are treated as roots (iff the keys are alive).
1020 last_w = &old_weak_ptr_list;
1021 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
1023 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
1024 * called on a live weak pointer object. Just remove it.
1026 if (w->header.info == &stg_DEAD_WEAK_info) {
1027 next_w = ((StgDeadWeak *)w)->link;
1032 ASSERT(get_itbl(w)->type == WEAK);
1034 /* Now, check whether the key is reachable.
1036 new = isAlive(w->key);
1039 // evacuate the value and finalizer
1040 w->value = evacuate(w->value);
1041 w->finalizer = evacuate(w->finalizer);
1042 // remove this weak ptr from the old_weak_ptr list
1044 // and put it on the new weak ptr list
1046 w->link = weak_ptr_list;
1049 IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
1053 last_w = &(w->link);
1059 /* Now deal with the all_threads list, which behaves somewhat like
1060 * the weak ptr list. If we discover any threads that are about to
1061 * become garbage, we wake them up and administer an exception.
1064 StgTSO *t, *tmp, *next, **prev;
1066 prev = &old_all_threads;
1067 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1069 (StgClosure *)tmp = isAlive((StgClosure *)t);
1075 ASSERT(get_itbl(t)->type == TSO);
1076 switch (t->what_next) {
1077 case ThreadRelocated:
1082 case ThreadComplete:
1083 // finshed or died. The thread might still be alive, but we
1084 // don't keep it on the all_threads list. Don't forget to
1085 // stub out its global_link field.
1086 next = t->global_link;
1087 t->global_link = END_TSO_QUEUE;
1095 // not alive (yet): leave this thread on the old_all_threads list.
1096 prev = &(t->global_link);
1097 next = t->global_link;
1100 // alive: move this thread onto the all_threads list.
1101 next = t->global_link;
1102 t->global_link = all_threads;
1109 /* If we didn't make any changes, then we can go round and kill all
1110 * the dead weak pointers. The old_weak_ptr list is used as a list
1111 * of pending finalizers later on.
1113 if (flag == rtsFalse) {
1114 for (w = old_weak_ptr_list; w; w = w->link) {
1115 w->finalizer = evacuate(w->finalizer);
1118 /* And resurrect any threads which were about to become garbage.
1121 StgTSO *t, *tmp, *next;
1122 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1123 next = t->global_link;
1124 (StgClosure *)tmp = evacuate((StgClosure *)t);
1125 tmp->global_link = resurrected_threads;
1126 resurrected_threads = tmp;
1130 weak_done = rtsTrue;
1136 /* -----------------------------------------------------------------------------
1137 After GC, the live weak pointer list may have forwarding pointers
1138 on it, because a weak pointer object was evacuated after being
1139 moved to the live weak pointer list. We remove those forwarding
1142 Also, we don't consider weak pointer objects to be reachable, but
1143 we must nevertheless consider them to be "live" and retain them.
1144 Therefore any weak pointer objects which haven't as yet been
1145 evacuated need to be evacuated now.
1146 -------------------------------------------------------------------------- */
1150 mark_weak_ptr_list ( StgWeak **list )
1152 StgWeak *w, **last_w;
1155 for (w = *list; w; w = w->link) {
1156 (StgClosure *)w = evacuate((StgClosure *)w);
1158 last_w = &(w->link);
1162 /* -----------------------------------------------------------------------------
1163 isAlive determines whether the given closure is still alive (after
1164 a garbage collection) or not. It returns the new address of the
1165 closure if it is alive, or NULL otherwise.
1167 NOTE: Use it before compaction only!
1168 -------------------------------------------------------------------------- */
1172 isAlive(StgClosure *p)
1174 const StgInfoTable *info;
1181 /* ToDo: for static closures, check the static link field.
1182 * Problem here is that we sometimes don't set the link field, eg.
1183 * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1188 // ignore closures in generations that we're not collecting.
1189 if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
1192 // large objects have an evacuated flag
1193 if (bd->flags & BF_LARGE) {
1194 if (bd->flags & BF_EVACUATED) {
1200 // check the mark bit for compacted steps
1201 if (bd->step->is_compacted && is_marked((P_)p,bd)) {
1205 switch (info->type) {
1210 case IND_OLDGEN: // rely on compatible layout with StgInd
1211 case IND_OLDGEN_PERM:
1212 // follow indirections
1213 p = ((StgInd *)p)->indirectee;
1218 return ((StgEvacuated *)p)->evacuee;
1221 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1222 p = (StgClosure *)((StgTSO *)p)->link;
1234 mark_root(StgClosure **root)
1236 *root = evacuate(*root);
1242 bdescr *bd = allocBlock();
1243 bd->gen_no = stp->gen_no;
1246 if (stp->gen_no <= N) {
1247 bd->flags = BF_EVACUATED;
1252 stp->hp_bd->free = stp->hp;
1253 stp->hp_bd->link = bd;
1254 stp->hp = bd->start;
1255 stp->hpLim = stp->hp + BLOCK_SIZE_W;
1262 static __inline__ void
1263 upd_evacuee(StgClosure *p, StgClosure *dest)
1265 p->header.info = &stg_EVACUATED_info;
1266 ((StgEvacuated *)p)->evacuee = dest;
1270 static __inline__ StgClosure *
1271 copy(StgClosure *src, nat size, step *stp)
1275 TICK_GC_WORDS_COPIED(size);
1276 /* Find out where we're going, using the handy "to" pointer in
1277 * the step of the source object. If it turns out we need to
1278 * evacuate to an older generation, adjust it here (see comment
1281 if (stp->gen_no < evac_gen) {
1282 #ifdef NO_EAGER_PROMOTION
1283 failed_to_evac = rtsTrue;
1285 stp = &generations[evac_gen].steps[0];
1289 /* chain a new block onto the to-space for the destination step if
1292 if (stp->hp + size >= stp->hpLim) {
1296 for(to = stp->hp, from = (P_)src; size>0; --size) {
1302 upd_evacuee(src,(StgClosure *)dest);
1303 return (StgClosure *)dest;
1306 /* Special version of copy() for when we only want to copy the info
1307 * pointer of an object, but reserve some padding after it. This is
1308 * used to optimise evacuation of BLACKHOLEs.
1312 static __inline__ StgClosure *
1313 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1317 TICK_GC_WORDS_COPIED(size_to_copy);
1318 if (stp->gen_no < evac_gen) {
1319 #ifdef NO_EAGER_PROMOTION
1320 failed_to_evac = rtsTrue;
1322 stp = &generations[evac_gen].steps[0];
1326 if (stp->hp + size_to_reserve >= stp->hpLim) {
1330 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1335 stp->hp += size_to_reserve;
1336 upd_evacuee(src,(StgClosure *)dest);
1337 return (StgClosure *)dest;
1341 /* -----------------------------------------------------------------------------
1342 Evacuate a large object
1344 This just consists of removing the object from the (doubly-linked)
1345 large_alloc_list, and linking it on to the (singly-linked)
1346 new_large_objects list, from where it will be scavenged later.
1348 Convention: bd->flags has BF_EVACUATED set for a large object
1349 that has been evacuated, or unset otherwise.
1350 -------------------------------------------------------------------------- */
1354 evacuate_large(StgPtr p)
1356 bdescr *bd = Bdescr(p);
1359 // object must be at the beginning of the block (or be a ByteArray)
1360 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
1361 (((W_)p & BLOCK_MASK) == 0));
1363 // already evacuated?
1364 if (bd->flags & BF_EVACUATED) {
1365 /* Don't forget to set the failed_to_evac flag if we didn't get
1366 * the desired destination (see comments in evacuate()).
1368 if (bd->gen_no < evac_gen) {
1369 failed_to_evac = rtsTrue;
1370 TICK_GC_FAILED_PROMOTION();
1376 // remove from large_object list
1378 bd->u.back->link = bd->link;
1379 } else { // first object in the list
1380 stp->large_objects = bd->link;
1383 bd->link->u.back = bd->u.back;
1386 /* link it on to the evacuated large object list of the destination step
1389 if (stp->gen_no < evac_gen) {
1390 #ifdef NO_EAGER_PROMOTION
1391 failed_to_evac = rtsTrue;
1393 stp = &generations[evac_gen].steps[0];
1398 bd->gen_no = stp->gen_no;
1399 bd->link = stp->new_large_objects;
1400 stp->new_large_objects = bd;
1401 bd->flags |= BF_EVACUATED;
1404 /* -----------------------------------------------------------------------------
1405 Adding a MUT_CONS to an older generation.
1407 This is necessary from time to time when we end up with an
1408 old-to-new generation pointer in a non-mutable object. We defer
1409 the promotion until the next GC.
1410 -------------------------------------------------------------------------- */
1414 mkMutCons(StgClosure *ptr, generation *gen)
1419 stp = &gen->steps[0];
1421 /* chain a new block onto the to-space for the destination step if
1424 if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
1428 q = (StgMutVar *)stp->hp;
1429 stp->hp += sizeofW(StgMutVar);
1431 SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
1433 recordOldToNewPtrs((StgMutClosure *)q);
1435 return (StgClosure *)q;
1438 /* -----------------------------------------------------------------------------
1441 This is called (eventually) for every live object in the system.
1443 The caller to evacuate specifies a desired generation in the
1444 evac_gen global variable. The following conditions apply to
1445 evacuating an object which resides in generation M when we're
1446 collecting up to generation N
1450 else evac to step->to
1452 if M < evac_gen evac to evac_gen, step 0
1454 if the object is already evacuated, then we check which generation
1457 if M >= evac_gen do nothing
1458 if M < evac_gen set failed_to_evac flag to indicate that we
1459 didn't manage to evacuate this object into evac_gen.
1461 -------------------------------------------------------------------------- */
1464 evacuate(StgClosure *q)
1469 const StgInfoTable *info;
1472 if (HEAP_ALLOCED(q)) {
1475 // not a group head: find the group head
1476 if (bd->blocks == 0) { bd = bd->link; }
1478 if (bd->gen_no > N) {
1479 /* Can't evacuate this object, because it's in a generation
1480 * older than the ones we're collecting. Let's hope that it's
1481 * in evac_gen or older, or we will have to arrange to track
1482 * this pointer using the mutable list.
1484 if (bd->gen_no < evac_gen) {
1486 failed_to_evac = rtsTrue;
1487 TICK_GC_FAILED_PROMOTION();
1492 /* evacuate large objects by re-linking them onto a different list.
1494 if (bd->flags & BF_LARGE) {
1496 if (info->type == TSO &&
1497 ((StgTSO *)q)->what_next == ThreadRelocated) {
1498 q = (StgClosure *)((StgTSO *)q)->link;
1501 evacuate_large((P_)q);
1505 /* If the object is in a step that we're compacting, then we
1506 * need to use an alternative evacuate procedure.
1508 if (bd->step->is_compacted) {
1509 if (!is_marked((P_)q,bd)) {
1511 if (mark_stack_full()) {
1512 mark_stack_overflowed = rtsTrue;
1515 push_mark_stack((P_)q);
1523 else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
1526 // make sure the info pointer is into text space
1527 ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
1528 || IS_HUGS_CONSTR_INFO(GET_INFO(q))));
1531 switch (info -> type) {
1535 to = copy(q,sizeW_fromITBL(info),stp);
1540 StgWord w = (StgWord)q->payload[0];
1541 if (q->header.info == Czh_con_info &&
1542 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
1543 (StgChar)w <= MAX_CHARLIKE) {
1544 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1546 if (q->header.info == Izh_con_info &&
1547 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1548 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1550 // else, fall through ...
1556 return copy(q,sizeofW(StgHeader)+1,stp);
1558 case THUNK_1_0: // here because of MIN_UPD_SIZE
1563 #ifdef NO_PROMOTE_THUNKS
1564 if (bd->gen_no == 0 &&
1565 bd->step->no != 0 &&
1566 bd->step->no == generations[bd->gen_no].n_steps-1) {
1570 return copy(q,sizeofW(StgHeader)+2,stp);
1578 return copy(q,sizeofW(StgHeader)+2,stp);
1584 case IND_OLDGEN_PERM:
1589 return copy(q,sizeW_fromITBL(info),stp);
1592 case SE_CAF_BLACKHOLE:
1595 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
1598 to = copy(q,BLACKHOLE_sizeW(),stp);
1601 case THUNK_SELECTOR:
1603 const StgInfoTable* selectee_info;
1604 StgClosure* selectee = ((StgSelector*)q)->selectee;
1607 selectee_info = get_itbl(selectee);
1608 switch (selectee_info->type) {
1616 case CONSTR_NOCAF_STATIC:
1618 StgWord offset = info->layout.selector_offset;
1620 // check that the size is in range
1622 (StgWord32)(selectee_info->layout.payload.ptrs +
1623 selectee_info->layout.payload.nptrs));
1625 // perform the selection!
1626 q = selectee->payload[offset];
1628 /* if we're already in to-space, there's no need to continue
1629 * with the evacuation, just update the source address with
1630 * a pointer to the (evacuated) constructor field.
1632 if (HEAP_ALLOCED(q)) {
1633 bdescr *bd = Bdescr((P_)q);
1634 if (bd->flags & BF_EVACUATED) {
1635 if (bd->gen_no < evac_gen) {
1636 failed_to_evac = rtsTrue;
1637 TICK_GC_FAILED_PROMOTION();
1643 /* otherwise, carry on and evacuate this constructor field,
1644 * (but not the constructor itself)
1653 case IND_OLDGEN_PERM:
1654 selectee = ((StgInd *)selectee)->indirectee;
1658 selectee = ((StgEvacuated *)selectee)->evacuee;
1661 case THUNK_SELECTOR:
1663 /* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
1664 something) to go into an infinite loop when the nightly
1665 stage2 compiles PrelTup.lhs. */
1667 /* we can't recurse indefinitely in evacuate(), so set a
1668 * limit on the number of times we can go around this
1671 if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
1673 bd = Bdescr((P_)selectee);
1674 if (!bd->flags & BF_EVACUATED) {
1675 thunk_selector_depth++;
1676 selectee = evacuate(selectee);
1677 thunk_selector_depth--;
1681 // otherwise, fall through...
1693 case SE_CAF_BLACKHOLE:
1697 // not evaluated yet
1701 // a copy of the top-level cases below
1702 case RBH: // cf. BLACKHOLE_BQ
1704 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1705 to = copy(q,BLACKHOLE_sizeW(),stp);
1706 //ToDo: derive size etc from reverted IP
1707 //to = copy(q,size,stp);
1708 // recordMutable((StgMutClosure *)to);
1713 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1714 to = copy(q,sizeofW(StgBlockedFetch),stp);
1721 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1722 to = copy(q,sizeofW(StgFetchMe),stp);
1726 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1727 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1732 barf("evacuate: THUNK_SELECTOR: strange selectee %d",
1733 (int)(selectee_info->type));
1736 return copy(q,THUNK_SELECTOR_sizeW(),stp);
1740 // follow chains of indirections, don't evacuate them
1741 q = ((StgInd*)q)->indirectee;
1745 if (info->srt_len > 0 && major_gc &&
1746 THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1747 THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1748 static_objects = (StgClosure *)q;
1753 if (info->srt_len > 0 && major_gc &&
1754 FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1755 FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1756 static_objects = (StgClosure *)q;
1761 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1762 * on the CAF list, so don't do anything with it here (we'll
1763 * scavenge it later).
1766 && ((StgIndStatic *)q)->saved_info == NULL
1767 && IND_STATIC_LINK((StgClosure *)q) == NULL) {
1768 IND_STATIC_LINK((StgClosure *)q) = static_objects;
1769 static_objects = (StgClosure *)q;
1774 if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
1775 STATIC_LINK(info,(StgClosure *)q) = static_objects;
1776 static_objects = (StgClosure *)q;
1780 case CONSTR_INTLIKE:
1781 case CONSTR_CHARLIKE:
1782 case CONSTR_NOCAF_STATIC:
1783 /* no need to put these on the static linked list, they don't need
1798 // shouldn't see these
1799 barf("evacuate: stack frame at %p\n", q);
1803 /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
1804 * of stack, tagging and all.
1806 return copy(q,pap_sizeW((StgPAP*)q),stp);
1809 /* Already evacuated, just return the forwarding address.
1810 * HOWEVER: if the requested destination generation (evac_gen) is
1811 * older than the actual generation (because the object was
1812 * already evacuated to a younger generation) then we have to
1813 * set the failed_to_evac flag to indicate that we couldn't
1814 * manage to promote the object to the desired generation.
1816 if (evac_gen > 0) { // optimisation
1817 StgClosure *p = ((StgEvacuated*)q)->evacuee;
1818 if (Bdescr((P_)p)->gen_no < evac_gen) {
1819 failed_to_evac = rtsTrue;
1820 TICK_GC_FAILED_PROMOTION();
1823 return ((StgEvacuated*)q)->evacuee;
1826 // just copy the block
1827 return copy(q,arr_words_sizeW((StgArrWords *)q),stp);
1830 case MUT_ARR_PTRS_FROZEN:
1831 // just copy the block
1832 return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
1836 StgTSO *tso = (StgTSO *)q;
1838 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
1840 if (tso->what_next == ThreadRelocated) {
1841 q = (StgClosure *)tso->link;
1845 /* To evacuate a small TSO, we need to relocate the update frame
1849 StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
1850 move_TSO(tso, new_tso);
1851 return (StgClosure *)new_tso;
1856 case RBH: // cf. BLACKHOLE_BQ
1858 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
1859 to = copy(q,BLACKHOLE_sizeW(),stp);
1860 //ToDo: derive size etc from reverted IP
1861 //to = copy(q,size,stp);
1863 belch("@@ evacuate: RBH %p (%s) to %p (%s)",
1864 q, info_type(q), to, info_type(to)));
1869 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
1870 to = copy(q,sizeofW(StgBlockedFetch),stp);
1872 belch("@@ evacuate: %p (%s) to %p (%s)",
1873 q, info_type(q), to, info_type(to)));
1880 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1881 to = copy(q,sizeofW(StgFetchMe),stp);
1883 belch("@@ evacuate: %p (%s) to %p (%s)",
1884 q, info_type(q), to, info_type(to)));
1888 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
1889 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
1891 belch("@@ evacuate: %p (%s) to %p (%s)",
1892 q, info_type(q), to, info_type(to)));
1897 barf("evacuate: strange closure type %d", (int)(info->type));
1903 /* -----------------------------------------------------------------------------
1904 move_TSO is called to update the TSO structure after it has been
1905 moved from one place to another.
1906 -------------------------------------------------------------------------- */
1909 move_TSO(StgTSO *src, StgTSO *dest)
1913 // relocate the stack pointers...
1914 diff = (StgPtr)dest - (StgPtr)src; // In *words*
1915 dest->sp = (StgPtr)dest->sp + diff;
1916 dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);
1918 relocate_stack(dest, diff);
1921 /* -----------------------------------------------------------------------------
1922 relocate_stack is called to update the linkage between
1923 UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
1925 -------------------------------------------------------------------------- */
1928 relocate_stack(StgTSO *dest, ptrdiff_t diff)
1936 while ((P_)su < dest->stack + dest->stack_size) {
1937 switch (get_itbl(su)->type) {
1939 // GCC actually manages to common up these three cases!
1942 su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
1947 cf = (StgCatchFrame *)su;
1948 cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
1953 sf = (StgSeqFrame *)su;
1954 sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
1963 barf("relocate_stack %d", (int)(get_itbl(su)->type));
1974 scavenge_srt(const StgInfoTable *info)
1976 StgClosure **srt, **srt_end;
1978 /* evacuate the SRT. If srt_len is zero, then there isn't an
1979 * srt field in the info table. That's ok, because we'll
1980 * never dereference it.
1982 srt = (StgClosure **)(info->srt);
1983 srt_end = srt + info->srt_len;
1984 for (; srt < srt_end; srt++) {
1985 /* Special-case to handle references to closures hiding out in DLLs, since
1986 double indirections required to get at those. The code generator knows
1987 which is which when generating the SRT, so it stores the (indirect)
1988 reference to the DLL closure in the table by first adding one to it.
1989 We check for this here, and undo the addition before evacuating it.
1991 If the SRT entry hasn't got bit 0 set, the SRT entry points to a
1992 closure that's fixed at link-time, and no extra magic is required.
1994 #ifdef ENABLE_WIN32_DLL_SUPPORT
1995 if ( (unsigned long)(*srt) & 0x1 ) {
1996 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
2006 /* -----------------------------------------------------------------------------
2008 -------------------------------------------------------------------------- */
2011 scavengeTSO (StgTSO *tso)
2013 // chase the link field for any TSOs on the same queue
2014 (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
2015 if ( tso->why_blocked == BlockedOnMVar
2016 || tso->why_blocked == BlockedOnBlackHole
2017 || tso->why_blocked == BlockedOnException
2019 || tso->why_blocked == BlockedOnGA
2020 || tso->why_blocked == BlockedOnGA_NoSend
2023 tso->block_info.closure = evacuate(tso->block_info.closure);
2025 if ( tso->blocked_exceptions != NULL ) {
2026 tso->blocked_exceptions =
2027 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2029 // scavenge this thread's stack
2030 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2033 /* -----------------------------------------------------------------------------
2034 Scavenge a given step until there are no more objects in this step
2037 evac_gen is set by the caller to be either zero (for a step in a
2038 generation < N) or G where G is the generation of the step being
2041 We sometimes temporarily change evac_gen back to zero if we're
2042 scavenging a mutable object where early promotion isn't such a good
2044 -------------------------------------------------------------------------- */
2052 nat saved_evac_gen = evac_gen;
2057 failed_to_evac = rtsFalse;
2059 /* scavenge phase - standard breadth-first scavenging of the
2063 while (bd != stp->hp_bd || p < stp->hp) {
2065 // If we're at the end of this block, move on to the next block
2066 if (bd != stp->hp_bd && p == bd->free) {
2072 info = get_itbl((StgClosure *)p);
2073 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2076 switch (info->type) {
2079 /* treat MVars specially, because we don't want to evacuate the
2080 * mut_link field in the middle of the closure.
2083 StgMVar *mvar = ((StgMVar *)p);
2085 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2086 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2087 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2088 evac_gen = saved_evac_gen;
2089 recordMutable((StgMutClosure *)mvar);
2090 failed_to_evac = rtsFalse; // mutable.
2091 p += sizeofW(StgMVar);
2099 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2100 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2101 p += sizeofW(StgHeader) + 2;
2106 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2107 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2113 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2114 p += sizeofW(StgHeader) + 1;
2119 p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
2125 p += sizeofW(StgHeader) + 1;
2132 p += sizeofW(StgHeader) + 2;
2139 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2140 p += sizeofW(StgHeader) + 2;
2156 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2157 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2158 (StgClosure *)*p = evacuate((StgClosure *)*p);
2160 p += info->layout.payload.nptrs;
2165 if (stp->gen_no != 0) {
2166 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2169 case IND_OLDGEN_PERM:
2170 ((StgIndOldGen *)p)->indirectee =
2171 evacuate(((StgIndOldGen *)p)->indirectee);
2172 if (failed_to_evac) {
2173 failed_to_evac = rtsFalse;
2174 recordOldToNewPtrs((StgMutClosure *)p);
2176 p += sizeofW(StgIndOldGen);
2181 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2182 evac_gen = saved_evac_gen;
2183 recordMutable((StgMutClosure *)p);
2184 failed_to_evac = rtsFalse; // mutable anyhow
2185 p += sizeofW(StgMutVar);
2190 failed_to_evac = rtsFalse; // mutable anyhow
2191 p += sizeofW(StgMutVar);
2195 case SE_CAF_BLACKHOLE:
2198 p += BLACKHOLE_sizeW();
2203 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2204 (StgClosure *)bh->blocking_queue =
2205 evacuate((StgClosure *)bh->blocking_queue);
2206 recordMutable((StgMutClosure *)bh);
2207 failed_to_evac = rtsFalse;
2208 p += BLACKHOLE_sizeW();
2212 case THUNK_SELECTOR:
2214 StgSelector *s = (StgSelector *)p;
2215 s->selectee = evacuate(s->selectee);
2216 p += THUNK_SELECTOR_sizeW();
2220 case AP_UPD: // same as PAPs
2222 /* Treat a PAP just like a section of stack, not forgetting to
2223 * evacuate the function pointer too...
2226 StgPAP* pap = (StgPAP *)p;
2228 pap->fun = evacuate(pap->fun);
2229 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2230 p += pap_sizeW(pap);
2235 // nothing to follow
2236 p += arr_words_sizeW((StgArrWords *)p);
2240 // follow everything
2244 evac_gen = 0; // repeatedly mutable
2245 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2246 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2247 (StgClosure *)*p = evacuate((StgClosure *)*p);
2249 evac_gen = saved_evac_gen;
2250 recordMutable((StgMutClosure *)q);
2251 failed_to_evac = rtsFalse; // mutable anyhow.
2255 case MUT_ARR_PTRS_FROZEN:
2256 // follow everything
2260 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2261 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2262 (StgClosure *)*p = evacuate((StgClosure *)*p);
2264 // it's tempting to recordMutable() if failed_to_evac is
2265 // false, but that breaks some assumptions (eg. every
2266 // closure on the mutable list is supposed to have the MUT
2267 // flag set, and MUT_ARR_PTRS_FROZEN doesn't).
2273 StgTSO *tso = (StgTSO *)p;
2276 evac_gen = saved_evac_gen;
2277 recordMutable((StgMutClosure *)tso);
2278 failed_to_evac = rtsFalse; // mutable anyhow.
2279 p += tso_sizeW(tso);
2284 case RBH: // cf. BLACKHOLE_BQ
2287 nat size, ptrs, nonptrs, vhs;
2289 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2291 StgRBH *rbh = (StgRBH *)p;
2292 (StgClosure *)rbh->blocking_queue =
2293 evacuate((StgClosure *)rbh->blocking_queue);
2294 recordMutable((StgMutClosure *)to);
2295 failed_to_evac = rtsFalse; // mutable anyhow.
2297 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2298 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2299 // ToDo: use size of reverted closure here!
2300 p += BLACKHOLE_sizeW();
2306 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2307 // follow the pointer to the node which is being demanded
2308 (StgClosure *)bf->node =
2309 evacuate((StgClosure *)bf->node);
2310 // follow the link to the rest of the blocking queue
2311 (StgClosure *)bf->link =
2312 evacuate((StgClosure *)bf->link);
2313 if (failed_to_evac) {
2314 failed_to_evac = rtsFalse;
2315 recordMutable((StgMutClosure *)bf);
2318 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2319 bf, info_type((StgClosure *)bf),
2320 bf->node, info_type(bf->node)));
2321 p += sizeofW(StgBlockedFetch);
2329 p += sizeofW(StgFetchMe);
2330 break; // nothing to do in this case
2332 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2334 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2335 (StgClosure *)fmbq->blocking_queue =
2336 evacuate((StgClosure *)fmbq->blocking_queue);
2337 if (failed_to_evac) {
2338 failed_to_evac = rtsFalse;
2339 recordMutable((StgMutClosure *)fmbq);
2342 belch("@@ scavenge: %p (%s) exciting, isn't it",
2343 p, info_type((StgClosure *)p)));
2344 p += sizeofW(StgFetchMeBlockingQueue);
2350 barf("scavenge: unimplemented/strange closure type %d @ %p",
2354 /* If we didn't manage to promote all the objects pointed to by
2355 * the current object, then we have to designate this object as
2356 * mutable (because it contains old-to-new generation pointers).
2358 if (failed_to_evac) {
2359 failed_to_evac = rtsFalse;
2360 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2368 /* -----------------------------------------------------------------------------
2369 Scavenge everything on the mark stack.
2371 This is slightly different from scavenge():
2372 - we don't walk linearly through the objects, so the scavenger
2373 doesn't need to advance the pointer on to the next object.
2374 -------------------------------------------------------------------------- */
2377 scavenge_mark_stack(void)
2383 evac_gen = oldest_gen->no;
2384 saved_evac_gen = evac_gen;
2387 while (!mark_stack_empty()) {
2388 p = pop_mark_stack();
2390 info = get_itbl((StgClosure *)p);
2391 ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));
2394 switch (info->type) {
2397 /* treat MVars specially, because we don't want to evacuate the
2398 * mut_link field in the middle of the closure.
2401 StgMVar *mvar = ((StgMVar *)p);
2403 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2404 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2405 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2406 evac_gen = saved_evac_gen;
2407 failed_to_evac = rtsFalse; // mutable.
2415 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2416 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2426 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2451 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2452 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2453 (StgClosure *)*p = evacuate((StgClosure *)*p);
2459 // don't need to do anything here: the only possible case
2460 // is that we're in a 1-space compacting collector, with
2461 // no "old" generation.
2465 case IND_OLDGEN_PERM:
2466 ((StgIndOldGen *)p)->indirectee =
2467 evacuate(((StgIndOldGen *)p)->indirectee);
2468 if (failed_to_evac) {
2469 recordOldToNewPtrs((StgMutClosure *)p);
2471 failed_to_evac = rtsFalse;
	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    // ignore these
	    failed_to_evac = rtsFalse;
	    break;
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	    break;

	case BLACKHOLE_BQ:
	{
	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    failed_to_evac = rtsFalse;
	    break;
	}
	case THUNK_SELECTOR:
	{
	    StgSelector *s = (StgSelector *)p;
	    s->selectee = evacuate(s->selectee);
	    break;
	}
	case AP_UPD: // same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	{
	    StgPAP* pap = (StgPAP *)p;

	    pap->fun = evacuate(pap->fun);
	    scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	    break;
	}
	case MUT_ARR_PTRS:
	    // follow everything
	{
	    StgPtr next;

	    evac_gen = 0; // repeatedly mutable
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse; // mutable anyhow.
	    break;
	}
	case MUT_ARR_PTRS_FROZEN:
	    // follow everything
	{
	    StgPtr next;

	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}
	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;
	    evac_gen = 0;
	    scavengeTSO(tso);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;
	}
	case RBH: // cf. BLACKHOLE_BQ
	{
	    nat size, ptrs, nonptrs, vhs;
	    char str[80];
	    StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	    StgRBH *rbh = (StgRBH *)p;
	    (StgClosure *)rbh->blocking_queue =
		evacuate((StgClosure *)rbh->blocking_queue);
	    recordMutable((StgMutClosure *)rbh);
	    failed_to_evac = rtsFalse; // mutable anyhow.
	    IF_DEBUG(gc,
		     belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
			   p, info_type(p), (StgClosure *)rbh->blocking_queue));
	    break;
	}
	case BLOCKED_FETCH:
	{
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)bf);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s); node is now %p (%s); exciting, isn't it",
			   bf, info_type((StgClosure *)bf),
			   bf->node, info_type(bf->node)));
	    break;
	}

	case FETCH_ME:
	    break; // nothing to do in this case
	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		recordMutable((StgMutClosure *)fmbq);
	    }
	    IF_DEBUG(gc,
		     belch("@@ scavenge: %p (%s) exciting, isn't it",
			   p, info_type((StgClosure *)p)));
	    break;
	}

	default:
	    barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
		 info->type, p);
	}
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    mkMutCons((StgClosure *)q, &generations[evac_gen]);
	}

	// mark the next bit to indicate "scavenged"
	mark(q+1, Bdescr(q));
2633 } // while (!mark_stack_empty())
2635 // start a new linear scan if the mark stack overflowed at some point
2636 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2637 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2638 mark_stack_overflowed = rtsFalse;
2639 oldgen_scan_bd = oldest_gen->steps[0].blocks;
	oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
	// push a new thing on the mark stack
    loop:
	// find a closure that is marked but not scavenged, and start
	// scavenging from there.
	while (oldgen_scan < oldgen_scan_bd->free
	       && !is_marked(oldgen_scan,oldgen_scan_bd)) {
	    oldgen_scan++;
	}

	if (oldgen_scan < oldgen_scan_bd->free) {
	    // already scavenged?
	    if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
		oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
		goto loop;
	    }
	    push_mark_stack(oldgen_scan);
	    // ToDo: bump the linear scan by the actual size of the object
	    oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
	    goto linear_scan;
	}

	oldgen_scan_bd = oldgen_scan_bd->link;
	if (oldgen_scan_bd != NULL) {
	    oldgen_scan = oldgen_scan_bd->start;
	    goto loop;
	}
    }
}
2674 /* -----------------------------------------------------------------------------
2675 Scavenge one object.
2677 This is used for objects that are temporarily marked as mutable
2678 because they contain old-to-new generation pointers. Only certain
2679 objects can have this property.
2680 -------------------------------------------------------------------------- */
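/* A caller's-eye sketch of the contract: the result is rtsTrue when
 * something could not be evacuated to the desired generation, in which
 * case the object must stay on (or be put back on) a mutable list.
 * Compare the call sites in scavenge_mut_once_list() and
 * scavenge_large() below:
 *
 *   if (scavenge_one(p)) {
 *       mkMutCons((StgClosure *)p, stp->gen);  // keep it mutable
 *   }
 */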
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    nat saved_evac_gen = evac_gen;
    rtsBool no_luck;

    ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
		 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));

    info = get_itbl((StgClosure *)p);

    switch (info->type) {
2697 case FUN_1_0: // hardly worth specialising these guys
    case IND_OLDGEN_PERM:
    {
	StgPtr q, end;

	end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
	    (StgClosure *)*q = evacuate((StgClosure *)*q);
	}
	break;
    }
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	break;

    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	break;
    }

    case ARR_WORDS:
	// nothing to follow
	break;
    case MUT_ARR_PTRS:
	// follow everything
    {
	StgPtr next;

	evac_gen = 0; // repeatedly mutable
	recordMutable((StgMutClosure *)p);
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;
	break;
    }
    case MUT_ARR_PTRS_FROZEN:
	// follow everything
    {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	break;
    }
    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0; // repeatedly mutable
	scavengeTSO(tso);
	recordMutable((StgMutClosure *)tso);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsFalse;
	break;
    }
    case AP_UPD:
    case PAP:
    {
	StgPAP* pap = (StgPAP *)p;
	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	break;
    }

    case IND_OLDGEN:
	// This might happen if for instance a MUT_CONS was pointing to a
	// THUNK which has since been updated.  The IND_OLDGEN will be on the
	// mutable list anyway, so we don't need to do anything here.
	break;

    default:
	barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = failed_to_evac;
    failed_to_evac = rtsFalse;
    return (no_luck);
}
2810 /* -----------------------------------------------------------------------------
2811 Scavenging mutable lists.
2813 We treat the mutable list of each generation > N (i.e. all the
2814 generations older than the one being collected) as roots. We also
2815 remove non-mutable objects from the mutable list at this point.
2816 -------------------------------------------------------------------------- */
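/* For orientation, a sketch of how GarbageCollect() drives these
 * walkers (an illustration only; the real loop lives earlier in this
 * file and also stashes each generation's mut_list in saved_mut_list
 * first):
 *
 *   nat g;
 *   for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
 *       scavenge_mut_once_list(&generations[g]);
 *       scavenge_mutable_list(&generations[g]);
 *   }
 */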
static void
scavenge_mut_once_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next, *new_list;

    p = gen->mut_once_list;
    new_list = END_MUT_LIST;
    next = p->mut_link;

    evac_gen = gen->no;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	switch(info->type) {

	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	case IND_STATIC:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	     */
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
#if 0 && defined(DEBUG)
	    if (RtsFlags.DebugFlags.gc)
	    /* Debugging code to print out the size of the thing we just
	     * promoted
	     */
	    {
		StgPtr start = gen->steps[0].scan;
		bdescr *start_bd = gen->steps[0].scan_bd;
		nat size = 0;
		scavenge(&gen->steps[0]);
		if (start_bd != gen->steps[0].scan_bd) {
		    size += (P_)BLOCK_ROUND_UP(start) - start;
		    start_bd = start_bd->link;
		    while (start_bd != gen->steps[0].scan_bd) {
			size += BLOCK_SIZE_W;
			start_bd = start_bd->link;
		    }
		    size += gen->steps[0].scan -
			(P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
		} else {
		    size = gen->steps[0].scan - start;
		}
		belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
	    }
#endif
	    /* failed_to_evac might happen if we've got more than two
	     * generations, we're collecting only generation 0, the
	     * indirection resides in generation 2 and the indirectee is
	     * in generation 1.
	     */
	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = new_list;
		new_list = p;
	    } else {
		/* the mut_link field of an IND_STATIC is overloaded as the
		 * static link field too (it just so happens that we don't need
		 * both at the same time), so we need to NULL it out when
		 * removing this object from the mutable list because the static
		 * link fields are all assumed to be NULL before doing a major
		 * collection.
		 */
		p->mut_link = NULL;
	    }
	    continue;

	case MUT_CONS:
	    /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
	     * it from the mutable list if possible by promoting whatever it
	     * points to.
	     */
	    if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
		/* didn't manage to promote everything, so put the
		 * MUT_CONS back on the list.
		 */
		p->mut_link = new_list;
		new_list = p;
	    }
	    continue;

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
	}
    }

    gen->mut_once_list = new_list;
}
static void
scavenge_mutable_list(generation *gen)
{
    const StgInfoTable *info;
    StgMutClosure *p, *next;

    p = gen->saved_mut_list;
    next = p->mut_link;

    evac_gen = 0;
    failed_to_evac = rtsFalse;

    for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
	// make sure the info pointer is into text space
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
		     || IS_HUGS_CONSTR_INFO(GET_INFO(p))));

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure

	switch(info->type) {
	case MUT_ARR_PTRS:
	    // follow everything
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    {
		StgPtr end, q;

		end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		    (StgClosure *)*q = evacuate((StgClosure *)*q);
		}
		continue;
	    }

	    // Happens if a MUT_ARR_PTRS in the old generation is frozen
	case MUT_ARR_PTRS_FROZEN:
	    {
		StgPtr end, q;

		evac_gen = gen->no;
		end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
		for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
		    (StgClosure *)*q = evacuate((StgClosure *)*q);
		}
		evac_gen = 0;

		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    mkMutCons((StgClosure *)p, gen);
		}
		continue;
	    }

	case MUT_VAR:
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    p->mut_link = gen->mut_list;
	    gen->mut_list = p;
	    continue;
	case MVAR:
	    {
		StgMVar *mvar = (StgMVar *)p;
		(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
		(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
		(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
		p->mut_link = gen->mut_list;
		gen->mut_list = p;
		continue;
	    }
	case TSO:
	    {
		StgTSO *tso = (StgTSO *)p;

		scavengeTSO(tso);

		/* Don't take this TSO off the mutable list - it might still
		 * point to some younger objects (because we set evac_gen to 0
		 * above).
		 */
		tso->mut_link = gen->mut_list;
		gen->mut_list = (StgMutClosure *)tso;
		continue;
	    }
	case BLACKHOLE_BQ:
	    {
		StgBlockingQueue *bh = (StgBlockingQueue *)p;
		(StgClosure *)bh->blocking_queue =
		    evacuate((StgClosure *)bh->blocking_queue);
		p->mut_link = gen->mut_list;
		gen->mut_list = p;
		continue;
	    }
	    /* Happens if a BLACKHOLE_BQ in the old generation is updated:
	     */
	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    /* Try to pull the indirectee into this generation, so we can
	     * remove the indirection from the mutable list.
	     */
	    evac_gen = gen->no;
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    evac_gen = 0;

	    if (failed_to_evac) {
		failed_to_evac = rtsFalse;
		p->mut_link = gen->mut_once_list;
		gen->mut_once_list = p;
	    } else {
		p->mut_link = NULL;
	    }
	    continue;
	// HWL: check whether all of these are necessary

	case RBH: // cf. BLACKHOLE_BQ
	    {
		// nat size, ptrs, nonptrs, vhs;
		// char str[80];
		// StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
		StgRBH *rbh = (StgRBH *)p;
		(StgClosure *)rbh->blocking_queue =
		    evacuate((StgClosure *)rbh->blocking_queue);
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    recordMutable((StgMutClosure *)rbh);
		}
		// ToDo: use size of reverted closure here!
		p += BLACKHOLE_sizeW();
		break;
	    }
	case BLOCKED_FETCH:
	    {
		StgBlockedFetch *bf = (StgBlockedFetch *)p;
		// follow the pointer to the node which is being demanded
		(StgClosure *)bf->node =
		    evacuate((StgClosure *)bf->node);
		// follow the link to the rest of the blocking queue
		(StgClosure *)bf->link =
		    evacuate((StgClosure *)bf->link);
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    recordMutable((StgMutClosure *)bf);
		}
		p += sizeofW(StgBlockedFetch);
		break;
	    }
	case REMOTE_REF:
	    barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));

	case FETCH_ME:
	    p += sizeofW(StgFetchMe);
	    break; // nothing to do in this case
	case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
	    {
		StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
		(StgClosure *)fmbq->blocking_queue =
		    evacuate((StgClosure *)fmbq->blocking_queue);
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    recordMutable((StgMutClosure *)fmbq);
		}
		p += sizeofW(StgFetchMeBlockingQueue);
		break;
	    }

	default:
	    // shouldn't have anything else on the mutables list
	    barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
	}
    }
}
static void
scavenge_static(void)
{
    StgClosure* p = static_objects;
    const StgInfoTable *info;

    /* Always evacuate straight to the oldest generation for static
     * objects */
    evac_gen = oldest_gen->no;

    /* keep going until we've scavenged all the objects on the linked
     * list... */
    while (p != END_OF_STATIC_LIST) {

	info = get_itbl(p);
	if (info->type==RBH)
	    info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3131 // make sure the info pointer is into text space
3132 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3133 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
	/* Take this object *off* the static_objects list,
	 * and put it on the scavenged_static_objects list.
	 */
	static_objects = STATIC_LINK(info,p);
	STATIC_LINK(info,p) = scavenged_static_objects;
	scavenged_static_objects = p;
	switch (info -> type) {

	case IND_STATIC:
	    {
		StgInd *ind = (StgInd *)p;
		ind->indirectee = evacuate(ind->indirectee);
		/* might fail to evacuate it, in which case we have to pop it
		 * back on the mutable list (and take it off the
		 * scavenged_static list because the static link and mut link
		 * pointers are one and the same).
		 */
		if (failed_to_evac) {
		    failed_to_evac = rtsFalse;
		    scavenged_static_objects = IND_STATIC_LINK(p);
		    ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
		    oldest_gen->mut_once_list = (StgMutClosure *)ind;
		}
		break;
	    }

	case THUNK_STATIC:
	case FUN_STATIC:
	    scavenge_srt(info);
	    break;

	case CONSTR_STATIC:
	    {
		StgPtr q, next;

		next = (P_)p->payload + info->layout.payload.ptrs;
		// evacuate the pointers
		for (q = (P_)p->payload; q < next; q++) {
		    (StgClosure *)*q = evacuate((StgClosure *)*q);
		}
		break;
	    }

	default:
	    barf("scavenge_static: strange closure %d", (int)(info->type));
	}
	ASSERT(failed_to_evac == rtsFalse);

	/* get the next static object from the list.  Remember, there might
	 * be more stuff on this list now that we've done some evacuating!
	 * (static_objects is a global)
	 */
	p = static_objects;
    }
}
3194 /* -----------------------------------------------------------------------------
3195 scavenge_stack walks over a section of stack and evacuates all the
3196 objects pointed to by it. We can use the same code for walking
3197 PAPs, since these are just sections of copied stack.
3198 -------------------------------------------------------------------------- */
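/* For example, the PAP cases in the scavengers above reuse this walker
 * directly, because a PAP's payload is just a copied chunk of stack:
 *
 *   scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
 */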
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
    StgPtr q;
    const StgInfoTable* info;
    StgWord bitmap;
3207 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
    /*
     * Each time around this loop, we are looking at a chunk of stack
     * that starts with either a pending argument section or an
     * activation record.
     */
    while (p < stack_end) {
	q = *(P_ *)p;

	// If we've got a tag, skip over that many words on the stack
	if (IS_ARG_TAG((W_)q)) {
	    p += ARG_SIZE(q);
	    p++;
	    continue;
	}

	/* Is q a pointer to a closure?
	 */
	if (! LOOKS_LIKE_GHC_INFO(q) ) {
#ifdef DEBUG
	    if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
		ASSERT(closure_STATIC((StgClosure *)q));
	    }
	    // otherwise, must be a pointer into the allocation space.
#endif
	    (StgClosure *)*p = evacuate((StgClosure *)q);
	    p++;
	    continue;
	}
	/*
	 * Otherwise, q must be the info pointer of an activation
	 * record.  All activation records have 'bitmap' style layout
	 * info.
	 */
	info = get_itbl((StgClosure *)p);
	switch (info->type) {

	case RET_DYN:
	    // Dynamic bitmap: the mask is stored on the stack
	    bitmap = ((StgRetDyn *)p)->liveness;
	    p      = (P_)&((StgRetDyn *)p)->payload[0];
	    goto small_bitmap;

	    // probably a slow-entry point return address:
	case FUN:
	case FUN_STATIC:
	    {
#if defined(GRAN)
		StgPtr old_p = p;
		p++; p++;
		IF_DEBUG(sanity,
			 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
			       old_p, p, old_p+1));
#else
		p++; // what if FHS!=1 !? -- HWL
#endif
		goto follow_srt;
	    }
	    /* Specialised code for update frames, since they're so common.
	     * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
	     * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
	     */
	case UPDATE_FRAME:
	    {
		StgUpdateFrame *frame = (StgUpdateFrame *)p;

		p += sizeofW(StgUpdateFrame);

#ifndef not_yet
		frame->updatee = evacuate(frame->updatee);
		continue;
#else // specialised code for update frames, not sure if it's worth it.
		StgClosure *to;
		nat type = get_itbl(frame->updatee)->type;

		if (type == EVACUATED) {
		    frame->updatee = evacuate(frame->updatee);
		    continue;
		} else {
		    bdescr *bd = Bdescr((P_)frame->updatee);
		    step *stp = bd->step;
		    if (bd->gen_no > N) {
			if (bd->gen_no < evac_gen) {
			    failed_to_evac = rtsTrue;
			}
			continue;
		    }

		    // Don't promote blackholes
		    if (!(stp->gen_no == 0 &&
			  stp->no != 0 &&
			  stp->no == stp->gen->n_steps-1)) {
			stp = stp->to;
		    }

		    switch (type) {
		    case BLACKHOLE:
		    case CAF_BLACKHOLE:
			to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
				      sizeofW(StgHeader), stp);
			frame->updatee = to;
			continue;
		    case BLACKHOLE_BQ:
			to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
			frame->updatee = to;
			recordMutable((StgMutClosure *)to);
			continue;
		    default:
			/* will never be SE_{,CAF_}BLACKHOLE, since we
			   don't push an update frame for single-entry thunks.  KSW 1999-01. */
			barf("scavenge_stack: UPDATE_FRAME updatee");
		    }
		}
#endif
	    }
	    // small bitmap (< 32 entries, or 64 on a 64-bit machine)
	case STOP_FRAME:
	case CATCH_FRAME:
	case SEQ_FRAME:
	case RET_BCO:
	case RET_SMALL:
	case RET_VEC_SMALL:
	    bitmap = info->layout.bitmap;
	    p++;
	    // this assumes that the payload starts immediately after the info-ptr
	small_bitmap:
	    while (bitmap != 0) {
		if ((bitmap & 1) == 0) {
		    (StgClosure *)*p = evacuate((StgClosure *)*p);
		}
		p++;
		bitmap = bitmap >> 1;
	    }

	follow_srt:
	    scavenge_srt(info);
	    continue;
	    // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
	case RET_BIG:
	case RET_VEC_BIG:
	    {
		StgLargeBitmap *large_bitmap;
		nat i;

		large_bitmap = info->layout.large_bitmap;
		p++;

		for (i=0; i<large_bitmap->size; i++) {
		    bitmap = large_bitmap->bitmap[i];
		    q = p + BITS_IN(W_);
		    while (bitmap != 0) {
			if ((bitmap & 1) == 0) {
			    (StgClosure *)*p = evacuate((StgClosure *)*p);
			}
			p++;
			bitmap = bitmap >> 1;
		    }
		    if (i+1 < large_bitmap->size) {
			while (p < q) {
			    (StgClosure *)*p = evacuate((StgClosure *)*p);
			    p++;
			}
		    }
		}

		// and don't forget to follow the SRT
		goto follow_srt;
	    }

	default:
	    barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
	}
    }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */
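/* A sketch of the evac_gen game from the caller's side (the real call
 * site in GarbageCollect() is not shown in this file excerpt): for the
 * repeatedly-mutable young large objects one would expect roughly
 *
 *   evac_gen = 0;
 *   scavenge_large(stp);
 *
 * whereas static objects are scavenged with evac_gen = oldest_gen->no
 * (see scavenge_static() above).
 */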
static void
scavenge_large(step *stp)
{
    bdescr *bd;
    StgPtr p;

    bd = stp->new_large_objects;

    for (; bd != NULL; bd = stp->new_large_objects) {

	/* take this object *off* the large objects list and put it on
	 * the scavenged large objects list.  This is so that we can
	 * treat new_large_objects as a stack and push new objects on
	 * the front when evacuating.
	 */
	stp->new_large_objects = bd->link;
	dbl_link_onto(bd, &stp->scavenged_large_objects);

	// update the block count in this step.
	stp->n_scavenged_large_blocks += bd->blocks;

	p = bd->start;
	if (scavenge_one(p)) {
	    mkMutCons((StgClosure *)p, stp->gen);
	}
    }
}
3428 /* -----------------------------------------------------------------------------
3429 Initialising the static object & mutable lists
3430 -------------------------------------------------------------------------- */
static void
zero_static_object_list(StgClosure* first_static)
{
    StgClosure* p;
    StgClosure* link;
    const StgInfoTable *info;

    for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
	info = get_itbl(p);
	link = STATIC_LINK(info, p);
	STATIC_LINK(info,p) = NULL;
    }
}
/* This function is only needed because we share the mutable link
 * field with the static link field in an IND_STATIC, so we have to
 * zero the mut_link field before doing a major GC, which needs the
 * static link field.
 *
 * It doesn't do any harm to zero all the mutable link fields on the
 * mutable list.
 */

static void
zero_mutable_list( StgMutClosure *first )
{
    StgMutClosure *next, *c;

    for (c = first; c != END_MUT_LIST; c = next) {
	next = c->mut_link;
	c->mut_link = NULL;
    }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	c->header.info = c->saved_info;
	c->saved_info = NULL;
	// could, but not necessary: c->static_link = NULL;
    }
    caf_list = NULL;
}

void
markCAFs( evac_fn evac )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	evac(&c->indirectee);
    }
}
3497 /* -----------------------------------------------------------------------------
3498 Sanity code for CAF garbage collection.
3500 With DEBUG turned on, we manage a CAF list in addition to the SRT
3501 mechanism. After GC, we run down the CAF list and blackhole any
3502 CAFs which have been garbage collected. This means we get an error
3503 whenever the program tries to enter a garbage collected CAF.
   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
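/* The check itself is simple (a sketch of the core of the #if 0'd
 * gcCAFs() below): a CAF is dead precisely when this GC never touched
 * its static link field:
 *
 *   if (STATIC_LINK(info,p) == NULL) {
 *       SET_INFO(p, &stg_BLACKHOLE_info);  // entering it now fails loudly
 *   }
 */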
#if 0 && defined(DEBUG)

static void
gcCAFs(void)
{
    StgClosure*  p;
    StgClosure** pp;
    const StgInfoTable *info;
    nat i;

    i = 0;
    p = caf_list;
    pp = &caf_list;

    while (p != NULL) {
	info = get_itbl(p);

	ASSERT(info->type == IND_STATIC);

	if (STATIC_LINK(info,p) == NULL) {
	    IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
	    // black hole it
	    SET_INFO(p,&stg_BLACKHOLE_info);
	    p = STATIC_LINK2(info,p);
	    *pp = p;
	} else {
	    pp = &STATIC_LINK2(info,p);
	    p = *pp;
	    i++;
	}
    }

    // belch("%d CAFs live", i);
}
#endif
/* -----------------------------------------------------------------------------
   Lazy black holing

   Whenever a thread returns to the scheduler after possibly doing
   some work, we have to run down the stack and black-hole all the
   closures referred to by update frames.
   -------------------------------------------------------------------------- */
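/* The operative step, for each update frame on the stack, is just:
 *
 *   SET_INFO(frame->updatee, &stg_BLACKHOLE_info);
 *
 * Overwriting the thunk-under-evaluation with a BLACKHOLE lets the GC
 * drop the thunk's free variables, and makes other threads block on it
 * rather than duplicate the work.
 */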
void
threadLazyBlackHole(StgTSO *tso)
{
    StgUpdateFrame *update_frame;
    StgBlockingQueue *bh;
    StgPtr stack_end;

    stack_end = &tso->stack[tso->stack_size];
    update_frame = tso->su;

    while (1) {
	switch (get_itbl(update_frame)->type) {

	case CATCH_FRAME:
	    update_frame = ((StgCatchFrame *)update_frame)->link;
	    break;

	case UPDATE_FRAME:
	    bh = (StgBlockingQueue *)update_frame->updatee;

	    /* if the thunk is already blackholed, it means we've also
	     * already blackholed the rest of the thunks on this stack,
	     * so we can stop early.
	     *
	     * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
	     * don't interfere with this optimisation.
	     */
	    if (bh->header.info == &stg_BLACKHOLE_info) {
		return;
	    }
	    if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
		bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
		SET_INFO(bh,&stg_BLACKHOLE_info);
	    }
	    update_frame = update_frame->link;
	    break;

	case SEQ_FRAME:
	    update_frame = ((StgSeqFrame *)update_frame)->link;
	    break;

	case STOP_FRAME:
	    return;
	default:
	    barf("threadPaused");
	}
    }
}
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
void
threadSqueezeStack(StgTSO *tso)
{
    lnat displacement = 0;
    StgUpdateFrame *frame;
    StgUpdateFrame *next_frame; // Temporally next
    StgUpdateFrame *prev_frame; // Temporally previous
    StgPtr bottom;
    rtsBool prev_was_update_frame;
#if DEBUG
    StgUpdateFrame *top_frame;
    nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
	bhs=0, squeezes=0;
    void printObj( StgClosure *obj ); // from Printer.c

    top_frame = tso->su;
#endif

    bottom = &(tso->stack[tso->stack_size]);
    frame  = tso->su;

    /* There must be at least one frame, namely the STOP_FRAME.
     */
    ASSERT((P_)frame < bottom);
3645 /* Walk down the stack, reversing the links between frames so that
3646 * we can walk back up as we squeeze from the bottom. Note that
3647 * next_frame and prev_frame refer to next and previous as they were
3648 * added to the stack, rather than the way we see them in this
3649 * walk. (It makes the next loop less confusing.)
3651 * Stop if we find an update frame pointing to a black hole
3652 * (see comment in threadLazyBlackHole()).
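   *
   * A sketch of the reversal: before, each frame's link points towards
   * the stack bottom (older frames); afterwards the links point back
   * towards the top, so the second pass can walk back up:
   *
   *    before:  top -> f1 -> f2 -> f3 -> STOP_FRAME (bottom)
   *    after:   f1 <- f2 <- f3   (frame left at the bottom-most frame)
   *
   * (f1..f3 here are illustrative update/catch/seq frames.)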
   */

    next_frame = NULL;
    // bottom - sizeof(StgStopFrame) is the STOP_FRAME
    while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
	prev_frame = frame->link;
	frame->link = next_frame;
	next_frame = frame;
	frame = prev_frame;
#if DEBUG
	IF_DEBUG(sanity,
		 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
		     printObj((StgClosure *)prev_frame);
		     barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
			  frame, prev_frame);
		 })
	switch (get_itbl(frame)->type) {
	case UPDATE_FRAME:
	    upd_frames++;
	    if (frame->updatee->header.info == &stg_BLACKHOLE_info)
		bhs++;
	    break;
	case STOP_FRAME:  stop_frames++;  break;
	case CATCH_FRAME: catch_frames++; break;
	case SEQ_FRAME:   seq_frames++;   break;
	default:
	    barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
		 frame, prev_frame);
	    printObj((StgClosure *)prev_frame);
	}
#endif
	if (get_itbl(frame)->type == UPDATE_FRAME
	    && frame->updatee->header.info == &stg_BLACKHOLE_info) {
	    break;
	}
    }
    /* Now, we're at the bottom.  Frame points to the lowest update
     * frame on the stack, and its link actually points to the frame
     * above. We have to walk back up the stack, squeezing out empty
     * update frames and turning the pointers back around on the way
     * back up.
     *
     * The bottom-most frame (the STOP_FRAME) has not been altered, and
     * we never want to eliminate it anyway.  Just walk one step up
     * before starting to squeeze. When you get to the topmost frame,
     * remember that there are still some words above it that might have
     * to be moved.
     */

    prev_frame = frame;
    frame = next_frame;

    prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
    /*
     * Loop through all of the frames (everything except the very
     * bottom).  Things are complicated by the fact that we have
     * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
     * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
     */
    while (frame != NULL) {
	StgPtr sp;
	StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
	rtsBool is_update_frame;

	next_frame = frame->link;
	is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
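	/* An illustration of the squeeze below (a sketch): given two
	 * temporally adjacent update frames
	 *
	 *    ... | UPDATE_FRAME (updatee = t2) | UPDATE_FRAME (updatee = t1) | ...
	 *
	 * both thunks will eventually be updated with the same value, so
	 * the inner frame can be dropped once its updatee is redirected:
	 *
	 *    UPD_IND_NOLOCK(t2, t1);   // t2 becomes an indirection to t1
	 *
	 * which is what the UPD_IND_NOLOCK() call further down does.
	 */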
	/* Check to see if
	 *   1. both the previous and current frame are update frames
	 *   2. the current frame is empty
	 */
	if (prev_was_update_frame && is_update_frame &&
	    (P_)prev_frame == frame_bottom + displacement) {

	    // Now squeeze out the current frame
	    StgClosure *updatee_keep   = prev_frame->updatee;
	    StgClosure *updatee_bypass = frame->updatee;

#if DEBUG
	    IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
	    squeezes++;
#endif
	    /* Deal with blocking queues.  If both updatees have blocked
	     * threads, then we should merge the queues into the update
	     * frame that we're keeping.
	     *
	     * Alternatively, we could just wake them up: they'll just go
	     * straight to sleep on the proper blackhole!  This is less code
	     * and probably less bug prone, although it's probably much
	     * slower.
	     */
#if 0 // do it properly...
#  if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
#    error Unimplemented lazy BH warning.  (KSW 1999-01)
#  endif
	    if (GET_INFO(updatee_bypass) == stg_BLACKHOLE_BQ_info
		|| GET_INFO(updatee_bypass) == stg_CAF_BLACKHOLE_info
		) {
		// Sigh.  It has one.  Don't lose those threads!
		if (GET_INFO(updatee_keep) == stg_BLACKHOLE_BQ_info) {
		    // Urgh.  Two queues.  Merge them.
		    P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;

		    while (keep_tso->link != END_TSO_QUEUE) {
			keep_tso = keep_tso->link;
		    }
		    keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;

		} else {
		    // For simplicity, just swap the BQ for the BH
		    P_ temp = updatee_keep;

		    updatee_keep = updatee_bypass;
		    updatee_bypass = temp;

		    // Record the swap in the kept frame (below)
		    prev_frame->updatee = updatee_keep;
		}
	    }
#endif
	    TICK_UPD_SQUEEZED();
	    /* wasn't there something about update squeezing and ticky to be
	     * sorted out?  oh yes: we aren't counting each enter properly
	     * in this case.  See the log somewhere.  KSW 1999-04-21
	     *
	     * Check two things: that the two update frames don't point to
	     * the same object, and that the updatee_bypass isn't already an
	     * indirection.  Both of these cases only happen when we're in a
	     * black hole-style loop (and there are multiple update frames
	     * on the stack pointing to the same closure), but they can both
	     * screw us up if we don't check.
	     */
	    if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
		// this wakes the threads up
		UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
	    }
	    sp = (P_)frame - 1; // sp = stuff to slide
	    displacement += sizeofW(StgUpdateFrame);

	} else {
	    // No squeeze for this frame
	    sp = frame_bottom - 1; // Keep the current frame
	    /* Do lazy black-holing.
	     */
	    if (is_update_frame) {
		StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
		if (bh->header.info != &stg_BLACKHOLE_info &&
		    bh->header.info != &stg_BLACKHOLE_BQ_info &&
		    bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		    belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
#endif
#ifdef DEBUG
		    /* zero out the slop so that the sanity checker can tell
		     * where the next closure is.
		     */
		    {
			StgInfoTable *info = get_itbl(bh);
			nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
			/* don't zero out slop for a THUNK_SELECTOR, because its layout
			 * info is used for a different purpose, and it's exactly the
			 * same size as a BLACKHOLE in any case.
			 */
			if (info->type != THUNK_SELECTOR) {
			    for (i = np; i < np + nw; i++) {
				((StgClosure *)bh)->payload[i] = 0;
			    }
			}
		    }
#endif
		    SET_INFO(bh,&stg_BLACKHOLE_info);
		}
	    }
	}

	// Fix the link in the current frame (should point to the frame below)
	frame->link = prev_frame;
	prev_was_update_frame = is_update_frame;

	// Now slide all words from sp up to the next frame

	if (displacement > 0) {
	    P_ next_frame_bottom;

	    if (next_frame != NULL)
		next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
	    else
		next_frame_bottom = tso->sp - 1;

#if 0
	    IF_DEBUG(gc,
		     belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
			   displacement))
#endif

	    while (sp >= next_frame_bottom) {
		sp[displacement] = *sp;
		sp -= 1;
	    }
	}
	(P_)prev_frame = (P_)frame + displacement;
	frame = next_frame;
    }

    tso->sp += displacement;
    tso->su = prev_frame;
    IF_DEBUG(gc,
	     belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
		   squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
}
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * turned on.
 * -------------------------------------------------------------------------- */

void
threadPaused(StgTSO *tso)
{
    if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
	threadSqueezeStack(tso); // does black holing too
    else
	threadLazyBlackHole(tso);
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#if DEBUG
void
printMutOnceList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_once_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3909 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3910 fprintf(stderr, "%p (%s), ",
3911 p, info_type((StgClosure *)p));
    fputc('\n', stderr);
}

void
printMutableList(generation *gen)
{
    StgMutClosure *p, *next;

    p = gen->mut_list;
    next = p->mut_link;

    fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3925 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3926 fprintf(stderr, "%p (%s), ",
3927 p, info_type((StgClosure *)p));
    fputc('\n', stderr);
}

static inline rtsBool
maybeLarge(StgClosure *closure)
{
    StgInfoTable *info = get_itbl(closure);

    /* closure types that may be found on the new_large_objects list;
       see scavenge_large */
    return (info->type == MUT_ARR_PTRS ||
	    info->type == MUT_ARR_PTRS_FROZEN ||
	    info->type == TSO ||
	    info->type == ARR_WORDS);
}

#endif // DEBUG