/* -----------------------------------------------------------------------------
 * $Id: GC.c,v 1.118 2001/08/08 14:14:08 simonmar Exp $
 *
 * (c) The GHC Team 1998-1999
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Storage.h"
#include "StoragePriv.h"
#include "Stats.h"
#include "Schedule.h"
#include "SchedAPI.h"		// for RevertCAFs prototype
#include "Sanity.h"
#include "BlockAlloc.h"
#include "Weak.h"
#include "StablePriv.h"
#include "Prelude.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
StgClosure* static_objects;	      // live static objects
StgClosure* scavenged_static_objects; // static objects scavenged so far
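
/* As an illustration, this is the consing idiom used throughout
 * evacuate() below for adding a static object to the list (shown here
 * for a THUNK_STATIC; FUN_STATIC and IND_STATIC are analogous):
 *
 *     if (THUNK_STATIC_LINK((StgClosure *)q) == NULL) { // not on the list yet
 *         THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
 *         static_objects = (StgClosure *)q;
 *     }
 */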
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;	// also pending finaliser list
static rtsBool weak_done;	// all done for this pass
/* List of all threads during GC
 */
static StgTSO *old_all_threads;
static StgTSO *resurrected_threads;
/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Old to-space (used for two-space collector only)
 */
bdescr *old_to_blocks;
/* Data used for allocation area sizing.
 */
lnat new_blocks;		// blocks allocated during this GC
lnat g0s0_pcnt_kept = 30;	// percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 256
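
/* (A long chain of selector thunks, e.g. the result of repeatedly
 * applying fst to deeply nested pairs, would otherwise make
 * evacuate() recurse once per link; the depth counter bounds that
 * recursion, and past the limit the selector thunk is simply copied
 * unevaluated.  See the THUNK_SELECTOR case in evacuate() below.)
 */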
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static void         mark_root               ( StgClosure **root );
static StgClosure * evacuate                ( StgClosure *q );
static void         zero_static_object_list ( StgClosure* first_static );
static void         zero_mutable_list       ( StgMutClosure *first );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );
static void         scavenge_mut_once_list  ( generation *g );
static void         scavengeCAFs            ( void );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
static inline rtsBool
mark_stack_empty(void) { return mark_sp == mark_stack; }

static inline rtsBool
mark_stack_full(void) { return mark_sp >= mark_splim; }

static inline void
reset_mark_stack(void) { mark_sp = mark_stack; }

static inline void
push_mark_stack(StgPtr p) { *mark_sp++ = p; }

static inline StgPtr
pop_mark_stack(void) { return *--mark_sp; }
/* -----------------------------------------------------------------------------
   GarbageCollect

   For garbage collecting generation N (and all younger generations):

     - follow all pointers in the root set.  The root set includes all
       mutable objects in all steps in all generations.

     - for each pointer, evacuate the object it points to into either

       + to-space in the next higher step in that generation, if one exists,
       + if the object's generation == N, then evacuate it to the next
         generation if one exists, or else to-space in the current
         generation.
       + if the object's generation < N, then evacuate it to to-space
         in the next generation.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   -------------------------------------------------------------------------- */
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif
#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, belch("@@ Starting garbage collection at %ld (%lx)\n",
		     Now, Now));
#endif
  // tell the stats department that we've started a GC
  stat_startGC();

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
	  generations[g].steps[0].n_large_blocks
	  >= generations[g].max_blocks) {
	N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
#if defined(GRAN)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
#endif
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* zero the mutable list for the oldest generation (see comment by
   * zero_mutable_list below).
   */
  if (major_gc) {
    zero_mutable_list(generations[RtsFlags.GcFlags.generations-1].mut_once_list);
  }
  /* Save the old to-space if we're doing a two-space collection
   */
  if (RtsFlags.GcFlags.generations == 1) {
    old_to_blocks = g0s0->to_blocks;
    g0s0->to_blocks = NULL;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  /* Initialise to-space in all the generations/steps that we're
   * collecting.
   */
  for (g = 0; g <= N; g++) {
    generations[g].mut_once_list = END_MUT_LIST;
    generations[g].mut_list = END_MUT_LIST;

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
	continue;
      }

      /* Get a free block for to-space.  Extra blocks will be chained on
       * as necessary.
       */
      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);
      ASSERT(stp->hp ? Bdescr(stp->hp)->step == stp : rtsTrue);
      bd = allocBlock();
      bd->gen_no = g;
      bd->step = stp;
      bd->link = NULL;
      bd->flags = BF_EVACUATED;	// it's a to-space block
      stp->hp = bd->start;
      stp->hpLim = stp->hp + BLOCK_SIZE_W;
      stp->hp_bd = bd;
      stp->to_blocks = bd;
      stp->n_to_blocks = 1;
      stp->scan = bd->start;
      stp->scan_bd = bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
      new_blocks++;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
	bd->flags = BF_LARGE;
      }
      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
	  nat bitmap_size; // in bytes
	  bdescr *bitmap_bdescr;
	  StgPtr bitmap;

	  bitmap_size = stp->n_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

	  if (bitmap_size > 0) {
	      bitmap_bdescr = allocGroup((nat)BLOCK_ROUND_UP(bitmap_size)
					 / BLOCK_SIZE);
	      stp->bitmap = bitmap_bdescr;
	      bitmap = bitmap_bdescr->start;

	      IF_DEBUG(gc, belch("bitmap_size: %d, bitmap: %p",
				 bitmap_size, bitmap););

	      // don't forget to fill it with zeros!
	      memset(bitmap, 0, bitmap_size);

	      // for each block in this step, point to its bitmap from the
	      // block descriptor.
	      for (bd=stp->blocks; bd != NULL; bd = bd->link) {
		  bd->u.bitmap = bitmap;
		  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
	      }
	  }
      }
    }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
	  ASSERT(stp->blocks == NULL);
	  bd = allocBlock();
	  bd->gen_no = g;
	  bd->step = stp;
	  bd->link = NULL;
	  bd->flags = 0;	// *not* a to-space block or a large object
	  stp->hp = bd->start;
	  stp->hpLim = stp->hp + BLOCK_SIZE_W;
	  stp->hp_bd = bd;
	  stp->blocks = bd;
	  stp->n_blocks = 1;
	  new_blocks++;
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->to_blocks = NULL;
      stp->n_to_blocks = 0;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
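
  /* (Concretely: suppose an object in a collected generation is
   * reachable from the mutable lists of both gen 2 and gen 3.
   * Scavenging gen 3's list first evacuates it straight into gen 3;
   * had gen 2's list been done first, the object would already be
   * EVACUATED into gen 2 by the time gen 3's reference was found, and
   * the older generation would have to settle for the younger copy.)
   */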
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = END_MUT_LIST;
    }

    // Do the mut-once lists first
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutOnceList(&generations[g]));
      scavenge_mut_once_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose,
		   printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
	scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
     CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */
#if defined(PAR)
  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_done = rtsFalse;
  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;
  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

#ifdef INTERPRETER
  {
      /* ToDo: To fix the caf leak, we need to make the commented out
       * parts of this code do something sensible - as described in
       * the CAF document.
       */
      extern void markHugsObjects(void);
      markHugsObjects();
  }
#endif
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
	IF_DEBUG(sanity, checkStaticObjects(static_objects));
	scavenge_static();
    }
    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      int gen, st;

      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
	  (mark_stack_bdescr != NULL && !mark_stack_empty())) {
	  scavenge_mark_stack();
	  flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
	for (st = generations[gen].n_steps; --st >= 0; ) {
	  if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
	    continue;
	  }
	  stp = &generations[gen].steps[st];
	  evac_gen = gen;
	  if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
	    scavenge(stp);
	    flag = rtsTrue;
	  }
	  if (stp->new_large_objects != NULL) {
	    scavenge_large(stp);
	    flag = rtsTrue;
	  }
	}
      }
    }

    if (flag) { goto loop; }
    // must be last...
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
      goto loop;
    }
  }

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	      stp->hp_bd->free = stp->hp;
	      stp->hp_bd->link = NULL;
	  }
      }
  }
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
	// stats information: how much we copied
	if (g <= N) {
	  copied -= stp->hp_bd->start + BLOCK_SIZE_W -
	    stp->hp_bd->free;
	}
      }

      // for generations we collected...
      if (g <= N) {
	// rough calculation of garbage collected, for stats output
	if (stp->is_compacted) {
	    collected += (oldgen_saved_blocks - stp->n_blocks) * BLOCK_SIZE_W;
	} else {
	    collected += stp->n_blocks * BLOCK_SIZE_W;
	}
	/* free old memory and shift to-space into from-space for all
	 * the collected steps (except the allocation area).  These
	 * freed blocks will probably be quickly recycled.
	 */
	if (!(g == 0 && s == 0)) {
	    if (stp->is_compacted) {
		// for a compacted step, just shift the new to-space
		// onto the front of the now-compacted existing blocks.
		for (bd = stp->to_blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
		// tack the new blocks on the end of the existing blocks
		if (stp->blocks == NULL) {
		    stp->blocks = stp->to_blocks;
		} else {
		    for (bd = stp->blocks; bd != NULL; bd = next) {
			next = bd->link;
			if (next == NULL) {
			    bd->link = stp->to_blocks;
			}
		    }
		}
		// add the new blocks to the block tally
		stp->n_blocks += stp->n_to_blocks;
	    } else {
		freeChain(stp->blocks);
		stp->blocks = stp->to_blocks;
		stp->n_blocks = stp->n_to_blocks;
		for (bd = stp->blocks; bd != NULL; bd = bd->link) {
		    bd->flags &= ~BF_EVACUATED;	// now from-space
		}
	    }
	    stp->to_blocks = NULL;
	    stp->n_to_blocks = 0;
	}
	/* LARGE OBJECTS.  The current live large objects are chained on
	 * scavenged_large, having been moved during garbage
	 * collection from large_objects.  Any objects left on the
	 * large_objects list are therefore dead, so we free them here.
	 */
	for (bd = stp->large_objects; bd != NULL; bd = next) {
	  next = bd->link;
	  freeGroup(bd);
	}

	// update the count of blocks used by large objects
	for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
	  bd->flags &= ~BF_EVACUATED;
	}
	stp->large_objects  = stp->scavenged_large_objects;
	stp->n_large_blocks = stp->n_scavenged_large_blocks;
      } else {
	// for older generations...

	/* For older generations, we need to append the
	 * scavenged_large_object list (i.e. large objects that have been
	 * promoted during this GC) to the large_object list for that step.
	 */
	for (bd = stp->scavenged_large_objects; bd; bd = next) {
	  next = bd->link;
	  bd->flags &= ~BF_EVACUATED;
	  dbl_link_onto(bd, &stp->large_objects);
	}

	// add the new blocks we promoted during this GC
	stp->n_blocks += stp->n_to_blocks;
	stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
	     oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
		     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
			  RtsFlags.GcFlags.minAllocAreaSize);

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.compact ||
	  (max > 0 &&
	   oldest_gen->steps[0].n_blocks >
	   (RtsFlags.GcFlags.compactThreshold * max) / 100)) {
	  oldest_gen->steps[0].is_compacted = 1;
//	  fprintf(stderr,"compaction: on\n", live);
      } else {
	  oldest_gen->steps[0].is_compacted = 0;
//	  fprintf(stderr,"compaction: off\n", live);
      }

      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {
	  if (oldest_gen->steps[0].is_compacted) {
	      if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
	      }
	  } else {
	      if ( (size * (gens - 1) * 2) + min_alloc > max ) {
		  size = (max - min_alloc) / ((gens - 1) * 2);
	      }
	  }

	  if (size < live) {
	      heapOverflow();
	  }
      }

#if 0
      fprintf(stderr,"live: %d, min_alloc: %d, size : %d, max = %d\n", live,
	      min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
	  generations[g].max_blocks = size;
      }
  }
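
  /* (Worked example for the non-compacting branch above: with
   * max = 1000 blocks, gens = 2 and min_alloc = 128, the check is
   * size * (2-1) * 2 + 128 > 1000, so any size over 436 is clamped to
   * size = (1000 - 128) / ((2-1) * 2) = 436: the old generation may
   * use at most 436 blocks, because copying-collecting it can briefly
   * need twice that space.)
   */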
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
	  stp = &generations[g].steps[s];
	  if (stp->is_compacted && stp->bitmap != NULL) {
	      freeGroup(stp->bitmap);
	  }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
    nat blocks;

    if (old_to_blocks != NULL) {
      freeChain(old_to_blocks);
    }
    for (bd = g0s0->to_blocks; bd != NULL; bd = bd->link) {
      bd->flags = 0;	// now from-space
    }
    /* For a two-space collector, we need to resize the nursery. */

    /* set up a new nursery.  Allocate a nursery size based on a
     * function of the amount of live data (by default a factor of 2)
     * Use the blocks from the old nursery if possible, freeing up any
     * left over blocks.
     *
     * If we get near the maximum heap size, then adjust our nursery
     * size accordingly.  If the nursery is the same size as the live
     * data (L), then we need 3L bytes.  We can reduce the size of the
     * nursery to bring the required memory down near 2L bytes.
     *
     * A normal 2-space collector would need 4L bytes to give the same
     * performance we get from 3L bytes, reducing to the same
     * performance at 2L bytes.
     */
    blocks = g0s0->n_to_blocks;

    if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
	 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
	 RtsFlags.GcFlags.maxHeapSize ) {
      long adjusted_blocks;  // signed on purpose
      int pc_free;

      adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
      IF_DEBUG(gc, belch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
      pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
      if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
	heapOverflow();
      }
      blocks = adjusted_blocks;
    } else {
      blocks *= RtsFlags.GcFlags.oldGenFactor;
      if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }
    }
    resizeNursery(blocks);
  } else {
    /* Generational collector:
     * If the user has given us a suggested heap size, adjust our
     * allocation area to make best use of the memory available.
     */
    if (RtsFlags.GcFlags.heapSizeSuggestion) {
      long blocks;
      nat needed = calcNeeded();	// approx blocks needed at next GC

      /* Guess how much will be live in generation 0 step 0 next time.
       * A good approximation is obtained by finding the
       * percentage of g0s0 that was live at the last minor GC.
       */
      if (N == 0) {
	g0s0_pcnt_kept = (new_blocks * 100) / g0s0->n_blocks;
      }

      /* Estimate a size for the allocation area based on the
       * information available.  We might end up going slightly under
       * or over the suggested heap size, but we should be pretty
       * close on average.
       *
       * Formula:            suggested - needed
       *                ----------------------------
       *                    1 + g0s0_pcnt_kept/100
       *
       * where 'needed' is the amount of memory needed at the next
       * collection for collecting all steps except g0s0.
       */
      blocks =
	(((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
	(100 + (long)g0s0_pcnt_kept);

      if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
	blocks = RtsFlags.GcFlags.minAllocAreaSize;
      }

      resizeNursery((nat)blocks);
    }
  }
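
  /* (Worked example for the formula above: with heapSizeSuggestion =
   * 6400 blocks, needed = 1600 and g0s0_pcnt_kept = 25, we get
   * blocks = (6400 - 1600) * 100 / (100 + 25) = 3840.  A nursery of
   * 3840 blocks keeps 3840 * 25% = 960 blocks live at the next minor
   * GC, and 3840 + 960 + 1600 = 6400, exactly the suggestion.)
   */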
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

  // zero the scavenged static object list
  if (major_gc) {
    zero_static_object_list(scavenged_static_objects);
  }
  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());
#ifdef DEBUG
  // symbol-table based profiling
  /* heapCensus(to_blocks); */ /* ToDo */
#endif

#ifdef PROFILING
  // restore enclosing cost centre
  CCCS = prev_CCS;
#endif

  // check for memory leaks if sanity checking is on
  IF_DEBUG(sanity, memInventory());
#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, N);
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.
   -------------------------------------------------------------------------- */
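
/* (Why "possibly many times": the key of one weak pointer may be
 * reachable only through the value of another.  If weak pointer A's
 * value points at B's key, then B's key is only discovered to be
 * alive after A's key has been found alive and A's value evacuated,
 * so a further pass is needed before B can be classified.  Only when
 * a whole pass makes no change can the remaining ones be declared
 * dead.)
 */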
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  if (weak_done) { return rtsFalse; }

  /* doesn't matter where we evacuate values/finalizers to, since
   * these pointers are treated as roots (iff the keys are alive).
   */
  evac_gen = 0;

  last_w = &old_weak_ptr_list;
  for (w = old_weak_ptr_list; w != NULL; w = next_w) {

    /* There might be a DEAD_WEAK on the list if finalizeWeak# was
     * called on a live weak pointer object.  Just remove it.
     */
    if (w->header.info == &stg_DEAD_WEAK_info) {
      next_w = ((StgDeadWeak *)w)->link;
      *last_w = next_w;
      continue;
    }

    ASSERT(get_itbl(w)->type == WEAK);
    /* Now, check whether the key is reachable.
     */
    new = isAlive(w->key);
    if (new != NULL) {
      w->key = new;
      // evacuate the value and finalizer
      w->value = evacuate(w->value);
      w->finalizer = evacuate(w->finalizer);
      // remove this weak ptr from the old_weak_ptr list
      *last_w = w->link;
      // and put it on the new weak ptr list
      next_w  = w->link;
      w->link = weak_ptr_list;
      weak_ptr_list = w;
      flag = rtsTrue;
      IF_DEBUG(weak, belch("Weak pointer still alive at %p -> %p", w, w->key));
    } else {
      last_w = &(w->link);
      next_w = w->link;
    }
  }
  /* Now deal with the all_threads list, which behaves somewhat like
   * the weak ptr list.  If we discover any threads that are about to
   * become garbage, we wake them up and administer an exception.
   */
  {
    StgTSO *t, *tmp, *next, **prev;

    prev = &old_all_threads;
    for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

      (StgClosure *)tmp = isAlive((StgClosure *)t);
      if (tmp != NULL) {
	  t = tmp;
      }

      ASSERT(get_itbl(t)->type == TSO);
      switch (t->what_next) {
      case ThreadRelocated:
	  next = t->link;
	  *prev = next;
	  continue;
      case ThreadKilled:
      case ThreadComplete:
	  // finished or died.  The thread might still be alive, but we
	  // don't keep it on the all_threads list.  Don't forget to
	  // stub out its global_link field.
	  next = t->global_link;
	  t->global_link = END_TSO_QUEUE;
	  *prev = next;
	  continue;
      default: ;
      }

      if (tmp == NULL) {
	  // not alive (yet): leave this thread on the old_all_threads list.
	  prev = &(t->global_link);
	  next = t->global_link;
      } else {
	  // alive: move this thread onto the all_threads list.
	  next = t->global_link;
	  t->global_link = all_threads;
	  all_threads = t;
	  *prev = next;
      }
    }
  }
  /* If we didn't make any changes, then we can go round and kill all
   * the dead weak pointers.  The old_weak_ptr list is used as a list
   * of pending finalizers later on.
   */
  if (flag == rtsFalse) {
    for (w = old_weak_ptr_list; w; w = w->link) {
      w->finalizer = evacuate(w->finalizer);
    }

    /* And resurrect any threads which were about to become garbage.
     */
    {
      StgTSO *t, *tmp, *next;
      for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
	next = t->global_link;
	(StgClosure *)tmp = evacuate((StgClosure *)t);
	tmp->global_link = resurrected_threads;
	resurrected_threads = tmp;
      }
    }

    weak_done = rtsTrue;
  }

  return flag;
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */
static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      (StgClosure *)w = evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */
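
/* A minimal usage sketch (this is exactly how traverse_weak_ptr_list
 * above uses it, testing for liveness and picking up the new address
 * in one call):
 *
 *     StgClosure *new = isAlive(w->key);
 *     if (new != NULL) {
 *         w->key = new;   // key is live; use its post-GC address
 *     } else {
 *         // key is dead; the weak pointer can be finalized
 *     }
 */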
StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {
    info = get_itbl(p);

    /* ToDo: for static closures, check the static link field.
     * Problem here is that we sometimes don't set the link field, eg.
     * for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
     */

    bd = Bdescr((P_)p);

    // ignore closures in generations that we're not collecting.
    if (LOOKS_LIKE_STATIC(p) || bd->gen_no > N) {
	return p;
    }

    // large objects have an evacuated flag
    if (bd->flags & BF_LARGE) {
	return (bd->flags & BF_EVACUATED) ? p : NULL;
    }

    // check the mark bit for compacted steps
    if (bd->step->is_compacted && is_marked((P_)p,bd)) {
	return p;
    }

    switch (info->type) {
    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:		// rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
	// follow indirections
	p = ((StgInd *)p)->indirectee;
	continue;

    case EVACUATED:
	// alive!
	return ((StgEvacuated *)p)->evacuee;

    case TSO:
	if (((StgTSO *)p)->what_next == ThreadRelocated) {
	    p = (StgClosure *)((StgTSO *)p)->link;
	    continue;
	}
	// otherwise fall through...

    default:
	// dead.
	return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

static void
addBlock(step *stp)
{
  bdescr *bd = allocBlock();
  bd->gen_no = stp->gen_no;
  bd->step = stp;

  if (stp->gen_no <= N) {
    bd->flags = BF_EVACUATED;
  } else {
    bd->flags = 0;
  }

  // chain the new block onto the step's to-space
  stp->hp_bd->free = stp->hp;
  stp->hp_bd->link = bd;
  stp->hp = bd->start;
  stp->hpLim = stp->hp + BLOCK_SIZE_W;
  stp->hp_bd = bd;
  stp->n_to_blocks++;
  new_blocks++;
}
static __inline__ void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
  p->header.info = &stg_EVACUATED_info;
  ((StgEvacuated *)p)->evacuee = dest;
}

static __inline__ StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size > 0; --size) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp = to;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
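
/* (evacuate() below uses this for black holes:
 *     copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 * only the header is actually copied, but the closure's full
 * BLACKHOLE_sizeW() words are reserved in to-space, since a black
 * hole's payload is never inspected.)
 */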
static __inline__ StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ to, from, dest;

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
    addBlock(stp);
  }

  for (to = stp->hp, from = (P_)src; size_to_copy > 0; --size_to_copy) {
    *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   large_objects list of its step, and linking it on to the
   (singly-linked) new_large_objects list, from where it will be
   scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */
static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // should point to the beginning of the block
  ASSERT(((W_)p & BLOCK_MASK) == 0);

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
    failed_to_evac = rtsTrue;
#else
    stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Adding a MUT_CONS to an older generation.

   This is necessary from time to time when we end up with an
   old-to-new generation pointer in a non-mutable object.  We defer
   the promotion until the next GC.
   -------------------------------------------------------------------------- */
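
/* (scavenge() below uses this as a fallback: when failed_to_evac is
 * still set after scavenging an immutable object q, it calls
 *     mkMutCons((StgClosure *)q, &generations[evac_gen]);
 * so that the old-to-new pointer inside q is revisited via the
 * generation's mutable list at the next collection.)
 */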
static StgClosure *
mkMutCons(StgClosure *ptr, generation *gen)
{
  StgMutVar *q;
  step *stp;

  stp = &gen->steps[0];

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + sizeofW(StgIndOldGen) >= stp->hpLim) {
    addBlock(stp);
  }

  q = (StgMutVar *)stp->hp;
  stp->hp += sizeofW(StgMutVar);

  SET_HDR(q,&stg_MUT_CONS_info,CCS_GC);
  q->var = ptr;
  recordOldToNewPtrs((StgMutClosure *)q);

  return (StgClosure *)q;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
       if  M > N     do nothing
       else          evac to step->to

   if  M < evac_gen  evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen  do nothing
   if  M <  evac_gen  set failed_to_evac flag to indicate that we
                      didn't manage to evacuate this object into evac_gen.

   -------------------------------------------------------------------------- */
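
/* (Example: suppose N = 1 and we are scavenging gen 2's mutable list,
 * so evac_gen = 2.  A live object found in gen 0 (M = 0 < evac_gen)
 * is evacuated straight to gen 2, step 0.  But if it had already been
 * evacuated to gen 1 earlier in this GC, all evacuate() can do is set
 * failed_to_evac; the scavenger then records the referring object on
 * a mutable list via mkMutCons() above so the pointer is found again
 * next time.)
 */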
static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  if (HEAP_ALLOCED(q)) {
    bd = Bdescr((P_)q);

    if (bd->gen_no > N) {
	/* Can't evacuate this object, because it's in a generation
	 * older than the ones we're collecting.  Let's hope that it's
	 * in evac_gen or older, or we will have to arrange to track
	 * this pointer using the mutable list.
	 */
	if (bd->gen_no < evac_gen) {
	    // nope
	    failed_to_evac = rtsTrue;
	    TICK_GC_FAILED_PROMOTION();
	}
	return q;
    }
    /* evacuate large objects by re-linking them onto a different list.
     */
    if (bd->flags & BF_LARGE) {
	info = get_itbl(q);
	if (info->type == TSO &&
	    ((StgTSO *)q)->what_next == ThreadRelocated) {
	    q = (StgClosure *)((StgTSO *)q)->link;
	    goto loop;
	}
	evacuate_large((P_)q);
	return q;
    }

    /* If the object is in a step that we're compacting, then we
     * need to use an alternative evacuate procedure.
     */
    if (bd->step->is_compacted) {
	if (!is_marked((P_)q,bd)) {
	    mark((P_)q,bd);
	    if (mark_stack_full()) {
		mark_stack_overflowed = rtsTrue;
		reset_mark_stack();
	    }
	    push_mark_stack((P_)q);
	}
	return q;
    }

    stp = bd->step->to;
  }
#ifdef DEBUG
  else stp = NULL; // make sure copy() will crash if HEAP_ALLOCED is wrong
#endif
  // make sure the info pointer is into text space
  ASSERT(q && (LOOKS_LIKE_GHC_INFO(GET_INFO(q))
	       || IS_HUGS_CONSTR_INFO(GET_INFO(q))));

  info = get_itbl(q);

  switch (info->type) {
  case MUT_VAR:
  case MVAR:
      to = copy(q,sizeW_fromITBL(info),stp);
      recordMutable((StgMutClosure *)to);
      return to;

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
	  // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
	  (StgChar)w <= MAX_CHARLIKE) {
	  return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
	  (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
	  return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else, fall through ...
  }

  case FUN_1_0:
  case FUN_0_1:
  case CONSTR_1_0:
      return copy(q,sizeofW(StgHeader)+1,stp);
  case THUNK_1_0:		// here because of MIN_UPD_SIZE
  case THUNK_0_1:
  case THUNK_1_1:
  case THUNK_0_2:
  case THUNK_2_0:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
	  bd->step->no != 0 &&
	  bd->step->no == generations[bd->gen_no].n_steps-1) {
	  stp = bd->step;
      }
#endif
      return copy(q,sizeofW(StgHeader)+2,stp);

  case FUN_1_1:
  case FUN_0_2:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_0_2:
  case CONSTR_2_0:
      return copy(q,sizeofW(StgHeader)+2,stp);
  case IND_PERM:
  case IND_OLDGEN_PERM:
      return copy(q,sizeW_fromITBL(info),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case BLACKHOLE:
      return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case BLACKHOLE_BQ:
      to = copy(q,BLACKHOLE_sizeW(),stp);
      recordMutable((StgMutClosure *)to);
      return to;
  case THUNK_SELECTOR:
    {
      const StgInfoTable* selectee_info;
      StgClosure* selectee = ((StgSelector*)q)->selectee;

    selector_loop:
      selectee_info = get_itbl(selectee);
      switch (selectee_info->type) {
      case CONSTR:
      case CONSTR_1_0:
      case CONSTR_0_1:
      case CONSTR_2_0:
      case CONSTR_1_1:
      case CONSTR_0_2:
      case CONSTR_STATIC:
	{
	  StgWord offset = info->layout.selector_offset;

	  // check that the size is in range
	  ASSERT(offset <
		 (StgWord32)(selectee_info->layout.payload.ptrs +
			     selectee_info->layout.payload.nptrs));

	  // perform the selection!
	  q = selectee->payload[offset];
	  /* if we're already in to-space, there's no need to continue
	   * with the evacuation, just update the source address with
	   * a pointer to the (evacuated) constructor field.
	   */
	  if (HEAP_ALLOCED(q)) {
	    bdescr *bd = Bdescr((P_)q);
	    if (bd->flags & BF_EVACUATED) {
	      if (bd->gen_no < evac_gen) {
		failed_to_evac = rtsTrue;
		TICK_GC_FAILED_PROMOTION();
	      }
	      return q;
	    }
	  }

	  /* otherwise, carry on and evacuate this constructor field,
	   * (but not the constructor itself)
	   */
	  goto loop;
	}
      case IND:
      case IND_STATIC:
      case IND_PERM:
      case IND_OLDGEN:
      case IND_OLDGEN_PERM:
	selectee = ((StgInd *)selectee)->indirectee;
	goto selector_loop;

      case EVACUATED:
	selectee = ((StgEvacuated *)selectee)->evacuee;
	goto selector_loop;

      case THUNK_SELECTOR:
#if 0
	/* Disabled 03 April 2001 by JRS; it seems to cause the GC (or
	   something) to go into an infinite loop when the nightly
	   stage2 compiles PrelTup.lhs. */

	/* we can't recurse indefinitely in evacuate(), so set a
	 * limit on the number of times we can go around this
	 * loop.
	 */
	if (thunk_selector_depth < MAX_THUNK_SELECTOR_DEPTH) {
	    bdescr *bd;
	    bd = Bdescr((P_)selectee);
	    if (!(bd->flags & BF_EVACUATED)) {
		thunk_selector_depth++;
		selectee = evacuate(selectee);
		thunk_selector_depth--;
		goto selector_loop;
	    }
	}
#endif
	// otherwise, fall through...
      case THUNK:
      case THUNK_1_0:
      case THUNK_0_1:
      case THUNK_2_0:
      case THUNK_1_1:
      case THUNK_0_2:
      case AP_UPD:
      case BLACKHOLE:
      case BLACKHOLE_BQ:
      case CAF_BLACKHOLE:
      case SE_CAF_BLACKHOLE:
	// not evaluated yet
	break;

#if defined(PAR)
	// a copy of the top-level cases below
      case RBH: // cf. BLACKHOLE_BQ
	{
	  //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
	  to = copy(q,BLACKHOLE_sizeW(),stp);
	  //ToDo: derive size etc from reverted IP
	  //to = copy(q,size,stp);
	  // recordMutable((StgMutClosure *)to);
	  return to;
	}

      case BLOCKED_FETCH:
	ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
	to = copy(q,sizeofW(StgBlockedFetch),stp);
	return to;

      case FETCH_ME:
	ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMe),stp);
	return to;

      case FETCH_ME_BQ:
	ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
	to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
	return to;
#endif

      default:
	barf("evacuate: THUNK_SELECTOR: strange selectee %d",
	     (int)(selectee_info->type));
      }
    }
    return copy(q,THUNK_SELECTOR_sizeW(),stp);
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;
  case THUNK_STATIC:
    if (info->srt_len > 0 && major_gc &&
	THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
      THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case FUN_STATIC:
    if (info->srt_len > 0 && major_gc &&
	FUN_STATIC_LINK((StgClosure *)q) == NULL) {
      FUN_STATIC_LINK((StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;

  case IND_STATIC:
    /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
     * on the CAF list, so don't do anything with it here (we'll
     * scavenge it later).
     */
    if (major_gc
	&& ((StgIndStatic *)q)->saved_info == NULL
	&& IND_STATIC_LINK((StgClosure *)q) == NULL) {
	IND_STATIC_LINK((StgClosure *)q) = static_objects;
	static_objects = (StgClosure *)q;
    }
    return q;

  case CONSTR_STATIC:
    if (major_gc && STATIC_LINK(info,(StgClosure *)q) == NULL) {
      STATIC_LINK(info,(StgClosure *)q) = static_objects;
      static_objects = (StgClosure *)q;
    }
    return q;
  case CONSTR_INTLIKE:
  case CONSTR_CHARLIKE:
  case CONSTR_NOCAF_STATIC:
    /* no need to put these on the static linked list, they don't need
     * to be scavenged.
     */
    return q;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case SEQ_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);
  case AP_UPD:
  case PAP:
    /* PAPs and AP_UPDs are special - the payload is a copy of a chunk
     * of stack, tagging and all.
     */
    return copy(q,pap_sizeW((StgPAP*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    if (evac_gen > 0) {		// optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (Bdescr((P_)p)->gen_no < evac_gen) {
	failed_to_evac = rtsTrue;
	TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
      // just copy the block
      return copy(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
	q = (StgClosure *)tso->link;
	goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
	StgTSO *new_tso = (StgTSO *)copy((StgClosure *)tso,tso_sizeW(tso),stp);
	move_TSO(tso, new_tso);
	return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH: // cf. BLACKHOLE_BQ
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
	       belch("@@ evacuate: RBH %p (%s) to %p (%s)",
		     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME:
    ASSERT(sizeofW(StgFetchMe) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgFetchMeBlockingQueue) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
	     belch("@@ evacuate: %p (%s) to %p (%s)",
		   q, info_type(q), to, info_type(to)));
    return to;
#endif
  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO(StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointers...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
    dest->su = (StgUpdateFrame *) ((P_)dest->su + diff);

    relocate_stack(dest, diff);
}
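
/* (Pointer subtraction is in units of the pointed-to type, so with
 * 4-byte words, src = 0x5000 and dest = 0x6000 give diff = 0x400
 * words.  Adding diff to each stack-internal pointer, as above and in
 * relocate_stack below, shifts it by the same 0x1000 bytes that the
 * TSO itself moved.)
 */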
/* -----------------------------------------------------------------------------
   relocate_stack is called to update the linkage between
   UPDATE_FRAMEs (and SEQ_FRAMEs etc.) when a stack is moved from one
   place to another.
   -------------------------------------------------------------------------- */

StgTSO *
relocate_stack(StgTSO *dest, ptrdiff_t diff)
{
  StgUpdateFrame *su;
  StgCatchFrame  *cf;
  StgSeqFrame    *sf;

  su = dest->su;

  while ((P_)su < dest->stack + dest->stack_size) {
    switch (get_itbl(su)->type) {

    // GCC actually manages to common up these three cases!

    case UPDATE_FRAME:
      su->link = (StgUpdateFrame *) ((StgPtr)su->link + diff);
      su = su->link;
      continue;

    case CATCH_FRAME:
      cf = (StgCatchFrame *)su;
      cf->link = (StgUpdateFrame *) ((StgPtr)cf->link + diff);
      su = cf->link;
      continue;

    case SEQ_FRAME:
      sf = (StgSeqFrame *)su;
      sf->link = (StgUpdateFrame *) ((StgPtr)sf->link + diff);
      su = sf->link;
      continue;

    case STOP_FRAME:
      // all done!
      break;

    default:
      barf("relocate_stack %d", (int)(get_itbl(su)->type));
    }
    break;
  }

  return dest;
}
static inline void
scavenge_srt(const StgInfoTable *info)
{
  StgClosure **srt, **srt_end;

  /* evacuate the SRT.  If srt_len is zero, then there isn't an
   * srt field in the info table.  That's ok, because we'll
   * never dereference it.
   */
  srt = (StgClosure **)(info->srt);
  srt_end = srt + info->srt_len;
  for (; srt < srt_end; srt++) {
    /* Special-case to handle references to closures hiding out in DLLs, since
       double indirections are required to get at those.  The code generator
       knows which is which when generating the SRT, so it stores the (indirect)
       reference to the DLL closure in the table by first adding one to it.
       We check for this here, and undo the addition before evacuating it.

       If the SRT entry hasn't got bit 0 set, the SRT entry points to a
       closure that's fixed at link-time, and no extra magic is required.
    */
#ifdef ENABLE_WIN32_DLL_SUPPORT
    if ( (unsigned long)(*srt) & 0x1 ) {
       evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
    } else {
       evacuate(*srt);
    }
#else
    evacuate(*srt);
#endif
  }
}
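
/* (Illustrative addresses: a tagged SRT entry of 0x00403001 has bit 0
 * set, so the closure is reached by untagging and then dereferencing;
 * the import-table slot at 0x00403000 holds the closure's real
 * address in the DLL's data section, hence the double indirection the
 * comment above describes.)
 */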
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */
static void
scavengeTSO (StgTSO *tso)
{
  // chase the link field for any TSOs on the same queue
  (StgClosure *)tso->link = evacuate((StgClosure *)tso->link);
  if (   tso->why_blocked == BlockedOnMVar
	 || tso->why_blocked == BlockedOnBlackHole
	 || tso->why_blocked == BlockedOnException
#if defined(PAR)
	 || tso->why_blocked == BlockedOnGA
	 || tso->why_blocked == BlockedOnGA_NoSend
#endif
	 ) {
    tso->block_info.closure = evacuate(tso->block_info.closure);
  }
  if ( tso->blocked_exceptions != NULL ) {
    tso->blocked_exceptions =
      (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
  }
  // scavenge this thread's stack
  scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
static void
scavenge(step *stp)
{
  StgPtr p, q;
  const StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
      bd = bd->link;
      p = bd->start;
      continue;
    }

    info = get_itbl((StgClosure *)p);
    ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

    q = p;
    switch (info->type) {
    case MVAR:
	/* treat MVars specially, because we don't want to evacuate the
	 * mut_link field in the middle of the closure.
	 */
    {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	(StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	(StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	(StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)mvar);
	failed_to_evac = rtsFalse; // mutable.
	p += sizeofW(StgMVar);
	break;
    }
    case THUNK_2_0:
    case FUN_2_0:
	scavenge_srt(info);
    case CONSTR_2_0:
	((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_0:
	scavenge_srt(info);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_1_0:
	scavenge_srt(info);
    case CONSTR_1_0:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_1:
	scavenge_srt(info);
	p += sizeofW(StgHeader) + 2; // MIN_UPD_SIZE
	break;

    case FUN_0_1:
	scavenge_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_2:
    case FUN_0_2:
	scavenge_srt(info);
    case CONSTR_0_2:
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_1:
    case FUN_1_1:
	scavenge_srt(info);
    case CONSTR_1_1:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;
    case FUN:
    case THUNK:
	scavenge_srt(info);
	// fall through

    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    case BCO:
    {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
    }
    case IND_PERM:
	if (stp->gen_no != 0) {
	    SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
	}
	// fall through
    case IND_OLDGEN_PERM:
	((StgIndOldGen *)p)->indirectee =
	    evacuate(((StgIndOldGen *)p)->indirectee);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordOldToNewPtrs((StgMutClosure *)p);
	}
	p += sizeofW(StgIndOldGen);
	break;
    case MUT_VAR:
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)p);
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case MUT_CONS:
	// ignore these
	failed_to_evac = rtsFalse; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;
    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	p += BLACKHOLE_sizeW();
	break;

    case BLACKHOLE_BQ:
    {
	StgBlockingQueue *bh = (StgBlockingQueue *)p;
	(StgClosure *)bh->blocking_queue =
	    evacuate((StgClosure *)bh->blocking_queue);
	recordMutable((StgMutClosure *)bh);
	failed_to_evac = rtsFalse;
	p += BLACKHOLE_sizeW();
	break;
    }
    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
    }
    case AP_UPD: // same as PAPs
    case PAP:
	/* Treat a PAP just like a section of stack, not forgetting to
	 * evacuate the function pointer too...
	 */
    {
	StgPAP* pap = (StgPAP *)p;

	pap->fun = evacuate(pap->fun);
	scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	p += pap_sizeW(pap);
	break;
    }
    case ARR_WORDS:
	// nothing to follow
	p += arr_words_sizeW((StgArrWords *)p);
	break;

    case MUT_ARR_PTRS:
	// follow everything
    {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)q);
	failed_to_evac = rtsFalse; // mutable anyhow.
	break;
    }
    case MUT_ARR_PTRS_FROZEN:
	// follow everything
    {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    (StgClosure *)*p = evacuate((StgClosure *)*p);
	}
	// it's tempting to recordMutable() if failed_to_evac is
	// false, but that breaks some assumptions (eg. every
	// closure on the mutable list is supposed to have the MUT
	// flag set, and MUT_ARR_PTRS_FROZEN doesn't).
	break;
    }
    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	recordMutable((StgMutClosure *)tso);
	failed_to_evac = rtsFalse; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
    }
#if defined(PAR)
    case RBH: // cf. BLACKHOLE_BQ
    {
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);

	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	    evacuate((StgClosure *)rbh->blocking_queue);
	recordMutable((StgMutClosure *)rbh);
	failed_to_evac = rtsFalse; // mutable anyhow.
	IF_DEBUG(gc,
		 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
		       p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
    }
    case BLOCKED_FETCH:
    {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	    evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	    evacuate((StgClosure *)bf->link);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)bf);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
		       bf, info_type((StgClosure *)bf),
		       bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
    }
    case FETCH_ME:
	p += sizeofW(StgFetchMe);
	break; // nothing to do in this case

    case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
    {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	    evacuate((StgClosure *)fmbq->blocking_queue);
	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    recordMutable((StgMutClosure *)fmbq);
	}
	IF_DEBUG(gc,
		 belch("@@ scavenge: %p (%s) exciting, isn't it",
		       p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
    }
#endif
    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p",
	     info->type, p);
    }

    /* If we didn't manage to promote all the objects pointed to by
     * the current object, then we have to designate this object as
     * mutable (because it contains old-to-new generation pointers).
     */
    if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	mkMutCons((StgClosure *)q, &generations[evac_gen]);
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    const StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	info = get_itbl((StgClosure *)p);
	ASSERT(p && (LOOKS_LIKE_GHC_INFO(info) || IS_HUGS_CONSTR_INFO(info)));

	q = p;
	switch (info->type) {
	case MVAR:
	    /* treat MVars specially, because we don't want to evacuate the
	     * mut_link field in the middle of the closure.
	     */
	{
	    StgMVar *mvar = ((StgMVar *)p);
	    evac_gen = 0;
	    (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
	    (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
	    (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse; // mutable.
	    break;
	}
	case FUN_2_0:
	case THUNK_2_0:
	    scavenge_srt(info);
	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_1_0:
	case FUN_1_1:
	case THUNK_1_0:
	case THUNK_1_1:
	    scavenge_srt(info);
	case CONSTR_1_0:
	case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_0_1:
	case FUN_0_2:
	case THUNK_0_1:
	case THUNK_0_2:
	    scavenge_srt(info);
	case CONSTR_0_1:
	case CONSTR_0_2:
	    break;
	case FUN:
	case THUNK:
	    scavenge_srt(info);
	    // fall through

	case CONSTR:
	case WEAK:
	case FOREIGN:
	case STABLE_NAME:
	case BCO:
	{
	    StgPtr end;

	    end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		(StgClosure *)*p = evacuate((StgClosure *)*p);
	    }
	    break;
	}
	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;

	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgIndOldGen *)p)->indirectee =
		evacuate(((StgIndOldGen *)p)->indirectee);
	    if (failed_to_evac) {
		recordOldToNewPtrs((StgMutClosure *)p);
	    }
	    failed_to_evac = rtsFalse;
	    break;
	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsFalse;
	    break;

	case MUT_CONS:
	    // ignore these
	    failed_to_evac = rtsFalse;
	    break;
	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case ARR_WORDS:
	    break;

	case BLACKHOLE_BQ:
	{
	    StgBlockingQueue *bh = (StgBlockingQueue *)p;
	    (StgClosure *)bh->blocking_queue =
		evacuate((StgClosure *)bh->blocking_queue);
	    failed_to_evac = rtsFalse;
	    break;
	}
	case THUNK_SELECTOR:
	{
	    StgSelector *s = (StgSelector *)p;
	    s->selectee = evacuate(s->selectee);
	    break;
	}
	case AP_UPD: // same as PAPs
	case PAP:
	    /* Treat a PAP just like a section of stack, not forgetting to
	     * evacuate the function pointer too...
	     */
	{
	    StgPAP* pap = (StgPAP *)p;

	    pap->fun = evacuate(pap->fun);
	    scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
	    break;
	}
2500 // follow everything
2504 evac_gen = 0; // repeatedly mutable
2505 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2506 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2507 (StgClosure *)*p = evacuate((StgClosure *)*p);
2509 evac_gen = saved_evac_gen;
2510 failed_to_evac = rtsFalse; // mutable anyhow.
2514 case MUT_ARR_PTRS_FROZEN:
2515 // follow everything
2519 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2520 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2521 (StgClosure *)*p = evacuate((StgClosure *)*p);
2528 StgTSO *tso = (StgTSO *)p;
2531 evac_gen = saved_evac_gen;
2532 failed_to_evac = rtsFalse;
2537 case RBH: // cf. BLACKHOLE_BQ
2540 nat size, ptrs, nonptrs, vhs;
2542 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2544 StgRBH *rbh = (StgRBH *)p;
2545 (StgClosure *)rbh->blocking_queue =
2546 evacuate((StgClosure *)rbh->blocking_queue);
2547 recordMutable((StgMutClosure *)rbh);
2548 failed_to_evac = rtsFalse; // mutable anyhow.
2550 belch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2551 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2557 StgBlockedFetch *bf = (StgBlockedFetch *)p;
2558 // follow the pointer to the node which is being demanded
2559 (StgClosure *)bf->node =
2560 evacuate((StgClosure *)bf->node);
2561 // follow the link to the rest of the blocking queue
2562 (StgClosure *)bf->link =
2563 evacuate((StgClosure *)bf->link);
2564 if (failed_to_evac) {
2565 failed_to_evac = rtsFalse;
2566 recordMutable((StgMutClosure *)bf);
2569 belch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
2570 bf, info_type((StgClosure *)bf),
2571 bf->node, info_type(bf->node)));
2579 break; // nothing to do in this case
2581 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
2583 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
2584 (StgClosure *)fmbq->blocking_queue =
2585 evacuate((StgClosure *)fmbq->blocking_queue);
2586 if (failed_to_evac) {
2587 failed_to_evac = rtsFalse;
2588 recordMutable((StgMutClosure *)fmbq);
2591 belch("@@ scavenge: %p (%s) exciting, isn't it",
2592 p, info_type((StgClosure *)p)));
2598 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
2602 if (failed_to_evac) {
2603 failed_to_evac = rtsFalse;
2604 mkMutCons((StgClosure *)q, &generations[evac_gen]);
2607 // mark the next bit to indicate "scavenged"
2608 mark(q+1, Bdescr(q));
2610 } // while (!mark_stack_empty())
2612 // start a new linear scan if the mark stack overflowed at some point
2613 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
2614 IF_DEBUG(gc, belch("scavenge_mark_stack: starting linear scan"));
2615 mark_stack_overflowed = rtsFalse;
2616 oldgen_scan_bd = oldest_gen->steps[0].blocks;
2617 oldgen_scan = oldgen_scan_bd->start;
2620 if (oldgen_scan_bd) {
2621 // push a new thing on the mark stack
2623 // find a closure that is marked but not scavenged, and start from there.
2625 while (oldgen_scan < oldgen_scan_bd->free
2626 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
2630 if (oldgen_scan < oldgen_scan_bd->free) {
2632 // already scavenged?
2633 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
2634 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2637 push_mark_stack(oldgen_scan);
2638 // ToDo: bump the linear scan by the actual size of the object
2639 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
2643 oldgen_scan_bd = oldgen_scan_bd->link;
2644 if (oldgen_scan_bd != NULL) {
2645 oldgen_scan = oldgen_scan_bd->start;
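/* Why the linear scan: the mark stack is only a fixed number of blocks
 * long, so it can overflow.  When it does, we set mark_stack_overflowed
 * and, once the stack has drained, sweep the oldest generation's blocks
 * linearly looking for closures that are marked but not yet scavenged,
 * pushing each one back on the (now empty) mark stack.  "Scavenged" is
 * encoded by also marking the word after the header - see
 * mark(q+1, Bdescr(q)) above - which is why the already-scavenged test
 * is is_marked(oldgen_scan+1, oldgen_scan_bd).
 */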
2651 /* -----------------------------------------------------------------------------
2652 Scavenge one object.
2654 This is used for objects that are temporarily marked as mutable
2655 because they contain old-to-new generation pointers. Only certain
2656 objects can have this property.
2657 -------------------------------------------------------------------------- */
2660 scavenge_one(StgPtr p)
2662 const StgInfoTable *info;
2663 nat saved_evac_gen = evac_gen;
2666 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO((StgClosure *)p))
2667 || IS_HUGS_CONSTR_INFO(GET_INFO((StgClosure *)p))));
2669 info = get_itbl((StgClosure *)p);
2671 switch (info->type) {
2674 case FUN_1_0: // hardly worth specialising these guys
2694 case IND_OLDGEN_PERM:
2698 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2699 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
2700 (StgClosure *)*q = evacuate((StgClosure *)*q);
2706 case SE_CAF_BLACKHOLE:
2711 case THUNK_SELECTOR:
2713 StgSelector *s = (StgSelector *)p;
2714 s->selectee = evacuate(s->selectee);
2719 // nothing to follow
2724 // follow everything
2727 evac_gen = 0; // repeatedly mutable
2728 recordMutable((StgMutClosure *)p);
2729 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2730 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2731 (StgClosure *)*p = evacuate((StgClosure *)*p);
2733 evac_gen = saved_evac_gen;
2734 failed_to_evac = rtsFalse;
2738 case MUT_ARR_PTRS_FROZEN:
2740 // follow everything
2743 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2744 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2745 (StgClosure *)*p = evacuate((StgClosure *)*p);
2752 StgTSO *tso = (StgTSO *)p;
2754 evac_gen = 0; // repeatedly mutable
2756 recordMutable((StgMutClosure *)tso);
2757 evac_gen = saved_evac_gen;
2758 failed_to_evac = rtsFalse;
2765 StgPAP* pap = (StgPAP *)p;
2766 pap->fun = evacuate(pap->fun);
2767 scavenge_stack((P_)pap->payload, (P_)pap->payload + pap->n_args);
2772 // This might happen if, for instance, a MUT_CONS was pointing to a
2773 // THUNK which has since been updated.  The IND_OLDGEN will
2774 // be on the mutable list anyway, so we don't need to do anything here.
2779 barf("scavenge_one: strange object %d", (int)(info->type));
2782 no_luck = failed_to_evac;
2783 failed_to_evac = rtsFalse;
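/* scavenge_one() returns rtsTrue iff it failed to promote everything
 * the object points to into evac_gen.  Callers use it like this (cf.
 * scavenge_mut_once_list() above and scavenge_large() below):
 *
 *     if (scavenge_one(p)) {
 *         // p still points into a younger generation, so it must be
 *         // kept on (or added to) a mutable list:
 *         mkMutCons((StgClosure *)p, gen);
 *     }
 */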
2787 /* -----------------------------------------------------------------------------
2788 Scavenging mutable lists.
2790 We treat the mutable list of each generation > N (i.e. all the
2791 generations older than the one being collected) as roots. We also
2792 remove non-mutable objects from the mutable list at this point.
2793 -------------------------------------------------------------------------- */
2796 scavenge_mut_once_list(generation *gen)
2798 const StgInfoTable *info;
2799 StgMutClosure *p, *next, *new_list;
2801 p = gen->mut_once_list;
2802 new_list = END_MUT_LIST;
2806 failed_to_evac = rtsFalse;
2808 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2810 // make sure the info pointer is into text space
2811 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2812 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2816 if (info->type==RBH)
2817 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2819 switch(info->type) {
2822 case IND_OLDGEN_PERM:
2824 /* Try to pull the indirectee into this generation, so we can
2825 * remove the indirection from the mutable list.
2827 ((StgIndOldGen *)p)->indirectee =
2828 evacuate(((StgIndOldGen *)p)->indirectee);
2830 #if 0 && defined(DEBUG)
2831 if (RtsFlags.DebugFlags.gc)
2832 /* Debugging code to print out the size of the thing we just
2833 * evacuated.
2834 */
2836 StgPtr start = gen->steps[0].scan;
2837 bdescr *start_bd = gen->steps[0].scan_bd;
2839 scavenge(&gen->steps[0]);
2840 if (start_bd != gen->steps[0].scan_bd) {
2841 size += (P_)BLOCK_ROUND_UP(start) - start;
2842 start_bd = start_bd->link;
2843 while (start_bd != gen->steps[0].scan_bd) {
2844 size += BLOCK_SIZE_W;
2845 start_bd = start_bd->link;
2847 size += gen->steps[0].scan -
2848 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
2850 size = gen->steps[0].scan - start;
2852 belch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
2856 /* failed_to_evac might happen if we've got more than two
2857 * generations, we're collecting only generation 0, the
2858 * indirection resides in generation 2 and the indirectee is
2859 * in generation 1.
2860 */
2861 if (failed_to_evac) {
2862 failed_to_evac = rtsFalse;
2863 p->mut_link = new_list;
2866 /* the mut_link field of an IND_STATIC is overloaded as the
2867 * static link field too (it just so happens that we don't need
2868 * both at the same time), so we need to NULL it out when
2869 * removing this object from the mutable list because the static
2870 * link fields are all assumed to be NULL before doing a major
2871 * GC.
2872 */
2878 /* MUT_CONS is a kind of MUT_VAR, except that we try to remove
2879 * it from the mutable list if possible by promoting whatever it
2880 * points to.
2881 */
2882 if (scavenge_one((StgPtr)((StgMutVar *)p)->var)) {
2883 /* didn't manage to promote everything, so put the
2884 * MUT_CONS back on the list.
2886 p->mut_link = new_list;
2892 // shouldn't have anything else on the mutables list
2893 barf("scavenge_mut_once_list: strange object? %d", (int)(info->type));
2897 gen->mut_once_list = new_list;
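/* Design note (as reflected in the code above): the mut_once_list holds
 * objects that are written at most once after promotion (IND_OLDGEN and
 * friends, plus MUT_CONSs recording single old-to-new pointers), so
 * once the scavenge has pulled their target up into this generation
 * they can be dropped from the list for good.  Genuinely mutable
 * objects live on mut_list and are re-scanned by
 * scavenge_mutable_list() at every collection.
 */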
2902 scavenge_mutable_list(generation *gen)
2904 const StgInfoTable *info;
2905 StgMutClosure *p, *next;
2907 p = gen->saved_mut_list;
2911 failed_to_evac = rtsFalse;
2913 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
2915 // make sure the info pointer is into text space
2916 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
2917 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
2921 if (info->type==RBH)
2922 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
2924 switch(info->type) {
2927 // follow everything
2928 p->mut_link = gen->mut_list;
2933 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2934 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2935 (StgClosure *)*q = evacuate((StgClosure *)*q);
2940 // Happens if a MUT_ARR_PTRS in the old generation is frozen
2941 case MUT_ARR_PTRS_FROZEN:
2946 end = (P_)p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2947 for (q = (P_)((StgMutArrPtrs *)p)->payload; q < end; q++) {
2948 (StgClosure *)*q = evacuate((StgClosure *)*q);
2952 if (failed_to_evac) {
2953 failed_to_evac = rtsFalse;
2954 mkMutCons((StgClosure *)p, gen);
2960 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2961 p->mut_link = gen->mut_list;
2967 StgMVar *mvar = (StgMVar *)p;
2968 (StgClosure *)mvar->head = evacuate((StgClosure *)mvar->head);
2969 (StgClosure *)mvar->tail = evacuate((StgClosure *)mvar->tail);
2970 (StgClosure *)mvar->value = evacuate((StgClosure *)mvar->value);
2971 p->mut_link = gen->mut_list;
2978 StgTSO *tso = (StgTSO *)p;
2982 /* Don't take this TSO off the mutable list - it might still
2983 * point to some younger objects (because we set evac_gen to 0
2984 * above).
2985 */
2986 tso->mut_link = gen->mut_list;
2987 gen->mut_list = (StgMutClosure *)tso;
2993 StgBlockingQueue *bh = (StgBlockingQueue *)p;
2994 (StgClosure *)bh->blocking_queue =
2995 evacuate((StgClosure *)bh->blocking_queue);
2996 p->mut_link = gen->mut_list;
3001 /* Happens if a BLACKHOLE_BQ in the old generation is updated:
3002 * the update overwrites it with an old-generation indirection,
3003 * which is then handled here. */
3004 case IND_OLDGEN_PERM:
3005 /* Try to pull the indirectee into this generation, so we can
3006 * remove the indirection from the mutable list.
3009 ((StgIndOldGen *)p)->indirectee =
3010 evacuate(((StgIndOldGen *)p)->indirectee);
3013 if (failed_to_evac) {
3014 failed_to_evac = rtsFalse;
3015 p->mut_link = gen->mut_once_list;
3016 gen->mut_once_list = p;
3023 // HWL: check whether all of these are necessary
3025 case RBH: // cf. BLACKHOLE_BQ
3027 // nat size, ptrs, nonptrs, vhs;
3029 // StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3030 StgRBH *rbh = (StgRBH *)p;
3031 (StgClosure *)rbh->blocking_queue =
3032 evacuate((StgClosure *)rbh->blocking_queue);
3033 if (failed_to_evac) {
3034 failed_to_evac = rtsFalse;
3035 recordMutable((StgMutClosure *)rbh);
3037 // ToDo: use size of reverted closure here!
3038 p += BLACKHOLE_sizeW();
3044 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3045 // follow the pointer to the node which is being demanded
3046 (StgClosure *)bf->node =
3047 evacuate((StgClosure *)bf->node);
3048 // follow the link to the rest of the blocking queue
3049 (StgClosure *)bf->link =
3050 evacuate((StgClosure *)bf->link);
3051 if (failed_to_evac) {
3052 failed_to_evac = rtsFalse;
3053 recordMutable((StgMutClosure *)bf);
3055 p += sizeofW(StgBlockedFetch);
3061 barf("scavenge_mutable_list: REMOTE_REF %d", (int)(info->type));
3064 p += sizeofW(StgFetchMe);
3065 break; // nothing to do in this case
3067 case FETCH_ME_BQ: // cf. BLACKHOLE_BQ
3069 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3070 (StgClosure *)fmbq->blocking_queue =
3071 evacuate((StgClosure *)fmbq->blocking_queue);
3072 if (failed_to_evac) {
3073 failed_to_evac = rtsFalse;
3074 recordMutable((StgMutClosure *)fmbq);
3076 p += sizeofW(StgFetchMeBlockingQueue);
3082 // shouldn't have anything else on the mutables list
3083 barf("scavenge_mutable_list: strange object? %d", (int)(info->type));
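/* Note the asymmetry with scavenge_mut_once_list(): objects that remain
 * genuinely mutable (MUT_VAR, MVAR, TSO, MUT_ARR_PTRS, blocking queues)
 * are always re-linked onto gen->mut_list above, while objects that
 * have become immutable (frozen arrays with no young pointers, updated
 * black holes that turned into indirections) are dropped or demoted to
 * the mut_once_list.
 */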
3090 scavenge_static(void)
3092 StgClosure* p = static_objects;
3093 const StgInfoTable *info;
3095 /* Always evacuate straight to the oldest generation for static
3096 * objects. */
3097 evac_gen = oldest_gen->no;
3099 /* keep going until we've scavenged all the objects on the linked
3100 * list. */
3101 while (p != END_OF_STATIC_LIST) {
3105 if (info->type==RBH)
3106 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3108 // make sure the info pointer is into text space
3109 ASSERT(p && (LOOKS_LIKE_GHC_INFO(GET_INFO(p))
3110 || IS_HUGS_CONSTR_INFO(GET_INFO(p))));
3112 /* Take this object *off* the static_objects list,
3113 * and put it on the scavenged_static_objects list.
3115 static_objects = STATIC_LINK(info,p);
3116 STATIC_LINK(info,p) = scavenged_static_objects;
3117 scavenged_static_objects = p;
3119 switch (info -> type) {
3123 StgInd *ind = (StgInd *)p;
3124 ind->indirectee = evacuate(ind->indirectee);
3126 /* might fail to evacuate it, in which case we have to pop it
3127 * back on the mutable list (and take it off the
3128 * scavenged_static list because the static link and mut link
3129 * pointers are one and the same).
3131 if (failed_to_evac) {
3132 failed_to_evac = rtsFalse;
3133 scavenged_static_objects = IND_STATIC_LINK(p);
3134 ((StgMutClosure *)ind)->mut_link = oldest_gen->mut_once_list;
3135 oldest_gen->mut_once_list = (StgMutClosure *)ind;
3149 next = (P_)p->payload + info->layout.payload.ptrs;
3150 // evacuate the pointers
3151 for (q = (P_)p->payload; q < next; q++) {
3152 (StgClosure *)*q = evacuate((StgClosure *)*q);
3158 barf("scavenge_static: strange closure %d", (int)(info->type));
3161 ASSERT(failed_to_evac == rtsFalse);
3163 /* get the next static object from the list. Remember, there might
3164 * be more stuff on this list now that we've done some evacuating!
3165 * (static_objects is a global)
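/* The list surgery above, pictorially, for the object p at the head of
 * static_objects:
 *
 *     static_objects:           p -> rest ...
 *     scavenged_static_objects: q -> ...
 *  becomes
 *     static_objects:           rest ...
 *     scavenged_static_objects: p -> q -> ...
 *
 * Scavenging p may cons more objects onto static_objects (evacuate()
 * links newly-reached static closures on the front), which is why the
 * loop re-reads the global rather than chasing a saved link.
 */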
3171 /* -----------------------------------------------------------------------------
3172 scavenge_stack walks over a section of stack and evacuates all the
3173 objects pointed to by it. We can use the same code for walking
3174 PAPs, since these are just sections of copied stack.
3175 -------------------------------------------------------------------------- */
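/* The picture assumed by the loop below: between p and stack_end the
 * stack is a sequence of chunks, each either a pending-argument section
 * or an activation record,
 *
 *     | args... | info | frame payload | args... | info | ... |
 *
 * Pending-argument words are either tagged non-pointers (skipped via
 * IS_ARG_TAG) or closure pointers (evacuated in place); an info pointer
 * marks the start of an activation record and describes its layout.
 */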
3178 scavenge_stack(StgPtr p, StgPtr stack_end)
3181 const StgInfoTable* info;
3184 //IF_DEBUG(sanity, belch(" scavenging stack between %p and %p", p, stack_end));
3187 * Each time around this loop, we are looking at a chunk of stack
3188 * that starts with either a pending argument section or an
3189 * activation record.
3192 while (p < stack_end) {
3195 // If we've got a tag, skip over that many words on the stack
3196 if (IS_ARG_TAG((W_)q)) {
3201 /* Is q a pointer to a closure?
3203 if (! LOOKS_LIKE_GHC_INFO(q) ) {
3205 if ( 0 && LOOKS_LIKE_STATIC_CLOSURE(q) ) { // Is it a static closure?
3206 ASSERT(closure_STATIC((StgClosure *)q));
3208 // otherwise, must be a pointer into the allocation space.
3211 (StgClosure *)*p = evacuate((StgClosure *)q);
3217 * Otherwise, q must be the info pointer of an activation
3218 * record.  All activation records have 'bitmap' style layout
3219 * info.
3220 */
3221 info = get_itbl((StgClosure *)p);
3223 switch (info->type) {
3225 // Dynamic bitmap: the mask is stored on the stack
3227 bitmap = ((StgRetDyn *)p)->liveness;
3228 p = (P_)&((StgRetDyn *)p)->payload[0];
3231 // probably a slow-entry point return address:
3239 belch("HWL: scavenge_stack: FUN(_STATIC) adjusting p from %p to %p (instead of %p)",
3240 old_p, p, old_p+1));
3242 p++; // what if FHS!=1 !? -- HWL
3247 /* Specialised code for update frames, since they're so common.
3248 * We *know* the updatee points to a BLACKHOLE, CAF_BLACKHOLE,
3249 * or BLACKHOLE_BQ, so just inline the code to evacuate it here.
3253 StgUpdateFrame *frame = (StgUpdateFrame *)p;
3255 p += sizeofW(StgUpdateFrame);
3258 frame->updatee = evacuate(frame->updatee);
3260 #else // specialised code for update frames, not sure if it's worth it.
3262 nat type = get_itbl(frame->updatee)->type;
3264 if (type == EVACUATED) {
3265 frame->updatee = evacuate(frame->updatee);
3268 bdescr *bd = Bdescr((P_)frame->updatee);
3270 if (bd->gen_no > N) {
3271 if (bd->gen_no < evac_gen) {
3272 failed_to_evac = rtsTrue;
3277 // Don't promote blackholes
3279 if (!(stp->gen_no == 0 &&
3281 stp->no == stp->gen->n_steps-1)) {
3288 to = copyPart(frame->updatee, BLACKHOLE_sizeW(),
3289 sizeofW(StgHeader), stp);
3290 frame->updatee = to;
3293 to = copy(frame->updatee, BLACKHOLE_sizeW(), stp);
3294 frame->updatee = to;
3295 recordMutable((StgMutClosure *)to);
3298 /* will never be SE_{,CAF_}BLACKHOLE, since we
3299 don't push an update frame for single-entry thunks. KSW 1999-01. */
3300 barf("scavenge_stack: UPDATE_FRAME updatee");
3306 // small bitmap (< 32 entries, or < 64 on a 64-bit machine)
3313 bitmap = info->layout.bitmap;
3315 // this assumes that the payload starts immediately after the info-ptr
3317 while (bitmap != 0) {
3318 if ((bitmap & 1) == 0) {
3319 (StgClosure *)*p = evacuate((StgClosure *)*p);
3322 bitmap = bitmap >> 1;
3329 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
3334 StgLargeBitmap *large_bitmap;
3337 large_bitmap = info->layout.large_bitmap;
3340 for (i=0; i<large_bitmap->size; i++) {
3341 bitmap = large_bitmap->bitmap[i];
3342 q = p + BITS_IN(W_);
3343 while (bitmap != 0) {
3344 if ((bitmap & 1) == 0) {
3345 (StgClosure *)*p = evacuate((StgClosure *)*p);
3348 bitmap = bitmap >> 1;
3350 if (i+1 < large_bitmap->size) {
3352 (StgClosure *)*p = evacuate((StgClosure *)*p);
3358 // and don't forget to follow the SRT
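/* Bitmap convention used in both cases above: bit i describes payload
 * word i, and a 0 bit means "pointer - evacuate it" while a 1 bit means
 * "non-pointer - leave it alone" (note the (bitmap & 1) == 0 test).
 * For example, a small bitmap of binary ...0101 marks words 0 and 2 as
 * non-pointers and words 1 and 3 as pointers.  A large bitmap is simply
 * an array of such words, covering BITS_IN(W_) payload words per
 * element.
 */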
3363 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->type));
3368 /*-----------------------------------------------------------------------------
3369 scavenge the large object list.
3371 evac_gen set by caller; similar games played with evac_gen as with
3372 scavenge() - see comment at the top of scavenge(). Most large
3373 objects are (repeatedly) mutable, so most of the time evac_gen will
3374 be zero.
3375 --------------------------------------------------------------------------- */
3378 scavenge_large(step *stp)
3383 bd = stp->new_large_objects;
3385 for (; bd != NULL; bd = stp->new_large_objects) {
3387 /* take this object *off* the large objects list and put it on
3388 * the scavenged large objects list. This is so that we can
3389 * treat new_large_objects as a stack and push new objects on
3390 * the front when evacuating.
3392 stp->new_large_objects = bd->link;
3393 dbl_link_onto(bd, &stp->scavenged_large_objects);
3395 // update the block count in this step.
3396 stp->n_scavenged_large_blocks += bd->blocks;
3399 if (scavenge_one(p)) {
3400 mkMutCons((StgClosure *)p, stp->gen);
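/* new_large_objects is deliberately treated as a stack: evacuate()
 * pushes freshly evacuated large objects on the front, and the loop
 * above keeps popping the front until the list is empty, so large
 * objects reached *during* this scavenge get processed in the same
 * pass.
 */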
3405 /* -----------------------------------------------------------------------------
3406 Initialising the static object & mutable lists
3407 -------------------------------------------------------------------------- */
3410 zero_static_object_list(StgClosure* first_static)
3414 const StgInfoTable *info;
3416 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
3418 link = STATIC_LINK(info, p);
3419 STATIC_LINK(info,p) = NULL;
3423 /* This function is only needed because we share the mutable link
3424 * field with the static link field in an IND_STATIC, so we have to
3425 * zero the mut_link field before doing a major GC, which needs the
3426 * static link field.
3428 * It doesn't do any harm to zero all the mutable link fields on the
3429 * mut_once list.
3430 */
3433 zero_mutable_list( StgMutClosure *first )
3435 StgMutClosure *next, *c;
3437 for (c = first; c != END_MUT_LIST; c = next) {
3443 /* -----------------------------------------------------------------------------
3444    Reverting CAFs
3445    -------------------------------------------------------------------------- */
3452 for (c = (StgIndStatic *)caf_list; c != NULL;
3453 c = (StgIndStatic *)c->static_link)
3455 c->header.info = c->saved_info;
3456 c->saved_info = NULL;
3457 // could, but not necessary: c->static_link = NULL;
3463 scavengeCAFs( void )
3468 for (c = (StgIndStatic *)caf_list; c != NULL;
3469 c = (StgIndStatic *)c->static_link)
3471 c->indirectee = evacuate(c->indirectee);
3475 /* -----------------------------------------------------------------------------
3476 Sanity code for CAF garbage collection.
3478 With DEBUG turned on, we manage a CAF list in addition to the SRT
3479 mechanism. After GC, we run down the CAF list and blackhole any
3480 CAFs which have been garbage collected. This means we get an error
3481 whenever the program tries to enter a garbage collected CAF.
3483 Any garbage collected CAFs are taken off the CAF list at the same
3484 time.
3485 -------------------------------------------------------------------------- */
3487 #if 0 && defined(DEBUG)
3494 const StgInfoTable *info;
3505 ASSERT(info->type == IND_STATIC);
3507 if (STATIC_LINK(info,p) == NULL) {
3508 IF_DEBUG(gccafs, belch("CAF gc'd at 0x%04lx", (long)p));
3510 SET_INFO(p,&stg_BLACKHOLE_info);
3511 p = STATIC_LINK2(info,p);
3515 pp = &STATIC_LINK2(info,p);
3522 // belch("%d CAFs live", i);
3527 /* -----------------------------------------------------------------------------
3528    Lazy black holing.
3530 Whenever a thread returns to the scheduler after possibly doing
3531 some work, we have to run down the stack and black-hole all the
3532 closures referred to by update frames.
3533 -------------------------------------------------------------------------- */
3536 threadLazyBlackHole(StgTSO *tso)
3538 StgUpdateFrame *update_frame;
3539 StgBlockingQueue *bh;
3542 stack_end = &tso->stack[tso->stack_size];
3543 update_frame = tso->su;
3546 switch (get_itbl(update_frame)->type) {
3549 update_frame = ((StgCatchFrame *)update_frame)->link;
3553 bh = (StgBlockingQueue *)update_frame->updatee;
3555 /* if the thunk is already blackholed, it means we've also
3556 * already blackholed the rest of the thunks on this stack,
3557 * so we can stop early.
3559 * The blackhole made for a CAF is a CAF_BLACKHOLE, so they
3560 * don't interfere with this optimisation.
3562 if (bh->header.info == &stg_BLACKHOLE_info) {
3566 if (bh->header.info != &stg_BLACKHOLE_BQ_info &&
3567 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3568 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3569 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3571 SET_INFO(bh,&stg_BLACKHOLE_info);
3574 update_frame = update_frame->link;
3578 update_frame = ((StgSeqFrame *)update_frame)->link;
3584 barf("threadLazyBlackHole");
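/* Shape of the walk above: follow the frame links from tso->su,
 *
 *     tso->su -> UPDATE_FRAME -> CATCH_FRAME -> ... -> STOP_FRAME
 *
 * blackholing each update frame's updatee.  Finding an updatee that is
 * already a BLACKHOLE means every frame further along the chain was
 * blackholed on an earlier pass, so the loop exits early.
 */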
3590 /* -----------------------------------------------------------------------------
3591 * Stack squeezing
3592 *
3593 * Code largely pinched from old RTS, then hacked to bits. We also do
3594 * lazy black holing here.
3596 * -------------------------------------------------------------------------- */
3599 threadSqueezeStack(StgTSO *tso)
3601 lnat displacement = 0;
3602 StgUpdateFrame *frame;
3603 StgUpdateFrame *next_frame; // Temporally next
3604 StgUpdateFrame *prev_frame; // Temporally previous
3606 rtsBool prev_was_update_frame;
3608 StgUpdateFrame *top_frame;
3609 nat upd_frames=0, stop_frames=0, catch_frames=0, seq_frames=0,
3611 void printObj( StgClosure *obj ); // from Printer.c
3613 top_frame = tso->su;
3616 bottom = &(tso->stack[tso->stack_size]);
3619 /* There must be at least one frame, namely the STOP_FRAME.
3621 ASSERT((P_)frame < bottom);
3623 /* Walk down the stack, reversing the links between frames so that
3624 * we can walk back up as we squeeze from the bottom. Note that
3625 * next_frame and prev_frame refer to next and previous as they were
3626 * added to the stack, rather than the way we see them in this
3627 * walk. (It makes the next loop less confusing.)
3629 * Stop if we find an update frame pointing to a black hole
3630 * (see comment in threadLazyBlackHole()).
3634 // bottom - sizeofW(StgStopFrame) is the STOP_FRAME
3635 while ((P_)frame < bottom - sizeofW(StgStopFrame)) {
3636 prev_frame = frame->link;
3637 frame->link = next_frame;
3642 if (!(frame>=top_frame && frame<=(StgUpdateFrame *)bottom)) {
3643 printObj((StgClosure *)prev_frame);
3644 barf("threadSqueezeStack: current frame is rubbish %p; previous was %p\n",
3647 switch (get_itbl(frame)->type) {
3650 if (frame->updatee->header.info == &stg_BLACKHOLE_info)
3663 barf("Found non-frame during stack squeezing at %p (prev frame was %p)\n",
3665 printObj((StgClosure *)prev_frame);
3668 if (get_itbl(frame)->type == UPDATE_FRAME
3669 && frame->updatee->header.info == &stg_BLACKHOLE_info) {
3674 /* Now, we're at the bottom. Frame points to the lowest update
3675 * frame on the stack, and its link actually points to the frame
3676 * above. We have to walk back up the stack, squeezing out empty
3677 * update frames and turning the pointers back around on the way
3680 * The bottom-most frame (the STOP_FRAME) has not been altered, and
3681 * we never want to eliminate it anyway. Just walk one step up
3682 * before starting to squeeze. When you get to the topmost frame,
3683 * remember that there are still some words above it that might have
3684 * to be moved as well.
3685 */
3690 prev_was_update_frame = (get_itbl(prev_frame)->type == UPDATE_FRAME);
3693 * Loop through all of the frames (everything except the very
3694 * bottom). Things are complicated by the fact that we have
3695 * CATCH_FRAMEs and SEQ_FRAMEs interspersed with the update frames.
3696 * We can only squeeze when there are two consecutive UPDATE_FRAMEs.
3698 while (frame != NULL) {
3700 StgPtr frame_bottom = (P_)frame + sizeofW(StgUpdateFrame);
3701 rtsBool is_update_frame;
3703 next_frame = frame->link;
3704 is_update_frame = (get_itbl(frame)->type == UPDATE_FRAME);
3706 /* We can squeeze out the current frame when:
3707 * 1. both the previous and current frame are update frames, and
3708 * 2. the current frame is empty.
3709 */
3710 if (prev_was_update_frame && is_update_frame &&
3711 (P_)prev_frame == frame_bottom + displacement) {
3713 // Now squeeze out the current frame
3714 StgClosure *updatee_keep = prev_frame->updatee;
3715 StgClosure *updatee_bypass = frame->updatee;
3718 IF_DEBUG(gc, belch("@@ squeezing frame at %p", frame));
3722 /* Deal with blocking queues. If both updatees have blocked
3723 * threads, then we should merge the queues into the update
3724 * frame that we're keeping.
3726 * Alternatively, we could just wake them up: they'll just go
3727 * straight to sleep on the proper blackhole! This is less code
3728 * and probably less bug prone, although it's probably much
3729 * slower.
3730 */
3731 #if 0 // do it properly...
3732 # if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3733 # error Unimplemented lazy BH warning. (KSW 1999-01)
3735 if (GET_INFO(updatee_bypass) == &stg_BLACKHOLE_BQ_info
3736 || GET_INFO(updatee_bypass) == &stg_CAF_BLACKHOLE_info
3738 // Sigh. It has one. Don't lose those threads!
3739 if (GET_INFO(updatee_keep) == &stg_BLACKHOLE_BQ_info) {
3740 // Urgh. Two queues. Merge them.
3741 P_ keep_tso = ((StgBlockingQueue *)updatee_keep)->blocking_queue;
3743 while (keep_tso->link != END_TSO_QUEUE) {
3744 keep_tso = keep_tso->link;
3746 keep_tso->link = ((StgBlockingQueue *)updatee_bypass)->blocking_queue;
3749 // For simplicity, just swap the BQ for the BH
3750 P_ temp = updatee_keep;
3752 updatee_keep = updatee_bypass;
3753 updatee_bypass = temp;
3755 // Record the swap in the kept frame (below)
3756 prev_frame->updatee = updatee_keep;
3761 TICK_UPD_SQUEEZED();
3762 /* wasn't there something about update squeezing and ticky to be
3763 * sorted out? oh yes: we aren't counting each enter properly
3764 * in this case. See the log somewhere. KSW 1999-04-21
3766 * Check two things: that the two update frames don't point to
3767 * the same object, and that the updatee_bypass isn't already an
3768 * indirection. Both of these cases only happen when we're in a
3769 * black hole-style loop (and there are multiple update frames
3770 * on the stack pointing to the same closure), but they can both
3771 * screw us up if we don't check.
3773 if (updatee_bypass != updatee_keep && !closure_IND(updatee_bypass)) {
3774 // this wakes the threads up
3775 UPD_IND_NOLOCK(updatee_bypass, updatee_keep);
3778 sp = (P_)frame - 1; // sp = stuff to slide
3779 displacement += sizeofW(StgUpdateFrame);
3782 // No squeeze for this frame
3783 sp = frame_bottom - 1; // Keep the current frame
3785 /* Do lazy black-holing.
3787 if (is_update_frame) {
3788 StgBlockingQueue *bh = (StgBlockingQueue *)frame->updatee;
3789 if (bh->header.info != &stg_BLACKHOLE_info &&
3790 bh->header.info != &stg_BLACKHOLE_BQ_info &&
3791 bh->header.info != &stg_CAF_BLACKHOLE_info) {
3792 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
3793 belch("Unexpected lazy BHing required at 0x%04x",(int)bh);
3796 /* zero out the slop so that the sanity checker can tell
3797 * where the next closure is.
3800 StgInfoTable *info = get_itbl(bh);
3801 nat np = info->layout.payload.ptrs, nw = info->layout.payload.nptrs, i;
3802 /* don't zero out slop for a THUNK_SELECTOR, because its layout
3803 * info is used for a different purpose, and it's exactly the
3804 * same size as a BLACKHOLE in any case.
3806 if (info->type != THUNK_SELECTOR) {
3807 for (i = np; i < np + nw; i++) {
3808 ((StgClosure *)bh)->payload[i] = 0;
3813 SET_INFO(bh,&stg_BLACKHOLE_info);
3817 // Fix the link in the current frame (should point to the frame below)
3818 frame->link = prev_frame;
3819 prev_was_update_frame = is_update_frame;
3822 // Now slide all words from sp up to the next frame
3824 if (displacement > 0) {
3825 P_ next_frame_bottom;
3827 if (next_frame != NULL)
3828 next_frame_bottom = (P_)next_frame + sizeofW(StgUpdateFrame);
3830 next_frame_bottom = tso->sp - 1;
3834 belch("sliding [%p, %p] by %ld", sp, next_frame_bottom,
3838 while (sp >= next_frame_bottom) {
3839 sp[displacement] = *sp;
3843 (P_)prev_frame = (P_)frame + displacement;
3847 tso->sp += displacement;
3848 tso->su = prev_frame;
3851 belch("@@ threadSqueezeStack: squeezed %d update-frames; found %d BHs; found %d update-, %d stop-, %d catch, %d seq-frames",
3852 squeezes, bhs, upd_frames, stop_frames, catch_frames, seq_frames))
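/* Net effect of a single squeeze: given two adjacent update frames, the
 * current (younger) frame is eliminated, its updatee (updatee_bypass)
 * is overwritten with an indirection to the surviving frame's updatee
 * (updatee_keep), and the stack words above slide by a further
 * sizeofW(StgUpdateFrame) words (accumulated in displacement).  Both
 * closures were going to be updated with the same value anyway, so one
 * frame suffices.
 */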
3857 /* -----------------------------------------------------------------------------
3858 * Pausing a thread
3859 *
3860 * We have to prepare for GC - this means doing lazy black holing
3861 * here.  We also take the opportunity to do stack squeezing if it's
3862 * turned on.
3863 * -------------------------------------------------------------------------- */
3865 threadPaused(StgTSO *tso)
3867 if ( RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue )
3868 threadSqueezeStack(tso); // does black holing too
3870 threadLazyBlackHole(tso);
3873 /* -----------------------------------------------------------------------------
3874 * Debugging
3875 * -------------------------------------------------------------------------- */
3879 printMutOnceList(generation *gen)
3881 StgMutClosure *p, *next;
3883 p = gen->mut_once_list;
3886 fprintf(stderr, "@@ Mut once list %p: ", gen->mut_once_list);
3887 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3888 fprintf(stderr, "%p (%s), ",
3889 p, info_type((StgClosure *)p));
3891 fputc('\n', stderr);
3895 printMutableList(generation *gen)
3897 StgMutClosure *p, *next;
3902 fprintf(stderr, "@@ Mutable list %p: ", gen->mut_list);
3903 for (; p != END_MUT_LIST; p = next, next = p->mut_link) {
3904 fprintf(stderr, "%p (%s), ",
3905 p, info_type((StgClosure *)p));
3907 fputc('\n', stderr);
3910 static inline rtsBool
3911 maybeLarge(StgClosure *closure)
3913 StgInfoTable *info = get_itbl(closure);
3915 /* closure types that may be found on the new_large_objects list;
3916 see scavenge_large */
3917 return (info->type == MUT_ARR_PTRS ||
3918 info->type == MUT_ARR_PTRS_FROZEN ||
3919 info->type == TSO ||
3920 info->type == ARR_WORDS);