/* -----------------------------------------------------------------------------
 * (c) The GHC Team 1998-2003
 * Generational garbage collector
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Storage.h"
#include "OSThreads.h"
#include "LdvProfile.h"
#include "BlockAlloc.h"
#include "ParTicky.h"		// ToDo: move into Rts.h
#include "GCCompact.h"
#include "RtsSignals.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#include "RetainerProfile.h"

// Turn off inlining when debugging - it obfuscates things
#if defined(DEBUG)
# define STATIC_INLINE static
#else
# define STATIC_INLINE static inline
#endif

/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
static StgClosure* static_objects;      // live static objects
StgClosure* scavenged_static_objects;   // static objects scavenged so far
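
/* Illustrative sketch (not part of the original source): how a static
 * closure gets consed onto the front of static_objects during
 * evacuation.  An object is on the list iff its static link field is
 * non-NULL, which is why the list ends in the non-NULL marker
 * END_OF_STATIC_LIST rather than NULL.
 */
#if 0
static void
link_static_object_example (const StgInfoTable *info, StgClosure *q)
{
    if (*STATIC_LINK(info,q) == NULL) {   // not yet on the list?
        *STATIC_LINK(info,q) = static_objects;
        static_objects = q;               // cons onto the front
    }
}
#endif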

/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list;   // also pending finaliser list

/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverse_weak_ptr_list() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;

/* List of all threads during GC
 */
static StgTSO *old_all_threads;
StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Saved nursery (used for 2-space collector only)
 */
static bdescr *saved_nursery;
static nat saved_n_blocks;

/* Data used for allocation area sizing.
 */
static lnat new_blocks;          // blocks allocated during this GC
static lnat new_scavd_blocks;    // ditto, but depth-first blocks
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
static lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 8

/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static bdescr *     gc_alloc_block          ( step *stp );
static void         mark_root               ( StgClosure **root );

// Use a register argument for evacuate, if available.
#define REGPARM1 __attribute__((regparm(1)))

REGPARM1 static StgClosure * evacuate (StgClosure *q);

static void         zero_static_object_list ( StgClosure* first_static );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static StgClosure * eval_thunk_selector     ( nat field, StgSelector * p );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );

static void         scavenge_large_bitmap   ( StgPtr p,
                                              StgLargeBitmap *large_bitmap,
                                              nat size );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif

/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;

STATIC_INLINE rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

STATIC_INLINE rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

STATIC_INLINE void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

STATIC_INLINE void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}
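
/* Sketch (illustrative only) of the consumer side of the mark stack:
 * in this file the real consumer is scavenge_mark_stack(), and on
 * overflow we fall back to the linear scan controlled by
 * mark_stack_overflowed/oldgen_scan_bd above.
 */
#if 0
STATIC_INLINE StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}

static void
drain_mark_stack_example(void)
{
    while (!mark_stack_empty()) {
        scavenge_one(pop_mark_stack());
    }
}
#endif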

/* -----------------------------------------------------------------------------
   Allocate a new to-space block in the given step.
   -------------------------------------------------------------------------- */

static bdescr *
gc_alloc_block(step *stp)
{
    bdescr *bd = allocBlock();
    bd->gen_no = stp->gen_no;
    bd->step = stp;
    bd->link = NULL;

    // blocks in to-space in generations up to and including N
    // get the BF_EVACUATED flag.
    if (stp->gen_no <= N) {
        bd->flags = BF_EVACUATED;
    } else {
        bd->flags = 0;
    }

    // Start a new to-space block, chain it on after the previous one.
    if (stp->hp_bd != NULL) {
        stp->hp_bd->free = stp->hp;
        stp->hp_bd->link = bd;
    }

    stp->hp_bd = bd;
    stp->hp    = bd->start;
    stp->hpLim = stp->hp + BLOCK_SIZE_W;

    stp->n_blocks++;
    new_blocks++;

    return bd;
}

static bdescr *
gc_alloc_scavd_block(step *stp)
{
    bdescr *bd = allocBlock();
    bd->gen_no = stp->gen_no;
    bd->step = stp;

    // blocks in to-space in generations up to and including N
    // get the BF_EVACUATED flag.
    if (stp->gen_no <= N) {
        bd->flags = BF_EVACUATED;
    } else {
        bd->flags = 0;
    }

    bd->link = stp->blocks;
    stp->blocks = bd;

    if (stp->scavd_hp != NULL) {
        Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
    }
    stp->scavd_hp    = bd->start;
    stp->scavd_hpLim = stp->scavd_hp + BLOCK_SIZE_W;

    stp->n_blocks++;
    new_scavd_blocks++;

    return bd;
}

/* -----------------------------------------------------------------------------
   GarbageCollect

   Rough outline of the algorithm: for garbage collecting generation N
   (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all generations (mutable_list).

     - for each pointer, evacuate the object it points to into either

       + to-space of the step given by step->to, which is the next
         highest step in this generation or the first step in the next
         generation if this is the last step.

       + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
         When we evacuate an object we attempt to evacuate
         everything it points to into the same generation - this is
         achieved by setting evac_gen to the desired generation.  If
         we can't do this, then an entry in the mut list has to
         be made for the cross-generation pointer.

       + if the object is already in a generation > N, then leave
         it alone.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: all capabilities are held throughout GarbageCollect().

   -------------------------------------------------------------------------- */
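
/* A condensed paraphrase (illustrative, not the code itself) of the
 * evacuate/scavenge fixpoint implemented below:
 *
 *     do {
 *         flag = rtsFalse;
 *         if (major_gc) scavenge the static_objects list;
 *         scavenge to-space of every step, oldest generation first,
 *             setting flag if any step still had work left;
 *     } while (flag || traverse_weak_ptr_list());
 */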
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, collected = 0, copied = 0, scavd_copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s, i;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n",
                          Now, Now));
#endif

#if defined(RTS_USER_SIGNALS)
  // block signals
  blockUserSignals();
#endif

  // tell the STM to discard any cached closures it's hoping to re-use
  stmPreGCHook();

  // tell the stats department that we've started a GC
  stat_startGC();

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * ToDo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          >= generations[g].max_blocks) {
        N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
#if defined(GRAN)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
#endif
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* Save the nursery if we're doing a two-space collection.
   * g0s0->blocks will be used for to-space, so we need to get the
   * nursery out of the way.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      saved_nursery = g0s0->blocks;
      saved_n_blocks = g0s0->n_blocks;
      g0s0->blocks = NULL;
      g0s0->n_blocks = 0;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  new_scavd_blocks = 0;

  // Initialise to-space in all the generations/steps that we're
  // collecting.
  //
  for (g = 0; g <= N; g++) {

    // throw away the mutable list.  Invariant: the mutable list
    // always has at least one block; this means we can avoid a check for
    // NULL in recordMutable().
    if (g != 0) {
        freeChain(generations[g].mut_list);
        generations[g].mut_list = allocBlock();
        for (i = 0; i < n_capabilities; i++) {
            freeChain(capabilities[i].mut_lists[g]);
            capabilities[i].mut_lists[g] = allocBlock();
        }
    }

    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
        continue;
      }

      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);

      // start a new to-space for this step.
      stp->old_blocks   = stp->blocks;
      stp->n_old_blocks = stp->n_blocks;

      // allocate the first to-space block; extra blocks will be
      // chained on as necessary.
      stp->hp_bd = NULL;
      bd = gc_alloc_block(stp);
      stp->blocks   = bd;
      stp->n_blocks = 1;
      stp->scan     = bd->start;
      stp->scan_bd  = bd;

      // allocate a block for "already scavenged" objects.  This goes
      // on the front of the stp->blocks list, so it won't be
      // traversed by the scavenging sweep.
      gc_alloc_scavd_block(stp);

      // initialise the large object queues.
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
        bd->flags &= ~BF_EVACUATED;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
          nat bitmap_size; // in bytes
          bdescr *bitmap_bdescr;
          StgWord *bitmap;

          bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

          if (bitmap_size > 0) {
              bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
                                         / BLOCK_SIZE);
              stp->bitmap = bitmap_bdescr;
              bitmap = bitmap_bdescr->start;

              IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p",
                                      bitmap_size, bitmap));

              // don't forget to fill it with zeros!
              memset(bitmap, 0, bitmap_size);

              // For each block in this step, point to its bitmap from the
              // block descriptor.
              for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
                  bd->u.bitmap = bitmap;
                  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
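                  // Worked example (illustrative): with 4Kbyte blocks
                  // and 32-bit words, BLOCK_SIZE_W = 1024, so each
                  // block needs 1024 mark bits = 32 words (128 bytes)
                  // of bitmap, and the bitmap pointer advances by
                  // 1024/32 = 32 words per block.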
                  // Also at this point we set the BF_COMPACTED flag
                  // for this block.  The invariant is that
                  // BF_COMPACTED is always unset, except during GC
                  // when it is set on those blocks which will be
                  // compacted.
                  bd->flags |= BF_COMPACTED;
              }
          }
      }
    }
  }

  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
          ASSERT(stp->blocks == NULL);
          bd = gc_alloc_block(stp);
          stp->blocks = bd;
          stp->n_blocks = 1;
      }
      if (stp->scavd_hp == NULL) {
          gc_alloc_scavd_block(stp);
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }

    /* Move the private mutable lists from each capability onto the
     * main mutable list for the generation.
     */
    for (i = 0; i < n_capabilities; i++) {
        for (bd = capabilities[i].mut_lists[g];
             bd->link != NULL; bd = bd->link) {
            /* nothing */
        }
        bd->link = generations[g].mut_list;
        generations[g].mut_list = capabilities[i].mut_lists[g];
        capabilities[i].mut_lists[g] = allocBlock();
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }

  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
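
  /* Concrete case (illustrative): an object reachable from both gen 3's
   * and gen 2's mutable lists.  Scavenging gen 3's list first promotes
   * it straight to gen 3; doing gen 2 first would copy it to gen 2 and
   * then force a new mutable-list entry when gen 3's reference to it
   * is found.
   */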

  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = allocBlock();
      // mut_list always has at least one block.
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose, printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      evac_gen = g;
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }
  }

  /* follow roots from the CAF list (used by GHCi)
   */
  evac_gen = 0;
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(GRAN)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
     CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */
#endif

#if defined(PAR)
  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_stage = WeakPtrs;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);

  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(static_objects));
        scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      int gen, st;

    loop2:
      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
          (mark_stack_bdescr != NULL && !mark_stack_empty())) {
          scavenge_mark_stack();
          flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
        for (st = generations[gen].n_steps; --st >= 0; ) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
          }
          stp = &generations[gen].steps[st];
          evac_gen = gen;
          if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
            scavenge(stp);
            flag = rtsTrue;
            goto loop2;
          }
          if (stp->new_large_objects != NULL) {
            scavenge_large(stp);
            flag = rtsTrue;
            goto loop2;
          }
        }
      }
    }

    if (flag) { goto loop; }

    // must be last...  invariant is that everything is fully
    // scavenged at this point.
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
        goto loop;
    }
  }

  /* Update the pointers from the task list - these are
   * treated as weak pointers because we want to allow a main thread
   * to get a BlockedOnDeadMVar exception in the same way as any other
   * thread.  Note that the threads should all have been retained by
   * GC by virtue of being on the all_threads list, we're just
   * updating pointers here.
   */
  {
      Task *task;
      StgTSO *tso;
      for (task = all_tasks; task != NULL; task = task->all_link) {
          if (!task->stopped && task->tso) {
              ASSERT(task->tso->bound == task);
              tso = (StgTSO *) isAlive((StgClosure *)task->tso);
              if (tso == NULL) {
                  barf("task %p: main thread %d has been GC'd",
                       task, task->tso->id);
              }
              task->tso = tso;
          }
      }
  }

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif

  // Now see which stable names are still alive.
  gcStablePtrTable(major_gc);

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              ASSERT(Bdescr(stp->hp) == stp->hp_bd);
              stp->hp_bd->free = stp->hp;
              Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
          }
      }
  }

#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
    LdvCensusForDead(N);
#endif

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));

  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
      generations[g].collections++; // for stats
    }

    // Count the mutable list as bytes "copied" for the purposes of
    // stats.  Every mutable list is copied during every GC.
    if (g > 0) {
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            copied += (bd->free - bd->start) * sizeof(StgWord);
        }
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        // stats information: how much we copied
        if (g <= N) {
          copied -= stp->hp_bd->start + BLOCK_SIZE_W -
            stp->hp_bd->free;
          scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
        }
      }

      // for generations we collected...
      if (g <= N) {

          // rough calculation of garbage collected, for stats output
          if (stp->is_compacted) {
              collected += (oldgen_saved_blocks - stp->n_old_blocks) * BLOCK_SIZE_W;
          } else {
              if (g == 0 && s == 0) {
                  collected += countNurseryBlocks() * BLOCK_SIZE_W;
                  collected += alloc_blocks;
              } else {
                  collected += stp->n_old_blocks * BLOCK_SIZE_W;
              }
          }

          /* free old memory and shift to-space into from-space for all
           * the collected steps (except the allocation area).  These
           * freed blocks will probably be quickly recycled.
           */
          if (!(g == 0 && s == 0)) {
              if (stp->is_compacted) {
                  // for a compacted step, just shift the new to-space
                  // onto the front of the now-compacted existing blocks.
                  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                      bd->flags &= ~BF_EVACUATED;  // now from-space
                  }
                  // tack the new blocks on the end of the existing blocks
                  if (stp->old_blocks != NULL) {
                      for (bd = stp->old_blocks; bd != NULL; bd = next) {
                          // NB. this step might not be compacted next
                          // time, so reset the BF_COMPACTED flags.
                          // They are set before GC if we're going to
                          // compact.  (search for BF_COMPACTED above).
                          bd->flags &= ~BF_COMPACTED;
                          next = bd->link;
                          if (next == NULL) {
                              bd->link = stp->blocks;
                          }
                      }
                      stp->blocks = stp->old_blocks;
                  }
                  // add the new blocks to the block tally
                  stp->n_blocks += stp->n_old_blocks;
                  ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
              } else {
                  freeChain(stp->old_blocks);
                  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                      bd->flags &= ~BF_EVACUATED;  // now from-space
                  }
              }
              stp->old_blocks = NULL;
              stp->n_old_blocks = 0;
          }

          /* LARGE OBJECTS.  The current live large objects are chained on
           * scavenged_large, having been moved during garbage
           * collection from large_objects.  Any objects left on
           * large_objects list are therefore dead, so we free them here.
           */
          for (bd = stp->large_objects; bd != NULL; bd = next) {
              next = bd->link;
              freeGroup(bd);
          }

          // update the count of blocks used by large objects
          for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED;
          }
          stp->large_objects  = stp->scavenged_large_objects;
          stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else { // for older generations...

          /* For older generations, we need to append the
           * scavenged_large_object list (i.e. large objects that have been
           * promoted during this GC) to the large_object list for that step.
           */
          for (bd = stp->scavenged_large_objects; bd; bd = next) {
              next = bd->link;
              bd->flags &= ~BF_EVACUATED;
              dbl_link_onto(bd, &stp->large_objects);
          }

          // add the new blocks we promoted during this GC
          stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }

  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
             oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
                          RtsFlags.GcFlags.minAllocAreaSize);

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
          (RtsFlags.GcFlags.compact ||
           (max > 0 &&
            oldest_gen->steps[0].n_blocks >
            (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
          oldest_gen->steps[0].is_compacted = 1;
//        debugBelch("compaction: on\n", live);
      } else {
          oldest_gen->steps[0].is_compacted = 0;
//        debugBelch("compaction: off\n", live);
      }

      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

          // this test is necessary to ensure that the calculations
          // below don't have any negative results - we're working
          // with unsigned values here.
          if (max < min_alloc) {
              heapOverflow();
          }

          if (oldest_gen->steps[0].is_compacted) {
              if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
              }
          } else {
              if ( (size * (gens - 1) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2);
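
                  // Worked example (illustrative): with gens == 2 and
                  // no compaction, we need from-space and to-space for
                  // the single old generation plus the allocation
                  // area, i.e. size*2 + min_alloc <= max, so size is
                  // clamped to (max - min_alloc) / 2.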
              }
          }
      }

#if 0
      debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
                 min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
          generations[g].max_blocks = size;
      }
  }

  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
      freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
              stp->bitmap = NULL;
          }
      }
  }

  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      nat blocks;

      if (g0s0->old_blocks != NULL) {
          freeChain(g0s0->old_blocks);
      }
      for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
          bd->flags = 0;        // now from-space
      }
      g0s0->old_blocks = g0s0->blocks;
      g0s0->n_old_blocks = g0s0->n_blocks;
      g0s0->blocks = saved_nursery;
      g0s0->n_blocks = saved_n_blocks;

      /* For a two-space collector, we need to resize the nursery. */

      /* set up a new nursery.  Allocate a nursery size based on a
       * function of the amount of live data (by default a factor of 2)
       * Use the blocks from the old nursery if possible, freeing up any
       * left over blocks.
       *
       * If we get near the maximum heap size, then adjust our nursery
       * size accordingly.  If the nursery is the same size as the live
       * data (L), then we need 3L bytes.  We can reduce the size of the
       * nursery to bring the required memory down near 2L bytes.
       *
       * A normal 2-space collector would need 4L bytes to give the same
       * performance we get from 3L bytes, reducing to the same
       * performance at 2L bytes.
       */
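
      /* Example (illustrative): with L blocks of live data and a
       * nursery of L blocks, a collection needs L (nursery) +
       * L (from-space) + L (to-space) = 3L blocks; shrinking the
       * nursery as we approach the maximum heap size brings the
       * requirement down towards 2L.
       */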

      blocks = g0s0->n_old_blocks;

      if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
           blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
           RtsFlags.GcFlags.maxHeapSize ) {
          long adjusted_blocks;  // signed on purpose
          int pc_free;

          adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
          IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
          pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
          if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
              heapOverflow();
          }
          blocks = adjusted_blocks;
      } else {
          blocks *= RtsFlags.GcFlags.oldGenFactor;
          if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }
      }
      resizeNurseries(blocks);

  } else { // Generational collector

      /* Generational collector:
       * If the user has given us a suggested heap size, adjust our
       * allocation area to make best use of the memory available.
       */
      if (RtsFlags.GcFlags.heapSizeSuggestion) {
          long blocks;
          nat needed = calcNeeded();    // approx blocks needed at next GC

          /* Guess how much will be live in generation 0 step 0 next time.
           * A good approximation is obtained by finding the
           * percentage of g0s0 that was live at the last minor GC.
           */
          if (N == 0) {
              g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
          }

          /* Estimate a size for the allocation area based on the
           * information available.  We might end up going slightly under
           * or over the suggested heap size, but we should be pretty
           * close on average.
           *
           * Formula:            suggested - needed
           *                ----------------------------
           *                    1 + g0s0_pcnt_kept/100
           *
           * where 'needed' is the amount of memory needed at the next
           * collection for collecting all steps except g0s0.
           */
          blocks =
              (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
              (100 + (long)g0s0_pcnt_kept);
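
          // Worked example (illustrative): suggested = 1000 blocks,
          // needed = 200, g0s0_pcnt_kept = 30  =>
          // blocks = (1000 - 200) * 100 / 130 ~ 615 blocks for the
          // allocation area.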

          if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }

          resizeNurseries((nat)blocks);

      } else {
          // we might have added extra large blocks to the nursery, so
          // resize back to minAllocAreaSize again.
          resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
      }
  }

  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
      zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(last_free_capability, old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling
  /*  heapCensus(to_blocks); */ /* ToDo */
#endif

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, collected, live, copied, scavd_copied, N);

#if defined(RTS_USER_SIGNALS)
  // unblock signals again
  unblockUserSignals();
#endif
}

/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.

   There are three distinct stages to processing weak pointers:

   - weak_stage == WeakPtrs

     We process all the weak pointers whose keys are alive (evacuate
     their values and finalizers), and repeat until we can find no new
     live keys.  If no live keys are found in this pass, then we
     evacuate the finalizers of all the dead weak pointers in order to
     run them.

   - weak_stage == WeakThreads

     Now, we discover which *threads* are still alive.  Pointers to
     threads from the all_threads and main thread lists are the
     weakest of all: a pointer from the finalizer of a dead weak
     pointer can keep a thread alive.  Any threads found to be unreachable
     are evacuated and placed on the resurrected_threads list so we
     can send them a signal later.

   - weak_stage == WeakDone

     No more evacuation is done.

   -------------------------------------------------------------------------- */

static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  switch (weak_stage) {

  case WeakDone:
      return rtsFalse;

  case WeakPtrs:
      /* doesn't matter where we evacuate values/finalizers to, since
       * these pointers are treated as roots (iff the keys are alive).
       */
      evac_gen = 0;

      last_w = &old_weak_ptr_list;
      for (w = old_weak_ptr_list; w != NULL; w = next_w) {

          /* There might be a DEAD_WEAK on the list if finalizeWeak# was
           * called on a live weak pointer object.  Just remove it.
           */
          if (w->header.info == &stg_DEAD_WEAK_info) {
              next_w = ((StgDeadWeak *)w)->link;
              *last_w = next_w;
              continue;
          }

          switch (get_itbl(w)->type) {

          case EVACUATED:
              next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
              *last_w = next_w;
              continue;

          case WEAK:
              /* Now, check whether the key is reachable.
               */
              new = isAlive(w->key);
              if (new != NULL) {
                  w->key = new;
                  // evacuate the value and finalizer
                  w->value = evacuate(w->value);
                  w->finalizer = evacuate(w->finalizer);
                  // remove this weak ptr from the old_weak_ptr list
                  *last_w = w->link;
                  // and put it on the new weak ptr list
                  next_w  = w->link;
                  w->link = weak_ptr_list;
                  weak_ptr_list = w;
                  flag = rtsTrue;
                  IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p",
                                            w, w->key));
                  continue;
              } else {
                  last_w = &(w->link);
                  next_w = w->link;
                  continue;
              }

          default:
              barf("traverse_weak_ptr_list: not WEAK");
          }
      }

      /* If we didn't make any changes, then we can go round and kill all
       * the dead weak pointers.  The old_weak_ptr list is used as a list
       * of pending finalizers later on.
       */
      if (flag == rtsFalse) {
          for (w = old_weak_ptr_list; w; w = w->link) {
              w->finalizer = evacuate(w->finalizer);
          }

          // Next, move to the WeakThreads stage after fully
          // scavenging the finalizers we've just evacuated.
          weak_stage = WeakThreads;
      }

      return rtsTrue;

  case WeakThreads:
      /* Now deal with the all_threads list, which behaves somewhat like
       * the weak ptr list.  If we discover any threads that are about to
       * become garbage, we wake them up and administer an exception.
       */
      {
          StgTSO *t, *tmp, *next, **prev;

          prev = &old_all_threads;
          for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

              tmp = (StgTSO *)isAlive((StgClosure *)t);

              if (tmp != NULL) { t = tmp; }

              ASSERT(get_itbl(t)->type == TSO);
              switch (t->what_next) {
              case ThreadRelocated:
                  next = t->link;
                  *prev = next;
                  continue;
              case ThreadKilled:
              case ThreadComplete:
                  // finished or died.  The thread might still be alive, but we
                  // don't keep it on the all_threads list.  Don't forget to
                  // stub out its global_link field.
                  next = t->global_link;
                  t->global_link = END_TSO_QUEUE;
                  *prev = next;
                  continue;
              default:
                  ;
              }

              // Threads blocked on black holes: if the black hole
              // is alive, then the thread is alive too.
              if (tmp == NULL && t->why_blocked == BlockedOnBlackHole) {
                  if (isAlive(t->block_info.closure)) {
                      t = (StgTSO *)evacuate((StgClosure *)t);
                      tmp = t;
                      flag = rtsTrue;
                  }
              }

              if (tmp == NULL) {
                  // not alive (yet): leave this thread on the
                  // old_all_threads list.
                  prev = &(t->global_link);
                  next = t->global_link;
              } else {
                  // alive: move this thread onto the all_threads list.
                  next = t->global_link;
                  t->global_link = all_threads;
                  all_threads = t;
                  *prev = next;
              }
          }
      }

      /* If we evacuated any threads, we need to go back to the scavenger.
       */
      if (flag) return rtsTrue;

      /* And resurrect any threads which were about to become garbage.
       */
      {
          StgTSO *t, *tmp, *next;
          for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
              next = t->global_link;
              tmp = (StgTSO *)evacuate((StgClosure *)t);
              tmp->global_link = resurrected_threads;
              resurrected_threads = tmp;
          }
      }

      /* Finally, we can update the blackhole_queue.  This queue
       * simply strings together TSOs blocked on black holes, it is
       * not intended to keep anything alive.  Hence, we do not follow
       * pointers on the blackhole_queue until now, when we have
       * determined which TSOs are otherwise reachable.  We know at
       * this point that all TSOs have been evacuated, however.
       */
      {
          StgTSO **pt;
          for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
              *pt = (StgTSO *)isAlive((StgClosure *)*pt);
              ASSERT(*pt != NULL);
          }
      }

      weak_stage = WeakDone;  // *now* we're done,
      return rtsTrue;         // but one more round of scavenging, please

  default:
      barf("traverse_weak_ptr_list");
      return rtsTrue;
  }
}

/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
      ASSERT(w->header.info == &stg_DEAD_WEAK_info
             || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
      w = (StgWeak *)evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}

/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {
    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);

    // ignore static closures
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED(p)) { return p; }

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)p);
    if (bd->gen_no > N) { return p; }

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) { return p; }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) { return NULL; }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) { return p; }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:            // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
        p = (StgClosure *)((StgTSO *)p)->link;
        continue;
      }
      return NULL;

    default:
      // dead.
      return NULL;
    }
  }
}

static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

STATIC_INLINE void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
    // not true: (ToDo: perhaps it should be)
    // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
    SET_INFO(p, &stg_EVACUATED_info);
    ((StgEvacuated *)p)->evacuee = dest;
}

STATIC_INLINE StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  StgPtr to, from;
  nat i;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
      failed_to_evac = rtsTrue;
#else
      stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
      gc_alloc_block(stp);
  }

  to = stp->hp;
  from = (StgPtr)src;
  stp->hp = to + size;
  for (i = 0; i < size; i++) { // unroll for small i
      to[i] = from[i];
  }
  upd_evacuee((StgClosure *)from,(StgClosure *)to);

#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(from, size_org);
#endif
  return (StgClosure *)to;
}

// Same as copy() above, except the object will be allocated in memory
// that will not be scavenged.  Used for objects that have no pointer
// fields.
STATIC_INLINE StgClosure *
copy_noscav(StgClosure *src, nat size, step *stp)
{
  StgPtr to, from;
  nat i;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
      failed_to_evac = rtsTrue;
#else
      stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->scavd_hp + size >= stp->scavd_hpLim) {
      gc_alloc_scavd_block(stp);
  }

  to = stp->scavd_hp;
  from = (StgPtr)src;
  stp->scavd_hp = to + size;
  for (i = 0; i < size; i++) { // unroll for small i
      to[i] = from[i];
  }
  upd_evacuee((StgClosure *)from,(StgClosure *)to);

#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(from, size_org);
#endif
  return (StgClosure *)to;
}

/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;
#ifdef PROFILING
  // @LDV profiling
  nat size_to_copy_org = size_to_copy;
#endif

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
      failed_to_evac = rtsTrue;
#else
      stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
      gc_alloc_block(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
      *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  // size_to_copy_org is wrong because the closure already occupies size_to_reserve
  // words.
  SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
  // fill the slop
  if (size_to_reserve - size_to_copy_org > 0)
    FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
  return (StgClosure *)dest;
}
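
/* Usage note: this is how BLACKHOLEs are evacuated below,
 *   copyPart(q, BLACKHOLE_sizeW(), sizeofW(StgHeader), stp);
 * only the header is copied but the closure's full size is reserved
 * in to-space.  TSOs are treated similarly: just the StgTSO part is
 * copied and the stack is relocated separately (see the TSO case in
 * evacuate()).
 */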

/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

STATIC_INLINE void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
         (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
      failed_to_evac = rtsTrue;
#else
      stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}

/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
       if  M > N     then leave immediately
       else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.

   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   -------------------------------------------------------------------------- */

REGPARM1 static StgClosure *
evacuate(StgClosure *q)
{
#if defined(PAR)
  StgClosure *to;
#endif
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  if (!HEAP_ALLOCED(q)) {

      if (!major_gc) return q;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0 &&
              *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
              *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
              *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL
              && *IND_STATIC_LINK((StgClosure *)q) == NULL) {
              *IND_STATIC_LINK((StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
              *STATIC_LINK(info,(StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case CONSTR_INTLIKE:
      case CONSTR_CHARLIKE:
      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return q;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
          // nope
          failed_to_evac = rtsTrue;
          TICK_GC_FAILED_PROMOTION();
      }
      return q;
  }

  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

      /* pointer into to-space: just return it.  This normally
       * shouldn't happen, but allowing it makes certain things
       * slightly easier (eg. the mutable list can contain the same
       * object twice, for example).
       */
      if (bd->flags & BF_EVACUATED) {
          if (bd->gen_no < evac_gen) {
              failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return q;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->link;
              goto loop;
          }
          evacuate_large((P_)q);
          return q;
      }

      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (bd->flags & BF_COMPACTED) {
          if (!is_marked((P_)q,bd)) {
              mark((P_)q,bd);
              if (mark_stack_full()) {
                  mark_stack_overflowed = rtsTrue;
                  reset_mark_stack();
              }
              push_mark_stack((P_)q);
          }
          return q;
      }
  }

  stp = bd->step->to;

  info = get_itbl(q);

  switch (info->type) {

  case MUT_VAR:
  case MVAR:
      return copy(q,sizeW_fromITBL(info),stp);

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else
      return copy_noscav(q,sizeofW(StgHeader)+1,stp);
  }

  case FUN_0_1: case FUN_1_0: case CONSTR_1_0:
      return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0: case THUNK_0_1:
      return copy(q,sizeofW(StgThunk)+1,stp);

  case THUNK_1_1: case THUNK_2_0: case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
          bd->step->no != 0 &&
          bd->step->no == generations[bd->gen_no].n_steps-1) {
          stp = bd->step;
      }
#endif
      return copy(q,sizeofW(StgThunk)+2,stp);

  case FUN_1_1: case FUN_2_0: case CONSTR_1_1: case CONSTR_2_0: case FUN_0_2:
      return copy(q,sizeofW(StgHeader)+2,stp);

  case CONSTR_0_2:
      return copy_noscav(q,sizeofW(StgHeader)+2,stp);

  case THUNK:
      return copy(q,thunk_sizeW_fromITBL(info),stp);

  case FUN:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case STABLE_NAME:
      return copy(q,sizeW_fromITBL(info),stp);

  case BCO:
      return copy(q,bco_sizeW((StgBCO *)q),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case THUNK_SELECTOR:
    {
        StgClosure *p;

        if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
            return copy(q,THUNK_SELECTOR_sizeW(),stp);
        }

        p = eval_thunk_selector(info->layout.selector_offset,
                                (StgSelector *)q);

        if (p == NULL) {
            return copy(q,THUNK_SELECTOR_sizeW(),stp);
        } else {
            // q is still BLACKHOLE'd.
            thunk_selector_depth++;
            p = evacuate(p);
            thunk_selector_depth--;

            // Update the THUNK_SELECTOR with an indirection to the
            // EVACUATED closure now at p.  Why do this rather than
            // upd_evacuee(q,p)?  Because we have an invariant that an
            // EVACUATED closure always points to an object in the
            // same or an older generation (required by the short-cut
            // test in the EVACUATED case, below).
            SET_INFO(q, &stg_IND_info);
            ((StgInd *)q)->indirectee = p;

#ifdef PROFILING
            // We store the size of the just evacuated object in the
            // LDV word so that the profiler can guess the position of
            // the next object later.
            SET_EVACUAEE_FOR_LDV(q, THUNK_SELECTOR_sizeW());
#endif
            return p;
        }
    }

  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case PAP:
      return copy(q,pap_sizeW((StgPAP*)q),stp);

  case AP:
      return copy(q,ap_sizeW((StgAP*)q),stp);

  case AP_STACK:
      return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);

  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     */
    /*
     * Optimisation: the check is fairly expensive, but we can often
     * shortcut it if either the required generation is 0, or the
     * current object (the EVACUATED) is in a high enough generation.
     * We know that an EVACUATED always points to an object in the
     * same or an older generation.  stp is the lowest step that the
     * current object would be evacuated to, so we only do the full
     * check if stp is too low.
     */
    if (evac_gen > 0 && stp->gen_no < evac_gen) {  // optimisation
      StgClosure *p = ((StgEvacuated*)q)->evacuee;
      if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
        failed_to_evac = rtsTrue;
        TICK_GC_FAILED_PROMOTION();
      }
    }
    return ((StgEvacuated*)q)->evacuee;

  case ARR_WORDS:
      // just copy the block
      return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
        q = (StgClosure *)tso->link;
        goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
        StgTSO *new_tso;
        StgPtr p, q;

        new_tso = (StgTSO *)copyPart((StgClosure *)tso,
                                     tso_sizeW(tso),
                                     sizeofW(StgTSO), stp);
        move_TSO(tso, new_tso);
        for (p = tso->sp, q = new_tso->sp;
             p < tso->stack+tso->stack_size;) {
          *q++ = *p++;
        }

        return (StgClosure *)new_tso;
      }
    }

#if defined(PAR)
  case RBH:
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
               debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)",
                          q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
             debugBelch("@@ evacuate: %p (%s) to %p (%s)",
                        q, info_type(q), to, info_type(to)));
    return to;

# ifdef DIST
  case REMOTE_REF:
# endif
  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
             debugBelch("@@ evacuate: %p (%s) to %p (%s)",
                        q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
             debugBelch("@@ evacuate: %p (%s) to %p (%s)",
                        q, info_type(q), to, info_type(to)));
    return to;
#endif

  case TREC_HEADER:
      return copy(q,sizeofW(StgTRecHeader),stp);

  case TVAR_WAIT_QUEUE:
      return copy(q,sizeofW(StgTVarWaitQueue),stp);

  case TVAR:
      return copy(q,sizeofW(StgTVar),stp);

  case TREC_CHUNK:
      return copy(q,sizeofW(StgTRecChunk),stp);

  default:
      barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}

/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
   a closure pointer if we evaluated it and this is the result.  Note
   that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
   reducing it to HNF, just that we have eliminated the selection.
   The result might be another thunk, or even another THUNK_SELECTOR.

   If the return value is non-NULL, the original selector thunk has
   been BLACKHOLE'd, and should be updated with an indirection or a
   forwarding pointer.  If the return value is NULL, then the selector
   thunk is unchanged.

   ToDo: the treatment of THUNK_SELECTORS could be improved in the
   following way (from a suggestion by Ian Lynagh):

   We can have a chain like this:

      sel_0 --> (a,b)
                 |
                 |-----> sel_0 --> (a,b)
                                    |
                                    |-----> sel_0 --> ...

   and the depth limit means we don't go all the way to the end of the
   chain, which results in a space leak.  This affects the recursive
   call to evacuate() in the THUNK_SELECTOR case in evacuate(): *not*
   the recursive call to eval_thunk_selector() in
   eval_thunk_selector().

   We could eliminate the depth bound in this case, in the following
   way:

      - traverse the chain once to discover the *value* of the
        THUNK_SELECTOR.  Mark all THUNK_SELECTORS that we
        visit on the way as having been visited already (somehow).

      - in a second pass, traverse the chain again updating all
        THUNK_SELECTORS that we find on the way with indirections to
        the value.

      - if we encounter a "marked" THUNK_SELECTOR in a normal
        evacuate(), we know it can't be updated so just evac it.

   Program that illustrates the problem:

        foo [] = ([], [])
        foo (x:xs) = let (ys, zs) = foo xs
                     in if x >= 0 then (x:ys, zs) else (ys, x:zs)

        main = bar [1..(100000000::Int)]
        bar xs = (\(ys, zs) -> print ys >> print zs) (foo xs)

   -------------------------------------------------------------------------- */

static inline rtsBool
is_to_space ( StgClosure *p )
{
    bdescr *bd;

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED(p) &&
        ((bd->flags & BF_EVACUATED)
         || ((bd->flags & BF_COMPACTED) &&
             is_marked((P_)p,bd)))) {
        return rtsTrue;
    } else {
        return rtsFalse;
    }
}

static StgClosure *
eval_thunk_selector( nat field, StgSelector * p )
{
    StgInfoTable *info;
    const StgInfoTable *info_ptr;
    StgClosure *selectee;

    selectee = p->selectee;

    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = p->header.info;

    // If the THUNK_SELECTOR is in a generation that we are not
    // collecting, then bail out early.  We won't be able to save any
    // space in any case, and updating with an indirection is trickier
    // in an old gen.
    if (Bdescr((StgPtr)p)->gen_no > N) {
        return NULL;
    }

    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
    SET_INFO(p,&stg_BLACKHOLE_info);

selector_loop:

    // We don't want to end up in to-space, because this causes
    // problems when the GC later tries to evacuate the result of
    // eval_thunk_selector().  There are various ways this could
    // happen:
    //
    // 1. following an IND_STATIC
    //
    // 2. when the old generation is compacted, the mark phase updates
    //    from-space pointers to be to-space pointers, and we can't
    //    reliably tell which we're following (eg. from an IND_STATIC).
    //
    // 3. compacting GC again: if we're looking at a constructor in
    //    the compacted generation, it might point directly to objects
    //    in to-space.  We must bale out here, otherwise doing the selection
    //    will result in a to-space pointer being returned.
    //
    //  (1) is dealt with using a BF_EVACUATED test on the
    //  selectee.  (2) and (3): we can tell if we're looking at an
    //  object in the compacted generation that might point to
    //  to-space objects by testing that (a) it is BF_COMPACTED, (b)
    //  the compacted generation is being collected, and (c) the
    //  object is marked.  Only a marked object may have pointers that
    //  point to to-space objects, because that happens when
    //  scavenging.
    //
    //  The to-space test is now embodied in the is_to_space() inline
    //  function, as it is re-used below.
    //
    if (is_to_space(selectee)) {
        goto bale_out;
    }
2331 info = get_itbl(selectee);
2332 switch (info->type) {
2340 case CONSTR_NOCAF_STATIC:
2341 // check that the size is in range
2342 ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
2343 info->layout.payload.nptrs));
2345 // Select the right field from the constructor, and check
2346 // that the result isn't in to-space. It might be in
2347 // to-space if, for example, this constructor contains
2348 // pointers to younger-gen objects (and is on the mut-once
2353 q = selectee->payload[field];
2354 if (is_to_space(q)) {
2364 case IND_OLDGEN_PERM:
2366 selectee = ((StgInd *)selectee)->indirectee;
2370 // We don't follow pointers into to-space; the constructor
2371 // has already been evacuated, so we won't save any space
2372 // leaks by evaluating this selector thunk anyhow.
2375 case THUNK_SELECTOR:
2379 // check that we don't recurse too much, re-using the
2380 // depth bound also used in evacuate().
2381 if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
2384 thunk_selector_depth++;
2386 val = eval_thunk_selector(info->layout.selector_offset,
2387 (StgSelector *)selectee);
2389 thunk_selector_depth--;
2394 // We evaluated this selector thunk, so update it with
2395 // an indirection. NOTE: we don't use UPD_IND here,
2396 // because we are guaranteed that p is in a generation
2397 // that we are collecting, and we never want to put the
2398 // indirection on a mutable list.
2400 // For the purposes of LDV profiling, we have destroyed
2401 // the original selector thunk.
2402 SET_INFO(p, info_ptr);
2403 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(selectee);
2405 ((StgInd *)selectee)->indirectee = val;
2406 SET_INFO(selectee,&stg_IND_info);
2408 // For the purposes of LDV profiling, we have created an
2410 LDV_RECORD_CREATE(selectee);
2427 case SE_CAF_BLACKHOLE:
2439 // not evaluated yet
2443 barf("eval_thunk_selector: strange selectee %d",
2448 // We didn't manage to evaluate this thunk; restore the old info pointer
2449 SET_INFO(p, info_ptr);
/* -----------------------------------------------------------------------------
   move_TSO is called to update the TSO structure after it has been
   moved from one place to another.
   -------------------------------------------------------------------------- */

void
move_TSO (StgTSO *src, StgTSO *dest)
{
    ptrdiff_t diff;

    // relocate the stack pointer...
    diff = (StgPtr)dest - (StgPtr)src; // In *words*
    dest->sp = (StgPtr)dest->sp + diff;
}
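/* Worked example of the pointer arithmetic above (illustrative
 * addresses; 8-byte words assumed): with src at 0x1000 and dest at
 * 0x1800, diff = (0x1800 - 0x1000) / 8 = 0x100 *words*, since
 * subtracting two StgPtrs counts StgWords rather than bytes.  Adding
 * diff to the old sp leaves it at the same offset within the moved
 * TSO's stack. */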
/* Similar to scavenge_large_bitmap(), but we don't write back the
 * pointers we get back from evacuate().
 */
static void
scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
{
    nat i, b, size;
    StgWord bitmap;
    StgClosure **p;

    b = 0;
    bitmap = large_srt->l.bitmap[b];
    size   = (nat)large_srt->l.size;
    p      = (StgClosure **)large_srt->srt;
    for (i = 0; i < size; ) {
	if ((bitmap & 1) != 0) {
	    evacuate(*p);
	}
	i++;
	p++;
	if (i % BITS_IN(W_) == 0) {
	    b++;
	    bitmap = large_srt->l.bitmap[b];
	} else {
	    bitmap = bitmap >> 1;
	}
    }
}
/* evacuate the SRT.  If srt_bitmap is zero, then there isn't an
 * srt field in the info table.  That's ok, because we'll
 * never dereference it.
 */
STATIC_INLINE void
scavenge_srt (StgClosure **srt, nat srt_bitmap)
{
  nat bitmap;
  StgClosure **p;

  bitmap = srt_bitmap;
  p = srt;

  if (bitmap == (StgHalfWord)(-1)) {
      scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
      return;
  }

  while (bitmap != 0) {
      if ((bitmap & 1) != 0) {
#ifdef ENABLE_WIN32_DLL_SUPPORT
	  // Special-case to handle references to closures hiding out in DLLs, since
	  // double indirections are required to get at those. The code generator knows
	  // which is which when generating the SRT, so it stores the (indirect)
	  // reference to the DLL closure in the table by first adding one to it.
	  // We check for this here, and undo the addition before evacuating it.
	  //
	  // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
	  // closure that's fixed at link-time, and no extra magic is required.
	  if ( (unsigned long)(*p) & 0x1 ) {
	      evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *p) & ~0x1)));
	  } else {
	      evacuate(*p);
	  }
#else
	  evacuate(*p);
#endif
      }
      p++;
      bitmap = bitmap >> 1;
  }
}
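/* Worked example of the tag-bit encoding above (illustrative only;
 * the addresses are hypothetical).  Closure pointers are word-aligned,
 * so bit 0 is free to serve as a tag: */
#if 0
    StgClosure **entry = (StgClosure **)0x5009;   // tagged SRT entry
    if ((unsigned long)entry & 0x1) {
        // undo the +1 to recover the indirect cell at 0x5008...
        StgClosure **ind = (StgClosure **)((unsigned long)entry & ~0x1);
        evacuate(*ind);   // ...then follow the double indirection
    }
    // untagged entries point at closures fixed at link-time, and are
    // evacuated directly.
#endif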
STATIC_INLINE void
scavenge_thunk_srt(const StgInfoTable *info)
{
    StgThunkInfoTable *thunk_info;

    if (!major_gc) return;

    thunk_info = itbl_to_thunk_itbl(info);
    scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
}

STATIC_INLINE void
scavenge_fun_srt(const StgInfoTable *info)
{
    StgFunInfoTable *fun_info;

    if (!major_gc) return;

    fun_info = itbl_to_fun_itbl(info);
    scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
}
/* -----------------------------------------------------------------------------
   Scavenge a TSO.
   -------------------------------------------------------------------------- */

static void
scavengeTSO (StgTSO *tso)
{
    if (   tso->why_blocked == BlockedOnMVar
	|| tso->why_blocked == BlockedOnBlackHole
	|| tso->why_blocked == BlockedOnException
#if defined(PAR)
	|| tso->why_blocked == BlockedOnGA
	|| tso->why_blocked == BlockedOnGA_NoSend
#endif
	) {
	tso->block_info.closure = evacuate(tso->block_info.closure);
    }
    if ( tso->blocked_exceptions != NULL ) {
	tso->blocked_exceptions =
	    (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
    }

    // We don't always chase the link field: TSOs on the blackhole
    // queue are not automatically alive, so the link field is a
    // "weak" pointer in that case.
    if (tso->why_blocked != BlockedOnBlackHole) {
	tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
    }

    // scavenge current transaction record
    tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);

    // scavenge this thread's stack
    scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
}
/* -----------------------------------------------------------------------------
   Blocks of function args occur on the stack (at the top) and
   in PAPs.
   -------------------------------------------------------------------------- */

STATIC_INLINE StgPtr
scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
{
    StgPtr p;
    StgWord bitmap;
    nat size;

    p = (StgPtr)args;
    switch (fun_info->f.fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
	size = BITMAP_SIZE(fun_info->f.b.bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	size = GET_FUN_LARGE_BITMAP(fun_info)->size;
	scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
	p += size;
	break;
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
	size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }
    return p;
}
STATIC_INLINE StgPtr
scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
{
    StgPtr p;
    StgWord bitmap;
    StgFunInfoTable *fun_info;

    fun_info = get_fun_itbl(fun);
    ASSERT(fun_info->i.type != PAP);
    p = (StgPtr)payload;

    switch (fun_info->f.fun_type) {
    case ARG_GEN:
	bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
	goto small_bitmap;
    case ARG_GEN_BIG:
	scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
	p += size;
	break;
    case ARG_BCO:
	scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
	p += size;
	break;
    default:
	bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
    small_bitmap:
	while (size > 0) {
	    if ((bitmap & 1) == 0) {
		*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    }
	    p++;
	    bitmap = bitmap >> 1;
	    size--;
	}
	break;
    }
    return p;
}
STATIC_INLINE StgPtr
scavenge_PAP (StgPAP *pap)
{
    pap->fun = evacuate(pap->fun);
    return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
}

STATIC_INLINE StgPtr
scavenge_AP (StgAP *ap)
{
    ap->fun = evacuate(ap->fun);
    return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
}
/* -----------------------------------------------------------------------------
   Scavenge a given step until there are no more objects in this step
   to scavenge.

   evac_gen is set by the caller to be either zero (for a step in a
   generation < N) or G where G is the generation of the step being
   scavenged.

   We sometimes temporarily change evac_gen back to zero if we're
   scavenging a mutable object where early promotion isn't such a good
   idea.
   -------------------------------------------------------------------------- */
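/* The evac_gen save/restore pattern used by the mutable cases below
 * (MVAR, MUT_VAR, TVAR, ...), sketched in isolation.  Illustrative
 * only: "obj" and "field" stand for whichever closure is in hand. */
#if 0
    saved_evac_gen = evac_gen;
    evac_gen = 0;                       // mutable: don't promote eagerly
    obj->field = evacuate(obj->field);
    evac_gen = saved_evac_gen;          // restore for subsequent objects
    failed_to_evac = rtsTrue;           // ensure obj goes on the mutable list
#endif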
static void
scavenge(step *stp)
{
  StgPtr p, q;
  StgInfoTable *info;
  bdescr *bd;
  nat saved_evac_gen = evac_gen;

  p = stp->scan;
  bd = stp->scan_bd;

  failed_to_evac = rtsFalse;

  /* scavenge phase - standard breadth-first scavenging of the
   * evacuated objects
   */
  while (bd != stp->hp_bd || p < stp->hp) {

    // If we're at the end of this block, move on to the next block
    if (bd != stp->hp_bd && p == bd->free) {
	bd = bd->link;
	p = bd->start;
	continue;
    }

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    ASSERT(thunk_selector_depth == 0);

    q = p;
    switch (info->type) {

    case MVAR:
    {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
	mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
	mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable.
	p += sizeofW(StgMVar);
	break;
    }

    case FUN_2_0:
	scavenge_fun_srt(info);
	((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_2_0:
	scavenge_thunk_srt(info);
	((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
	((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 2;
	break;

    case CONSTR_2_0:
	((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_0:
	scavenge_thunk_srt(info);
	((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 1;
	break;

    case FUN_1_0:
	scavenge_fun_srt(info);
    case CONSTR_1_0:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_1:
	scavenge_thunk_srt(info);
	p += sizeofW(StgThunk) + 1;
	break;

    case FUN_0_1:
	scavenge_fun_srt(info);
    case CONSTR_0_1:
	p += sizeofW(StgHeader) + 1;
	break;

    case THUNK_0_2:
	scavenge_thunk_srt(info);
	p += sizeofW(StgThunk) + 2;
	break;

    case FUN_0_2:
	scavenge_fun_srt(info);
    case CONSTR_0_2:
	p += sizeofW(StgHeader) + 2;
	break;

    case THUNK_1_1:
	scavenge_thunk_srt(info);
	((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
	p += sizeofW(StgThunk) + 2;
	break;

    case FUN_1_1:
	scavenge_fun_srt(info);
    case CONSTR_1_1:
	((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	p += sizeofW(StgHeader) + 2;
	break;

    case FUN:
	scavenge_fun_srt(info);
	goto gen_obj;

    case THUNK:
    {
	StgPtr end;

	scavenge_thunk_srt(info);
	end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
    }

    gen_obj:
    case CONSTR:
    case WEAK:
    case FOREIGN:
    case STABLE_NAME:
    {
	StgPtr end;

	end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}
	p += info->layout.payload.nptrs;
	break;
    }

    case BCO:
    {
	StgBCO *bco = (StgBCO *)p;
	bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
	bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
	bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
	bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
	p += bco_sizeW(bco);
	break;
    }

    case IND_PERM:
	if (stp->gen->no != 0) {
#ifdef PROFILING
	    // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
	    // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
	    LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
#endif
	    // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
	    SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);

	    // We pretend that p has just been created.
	    LDV_RECORD_CREATE((StgClosure *)p);
	}
	// fall through
    case IND_OLDGEN_PERM:
	((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
	p += sizeofW(StgInd);
	break;

    case MUT_VAR:
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable anyhow
	p += sizeofW(StgMutVar);
	break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	p += BLACKHOLE_sizeW();
	break;

    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	p += THUNK_SELECTOR_sizeW();
	break;
    }

    // A chunk of stack saved in a heap object
    case AP_STACK:
    {
	StgAP_STACK *ap = (StgAP_STACK *)p;

	ap->fun = evacuate(ap->fun);
	scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	p = (StgPtr)ap->payload + ap->size;
	break;
    }

    case PAP:
	p = scavenge_PAP((StgPAP *)p);
	break;

    case AP:
	p = scavenge_AP((StgAP *)p);
	break;

    case ARR_WORDS:
	// nothing to follow
	p += arr_words_sizeW((StgArrWords *)p);
	break;

    case MUT_ARR_PTRS:
	// follow everything
    {
	StgPtr next;

	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable anyhow.
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	// follow everything
    {
	StgPtr next;

	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}

	// If we're going to put this object on the mutable list, then
	// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
	if (failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
	}
	break;
    }

    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;
	evac_gen = 0;
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable anyhow.
	p += tso_sizeW(tso);
	break;
    }

#if defined(PAR)
    case RBH:
    {
#if 0
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	    evacuate((StgClosure *)rbh->blocking_queue);
	failed_to_evac = rtsTrue; // mutable anyhow.
	IF_DEBUG(gc,
		 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
			    p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	p += BLACKHOLE_sizeW();
	break;
    }

    case BLOCKED_FETCH:
    {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	    evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	    evacuate((StgClosure *)bf->link);
	IF_DEBUG(gc,
		 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
			    bf, info_type((StgClosure *)bf),
			    bf->node, info_type(bf->node)));
	p += sizeofW(StgBlockedFetch);
	break;
    }

    case FETCH_ME:
	p += sizeofW(StgFetchMe);
	break; // nothing to do in this case

    case FETCH_ME_BQ:
    {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	    evacuate((StgClosure *)fmbq->blocking_queue);
	IF_DEBUG(gc,
		 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
			    p, info_type((StgClosure *)p)));
	p += sizeofW(StgFetchMeBlockingQueue);
	break;
    }
#endif

    case TVAR_WAIT_QUEUE:
    {
	StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
	evac_gen = 0;
	wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
	wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
	wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable
	p += sizeofW(StgTVarWaitQueue);
	break;
    }

    case TVAR:
    {
	StgTVar *tvar = ((StgTVar *) p);
	evac_gen = 0;
	tvar->current_value = evacuate((StgClosure*)tvar->current_value);
	tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable
	p += sizeofW(StgTVar);
	break;
    }

    case TREC_HEADER:
    {
	StgTRecHeader *trec = ((StgTRecHeader *) p);
	evac_gen = 0;
	trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
	trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable
	p += sizeofW(StgTRecHeader);
	break;
    }

    case TREC_CHUNK:
    {
	StgWord i;
	StgTRecChunk *tc = ((StgTRecChunk *) p);
	TRecEntry *e = &(tc -> entries[0]);
	evac_gen = 0;
	tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
	for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
	    e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
	    e->expected_value = evacuate((StgClosure*)e->expected_value);
	    e->new_value = evacuate((StgClosure*)e->new_value);
	}
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable
	p += sizeofW(StgTRecChunk);
	break;
    }

    default:
	barf("scavenge: unimplemented/strange closure type %d @ %p",
	     info->type, p);
    }

    /*
     * We need to record the current object on the mutable list if
     *  (a) It is actually mutable, or
     *  (b) It contains pointers to a younger generation.
     * Case (b) arises if we didn't manage to promote everything that
     * the current object points to into the current generation.
     */
    if (failed_to_evac) {
	failed_to_evac = rtsFalse;
	if (stp->gen_no > 0) {
	    recordMutableGen((StgClosure *)q, stp->gen);
	}
    }
  }

  stp->scan_bd = bd;
  stp->scan = p;
}
/* -----------------------------------------------------------------------------
   Scavenge everything on the mark stack.

   This is slightly different from scavenge():
      - we don't walk linearly through the objects, so the scavenger
        doesn't need to advance the pointer on to the next object.
   -------------------------------------------------------------------------- */
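/* The two scavenging loops, contrasted in shape (sketch only; see the
 * real loops in scavenge() above and scavenge_mark_stack() below):
 *
 *   scavenge():             while (bd != stp->hp_bd || p < stp->hp) {
 *                               ... scavenge the closure at p ...
 *                               p += size of that closure;   // linear walk
 *                           }
 *
 *   scavenge_mark_stack():  while (!mark_stack_empty()) {
 *                               p = pop_mark_stack();   // explicit worklist
 *                               ... scavenge the closure at p ...
 *                           }
 */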
static void
scavenge_mark_stack(void)
{
    StgPtr p, q;
    StgInfoTable *info;
    nat saved_evac_gen;

    evac_gen = oldest_gen->no;
    saved_evac_gen = evac_gen;

linear_scan:
    while (!mark_stack_empty()) {
	p = pop_mark_stack();

	ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
	info = get_itbl((StgClosure *)p);

	q = p;
	switch (info->type) {

	case MVAR:
	{
	    StgMVar *mvar = ((StgMVar *)p);
	    evac_gen = 0;
	    mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
	    mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
	    mvar->value = evacuate((StgClosure *)mvar->value);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsTrue; // mutable.
	    break;
	}

	case FUN_2_0:
	    scavenge_fun_srt(info);
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case THUNK_2_0:
	    scavenge_thunk_srt(info);
	    ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
	    ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
	    break;

	case CONSTR_2_0:
	    ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_1_0:
	case FUN_1_1:
	    scavenge_fun_srt(info);
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case THUNK_1_0:
	case THUNK_1_1:
	    scavenge_thunk_srt(info);
	    ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
	    break;

	case CONSTR_1_0:
	case CONSTR_1_1:
	    ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
	    break;

	case FUN_0_1:
	case FUN_0_2:
	    scavenge_fun_srt(info);
	    break;

	case THUNK_0_1:
	case THUNK_0_2:
	    scavenge_thunk_srt(info);
	    break;

	case CONSTR_0_1:
	case CONSTR_0_2:
	    break;

	case FUN:
	    scavenge_fun_srt(info);
	    goto gen_obj;

	case THUNK:
	{
	    StgPtr end;

	    scavenge_thunk_srt(info);
	    end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
		*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    }
	    break;
	}

	gen_obj:
	case CONSTR:
	case WEAK:
	case FOREIGN:
	case STABLE_NAME:
	{
	    StgPtr end;

	    end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	    for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
		*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    }
	    break;
	}

	case BCO:
	{
	    StgBCO *bco = (StgBCO *)p;
	    bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
	    bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
	    bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
	    bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
	    break;
	}

	case IND_PERM:
	    // don't need to do anything here: the only possible case
	    // is that we're in a 1-space compacting collector, with
	    // no "old" generation.
	    break;

	case IND_OLDGEN:
	case IND_OLDGEN_PERM:
	    ((StgInd *)p)->indirectee =
		evacuate(((StgInd *)p)->indirectee);
	    break;

	case MUT_VAR:
	    evac_gen = 0;
	    ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsTrue;
	    break;

	case CAF_BLACKHOLE:
	case SE_CAF_BLACKHOLE:
	case SE_BLACKHOLE:
	case BLACKHOLE:
	case ARR_WORDS:
	    break;

	case THUNK_SELECTOR:
	{
	    StgSelector *s = (StgSelector *)p;
	    s->selectee = evacuate(s->selectee);
	    break;
	}

	// A chunk of stack saved in a heap object
	case AP_STACK:
	{
	    StgAP_STACK *ap = (StgAP_STACK *)p;

	    ap->fun = evacuate(ap->fun);
	    scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	    break;
	}

	case PAP:
	    scavenge_PAP((StgPAP *)p);
	    break;

	case AP:
	    scavenge_AP((StgAP *)p);
	    break;

	case MUT_ARR_PTRS:
	    // follow everything
	{
	    StgPtr next;

	    evac_gen = 0;		// repeatedly mutable
	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    }
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsTrue; // mutable anyhow.
	    break;
	}

	case MUT_ARR_PTRS_FROZEN:
	case MUT_ARR_PTRS_FROZEN0:
	    // follow everything
	{
	    StgPtr next;

	    next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	    for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
		*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    }

	    // If we're going to put this object on the mutable list, then
	    // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
	    if (failed_to_evac) {
		((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
	    } else {
		((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
	    }
	    break;
	}

	case TSO:
	{
	    StgTSO *tso = (StgTSO *)p;
	    evac_gen = 0;
	    scavengeTSO(tso);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsTrue;
	    break;
	}

#if defined(PAR)
	case RBH:
	{
#if 0
	    nat size, ptrs, nonptrs, vhs;
	    char str[80];
	    StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	    StgRBH *rbh = (StgRBH *)p;
	    rbh->blocking_queue =
		(StgTSO *)evacuate((StgClosure *)rbh->blocking_queue);
	    failed_to_evac = rtsTrue; // mutable anyhow.
	    IF_DEBUG(gc,
		     debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
				p, info_type(p), (StgClosure *)rbh->blocking_queue));
	    break;
	}

	case BLOCKED_FETCH:
	{
	    StgBlockedFetch *bf = (StgBlockedFetch *)p;
	    // follow the pointer to the node which is being demanded
	    (StgClosure *)bf->node =
		evacuate((StgClosure *)bf->node);
	    // follow the link to the rest of the blocking queue
	    (StgClosure *)bf->link =
		evacuate((StgClosure *)bf->link);
	    IF_DEBUG(gc,
		     debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
				bf, info_type((StgClosure *)bf),
				bf->node, info_type(bf->node)));
	    break;
	}

	case FETCH_ME:
	    break; // nothing to do in this case

	case FETCH_ME_BQ:
	{
	    StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	    (StgClosure *)fmbq->blocking_queue =
		evacuate((StgClosure *)fmbq->blocking_queue);
	    IF_DEBUG(gc,
		     debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
				p, info_type((StgClosure *)p)));
	    break;
	}
#endif

	case TVAR_WAIT_QUEUE:
	{
	    StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
	    evac_gen = 0;
	    wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
	    wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
	    wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsTrue; // mutable
	    break;
	}

	case TVAR:
	{
	    StgTVar *tvar = ((StgTVar *) p);
	    evac_gen = 0;
	    tvar->current_value = evacuate((StgClosure*)tvar->current_value);
	    tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsTrue; // mutable
	    break;
	}

	case TREC_CHUNK:
	{
	    StgWord i;
	    StgTRecChunk *tc = ((StgTRecChunk *) p);
	    TRecEntry *e = &(tc -> entries[0]);
	    evac_gen = 0;
	    tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
	    for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
		e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
		e->expected_value = evacuate((StgClosure*)e->expected_value);
		e->new_value = evacuate((StgClosure*)e->new_value);
	    }
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsTrue; // mutable
	    break;
	}

	case TREC_HEADER:
	{
	    StgTRecHeader *trec = ((StgTRecHeader *) p);
	    evac_gen = 0;
	    trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
	    trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
	    evac_gen = saved_evac_gen;
	    failed_to_evac = rtsTrue; // mutable
	    break;
	}

	default:
	    barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
		 info->type, p);
	}

	if (failed_to_evac) {
	    failed_to_evac = rtsFalse;
	    if (evac_gen > 0) {
		recordMutableGen((StgClosure *)q, &generations[evac_gen]);
	    }
	}

	// mark the next bit to indicate "scavenged"
	mark(q+1, Bdescr(q));

    } // while (!mark_stack_empty())
    // start a new linear scan if the mark stack overflowed at some point
    if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
	IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan"));
	mark_stack_overflowed = rtsFalse;
	oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
	oldgen_scan = oldgen_scan_bd->start;
    }

    if (oldgen_scan_bd) {
	// push a new thing on the mark stack
    loop:
	// find a closure that is marked but not scavenged, and start
	// from there.
	while (oldgen_scan < oldgen_scan_bd->free
	       && !is_marked(oldgen_scan,oldgen_scan_bd)) {
	    oldgen_scan++;
	}

	if (oldgen_scan < oldgen_scan_bd->free) {

	    // already scavenged?
	    if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
		oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
		goto loop;
	    }
	    push_mark_stack(oldgen_scan);
	    // ToDo: bump the linear scan by the actual size of the object
	    oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
	    goto linear_scan;
	} else {
	    oldgen_scan_bd = oldgen_scan_bd->link;
	    if (oldgen_scan_bd != NULL) {
		oldgen_scan = oldgen_scan_bd->start;
		goto loop;
	    }
	}
    }
}
/* -----------------------------------------------------------------------------
   Scavenge one object.

   This is used for objects that are temporarily marked as mutable
   because they contain old-to-new generation pointers.  Only certain
   objects can have this property.
   -------------------------------------------------------------------------- */
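/* How callers drive scavenge_one() (this is the pattern used by
 * scavenge_mutable_list() and scavenge_large() below): a rtsTrue
 * result means the object still points into a younger generation,
 * so it has to stay on the mutable list for the next GC. */
#if 0
    if (scavenge_one(p)) {
        recordMutableGen((StgClosure *)p, gen);
    }
#endif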
static rtsBool
scavenge_one(StgPtr p)
{
    const StgInfoTable *info;
    nat saved_evac_gen = evac_gen;
    rtsBool no_luck;

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl((StgClosure *)p);

    switch (info->type) {

    case MVAR:
    {
	StgMVar *mvar = ((StgMVar *)p);
	evac_gen = 0;
	mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
	mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
	mvar->value = evacuate((StgClosure *)mvar->value);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable.
	break;
    }

    case THUNK:
    case THUNK_1_0:
    case THUNK_0_1:
    case THUNK_1_1:
    case THUNK_0_2:
    case THUNK_2_0:
    {
	StgPtr q, end;

	end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
	for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
	    *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
	}
	break;
    }

    case FUN:
    case FUN_1_0:			// hardly worth specialising these guys
    case FUN_0_1:
    case FUN_1_1:
    case FUN_0_2:
    case FUN_2_0:
    case CONSTR:
    case CONSTR_1_0:
    case CONSTR_0_1:
    case CONSTR_1_1:
    case CONSTR_0_2:
    case CONSTR_2_0:
    case WEAK:
    case FOREIGN:
    case IND_PERM:
    {
	StgPtr q, end;

	end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
	for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
	    *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
	}
	break;
    }

    case MUT_VAR:
	evac_gen = 0;
	((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable anyhow
	break;

    case CAF_BLACKHOLE:
    case SE_CAF_BLACKHOLE:
    case SE_BLACKHOLE:
    case BLACKHOLE:
	break;

    case THUNK_SELECTOR:
    {
	StgSelector *s = (StgSelector *)p;
	s->selectee = evacuate(s->selectee);
	break;
    }

    case AP_STACK:
    {
	StgAP_STACK *ap = (StgAP_STACK *)p;

	ap->fun = evacuate(ap->fun);
	scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
	p = (StgPtr)ap->payload + ap->size;
	break;
    }

    case PAP:
	p = scavenge_PAP((StgPAP *)p);
	break;

    case AP:
	p = scavenge_AP((StgAP *)p);
	break;

    case ARR_WORDS:
	// nothing to follow
	break;

    case MUT_ARR_PTRS:
    {
	StgPtr next;

	// follow everything
	evac_gen = 0;		// repeatedly mutable
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue;
	break;
    }

    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
    {
	StgPtr next, q = p;

	// follow everything
	next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
	for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}

	// If we're going to put this object on the mutable list, then
	// set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
	if (failed_to_evac) {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
	} else {
	    ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
	}
	break;
    }

    case TSO:
    {
	StgTSO *tso = (StgTSO *)p;

	evac_gen = 0;		// repeatedly mutable
	scavengeTSO(tso);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue;
	break;
    }

#if defined(PAR)
    case RBH:
    {
#if 0
	nat size, ptrs, nonptrs, vhs;
	char str[80];
	StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
#endif
	StgRBH *rbh = (StgRBH *)p;
	(StgClosure *)rbh->blocking_queue =
	    evacuate((StgClosure *)rbh->blocking_queue);
	failed_to_evac = rtsTrue; // mutable anyhow.
	IF_DEBUG(gc,
		 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
			    p, info_type(p), (StgClosure *)rbh->blocking_queue));
	// ToDo: use size of reverted closure here!
	break;
    }

    case BLOCKED_FETCH:
    {
	StgBlockedFetch *bf = (StgBlockedFetch *)p;
	// follow the pointer to the node which is being demanded
	(StgClosure *)bf->node =
	    evacuate((StgClosure *)bf->node);
	// follow the link to the rest of the blocking queue
	(StgClosure *)bf->link =
	    evacuate((StgClosure *)bf->link);
	IF_DEBUG(gc,
		 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
			    bf, info_type((StgClosure *)bf),
			    bf->node, info_type(bf->node)));
	break;
    }

    case FETCH_ME:
	break; // nothing to do in this case

    case FETCH_ME_BQ:
    {
	StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
	(StgClosure *)fmbq->blocking_queue =
	    evacuate((StgClosure *)fmbq->blocking_queue);
	IF_DEBUG(gc,
		 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
			    p, info_type((StgClosure *)p)));
	break;
    }
#endif

    case TVAR_WAIT_QUEUE:
    {
	StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
	evac_gen = 0;
	wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
	wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
	wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable
	break;
    }

    case TVAR:
    {
	StgTVar *tvar = ((StgTVar *) p);
	evac_gen = 0;
	tvar->current_value = evacuate((StgClosure*)tvar->current_value);
	tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable
	break;
    }

    case TREC_HEADER:
    {
	StgTRecHeader *trec = ((StgTRecHeader *) p);
	evac_gen = 0;
	trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
	trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable
	break;
    }

    case TREC_CHUNK:
    {
	StgWord i;
	StgTRecChunk *tc = ((StgTRecChunk *) p);
	TRecEntry *e = &(tc -> entries[0]);
	evac_gen = 0;
	tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
	for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
	    e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
	    e->expected_value = evacuate((StgClosure*)e->expected_value);
	    e->new_value = evacuate((StgClosure*)e->new_value);
	}
	evac_gen = saved_evac_gen;
	failed_to_evac = rtsTrue; // mutable
	break;
    }

    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
    case IND_STATIC:
    {
	/* Careful here: a THUNK can be on the mutable list because
	 * it contains pointers to young gen objects.  If such a thunk
	 * is updated, the IND_OLDGEN will be added to the mutable
	 * list again, and we'll scavenge it twice.  evacuate()
	 * doesn't check whether the object has already been
	 * evacuated, so we perform that check here.
	 */
	StgClosure *q = ((StgInd *)p)->indirectee;
	if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
	    break;
	}
	((StgInd *)p)->indirectee = evacuate(q);
    }

#if 0 && defined(DEBUG)
    if (RtsFlags.DebugFlags.gc)
    /* Debugging code to print out the size of the thing we just
     * promoted
     */
    {
	StgPtr start = gen->steps[0].scan;
	bdescr *start_bd = gen->steps[0].scan_bd;
	nat size = 0;
	scavenge(&gen->steps[0]);
	if (start_bd != gen->steps[0].scan_bd) {
	    size += (P_)BLOCK_ROUND_UP(start) - start;
	    start_bd = start_bd->link;
	    while (start_bd != gen->steps[0].scan_bd) {
		size += BLOCK_SIZE_W;
		start_bd = start_bd->link;
	    }
	    size += gen->steps[0].scan -
		(P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
	} else {
	    size = gen->steps[0].scan - start;
	}
	debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
    }
#endif
    break;

    default:
	barf("scavenge_one: strange object %d", (int)(info->type));
    }

    no_luck = failed_to_evac;
    failed_to_evac = rtsFalse;
    return (no_luck);
}
/* -----------------------------------------------------------------------------
   Scavenging mutable lists.

   We treat the mutable list of each generation > N (i.e. all the
   generations older than the one being collected) as roots.  We also
   remove non-mutable objects from the mutable list at this point.
   -------------------------------------------------------------------------- */

static void
scavenge_mutable_list(generation *gen)
{
    bdescr *bd;
    StgPtr p, q;

    bd = gen->saved_mut_list;

    evac_gen = gen->no;
    for (; bd != NULL; bd = bd->link) {
	for (q = bd->start; q < bd->free; q++) {
	    p = (StgPtr)*q;
	    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
	    if (scavenge_one(p)) {
		/* didn't manage to promote everything, so put the
		 * object back on the list.
		 */
		recordMutableGen((StgClosure *)p,gen);
	    }
	}
    }

    // free the old mut_list
    freeChain(gen->saved_mut_list);
    gen->saved_mut_list = NULL;
}
static void
scavenge_static(void)
{
  StgClosure* p = static_objects;
  const StgInfoTable *info;

  /* Always evacuate straight to the oldest generation for static
   * objects */
  evac_gen = oldest_gen->no;

  /* keep going until we've scavenged all the objects on the linked
   * list... */
  while (p != END_OF_STATIC_LIST) {

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);
    /*
    if (info->type==RBH)
      info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
    */
    // make sure the info pointer is into text space

    /* Take this object *off* the static_objects list,
     * and put it on the scavenged_static_objects list.
     */
    static_objects = *STATIC_LINK(info,p);
    *STATIC_LINK(info,p) = scavenged_static_objects;
    scavenged_static_objects = p;

    switch (info -> type) {

    case IND_STATIC:
      {
	StgInd *ind = (StgInd *)p;
	ind->indirectee = evacuate(ind->indirectee);

	/* might fail to evacuate it, in which case we have to pop it
	 * back on the mutable list of the oldest generation.  We
	 * leave it *on* the scavenged_static_objects list, though,
	 * in case we visit this object again.
	 */
	if (failed_to_evac) {
	  failed_to_evac = rtsFalse;
	  recordMutableGen((StgClosure *)p,oldest_gen);
	}
	break;
      }

    case THUNK_STATIC:
      scavenge_thunk_srt(info);
      break;

    case FUN_STATIC:
      scavenge_fun_srt(info);
      break;

    case CONSTR_STATIC:
      {
	StgPtr q, next;

	next = (P_)p->payload + info->layout.payload.ptrs;
	// evacuate the pointers
	for (q = (P_)p->payload; q < next; q++) {
	    *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
	}
	break;
      }

    default:
      barf("scavenge_static: strange closure %d", (int)(info->type));
    }

    ASSERT(failed_to_evac == rtsFalse);

    /* get the next static object from the list.  Remember, there might
     * be more stuff on this list now that we've done some evacuating!
     * (static_objects is a global)
     */
    p = static_objects;
  }
}
/* -----------------------------------------------------------------------------
   scavenge a chunk of memory described by a bitmap
   -------------------------------------------------------------------------- */

static void
scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
{
    nat i, b;
    StgWord bitmap;

    b = 0;
    bitmap = large_bitmap->bitmap[b];
    for (i = 0; i < size; ) {
	if ((bitmap & 1) == 0) {
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}
	i++;
	p++;
	if (i % BITS_IN(W_) == 0) {
	    b++;
	    bitmap = large_bitmap->bitmap[b];
	} else {
	    bitmap = bitmap >> 1;
	}
    }
}
STATIC_INLINE StgPtr
scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
{
    while (size > 0) {
	if ((bitmap & 1) == 0) {
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	}
	p++;
	bitmap = bitmap >> 1;
	size--;
    }
    return p;
}
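/* Worked example (illustrative): for a frame laid out as
 * [ptr, word, ptr], size = 3 and bitmap = 0b010.  Bit i is 0 when
 * slot i holds a pointer, so slots 0 and 2 are evacuated and slot 1
 * is skipped.  Note that the SRT bitmaps above use the opposite
 * sense: there, (bitmap & 1) != 0 selects the entries to evacuate. */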
/* -----------------------------------------------------------------------------
   scavenge_stack walks over a section of stack and evacuates all the
   objects pointed to by it.  We can use the same code for walking
   AP_STACK_UPDs, since these are just sections of copied stack.
   -------------------------------------------------------------------------- */
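/* Shapes of the activation records handled below, sketched from the
 * cases that follow (not exhaustive):
 *
 *   RET_SMALL:     [info-ptr][payload described by a small bitmap]
 *   RET_BIG:       [info-ptr][payload described by an StgLargeBitmap]
 *   RET_BCO:       [info-ptr][bco][args described by the BCO's bitmap]
 *   RET_FUN:       [info-ptr][fun][args described by fun's arg bitmap]
 *   UPDATE_FRAME:  [info-ptr][updatee]
 */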
static void
scavenge_stack(StgPtr p, StgPtr stack_end)
{
  const StgRetInfoTable* info;
  StgWord bitmap;
  nat size;

  //IF_DEBUG(sanity, debugBelch("  scavenging stack between %p and %p", p, stack_end));

  /*
   * Each time around this loop, we are looking at a chunk of stack
   * that starts with an activation record.
   */
  while (p < stack_end) {
    info = get_ret_itbl((StgClosure *)p);

    switch (info->i.type) {

    case UPDATE_FRAME:
	// In SMP, we can get update frames that point to indirections
	// when two threads evaluate the same thunk.  We do attempt to
	// discover this situation in threadPaused(), but it's
	// possible that the following sequence occurs:
	//
	//        A            B
	//                 enter T
	//     enter T
	//                 update T
	//     update T
	//
	// Now T is an indirection, and the update frame is already
	// marked on A's stack, so we won't traverse it again in
	// threadPaused().  We could traverse the whole stack again
	// before GC, but that seems like overkill.
	//
	// Scavenging this update frame as normal would be disastrous;
	// the updatee would end up pointing to the value.  So we turn
	// the indirection into an IND_PERM, so that evacuate will
	// copy the indirection into the old generation instead of
	// discarding it.
	if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) {
	    ((StgUpdateFrame *)p)->updatee->header.info =
		(StgInfoTable *)&stg_IND_PERM_info;
	}
	((StgUpdateFrame *)p)->updatee
	    = evacuate(((StgUpdateFrame *)p)->updatee);
	p += sizeofW(StgUpdateFrame);
	continue;

      // small bitmap (< 32 entries, or 64 on a 64-bit machine)
    case CATCH_STM_FRAME:
    case CATCH_RETRY_FRAME:
    case ATOMICALLY_FRAME:
    case STOP_FRAME:
    case CATCH_FRAME:
    case RET_SMALL:
    case RET_VEC_SMALL:
	bitmap = BITMAP_BITS(info->i.layout.bitmap);
	size   = BITMAP_SIZE(info->i.layout.bitmap);
	// NOTE: the payload starts immediately after the info-ptr, we
	// don't have an StgHeader in the same sense as a heap closure.
	p++;
	p = scavenge_small_bitmap(p, size, bitmap);

    follow_srt:
	scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
	continue;

    case RET_BCO:
    {
	StgBCO *bco;
	nat size;

	p++;
	*p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	bco = (StgBCO *)*p;
	p++;
	size = BCO_BITMAP_SIZE(bco);
	scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
	p += size;
	continue;
    }

      // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
    case RET_BIG:
    case RET_VEC_BIG:
    {
	nat size;

	size = GET_LARGE_BITMAP(&info->i)->size;
	p++;
	scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
	p += size;
	// and don't forget to follow the SRT
	goto follow_srt;
    }

      // Dynamic bitmap: the mask is stored on the stack, and
      // there are a number of non-pointers followed by a number
      // of pointers above the bitmapped area.  (see StgMacros.h,
      // HEAP_CHK_GEN).
    case RET_DYN:
    {
	StgWord dyn;
	dyn = ((StgRetDyn *)p)->liveness;

	// traverse the bitmap first
	bitmap = RET_DYN_LIVENESS(dyn);
	p      = (P_)&((StgRetDyn *)p)->payload[0];
	size   = RET_DYN_BITMAP_SIZE;
	p = scavenge_small_bitmap(p, size, bitmap);

	// skip over the non-ptr words
	p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;

	// follow the ptr words
	for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
	    *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
	    p++;
	}
	continue;
    }

    case RET_FUN:
    {
	StgRetFun *ret_fun = (StgRetFun *)p;
	StgFunInfoTable *fun_info;

	ret_fun->fun = evacuate(ret_fun->fun);
	fun_info = get_fun_itbl(ret_fun->fun);
	p = scavenge_arg_block(fun_info, ret_fun->payload);
	goto follow_srt;
    }

    default:
	barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
    }
  }
}
/*-----------------------------------------------------------------------------
  scavenge the large object list.

  evac_gen set by caller; similar games played with evac_gen as with
  scavenge() - see comment at the top of scavenge().  Most large
  objects are (repeatedly) mutable, so most of the time evac_gen will
  be zero.
  --------------------------------------------------------------------------- */

static void
scavenge_large(step *stp)
{
  bdescr *bd;
  StgPtr p;

  bd = stp->new_large_objects;

  for (; bd != NULL; bd = stp->new_large_objects) {

    /* take this object *off* the large objects list and put it on
     * the scavenged large objects list.  This is so that we can
     * treat new_large_objects as a stack and push new objects on
     * the front when evacuating.
     */
    stp->new_large_objects = bd->link;
    dbl_link_onto(bd, &stp->scavenged_large_objects);

    // update the block count in this step.
    stp->n_scavenged_large_blocks += bd->blocks;

    p = bd->start;
    if (scavenge_one(p)) {
	if (stp->gen_no > 0) {
	    recordMutableGen((StgClosure *)p, stp->gen);
	}
    }
  }
}
/* -----------------------------------------------------------------------------
   Initialising the static object & mutable lists
   -------------------------------------------------------------------------- */

static void
zero_static_object_list(StgClosure* first_static)
{
  StgClosure* p;
  StgClosure* link;
  const StgInfoTable *info;

  for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
    info = get_itbl(p);
    link = *STATIC_LINK(info, p);
    *STATIC_LINK(info,p) = NULL;
  }
}
/* -----------------------------------------------------------------------------
   Reverting CAFs
   -------------------------------------------------------------------------- */

void
revertCAFs( void )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	SET_INFO(c, c->saved_info);
	c->saved_info = NULL;
	// could, but not necessary: c->static_link = NULL;
    }
    revertible_caf_list = NULL;
}
void
markCAFs( evac_fn evac )
{
    StgIndStatic *c;

    for (c = (StgIndStatic *)caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	evac(&c->indirectee);
    }
    for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
	 c = (StgIndStatic *)c->static_link)
    {
	evac(&c->indirectee);
    }
}
/* -----------------------------------------------------------------------------
   Sanity code for CAF garbage collection.

   With DEBUG turned on, we manage a CAF list in addition to the SRT
   mechanism.  After GC, we run down the CAF list and blackhole any
   CAFs which have been garbage collected.  This means we get an error
   whenever the program tries to enter a garbage collected CAF.

   Any garbage collected CAFs are taken off the CAF list at the same
   time.
   -------------------------------------------------------------------------- */
#if 0 && defined(DEBUG)

static void
gcCAFs(void)
{
  StgClosure*  p;
  StgClosure** pp;
  const StgInfoTable *info;
  nat i;

  i = 0;
  p = caf_list;
  pp = &caf_list;

  while (p != NULL) {

    info = get_itbl(p);

    ASSERT(info->type == IND_STATIC);

    if (STATIC_LINK(info,p) == NULL) {
      IF_DEBUG(gccafs, debugBelch("CAF gc'd at 0x%04lx", (long)p));
      // black hole it
      SET_INFO(p,&stg_BLACKHOLE_info);
      p = STATIC_LINK2(info,p);
      *pp = p;
    }
    else {
      pp = &STATIC_LINK2(info,p);
      p = *pp;
      i++;
    }
  }

  // debugBelch("%d CAFs live", i);
}
#endif
/* -----------------------------------------------------------------------------
 * Stack squeezing
 *
 * Code largely pinched from old RTS, then hacked to bits.  We also do
 * lazy black holing here.
 *
 * -------------------------------------------------------------------------- */
struct stack_gap { StgWord gap_size; struct stack_gap *next_gap; };

static void
stackSqueeze(StgTSO *tso, StgPtr bottom)
{
    StgPtr frame;
    rtsBool prev_was_update_frame;
    StgClosure *updatee = NULL;
    StgRetInfoTable *info;
    StgWord current_gap_size;
    struct stack_gap *gap;

    // Traverse the stack upwards, replacing adjacent update frames
    // with a single update frame and a "stack gap".  A stack gap
    // contains two values: the size of the gap, and the distance
    // to the next gap (or the stack top).

    frame = tso->sp;

    ASSERT(frame < bottom);

    prev_was_update_frame = rtsFalse;
    current_gap_size = 0;
    gap = (struct stack_gap *) (tso->sp - sizeofW(StgUpdateFrame));

    while (frame < bottom) {

	info = get_ret_itbl((StgClosure *)frame);
	switch (info->i.type) {

	case UPDATE_FRAME:
	{
	    StgUpdateFrame *upd = (StgUpdateFrame *)frame;

	    if (prev_was_update_frame) {

		TICK_UPD_SQUEEZED();
		/* wasn't there something about update squeezing and ticky to be
		 * sorted out?  oh yes: we aren't counting each enter properly
		 * in this case.  See the log somewhere.  KSW 1999-04-21
		 *
		 * Check two things: that the two update frames don't point to
		 * the same object, and that the updatee_bypass isn't already an
		 * indirection.  Both of these cases only happen when we're in a
		 * black hole-style loop (and there are multiple update frames
		 * on the stack pointing to the same closure), but they can both
		 * screw us up if we don't check.
		 */
		if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
		    UPD_IND_NOLOCK(upd->updatee, updatee);
		}

		// now mark this update frame as a stack gap.  The gap
		// marker resides in the bottom-most update frame of
		// the series of adjacent frames, and covers all the
		// frames in this series.
		current_gap_size += sizeofW(StgUpdateFrame);
		((struct stack_gap *)frame)->gap_size = current_gap_size;
		((struct stack_gap *)frame)->next_gap = gap;

		frame += sizeofW(StgUpdateFrame);
		continue;
	    }

	    // single update frame, or the topmost update frame in a series
	    prev_was_update_frame = rtsTrue;
	    updatee = upd->updatee;
	    frame += sizeofW(StgUpdateFrame);
	    continue;
	}

	default:
	    prev_was_update_frame = rtsFalse;

	    // we're not in a gap... check whether this is the end of a gap
	    // (an update frame can't be the end of a gap).
	    if (current_gap_size != 0) {
		gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
	    }
	    current_gap_size = 0;

	    frame += stack_frame_sizeW((StgClosure *)frame);
	    continue;
	}
    }

    if (current_gap_size != 0) {
	gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
    }
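    /* Worked example of the gap encoding (illustrative): suppose three
     * adjacent update frames.  The topmost one is kept; the two below
     * it become the gap, so gap_size = 2 * sizeofW(StgUpdateFrame)
     * words.  The stack_gap marker ends up in the bottom-most frame of
     * the series and carries that size plus a next_gap link to the
     * previous gap (or to the sentinel just above the stack top).  The
     * second stage below walks this chain. */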
    // Now we have a stack with gaps in it, and we have to walk down
    // shoving the stack up to fill in the gaps.  A diagram might
    // help:
    //
    //    +| ********* |
    //     | ********* | <- sp
    //     |           |
    //     |           | <- gap_start
    //     | ......... |                |
    //     | stack_gap | <- gap         | chunk_size
    //     | ......... |                |
    //     | ......... | <- gap_end     v
    //     | ********* |
    //     | ********* |
    //    -| ********* |
    //
    // 'sp'  points to the current top-of-stack
    // 'gap' points to the stack_gap structure inside the gap
    // *****   indicates real stack data
    // .....   indicates gap
    // <empty> indicates unused
    //
    {
	void *sp;
	void *gap_start, *next_gap_start, *gap_end;
	nat chunk_size;

	next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
	sp = next_gap_start;

	while ((StgPtr)gap > tso->sp) {

	    // we're working in *bytes* now...
	    gap_start = next_gap_start;
	    gap_end = (void*) ((unsigned char*)gap_start - gap->gap_size * sizeof(W_));

	    gap = gap->next_gap;
	    next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));

	    chunk_size = (unsigned char*)gap_end - (unsigned char*)next_gap_start;
	    sp -= chunk_size;
	    memmove(sp, next_gap_start, chunk_size);
	}

	tso->sp = (StgPtr)sp;
    }
}
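/* Worked example (illustrative numbers): with 10 words of live stack
 * between the top of the stack and a 4-word gap, the memmove above
 * slides that 10-word chunk over the gap, and the squeezed stack ends
 * up 4 words shorter.  With several gaps the loop repeats chunk by
 * chunk, following the next_gap chain towards the stack top. */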
/* -----------------------------------------------------------------------------
 * Pausing a thread
 *
 * We have to prepare for GC - this means doing lazy black holing
 * here.  We also take the opportunity to do stack squeezing if it's
 * worthwhile.
 * -------------------------------------------------------------------------- */
void
threadPaused(Capability *cap, StgTSO *tso)
{
    StgClosure *frame;
    StgRetInfoTable *info;
    StgClosure *bh;
    StgPtr stack_end;
    nat words_to_squeeze = 0;
    nat weight           = 0;
    nat weight_pending   = 0;
    rtsBool prev_was_update_frame = rtsFalse;

    stack_end = &tso->stack[tso->stack_size];

    frame = (StgClosure *)tso->sp;

    while (1) {
	// If we've already marked this frame, then stop here.
	if (frame->header.info == (StgInfoTable *)&stg_marked_upd_frame_info) {
	    goto end;
	}

	info = get_ret_itbl(frame);

	switch (info->i.type) {

	case UPDATE_FRAME:

	    SET_INFO(frame, (StgInfoTable *)&stg_marked_upd_frame_info);

	    bh = ((StgUpdateFrame *)frame)->updatee;

	    if (closure_IND(bh) || bh->header.info == &stg_BLACKHOLE_info) {
		IF_DEBUG(squeeze, debugBelch("suspending duplicate work: %ld words of stack\n", (StgPtr)frame - tso->sp));

		// If this closure is already an indirection, then
		// suspend the computation up to this point:
		suspendComputation(cap,tso,(StgPtr)frame);

		// Now drop the update frame, and arrange to return
		// the value to the frame underneath:
		tso->sp = (StgPtr)frame + sizeofW(StgUpdateFrame) - 2;
		tso->sp[1] = (StgWord)bh;
		tso->sp[0] = (W_)&stg_enter_info;

		// And continue with threadPaused; there might be
		// yet more computation to suspend.
		threadPaused(cap,tso);
		return;
	    }

	    if (bh->header.info != &stg_CAF_BLACKHOLE_info) {
#if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
		debugBelch("Unexpected lazy BHing required at 0x%04lx\n",(long)bh);
#endif
		// zero out the slop so that the sanity checker can tell
		// where the next closure is.
		DEBUG_FILL_SLOP(bh);
#ifdef PROFILING
		// We pretend that bh is now dead.
		LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
#endif
		SET_INFO(bh,&stg_BLACKHOLE_info);
#ifdef PROFILING
		// We pretend that bh has just been created.
		LDV_RECORD_CREATE(bh);
#endif
	    }

	    frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
	    if (prev_was_update_frame) {
		words_to_squeeze += sizeofW(StgUpdateFrame);
		weight += weight_pending;
		weight_pending = 0;
	    }
	    prev_was_update_frame = rtsTrue;
	    break;

	case STOP_FRAME:
	    goto end;

	    // normal stack frames; do nothing except advance the pointer
	default:
	{
	    nat frame_size = stack_frame_sizeW(frame);
	    weight_pending += frame_size;
	    frame = (StgClosure *)((StgPtr)frame + frame_size);
	    prev_was_update_frame = rtsFalse;
	}
	}
    }

end:
    IF_DEBUG(squeeze,
	     debugBelch("words_to_squeeze: %d, weight: %d, squeeze: %s\n",
			words_to_squeeze, weight,
			weight < words_to_squeeze ? "YES" : "NO"));

    // Should we squeeze or not?  Arbitrary heuristic: we squeeze if
    // the number of words we have to shift down is less than the
    // number of stack words we squeeze away by doing so.
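    /* Worked example of the heuristic (illustrative numbers):
     * squeezing five adjacent update frames saves words_to_squeeze =
     * 5 * sizeofW(StgUpdateFrame) words, while "weight" approximates
     * the words of ordinary frames that would have to be shifted to
     * close the gaps.  With weight = 40 and words_to_squeeze = 15 we
     * would leave the stack alone; with weight = 10 we would squeeze.
     * (The test is currently hard-wired on below.) */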
    if (1 /*RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
	   weight < words_to_squeeze*/) {
	stackSqueeze(tso, (StgPtr)frame);
    }
}
/* -----------------------------------------------------------------------------
 * Debugging
 * -------------------------------------------------------------------------- */

#ifdef DEBUG
void
printMutableList(generation *gen)
{
    bdescr *bd;
    StgPtr p;

    debugBelch("@@ Mutable list %p: ", gen->mut_list);

    for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
	for (p = bd->start; p < bd->free; p++) {
	    debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));
	}
    }
    debugBelch("\n");
}
#endif