/* -----------------------------------------------------------------------------
 * (c) The GHC Team 1998-2003
 * Generational garbage collector
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "OSThreads.h"
#include "LdvProfile.h"
#include "BlockAlloc.h"
#include "ParTicky.h"        // ToDo: move into Rts.h
#include "GCCompact.h"
#include "RtsSignals.h"
#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif
#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif
#include "RetainerProfile.h"

// Turn off inlining when debugging - it obfuscates things
#ifdef DEBUG
# define STATIC_INLINE static
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
static StgClosure* static_objects;      // live static objects
StgClosure* scavenged_static_objects;   // static objects scavenged so far
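
/* (Editorial sketch, not part of the original file.)  The consing
 * pattern used by evacuate() below: the static link field doubles as
 * the membership test, so a NULL link means "not yet on any list",
 * and the end of the list is marked with END_OF_STATIC_LIST (1)
 * rather than NULL.  STATIC_LINK picks the link field appropriate to
 * the closure type. */
#if 0
    if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
        *STATIC_LINK(info,(StgClosure *)q) = static_objects;
        static_objects = (StgClosure *)q;
    }
#endif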
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;

/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Weak pointers
 */
StgWeak *old_weak_ptr_list; // also pending finaliser list

/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverse_weak_ptr_list() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;
/* List of all threads during GC
 */
static StgTSO *old_all_threads;
StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Saved nursery (used for 2-space collector only)
 */
static bdescr *saved_nursery;
static nat saved_n_blocks;

/* Data used for allocation area sizing.
 */
static lnat new_blocks;          // blocks allocated during this GC
static lnat new_scavd_blocks;    // ditto, but depth-first blocks
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
static lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 8
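
/* (Editorial illustration, duplicated from the THUNK_SELECTOR case of
 * evacuate() below.)  Once the bound is exceeded we stop trying to
 * collapse chains of selector thunks and simply copy the thunk: */
#if 0
    if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
        return copy(q,THUNK_SELECTOR_sizeW(),stp);
    }
#endif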
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static bdescr *     gc_alloc_block          ( step *stp );
static void         mark_root               ( StgClosure **root );

// Use a register argument for evacuate, if available.
#define REGPARM1 __attribute__((regparm(1)))

REGPARM1 static StgClosure * evacuate (StgClosure *q);

static void         zero_static_object_list ( StgClosure* first_static );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static StgClosure * eval_thunk_selector     ( nat field, StgSelector * p );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );

static void         scavenge_large_bitmap   ( StgPtr p,
                                              StgLargeBitmap *large_bitmap,
                                              nat size );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
STATIC_INLINE rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

STATIC_INLINE rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

STATIC_INLINE void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

STATIC_INLINE void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}
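
/* (Editorial sketch; the pop half is elided from this excerpt.)  The
 * mark stack is a plain LIFO of heap pointers.  Callers are expected
 * to check mark_stack_full() before pushing and, on overflow, to set
 * mark_stack_overflowed and fall back to the linear scan driven by
 * oldgen_scan_bd/oldgen_scan. */
STATIC_INLINE StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}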
/* -----------------------------------------------------------------------------
   Allocate a new to-space block in the given step.
   -------------------------------------------------------------------------- */

static bdescr *
gc_alloc_block(step *stp)
{
    bdescr *bd = allocBlock();
    bd->gen_no = stp->gen_no;
    bd->step = stp;
    bd->link = NULL;

    // blocks in to-space in generations up to and including N
    // get the BF_EVACUATED flag.
    if (stp->gen_no <= N) {
        bd->flags = BF_EVACUATED;
    } else {
        bd->flags = 0;
    }

    // Start a new to-space block, chain it on after the previous one.
    if (stp->hp_bd != NULL) {
        stp->hp_bd->free = stp->hp;
        stp->hp_bd->link = bd;
    }

    stp->hp_bd = bd;
    stp->hp    = bd->start;
    stp->hpLim = stp->hp + BLOCK_SIZE_W;

    stp->n_blocks++;
    new_blocks++;

    return bd;
}
static bdescr *
gc_alloc_scavd_block(step *stp)
{
    bdescr *bd = allocBlock();
    bd->gen_no = stp->gen_no;
    bd->step = stp;

    // blocks in to-space in generations up to and including N
    // get the BF_EVACUATED flag.
    if (stp->gen_no <= N) {
        bd->flags = BF_EVACUATED;
    } else {
        bd->flags = 0;
    }

    // the scavenged-object block goes on the front of stp->blocks,
    // so it won't be traversed by the scavenging sweep.
    bd->link = stp->blocks;
    stp->blocks = bd;
    stp->n_blocks++;

    if (stp->scavd_hp != NULL) {
        Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
    }
    stp->scavd_hp    = bd->start;
    stp->scavd_hpLim = stp->scavd_hp + BLOCK_SIZE_W;

    new_scavd_blocks++;

    return bd;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   Rough outline of the algorithm: for garbage collecting generation N
   (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all generations (mutable_list).

     - for each pointer, evacuate the object it points to into either

       + to-space of the step given by step->to, which is the next
         highest step in this generation or the first step in the next
         generation if this is the last step.

       + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
         When we evacuate an object we attempt to evacuate
         everything it points to into the same generation - this is
         achieved by setting evac_gen to the desired generation.  If
         we can't do this, then an entry in the mut list has to
         be made for the cross-generation pointer.

       + if the object is already in a generation > N, then leave
         it alone.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: all capabilities are held throughout GarbageCollect().

   -------------------------------------------------------------------------- */
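
/* (Editorial condensed sketch of the control flow below; the helper
 * names in this block are invented for exposition and do not exist
 * in the RTS.) */
#if 0
static void GarbageCollect_outline( void (*get_roots)(evac_fn) )
{
    initialise_to_space_for_gens_up_to_N();   // hypothetical helper
    scavenge_mutable_lists_of_older_gens();   // roots: gens > N
    get_roots(mark_root);                     // application roots
    markStablePtrTable(mark_root);            // more roots
    do {
        scavenge_all_pending_to_space();      // hypothetical helper
    } while (traverse_weak_ptr_list());       // may evacuate more
    tidy_up_and_free_from_space();            // hypothetical helper
}
#endif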
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, copied = 0, scavd_copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s, i;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif
#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n",
#endif

#if defined(RTS_USER_SIGNALS)
  // block signals
  blockUserSignals();
#endif

  // tell the STM to discard any cached closures it's hoping to re-use
  stmPreGCHook();

  // tell the stats department that we've started a GC
  stat_startGC();

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();

  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          >= generations[g].max_blocks) {
        N = g;
      }
    }
  }
  major_gc = (N == RtsFlags.GcFlags.generations-1);

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
  IF_DEBUG(sanity, checkFreeListSanity());

  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;
  /* Save the nursery if we're doing a two-space collection.
   * g0s0->blocks will be used for to-space, so we need to get the
   * nursery out of the way.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      saved_nursery = g0s0->blocks;
      saved_n_blocks = g0s0->n_blocks;
      g0s0->blocks = NULL;
      g0s0->n_blocks = 0;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  new_scavd_blocks = 0;

  // Initialise to-space in all the generations/steps that we're
  // collecting.
  for (g = 0; g <= N; g++) {

    // throw away the mutable list.  Invariant: the mutable list
    // always has at least one block; this means we can avoid a check for
    // NULL in recordMutable().
    if (g > 0) {
        freeChain(generations[g].mut_list);
        generations[g].mut_list = allocBlock();
        for (i = 0; i < n_capabilities; i++) {
            freeChain(capabilities[i].mut_lists[g]);
            capabilities[i].mut_lists[g] = allocBlock();
        }
    }
    for (s = 0; s < generations[g].n_steps; s++) {

      // generation 0, step 0 doesn't need to-space
      if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
          continue;
      }

      stp = &generations[g].steps[s];
      ASSERT(stp->gen_no == g);

      // start a new to-space for this step.
      stp->old_blocks   = stp->blocks;
      stp->n_old_blocks = stp->n_blocks;

      // allocate the first to-space block; extra blocks will be
      // chained on as necessary.
      bd = gc_alloc_block(stp);
      stp->scan = bd->start;

      // allocate a block for "already scavenged" objects.  This goes
      // on the front of the stp->blocks list, so it won't be
      // traversed by the scavenging sweep.
      gc_alloc_scavd_block(stp);

      // initialise the large object queues.
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;

      // mark the large objects as not evacuated yet
      for (bd = stp->large_objects; bd; bd = bd->link) {
          bd->flags &= ~BF_EVACUATED;
      }

      // for a compacted step, we need to allocate the bitmap
      if (stp->is_compacted) {
          nat bitmap_size; // in bytes
          bdescr *bitmap_bdescr;
          StgPtr bitmap;

          bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

          if (bitmap_size > 0) {
              bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
                                         / BLOCK_SIZE);
              stp->bitmap = bitmap_bdescr;
              bitmap = bitmap_bdescr->start;

              IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p",
                                      bitmap_size, bitmap));

              // don't forget to fill it with zeros!
              memset(bitmap, 0, bitmap_size);

              // For each block in this step, point to its bitmap from the
              // block descriptor.
              for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
                  bd->u.bitmap = bitmap;
                  bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);

                  // Also at this point we set the BF_COMPACTED flag
                  // for this block.  The invariant is that
                  // BF_COMPACTED is always unset, except during GC
                  // when it is set on those blocks which will be
                  // compacted.
                  bd->flags |= BF_COMPACTED;
              }
          }
      }
    }
  }
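
  /* (Editorial worked example for the mark-bitmap sizing above,
   * assuming 4Kbyte blocks and 32-bit words, i.e. BLOCK_SIZE_W =
   * 1024.)  Each block needs one mark bit per word: 1024 bits =
   * 32 words = 128 bytes.  That is BLOCK_SIZE/(sizeof(W_)*BITS_PER_BYTE)
   * = 128 bytes of bitmap_size per old block, and the per-block step
   * of BLOCK_SIZE_W/(sizeof(W_)*BITS_PER_BYTE) = 32 words in the
   * loop above. */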
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
          ASSERT(stp->blocks == NULL);
          bd = gc_alloc_block(stp);
      }
      if (stp->scavd_hp == NULL) {
          gc_alloc_scavd_block(stp);
      }

      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan = stp->hp;
      stp->scan_bd = stp->hp_bd;

      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }

    /* Move the private mutable lists from each capability onto the
     * main mutable list for the generation.
     */
    for (i = 0; i < n_capabilities; i++) {
        for (bd = capabilities[i].mut_lists[g];
             bd->link != NULL; bd = bd->link) {
            /* walk to the last block */
        }
        bd->link = generations[g].mut_list;
        generations[g].mut_list = capabilities[i].mut_lists[g];
        capabilities[i].mut_lists[g] = allocBlock();
    }
  }

  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }
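
  /* (Editorial note.)  With MARK_STACK_BLOCKS = 4 this reserves
   * 4 * BLOCK_SIZE_W slots, e.g. 4096 pointers for 4Kbyte blocks and
   * 32-bit words; mark_stack_full() then triggers the linear-scan
   * fallback rather than growing the stack. */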
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = allocBlock();
      // mut_list always has at least one block.
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose, printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow roots from the CAF list (used by GHCi)
   */
  evac_gen = 0;
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  */

#if defined(PAR)
  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif

  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_stage = WeakPtrs;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(static_objects));
        scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      long gen;
      int st;

    loop2:
      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
          (mark_stack_bdescr != NULL && !mark_stack_empty())) {
          scavenge_mark_stack();
          flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
        for (st = generations[gen].n_steps; --st >= 0; ) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
              continue;
          }
          stp = &generations[gen].steps[st];

          if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
              scavenge(stp);
              flag = rtsTrue;
              goto loop2;
          }
          if (stp->new_large_objects != NULL) {
              scavenge_large(stp);
              flag = rtsTrue;
              goto loop2;
          }
        }
      }
    }

    if (flag) { goto loop; }

    // must be last...  invariant is that everything is fully
    // scavenged at this point.
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
        flag = rtsTrue;
        goto loop;
    }
  }
  /* Update the pointers from the task list - these are
   * treated as weak pointers because we want to allow a main thread
   * to get a BlockedOnDeadMVar exception in the same way as any other
   * thread.  Note that the threads should all have been retained by
   * GC by virtue of being on the all_threads list, we're just
   * updating pointers here.
   */
  {
      Task *task;
      StgTSO *tso;
      for (task = all_tasks; task != NULL; task = task->all_link) {
          if (!task->stopped && task->tso) {
              ASSERT(task->tso->bound == task);
              tso = (StgTSO *) isAlive((StgClosure *)task->tso);
              if (tso == NULL) {
                  barf("task %p: main thread %d has been GC'd",
              }
              task->tso = tso;
          }
      }
  }

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              ASSERT(Bdescr(stp->hp) == stp->hp_bd);
              stp->hp_bd->free = stp->hp;
              Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
          }
      }
  }

#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
    LdvCensusForDead(N);
#endif

  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
        generations[g].collections++; // for stats
    }

    // Count the mutable list as bytes "copied" for the purposes of
    // stats.  Every mutable list is copied during every GC.
    if (g > 0) {
        nat mut_list_size = 0;
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            mut_list_size += bd->free - bd->start;
        }
        copied += mut_list_size;

        IF_DEBUG(gc, debugBelch("mut_list_size: %d (%d vars, %d arrays, %d others)\n", mut_list_size * sizeof(W_), mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS));
    }

    for (s = 0; s < generations[g].n_steps; s++) {
      bdescr *next;
      stp = &generations[g].steps[s];

      if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
        // stats information: how much we copied
        if (g <= N) {
          copied -= stp->hp_bd->start + BLOCK_SIZE_W -
            stp->hp;
          scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
        }
      }
      // for generations we collected...
      if (g <= N) {

          /* free old memory and shift to-space into from-space for all
           * the collected steps (except the allocation area).  These
           * freed blocks will probably be quickly recycled.
           */
          if (!(g == 0 && s == 0)) {
              if (stp->is_compacted) {
                  // for a compacted step, just shift the new to-space
                  // onto the front of the now-compacted existing blocks.
                  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                      bd->flags &= ~BF_EVACUATED;  // now from-space
                  }
                  // tack the new blocks on the end of the existing blocks
                  if (stp->old_blocks != NULL) {
                      for (bd = stp->old_blocks; bd != NULL; bd = next) {
                          // NB. this step might not be compacted next
                          // time, so reset the BF_COMPACTED flags.
                          // They are set before GC if we're going to
                          // compact.  (search for BF_COMPACTED above)
                          bd->flags &= ~BF_COMPACTED;
                          next = bd->link;
                          if (next == NULL) {
                              bd->link = stp->blocks;
                          }
                      }
                      stp->blocks = stp->old_blocks;
                  }
                  // add the new blocks to the block tally
                  stp->n_blocks += stp->n_old_blocks;
                  ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
              } else {
                  freeChain(stp->old_blocks);
                  for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                      bd->flags &= ~BF_EVACUATED;  // now from-space
                  }
              }
              stp->old_blocks = NULL;
              stp->n_old_blocks = 0;
          }
          /* LARGE OBJECTS.  The current live large objects are chained on
           * scavenged_large, having been moved during garbage
           * collection from large_objects.  Any objects left on
           * large_objects list are therefore dead, so we free them here.
           */
          for (bd = stp->large_objects; bd != NULL; bd = next) {
              next = bd->link;
              freeGroup(bd);
          }

          // update the count of blocks used by large objects
          for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED;
          }
          stp->large_objects  = stp->scavenged_large_objects;
          stp->n_large_blocks = stp->n_scavenged_large_blocks;

      } else { // for older generations...

          /* For older generations, we need to append the
           * scavenged_large_object list (i.e. large objects that have been
           * promoted during this GC) to the large_object list for that step.
           */
          for (bd = stp->scavenged_large_objects; bd; bd = next) {
              next = bd->link;
              bd->flags &= ~BF_EVACUATED;
              dbl_link_onto(bd, &stp->large_objects);
          }

          // add the new blocks we promoted during this GC
          stp->n_large_blocks += stp->n_scavenged_large_blocks;
      }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
             oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
                          RtsFlags.GcFlags.minAllocAreaSize);

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
          (RtsFlags.GcFlags.compact ||
           (max > 0 &&
            oldest_gen->steps[0].n_blocks >
            (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
          oldest_gen->steps[0].is_compacted = 1;
          // debugBelch("compaction: on\n", live);
      } else {
          oldest_gen->steps[0].is_compacted = 0;
          // debugBelch("compaction: off\n", live);
      }

      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

          // this test is necessary to ensure that the calculations
          // below don't have any negative results - we're working
          // with unsigned values here.
          if (max < min_alloc) {
              max = min_alloc;
          }

          if (oldest_gen->steps[0].is_compacted) {
              if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
              }
          } else {
              if ( (size * (gens - 1) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2);
              }
          }
      }

#if 0
      debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
                 min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
          generations[g].max_blocks = size;
      }
  }
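
  /* (Editorial worked example for the sizing above.)  Suppose gens =
   * 2, max = 10000 blocks, min_alloc = 1000 and compaction is off:
   * the test size*(gens-1)*2 + min_alloc > max fires once 2*size +
   * 1000 exceeds 10000, so size is clamped to (10000-1000)/2 = 4500
   * blocks - enough to copy the old generation once and still leave
   * the allocation area its minimum share. */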
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
      freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_Hp = NULL;
  alloc_HpLim = NULL;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;

  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
              stp->bitmap = NULL;
          }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      nat blocks;

      if (g0s0->old_blocks != NULL) {
          freeChain(g0s0->old_blocks);
      }
      for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
          bd->flags = 0;  // now from-space
      }
      g0s0->old_blocks = g0s0->blocks;
      g0s0->n_old_blocks = g0s0->n_blocks;
      g0s0->blocks = saved_nursery;
      g0s0->n_blocks = saved_n_blocks;

      /* For a two-space collector, we need to resize the nursery. */

      /* set up a new nursery.  Allocate a nursery size based on a
       * function of the amount of live data (by default a factor of 2)
       * Use the blocks from the old nursery if possible, freeing up any
       * left over blocks.
       *
       * If we get near the maximum heap size, then adjust our nursery
       * size accordingly.  If the nursery is the same size as the live
       * data (L), then we need 3L bytes.  We can reduce the size of the
       * nursery to bring the required memory down near 2L bytes.
       *
       * A normal 2-space collector would need 4L bytes to give the same
       * performance we get from 3L bytes, reducing to the same
       * performance at 2L bytes.
       */
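
      /* (Editorial worked example.)  With live data L = 1000 blocks
       * the nursery would normally also get ~1000 blocks (factor 2
       * overall), needing ~3L in total.  Near a maxHeapSize of 2500
       * blocks the code below instead sets the nursery to
       * maxHeapSize - 2L = 500 blocks, bringing the total back down
       * to about 2L + 500 = 2500. */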
      blocks = g0s0->n_old_blocks;

      if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
           blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
           RtsFlags.GcFlags.maxHeapSize ) {
          long adjusted_blocks;  // signed on purpose
          int pc_free;

          adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
          IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
          pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
          if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
              heapOverflow();
          }
          blocks = adjusted_blocks;
      } else {
          blocks *= RtsFlags.GcFlags.oldGenFactor;
          if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }
      }
      resizeNurseries(blocks);

  } else {  /* Generational collector */
      /* Generational collector:
       * If the user has given us a suggested heap size, adjust our
       * allocation area to make best use of the memory available.
       */
      if (RtsFlags.GcFlags.heapSizeSuggestion) {
          long blocks;
          nat needed = calcNeeded();  // approx blocks needed at next GC

          /* Guess how much will be live in generation 0 step 0 next time.
           * A good approximation is obtained by finding the
           * percentage of g0s0 that was live at the last minor GC.
           */
          if (N == 0) {
              g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
          }

          /* Estimate a size for the allocation area based on the
           * information available.  We might end up going slightly under
           * or over the suggested heap size, but we should be pretty
           * close on average.
           *
           * Formula:            suggested - needed
           *                ----------------------------
           *                    1 + g0s0_pcnt_kept/100
           *
           * where 'needed' is the amount of memory needed at the next
           * collection for collecting all steps except g0s0.
           */
          blocks =
              (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
              (100 + (long)g0s0_pcnt_kept);
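
          /* (Editorial worked example.)  With heapSizeSuggestion =
           * 1000 blocks, needed = 200 and g0s0_pcnt_kept = 30, this
           * gives (1000 - 200) * 100 / 130 = 615 blocks of nursery;
           * the ~185 blocks expected to survive into to-space plus
           * 'needed' then keep the total close to the suggestion. */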
          if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }

          resizeNurseries((nat)blocks);

      } else {
          // we might have added extra large blocks to the nursery, so
          // resize back to minAllocAreaSize again.
          resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
      }
  }
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
      zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(last_free_capability, old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

  // symbol-table based profiling
  /*  heapCensus(to_blocks); */ /* ToDo */

  // restore enclosing cost centre
#ifdef PROFILING
  CCCS = prev_CCS;
#endif

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, live, copied, scavd_copied, N);

#if defined(RTS_USER_SIGNALS)
  // unblock signals again
  unblockUserSignals();
#endif
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.

   There are three distinct stages to processing weak pointers:

   - weak_stage == WeakPtrs

     We process all the weak pointers whose keys are alive (evacuate
     their values and finalizers), and repeat until we can find no new
     live keys.  If no live keys are found in this pass, then we
     evacuate the finalizers of all the dead weak pointers in order to
     run them.

   - weak_stage == WeakThreads

     Now, we discover which *threads* are still alive.  Pointers to
     threads from the all_threads and main thread lists are the
     weakest of all: a pointer from the finalizer of a dead weak
     pointer can keep a thread alive.  Any threads found to be unreachable
     are evacuated and placed on the resurrected_threads list so we
     can send them a signal later.

   - weak_stage == WeakDone

     No more evacuation is done.

   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  switch (weak_stage) {

  case WeakDone:
      return rtsFalse;

  case WeakPtrs:
      /* doesn't matter where we evacuate values/finalizers to, since
       * these pointers are treated as roots (iff the keys are alive).
       */
      evac_gen = 0;

      last_w = &old_weak_ptr_list;
      for (w = old_weak_ptr_list; w != NULL; w = next_w) {

          /* There might be a DEAD_WEAK on the list if finalizeWeak# was
           * called on a live weak pointer object.  Just remove it.
           */
          if (w->header.info == &stg_DEAD_WEAK_info) {
              next_w = ((StgDeadWeak *)w)->link;
              *last_w = next_w;
              continue;
          }

          switch (get_itbl(w)->type) {

          case EVACUATED:
              next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
              *last_w = next_w;
              continue;

          case WEAK:
              /* Now, check whether the key is reachable.
               */
              new = isAlive(w->key);
              if (new != NULL) {
                  w->key = new;
                  // evacuate the value and finalizer
                  w->value = evacuate(w->value);
                  w->finalizer = evacuate(w->finalizer);
                  // remove this weak ptr from the old_weak_ptr list
                  *last_w = w->link;
                  next_w = w->link;
                  // and put it on the new weak ptr list
                  w->link = weak_ptr_list;
                  weak_ptr_list = w;
                  flag = rtsTrue;

                  IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p",
                                            w, w->key));
                  continue;
              } else {
                  last_w = &(w->link);
                  next_w = w->link;
                  continue;
              }

          default:
              barf("traverse_weak_ptr_list: not WEAK");
          }
      }

      /* If we didn't make any changes, then we can go round and kill all
       * the dead weak pointers.  The old_weak_ptr list is used as a list
       * of pending finalizers later on.
       */
      if (flag == rtsFalse) {
          for (w = old_weak_ptr_list; w; w = w->link) {
              w->finalizer = evacuate(w->finalizer);
          }

          // Next, move to the WeakThreads stage after fully
          // scavenging the finalizers we've just evacuated.
          weak_stage = WeakThreads;
      }

      return rtsTrue;
  case WeakThreads:
      /* Now deal with the all_threads list, which behaves somewhat like
       * the weak ptr list.  If we discover any threads that are about to
       * become garbage, we wake them up and administer an exception.
       */
      {
          StgTSO *t, *tmp, *next, **prev;

          prev = &old_all_threads;
          for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

              tmp = (StgTSO *)isAlive((StgClosure *)t);

              if (tmp != NULL) {
                  t = tmp;
              }

              ASSERT(get_itbl(t)->type == TSO);
              switch (t->what_next) {
              case ThreadRelocated:
                  next = t->link;
                  *prev = next;
                  continue;
              case ThreadKilled:
              case ThreadComplete:
                  // finished or died.  The thread might still be alive, but we
                  // don't keep it on the all_threads list.  Don't forget to
                  // stub out its global_link field.
                  next = t->global_link;
                  t->global_link = END_TSO_QUEUE;
                  *prev = next;
                  continue;
              default:
                  ;
              }

              // Threads blocked on black holes: if the black hole
              // is alive, then the thread is alive too.
              if (tmp == NULL && t->why_blocked == BlockedOnBlackHole) {
                  if (isAlive(t->block_info.closure)) {
                      t = (StgTSO *)evacuate((StgClosure *)t);
                      tmp = t;
                      flag = rtsTrue;
                  }
              }

              if (tmp == NULL) {
                  // not alive (yet): leave this thread on the
                  // old_all_threads list.
                  prev = &(t->global_link);
                  next = t->global_link;
              } else {
                  // alive: move this thread onto the all_threads list.
                  next = t->global_link;
                  t->global_link = all_threads;
                  all_threads = t;
                  *prev = next;
              }
          }
      }

      /* If we evacuated any threads, we need to go back to the scavenger.
       */
      if (flag) return rtsTrue;

      /* And resurrect any threads which were about to become garbage.
       */
      {
          StgTSO *t, *tmp, *next;
          for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
              next = t->global_link;
              tmp = (StgTSO *)evacuate((StgClosure *)t);
              tmp->global_link = resurrected_threads;
              resurrected_threads = tmp;
          }
      }

      /* Finally, we can update the blackhole_queue.  This queue
       * simply strings together TSOs blocked on black holes, it is
       * not intended to keep anything alive.  Hence, we do not follow
       * pointers on the blackhole_queue until now, when we have
       * determined which TSOs are otherwise reachable.  We know at
       * this point that all TSOs have been evacuated, however.
       */
      {
          StgTSO **pt;
          for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
              *pt = (StgTSO *)isAlive((StgClosure *)*pt);
              ASSERT(*pt != NULL);
          }
      }

      weak_stage = WeakDone;  // *now* we're done,
      return rtsTrue;         // but one more round of scavenging, please

  default:
      barf("traverse_weak_ptr_list");
      return rtsTrue;
  }
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
      ASSERT(w->header.info == &stg_DEAD_WEAK_info
             || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
      w = (StgWeak *)evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);

    // ignore static closures
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED(p)) {
        return p;
    }

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)p);
    if (bd->gen_no > N) {
        return p;
    }

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) {
        return p;
    }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) {
        return NULL;
    }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
        return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:        // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
        // follow indirections
        p = ((StgInd *)p)->indirectee;
        continue;

    case EVACUATED:
        // alive!
        return ((StgEvacuated *)p)->evacuee;

    case TSO:
        if (((StgTSO *)p)->what_next == ThreadRelocated) {
            p = (StgClosure *)((StgTSO *)p)->link;
            continue;
        }
        return NULL;

    default:
        // dead.
        return NULL;
    }
  }
}
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

STATIC_INLINE void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
    // not true: (ToDo: perhaps it should be)
    // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
    SET_INFO(p, &stg_EVACUATED_info);
    ((StgEvacuated *)p)->evacuee = dest;
}
STATIC_INLINE StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  StgPtr to, from;
  nat i;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
      failed_to_evac = rtsTrue;
#else
      stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
      gc_alloc_block(stp);
  }

  to = stp->hp;
  from = (StgPtr)src;
  stp->hp = to + size;
  for (i = 0; i < size; i++) { // unroll for small i
      to[i] = from[i];
  }

  upd_evacuee((StgClosure *)from,(StgClosure *)to);

#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(from, size_org);
#endif
  return (StgClosure *)to;
}
// Same as copy() above, except the object will be allocated in memory
// that will not be scavenged.  Used for objects that have no pointer
// fields.
STATIC_INLINE StgClosure *
copy_noscav(StgClosure *src, nat size, step *stp)
{
  StgPtr to, from;
  nat i;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
      failed_to_evac = rtsTrue;
#else
      stp = &generations[evac_gen].steps[0];
#endif
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->scavd_hp + size >= stp->scavd_hpLim) {
      gc_alloc_scavd_block(stp);
  }

  to = stp->scavd_hp;
  from = (StgPtr)src;
  stp->scavd_hp = to + size;
  for (i = 0; i < size; i++) { // unroll for small i
      to[i] = from[i];
  }

  upd_evacuee((StgClosure *)from,(StgClosure *)to);

#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(from, size_org);
#endif
  return (StgClosure *)to;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */

static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;
#ifdef PROFILING
  // @LDV profiling
  nat size_to_copy_org = size_to_copy;
#endif

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
      failed_to_evac = rtsTrue;
#else
      stp = &generations[evac_gen].steps[0];
#endif
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
      gc_alloc_block(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
      *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);

#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  // size_to_copy_org is wrong because the closure already occupies size_to_reserve
  // words.
  SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
  // fill the slop
  if (size_to_reserve - size_to_copy_org > 0)
      FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
  return (StgClosure *)dest;
}
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

static void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
         (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
      /* Don't forget to set the failed_to_evac flag if we didn't get
       * the desired destination (see comments in evacuate()).
       */
      if (bd->gen_no < evac_gen) {
          failed_to_evac = rtsTrue;
          TICK_GC_FAILED_PROMOTION();
      }
      return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
      bd->u.back->link = bd->link;
  } else { // first object in the list
      stp->large_objects = bd->link;
  }
  if (bd->link) {
      bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
#ifdef NO_EAGER_PROMOTION
      failed_to_evac = rtsTrue;
#else
      stp = &generations[evac_gen].steps[0];
#endif
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     then just copy the object
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.


   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   -------------------------------------------------------------------------- */
REGPARM1 static StgClosure *
evacuate(StgClosure *q)
{
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  if (!HEAP_ALLOCED(q)) {

      if (!major_gc) return q;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0 &&
              *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
              *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
              *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL
              && *IND_STATIC_LINK((StgClosure *)q) == NULL) {
              *IND_STATIC_LINK((StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
              *STATIC_LINK(info,(StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case CONSTR_INTLIKE:
      case CONSTR_CHARLIKE:
      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return q;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }

  bd = Bdescr((P_)q);

  if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
          failed_to_evac = rtsTrue;
          TICK_GC_FAILED_PROMOTION();
      }
      return q;
  }

  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

      /* pointer into to-space: just return it.  This normally
       * shouldn't happen, but allowing it makes certain things
       * slightly easier (eg. the mutable list can contain the same
       * object twice, for example).
       */
      if (bd->flags & BF_EVACUATED) {
          if (bd->gen_no < evac_gen) {
              failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return q;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->link;
              goto loop;
          }
          evacuate_large((P_)q);
          return q;
      }

      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (bd->flags & BF_COMPACTED) {
          if (!is_marked((P_)q,bd)) {
              mark((P_)q,bd);
              if (mark_stack_full()) {
                  mark_stack_overflowed = rtsTrue;
                  reset_mark_stack();
              }
              push_mark_stack((P_)q);
          }
          return q;
      }
  }

  stp = bd->step->to;

  info = get_itbl(q);
  switch (info->type) {

  case MUT_VAR:
  case MVAR:
      return copy(q,sizeW_fromITBL(info),stp);

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else
      return copy_noscav(q,sizeofW(StgHeader)+1,stp);
  }
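
  /* (Editorial note.)  E.g. a heap cell C# 'x' or I# 3 is not copied
   * at all: it is replaced by a pointer into the static
   * CHARLIKE/INTLIKE closure tables, so small characters and integers
   * in the given ranges are shared rather than duplicated in
   * to-space. */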
  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:
  case THUNK_0_1:
      return copy(q,sizeofW(StgThunk)+1,stp);

  case THUNK_1_1:
  case THUNK_2_0:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
          bd->step->no != 0 &&
          bd->step->no == generations[bd->gen_no].n_steps-1) {
          stp = bd->step;
      }
#endif
      return copy(q,sizeofW(StgThunk)+2,stp);

  case FUN_1_1:
  case FUN_2_0:
  case CONSTR_1_1:
  case CONSTR_2_0:
  case FUN_0_2:
      return copy(q,sizeofW(StgHeader)+2,stp);

  case CONSTR_0_2:
      return copy_noscav(q,sizeofW(StgHeader)+2,stp);

  case THUNK:
      return copy(q,thunk_sizeW_fromITBL(info),stp);

  case FUN:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case STABLE_NAME:
      return copy(q,sizeW_fromITBL(info),stp);

  case BCO:
      return copy(q,bco_sizeW((StgBCO *)q),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);

  case THUNK_SELECTOR:
    {
        StgClosure *p;

        if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
            return copy(q,THUNK_SELECTOR_sizeW(),stp);
        }

        p = eval_thunk_selector(info->layout.selector_offset,
                                (StgSelector *)q);

        if (p == NULL) {
            return copy(q,THUNK_SELECTOR_sizeW(),stp);
        } else {
            // q is still BLACKHOLE'd.
            thunk_selector_depth++;
            p = evacuate(p);
            thunk_selector_depth--;

            // Update the THUNK_SELECTOR with an indirection to the
            // EVACUATED closure now at p.  Why do this rather than
            // upd_evacuee(q,p)?  Because we have an invariant that an
            // EVACUATED closure always points to an object in the
            // same or an older generation (required by the short-cut
            // test in the EVACUATED case, below).
            SET_INFO(q, &stg_IND_info);
            ((StgInd *)q)->indirectee = p;

#ifdef PROFILING
            // We store the size of the just evacuated object in the
            // LDV word so that the profiler can guess the position of
            // the next object later.
            SET_EVACUAEE_FOR_LDV(q, THUNK_SELECTOR_sizeW());
#endif
            return p;
        }
    }
  case IND:
  case IND_OLDGEN:
      // follow chains of indirections, don't evacuate them
      q = ((StgInd*)q)->indirectee;
      goto loop;

  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
      // shouldn't see these
      barf("evacuate: stack frame at %p\n", q);

  case PAP:
      return copy(q,pap_sizeW((StgPAP*)q),stp);

  case AP:
      return copy(q,ap_sizeW((StgAP*)q),stp);

  case AP_STACK:
      return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
  case EVACUATED:
      /* Already evacuated, just return the forwarding address.
       * HOWEVER: if the requested destination generation (evac_gen) is
       * older than the actual generation (because the object was
       * already evacuated to a younger generation) then we have to
       * set the failed_to_evac flag to indicate that we couldn't
       * manage to promote the object to the desired generation.
       *
       * Optimisation: the check is fairly expensive, but we can often
       * shortcut it if either the required generation is 0, or the
       * current object (the EVACUATED) is in a high enough generation.
       * We know that an EVACUATED always points to an object in the
       * same or an older generation.  stp is the lowest step that the
       * current object would be evacuated to, so we only do the full
       * check if stp is too low.
       */
      if (evac_gen > 0 && stp->gen_no < evac_gen) {  // optimisation
          StgClosure *p = ((StgEvacuated*)q)->evacuee;
          if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
              failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
      }
      return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
      // just copy the block
      return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->link;
          goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
          StgTSO *new_tso;
          StgPtr p, q;

          new_tso = (StgTSO *)copyPart((StgClosure *)tso,
                                       tso_sizeW(tso),
                                       sizeofW(StgTSO), stp);
          move_TSO(tso, new_tso);
          for (p = tso->sp, q = new_tso->sp;
               p < tso->stack+tso->stack_size;) {
              *q++ = *p++;
          }

          return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH:
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
               debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)",
                          q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
      ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
      to = copy(q,sizeofW(StgBlockedFetch),stp);
      IF_DEBUG(gc,
               debugBelch("@@ evacuate: %p (%s) to %p (%s)",
                          q, info_type(q), to, info_type(to)));
      return to;

  case FETCH_ME:
      ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
      to = copy(q,sizeofW(StgFetchMe),stp);
      IF_DEBUG(gc,
               debugBelch("@@ evacuate: %p (%s) to %p (%s)",
                          q, info_type(q), to, info_type(to)));
      return to;

  case FETCH_ME_BQ:
      ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
      to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
      IF_DEBUG(gc,
               debugBelch("@@ evacuate: %p (%s) to %p (%s)",
                          q, info_type(q), to, info_type(to)));
      return to;
#endif
  case TREC_HEADER:
      return copy(q,sizeofW(StgTRecHeader),stp);

  case TVAR_WAIT_QUEUE:
      return copy(q,sizeofW(StgTVarWaitQueue),stp);

  case TVAR:
      return copy(q,sizeofW(StgTVar),stp);

  case TREC_CHUNK:
      return copy(q,sizeofW(StgTRecChunk),stp);
  default:
      barf("evacuate: strange closure type %d", (int)(info->type));
  }

  barf("evacuate");
}
/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
   a closure pointer if we evaluated it and this is the result.  Note
   that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
   reducing it to HNF, just that we have eliminated the selection.
   The result might be another thunk, or even another THUNK_SELECTOR.

   If the return value is non-NULL, the original selector thunk has
   been BLACKHOLE'd, and should be updated with an indirection or a
   forwarding pointer.  If the return value is NULL, then the selector
   thunk is unchanged.

   ToDo: the treatment of THUNK_SELECTORS could be improved in the
   following way (from a suggestion by Ian Lynagh):

   We can have a chain like this:

      sel_0 --> (a,b)
                 |
                 |-----> sel_0 --> (a,b)
                                    |
                                    |-----> sel_0 --> ...

   and the depth limit means we don't go all the way to the end of the
   chain, which results in a space leak.  This affects the recursive
   call to evacuate() in the THUNK_SELECTOR case in evacuate(): *not*
   the recursive call to eval_thunk_selector() in
   eval_thunk_selector().

   We could eliminate the depth bound in this case, in the following
   way:

      - traverse the chain once to discover the *value* of the
        THUNK_SELECTOR.  Mark all THUNK_SELECTORS that we
        visit on the way as having been visited already (somehow).

      - in a second pass, traverse the chain again updating all
        THUNK_SELECTORS that we find on the way with indirections to
        the value.

      - if we encounter a "marked" THUNK_SELECTOR in a normal
        evacuate(), we know it can't be updated so just evac it.

   Program that illustrates the problem:

        foo [] = ([], [])
        foo (x:xs) = let (ys, zs) = foo xs
                     in if x >= 0 then (x:ys, zs) else (ys, x:zs)

        main = bar [1..(100000000::Int)]
        bar xs = (\(ys, zs) -> print ys >> print zs) (foo xs)

   -------------------------------------------------------------------------- */
static inline rtsBool
is_to_space ( StgClosure *p )
{
    bdescr *bd;

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED(p) &&
        ((bd->flags & BF_EVACUATED)
         || ((bd->flags & BF_COMPACTED) &&
             is_marked((P_)p,bd)))) {
        return rtsTrue;
    } else {
        return rtsFalse;
    }
}
2279 eval_thunk_selector( nat field, StgSelector * p )
2282 const StgInfoTable *info_ptr;
2283 StgClosure *selectee;
2285 selectee = p->selectee;
2287 // Save the real info pointer (NOTE: not the same as get_itbl()).
2288 info_ptr = p->header.info;
2290 // If the THUNK_SELECTOR is in a generation that we are not
2291 // collecting, then bail out early. We won't be able to save any
2292 // space in any case, and updating with an indirection is trickier
2294 if (Bdescr((StgPtr)p)->gen_no > N) {
2298 // BLACKHOLE the selector thunk, since it is now under evaluation.
2299 // This is important to stop us going into an infinite loop if
2300 // this selector thunk eventually refers to itself.
2301 SET_INFO(p,&stg_BLACKHOLE_info);
2305 // We don't want to end up in to-space, because this causes
2306 // problems when the GC later tries to evacuate the result of
2307 // eval_thunk_selector(). There are various ways this could
2310 // 1. following an IND_STATIC
2312 // 2. when the old generation is compacted, the mark phase updates
2313 // from-space pointers to be to-space pointers, and we can't
2314 // reliably tell which we're following (eg. from an IND_STATIC).
2316 // 3. compacting GC again: if we're looking at a constructor in
2317 // the compacted generation, it might point directly to objects
2318 in to-space. We must bail out here, otherwise doing the selection
2319 // will result in a to-space pointer being returned.
2321 // (1) is dealt with using a BF_EVACUATED test on the
2322 // selectee. (2) and (3): we can tell if we're looking at an
2323 // object in the compacted generation that might point to
2324 // to-space objects by testing that (a) it is BF_COMPACTED, (b)
2325 // the compacted generation is being collected, and (c) the
2326 // object is marked. Only a marked object may have pointers that
2327 // point to to-space objects, because that happens when
2328 // scavenging.
2330 // The to-space test is now embodied in the is_to_space() inline
2331 // function, as it is re-used below.
2333 if (is_to_space(selectee)) {
2337 info = get_itbl(selectee);
2338 switch (info->type) {
2346 case CONSTR_NOCAF_STATIC:
2347 // check that the size is in range
2348 ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
2349 info->layout.payload.nptrs));
2351 // Select the right field from the constructor, and check
2352 // that the result isn't in to-space. It might be in
2353 // to-space if, for example, this constructor contains
2354 // pointers to younger-gen objects (and is on the mut-once
2359 q = selectee->payload[field];
2360 if (is_to_space(q)) {
2370 case IND_OLDGEN_PERM:
2372 selectee = ((StgInd *)selectee)->indirectee;
2376 // We don't follow pointers into to-space; the constructor
2377 // has already been evacuated, so we won't save any space
2378 // leaks by evaluating this selector thunk anyhow.
2381 case THUNK_SELECTOR:
2385 // check that we don't recurse too much, re-using the
2386 // depth bound also used in evacuate().
2387 if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
2390 thunk_selector_depth++;
2392 val = eval_thunk_selector(info->layout.selector_offset,
2393 (StgSelector *)selectee);
2395 thunk_selector_depth--;
2400 // We evaluated this selector thunk, so update it with
2401 // an indirection. NOTE: we don't use UPD_IND here,
2402 // because we are guaranteed that p is in a generation
2403 // that we are collecting, and we never want to put the
2404 // indirection on a mutable list.
2406 // For the purposes of LDV profiling, we have destroyed
2407 // the original selector thunk.
2408 SET_INFO(p, info_ptr);
2409 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(selectee);
2411 ((StgInd *)selectee)->indirectee = val;
2412 SET_INFO(selectee,&stg_IND_info);
2414 // For the purposes of LDV profiling, we have created an
2416 LDV_RECORD_CREATE(selectee);
2433 case SE_CAF_BLACKHOLE:
2445 // not evaluated yet
2449 barf("eval_thunk_selector: strange selectee %d",
2454 // We didn't manage to evaluate this thunk; restore the old info pointer
2455 SET_INFO(p, info_ptr);
2459 /* -----------------------------------------------------------------------------
2460 move_TSO is called to update the TSO structure after it has been
2461 moved from one place to another.
2462 -------------------------------------------------------------------------- */
2465 move_TSO (StgTSO *src, StgTSO *dest)
2469 // relocate the stack pointer...
2470 diff = (StgPtr)dest - (StgPtr)src; // In *words*
2471 dest->sp = (StgPtr)dest->sp + diff;
2474 /* Similar to scavenge_large_bitmap(), but we don't write back the
2475 * pointers we get back from evacuate().
2478 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
2485 bitmap = large_srt->l.bitmap[b];
2486 size = (nat)large_srt->l.size;
2487 p = (StgClosure **)large_srt->srt;
2488 for (i = 0; i < size; ) {
2489 if ((bitmap & 1) != 0) {
2494 if (i % BITS_IN(W_) == 0) {
2496 bitmap = large_srt->l.bitmap[b];
2498 bitmap = bitmap >> 1;
2503 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
2504 * srt field in the info table. That's ok, because we'll
2505 * never dereference it.
2508 scavenge_srt (StgClosure **srt, nat srt_bitmap)
2513 bitmap = srt_bitmap;
2516 if (bitmap == (StgHalfWord)(-1)) {
2517 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
2521 while (bitmap != 0) {
2522 if ((bitmap & 1) != 0) {
2523 #ifdef ENABLE_WIN32_DLL_SUPPORT
2524 // Special-case to handle references to closures hiding out in DLLs, since
2525 // double indirections are required to get at those. The code generator knows
2526 // which is which when generating the SRT, so it stores the (indirect)
2527 // reference to the DLL closure in the table by first adding one to it.
2528 // We check for this here, and undo the addition before evacuating it.
2530 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
2531 // closure that's fixed at link-time, and no extra magic is required.
2532 if ( (unsigned long)(*srt) & 0x1 ) {
2533 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
2542 bitmap = bitmap >> 1;
2548 scavenge_thunk_srt(const StgInfoTable *info)
2550 StgThunkInfoTable *thunk_info;
2552 if (!major_gc) return;
2554 thunk_info = itbl_to_thunk_itbl(info);
2555 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
2559 scavenge_fun_srt(const StgInfoTable *info)
2561 StgFunInfoTable *fun_info;
2563 if (!major_gc) return;
2565 fun_info = itbl_to_fun_itbl(info);
2566 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
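/* Note: both of the SRT scavengers above bail out immediately when
 * !major_gc: SRT entries point to static closures, and those are only
 * traced during a major collection.
 */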
2569 /* -----------------------------------------------------------------------------
2571 -------------------------------------------------------------------------- */
2574 scavengeTSO (StgTSO *tso)
2576 if ( tso->why_blocked == BlockedOnMVar
2577 || tso->why_blocked == BlockedOnBlackHole
2578 || tso->why_blocked == BlockedOnException
2580 || tso->why_blocked == BlockedOnGA
2581 || tso->why_blocked == BlockedOnGA_NoSend
2584 tso->block_info.closure = evacuate(tso->block_info.closure);
2586 if ( tso->blocked_exceptions != NULL ) {
2587 tso->blocked_exceptions =
2588 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2591 // We don't always chase the link field: TSOs on the blackhole
2592 // queue are not automatically alive, so the link field is a
2593 // "weak" pointer in that case.
2594 if (tso->why_blocked != BlockedOnBlackHole) {
2595 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
2598 // scavenge the current transaction record
2599 tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
2601 // scavenge this thread's stack
2602 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2605 /* -----------------------------------------------------------------------------
2606 Blocks of function args occur on the stack (at the top) and
2608 -------------------------------------------------------------------------- */
2610 STATIC_INLINE StgPtr
2611 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
2618 switch (fun_info->f.fun_type) {
2620 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2621 size = BITMAP_SIZE(fun_info->f.b.bitmap);
2624 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
2625 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2629 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2630 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
2633 if ((bitmap & 1) == 0) {
2634 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2637 bitmap = bitmap >> 1;
2645 STATIC_INLINE StgPtr
2646 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
2650 StgFunInfoTable *fun_info;
2652 fun_info = get_fun_itbl(fun);
2653 ASSERT(fun_info->i.type != PAP);
2654 p = (StgPtr)payload;
2656 switch (fun_info->f.fun_type) {
2658 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2661 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2665 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
2669 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2672 if ((bitmap & 1) == 0) {
2673 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2676 bitmap = bitmap >> 1;
2684 STATIC_INLINE StgPtr
2685 scavenge_PAP (StgPAP *pap)
2687 pap->fun = evacuate(pap->fun);
2688 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
2691 STATIC_INLINE StgPtr
2692 scavenge_AP (StgAP *ap)
2694 ap->fun = evacuate(ap->fun);
2695 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
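// Note: scavenge_PAP and scavenge_AP differ only in the closure type.
// StgPAP and StgAP share the same fun/n_args/payload layout (as used
// here), so both can defer to scavenge_PAP_payload() above.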
2698 /* -----------------------------------------------------------------------------
2699 Scavenge a given step until there are no more objects in this step
2702 evac_gen is set by the caller to be either zero (for a step in a
2703 generation < N) or G where G is the generation of the step being
2706 We sometimes temporarily change evac_gen back to zero if we're
2707 scavenging a mutable object where early promotion isn't such a good
2708 idea.
2709 -------------------------------------------------------------------------- */
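/* Worked example (illustrative): with N = 1, a step in generation 0 is
 * scavenged with evac_gen = 0 and a step in generation 1 with
 * evac_gen = 1. On meeting a mutable closure (e.g. MUT_ARR_PTRS below)
 * we temporarily set evac_gen = 0, evacuate its contents, restore the
 * saved value, and set failed_to_evac so the closure is put back on a
 * mutable list.
 */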
2717 nat saved_evac_gen = evac_gen;
2722 failed_to_evac = rtsFalse;
2724 /* scavenge phase - standard breadth-first scavenging of the
2728 while (bd != stp->hp_bd || p < stp->hp) {
2730 // If we're at the end of this block, move on to the next block
2731 if (bd != stp->hp_bd && p == bd->free) {
2737 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
2738 info = get_itbl((StgClosure *)p);
2740 ASSERT(thunk_selector_depth == 0);
2743 switch (info->type) {
2747 StgMVar *mvar = ((StgMVar *)p);
2749 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
2750 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
2751 mvar->value = evacuate((StgClosure *)mvar->value);
2752 evac_gen = saved_evac_gen;
2753 failed_to_evac = rtsTrue; // mutable.
2754 p += sizeofW(StgMVar);
2759 scavenge_fun_srt(info);
2760 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2761 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2762 p += sizeofW(StgHeader) + 2;
2766 scavenge_thunk_srt(info);
2767 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
2768 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2769 p += sizeofW(StgThunk) + 2;
2773 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2774 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2775 p += sizeofW(StgHeader) + 2;
2779 scavenge_thunk_srt(info);
2780 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2781 p += sizeofW(StgThunk) + 1;
2785 scavenge_fun_srt(info);
2787 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2788 p += sizeofW(StgHeader) + 1;
2792 scavenge_thunk_srt(info);
2793 p += sizeofW(StgThunk) + 1;
2797 scavenge_fun_srt(info);
2799 p += sizeofW(StgHeader) + 1;
2803 scavenge_thunk_srt(info);
2804 p += sizeofW(StgThunk) + 2;
2808 scavenge_fun_srt(info);
2810 p += sizeofW(StgHeader) + 2;
2814 scavenge_thunk_srt(info);
2815 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2816 p += sizeofW(StgThunk) + 2;
2820 scavenge_fun_srt(info);
2822 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2823 p += sizeofW(StgHeader) + 2;
2827 scavenge_fun_srt(info);
2834 scavenge_thunk_srt(info);
2835 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
2836 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
2837 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2839 p += info->layout.payload.nptrs;
2850 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2851 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2852 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2854 p += info->layout.payload.nptrs;
2859 StgBCO *bco = (StgBCO *)p;
2860 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
2861 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
2862 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
2863 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
2864 p += bco_sizeW(bco);
2869 if (stp->gen->no != 0) {
2872 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
2873 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
2874 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
2877 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
2879 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2881 // We pretend that p has just been created.
2882 LDV_RECORD_CREATE((StgClosure *)p);
2885 case IND_OLDGEN_PERM:
2886 ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
2887 p += sizeofW(StgInd);
2892 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2893 evac_gen = saved_evac_gen;
2894 failed_to_evac = rtsTrue; // mutable anyhow
2895 p += sizeofW(StgMutVar);
2899 case SE_CAF_BLACKHOLE:
2902 p += BLACKHOLE_sizeW();
2905 case THUNK_SELECTOR:
2907 StgSelector *s = (StgSelector *)p;
2908 s->selectee = evacuate(s->selectee);
2909 p += THUNK_SELECTOR_sizeW();
2913 // A chunk of stack saved in a heap object
2916 StgAP_STACK *ap = (StgAP_STACK *)p;
2918 ap->fun = evacuate(ap->fun);
2919 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
2920 p = (StgPtr)ap->payload + ap->size;
2925 p = scavenge_PAP((StgPAP *)p);
2929 p = scavenge_AP((StgAP *)p);
2933 // nothing to follow
2934 p += arr_words_sizeW((StgArrWords *)p);
2938 // follow everything
2942 evac_gen = 0; // repeatedly mutable
2943 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2944 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2945 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2947 evac_gen = saved_evac_gen;
2948 failed_to_evac = rtsTrue; // mutable anyhow.
2952 case MUT_ARR_PTRS_FROZEN:
2953 case MUT_ARR_PTRS_FROZEN0:
2954 // follow everything
2958 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2959 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2960 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2963 // If we're going to put this object on the mutable list, then
2964 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
2965 if (failed_to_evac) {
2966 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
2968 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
2975 StgTSO *tso = (StgTSO *)p;
2978 evac_gen = saved_evac_gen;
2979 failed_to_evac = rtsTrue; // mutable anyhow.
2980 p += tso_sizeW(tso);
2988 nat size, ptrs, nonptrs, vhs;
2990 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
2992 StgRBH *rbh = (StgRBH *)p;
2993 (StgClosure *)rbh->blocking_queue =
2994 evacuate((StgClosure *)rbh->blocking_queue);
2995 failed_to_evac = rtsTrue; // mutable anyhow.
2997 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
2998 p, info_type(p), (StgClosure *)rbh->blocking_queue));
2999 // ToDo: use size of reverted closure here!
3000 p += BLACKHOLE_sizeW();
3006 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3007 // follow the pointer to the node which is being demanded
3008 (StgClosure *)bf->node =
3009 evacuate((StgClosure *)bf->node);
3010 // follow the link to the rest of the blocking queue
3011 (StgClosure *)bf->link =
3012 evacuate((StgClosure *)bf->link);
3014 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3015 bf, info_type((StgClosure *)bf),
3016 bf->node, info_type(bf->node)));
3017 p += sizeofW(StgBlockedFetch);
3025 p += sizeofW(StgFetchMe);
3026 break; // nothing to do in this case
3030 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3031 (StgClosure *)fmbq->blocking_queue =
3032 evacuate((StgClosure *)fmbq->blocking_queue);
3034 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3035 p, info_type((StgClosure *)p)));
3036 p += sizeofW(StgFetchMeBlockingQueue);
3041 case TVAR_WAIT_QUEUE:
3043 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3045 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3046 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3047 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3048 evac_gen = saved_evac_gen;
3049 failed_to_evac = rtsTrue; // mutable
3050 p += sizeofW(StgTVarWaitQueue);
3056 StgTVar *tvar = ((StgTVar *) p);
3058 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3059 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3060 evac_gen = saved_evac_gen;
3061 failed_to_evac = rtsTrue; // mutable
3062 p += sizeofW(StgTVar);
3068 StgTRecHeader *trec = ((StgTRecHeader *) p);
3070 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3071 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3072 evac_gen = saved_evac_gen;
3073 failed_to_evac = rtsTrue; // mutable
3074 p += sizeofW(StgTRecHeader);
3081 StgTRecChunk *tc = ((StgTRecChunk *) p);
3082 TRecEntry *e = &(tc -> entries[0]);
3084 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3085 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3086 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3087 e->expected_value = evacuate((StgClosure*)e->expected_value);
3088 e->new_value = evacuate((StgClosure*)e->new_value);
3090 evac_gen = saved_evac_gen;
3091 failed_to_evac = rtsTrue; // mutable
3092 p += sizeofW(StgTRecChunk);
3097 barf("scavenge: unimplemented/strange closure type %d @ %p",
3102 * We need to record the current object on the mutable list if
3103 * (a) It is actually mutable, or
3104 * (b) It contains pointers to a younger generation.
3105 * Case (b) arises if we didn't manage to promote everything that
3106 * the current object points to into the current generation.
3108 if (failed_to_evac) {
3109 failed_to_evac = rtsFalse;
3110 if (stp->gen_no > 0) {
3111 recordMutableGen((StgClosure *)q, stp->gen);
3120 /* -----------------------------------------------------------------------------
3121 Scavenge everything on the mark stack.
3123 This is slightly different from scavenge():
3124 - we don't walk linearly through the objects, so the scavenger
3125 doesn't need to advance the pointer on to the next object.
3126 -------------------------------------------------------------------------- */
3129 scavenge_mark_stack(void)
3135 evac_gen = oldest_gen->no;
3136 saved_evac_gen = evac_gen;
3139 while (!mark_stack_empty()) {
3140 p = pop_mark_stack();
3142 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3143 info = get_itbl((StgClosure *)p);
3146 switch (info->type) {
3150 StgMVar *mvar = ((StgMVar *)p);
3152 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3153 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3154 mvar->value = evacuate((StgClosure *)mvar->value);
3155 evac_gen = saved_evac_gen;
3156 failed_to_evac = rtsTrue; // mutable.
3161 scavenge_fun_srt(info);
3162 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3163 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3167 scavenge_thunk_srt(info);
3168 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
3169 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3173 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3174 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3179 scavenge_fun_srt(info);
3180 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3185 scavenge_thunk_srt(info);
3186 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3191 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3196 scavenge_fun_srt(info);
3201 scavenge_thunk_srt(info);
3209 scavenge_fun_srt(info);
3216 scavenge_thunk_srt(info);
3217 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3218 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
3219 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3231 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3232 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
3233 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3239 StgBCO *bco = (StgBCO *)p;
3240 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
3241 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
3242 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
3243 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
3248 // don't need to do anything here: the only possible case
3249 // is that we're in a 1-space compacting collector, with
3250 // no "old" generation.
3254 case IND_OLDGEN_PERM:
3255 ((StgInd *)p)->indirectee =
3256 evacuate(((StgInd *)p)->indirectee);
3261 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3262 evac_gen = saved_evac_gen;
3263 failed_to_evac = rtsTrue;
3267 case SE_CAF_BLACKHOLE:
3273 case THUNK_SELECTOR:
3275 StgSelector *s = (StgSelector *)p;
3276 s->selectee = evacuate(s->selectee);
3280 // A chunk of stack saved in a heap object
3283 StgAP_STACK *ap = (StgAP_STACK *)p;
3285 ap->fun = evacuate(ap->fun);
3286 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3291 scavenge_PAP((StgPAP *)p);
3295 scavenge_AP((StgAP *)p);
3299 // follow everything
3303 evac_gen = 0; // repeatedly mutable
3304 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3305 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3306 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3308 evac_gen = saved_evac_gen;
3309 failed_to_evac = rtsTrue; // mutable anyhow.
3313 case MUT_ARR_PTRS_FROZEN:
3314 case MUT_ARR_PTRS_FROZEN0:
3315 // follow everything
3319 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3320 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3321 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3324 // If we're going to put this object on the mutable list, then
3325 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3326 if (failed_to_evac) {
3327 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3329 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3336 StgTSO *tso = (StgTSO *)p;
3339 evac_gen = saved_evac_gen;
3340 failed_to_evac = rtsTrue;
3348 nat size, ptrs, nonptrs, vhs;
3350 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3352 StgRBH *rbh = (StgRBH *)p;
3353 rbh->blocking_queue =
3354 (StgTSO *)evacuate((StgClosure *)rbh->blocking_queue);
3355 failed_to_evac = rtsTrue; // mutable anyhow.
3357 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3358 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3364 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3365 // follow the pointer to the node which is being demanded
3366 (StgClosure *)bf->node =
3367 evacuate((StgClosure *)bf->node);
3368 // follow the link to the rest of the blocking queue
3369 (StgClosure *)bf->link =
3370 evacuate((StgClosure *)bf->link);
3372 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3373 bf, info_type((StgClosure *)bf),
3374 bf->node, info_type(bf->node)));
3382 break; // nothing to do in this case
3386 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3387 (StgClosure *)fmbq->blocking_queue =
3388 evacuate((StgClosure *)fmbq->blocking_queue);
3390 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3391 p, info_type((StgClosure *)p)));
3396 case TVAR_WAIT_QUEUE:
3398 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3400 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3401 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3402 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3403 evac_gen = saved_evac_gen;
3404 failed_to_evac = rtsTrue; // mutable
3410 StgTVar *tvar = ((StgTVar *) p);
3412 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3413 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3414 evac_gen = saved_evac_gen;
3415 failed_to_evac = rtsTrue; // mutable
3422 StgTRecChunk *tc = ((StgTRecChunk *) p);
3423 TRecEntry *e = &(tc -> entries[0]);
3425 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3426 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3427 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3428 e->expected_value = evacuate((StgClosure*)e->expected_value);
3429 e->new_value = evacuate((StgClosure*)e->new_value);
3431 evac_gen = saved_evac_gen;
3432 failed_to_evac = rtsTrue; // mutable
3438 StgTRecHeader *trec = ((StgTRecHeader *) p);
3440 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3441 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3442 evac_gen = saved_evac_gen;
3443 failed_to_evac = rtsTrue; // mutable
3448 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
3452 if (failed_to_evac) {
3453 failed_to_evac = rtsFalse;
3455 recordMutableGen((StgClosure *)q, &generations[evac_gen]);
3459 // mark the next bit to indicate "scavenged"
3460 mark(q+1, Bdescr(q));
3462 } // while (!mark_stack_empty())
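// Note: in the compacting collector each object effectively carries two
// bits in the bitmap: the bit at q means "marked/live", and the bit at
// q+1 (set above, once q has been scavenged) means "scavenged". The
// linear scan below re-pushes objects that are marked but not yet
// scavenged.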
3464 // start a new linear scan if the mark stack overflowed at some point
3465 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
3466 IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan"));
3467 mark_stack_overflowed = rtsFalse;
3468 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
3469 oldgen_scan = oldgen_scan_bd->start;
3472 if (oldgen_scan_bd) {
3473 // push a new thing on the mark stack
3475 // find a closure that is marked but not scavenged, and start
3477 while (oldgen_scan < oldgen_scan_bd->free
3478 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
3482 if (oldgen_scan < oldgen_scan_bd->free) {
3484 // already scavenged?
3485 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
3486 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
3489 push_mark_stack(oldgen_scan);
3490 // ToDo: bump the linear scan by the actual size of the object
3491 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
3495 oldgen_scan_bd = oldgen_scan_bd->link;
3496 if (oldgen_scan_bd != NULL) {
3497 oldgen_scan = oldgen_scan_bd->start;
3503 /* -----------------------------------------------------------------------------
3504 Scavenge one object.
3506 This is used for objects that are temporarily marked as mutable
3507 because they contain old-to-new generation pointers. Only certain
3508 objects can have this property.
3509 -------------------------------------------------------------------------- */
3512 scavenge_one(StgPtr p)
3514 const StgInfoTable *info;
3515 nat saved_evac_gen = evac_gen;
3518 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3519 info = get_itbl((StgClosure *)p);
3521 switch (info->type) {
3525 StgMVar *mvar = ((StgMVar *)p);
3527 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3528 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3529 mvar->value = evacuate((StgClosure *)mvar->value);
3530 evac_gen = saved_evac_gen;
3531 failed_to_evac = rtsTrue; // mutable.
3544 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3545 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
3546 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3552 case FUN_1_0: // hardly worth specialising these guys
3568 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3569 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
3570 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3577 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3578 evac_gen = saved_evac_gen;
3579 failed_to_evac = rtsTrue; // mutable anyhow
3583 case SE_CAF_BLACKHOLE:
3588 case THUNK_SELECTOR:
3590 StgSelector *s = (StgSelector *)p;
3591 s->selectee = evacuate(s->selectee);
3597 StgAP_STACK *ap = (StgAP_STACK *)p;
3599 ap->fun = evacuate(ap->fun);
3600 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3601 p = (StgPtr)ap->payload + ap->size;
3606 p = scavenge_PAP((StgPAP *)p);
3610 p = scavenge_AP((StgAP *)p);
3614 // nothing to follow
3619 // follow everything
3622 evac_gen = 0; // repeatedly mutable
3623 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3624 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3625 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3627 evac_gen = saved_evac_gen;
3628 failed_to_evac = rtsTrue;
3632 case MUT_ARR_PTRS_FROZEN:
3633 case MUT_ARR_PTRS_FROZEN0:
3635 // follow everything
3638 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3639 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3640 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3643 // If we're going to put this object on the mutable list, then
3644 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3645 if (failed_to_evac) {
3646 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3648 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3655 StgTSO *tso = (StgTSO *)p;
3657 evac_gen = 0; // repeatedly mutable
3659 evac_gen = saved_evac_gen;
3660 failed_to_evac = rtsTrue;
3668 nat size, ptrs, nonptrs, vhs;
3670 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3672 StgRBH *rbh = (StgRBH *)p;
3673 (StgClosure *)rbh->blocking_queue =
3674 evacuate((StgClosure *)rbh->blocking_queue);
3675 failed_to_evac = rtsTrue; // mutable anyhow.
3677 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3678 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3679 // ToDo: use size of reverted closure here!
3685 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3686 // follow the pointer to the node which is being demanded
3687 (StgClosure *)bf->node =
3688 evacuate((StgClosure *)bf->node);
3689 // follow the link to the rest of the blocking queue
3690 (StgClosure *)bf->link =
3691 evacuate((StgClosure *)bf->link);
3693 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3694 bf, info_type((StgClosure *)bf),
3695 bf->node, info_type(bf->node)));
3703 break; // nothing to do in this case
3707 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3708 (StgClosure *)fmbq->blocking_queue =
3709 evacuate((StgClosure *)fmbq->blocking_queue);
3711 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3712 p, info_type((StgClosure *)p)));
3717 case TVAR_WAIT_QUEUE:
3719 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3721 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3722 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3723 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3724 evac_gen = saved_evac_gen;
3725 failed_to_evac = rtsTrue; // mutable
3731 StgTVar *tvar = ((StgTVar *) p);
3733 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3734 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3735 evac_gen = saved_evac_gen;
3736 failed_to_evac = rtsTrue; // mutable
3742 StgTRecHeader *trec = ((StgTRecHeader *) p);
3744 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3745 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3746 evac_gen = saved_evac_gen;
3747 failed_to_evac = rtsTrue; // mutable
3754 StgTRecChunk *tc = ((StgTRecChunk *) p);
3755 TRecEntry *e = &(tc -> entries[0]);
3757 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3758 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3759 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3760 e->expected_value = evacuate((StgClosure*)e->expected_value);
3761 e->new_value = evacuate((StgClosure*)e->new_value);
3763 evac_gen = saved_evac_gen;
3764 failed_to_evac = rtsTrue; // mutable
3769 case IND_OLDGEN_PERM:
3772 /* Careful here: a THUNK can be on the mutable list because
3773 * it contains pointers to young gen objects. If such a thunk
3774 * is updated, the IND_OLDGEN will be added to the mutable
3775 * list again, and we'll scavenge it twice. evacuate()
3776 * doesn't check whether the object has already been
3777 * evacuated, so we perform that check here.
3779 StgClosure *q = ((StgInd *)p)->indirectee;
3780 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
3783 ((StgInd *)p)->indirectee = evacuate(q);
3786 #if 0 && defined(DEBUG)
3787 if (RtsFlags.DebugFlags.gc)
3788 /* Debugging code to print out the size of the thing we just
3792 StgPtr start = gen->steps[0].scan;
3793 bdescr *start_bd = gen->steps[0].scan_bd;
3795 scavenge(&gen->steps[0]);
3796 if (start_bd != gen->steps[0].scan_bd) {
3797 size += (P_)BLOCK_ROUND_UP(start) - start;
3798 start_bd = start_bd->link;
3799 while (start_bd != gen->steps[0].scan_bd) {
3800 size += BLOCK_SIZE_W;
3801 start_bd = start_bd->link;
3803 size += gen->steps[0].scan -
3804 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
3806 size = gen->steps[0].scan - start;
3808 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
3814 barf("scavenge_one: strange object %d", (int)(info->type));
3817 no_luck = failed_to_evac;
3818 failed_to_evac = rtsFalse;
3822 /* -----------------------------------------------------------------------------
3823 Scavenging mutable lists.
3825 We treat the mutable list of each generation > N (i.e. all the
3826 generations older than the one being collected) as roots. We also
3827 remove non-mutable objects from the mutable list at this point.
3828 -------------------------------------------------------------------------- */
3831 scavenge_mutable_list(generation *gen)
3836 bd = gen->saved_mut_list;
3839 for (; bd != NULL; bd = bd->link) {
3840 for (q = bd->start; q < bd->free; q++) {
3842 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3845 switch (get_itbl((StgClosure *)p)->type) {
3847 mutlist_MUTVARS++; break;
3849 case MUT_ARR_PTRS_FROZEN:
3850 case MUT_ARR_PTRS_FROZEN0:
3851 mutlist_MUTARRS++; break;
3853 mutlist_OTHERS++; break;
3857 if (scavenge_one(p)) {
3858 /* didn't manage to promote everything, so put the
3859 * object back on the list.
3861 recordMutableGen((StgClosure *)p,gen);
3866 // free the old mut_list
3867 freeChain(gen->saved_mut_list);
3868 gen->saved_mut_list = NULL;
3873 scavenge_static(void)
3875 StgClosure* p = static_objects;
3876 const StgInfoTable *info;
3878 /* Always evacuate straight to the oldest generation for static
3879 * objects */
3880 evac_gen = oldest_gen->no;
3882 /* keep going until we've scavenged all the objects on the linked
3883 * list... */
3884 while (p != END_OF_STATIC_LIST) {
3886 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3889 if (info->type==RBH)
3890 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3892 // make sure the info pointer is into text space
3894 /* Take this object *off* the static_objects list,
3895 * and put it on the scavenged_static_objects list.
3897 static_objects = *STATIC_LINK(info,p);
3898 *STATIC_LINK(info,p) = scavenged_static_objects;
3899 scavenged_static_objects = p;
3901 switch (info -> type) {
3905 StgInd *ind = (StgInd *)p;
3906 ind->indirectee = evacuate(ind->indirectee);
3908 /* might fail to evacuate it, in which case we have to pop it
3909 * back on the mutable list of the oldest generation. We
3910 * leave it *on* the scavenged_static_objects list, though,
3911 * in case we visit this object again.
3913 if (failed_to_evac) {
3914 failed_to_evac = rtsFalse;
3915 recordMutableGen((StgClosure *)p,oldest_gen);
3921 scavenge_thunk_srt(info);
3925 scavenge_fun_srt(info);
3932 next = (P_)p->payload + info->layout.payload.ptrs;
3933 // evacuate the pointers
3934 for (q = (P_)p->payload; q < next; q++) {
3935 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3941 barf("scavenge_static: strange closure %d", (int)(info->type));
3944 ASSERT(failed_to_evac == rtsFalse);
3946 /* get the next static object from the list. Remember, there might
3947 * be more stuff on this list now that we've done some evacuating!
3948 * (static_objects is a global)
3954 /* -----------------------------------------------------------------------------
3955 scavenge a chunk of memory described by a bitmap
3956 -------------------------------------------------------------------------- */
3959 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
3965 bitmap = large_bitmap->bitmap[b];
3966 for (i = 0; i < size; ) {
3967 if ((bitmap & 1) == 0) {
3968 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3972 if (i % BITS_IN(W_) == 0) {
3974 bitmap = large_bitmap->bitmap[b];
3976 bitmap = bitmap >> 1;
3981 STATIC_INLINE StgPtr
3982 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
3985 if ((bitmap & 1) == 0) {
3986 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3989 bitmap = bitmap >> 1;
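/* Note the sense of the test above: stack and argument bitmaps use a
 * set bit to mark a NON-pointer word, so we evacuate when the bit is
 * clear. (SRT bitmaps use the opposite convention - see scavenge_srt().)
 * Worked example: bitmap = 0x6 (binary 0110) with size = 4 evacuates
 * p[0] and p[3], and skips the non-pointer words p[1] and p[2].
 */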
3995 /* -----------------------------------------------------------------------------
3996 scavenge_stack walks over a section of stack and evacuates all the
3997 objects pointed to by it. We can use the same code for walking
3998 AP_STACK_UPDs, since these are just sections of copied stack.
3999 -------------------------------------------------------------------------- */
4003 scavenge_stack(StgPtr p, StgPtr stack_end)
4005 const StgRetInfoTable* info;
4009 //IF_DEBUG(sanity, debugBelch(" scavenging stack between %p and %p", p, stack_end));
4012 * Each time around this loop, we are looking at a chunk of stack
4013 * that starts with an activation record.
4016 while (p < stack_end) {
4017 info = get_ret_itbl((StgClosure *)p);
4019 switch (info->i.type) {
4022 // In SMP, we can get update frames that point to indirections
4023 // when two threads evaluate the same thunk. We do attempt to
4024 // discover this situation in threadPaused(), but it's
4025 // possible that the following sequence occurs:
4034 // Now T is an indirection, and the update frame is already
4035 // marked on A's stack, so we won't traverse it again in
4036 // threadPaused(). We could traverse the whole stack again
4037 // before GC, but that seems like overkill.
4039 // Scavenging this update frame as normal would be disastrous;
4040 // the updatee would end up pointing to the value. So we turn
4041 // the indirection into an IND_PERM, so that evacuate will
4042 // copy the indirection into the old generation instead of
4044 if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) {
4045 ((StgUpdateFrame *)p)->updatee->header.info =
4046 (StgInfoTable *)&stg_IND_PERM_info;
4048 ((StgUpdateFrame *)p)->updatee
4049 = evacuate(((StgUpdateFrame *)p)->updatee);
4050 p += sizeofW(StgUpdateFrame);
4053 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
4054 case CATCH_STM_FRAME:
4055 case CATCH_RETRY_FRAME:
4056 case ATOMICALLY_FRAME:
4061 bitmap = BITMAP_BITS(info->i.layout.bitmap);
4062 size = BITMAP_SIZE(info->i.layout.bitmap);
4063 // NOTE: the payload starts immediately after the info-ptr, we
4064 // don't have an StgHeader in the same sense as a heap closure.
4066 p = scavenge_small_bitmap(p, size, bitmap);
4070 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
4078 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4081 size = BCO_BITMAP_SIZE(bco);
4082 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
4087 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
4093 size = GET_LARGE_BITMAP(&info->i)->size;
4095 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
4097 // and don't forget to follow the SRT
4101 // Dynamic bitmap: the mask is stored on the stack, and
4102 // there are a number of non-pointers followed by a number
4103 // of pointers above the bitmapped area. (see StgMacros.h,
4108 dyn = ((StgRetDyn *)p)->liveness;
4110 // traverse the bitmap first
4111 bitmap = RET_DYN_LIVENESS(dyn);
4112 p = (P_)&((StgRetDyn *)p)->payload[0];
4113 size = RET_DYN_BITMAP_SIZE;
4114 p = scavenge_small_bitmap(p, size, bitmap);
4116 // skip over the non-ptr words
4117 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
4119 // follow the ptr words
4120 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
4121 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4129 StgRetFun *ret_fun = (StgRetFun *)p;
4130 StgFunInfoTable *fun_info;
4132 ret_fun->fun = evacuate(ret_fun->fun);
4133 fun_info = get_fun_itbl(ret_fun->fun);
4134 p = scavenge_arg_block(fun_info, ret_fun->payload);
4139 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
4144 /*-----------------------------------------------------------------------------
4145 scavenge the large object list.
4147 evac_gen set by caller; similar games played with evac_gen as with
4148 scavenge() - see comment at the top of scavenge(). Most large
4149 objects are (repeatedly) mutable, so most of the time evac_gen will
4150 be zero.
4151 --------------------------------------------------------------------------- */
4154 scavenge_large(step *stp)
4159 bd = stp->new_large_objects;
4161 for (; bd != NULL; bd = stp->new_large_objects) {
4163 /* take this object *off* the large objects list and put it on
4164 * the scavenged large objects list. This is so that we can
4165 * treat new_large_objects as a stack and push new objects on
4166 * the front when evacuating.
4168 stp->new_large_objects = bd->link;
4169 dbl_link_onto(bd, &stp->scavenged_large_objects);
4171 // update the block count in this step.
4172 stp->n_scavenged_large_blocks += bd->blocks;
4175 if (scavenge_one(p)) {
4176 if (stp->gen_no > 0) {
4177 recordMutableGen((StgClosure *)p, stp->gen);
4183 /* -----------------------------------------------------------------------------
4184 Initialising the static object & mutable lists
4185 -------------------------------------------------------------------------- */
4188 zero_static_object_list(StgClosure* first_static)
4192 const StgInfoTable *info;
4194 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
4196 link = *STATIC_LINK(info, p);
4197 *STATIC_LINK(info,p) = NULL;
4201 /* -----------------------------------------------------------------------------
4203 -------------------------------------------------------------------------- */
4210 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
4211 c = (StgIndStatic *)c->static_link)
4213 SET_INFO(c, c->saved_info);
4214 c->saved_info = NULL;
4215 // could, but not necessary: c->static_link = NULL;
4217 revertible_caf_list = NULL;
4221 markCAFs( evac_fn evac )
4225 for (c = (StgIndStatic *)caf_list; c != NULL;
4226 c = (StgIndStatic *)c->static_link)
4228 evac(&c->indirectee);
4230 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
4231 c = (StgIndStatic *)c->static_link)
4233 evac(&c->indirectee);
4237 /* -----------------------------------------------------------------------------
4238 Sanity code for CAF garbage collection.
4240 With DEBUG turned on, we manage a CAF list in addition to the SRT
4241 mechanism. After GC, we run down the CAF list and blackhole any
4242 CAFs which have been garbage collected. This means we get an error
4243 whenever the program tries to enter a garbage collected CAF.
4245 Any garbage collected CAFs are taken off the CAF list at the same
4246 time.
4247 -------------------------------------------------------------------------- */
4249 #if 0 && defined(DEBUG)
4256 const StgInfoTable *info;
4267 ASSERT(info->type == IND_STATIC);
4269 if (STATIC_LINK(info,p) == NULL) {
4270 IF_DEBUG(gccafs, debugBelch("CAF gc'd at 0x%04lx", (long)p));
4272 SET_INFO(p,&stg_BLACKHOLE_info);
4273 p = STATIC_LINK2(info,p);
4277 pp = &STATIC_LINK2(info,p);
4284 // debugBelch("%d CAFs live", i);
4289 /* -----------------------------------------------------------------------------
4292 * Code largely pinched from old RTS, then hacked to bits. We also do
4293 * lazy black holing here.
4295 * -------------------------------------------------------------------------- */
4297 struct stack_gap { StgWord gap_size; struct stack_gap *next_gap; };
4300 stackSqueeze(StgTSO *tso, StgPtr bottom)
4303 rtsBool prev_was_update_frame;
4304 StgClosure *updatee = NULL;
4305 StgRetInfoTable *info;
4306 StgWord current_gap_size;
4307 struct stack_gap *gap;
4310 // Traverse the stack upwards, replacing adjacent update frames
4311 // with a single update frame and a "stack gap". A stack gap
4312 // contains two values: the size of the gap, and the distance
4313 // to the next gap (or the stack top).
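// For example, a run of three adjacent update frames becomes the
// topmost frame plus one gap record: the stack_gap is written into the
// bottom-most frame of the run, and its gap_size covers the whole run.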
4317 ASSERT(frame < bottom);
4319 prev_was_update_frame = rtsFalse;
4320 current_gap_size = 0;
4321 gap = (struct stack_gap *) (tso->sp - sizeofW(StgUpdateFrame));
4323 while (frame < bottom) {
4325 info = get_ret_itbl((StgClosure *)frame);
4326 switch (info->i.type) {
4330 StgUpdateFrame *upd = (StgUpdateFrame *)frame;
4332 if (prev_was_update_frame) {
4334 TICK_UPD_SQUEEZED();
4335 /* wasn't there something about update squeezing and ticky to be
4336 * sorted out? oh yes: we aren't counting each enter properly
4337 * in this case. See the log somewhere. KSW 1999-04-21
4339 * Check two things: that the two update frames don't point to
4340 * the same object, and that the updatee_bypass isn't already an
4341 * indirection. Both of these cases only happen when we're in a
4342 * black-hole-style loop (and there are multiple update frames
4343 * on the stack pointing to the same closure), but they can both
4344 * screw us up if we don't check.
4346 if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
4347 UPD_IND_NOLOCK(upd->updatee, updatee);
4350 // now mark this update frame as a stack gap. The gap
4351 // marker resides in the bottom-most update frame of
4352 // the series of adjacent frames, and covers all the
4353 // frames in this series.
4354 current_gap_size += sizeofW(StgUpdateFrame);
4355 ((struct stack_gap *)frame)->gap_size = current_gap_size;
4356 ((struct stack_gap *)frame)->next_gap = gap;
4358 frame += sizeofW(StgUpdateFrame);
4362 // single update frame, or the topmost update frame in a series
4364 prev_was_update_frame = rtsTrue;
4365 updatee = upd->updatee;
4366 frame += sizeofW(StgUpdateFrame);
4372 prev_was_update_frame = rtsFalse;
4374 // we're not in a gap... check whether this is the end of a gap
4375 // (an update frame can't be the end of a gap).
4376 if (current_gap_size != 0) {
4377 gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
4379 current_gap_size = 0;
4381 frame += stack_frame_sizeW((StgClosure *)frame);
4386 if (current_gap_size != 0) {
4387 gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
4390 // Now we have a stack with gaps in it, and we have to walk down
4391 // shoving the stack up to fill in the gaps. A diagram might
4392 // help:
4395 // | ********* | <- sp
4399 // | stack_gap | <- gap | chunk_size
4401 // | ......... | <- gap_end v
4407 // 'sp' points to the current top-of-stack
4408 // 'gap' points to the stack_gap structure inside the gap
4409 // ***** indicates real stack data
4410 // ..... indicates gap
4411 // <empty> indicates unused
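// In effect, the loop below walks the gap list from the bottom-most gap
// towards tso->sp, memmove()ing each chunk of real data down over the
// gaps beneath it; tso->sp ends up higher by the total gap size, i.e.
// the stack shrinks.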
4415 void *gap_start, *next_gap_start, *gap_end;
4418 next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
4419 sp = next_gap_start;
4421 while ((StgPtr)gap > tso->sp) {
4423 // we're working in *bytes* now...
4424 gap_start = next_gap_start;
4425 gap_end = (void*) ((unsigned char*)gap_start - gap->gap_size * sizeof(W_));
4427 gap = gap->next_gap;
4428 next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
4430 chunk_size = (unsigned char*)gap_end - (unsigned char*)next_gap_start;
4432 memmove(sp, next_gap_start, chunk_size);
4435 tso->sp = (StgPtr)sp;
4439 /* -----------------------------------------------------------------------------
4442 * We have to prepare for GC - this means doing lazy black holing
4443 here. We also take the opportunity to do stack squeezing if it's
4444 worth it.
4445 * -------------------------------------------------------------------------- */
4447 threadPaused(Capability *cap, StgTSO *tso)
4450 StgRetInfoTable *info;
4453 nat words_to_squeeze = 0;
4455 nat weight_pending = 0;
4456 rtsBool prev_was_update_frame;
4458 stack_end = &tso->stack[tso->stack_size];
4460 frame = (StgClosure *)tso->sp;
4463 // If we've already marked this frame, then stop here.
4464 if (frame->header.info == (StgInfoTable *)&stg_marked_upd_frame_info) {
4468 info = get_ret_itbl(frame);
4470 switch (info->i.type) {
4474 SET_INFO(frame, (StgInfoTable *)&stg_marked_upd_frame_info);
4476 bh = ((StgUpdateFrame *)frame)->updatee;
4478 if (closure_IND(bh) || bh->header.info == &stg_BLACKHOLE_info) {
4479 IF_DEBUG(squeeze, debugBelch("suspending duplicate work: %ld words of stack\n", (StgPtr)frame - tso->sp));
4481 // If this closure is already an indirection, then
4482 // suspend the computation up to this point:
4483 suspendComputation(cap,tso,(StgPtr)frame);
4485 // Now drop the update frame, and arrange to return
4486 // the value to the frame underneath:
4487 tso->sp = (StgPtr)frame + sizeofW(StgUpdateFrame) - 2;
4488 tso->sp[1] = (StgWord)bh;
4489 tso->sp[0] = (W_)&stg_enter_info;
4491 // And continue with threadPaused; there might be
4492 // yet more computation to suspend.
4493 threadPaused(cap,tso);
4497 if (bh->header.info != &stg_CAF_BLACKHOLE_info) {
4498 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
4499 debugBelch("Unexpected lazy BHing required at 0x%04lx\n",(long)bh);
4501 // zero out the slop so that the sanity checker can tell
4502 // where the next closure is.
4503 DEBUG_FILL_SLOP(bh);
4506 // We pretend that bh is now dead.
4507 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
4509 SET_INFO(bh,&stg_BLACKHOLE_info);
4511 // We pretend that bh has just been created.
4512 LDV_RECORD_CREATE(bh);
4515 frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
4516 if (prev_was_update_frame) {
4517 words_to_squeeze += sizeofW(StgUpdateFrame);
4518 weight += weight_pending;
4521 prev_was_update_frame = rtsTrue;
4527 // normal stack frames; do nothing except advance the pointer
4530 nat frame_size = stack_frame_sizeW(frame);
4531 weight_pending += frame_size;
4532 frame = (StgClosure *)((StgPtr)frame + frame_size);
4533 prev_was_update_frame = rtsFalse;
4540 debugBelch("words_to_squeeze: %d, weight: %d, squeeze: %s\n",
4541 words_to_squeeze, weight,
4542 weight < words_to_squeeze ? "YES" : "NO"));
4544 // Should we squeeze or not? Arbitrary heuristic: we squeeze if
4545 // the number of words we have to shift down is less than the
4546 // number of stack words we squeeze away by doing so.
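// NB: the heuristic is currently hardwired on - the RtsFlags test in
// the condition below is commented out, so we always squeeze.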
4547 if (1 /*RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
4548 weight < words_to_squeeze*/) {
4549 stackSqueeze(tso, (StgPtr)frame);
4553 /* -----------------------------------------------------------------------------
4555 * -------------------------------------------------------------------------- */
4559 printMutableList(generation *gen)
4564 debugBelch("@@ Mutable list %p: ", gen->mut_list);
4566 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
4567 for (p = bd->start; p < bd->free; p++) {
4568 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));