1 /* -----------------------------------------------------------------------------
3 * (c) The GHC Team 1998-2003
5 * Generational garbage collector
7 * ---------------------------------------------------------------------------*/
9 #include "PosixSource.h"
14 #include "OSThreads.h"
16 #include "LdvProfile.h"
21 #include "BlockAlloc.h"
27 #include "ParTicky.h" // ToDo: move into Rts.h
28 #include "GCCompact.h"
29 #include "RtsSignals.h"
31 #if defined(GRAN) || defined(PAR)
32 # include "GranSimRts.h"
33 # include "ParallelRts.h"
37 # include "ParallelDebug.h"
42 #if defined(RTS_GTK_FRONTPANEL)
43 #include "FrontPanel.h"
46 #include "RetainerProfile.h"
50 // Turn off inlining when debugging - it obfuscates things
53 # define STATIC_INLINE static
56 /* STATIC OBJECT LIST.
59 * We maintain a linked list of static objects that are still live.
60 * The requirements for this list are:
62 * - we need to scan the list while adding to it, in order to
63 * scavenge all the static objects (in the same way that
64 * breadth-first scavenging works for dynamic objects).
66 * - we need to be able to tell whether an object is already on
67 * the list, to break loops.
69 * Each static object has a "static link field", which we use for
70 * linking objects on to the list. We use a stack-type list, consing
71 * objects on the front as they are added (this means that the
72 * scavenge phase is depth-first, not breadth-first, but that
75 * A separate list is kept for objects that have been scavenged
76 * already - this is so that we can zero all the marks afterwards.
78 * An object is on the list if its static link field is non-zero; this
79 * means that we have to mark the end of the list with '1', not NULL.
81 * Extra notes for generational GC:
83 * Each generation has a static object list associated with it. When
84 * collecting generations up to N, we treat the static object lists
85 * from generations > N as roots.
87 * We build up a static object list while collecting generations 0..N,
88 * which is then appended to the static object list of generation N+1.
90 static StgClosure* static_objects; // live static objects
91 StgClosure* scavenged_static_objects; // static objects scavenged so far
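/* Illustrative sketch only: how a static closure is threaded onto
 * static_objects via its static link field during evacuation.  The real
 * code does this inline in evacuate() below (using THUNK_STATIC_LINK,
 * FUN_STATIC_LINK, IND_STATIC_LINK or STATIC_LINK as appropriate); the
 * helper name here is hypothetical.
 */
#if 0
static void example_link_static_object (const StgInfoTable *info, StgClosure *q)
{
    // a NULL link field means "not yet on any static list"
    if (*STATIC_LINK(info, q) == NULL) {
        *STATIC_LINK(info, q) = static_objects;   // cons onto the front
        static_objects = q;                       // list ends at END_OF_STATIC_LIST, not NULL
    }
}
#endif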
93 /* N is the oldest generation being collected, where the generations
94 * are numbered starting at 0. A major GC (indicated by the major_gc
95 * flag) is when we're collecting all generations. We only attempt to
96 * deal with static objects and GC CAFs when doing a major GC.
99 static rtsBool major_gc;
101 /* Youngest generation that objects should be evacuated to in
102 * evacuate(). (Logically an argument to evacuate, but it's static
103 * a lot of the time so we optimise it into a global variable).
107 /* Whether to do eager promotion or not.
109 static rtsBool eager_promotion;
113 StgWeak *old_weak_ptr_list; // also pending finaliser list
115 /* Which stage of processing various kinds of weak pointer are we at?
116 * (see traverse_weak_ptr_list() below for discussion).
118 typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
119 static WeakStage weak_stage;
121 /* List of all threads during GC
123 static StgTSO *old_all_threads;
124 StgTSO *resurrected_threads;
126 /* Flag indicating failure to evacuate an object to the desired
129 static rtsBool failed_to_evac;
131 /* Saved nursery (used for 2-space collector only)
133 static bdescr *saved_nursery;
134 static nat saved_n_blocks;
136 /* Data used for allocation area sizing.
138 static lnat new_blocks; // blocks allocated during this GC
139 static lnat new_scavd_blocks; // ditto, but depth-first blocks
140 static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC
142 /* Used to avoid long recursion due to selector thunks
144 static lnat thunk_selector_depth = 0;
145 #define MAX_THUNK_SELECTOR_DEPTH 8
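/* The depth bound above is applied at both recursion points further down
 * (the THUNK_SELECTOR case of evacuate() and the recursive call inside
 * eval_thunk_selector()).  A condensed sketch of the pattern, for
 * illustration only:
 */
#if 0
    if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
        return copy(q, THUNK_SELECTOR_sizeW(), stp);  // give up: just copy the thunk
    }
    thunk_selector_depth++;
    // ... evaluate the selector chain via eval_thunk_selector() ...
    thunk_selector_depth--;
#endif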
155 /* -----------------------------------------------------------------------------
156 Static function declarations
157 -------------------------------------------------------------------------- */
159 static bdescr * gc_alloc_block ( step *stp );
160 static void mark_root ( StgClosure **root );
162 // Use a register argument for evacuate, if available.
164 #define REGPARM1 __attribute__((regparm(1)))
169 REGPARM1 static StgClosure * evacuate (StgClosure *q);
171 static void zero_static_object_list ( StgClosure* first_static );
173 static rtsBool traverse_weak_ptr_list ( void );
174 static void mark_weak_ptr_list ( StgWeak **list );
176 static StgClosure * eval_thunk_selector ( nat field, StgSelector * p );
179 static void scavenge ( step * );
180 static void scavenge_mark_stack ( void );
181 static void scavenge_stack ( StgPtr p, StgPtr stack_end );
182 static rtsBool scavenge_one ( StgPtr p );
183 static void scavenge_large ( step * );
184 static void scavenge_static ( void );
185 static void scavenge_mutable_list ( generation *g );
187 static void scavenge_large_bitmap ( StgPtr p,
188 StgLargeBitmap *large_bitmap,
191 #if 0 && defined(DEBUG)
192 static void gcCAFs ( void );
195 /* -----------------------------------------------------------------------------
196 inline functions etc. for dealing with the mark bitmap & stack.
197 -------------------------------------------------------------------------- */
199 #define MARK_STACK_BLOCKS 4
201 static bdescr *mark_stack_bdescr;
202 static StgPtr *mark_stack;
203 static StgPtr *mark_sp;
204 static StgPtr *mark_splim;
206 // Flag and pointers used for falling back to a linear scan when the
207 // mark stack overflows.
208 static rtsBool mark_stack_overflowed;
209 static bdescr *oldgen_scan_bd;
210 static StgPtr oldgen_scan;
212 STATIC_INLINE rtsBool
213 mark_stack_empty(void)
215 return mark_sp == mark_stack;
218 STATIC_INLINE rtsBool
219 mark_stack_full(void)
221 return mark_sp >= mark_splim;
225 reset_mark_stack(void)
227 mark_sp = mark_stack;
231 push_mark_stack(StgPtr p)
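/* Typical use of the mark stack helpers above, condensed from the
 * BF_COMPACTED case of evacuate() below (illustrative only):
 */
#if 0
    if (mark_stack_full()) {
        mark_stack_overflowed = rtsTrue;  // scavenge_mark_stack() falls back to a linear scan
        reset_mark_stack();
    }
    push_mark_stack((P_)q);
#endif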
242 /* -----------------------------------------------------------------------------
243 Allocate a new to-space block in the given step.
244 -------------------------------------------------------------------------- */
247 gc_alloc_block(step *stp)
249 bdescr *bd = allocBlock();
250 bd->gen_no = stp->gen_no;
254 // blocks in to-space in generations up to and including N
255 // get the BF_EVACUATED flag.
256 if (stp->gen_no <= N) {
257 bd->flags = BF_EVACUATED;
262 // Start a new to-space block, chain it on after the previous one.
263 if (stp->hp_bd != NULL) {
264 stp->hp_bd->free = stp->hp;
265 stp->hp_bd->link = bd;
270 stp->hpLim = stp->hp + BLOCK_SIZE_W;
279 gc_alloc_scavd_block(step *stp)
281 bdescr *bd = allocBlock();
282 bd->gen_no = stp->gen_no;
285 // blocks in to-space in generations up to and including N
286 // get the BF_EVACUATED flag.
287 if (stp->gen_no <= N) {
288 bd->flags = BF_EVACUATED;
293 bd->link = stp->blocks;
296 if (stp->scavd_hp != NULL) {
297 Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
299 stp->scavd_hp = bd->start;
300 stp->scavd_hpLim = stp->scavd_hp + BLOCK_SIZE_W;
308 /* -----------------------------------------------------------------------------
311 Rough outline of the algorithm: for garbage collecting generation N
312 (and all younger generations):
314 - follow all pointers in the root set. the root set includes all
315 mutable objects in all generations (mutable_list).
317 - for each pointer, evacuate the object it points to into either
319 + to-space of the step given by step->to, which is the next
320 highest step in this generation or the first step in the next
321 generation if this is the last step.
323 + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
324 When we evacuate an object we attempt to evacuate
325 everything it points to into the same generation - this is
326 achieved by setting evac_gen to the desired generation. If
327 we can't do this, then an entry in the mut list has to
328 be made for the cross-generation pointer.
330 + if the object is already in a generation > N, then leave
333 - repeatedly scavenge to-space from each step in each generation
334 being collected until no more objects can be evacuated.
336 - free from-space in each step, and set from-space = to-space.
338 Locks held: all capabilities are held throughout GarbageCollect().
340 -------------------------------------------------------------------------- */
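/* A condensed restatement of the outline above (illustrative pseudo-code
 * only; the helper names here are hypothetical, the real control flow is
 * inside GarbageCollect() below):
 */
#if 0
    evacuate_roots();                       // mutable lists, get_roots(), CAFs, stable ptrs, ...
    do {
        scavenge_all_to_space();            // scavenge each step until nothing new is evacuated
    } while (traverse_weak_ptr_list());     // newly live weak ptrs/threads mean another round
    free_from_space_and_tidy_up();          // from-space is freed, to-space becomes from-space
#endif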
343 GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
347 lnat live, allocated, copied = 0, scavd_copied = 0;
348 lnat oldgen_saved_blocks = 0;
354 CostCentreStack *prev_CCS;
357 #if defined(DEBUG) && defined(GRAN)
358 IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n",
362 #if defined(RTS_USER_SIGNALS)
367 // tell the STM to discard any cached closures it's hoping to re-use
370 // tell the stats department that we've started a GC
374 // check for memory leaks if DEBUG is on
384 // Init stats and print par specific (timing) info
385 PAR_TICKY_PAR_START();
387 // attribute any costs to CCS_GC
393 /* Approximate how much we allocated.
394 * Todo: only when generating stats?
396 allocated = calcAllocated();
398 /* Figure out which generation to collect
400 if (force_major_gc) {
401 N = RtsFlags.GcFlags.generations - 1;
405 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
406 if (generations[g].steps[0].n_blocks +
407 generations[g].steps[0].n_large_blocks
408 >= generations[g].max_blocks) {
412 major_gc = (N == RtsFlags.GcFlags.generations-1);
415 #ifdef RTS_GTK_FRONTPANEL
416 if (RtsFlags.GcFlags.frontpanel) {
417 updateFrontPanelBeforeGC(N);
421 // check stack sanity *before* GC (ToDo: check all threads)
423 // ToDo!: check sanity IF_DEBUG(sanity, checkTSOsSanity());
425 IF_DEBUG(sanity, checkFreeListSanity());
427 /* Initialise the static object lists
429 static_objects = END_OF_STATIC_LIST;
430 scavenged_static_objects = END_OF_STATIC_LIST;
432 /* Save the nursery if we're doing a two-space collection.
433 * g0s0->blocks will be used for to-space, so we need to get the
434 * nursery out of the way.
436 if (RtsFlags.GcFlags.generations == 1) {
437 saved_nursery = g0s0->blocks;
438 saved_n_blocks = g0s0->n_blocks;
443 /* Keep a count of how many new blocks we allocated during this GC
444 * (used for resizing the allocation area, later).
447 new_scavd_blocks = 0;
449 // Initialise to-space in all the generations/steps that we're
452 for (g = 0; g <= N; g++) {
454 // throw away the mutable list. Invariant: the mutable list
455 // always has at least one block; this means we can avoid a check for
456 // NULL in recordMutable().
458 freeChain(generations[g].mut_list);
459 generations[g].mut_list = allocBlock();
460 for (i = 0; i < n_capabilities; i++) {
461 freeChain(capabilities[i].mut_lists[g]);
462 capabilities[i].mut_lists[g] = allocBlock();
466 for (s = 0; s < generations[g].n_steps; s++) {
468 // generation 0, step 0 doesn't need to-space
469 if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
473 stp = &generations[g].steps[s];
474 ASSERT(stp->gen_no == g);
476 // start a new to-space for this step.
477 stp->old_blocks = stp->blocks;
478 stp->n_old_blocks = stp->n_blocks;
480 // allocate the first to-space block; extra blocks will be
481 // chained on as necessary.
483 bd = gc_alloc_block(stp);
486 stp->scan = bd->start;
489 // allocate a block for "already scavenged" objects. This goes
490 // on the front of the stp->blocks list, so it won't be
491 // traversed by the scavenging sweep.
492 gc_alloc_scavd_block(stp);
494 // initialise the large object queues.
495 stp->new_large_objects = NULL;
496 stp->scavenged_large_objects = NULL;
497 stp->n_scavenged_large_blocks = 0;
499 // mark the large objects as not evacuated yet
500 for (bd = stp->large_objects; bd; bd = bd->link) {
501 bd->flags &= ~BF_EVACUATED;
504 // for a compacted step, we need to allocate the bitmap
505 if (stp->is_compacted) {
506 nat bitmap_size; // in bytes
507 bdescr *bitmap_bdescr;
510 bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);
512 if (bitmap_size > 0) {
513 bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
515 stp->bitmap = bitmap_bdescr;
516 bitmap = bitmap_bdescr->start;
518 IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p",
519 bitmap_size, bitmap););
521 // don't forget to fill it with zeros!
522 memset(bitmap, 0, bitmap_size);
524 // For each block in this step, point to its bitmap from the
526 for (bd=stp->old_blocks; bd != NULL; bd = bd->link) {
527 bd->u.bitmap = bitmap;
528 bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);
530 // Also at this point we set the BF_COMPACTED flag
531 // for this block. The invariant is that
532 // BF_COMPACTED is always unset, except during GC
533 // when it is set on those blocks which will be
535 bd->flags |= BF_COMPACTED;
542 /* make sure the older generations have at least one block to
543 * allocate into (this makes things easier for copy(), see below).
545 for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
546 for (s = 0; s < generations[g].n_steps; s++) {
547 stp = &generations[g].steps[s];
548 if (stp->hp_bd == NULL) {
549 ASSERT(stp->blocks == NULL);
550 bd = gc_alloc_block(stp);
554 if (stp->scavd_hp == NULL) {
555 gc_alloc_scavd_block(stp);
558 /* Set the scan pointer for older generations: remember we
559 * still have to scavenge objects that have been promoted. */
561 stp->scan_bd = stp->hp_bd;
562 stp->new_large_objects = NULL;
563 stp->scavenged_large_objects = NULL;
564 stp->n_scavenged_large_blocks = 0;
567 /* Move the private mutable lists from each capability onto the
568 * main mutable list for the generation.
570 for (i = 0; i < n_capabilities; i++) {
571 for (bd = capabilities[i].mut_lists[g];
572 bd->link != NULL; bd = bd->link) {
575 bd->link = generations[g].mut_list;
576 generations[g].mut_list = capabilities[i].mut_lists[g];
577 capabilities[i].mut_lists[g] = allocBlock();
581 /* Allocate a mark stack if we're doing a major collection.
584 mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
585 mark_stack = (StgPtr *)mark_stack_bdescr->start;
586 mark_sp = mark_stack;
587 mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
589 mark_stack_bdescr = NULL;
592 eager_promotion = rtsTrue; // for now
594 /* -----------------------------------------------------------------------
595 * follow all the roots that we know about:
596 * - mutable lists from each generation > N
597 * we want to *scavenge* these roots, not evacuate them: they're not
598 * going to move in this GC.
599 * Also: do them in reverse generation order. This is because we
600 * often want to promote objects that are pointed to by older
601 * generations early, so we don't have to repeatedly copy them.
602 * Doing the generations in reverse order ensures that we don't end
603 * up in the situation where we want to evac an object to gen 3 and
604 * it has already been evaced to gen 2.
608 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
609 generations[g].saved_mut_list = generations[g].mut_list;
610 generations[g].mut_list = allocBlock();
611 // mut_list always has at least one block.
614 for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
615 IF_PAR_DEBUG(verbose, printMutableList(&generations[g]));
616 scavenge_mutable_list(&generations[g]);
618 for (st = generations[g].n_steps-1; st >= 0; st--) {
619 scavenge(&generations[g].steps[st]);
624 /* follow roots from the CAF list (used by GHCi)
629 /* follow all the roots that the application knows about.
632 get_roots(mark_root);
635 /* And don't forget to mark the TSO if we got here direct from
637 /* Not needed in a seq version?
639 CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
643 // Mark the entries in the GALA table of the parallel system
644 markLocalGAs(major_gc);
645 // Mark all entries on the list of pending fetches
646 markPendingFetches(major_gc);
649 /* Mark the weak pointer list, and prepare to detect dead weak
652 mark_weak_ptr_list(&weak_ptr_list);
653 old_weak_ptr_list = weak_ptr_list;
654 weak_ptr_list = NULL;
655 weak_stage = WeakPtrs;
657 /* The all_threads list is like the weak_ptr_list.
658 * See traverse_weak_ptr_list() for the details.
660 old_all_threads = all_threads;
661 all_threads = END_TSO_QUEUE;
662 resurrected_threads = END_TSO_QUEUE;
664 /* Mark the stable pointer table.
666 markStablePtrTable(mark_root);
668 /* -------------------------------------------------------------------------
669 * Repeatedly scavenge all the areas we know about until there's no
670 * more scavenging to be done.
677 // scavenge static objects
678 if (major_gc && static_objects != END_OF_STATIC_LIST) {
679 IF_DEBUG(sanity, checkStaticObjects(static_objects));
683 /* When scavenging the older generations: Objects may have been
684 * evacuated from generations <= N into older generations, and we
685 * need to scavenge these objects. We're going to try to ensure that
686 * any evacuations that occur move the objects into at least the
687 * same generation as the object being scavenged, otherwise we
688 * have to create new entries on the mutable list for the older
692 // scavenge each step in generations 0..maxgen
698 // scavenge objects in compacted generation
699 if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
700 (mark_stack_bdescr != NULL && !mark_stack_empty())) {
701 scavenge_mark_stack();
705 for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
706 for (st = generations[gen].n_steps; --st >= 0; ) {
707 if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
710 stp = &generations[gen].steps[st];
712 if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
717 if (stp->new_large_objects != NULL) {
726 if (flag) { goto loop; }
728 // must be last... invariant is that everything is fully
729 // scavenged at this point.
730 if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
735 /* Update the pointers from the task list - these are
736 * treated as weak pointers because we want to allow a main thread
737 * to get a BlockedOnDeadMVar exception in the same way as any other
738 * thread. Note that the threads should all have been retained by
739 * GC by virtue of being on the all_threads list; we're just
740 * updating pointers here.
745 for (task = all_tasks; task != NULL; task = task->all_link) {
746 if (!task->stopped && task->tso) {
747 ASSERT(task->tso->bound == task);
748 tso = (StgTSO *) isAlive((StgClosure *)task->tso);
750 barf("task %p: main thread %d has been GC'd",
764 // Reconstruct the Global Address tables used in GUM
765 rebuildGAtables(major_gc);
766 IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
769 // Now see which stable names are still alive.
772 // Tidy the end of the to-space chains
773 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
774 for (s = 0; s < generations[g].n_steps; s++) {
775 stp = &generations[g].steps[s];
776 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
777 ASSERT(Bdescr(stp->hp) == stp->hp_bd);
778 stp->hp_bd->free = stp->hp;
779 Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
785 // We call processHeapClosureForDead() on every closure destroyed during
786 // the current garbage collection, so we invoke LdvCensusForDead().
787 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
788 || RtsFlags.ProfFlags.bioSelector != NULL)
792 // NO MORE EVACUATION AFTER THIS POINT!
793 // Finally: compaction of the oldest generation.
794 if (major_gc && oldest_gen->steps[0].is_compacted) {
795 // save number of blocks for stats
796 oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
800 IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
802 /* run through all the generations/steps and tidy up
804 copied = new_blocks * BLOCK_SIZE_W;
805 scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
806 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
809 generations[g].collections++; // for stats
812 // Count the mutable list as bytes "copied" for the purposes of
813 // stats. Every mutable list is copied during every GC.
815 nat mut_list_size = 0;
816 for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
817 mut_list_size += bd->free - bd->start;
819 copied += mut_list_size;
821 IF_DEBUG(gc, debugBelch("mut_list_size: %d (%d vars, %d arrays, %d others)\n", mut_list_size * sizeof(W_), mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS));
824 for (s = 0; s < generations[g].n_steps; s++) {
826 stp = &generations[g].steps[s];
828 if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
829 // stats information: how much we copied
831 copied -= stp->hp_bd->start + BLOCK_SIZE_W -
833 scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
837 // for generations we collected...
840 /* free old memory and shift to-space into from-space for all
841 * the collected steps (except the allocation area). These
842 * freed blocks will probably be quickly recycled.
844 if (!(g == 0 && s == 0)) {
845 if (stp->is_compacted) {
846 // for a compacted step, just shift the new to-space
847 // onto the front of the now-compacted existing blocks.
848 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
849 bd->flags &= ~BF_EVACUATED; // now from-space
851 // tack the new blocks on the end of the existing blocks
852 if (stp->old_blocks != NULL) {
853 for (bd = stp->old_blocks; bd != NULL; bd = next) {
854 // NB. this step might not be compacted next
855 // time, so reset the BF_COMPACTED flags.
856 // They are set before GC if we're going to
857 // compact. (search for BF_COMPACTED above).
858 bd->flags &= ~BF_COMPACTED;
861 bd->link = stp->blocks;
864 stp->blocks = stp->old_blocks;
866 // add the new blocks to the block tally
867 stp->n_blocks += stp->n_old_blocks;
868 ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
870 freeChain(stp->old_blocks);
871 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
872 bd->flags &= ~BF_EVACUATED; // now from-space
875 stp->old_blocks = NULL;
876 stp->n_old_blocks = 0;
879 /* LARGE OBJECTS. The current live large objects are chained on
880 * scavenged_large, having been moved during garbage
881 * collection from large_objects. Any objects left on
882 * large_objects list are therefore dead, so we free them here.
884 for (bd = stp->large_objects; bd != NULL; bd = next) {
890 // update the count of blocks used by large objects
891 for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
892 bd->flags &= ~BF_EVACUATED;
894 stp->large_objects = stp->scavenged_large_objects;
895 stp->n_large_blocks = stp->n_scavenged_large_blocks;
898 // for older generations...
900 /* For older generations, we need to append the
901 * scavenged_large_object list (i.e. large objects that have been
902 * promoted during this GC) to the large_object list for that step.
904 for (bd = stp->scavenged_large_objects; bd; bd = next) {
906 bd->flags &= ~BF_EVACUATED;
907 dbl_link_onto(bd, &stp->large_objects);
910 // add the new blocks we promoted during this GC
911 stp->n_large_blocks += stp->n_scavenged_large_blocks;
916 /* Reset the sizes of the older generations when we do a major
919 * CURRENT STRATEGY: make all generations except zero the same size.
920 * We have to stay within the maximum heap size, and leave a certain
921 * percentage of the maximum heap size available to allocate into.
923 if (major_gc && RtsFlags.GcFlags.generations > 1) {
924 nat live, size, min_alloc;
925 nat max = RtsFlags.GcFlags.maxHeapSize;
926 nat gens = RtsFlags.GcFlags.generations;
928 // live in the oldest generations
929 live = oldest_gen->steps[0].n_blocks +
930 oldest_gen->steps[0].n_large_blocks;
932 // default max size for all generations except zero
933 size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
934 RtsFlags.GcFlags.minOldGenSize);
936 // minimum size for generation zero
937 min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
938 RtsFlags.GcFlags.minAllocAreaSize);
940 // Auto-enable compaction when the residency reaches a
941 // certain percentage of the maximum heap size (default: 30%).
942 if (RtsFlags.GcFlags.generations > 1 &&
943 (RtsFlags.GcFlags.compact ||
945 oldest_gen->steps[0].n_blocks >
946 (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
947 oldest_gen->steps[0].is_compacted = 1;
948 // debugBelch("compaction: on\n", live);
950 oldest_gen->steps[0].is_compacted = 0;
951 // debugBelch("compaction: off\n", live);
954 // if we're going to go over the maximum heap size, reduce the
955 // size of the generations accordingly. The calculation is
956 // different if compaction is turned on, because we don't need
957 // to double the space required to collect the old generation.
960 // this test is necessary to ensure that the calculations
961 // below don't have any negative results - we're working
962 // with unsigned values here.
963 if (max < min_alloc) {
967 if (oldest_gen->steps[0].is_compacted) {
968 if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
969 size = (max - min_alloc) / ((gens - 1) * 2 - 1);
972 if ( (size * (gens - 1) * 2) + min_alloc > max ) {
973 size = (max - min_alloc) / ((gens - 1) * 2);
983 debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
984 min_alloc, size, max);
987 for (g = 0; g < gens; g++) {
988 generations[g].max_blocks = size;
992 // Guess the amount of live data for stats.
995 /* Free the small objects allocated via allocate(), since this will
996 * all have been copied into G0S1 now.
998 if (small_alloc_list != NULL) {
999 freeChain(small_alloc_list);
1001 small_alloc_list = NULL;
1005 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
1007 // Start a new pinned_object_block
1008 pinned_object_block = NULL;
1010 /* Free the mark stack.
1012 if (mark_stack_bdescr != NULL) {
1013 freeGroup(mark_stack_bdescr);
1016 /* Free any bitmaps.
1018 for (g = 0; g <= N; g++) {
1019 for (s = 0; s < generations[g].n_steps; s++) {
1020 stp = &generations[g].steps[s];
1021 if (stp->bitmap != NULL) {
1022 freeGroup(stp->bitmap);
1028 /* Two-space collector:
1029 * Free the old to-space, and estimate the amount of live data.
1031 if (RtsFlags.GcFlags.generations == 1) {
1034 if (g0s0->old_blocks != NULL) {
1035 freeChain(g0s0->old_blocks);
1037 for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
1038 bd->flags = 0; // now from-space
1040 g0s0->old_blocks = g0s0->blocks;
1041 g0s0->n_old_blocks = g0s0->n_blocks;
1042 g0s0->blocks = saved_nursery;
1043 g0s0->n_blocks = saved_n_blocks;
1045 /* For a two-space collector, we need to resize the nursery. */
1047 /* set up a new nursery. Allocate a nursery size based on a
1048 * function of the amount of live data (by default a factor of 2)
1049 * Use the blocks from the old nursery if possible, freeing up any
1052 * If we get near the maximum heap size, then adjust our nursery
1053 * size accordingly. If the nursery is the same size as the live
1054 * data (L), then we need 3L bytes. We can reduce the size of the
1055 * nursery to bring the required memory down near 2L bytes.
1057 * A normal 2-space collector would need 4L bytes to give the same
1058 * performance we get from 3L bytes, reducing to the same
1059 * performance at 2L bytes.
1061 blocks = g0s0->n_old_blocks;
1063 if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
1064 blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
1065 RtsFlags.GcFlags.maxHeapSize ) {
1066 long adjusted_blocks; // signed on purpose
1069 adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
1070 IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
1071 pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
1072 if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
1075 blocks = adjusted_blocks;
1078 blocks *= RtsFlags.GcFlags.oldGenFactor;
1079 if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
1080 blocks = RtsFlags.GcFlags.minAllocAreaSize;
1083 resizeNurseries(blocks);
1086 /* Generational collector:
1087 * If the user has given us a suggested heap size, adjust our
1088 * allocation area to make best use of the memory available.
1091 if (RtsFlags.GcFlags.heapSizeSuggestion) {
1093 nat needed = calcNeeded(); // approx blocks needed at next GC
1095 /* Guess how much will be live in generation 0 step 0 next time.
1096 * A good approximation is obtained by finding the
1097 * percentage of g0s0 that was live at the last minor GC.
1100 g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
1103 /* Estimate a size for the allocation area based on the
1104 * information available. We might end up going slightly under
1105 * or over the suggested heap size, but we should be pretty
1108 * Formula: suggested - needed
1109 * ----------------------------
1110 * 1 + g0s0_pcnt_kept/100
1112 * where 'needed' is the amount of memory needed at the next
1113 * collection for collecting all steps except g0s0.
1116 (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
1117 (100 + (long)g0s0_pcnt_kept);
1119 if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
1120 blocks = RtsFlags.GcFlags.minAllocAreaSize;
1123 resizeNurseries((nat)blocks);
1126 // we might have added extra large blocks to the nursery, so
1127 // resize back to minAllocAreaSize again.
1128 resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
1132 // mark the garbage collected CAFs as dead
1133 #if 0 && defined(DEBUG) // doesn't work at the moment
1134 if (major_gc) { gcCAFs(); }
1138 // resetStaticObjectForRetainerProfiling() must be called before
1140 resetStaticObjectForRetainerProfiling();
1143 // zero the scavenged static object list
1145 zero_static_object_list(scavenged_static_objects);
1148 // Reset the nursery
1151 // start any pending finalizers
1153 scheduleFinalizers(last_free_capability, old_weak_ptr_list);
1156 // send exceptions to any threads which were about to die
1157 resurrectThreads(resurrected_threads);
1159 // Update the stable pointer hash table.
1160 updateStablePtrTable(major_gc);
1162 // check sanity after GC
1163 IF_DEBUG(sanity, checkSanity());
1165 // extra GC trace info
1166 IF_DEBUG(gc, statDescribeGens());
1169 // symbol-table based profiling
1170 /* heapCensus(to_blocks); */ /* ToDo */
1173 // restore enclosing cost centre
1179 // check for memory leaks if DEBUG is on
1183 #ifdef RTS_GTK_FRONTPANEL
1184 if (RtsFlags.GcFlags.frontpanel) {
1185 updateFrontPanelAfterGC( N, live );
1189 // ok, GC over: tell the stats department what happened.
1190 stat_endGC(allocated, live, copied, scavd_copied, N);
1192 #if defined(RTS_USER_SIGNALS)
1193 // unblock signals again
1194 unblockUserSignals();
1203 /* -----------------------------------------------------------------------------
1206 traverse_weak_ptr_list is called possibly many times during garbage
1207 collection. It returns a flag indicating whether it did any work
1208 (i.e. called evacuate on any live pointers).
1210 Invariant: traverse_weak_ptr_list is called when the heap is in an
1211 idempotent state. That means that there are no pending
1212 evacuate/scavenge operations. This invariant helps the weak
1213 pointer code decide which weak pointers are dead - if there are no
1214 new live weak pointers, then all the currently unreachable ones are
1217 For generational GC: we just don't try to finalize weak pointers in
1218 older generations than the one we're collecting. This could
1219 probably be optimised by keeping per-generation lists of weak
1220 pointers, but for a few weak pointers this scheme will work.
1222 There are three distinct stages to processing weak pointers:
1224 - weak_stage == WeakPtrs
1226 We process all the weak pointers whose keys are alive (evacuate
1227 their values and finalizers), and repeat until we can find no new
1228 live keys. If no live keys are found in this pass, then we
1229 evacuate the finalizers of all the dead weak pointers in order to
1232 - weak_stage == WeakThreads
1234 Now, we discover which *threads* are still alive. Pointers to
1235 threads from the all_threads and main thread lists are the
1236 weakest of all: a pointer from the finalizer of a dead weak
1237 pointer can keep a thread alive. Any threads found to be unreachable
1238 are evacuated and placed on the resurrected_threads list so we
1239 can send them a signal later.
1241 - weak_stage == WeakDone
1243 No more evacuation is done.
1245 -------------------------------------------------------------------------- */
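/* Condensed picture of the stage machine described above (illustrative
 * only): weak_stage is set to WeakPtrs in GarbageCollect(); each call of
 * traverse_weak_ptr_list() below either finds newly-live keys/threads
 * (and returns rtsTrue so the caller scavenges again), or advances the
 * stage:
 *
 *     WeakPtrs    --(no new live keys found)-->   WeakThreads
 *     WeakThreads --(dead threads resurrected)--> WeakDone
 *     WeakDone:   nothing left to do, returns rtsFalse.
 */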
1248 traverse_weak_ptr_list(void)
1250 StgWeak *w, **last_w, *next_w;
1252 rtsBool flag = rtsFalse;
1254 switch (weak_stage) {
1260 /* doesn't matter where we evacuate values/finalizers to, since
1261 * these pointers are treated as roots (iff the keys are alive).
1265 last_w = &old_weak_ptr_list;
1266 for (w = old_weak_ptr_list; w != NULL; w = next_w) {
1268 /* There might be a DEAD_WEAK on the list if finalizeWeak# was
1269 * called on a live weak pointer object. Just remove it.
1271 if (w->header.info == &stg_DEAD_WEAK_info) {
1272 next_w = ((StgDeadWeak *)w)->link;
1277 switch (get_itbl(w)->type) {
1280 next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
1285 /* Now, check whether the key is reachable.
1287 new = isAlive(w->key);
1290 // evacuate the value and finalizer
1291 w->value = evacuate(w->value);
1292 w->finalizer = evacuate(w->finalizer);
1293 // remove this weak ptr from the old_weak_ptr list
1295 // and put it on the new weak ptr list
1297 w->link = weak_ptr_list;
1300 IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p",
1305 last_w = &(w->link);
1311 barf("traverse_weak_ptr_list: not WEAK");
1315 /* If we didn't make any changes, then we can go round and kill all
1316 * the dead weak pointers. The old_weak_ptr list is used as a list
1317 * of pending finalizers later on.
1319 if (flag == rtsFalse) {
1320 for (w = old_weak_ptr_list; w; w = w->link) {
1321 w->finalizer = evacuate(w->finalizer);
1324 // Next, move to the WeakThreads stage after fully
1325 // scavenging the finalizers we've just evacuated.
1326 weak_stage = WeakThreads;
1332 /* Now deal with the all_threads list, which behaves somewhat like
1333 * the weak ptr list. If we discover any threads that are about to
1334 * become garbage, we wake them up and administer an exception.
1337 StgTSO *t, *tmp, *next, **prev;
1339 prev = &old_all_threads;
1340 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1342 tmp = (StgTSO *)isAlive((StgClosure *)t);
1348 ASSERT(get_itbl(t)->type == TSO);
1349 switch (t->what_next) {
1350 case ThreadRelocated:
1355 case ThreadComplete:
1356 // finished or died. The thread might still be alive, but we
1357 // don't keep it on the all_threads list. Don't forget to
1358 // stub out its global_link field.
1359 next = t->global_link;
1360 t->global_link = END_TSO_QUEUE;
1367 // Threads blocked on black holes: if the black hole
1368 // is alive, then the thread is alive too.
1369 if (tmp == NULL && t->why_blocked == BlockedOnBlackHole) {
1370 if (isAlive(t->block_info.closure)) {
1371 t = (StgTSO *)evacuate((StgClosure *)t);
1378 // not alive (yet): leave this thread on the
1379 // old_all_threads list.
1380 prev = &(t->global_link);
1381 next = t->global_link;
1384 // alive: move this thread onto the all_threads list.
1385 next = t->global_link;
1386 t->global_link = all_threads;
1393 /* If we evacuated any threads, we need to go back to the scavenger.
1395 if (flag) return rtsTrue;
1397 /* And resurrect any threads which were about to become garbage.
1400 StgTSO *t, *tmp, *next;
1401 for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
1402 next = t->global_link;
1403 tmp = (StgTSO *)evacuate((StgClosure *)t);
1404 tmp->global_link = resurrected_threads;
1405 resurrected_threads = tmp;
1409 /* Finally, we can update the blackhole_queue. This queue
1410 * simply strings together TSOs blocked on black holes, it is
1411 * not intended to keep anything alive. Hence, we do not follow
1412 * pointers on the blackhole_queue until now, when we have
1413 * determined which TSOs are otherwise reachable. We know at
1414 * this point that all TSOs have been evacuated, however.
1418 for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
1419 *pt = (StgTSO *)isAlive((StgClosure *)*pt);
1420 ASSERT(*pt != NULL);
1424 weak_stage = WeakDone; // *now* we're done,
1425 return rtsTrue; // but one more round of scavenging, please
1428 barf("traverse_weak_ptr_list");
1434 /* -----------------------------------------------------------------------------
1435 After GC, the live weak pointer list may have forwarding pointers
1436 on it, because a weak pointer object was evacuated after being
1437 moved to the live weak pointer list. We remove those forwarding
1440 Also, we don't consider weak pointer objects to be reachable, but
1441 we must nevertheless consider them to be "live" and retain them.
1442 Therefore any weak pointer objects which haven't as yet been
1443 evacuated need to be evacuated now.
1444 -------------------------------------------------------------------------- */
1448 mark_weak_ptr_list ( StgWeak **list )
1450 StgWeak *w, **last_w;
1453 for (w = *list; w; w = w->link) {
1454 // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
1455 ASSERT(w->header.info == &stg_DEAD_WEAK_info
1456 || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
1457 w = (StgWeak *)evacuate((StgClosure *)w);
1459 last_w = &(w->link);
1463 /* -----------------------------------------------------------------------------
1464 isAlive determines whether the given closure is still alive (after
1465 a garbage collection) or not. It returns the new address of the
1466 closure if it is alive, or NULL otherwise.
1468 NOTE: Use it before compaction only!
1469 -------------------------------------------------------------------------- */
1473 isAlive(StgClosure *p)
1475 const StgInfoTable *info;
1480 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
1483 // ignore static closures
1485 // ToDo: for static closures, check the static link field.
1486 // Problem here is that we sometimes don't set the link field, eg.
1487 // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
1489 if (!HEAP_ALLOCED(p)) {
1493 // ignore closures in generations that we're not collecting.
1495 if (bd->gen_no > N) {
1499 // if it's a pointer into to-space, then we're done
1500 if (bd->flags & BF_EVACUATED) {
1504 // large objects use the evacuated flag
1505 if (bd->flags & BF_LARGE) {
1509 // check the mark bit for compacted steps
1510 if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
1514 switch (info->type) {
1519 case IND_OLDGEN: // rely on compatible layout with StgInd
1520 case IND_OLDGEN_PERM:
1521 // follow indirections
1522 p = ((StgInd *)p)->indirectee;
1527 return ((StgEvacuated *)p)->evacuee;
1530 if (((StgTSO *)p)->what_next == ThreadRelocated) {
1531 p = (StgClosure *)((StgTSO *)p)->link;
1544 mark_root(StgClosure **root)
1546 *root = evacuate(*root);
1550 upd_evacuee(StgClosure *p, StgClosure *dest)
1552 // not true: (ToDo: perhaps it should be)
1553 // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
1554 SET_INFO(p, &stg_EVACUATED_info);
1555 ((StgEvacuated *)p)->evacuee = dest;
1559 STATIC_INLINE StgClosure *
1560 copy(StgClosure *src, nat size, step *stp)
1566 nat size_org = size;
1569 TICK_GC_WORDS_COPIED(size);
1570 /* Find out where we're going, using the handy "to" pointer in
1571 * the step of the source object. If it turns out we need to
1572 * evacuate to an older generation, adjust it here (see comment
1575 if (stp->gen_no < evac_gen) {
1576 if (eager_promotion) {
1577 stp = &generations[evac_gen].steps[0];
1579 failed_to_evac = rtsTrue;
1583 /* chain a new block onto the to-space for the destination step if
1586 if (stp->hp + size >= stp->hpLim) {
1587 gc_alloc_block(stp);
1592 stp->hp = to + size;
1593 for (i = 0; i < size; i++) { // unroll for small i
1596 upd_evacuee((StgClosure *)from,(StgClosure *)to);
1599 // We store the size of the just evacuated object in the LDV word so that
1600 // the profiler can guess the position of the next object later.
1601 SET_EVACUAEE_FOR_LDV(from, size_org);
1603 return (StgClosure *)to;
1606 // Same as copy() above, except the object will be allocated in memory
1607 // that will not be scavenged. Used for objects that have no pointer
1609 STATIC_INLINE StgClosure *
1610 copy_noscav(StgClosure *src, nat size, step *stp)
1616 nat size_org = size;
1619 TICK_GC_WORDS_COPIED(size);
1620 /* Find out where we're going, using the handy "to" pointer in
1621 * the step of the source object. If it turns out we need to
1622 * evacuate to an older generation, adjust it here (see comment
1625 if (stp->gen_no < evac_gen) {
1626 if (eager_promotion) {
1627 stp = &generations[evac_gen].steps[0];
1629 failed_to_evac = rtsTrue;
1633 /* chain a new block onto the to-space for the destination step if
1636 if (stp->scavd_hp + size >= stp->scavd_hpLim) {
1637 gc_alloc_scavd_block(stp);
1642 stp->scavd_hp = to + size;
1643 for (i = 0; i < size; i++) { // unroll for small i
1646 upd_evacuee((StgClosure *)from,(StgClosure *)to);
1649 // We store the size of the just evacuated object in the LDV word so that
1650 // the profiler can guess the position of the next object later.
1651 SET_EVACUAEE_FOR_LDV(from, size_org);
1653 return (StgClosure *)to;
1656 /* Special version of copy() for when we only want to copy the info
1657 * pointer of an object, but reserve some padding after it. This is
1658 * used to optimise evacuation of BLACKHOLEs.
1663 copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
1668 nat size_to_copy_org = size_to_copy;
1671 TICK_GC_WORDS_COPIED(size_to_copy);
1672 if (stp->gen_no < evac_gen) {
1673 if (eager_promotion) {
1674 stp = &generations[evac_gen].steps[0];
1676 failed_to_evac = rtsTrue;
1680 if (stp->hp + size_to_reserve >= stp->hpLim) {
1681 gc_alloc_block(stp);
1684 for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
1689 stp->hp += size_to_reserve;
1690 upd_evacuee(src,(StgClosure *)dest);
1692 // We store the size of the just evacuated object in the LDV word so that
1693 // the profiler can guess the position of the next object later.
1694 // size_to_copy_org is wrong because the closure already occupies size_to_reserve
1696 SET_EVACUAEE_FOR_LDV(src, size_to_reserve);
1698 if (size_to_reserve - size_to_copy_org > 0)
1699 FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
1701 return (StgClosure *)dest;
1705 /* -----------------------------------------------------------------------------
1706 Evacuate a large object
1708 This just consists of removing the object from the (doubly-linked)
1709 step->large_objects list, and linking it on to the (singly-linked)
1710 step->new_large_objects list, from where it will be scavenged later.
1712 Convention: bd->flags has BF_EVACUATED set for a large object
1713 that has been evacuated, or unset otherwise.
1714 -------------------------------------------------------------------------- */
1718 evacuate_large(StgPtr p)
1720 bdescr *bd = Bdescr(p);
1723 // object must be at the beginning of the block (or be a ByteArray)
1724 ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
1725 (((W_)p & BLOCK_MASK) == 0));
1727 // already evacuated?
1728 if (bd->flags & BF_EVACUATED) {
1729 /* Don't forget to set the failed_to_evac flag if we didn't get
1730 * the desired destination (see comments in evacuate()).
1732 if (bd->gen_no < evac_gen) {
1733 failed_to_evac = rtsTrue;
1734 TICK_GC_FAILED_PROMOTION();
1740 // remove from large_object list
1742 bd->u.back->link = bd->link;
1743 } else { // first object in the list
1744 stp->large_objects = bd->link;
1747 bd->link->u.back = bd->u.back;
1750 /* link it on to the evacuated large object list of the destination step
1753 if (stp->gen_no < evac_gen) {
1754 if (eager_promotion) {
1755 stp = &generations[evac_gen].steps[0];
1757 failed_to_evac = rtsTrue;
1762 bd->gen_no = stp->gen_no;
1763 bd->link = stp->new_large_objects;
1764 stp->new_large_objects = bd;
1765 bd->flags |= BF_EVACUATED;
1768 /* -----------------------------------------------------------------------------
1771 This is called (eventually) for every live object in the system.
1773 The caller to evacuate specifies a desired generation in the
1774 evac_gen global variable. The following conditions apply to
1775 evacuating an object which resides in generation M when we're
1776 collecting up to generation N:
1780      if  M >= evac_gen: if M > N, do nothing; else evac to step->to
1782      if  M <  evac_gen: evac to evac_gen, step 0
1784 if the object is already evacuated, then we check which generation it is in:
1787      if  M >= evac_gen  do nothing
1788      if  M <  evac_gen  set failed_to_evac flag to indicate that we
1789                         didn't manage to evacuate this object into evac_gen.
1794 evacuate() is the single most important function performance-wise
1795 in the GC. Various things have been tried to speed it up, but as
1796 far as I can tell the code generated by gcc 3.2 with -O2 is about
1797 as good as it's going to get. We pass the argument to evacuate()
1798 in a register using the 'regparm' attribute (see the prototype for
1799 evacuate() near the top of this file).
1801 Changing evacuate() to take an (StgClosure **) rather than
1802 returning the new pointer seems attractive, because we can avoid
1803 writing back the pointer when it hasn't changed (eg. for a static
1804 object, or an object in a generation > N). However, I tried it and
1805 it doesn't help. One reason is that the (StgClosure **) pointer
1806 gets spilled to the stack inside evacuate(), resulting in far more
1807 extra reads/writes than we save.
1808 -------------------------------------------------------------------------- */
1810 REGPARM1 static StgClosure *
1811 evacuate(StgClosure *q)
1818 const StgInfoTable *info;
1821 ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));
1823 if (!HEAP_ALLOCED(q)) {
1825 if (!major_gc) return q;
1828 switch (info->type) {
1831 if (info->srt_bitmap != 0 &&
1832 *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
1833 *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
1834 static_objects = (StgClosure *)q;
1839 if (info->srt_bitmap != 0 &&
1840 *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
1841 *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
1842 static_objects = (StgClosure *)q;
1847 /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
1848 * on the CAF list, so don't do anything with it here (we'll
1849 * scavenge it later).
1851 if (((StgIndStatic *)q)->saved_info == NULL
1852 && *IND_STATIC_LINK((StgClosure *)q) == NULL) {
1853 *IND_STATIC_LINK((StgClosure *)q) = static_objects;
1854 static_objects = (StgClosure *)q;
1859 if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
1860 *STATIC_LINK(info,(StgClosure *)q) = static_objects;
1861 static_objects = (StgClosure *)q;
1865 case CONSTR_INTLIKE:
1866 case CONSTR_CHARLIKE:
1867 case CONSTR_NOCAF_STATIC:
1868 /* no need to put these on the static linked list, they don't need
1874 barf("evacuate(static): strange closure type %d", (int)(info->type));
1880 if (bd->gen_no > N) {
1881 /* Can't evacuate this object, because it's in a generation
1882 * older than the ones we're collecting. Let's hope that it's
1883 * in evac_gen or older, or we will have to arrange to track
1884 * this pointer using the mutable list.
1886 if (bd->gen_no < evac_gen) {
1888 failed_to_evac = rtsTrue;
1889 TICK_GC_FAILED_PROMOTION();
1894 if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {
1896 /* pointer into to-space: just return it. This normally
1897 * shouldn't happen, but allowing it makes certain things
1898 * slightly easier (e.g. the mutable list can contain the same
1899 * object twice).
1901 if (bd->flags & BF_EVACUATED) {
1902 if (bd->gen_no < evac_gen) {
1903 failed_to_evac = rtsTrue;
1904 TICK_GC_FAILED_PROMOTION();
1909 /* evacuate large objects by re-linking them onto a different list.
1911 if (bd->flags & BF_LARGE) {
1913 if (info->type == TSO &&
1914 ((StgTSO *)q)->what_next == ThreadRelocated) {
1915 q = (StgClosure *)((StgTSO *)q)->link;
1918 evacuate_large((P_)q);
1922 /* If the object is in a step that we're compacting, then we
1923 * need to use an alternative evacuate procedure.
1925 if (bd->flags & BF_COMPACTED) {
1926 if (!is_marked((P_)q,bd)) {
1928 if (mark_stack_full()) {
1929 mark_stack_overflowed = rtsTrue;
1932 push_mark_stack((P_)q);
1942 switch (info->type) {
1946 return copy(q,sizeW_fromITBL(info),stp);
1950 StgWord w = (StgWord)q->payload[0];
1951 if (q->header.info == Czh_con_info &&
1952 // unsigned, so always true: (StgChar)w >= MIN_CHARLIKE &&
1953 (StgChar)w <= MAX_CHARLIKE) {
1954 return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
1956 if (q->header.info == Izh_con_info &&
1957 (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
1958 return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
1961 return copy_noscav(q,sizeofW(StgHeader)+1,stp);
1967 return copy(q,sizeofW(StgHeader)+1,stp);
1971 return copy(q,sizeofW(StgThunk)+1,stp);
1976 #ifdef NO_PROMOTE_THUNKS
1977 if (bd->gen_no == 0 &&
1978 bd->step->no != 0 &&
1979 bd->step->no == generations[bd->gen_no].n_steps-1) {
1983 return copy(q,sizeofW(StgThunk)+2,stp);
1990 return copy(q,sizeofW(StgHeader)+2,stp);
1993 return copy_noscav(q,sizeofW(StgHeader)+2,stp);
1996 return copy(q,thunk_sizeW_fromITBL(info),stp);
2001 case IND_OLDGEN_PERM:
2004 return copy(q,sizeW_fromITBL(info),stp);
2007 return copy(q,bco_sizeW((StgBCO *)q),stp);
2010 case SE_CAF_BLACKHOLE:
2013 return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
2015 case THUNK_SELECTOR:
2019 if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
2020 return copy(q,THUNK_SELECTOR_sizeW(),stp);
2023 p = eval_thunk_selector(info->layout.selector_offset,
2027 return copy(q,THUNK_SELECTOR_sizeW(),stp);
2030 // q is still BLACKHOLE'd.
2031 thunk_selector_depth++;
2033 thunk_selector_depth--;
2035 // Update the THUNK_SELECTOR with an indirection to the
2036 // EVACUATED closure now at p. Why do this rather than
2037 // upd_evacuee(q,p)? Because we have an invariant that an
2038 // EVACUATED closure always points to an object in the
2039 // same or an older generation (required by the short-cut
2040 // test in the EVACUATED case, below).
2041 SET_INFO(q, &stg_IND_info);
2042 ((StgInd *)q)->indirectee = p;
2045 // We store the size of the just evacuated object in the
2046 // LDV word so that the profiler can guess the position of
2047 // the next object later.
2048 SET_EVACUAEE_FOR_LDV(q, THUNK_SELECTOR_sizeW());
2056 // follow chains of indirections, don't evacuate them
2057 q = ((StgInd*)q)->indirectee;
2069 case CATCH_STM_FRAME:
2070 case CATCH_RETRY_FRAME:
2071 case ATOMICALLY_FRAME:
2072 // shouldn't see these
2073 barf("evacuate: stack frame at %p\n", q);
2076 return copy(q,pap_sizeW((StgPAP*)q),stp);
2079 return copy(q,ap_sizeW((StgAP*)q),stp);
2082 return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
2085 /* Already evacuated, just return the forwarding address.
2086 * HOWEVER: if the requested destination generation (evac_gen) is
2087 * older than the actual generation (because the object was
2088 * already evacuated to a younger generation) then we have to
2089 * set the failed_to_evac flag to indicate that we couldn't
2090 * manage to promote the object to the desired generation.
2093 * Optimisation: the check is fairly expensive, but we can often
2094 * shortcut it if either the required generation is 0, or the
2095 * current object (the EVACUATED) is in a high enough generation.
2096 * We know that an EVACUATED always points to an object in the
2097 * same or an older generation. stp is the lowest step that the
2098 * current object would be evacuated to, so we only do the full
2099 * check if stp is too low.
2101 if (evac_gen > 0 && stp->gen_no < evac_gen) { // optimisation
2102 StgClosure *p = ((StgEvacuated*)q)->evacuee;
2103 if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
2104 failed_to_evac = rtsTrue;
2105 TICK_GC_FAILED_PROMOTION();
2108 return ((StgEvacuated*)q)->evacuee;
2111 // just copy the block
2112 return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);
2114 case MUT_ARR_PTRS_CLEAN:
2115 case MUT_ARR_PTRS_DIRTY:
2116 case MUT_ARR_PTRS_FROZEN:
2117 case MUT_ARR_PTRS_FROZEN0:
2118 // just copy the block
2119 return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);
2123 StgTSO *tso = (StgTSO *)q;
2125 /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
2127 if (tso->what_next == ThreadRelocated) {
2128 q = (StgClosure *)tso->link;
2132 /* To evacuate a small TSO, we need to relocate the update frame
2139 new_tso = (StgTSO *)copyPart((StgClosure *)tso,
2141 sizeofW(StgTSO), stp);
2142 move_TSO(tso, new_tso);
2143 for (p = tso->sp, q = new_tso->sp;
2144 p < tso->stack+tso->stack_size;) {
2148 return (StgClosure *)new_tso;
2155 //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
2156 to = copy(q,BLACKHOLE_sizeW(),stp);
2157 //ToDo: derive size etc from reverted IP
2158 //to = copy(q,size,stp);
2160 debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)",
2161 q, info_type(q), to, info_type(to)));
2166 ASSERT(sizeofW(StgBlockedFetch) >= MIN_NONUPD_SIZE);
2167 to = copy(q,sizeofW(StgBlockedFetch),stp);
2169 debugBelch("@@ evacuate: %p (%s) to %p (%s)",
2170 q, info_type(q), to, info_type(to)));
2177 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
2178 to = copy(q,sizeofW(StgFetchMe),stp);
2180 debugBelch("@@ evacuate: %p (%s) to %p (%s)",
2181 q, info_type(q), to, info_type(to)));
2185 ASSERT(sizeofW(StgBlockedFetch) >= MIN_UPD_SIZE);
2186 to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
2188 debugBelch("@@ evacuate: %p (%s) to %p (%s)",
2189 q, info_type(q), to, info_type(to)));
2194 return copy(q,sizeofW(StgTRecHeader),stp);
2196 case TVAR_WAIT_QUEUE:
2197 return copy(q,sizeofW(StgTVarWaitQueue),stp);
2200 return copy(q,sizeofW(StgTVar),stp);
2203 return copy(q,sizeofW(StgTRecChunk),stp);
2206 barf("evacuate: strange closure type %d", (int)(info->type));
2212 /* -----------------------------------------------------------------------------
2213 Evaluate a THUNK_SELECTOR if possible.
2215 returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
2216 a closure pointer if we evaluated it and this is the result. Note
2217 that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
2218 reducing it to HNF, just that we have eliminated the selection.
2219 The result might be another thunk, or even another THUNK_SELECTOR.
2221 If the return value is non-NULL, the original selector thunk has
2222 been BLACKHOLE'd, and should be updated with an indirection or a
2223 forwarding pointer. If the return value is NULL, then the selector
2227 ToDo: the treatment of THUNK_SELECTORS could be improved in the
2228 following way (from a suggestion by Ian Lynagh):
2230 We can have a chain like this:
2234    sel_0 --> (a,b)   where  a = sel_0 --> (a,b)
2236                      and that inner a = sel_0 --> ...  and so on,
2238 and the depth limit means we don't go all the way to the end of the
2239 chain, which results in a space leak. This affects the recursive
2240 call to evacuate() in the THUNK_SELECTOR case in evacuate(): *not*
2241 the recursive call to eval_thunk_selector() in
2242 eval_thunk_selector().
2244 We could eliminate the depth bound in this case, in the following
2247 - traverse the chain once to discover the *value* of the
2248 THUNK_SELECTOR. Mark all THUNK_SELECTORS that we
2249 visit on the way as having been visited already (somehow).
2251 - in a second pass, traverse the chain again updating all
2252 THUNK_SELECTORS that we find on the way with indirections to
2255 - if we encounter a "marked" THUNK_SELECTOR in a normal
2256 evacuate(), we know it can't be updated so just evac it.
2258 Program that illustrates the problem:
2261 foo (x:xs) = let (ys, zs) = foo xs
2262 in if x >= 0 then (x:ys, zs) else (ys, x:zs)
2264 main = bar [1..(100000000::Int)]
2265 bar xs = (\(ys, zs) -> print ys >> print zs) (foo xs)
2267 -------------------------------------------------------------------------- */
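/* Rough sketch of the two-pass scheme suggested above (not implemented;
 * every name below is hypothetical and the "visited" marking mechanism is
 * left abstract):
 */
#if 0
static StgClosure *
collapse_selector_chain (StgSelector *p)
{
    // pass 1: follow the chain of selector thunks to its final value,
    // marking each THUNK_SELECTOR visited so that a later evacuate()
    // knows not to try to update it.
    StgClosure *val = walk_selector_chain_marking(p);      // hypothetical helper

    // pass 2: walk the chain again, overwriting each marked
    // THUNK_SELECTOR with an indirection to val.
    overwrite_chain_with_indirections(p, val);              // hypothetical helper

    return val;
}
#endif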
2269 static inline rtsBool
2270 is_to_space ( StgClosure *p )
2274 bd = Bdescr((StgPtr)p);
2275 if (HEAP_ALLOCED(p) &&
2276 ((bd->flags & BF_EVACUATED)
2277 || ((bd->flags & BF_COMPACTED) &&
2278 is_marked((P_)p,bd)))) {
2286 eval_thunk_selector( nat field, StgSelector * p )
2289 const StgInfoTable *info_ptr;
2290 StgClosure *selectee;
2292 selectee = p->selectee;
2294 // Save the real info pointer (NOTE: not the same as get_itbl()).
2295 info_ptr = p->header.info;
2297 // If the THUNK_SELECTOR is in a generation that we are not
2298 // collecting, then bail out early. We won't be able to save any
2299 // space in any case, and updating with an indirection is trickier
2301 if (Bdescr((StgPtr)p)->gen_no > N) {
2305 // BLACKHOLE the selector thunk, since it is now under evaluation.
2306 // This is important to stop us going into an infinite loop if
2307 // this selector thunk eventually refers to itself.
2308 SET_INFO(p,&stg_BLACKHOLE_info);
2312 // We don't want to end up in to-space, because this causes
2313 // problems when the GC later tries to evacuate the result of
2314 // eval_thunk_selector(). There are various ways this could
2317 // 1. following an IND_STATIC
2319 // 2. when the old generation is compacted, the mark phase updates
2320 // from-space pointers to be to-space pointers, and we can't
2321 // reliably tell which we're following (eg. from an IND_STATIC).
2323 // 3. compacting GC again: if we're looking at a constructor in
2324 // the compacted generation, it might point directly to objects
2325 // in to-space. We must bail out here, otherwise doing the selection
2326 // will result in a to-space pointer being returned.
2328 // (1) is dealt with using a BF_EVACUATED test on the
2329 // selectee. (2) and (3): we can tell if we're looking at an
2330 // object in the compacted generation that might point to
2331 // to-space objects by testing that (a) it is BF_COMPACTED, (b)
2332 // the compacted generation is being collected, and (c) the
2333 // object is marked. Only a marked object may have pointers that
2334 // point to to-space objects, because that happens when
2337 // The to-space test is now embodied in the is_to_space() inline
2338 // function, as it is re-used below.
2340 if (is_to_space(selectee)) {
2344 info = get_itbl(selectee);
2345 switch (info->type) {
2353 case CONSTR_NOCAF_STATIC:
2354 // check that the size is in range
2355 ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
2356 info->layout.payload.nptrs));
2358 // Select the right field from the constructor, and check
2359 // that the result isn't in to-space. It might be in
2360 // to-space if, for example, this constructor contains
2361 // pointers to younger-gen objects (and is on the mut-once
2366 q = selectee->payload[field];
2367 if (is_to_space(q)) {
2377 case IND_OLDGEN_PERM:
2379 selectee = ((StgInd *)selectee)->indirectee;
2383 // We don't follow pointers into to-space; the constructor
2384 // has already been evacuated, so we won't save any space
2385 // leaks by evaluating this selector thunk anyhow.
2388 case THUNK_SELECTOR:
2392 // check that we don't recurse too much, re-using the
2393 // depth bound also used in evacuate().
2394 if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
2397 thunk_selector_depth++;
2399 val = eval_thunk_selector(info->layout.selector_offset,
2400 (StgSelector *)selectee);
2402 thunk_selector_depth--;
2407 // We evaluated this selector thunk, so update it with
2408 // an indirection. NOTE: we don't use UPD_IND here,
2409 // because we are guaranteed that p is in a generation
2410 // that we are collecting, and we never want to put the
2411 // indirection on a mutable list.
2413 // For the purposes of LDV profiling, we have destroyed
2414 // the original selector thunk.
2415 SET_INFO(p, info_ptr);
2416 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(selectee);
2418 ((StgInd *)selectee)->indirectee = val;
2419 SET_INFO(selectee,&stg_IND_info);
2421 // For the purposes of LDV profiling, we have created an
2423 LDV_RECORD_CREATE(selectee);
2440 case SE_CAF_BLACKHOLE:
2452 // not evaluated yet
2456 barf("eval_thunk_selector: strange selectee %d",
2461 // We didn't manage to evaluate this thunk; restore the old info pointer
2462 SET_INFO(p, info_ptr);
2466 /* -----------------------------------------------------------------------------
2467 move_TSO is called to update the TSO structure after it has been
2468 moved from one place to another.
2469 -------------------------------------------------------------------------- */
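/* For example (illustrative numbers): if the TSO has been copied 64 words
 * higher in memory, diff below is 64, and dest->sp must be bumped by the
 * same 64 words so that it points into dest's own stack rather than src's.
 */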
2472 move_TSO (StgTSO *src, StgTSO *dest)
2476 // relocate the stack pointer...
2477 diff = (StgPtr)dest - (StgPtr)src; // In *words*
2478 dest->sp = (StgPtr)dest->sp + diff;
2481 /* Similar to scavenge_large_bitmap(), but we don't write back the
2482 * pointers we get back from evacuate().
2485 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
2492 bitmap = large_srt->l.bitmap[b];
2493 size = (nat)large_srt->l.size;
2494 p = (StgClosure **)large_srt->srt;
2495 for (i = 0; i < size; ) {
2496 if ((bitmap & 1) != 0) {
2501 if (i % BITS_IN(W_) == 0) {
2503 bitmap = large_srt->l.bitmap[b];
2505 bitmap = bitmap >> 1;
2510 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
2511 * srt field in the info table. That's ok, because we'll
2512 * never dereference it.
2515 scavenge_srt (StgClosure **srt, nat srt_bitmap)
2520 bitmap = srt_bitmap;
2523 if (bitmap == (StgHalfWord)(-1)) {
2524 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
2528 while (bitmap != 0) {
2529 if ((bitmap & 1) != 0) {
2530 #ifdef ENABLE_WIN32_DLL_SUPPORT
2531 // Special case to handle references to closures hiding out in DLLs, since
2532 // double indirections are required to get at them. The code generator knows
2533 // which is which when generating the SRT, so it stores the (indirect)
2534 // reference to the DLL closure in the table by first adding one to it.
2535 // We check for this here, and undo the addition before evacuating it.
2537 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
2538 // closure that's fixed at link-time, and no extra magic is required.
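	  // Illustrative example with a hypothetical address: if the SRT
	  // entry holds 0x500001, bit 0 is set, so the real entry is the
	  // word at 0x500000, which in turn holds the address of the DLL
	  // closure; we mask off the tag and dereference once more before
	  // evacuating, exactly as the branch below does.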
2539 if ( (unsigned long)(*srt) & 0x1 ) {
2540 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
2549 bitmap = bitmap >> 1;
2555 scavenge_thunk_srt(const StgInfoTable *info)
2557 StgThunkInfoTable *thunk_info;
2559 if (!major_gc) return;
2561 thunk_info = itbl_to_thunk_itbl(info);
2562 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
2566 scavenge_fun_srt(const StgInfoTable *info)
2568 StgFunInfoTable *fun_info;
2570 if (!major_gc) return;
2572 fun_info = itbl_to_fun_itbl(info);
2573 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
2576 /* -----------------------------------------------------------------------------
2578 -------------------------------------------------------------------------- */
2581 scavengeTSO (StgTSO *tso)
2583 if ( tso->why_blocked == BlockedOnMVar
2584 || tso->why_blocked == BlockedOnBlackHole
2585 || tso->why_blocked == BlockedOnException
2587 || tso->why_blocked == BlockedOnGA
2588 || tso->why_blocked == BlockedOnGA_NoSend
2591 tso->block_info.closure = evacuate(tso->block_info.closure);
2593 if ( tso->blocked_exceptions != NULL ) {
2594 tso->blocked_exceptions =
2595 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2598 // We don't always chase the link field: TSOs on the blackhole
2599 // queue are not automatically alive, so the link field is a
2600 // "weak" pointer in that case.
2601 if (tso->why_blocked != BlockedOnBlackHole) {
2602 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
2605 // scavenge the current transaction record
2606 tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
2608 // scavenge this thread's stack
2609 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2612 /* -----------------------------------------------------------------------------
2613 Blocks of function args occur on the stack (at the top) and
2615 -------------------------------------------------------------------------- */
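/* The argument layout is found in one of three ways (a summary of the
 * switch below): a small bitmap stored inline in the function's info
 * table (fun_info->f.b.bitmap), a large out-of-line bitmap reached via
 * GET_FUN_LARGE_BITMAP(), or, for the standard argument patterns, the
 * precomputed stg_arg_bitmaps[] table indexed by fun_type.
 */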
2617 STATIC_INLINE StgPtr
2618 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
2625 switch (fun_info->f.fun_type) {
2627 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2628 size = BITMAP_SIZE(fun_info->f.b.bitmap);
2631 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
2632 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2636 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2637 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
2640 if ((bitmap & 1) == 0) {
2641 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2644 bitmap = bitmap >> 1;
2652 STATIC_INLINE StgPtr
2653 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
2657 StgFunInfoTable *fun_info;
2659 fun_info = get_fun_itbl(fun);
2660 ASSERT(fun_info->i.type != PAP);
2661 p = (StgPtr)payload;
2663 switch (fun_info->f.fun_type) {
2665 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2668 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2672 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
2676 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2679 if ((bitmap & 1) == 0) {
2680 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2683 bitmap = bitmap >> 1;
2691 STATIC_INLINE StgPtr
2692 scavenge_PAP (StgPAP *pap)
2694 pap->fun = evacuate(pap->fun);
2695 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
2698 STATIC_INLINE StgPtr
2699 scavenge_AP (StgAP *ap)
2701 ap->fun = evacuate(ap->fun);
2702 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
2705 /* -----------------------------------------------------------------------------
2706 Scavenge a given step until there are no more objects in this step
2709 evac_gen is set by the caller to be either zero (for a step in a
2710 generation < N) or G where G is the generation of the step being
2713 We sometimes temporarily change evac_gen back to zero if we're
2714 scavenging a mutable object where early promotion isn't such a good
2716 -------------------------------------------------------------------------- */
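/* The save/restore idiom used for mutable objects throughout this
 * function, shown in isolation (a summary only, not extra code):
 *
 *     evac_gen = 0;                  // don't promote the children early
 *     ... evacuate the object's pointer fields ...
 *     evac_gen = saved_evac_gen;     // restore the caller's setting
 *     failed_to_evac = rtsTrue;      // keep the object on a mutable list
 */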
2724 nat saved_evac_gen = evac_gen;
2729 failed_to_evac = rtsFalse;
2731 /* scavenge phase - standard breadth-first scavenging of the
2735 while (bd != stp->hp_bd || p < stp->hp) {
2737 // If we're at the end of this block, move on to the next block
2738 if (bd != stp->hp_bd && p == bd->free) {
2744 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
2745 info = get_itbl((StgClosure *)p);
2747 ASSERT(thunk_selector_depth == 0);
2750 switch (info->type) {
2754 StgMVar *mvar = ((StgMVar *)p);
2756 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
2757 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
2758 mvar->value = evacuate((StgClosure *)mvar->value);
2759 evac_gen = saved_evac_gen;
2760 failed_to_evac = rtsTrue; // mutable.
2761 p += sizeofW(StgMVar);
2766 scavenge_fun_srt(info);
2767 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2768 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2769 p += sizeofW(StgHeader) + 2;
2773 scavenge_thunk_srt(info);
2774 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
2775 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2776 p += sizeofW(StgThunk) + 2;
2780 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2781 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2782 p += sizeofW(StgHeader) + 2;
2786 scavenge_thunk_srt(info);
2787 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2788 p += sizeofW(StgThunk) + 1;
2792 scavenge_fun_srt(info);
2794 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2795 p += sizeofW(StgHeader) + 1;
2799 scavenge_thunk_srt(info);
2800 p += sizeofW(StgThunk) + 1;
2804 scavenge_fun_srt(info);
2806 p += sizeofW(StgHeader) + 1;
2810 scavenge_thunk_srt(info);
2811 p += sizeofW(StgThunk) + 2;
2815 scavenge_fun_srt(info);
2817 p += sizeofW(StgHeader) + 2;
2821 scavenge_thunk_srt(info);
2822 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2823 p += sizeofW(StgThunk) + 2;
2827 scavenge_fun_srt(info);
2829 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2830 p += sizeofW(StgHeader) + 2;
2834 scavenge_fun_srt(info);
2841 scavenge_thunk_srt(info);
2842 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
2843 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
2844 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2846 p += info->layout.payload.nptrs;
2857 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2858 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2859 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2861 p += info->layout.payload.nptrs;
2866 StgBCO *bco = (StgBCO *)p;
2867 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
2868 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
2869 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
2870 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
2871 p += bco_sizeW(bco);
2876 if (stp->gen->no != 0) {
2879 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
2880 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
2881 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
2884 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
2886 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2888 // We pretend that p has just been created.
2889 LDV_RECORD_CREATE((StgClosure *)p);
2892 case IND_OLDGEN_PERM:
2893 ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
2894 p += sizeofW(StgInd);
2899 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2900 evac_gen = saved_evac_gen;
2901 failed_to_evac = rtsTrue; // mutable anyhow
2902 p += sizeofW(StgMutVar);
2906 case SE_CAF_BLACKHOLE:
2909 p += BLACKHOLE_sizeW();
2912 case THUNK_SELECTOR:
2914 StgSelector *s = (StgSelector *)p;
2915 s->selectee = evacuate(s->selectee);
2916 p += THUNK_SELECTOR_sizeW();
2920 // A chunk of stack saved in a heap object
2923 StgAP_STACK *ap = (StgAP_STACK *)p;
2925 ap->fun = evacuate(ap->fun);
2926 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
2927 p = (StgPtr)ap->payload + ap->size;
2932 p = scavenge_PAP((StgPAP *)p);
2936 p = scavenge_AP((StgAP *)p);
2940 // nothing to follow
2941 p += arr_words_sizeW((StgArrWords *)p);
2944 case MUT_ARR_PTRS_CLEAN:
2945 case MUT_ARR_PTRS_DIRTY:
2946 // follow everything
2949 rtsBool saved_eager;
2951 // We don't eagerly promote objects pointed to by a mutable
2952 // array, but if we find the array only points to objects in
2953 // the same or an older generation, we mark it "clean" and
2954 // avoid traversing it during minor GCs.
2955 saved_eager = eager_promotion;
2956 eager_promotion = rtsFalse;
2957 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2958 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2959 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2961 eager_promotion = saved_eager;
2963 if (failed_to_evac) {
2964 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
2966 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
2969 failed_to_evac = rtsTrue; // always put it on the mutable list.
2973 case MUT_ARR_PTRS_FROZEN:
2974 case MUT_ARR_PTRS_FROZEN0:
2975 // follow everything
2979 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2980 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2981 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2984 // If we're going to put this object on the mutable list, then
2985 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
2986 if (failed_to_evac) {
2987 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
2989 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
2996 StgTSO *tso = (StgTSO *)p;
2999 evac_gen = saved_evac_gen;
3000 failed_to_evac = rtsTrue; // mutable anyhow.
3001 p += tso_sizeW(tso);
3009 nat size, ptrs, nonptrs, vhs;
3011 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3013 StgRBH *rbh = (StgRBH *)p;
3014 (StgClosure *)rbh->blocking_queue =
3015 evacuate((StgClosure *)rbh->blocking_queue);
3016 failed_to_evac = rtsTrue; // mutable anyhow.
3018 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3019 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3020 // ToDo: use size of reverted closure here!
3021 p += BLACKHOLE_sizeW();
3027 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3028 // follow the pointer to the node which is being demanded
3029 (StgClosure *)bf->node =
3030 evacuate((StgClosure *)bf->node);
3031 // follow the link to the rest of the blocking queue
3032 (StgClosure *)bf->link =
3033 evacuate((StgClosure *)bf->link);
3035 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3036 bf, info_type((StgClosure *)bf),
3037 bf->node, info_type(bf->node)));
3038 p += sizeofW(StgBlockedFetch);
3046 p += sizeofW(StgFetchMe);
3047 break; // nothing to do in this case
3051 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3052 (StgClosure *)fmbq->blocking_queue =
3053 evacuate((StgClosure *)fmbq->blocking_queue);
3055 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3056 p, info_type((StgClosure *)p)));
3057 p += sizeofW(StgFetchMeBlockingQueue);
3062 case TVAR_WAIT_QUEUE:
3064 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3066 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3067 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3068 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3069 evac_gen = saved_evac_gen;
3070 failed_to_evac = rtsTrue; // mutable
3071 p += sizeofW(StgTVarWaitQueue);
3077 StgTVar *tvar = ((StgTVar *) p);
3079 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3080 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3081 evac_gen = saved_evac_gen;
3082 failed_to_evac = rtsTrue; // mutable
3083 p += sizeofW(StgTVar);
3089 StgTRecHeader *trec = ((StgTRecHeader *) p);
3091 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3092 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3093 evac_gen = saved_evac_gen;
3094 failed_to_evac = rtsTrue; // mutable
3095 p += sizeofW(StgTRecHeader);
3102 StgTRecChunk *tc = ((StgTRecChunk *) p);
3103 TRecEntry *e = &(tc -> entries[0]);
3105 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3106 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3107 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3108 e->expected_value = evacuate((StgClosure*)e->expected_value);
3109 e->new_value = evacuate((StgClosure*)e->new_value);
3111 evac_gen = saved_evac_gen;
3112 failed_to_evac = rtsTrue; // mutable
3113 p += sizeofW(StgTRecChunk);
3118 barf("scavenge: unimplemented/strange closure type %d @ %p",
3123 * We need to record the current object on the mutable list if
3124 * (a) It is actually mutable, or
3125 * (b) It contains pointers to a younger generation.
3126 * Case (b) arises if we didn't manage to promote everything that
3127 * the current object points to into the current generation.
3129 if (failed_to_evac) {
3130 failed_to_evac = rtsFalse;
3131 if (stp->gen_no > 0) {
3132 recordMutableGen((StgClosure *)q, stp->gen);
3141 /* -----------------------------------------------------------------------------
3142 Scavenge everything on the mark stack.
3144 This is slightly different from scavenge():
3145 - we don't walk linearly through the objects, so the scavenger
3146 doesn't need to advance the pointer on to the next object.
3147 -------------------------------------------------------------------------- */
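/* Note: because objects are popped off the mark stack rather than walked
 * over linearly, the case bodies below omit the "p += size" adjustments
 * that the corresponding cases in scavenge() perform.
 */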
3150 scavenge_mark_stack(void)
3156 evac_gen = oldest_gen->no;
3157 saved_evac_gen = evac_gen;
3160 while (!mark_stack_empty()) {
3161 p = pop_mark_stack();
3163 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3164 info = get_itbl((StgClosure *)p);
3167 switch (info->type) {
3171 StgMVar *mvar = ((StgMVar *)p);
3173 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3174 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3175 mvar->value = evacuate((StgClosure *)mvar->value);
3176 evac_gen = saved_evac_gen;
3177 failed_to_evac = rtsTrue; // mutable.
3182 scavenge_fun_srt(info);
3183 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3184 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3188 scavenge_thunk_srt(info);
3189 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
3190 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3194 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3195 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3200 scavenge_fun_srt(info);
3201 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3206 scavenge_thunk_srt(info);
3207 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3212 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3217 scavenge_fun_srt(info);
3222 scavenge_thunk_srt(info);
3230 scavenge_fun_srt(info);
3237 scavenge_thunk_srt(info);
3238 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3239 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
3240 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3252 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3253 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
3254 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3260 StgBCO *bco = (StgBCO *)p;
3261 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
3262 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
3263 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
3264 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
3269 // don't need to do anything here: the only possible case
3270 // is that we're in a 1-space compacting collector, with
3271 // no "old" generation.
3275 case IND_OLDGEN_PERM:
3276 ((StgInd *)p)->indirectee =
3277 evacuate(((StgInd *)p)->indirectee);
3282 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3283 evac_gen = saved_evac_gen;
3284 failed_to_evac = rtsTrue;
3288 case SE_CAF_BLACKHOLE:
3294 case THUNK_SELECTOR:
3296 StgSelector *s = (StgSelector *)p;
3297 s->selectee = evacuate(s->selectee);
3301 // A chunk of stack saved in a heap object
3304 StgAP_STACK *ap = (StgAP_STACK *)p;
3306 ap->fun = evacuate(ap->fun);
3307 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3312 scavenge_PAP((StgPAP *)p);
3316 scavenge_AP((StgAP *)p);
3319 case MUT_ARR_PTRS_CLEAN:
3320 case MUT_ARR_PTRS_DIRTY:
3321 // follow everything
3324 rtsBool saved_eager;
3326 // We don't eagerly promote objects pointed to by a mutable
3327 // array, but if we find the array only points to objects in
3328 // the same or an older generation, we mark it "clean" and
3329 // avoid traversing it during minor GCs.
3330 saved_eager = eager_promotion;
3331 eager_promotion = rtsFalse;
3332 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3333 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3334 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3336 eager_promotion = saved_eager;
3338 if (failed_to_evac) {
3339 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3341 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3344 failed_to_evac = rtsTrue; // mutable anyhow.
3348 case MUT_ARR_PTRS_FROZEN:
3349 case MUT_ARR_PTRS_FROZEN0:
3350 // follow everything
3354 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3355 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3356 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3359 // If we're going to put this object on the mutable list, then
3360 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3361 if (failed_to_evac) {
3362 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3364 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3371 StgTSO *tso = (StgTSO *)p;
3374 evac_gen = saved_evac_gen;
3375 failed_to_evac = rtsTrue;
3383 nat size, ptrs, nonptrs, vhs;
3385 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3387 StgRBH *rbh = (StgRBH *)p;
3388 rbh->blocking_queue =
3389 (StgTSO *)evacuate((StgClosure *)rbh->blocking_queue);
3390 failed_to_evac = rtsTrue; // mutable anyhow.
3392 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3393 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3399 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3400 // follow the pointer to the node which is being demanded
3401 (StgClosure *)bf->node =
3402 evacuate((StgClosure *)bf->node);
3403 // follow the link to the rest of the blocking queue
3404 (StgClosure *)bf->link =
3405 evacuate((StgClosure *)bf->link);
3407 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3408 bf, info_type((StgClosure *)bf),
3409 bf->node, info_type(bf->node)));
3417 break; // nothing to do in this case
3421 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3422 (StgClosure *)fmbq->blocking_queue =
3423 evacuate((StgClosure *)fmbq->blocking_queue);
3425 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3426 p, info_type((StgClosure *)p)));
3431 case TVAR_WAIT_QUEUE:
3433 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3435 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3436 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3437 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3438 evac_gen = saved_evac_gen;
3439 failed_to_evac = rtsTrue; // mutable
3445 StgTVar *tvar = ((StgTVar *) p);
3447 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3448 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3449 evac_gen = saved_evac_gen;
3450 failed_to_evac = rtsTrue; // mutable
3457 StgTRecChunk *tc = ((StgTRecChunk *) p);
3458 TRecEntry *e = &(tc -> entries[0]);
3460 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3461 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3462 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3463 e->expected_value = evacuate((StgClosure*)e->expected_value);
3464 e->new_value = evacuate((StgClosure*)e->new_value);
3466 evac_gen = saved_evac_gen;
3467 failed_to_evac = rtsTrue; // mutable
3473 StgTRecHeader *trec = ((StgTRecHeader *) p);
3475 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3476 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3477 evac_gen = saved_evac_gen;
3478 failed_to_evac = rtsTrue; // mutable
3483 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
3487 if (failed_to_evac) {
3488 failed_to_evac = rtsFalse;
3490 recordMutableGen((StgClosure *)q, &generations[evac_gen]);
3494 // mark the next bit to indicate "scavenged"
3495 mark(q+1, Bdescr(q));
3497 } // while (!mark_stack_empty())
3499 // start a new linear scan if the mark stack overflowed at some point
3500 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
3501 IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan"));
3502 mark_stack_overflowed = rtsFalse;
3503 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
3504 oldgen_scan = oldgen_scan_bd->start;
3507 if (oldgen_scan_bd) {
3508 // push a new thing on the mark stack
3510 // find a closure that is marked but not scavenged, and start
3512 while (oldgen_scan < oldgen_scan_bd->free
3513 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
3517 if (oldgen_scan < oldgen_scan_bd->free) {
3519 // already scavenged?
3520 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
3521 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
3524 push_mark_stack(oldgen_scan);
3525 // ToDo: bump the linear scan by the actual size of the object
3526 oldgen_scan += sizeofW(StgHeader) + MIN_NONUPD_SIZE;
3530 oldgen_scan_bd = oldgen_scan_bd->link;
3531 if (oldgen_scan_bd != NULL) {
3532 oldgen_scan = oldgen_scan_bd->start;
3538 /* -----------------------------------------------------------------------------
3539 Scavenge one object.
3541 This is used for objects that are temporarily marked as mutable
3542 because they contain old-to-new generation pointers. Only certain
3543 objects can have this property.
3544 -------------------------------------------------------------------------- */
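/* Return value (a summary of the callers further down): scavenge_one()
 * returns rtsTrue when it failed to promote everything the object points
 * to, which tells scavenge_mutable_list() and scavenge_large() that the
 * object must stay on a mutable list.
 */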
3547 scavenge_one(StgPtr p)
3549 const StgInfoTable *info;
3550 nat saved_evac_gen = evac_gen;
3553 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3554 info = get_itbl((StgClosure *)p);
3556 switch (info->type) {
3560 StgMVar *mvar = ((StgMVar *)p);
3562 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3563 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3564 mvar->value = evacuate((StgClosure *)mvar->value);
3565 evac_gen = saved_evac_gen;
3566 failed_to_evac = rtsTrue; // mutable.
3579 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3580 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
3581 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3587 case FUN_1_0: // hardly worth specialising these guys
3603 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3604 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
3605 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3612 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3613 evac_gen = saved_evac_gen;
3614 failed_to_evac = rtsTrue; // mutable anyhow
3618 case SE_CAF_BLACKHOLE:
3623 case THUNK_SELECTOR:
3625 StgSelector *s = (StgSelector *)p;
3626 s->selectee = evacuate(s->selectee);
3632 StgAP_STACK *ap = (StgAP_STACK *)p;
3634 ap->fun = evacuate(ap->fun);
3635 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3636 p = (StgPtr)ap->payload + ap->size;
3641 p = scavenge_PAP((StgPAP *)p);
3645 p = scavenge_AP((StgAP *)p);
3649 // nothing to follow
3652 case MUT_ARR_PTRS_CLEAN:
3653 case MUT_ARR_PTRS_DIRTY:
3656 rtsBool saved_eager;
3658 // We don't eagerly promote objects pointed to by a mutable
3659 // array, but if we find the array only points to objects in
3660 // the same or an older generation, we mark it "clean" and
3661 // avoid traversing it during minor GCs.
3662 saved_eager = eager_promotion;
3663 eager_promotion = rtsFalse;
3665 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3666 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3667 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3669 eager_promotion = saved_eager;
3671 if (failed_to_evac) {
3672 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3674 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3677 failed_to_evac = rtsTrue;
3681 case MUT_ARR_PTRS_FROZEN:
3682 case MUT_ARR_PTRS_FROZEN0:
3684 // follow everything
3687 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3688 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3689 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3692 // If we're going to put this object on the mutable list, then
3693 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3694 if (failed_to_evac) {
3695 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3697 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3704 StgTSO *tso = (StgTSO *)p;
3706 evac_gen = 0; // repeatedly mutable
3708 evac_gen = saved_evac_gen;
3709 failed_to_evac = rtsTrue;
3717 nat size, ptrs, nonptrs, vhs;
3719 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3721 StgRBH *rbh = (StgRBH *)p;
3722 (StgClosure *)rbh->blocking_queue =
3723 evacuate((StgClosure *)rbh->blocking_queue);
3724 failed_to_evac = rtsTrue; // mutable anyhow.
3726 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3727 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3728 // ToDo: use size of reverted closure here!
3734 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3735 // follow the pointer to the node which is being demanded
3736 (StgClosure *)bf->node =
3737 evacuate((StgClosure *)bf->node);
3738 // follow the link to the rest of the blocking queue
3739 (StgClosure *)bf->link =
3740 evacuate((StgClosure *)bf->link);
3742 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3743 bf, info_type((StgClosure *)bf),
3744 bf->node, info_type(bf->node)));
3752 break; // nothing to do in this case
3756 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3757 (StgClosure *)fmbq->blocking_queue =
3758 evacuate((StgClosure *)fmbq->blocking_queue);
3760 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3761 p, info_type((StgClosure *)p)));
3766 case TVAR_WAIT_QUEUE:
3768 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3770 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3771 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3772 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3773 evac_gen = saved_evac_gen;
3774 failed_to_evac = rtsTrue; // mutable
3780 StgTVar *tvar = ((StgTVar *) p);
3782 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3783 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3784 evac_gen = saved_evac_gen;
3785 failed_to_evac = rtsTrue; // mutable
3791 StgTRecHeader *trec = ((StgTRecHeader *) p);
3793 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3794 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3795 evac_gen = saved_evac_gen;
3796 failed_to_evac = rtsTrue; // mutable
3803 StgTRecChunk *tc = ((StgTRecChunk *) p);
3804 TRecEntry *e = &(tc -> entries[0]);
3806 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3807 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3808 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3809 e->expected_value = evacuate((StgClosure*)e->expected_value);
3810 e->new_value = evacuate((StgClosure*)e->new_value);
3812 evac_gen = saved_evac_gen;
3813 failed_to_evac = rtsTrue; // mutable
3818 case IND_OLDGEN_PERM:
3821 /* Careful here: a THUNK can be on the mutable list because
3822 * it contains pointers to young gen objects. If such a thunk
3823 * is updated, the IND_OLDGEN will be added to the mutable
3824 * list again, and we'll scavenge it twice. evacuate()
3825 * doesn't check whether the object has already been
3826 * evacuated, so we perform that check here.
3828 StgClosure *q = ((StgInd *)p)->indirectee;
3829 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
3832 ((StgInd *)p)->indirectee = evacuate(q);
3835 #if 0 && defined(DEBUG)
3836 if (RtsFlags.DebugFlags.gc)
3837 /* Debugging code to print out the size of the thing we just
3841 StgPtr start = gen->steps[0].scan;
3842 bdescr *start_bd = gen->steps[0].scan_bd;
3844 scavenge(&gen->steps[0]);
3845 if (start_bd != gen->steps[0].scan_bd) {
3846 size += (P_)BLOCK_ROUND_UP(start) - start;
3847 start_bd = start_bd->link;
3848 while (start_bd != gen->steps[0].scan_bd) {
3849 size += BLOCK_SIZE_W;
3850 start_bd = start_bd->link;
3852 size += gen->steps[0].scan -
3853 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
3855 size = gen->steps[0].scan - start;
3857 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
3863 barf("scavenge_one: strange object %d", (int)(info->type));
3866 no_luck = failed_to_evac;
3867 failed_to_evac = rtsFalse;
3871 /* -----------------------------------------------------------------------------
3872 Scavenging mutable lists.
3874 We treat the mutable list of each generation > N (i.e. all the
3875 generations older than the one being collected) as roots. We also
3876 remove non-mutable objects from the mutable list at this point.
3877 -------------------------------------------------------------------------- */
3880 scavenge_mutable_list(generation *gen)
3885 bd = gen->saved_mut_list;
3888 for (; bd != NULL; bd = bd->link) {
3889 for (q = bd->start; q < bd->free; q++) {
3891 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3894 switch (get_itbl((StgClosure *)p)->type) {
3896 mutlist_MUTVARS++; break;
3897 case MUT_ARR_PTRS_CLEAN:
3898 case MUT_ARR_PTRS_DIRTY:
3899 case MUT_ARR_PTRS_FROZEN:
3900 case MUT_ARR_PTRS_FROZEN0:
3901 mutlist_MUTARRS++; break;
3903 mutlist_OTHERS++; break;
3907 // We don't need to scavenge clean arrays. This is the
3908 // Whole Point of MUT_ARR_PTRS_CLEAN.
3909 if (get_itbl((StgClosure *)p)->type == MUT_ARR_PTRS_CLEAN) {
3910 recordMutableGen((StgClosure *)p,gen);
3914 if (scavenge_one(p)) {
3915 /* didn't manage to promote everything, so put the
3916 * object back on the list.
3918 recordMutableGen((StgClosure *)p,gen);
3923 // free the old mut_list
3924 freeChain(gen->saved_mut_list);
3925 gen->saved_mut_list = NULL;
3930 scavenge_static(void)
3932 StgClosure* p = static_objects;
3933 const StgInfoTable *info;
3935 /* Always evacuate straight to the oldest generation for static
3937 evac_gen = oldest_gen->no;
3939 /* keep going until we've scavenged all the objects on the linked
3941 while (p != END_OF_STATIC_LIST) {
3943 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3946 if (info->type==RBH)
3947 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
3949 // make sure the info pointer is into text space
3951 /* Take this object *off* the static_objects list,
3952 * and put it on the scavenged_static_objects list.
3954 static_objects = *STATIC_LINK(info,p);
3955 *STATIC_LINK(info,p) = scavenged_static_objects;
3956 scavenged_static_objects = p;
3958 switch (info -> type) {
3962 StgInd *ind = (StgInd *)p;
3963 ind->indirectee = evacuate(ind->indirectee);
3965 /* might fail to evacuate it, in which case we have to pop it
3966 * back on the mutable list of the oldest generation. We
3967 * leave it *on* the scavenged_static_objects list, though,
3968 * in case we visit this object again.
3970 if (failed_to_evac) {
3971 failed_to_evac = rtsFalse;
3972 recordMutableGen((StgClosure *)p,oldest_gen);
3978 scavenge_thunk_srt(info);
3982 scavenge_fun_srt(info);
3989 next = (P_)p->payload + info->layout.payload.ptrs;
3990 // evacuate the pointers
3991 for (q = (P_)p->payload; q < next; q++) {
3992 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3998 barf("scavenge_static: strange closure %d", (int)(info->type));
4001 ASSERT(failed_to_evac == rtsFalse);
4003 /* get the next static object from the list. Remember, there might
4004 * be more stuff on this list now that we've done some evacuating!
4005 * (static_objects is a global)
4011 /* -----------------------------------------------------------------------------
4012 scavenge a chunk of memory described by a bitmap
4013 -------------------------------------------------------------------------- */
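/* Bitmap convention (illustrative): a 0 bit means the corresponding word
 * is a pointer and is evacuated; a 1 bit means a non-pointer, which is
 * skipped.  For example, with size = 4 and bitmap = 0x6 (binary 0110),
 * words 0 and 3 are evacuated and words 1 and 2 are not.  A large bitmap
 * is just an array of such words, reloaded every BITS_IN(W_) entries.
 * (SRT bitmaps, above, use the opposite sense: a 1 bit marks a live
 * entry.)
 */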
4016 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
4022 bitmap = large_bitmap->bitmap[b];
4023 for (i = 0; i < size; ) {
4024 if ((bitmap & 1) == 0) {
4025 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4029 if (i % BITS_IN(W_) == 0) {
4031 bitmap = large_bitmap->bitmap[b];
4033 bitmap = bitmap >> 1;
4038 STATIC_INLINE StgPtr
4039 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
4042 if ((bitmap & 1) == 0) {
4043 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4046 bitmap = bitmap >> 1;
4052 /* -----------------------------------------------------------------------------
4053 scavenge_stack walks over a section of stack and evacuates all the
4054 objects pointed to by it. We can use the same code for walking
4055 AP_STACK_UPDs, since these are just sections of copied stack.
4056 -------------------------------------------------------------------------- */
4060 scavenge_stack(StgPtr p, StgPtr stack_end)
4062 const StgRetInfoTable* info;
4066 //IF_DEBUG(sanity, debugBelch(" scavenging stack between %p and %p", p, stack_end));
4069 * Each time around this loop, we are looking at a chunk of stack
4070 * that starts with an activation record.
4073 while (p < stack_end) {
4074 info = get_ret_itbl((StgClosure *)p);
4076 switch (info->i.type) {
4079 // In SMP, we can get update frames that point to indirections
4080 // when two threads evaluate the same thunk. We do attempt to
4081 // discover this situation in threadPaused(), but it's
4082 // possible that the following sequence occurs:
4091 // Now T is an indirection, and the update frame is already
4092 // marked on A's stack, so we won't traverse it again in
4093 // threadPaused(). We could traverse the whole stack again
4094 // before GC, but that seems like overkill.
4096 // Scavenging this update frame as normal would be disastrous;
4097 // the updatee would end up pointing to the value. So we turn
4098 // the indirection into an IND_PERM, so that evacuate will
4099 // copy the indirection into the old generation instead of
4101 if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) {
4102 ((StgUpdateFrame *)p)->updatee->header.info =
4103 (StgInfoTable *)&stg_IND_PERM_info;
4105 ((StgUpdateFrame *)p)->updatee
4106 = evacuate(((StgUpdateFrame *)p)->updatee);
4107 p += sizeofW(StgUpdateFrame);
4110 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
4111 case CATCH_STM_FRAME:
4112 case CATCH_RETRY_FRAME:
4113 case ATOMICALLY_FRAME:
4118 bitmap = BITMAP_BITS(info->i.layout.bitmap);
4119 size = BITMAP_SIZE(info->i.layout.bitmap);
4120 // NOTE: the payload starts immediately after the info-ptr, we
4121 // don't have an StgHeader in the same sense as a heap closure.
4123 p = scavenge_small_bitmap(p, size, bitmap);
4127 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
4135 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4138 size = BCO_BITMAP_SIZE(bco);
4139 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
4144 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
4150 size = GET_LARGE_BITMAP(&info->i)->size;
4152 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
4154 // and don't forget to follow the SRT
4158 // Dynamic bitmap: the mask is stored on the stack, and
4159 // there are a number of non-pointers followed by a number
4160 // of pointers above the bitmapped area. (see StgMacros.h,
4165 dyn = ((StgRetDyn *)p)->liveness;
4167 // traverse the bitmap first
4168 bitmap = RET_DYN_LIVENESS(dyn);
4169 p = (P_)&((StgRetDyn *)p)->payload[0];
4170 size = RET_DYN_BITMAP_SIZE;
4171 p = scavenge_small_bitmap(p, size, bitmap);
4173 // skip over the non-ptr words
4174 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
4176 // follow the ptr words
4177 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
4178 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4186 StgRetFun *ret_fun = (StgRetFun *)p;
4187 StgFunInfoTable *fun_info;
4189 ret_fun->fun = evacuate(ret_fun->fun);
4190 fun_info = get_fun_itbl(ret_fun->fun);
4191 p = scavenge_arg_block(fun_info, ret_fun->payload);
4196 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
4201 /*-----------------------------------------------------------------------------
4202 scavenge the large object list.
4204 evac_gen set by caller; similar games played with evac_gen as with
4205 scavenge() - see comment at the top of scavenge(). Most large
4206 objects are (repeatedly) mutable, so most of the time evac_gen will
4208 --------------------------------------------------------------------------- */
4211 scavenge_large(step *stp)
4216 bd = stp->new_large_objects;
4218 for (; bd != NULL; bd = stp->new_large_objects) {
4220 /* take this object *off* the large objects list and put it on
4221 * the scavenged large objects list. This is so that we can
4222 * treat new_large_objects as a stack and push new objects on
4223 * the front when evacuating.
4225 stp->new_large_objects = bd->link;
4226 dbl_link_onto(bd, &stp->scavenged_large_objects);
4228 // update the block count in this step.
4229 stp->n_scavenged_large_blocks += bd->blocks;
4232 if (scavenge_one(p)) {
4233 if (stp->gen_no > 0) {
4234 recordMutableGen((StgClosure *)p, stp->gen);
4240 /* -----------------------------------------------------------------------------
4241 Initialising the static object & mutable lists
4242 -------------------------------------------------------------------------- */
4245 zero_static_object_list(StgClosure* first_static)
4249 const StgInfoTable *info;
4251 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
4253 link = *STATIC_LINK(info, p);
4254 *STATIC_LINK(info,p) = NULL;
4258 /* -----------------------------------------------------------------------------
4260 -------------------------------------------------------------------------- */
4267 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
4268 c = (StgIndStatic *)c->static_link)
4270 SET_INFO(c, c->saved_info);
4271 c->saved_info = NULL;
4272 // could, but not necessary: c->static_link = NULL;
4274 revertible_caf_list = NULL;
4278 markCAFs( evac_fn evac )
4282 for (c = (StgIndStatic *)caf_list; c != NULL;
4283 c = (StgIndStatic *)c->static_link)
4285 evac(&c->indirectee);
4287 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
4288 c = (StgIndStatic *)c->static_link)
4290 evac(&c->indirectee);
4294 /* -----------------------------------------------------------------------------
4295 Sanity code for CAF garbage collection.
4297 With DEBUG turned on, we manage a CAF list in addition to the SRT
4298 mechanism. After GC, we run down the CAF list and blackhole any
4299 CAFs which have been garbage collected. This means we get an error
4300 whenever the program tries to enter a garbage collected CAF.
4302 Any garbage collected CAFs are taken off the CAF list at the same
4304 -------------------------------------------------------------------------- */
4306 #if 0 && defined(DEBUG)
4313 const StgInfoTable *info;
4324 ASSERT(info->type == IND_STATIC);
4326 if (STATIC_LINK(info,p) == NULL) {
4327 IF_DEBUG(gccafs, debugBelch("CAF gc'd at 0x%04lx", (long)p));
4329 SET_INFO(p,&stg_BLACKHOLE_info);
4330 p = STATIC_LINK2(info,p);
4334 pp = &STATIC_LINK2(info,p);
4341 // debugBelch("%d CAFs live", i);
4346 /* -----------------------------------------------------------------------------
4349 * Code largely pinched from old RTS, then hacked to bits. We also do
4350 * lazy black holing here.
4352 * -------------------------------------------------------------------------- */
4354 struct stack_gap { StgWord gap_size; struct stack_gap *next_gap; };
4357 stackSqueeze(StgTSO *tso, StgPtr bottom)
4360 rtsBool prev_was_update_frame;
4361 StgClosure *updatee = NULL;
4362 StgRetInfoTable *info;
4363 StgWord current_gap_size;
4364 struct stack_gap *gap;
4367 // Traverse the stack upwards, replacing adjacent update frames
4368 // with a single update frame and a "stack gap". A stack gap
4369 // contains two values: the size of the gap, and the distance
4370 // to the next gap (or the stack top).
4374 ASSERT(frame < bottom);
4376 prev_was_update_frame = rtsFalse;
4377 current_gap_size = 0;
4378 gap = (struct stack_gap *) (tso->sp - sizeofW(StgUpdateFrame));
4380 while (frame < bottom) {
4382 info = get_ret_itbl((StgClosure *)frame);
4383 switch (info->i.type) {
4387 StgUpdateFrame *upd = (StgUpdateFrame *)frame;
4389 if (prev_was_update_frame) {
4391 TICK_UPD_SQUEEZED();
4392 /* wasn't there something about update squeezing and ticky to be
4393 * sorted out? oh yes: we aren't counting each enter properly
4394 * in this case. See the log somewhere. KSW 1999-04-21
4396 * Check two things: that the two update frames don't point to
4397 * the same object, and that the updatee_bypass isn't already an
4398 * indirection. Both of these cases only happen when we're in a
4399 * black hole-style loop (and there are multiple update frames
4400 * on the stack pointing to the same closure), but they can both
4401 * screw us up if we don't check.
4403 if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
4404 UPD_IND_NOLOCK(upd->updatee, updatee);
4407 // now mark this update frame as a stack gap. The gap
4408 // marker resides in the bottom-most update frame of
4409 // the series of adjacent frames, and covers all the
4410 // frames in this series.
4411 current_gap_size += sizeofW(StgUpdateFrame);
4412 ((struct stack_gap *)frame)->gap_size = current_gap_size;
4413 ((struct stack_gap *)frame)->next_gap = gap;
4415 frame += sizeofW(StgUpdateFrame);
4419 // single update frame, or the topmost update frame in a series
4421 prev_was_update_frame = rtsTrue;
4422 updatee = upd->updatee;
4423 frame += sizeofW(StgUpdateFrame);
4429 prev_was_update_frame = rtsFalse;
4431 // we're not in a gap... check whether this is the end of a gap
4432 // (an update frame can't be the end of a gap).
4433 if (current_gap_size != 0) {
4434 gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
4436 current_gap_size = 0;
4438 frame += stack_frame_sizeW((StgClosure *)frame);
4443 if (current_gap_size != 0) {
4444 gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
4447 // Now we have a stack with gaps in it, and we have to walk down
4448 // shoving the stack up to fill in the gaps. A diagram might
4452 // | ********* | <- sp
4456 // | stack_gap | <- gap | chunk_size
4458 // | ......... | <- gap_end v
4464 // 'sp' points to the current top-of-stack
4465 // 'gap' points to the stack_gap structure inside the gap
4466 // ***** indicates real stack data
4467 // ..... indicates gap
4468 // <empty> indicates unused
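    // Worked example (illustrative numbers only): with two gaps of 2 and
    // 3 words, each chunk of real stack lying above a gap is memmove'd
    // towards the stack bottom to close the gap beneath it, and tso->sp
    // ends up 5 words nearer the bottom, i.e. 5 words of stack have been
    // reclaimed.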
4472 void *gap_start, *next_gap_start, *gap_end;
4475 next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
4476 sp = next_gap_start;
4478 while ((StgPtr)gap > tso->sp) {
4480 // we're working in *bytes* now...
4481 gap_start = next_gap_start;
4482 gap_end = (void*) ((unsigned char*)gap_start - gap->gap_size * sizeof(W_));
4484 gap = gap->next_gap;
4485 next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
4487 chunk_size = (unsigned char*)gap_end - (unsigned char*)next_gap_start;
4489 memmove(sp, next_gap_start, chunk_size);
4492 tso->sp = (StgPtr)sp;
4496 /* -----------------------------------------------------------------------------
4499 * We have to prepare for GC - this means doing lazy black holing
4500 * here. We also take the opportunity to do stack squeezing if it's
4502 * -------------------------------------------------------------------------- */
4504 threadPaused(Capability *cap, StgTSO *tso)
4507 StgRetInfoTable *info;
4510 nat words_to_squeeze = 0;
4512 nat weight_pending = 0;
4513 rtsBool prev_was_update_frame;
4515 stack_end = &tso->stack[tso->stack_size];
4517 frame = (StgClosure *)tso->sp;
4520 // If we've already marked this frame, then stop here.
4521 if (frame->header.info == (StgInfoTable *)&stg_marked_upd_frame_info) {
4525 info = get_ret_itbl(frame);
4527 switch (info->i.type) {
4531 SET_INFO(frame, (StgInfoTable *)&stg_marked_upd_frame_info);
4533 bh = ((StgUpdateFrame *)frame)->updatee;
4535 if (closure_IND(bh) || bh->header.info == &stg_BLACKHOLE_info) {
4536 IF_DEBUG(squeeze, debugBelch("suspending duplicate work: %ld words of stack\n", (StgPtr)frame - tso->sp));
4538 // If this closure is already an indirection, then
4539 // suspend the computation up to this point:
4540 suspendComputation(cap,tso,(StgPtr)frame);
4542 // Now drop the update frame, and arrange to return
4543 // the value to the frame underneath:
4544 tso->sp = (StgPtr)frame + sizeofW(StgUpdateFrame) - 2;
4545 tso->sp[1] = (StgWord)bh;
4546 tso->sp[0] = (W_)&stg_enter_info;
4548 // And continue with threadPaused; there might be
4549 // yet more computation to suspend.
4550 threadPaused(cap,tso);
4554 if (bh->header.info != &stg_CAF_BLACKHOLE_info) {
4555 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
4556 debugBelch("Unexpected lazy BHing required at 0x%04lx\n",(long)bh);
4558 // zero out the slop so that the sanity checker can tell
4559 // where the next closure is.
4560 DEBUG_FILL_SLOP(bh);
4563 // We pretend that bh is now dead.
4564 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
4566 SET_INFO(bh,&stg_BLACKHOLE_info);
4568 // We pretend that bh has just been created.
4569 LDV_RECORD_CREATE(bh);
4572 frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
4573 if (prev_was_update_frame) {
4574 words_to_squeeze += sizeofW(StgUpdateFrame);
4575 weight += weight_pending;
4578 prev_was_update_frame = rtsTrue;
4584 // normal stack frames; do nothing except advance the pointer
4587 nat frame_size = stack_frame_sizeW(frame);
4588 weight_pending += frame_size;
4589 frame = (StgClosure *)((StgPtr)frame + frame_size);
4590 prev_was_update_frame = rtsFalse;
4597 debugBelch("words_to_squeeze: %d, weight: %d, squeeze: %s\n",
4598 words_to_squeeze, weight,
4599 weight < words_to_squeeze ? "YES" : "NO"));
4601 // Should we squeeze or not? Arbitrary heuristic: we squeeze if
4602 // the number of words we have to shift down is less than the
4603 // number of stack words we squeeze away by doing so.
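    // For example (illustrative numbers only): with words_to_squeeze = 24
    // and weight = 100 the heuristic says no (shifting 100 words to
    // reclaim 24 isn't worth it); with words_to_squeeze = 100 and
    // weight = 24 it says yes.  (Note that the test is currently disabled
    // in the condition below, so in practice we always squeeze.)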
4604 if (1 /*RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
4605 weight < words_to_squeeze*/) {
4606 stackSqueeze(tso, (StgPtr)frame);
4610 /* -----------------------------------------------------------------------------
4612 * -------------------------------------------------------------------------- */
4616 printMutableList(generation *gen)
4621 debugBelch("@@ Mutable list %p: ", gen->mut_list);
4623 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
4624 for (p = bd->start; p < bd->free; p++) {
4625 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));