/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2003
 *
 * Generational garbage collector
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "OSThreads.h"
#include "LdvProfile.h"
#include "BlockAlloc.h"
#include "ParTicky.h"  // ToDo: move into Rts.h
#include "GCCompact.h"
#include "RtsSignals.h"

#if defined(GRAN) || defined(PAR)
# include "GranSimRts.h"
# include "ParallelRts.h"
# include "ParallelDebug.h"
#endif

#if defined(RTS_GTK_FRONTPANEL)
#include "FrontPanel.h"
#endif

#include "RetainerProfile.h"
// Turn off inlining when debugging - it obfuscates things
#ifdef DEBUG
# undef  STATIC_INLINE
# define STATIC_INLINE static
#endif
/* STATIC OBJECT LIST.
 *
 * We maintain a linked list of static objects that are still live.
 * The requirements for this list are:
 *
 *  - we need to scan the list while adding to it, in order to
 *    scavenge all the static objects (in the same way that
 *    breadth-first scavenging works for dynamic objects).
 *
 *  - we need to be able to tell whether an object is already on
 *    the list, to break loops.
 *
 * Each static object has a "static link field", which we use for
 * linking objects on to the list.  We use a stack-type list, consing
 * objects on the front as they are added (this means that the
 * scavenge phase is depth-first, not breadth-first, but that
 * shouldn't matter).
 *
 * A separate list is kept for objects that have been scavenged
 * already - this is so that we can zero all the marks afterwards.
 *
 * An object is on the list if its static link field is non-zero; this
 * means that we have to mark the end of the list with '1', not NULL.
 *
 * Extra notes for generational GC:
 *
 * Each generation has a static object list associated with it.  When
 * collecting generations up to N, we treat the static object lists
 * from generations > N as roots.
 *
 * We build up a static object list while collecting generations 0..N,
 * which is then appended to the static object list of generation N+1.
 */
static StgClosure* static_objects;      // live static objects
StgClosure* scavenged_static_objects;   // static objects scavenged so far
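
/* A minimal sketch (not compiled) of the consing discipline described
 * above, mirroring what evacuate() does for static closures further down:
 * an object is linked on only when its static link field is still NULL
 * (which is what breaks loops), and the end of the list is marked with
 * END_OF_STATIC_LIST rather than NULL.
 */
#if 0
static void
link_static_object (StgClosure *q, const StgInfoTable *info)
{
    if (*STATIC_LINK(info,q) == NULL) {   // not on the list yet?
        *STATIC_LINK(info,q) = static_objects;
        static_objects = q;               // cons onto the front
    }
}
#endif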
/* N is the oldest generation being collected, where the generations
 * are numbered starting at 0.  A major GC (indicated by the major_gc
 * flag) is when we're collecting all generations.  We only attempt to
 * deal with static objects and GC CAFs when doing a major GC.
 */
static nat N;
static rtsBool major_gc;
/* Youngest generation that objects should be evacuated to in
 * evacuate().  (Logically an argument to evacuate, but it's static
 * a lot of the time so we optimise it into a global variable).
 */
static nat evac_gen;

/* Whether to do eager promotion or not.
 */
static rtsBool eager_promotion;
StgWeak *old_weak_ptr_list; // also pending finaliser list

/* Which stage of processing various kinds of weak pointer are we at?
 * (see traverse_weak_ptr_list() below for discussion).
 */
typedef enum { WeakPtrs, WeakThreads, WeakDone } WeakStage;
static WeakStage weak_stage;
/* List of all threads during GC
 */
static StgTSO *old_all_threads;
StgTSO *resurrected_threads;

/* Flag indicating failure to evacuate an object to the desired
 * generation.
 */
static rtsBool failed_to_evac;

/* Saved nursery (used for 2-space collector only)
 */
static bdescr *saved_nursery;
static nat saved_n_blocks;

/* Data used for allocation area sizing.
 */
static lnat new_blocks;          // blocks allocated during this GC
static lnat new_scavd_blocks;    // ditto, but depth-first blocks
static lnat g0s0_pcnt_kept = 30; // percentage of g0s0 live at last minor GC

/* Used to avoid long recursion due to selector thunks
 */
static lnat thunk_selector_depth = 0;
#define MAX_THUNK_SELECTOR_DEPTH 8
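
/* A minimal sketch (not compiled) of how this bound is applied at the
 * recursive call sites below (see the THUNK_SELECTOR cases in evacuate()
 * and eval_thunk_selector()): past the depth limit we give up on
 * short-circuiting the selector and simply copy the thunk.
 */
#if 0
    if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
        return copy(q,THUNK_SELECTOR_sizeW(),stp);  // fall back to copying
    }
#endif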
/* -----------------------------------------------------------------------------
   Static function declarations
   -------------------------------------------------------------------------- */

static bdescr *     gc_alloc_block          ( step *stp );
static void         mark_root               ( StgClosure **root );

// Use a register argument for evacuate, if available.
#if __GNUC__ >= 2
#define REGPARM1 __attribute__((regparm(1)))
#else
#define REGPARM1
#endif

REGPARM1 static StgClosure * evacuate (StgClosure *q);

static void         zero_static_object_list ( StgClosure* first_static );

static rtsBool      traverse_weak_ptr_list  ( void );
static void         mark_weak_ptr_list      ( StgWeak **list );

static StgClosure * eval_thunk_selector     ( nat field, StgSelector * p );

static void         scavenge                ( step * );
static void         scavenge_mark_stack     ( void );
static void         scavenge_stack          ( StgPtr p, StgPtr stack_end );
static rtsBool      scavenge_one            ( StgPtr p );
static void         scavenge_large          ( step * );
static void         scavenge_static         ( void );
static void         scavenge_mutable_list   ( generation *g );

static void         scavenge_large_bitmap   ( StgPtr p,
                                              StgLargeBitmap *large_bitmap,
                                              nat size );

#if 0 && defined(DEBUG)
static void         gcCAFs                  ( void );
#endif
/* -----------------------------------------------------------------------------
   Inline functions etc. for dealing with the mark bitmap & stack.
   -------------------------------------------------------------------------- */

#define MARK_STACK_BLOCKS 4

static bdescr *mark_stack_bdescr;
static StgPtr *mark_stack;
static StgPtr *mark_sp;
static StgPtr *mark_splim;

// Flag and pointers used for falling back to a linear scan when the
// mark stack overflows.
static rtsBool mark_stack_overflowed;
static bdescr *oldgen_scan_bd;
static StgPtr  oldgen_scan;
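
/* A minimal sketch (not compiled) of the overflow protocol, as used by
 * evacuate() below: on overflow we set the flag and empty the stack, and
 * the scavenger later falls back to a linear sweep of the old generation,
 * tracked by oldgen_scan_bd/oldgen_scan.
 */
#if 0
    if (mark_stack_full()) {
        mark_stack_overflowed = rtsTrue;
        reset_mark_stack();
    }
    push_mark_stack((P_)q);
#endif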
STATIC_INLINE rtsBool
mark_stack_empty(void)
{
    return mark_sp == mark_stack;
}

STATIC_INLINE rtsBool
mark_stack_full(void)
{
    return mark_sp >= mark_splim;
}

STATIC_INLINE void
reset_mark_stack(void)
{
    mark_sp = mark_stack;
}

STATIC_INLINE void
push_mark_stack(StgPtr p)
{
    *mark_sp++ = p;
}

STATIC_INLINE StgPtr
pop_mark_stack(void)
{
    return *--mark_sp;
}
/* -----------------------------------------------------------------------------
   Allocate a new to-space block in the given step.
   -------------------------------------------------------------------------- */

static bdescr *
gc_alloc_block(step *stp)
{
    bdescr *bd = allocBlock();
    bd->gen_no = stp->gen_no;
    bd->step = stp;
    bd->link = NULL;

    // blocks in to-space in generations up to and including N
    // get the BF_EVACUATED flag.
    if (stp->gen_no <= N) {
        bd->flags = BF_EVACUATED;
    } else {
        bd->flags = 0;
    }

    // Start a new to-space block, chain it on after the previous one.
    if (stp->hp_bd != NULL) {
        stp->hp_bd->free = stp->hp;
        stp->hp_bd->link = bd;
    }

    stp->hp_bd = bd;
    stp->hp    = bd->start;
    stp->hpLim = stp->hp + BLOCK_SIZE_W;

    stp->n_blocks++;
    new_blocks++;

    return bd;
}
static bdescr *
gc_alloc_scavd_block(step *stp)
{
    bdescr *bd = allocBlock();
    bd->gen_no = stp->gen_no;
    bd->step = stp;

    // blocks in to-space in generations up to and including N
    // get the BF_EVACUATED flag.
    if (stp->gen_no <= N) {
        bd->flags = BF_EVACUATED;
    } else {
        bd->flags = 0;
    }

    bd->link = stp->blocks;
    stp->blocks = bd;
    stp->n_blocks++;

    if (stp->scavd_hp != NULL) {
        Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
    }
    stp->scavd_hp    = bd->start;
    stp->scavd_hpLim = stp->scavd_hp + BLOCK_SIZE_W;

    new_scavd_blocks++;

    return bd;
}
/* -----------------------------------------------------------------------------
   GarbageCollect

   Rough outline of the algorithm: for garbage collecting generation N
   (and all younger generations):

     - follow all pointers in the root set.  the root set includes all
       mutable objects in all generations (mutable_list).

     - for each pointer, evacuate the object it points to into either

       + to-space of the step given by step->to, which is the next
         highest step in this generation or the first step in the next
         generation if this is the last step.

       + to-space of generations[evac_gen]->steps[0], if evac_gen != 0.
         When we evacuate an object we attempt to evacuate
         everything it points to into the same generation - this is
         achieved by setting evac_gen to the desired generation.  If
         we can't do this, then an entry in the mut list has to
         be made for the cross-generation pointer.

       + if the object is already in a generation > N, then leave
         it alone.

     - repeatedly scavenge to-space from each step in each generation
       being collected until no more objects can be evacuated.

     - free from-space in each step, and set from-space = to-space.

   Locks held: all capabilities are held throughout GarbageCollect().

   -------------------------------------------------------------------------- */
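
/* A minimal sketch (not compiled) of the fixed point described above.
 * The helper names evacuate_roots() and scavenge_until_no_work() are made
 * up for illustration; the real loop inside GarbageCollect() interleaves
 * static objects, the mark stack, and per-step scan pointers.
 */
#if 0
    evacuate_roots();                     // evacuate the root set
    do {
        scavenge_until_no_work();         // drain every scan pointer
    } while (traverse_weak_ptr_list());   // reviving weak pointers or
                                          // threads may create more work
#endif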
void
GarbageCollect ( void (*get_roots)(evac_fn), rtsBool force_major_gc )
{
  bdescr *bd;
  step *stp;
  lnat live, allocated, copied = 0, scavd_copied = 0;
  lnat oldgen_saved_blocks = 0;
  nat g, s, i;

#ifdef PROFILING
  CostCentreStack *prev_CCS;
#endif

#if defined(DEBUG) && defined(GRAN)
  IF_DEBUG(gc, debugBelch("@@ Starting garbage collection at %ld (%lx)\n",
                     Now, Now));
#endif

#if defined(RTS_USER_SIGNALS)
  // block signals
  blockUserSignals();
#endif

  // tell the STM to discard any cached closures it's hoping to re-use
  stmPreGCHook();

  // tell the stats department that we've started a GC
  stat_startGC();

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif

  // Init stats and print par specific (timing) info
  PAR_TICKY_PAR_START();

  // attribute any costs to CCS_GC
#ifdef PROFILING
  prev_CCS = CCCS;
  CCCS = CCS_GC;
#endif

  /* Approximate how much we allocated.
   * Todo: only when generating stats?
   */
  allocated = calcAllocated();
  /* Figure out which generation to collect
   */
  if (force_major_gc) {
    N = RtsFlags.GcFlags.generations - 1;
    major_gc = rtsTrue;
  } else {
    N = 0;
    for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      if (generations[g].steps[0].n_blocks +
          generations[g].steps[0].n_large_blocks
          >= generations[g].max_blocks) {
        N = g;
      }
    }
    major_gc = (N == RtsFlags.GcFlags.generations-1);
  }

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelBeforeGC(N);
  }
#endif

  // check stack sanity *before* GC (ToDo: check all threads)
#if defined(GRAN)
  // ToDo!: check sanity  IF_DEBUG(sanity, checkTSOsSanity());
#endif
  IF_DEBUG(sanity, checkFreeListSanity());
  /* Initialise the static object lists
   */
  static_objects = END_OF_STATIC_LIST;
  scavenged_static_objects = END_OF_STATIC_LIST;

  /* Save the nursery if we're doing a two-space collection.
   * g0s0->blocks will be used for to-space, so we need to get the
   * nursery out of the way.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      saved_nursery = g0s0->blocks;
      saved_n_blocks = g0s0->n_blocks;
      g0s0->blocks = NULL;
      g0s0->n_blocks = 0;
  }

  /* Keep a count of how many new blocks we allocated during this GC
   * (used for resizing the allocation area, later).
   */
  new_blocks = 0;
  new_scavd_blocks = 0;
  // Initialise to-space in all the generations/steps that we're
  // collecting.
  //
  for (g = 0; g <= N; g++) {

      // throw away the mutable list.  Invariant: the mutable list
      // always has at least one block; this means we can avoid a check for
      // NULL in recordMutable().
      if (g != 0) {
          freeChain(generations[g].mut_list);
          generations[g].mut_list = allocBlock();
          for (i = 0; i < n_capabilities; i++) {
              freeChain(capabilities[i].mut_lists[g]);
              capabilities[i].mut_lists[g] = allocBlock();
          }
      }

      for (s = 0; s < generations[g].n_steps; s++) {

          // generation 0, step 0 doesn't need to-space
          if (g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1) {
              continue;
          }

          stp = &generations[g].steps[s];
          ASSERT(stp->gen_no == g);

          // start a new to-space for this step.
          stp->old_blocks   = stp->blocks;
          stp->n_old_blocks = stp->n_blocks;

          // allocate the first to-space block; extra blocks will be
          // chained on as necessary.
          stp->hp_bd    = NULL;
          stp->blocks   = NULL;
          stp->n_blocks = 0;
          bd = gc_alloc_block(stp);
          stp->blocks  = bd;
          stp->scan_bd = bd;
          stp->scan    = bd->start;

          // allocate a block for "already scavenged" objects.  This goes
          // on the front of the stp->blocks list, so it won't be
          // traversed by the scavenging sweep.
          stp->scavd_hp = NULL;
          gc_alloc_scavd_block(stp);

          // initialise the large object queues.
          stp->new_large_objects = NULL;
          stp->scavenged_large_objects = NULL;
          stp->n_scavenged_large_blocks = 0;

          // mark the large objects as not evacuated yet
          for (bd = stp->large_objects; bd; bd = bd->link) {
              bd->flags &= ~BF_EVACUATED;
          }

          // for a compacted step, we need to allocate the bitmap
          if (stp->is_compacted) {
              nat bitmap_size; // in bytes
              bdescr *bitmap_bdescr;
              StgWord *bitmap;

              bitmap_size = stp->n_old_blocks * BLOCK_SIZE / (sizeof(W_)*BITS_PER_BYTE);

              if (bitmap_size > 0) {
                  bitmap_bdescr = allocGroup((lnat)BLOCK_ROUND_UP(bitmap_size)
                                             / BLOCK_SIZE);
                  stp->bitmap = bitmap_bdescr;
                  bitmap = bitmap_bdescr->start;

                  IF_DEBUG(gc, debugBelch("bitmap_size: %d, bitmap: %p",
                                          bitmap_size, bitmap););

                  // don't forget to fill it with zeros!
                  memset(bitmap, 0, bitmap_size);

                  // For each block in this step, point to its bitmap from the
                  // block descriptor.
                  for (bd = stp->old_blocks; bd != NULL; bd = bd->link) {
                      bd->u.bitmap = bitmap;
                      bitmap += BLOCK_SIZE_W / (sizeof(W_)*BITS_PER_BYTE);

                      // Also at this point we set the BF_COMPACTED flag
                      // for this block.  The invariant is that
                      // BF_COMPACTED is always unset, except during GC
                      // when it is set on those blocks which will be
                      // compacted.
                      bd->flags |= BF_COMPACTED;
                  }
              }
          }
      }
  }
  /* make sure the older generations have at least one block to
   * allocate into (this makes things easier for copy(), see below).
   */
  for (g = N+1; g < RtsFlags.GcFlags.generations; g++) {
    for (s = 0; s < generations[g].n_steps; s++) {
      stp = &generations[g].steps[s];
      if (stp->hp_bd == NULL) {
          ASSERT(stp->blocks == NULL);
          bd = gc_alloc_block(stp);
          stp->blocks = bd;
      }
      if (stp->scavd_hp == NULL) {
          gc_alloc_scavd_block(stp);
      }
      /* Set the scan pointer for older generations: remember we
       * still have to scavenge objects that have been promoted. */
      stp->scan    = stp->hp;
      stp->scan_bd = stp->hp_bd;
      stp->new_large_objects = NULL;
      stp->scavenged_large_objects = NULL;
      stp->n_scavenged_large_blocks = 0;
    }

    /* Move the private mutable lists from each capability onto the
     * main mutable list for the generation.
     */
    for (i = 0; i < n_capabilities; i++) {
        for (bd = capabilities[i].mut_lists[g];
             bd->link != NULL; bd = bd->link) {
            /* nothing */
        }
        bd->link = generations[g].mut_list;
        generations[g].mut_list = capabilities[i].mut_lists[g];
        capabilities[i].mut_lists[g] = allocBlock();
    }
  }
  /* Allocate a mark stack if we're doing a major collection.
   */
  if (major_gc) {
      mark_stack_bdescr = allocGroup(MARK_STACK_BLOCKS);
      mark_stack = (StgPtr *)mark_stack_bdescr->start;
      mark_sp    = mark_stack;
      mark_splim = mark_stack + (MARK_STACK_BLOCKS * BLOCK_SIZE_W);
  } else {
      mark_stack_bdescr = NULL;
  }

  eager_promotion = rtsTrue; // for now
  /* -----------------------------------------------------------------------
   * follow all the roots that we know about:
   *   - mutable lists from each generation > N
   * we want to *scavenge* these roots, not evacuate them: they're not
   * going to move in this GC.
   * Also: do them in reverse generation order.  This is because we
   * often want to promote objects that are pointed to by older
   * generations early, so we don't have to repeatedly copy them.
   * Doing the generations in reverse order ensures that we don't end
   * up in the situation where we want to evac an object to gen 3 and
   * it has already been evaced to gen 2.
   */
  {
    int st;
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      generations[g].saved_mut_list = generations[g].mut_list;
      generations[g].mut_list = allocBlock();
        // mut_list always has at least one block.
    }

    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
      IF_PAR_DEBUG(verbose, printMutableList(&generations[g]));
      scavenge_mutable_list(&generations[g]);
      for (st = generations[g].n_steps-1; st >= 0; st--) {
        scavenge(&generations[g].steps[st]);
      }
    }
  }
  /* follow roots from the CAF list (used by GHCi)
   */
  evac_gen = 0;
  markCAFs(mark_root);

  /* follow all the roots that the application knows about.
   */
  evac_gen = 0;
  get_roots(mark_root);

#if defined(GRAN)
  /* And don't forget to mark the TSO if we got here direct from
   * Haskell! */
  /* Not needed in a seq version?
  if (CurrentTSO) {
    CurrentTSO = (StgTSO *)MarkRoot((StgClosure *)CurrentTSO);
  }
  */
#endif

#if defined(PAR)
  // Mark the entries in the GALA table of the parallel system
  markLocalGAs(major_gc);
  // Mark all entries on the list of pending fetches
  markPendingFetches(major_gc);
#endif
  /* Mark the weak pointer list, and prepare to detect dead weak
   * pointers.
   */
  mark_weak_ptr_list(&weak_ptr_list);
  old_weak_ptr_list = weak_ptr_list;
  weak_ptr_list = NULL;
  weak_stage = WeakPtrs;

  /* The all_threads list is like the weak_ptr_list.
   * See traverse_weak_ptr_list() for the details.
   */
  old_all_threads = all_threads;
  all_threads = END_TSO_QUEUE;
  resurrected_threads = END_TSO_QUEUE;

  /* Mark the stable pointer table.
   */
  markStablePtrTable(mark_root);
  /* -------------------------------------------------------------------------
   * Repeatedly scavenge all the areas we know about until there's no
   * more scavenging to be done.
   */
  {
    rtsBool flag;
  loop:
    flag = rtsFalse;

    // scavenge static objects
    if (major_gc && static_objects != END_OF_STATIC_LIST) {
        IF_DEBUG(sanity, checkStaticObjects(static_objects));
        scavenge_static();
    }

    /* When scavenging the older generations:  Objects may have been
     * evacuated from generations <= N into older generations, and we
     * need to scavenge these objects.  We're going to try to ensure that
     * any evacuations that occur move the objects into at least the
     * same generation as the object being scavenged, otherwise we
     * have to create new entries on the mutable list for the older
     * generation.
     */

    // scavenge each step in generations 0..maxgen
    {
      int gen, st;

    loop2:
      // scavenge objects in compacted generation
      if (mark_stack_overflowed || oldgen_scan_bd != NULL ||
          (mark_stack_bdescr != NULL && !mark_stack_empty())) {
          scavenge_mark_stack();
          flag = rtsTrue;
      }

      for (gen = RtsFlags.GcFlags.generations; --gen >= 0; ) {
        for (st = generations[gen].n_steps; --st >= 0; ) {
          if (gen == 0 && st == 0 && RtsFlags.GcFlags.generations > 1) {
            continue;
          }
          stp = &generations[gen].steps[st];
          if (stp->hp_bd != stp->scan_bd || stp->scan < stp->hp) {
            scavenge(stp);
            flag = rtsTrue;
            goto loop2;
          }
          if (stp->new_large_objects != NULL) {
            scavenge_large(stp);
            flag = rtsTrue;
            goto loop2;
          }
        }
      }
    }

    if (flag) { goto loop; }

    // must be last...  invariant is that everything is fully
    // scavenged at this point.
    if (traverse_weak_ptr_list()) { // returns rtsTrue if evaced something
        goto loop;
    }
  }
  /* Update the pointers from the task list - these are
   * treated as weak pointers because we want to allow a main thread
   * to get a BlockedOnDeadMVar exception in the same way as any other
   * thread.  Note that the threads should all have been retained by
   * GC by virtue of being on the all_threads list, we're just
   * updating pointers here.
   */
  {
      Task *task;
      StgTSO *tso;
      for (task = all_tasks; task != NULL; task = task->all_link) {
          if (!task->stopped && task->tso) {
              ASSERT(task->tso->bound == task);
              tso = (StgTSO *) isAlive((StgClosure *)task->tso);
              if (tso == NULL) {
                  barf("task %p: main thread %d has been GC'd",
                       (void *)task, task->tso->id);
              }
              task->tso = tso;
          }
      }
  }

#if defined(PAR)
  // Reconstruct the Global Address tables used in GUM
  rebuildGAtables(major_gc);
  IF_DEBUG(sanity, checkLAGAtable(rtsTrue/*check closures, too*/));
#endif
  // Now see which stable names are still alive.
  gcStablePtrTable();

  // Tidy the end of the to-space chains
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
              ASSERT(Bdescr(stp->hp) == stp->hp_bd);
              stp->hp_bd->free = stp->hp;
              Bdescr(stp->scavd_hp)->free = stp->scavd_hp;
          }
      }
  }

#ifdef PROFILING
  // We call processHeapClosureForDead() on every closure destroyed during
  // the current garbage collection, so we invoke LdvCensusForDead().
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_LDV
      || RtsFlags.ProfFlags.bioSelector != NULL)
      LdvCensusForDead(N);
#endif
  // NO MORE EVACUATION AFTER THIS POINT!
  // Finally: compaction of the oldest generation.
  if (major_gc && oldest_gen->steps[0].is_compacted) {
      // save number of blocks for stats
      oldgen_saved_blocks = oldest_gen->steps[0].n_old_blocks;
      compact(get_roots);
  }

  IF_DEBUG(sanity, checkGlobalTSOList(rtsFalse));
  /* run through all the generations/steps and tidy up
   */
  copied = new_blocks * BLOCK_SIZE_W;
  scavd_copied = new_scavd_blocks * BLOCK_SIZE_W;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {

    if (g <= N) {
        generations[g].collections++; // for stats
    }

    // Count the mutable list as bytes "copied" for the purposes of
    // stats.  Every mutable list is copied during every GC.
    if (g > 0) {
        nat mut_list_size = 0;
        for (bd = generations[g].mut_list; bd != NULL; bd = bd->link) {
            mut_list_size += bd->free - bd->start;
        }
        copied += mut_list_size;

        IF_DEBUG(gc, debugBelch("mut_list_size: %d (%d vars, %d arrays, %d others)\n", mut_list_size * sizeof(W_), mutlist_MUTVARS, mutlist_MUTARRS, mutlist_OTHERS));
    }

    for (s = 0; s < generations[g].n_steps; s++) {
        bdescr *next;
        stp = &generations[g].steps[s];

        if (!(g == 0 && s == 0 && RtsFlags.GcFlags.generations > 1)) {
            // stats information: how much we copied
            if (g <= N) {
                copied -= stp->hp_bd->start + BLOCK_SIZE_W -
                    stp->hp;
                scavd_copied -= (P_)(BLOCK_ROUND_UP(stp->scavd_hp)) - stp->scavd_hp;
            }
        }
        // for generations we collected...
        if (g <= N) {

            /* free old memory and shift to-space into from-space for all
             * the collected steps (except the allocation area).  These
             * freed blocks will probably be quickly recycled.
             */
            if (!(g == 0 && s == 0)) {
                if (stp->is_compacted) {
                    // for a compacted step, just shift the new to-space
                    // onto the front of the now-compacted existing blocks.
                    for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                        bd->flags &= ~BF_EVACUATED;  // now from-space
                    }
                    // tack the new blocks on the end of the existing blocks
                    if (stp->old_blocks != NULL) {
                        for (bd = stp->old_blocks; bd != NULL; bd = next) {
                            // NB. this step might not be compacted next
                            // time, so reset the BF_COMPACTED flags.
                            // They are set before GC if we're going to
                            // compact.  (search for BF_COMPACTED above).
                            bd->flags &= ~BF_COMPACTED;
                            next = bd->link;
                            if (next == NULL) {
                                bd->link = stp->blocks;
                            }
                        }
                        stp->blocks = stp->old_blocks;
                    }
                    // add the new blocks to the block tally
                    stp->n_blocks += stp->n_old_blocks;
                    ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
                } else { // not compacted
                    freeChain(stp->old_blocks);
                    for (bd = stp->blocks; bd != NULL; bd = bd->link) {
                        bd->flags &= ~BF_EVACUATED;  // now from-space
                    }
                }
                stp->old_blocks = NULL;
                stp->n_old_blocks = 0;
            }
            /* LARGE OBJECTS.  The current live large objects are chained on
             * scavenged_large, having been moved during garbage
             * collection from large_objects.  Any objects left on the
             * large_objects list are therefore dead, so we free them here.
             */
            for (bd = stp->large_objects; bd != NULL; bd = next) {
                next = bd->link;
                freeGroup(bd);
            }

            // update the count of blocks used by large objects
            for (bd = stp->scavenged_large_objects; bd != NULL; bd = bd->link) {
                bd->flags &= ~BF_EVACUATED;
            }
            stp->large_objects  = stp->scavenged_large_objects;
            stp->n_large_blocks = stp->n_scavenged_large_blocks;

        } else { // for older generations...

            /* For older generations, we need to append the
             * scavenged_large_object list (i.e. large objects that have been
             * promoted during this GC) to the large_object list for that step.
             */
            for (bd = stp->scavenged_large_objects; bd; bd = next) {
                next = bd->link;
                bd->flags &= ~BF_EVACUATED;
                dbl_link_onto(bd, &stp->large_objects);
            }

            // add the new blocks we promoted during this GC
            stp->n_large_blocks += stp->n_scavenged_large_blocks;
        }
    }
  }
  /* Reset the sizes of the older generations when we do a major
   * collection.
   *
   * CURRENT STRATEGY: make all generations except zero the same size.
   * We have to stay within the maximum heap size, and leave a certain
   * percentage of the maximum heap size available to allocate into.
   */
  if (major_gc && RtsFlags.GcFlags.generations > 1) {
      nat live, size, min_alloc;
      nat max  = RtsFlags.GcFlags.maxHeapSize;
      nat gens = RtsFlags.GcFlags.generations;

      // live in the oldest generations
      live = oldest_gen->steps[0].n_blocks +
             oldest_gen->steps[0].n_large_blocks;

      // default max size for all generations except zero
      size = stg_max(live * RtsFlags.GcFlags.oldGenFactor,
                     RtsFlags.GcFlags.minOldGenSize);

      // minimum size for generation zero
      min_alloc = stg_max((RtsFlags.GcFlags.pcFreeHeap * max) / 200,
                          RtsFlags.GcFlags.minAllocAreaSize);

      // Auto-enable compaction when the residency reaches a
      // certain percentage of the maximum heap size (default: 30%).
      if (RtsFlags.GcFlags.generations > 1 &&
          (RtsFlags.GcFlags.compact ||
           (max > 0 &&
            oldest_gen->steps[0].n_blocks >
            (RtsFlags.GcFlags.compactThreshold * max) / 100))) {
          oldest_gen->steps[0].is_compacted = 1;
//        debugBelch("compaction: on\n", live);
      } else {
          oldest_gen->steps[0].is_compacted = 0;
//        debugBelch("compaction: off\n", live);
      }

      // if we're going to go over the maximum heap size, reduce the
      // size of the generations accordingly.  The calculation is
      // different if compaction is turned on, because we don't need
      // to double the space required to collect the old generation.
      if (max != 0) {

          // this test is necessary to ensure that the calculations
          // below don't have any negative results - we're working
          // with unsigned values here.
          if (max < min_alloc) {
              heapOverflow();
          }

          if (oldest_gen->steps[0].is_compacted) {
              if ( (size + (size - 1) * (gens - 2) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2 - 1);
              }
          } else {
              if ( (size * (gens - 1) * 2) + min_alloc > max ) {
                  size = (max - min_alloc) / ((gens - 1) * 2);
              }
          }

          if (size < live) {
              heapOverflow();
          }
      }

#if 0
      debugBelch("live: %d, min_alloc: %d, size : %d, max = %d\n", live,
              min_alloc, size, max);
#endif

      for (g = 0; g < gens; g++) {
          generations[g].max_blocks = size;
      }
  }
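
  /* Worked example (made-up numbers): gens = 2, compaction off,
   * live = 1000 blocks, oldGenFactor = 2, so size starts at 2000.
   * With min_alloc = 500 and max = 4000 we have
   * size * (gens-1) * 2 + min_alloc = 4500 > 4000, so size is clipped
   * to (4000 - 500) / ((2-1) * 2) = 1750 blocks; the factor of 2
   * reflects copying collection needing both from-space and to-space.
   */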
  // Guess the amount of live data for stats.
  live = calcLive();

  /* Free the small objects allocated via allocate(), since this will
   * all have been copied into G0S1 now.
   */
  if (small_alloc_list != NULL) {
    freeChain(small_alloc_list);
  }
  small_alloc_list = NULL;
  alloc_blocks = 0;
  alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;

  // Start a new pinned_object_block
  pinned_object_block = NULL;
  /* Free the mark stack.
   */
  if (mark_stack_bdescr != NULL) {
      freeGroup(mark_stack_bdescr);
  }

  /* Free any bitmaps.
   */
  for (g = 0; g <= N; g++) {
      for (s = 0; s < generations[g].n_steps; s++) {
          stp = &generations[g].steps[s];
          if (stp->bitmap != NULL) {
              freeGroup(stp->bitmap);
              stp->bitmap = NULL;
          }
      }
  }
  /* Two-space collector:
   * Free the old to-space, and estimate the amount of live data.
   */
  if (RtsFlags.GcFlags.generations == 1) {
      nat blocks;

      if (g0s0->old_blocks != NULL) {
          freeChain(g0s0->old_blocks);
      }
      for (bd = g0s0->blocks; bd != NULL; bd = bd->link) {
          bd->flags = 0;  // now from-space
      }
      g0s0->old_blocks   = g0s0->blocks;
      g0s0->n_old_blocks = g0s0->n_blocks;
      g0s0->blocks   = saved_nursery;
      g0s0->n_blocks = saved_n_blocks;

      /* For a two-space collector, we need to resize the nursery. */

      /* set up a new nursery.  Allocate a nursery size based on a
       * function of the amount of live data (by default a factor of 2)
       * Use the blocks from the old nursery if possible, freeing up any
       * left over blocks.
       *
       * If we get near the maximum heap size, then adjust our nursery
       * size accordingly.  If the nursery is the same size as the live
       * data (L), then we need 3L bytes.  We can reduce the size of the
       * nursery to bring the required memory down near 2L bytes.
       *
       * A normal 2-space collector would need 4L bytes to give the same
       * performance we get from 3L bytes, reducing to the same
       * performance at 2L bytes.
       */
      blocks = g0s0->n_old_blocks;

      if ( RtsFlags.GcFlags.maxHeapSize != 0 &&
           blocks * RtsFlags.GcFlags.oldGenFactor * 2 >
           RtsFlags.GcFlags.maxHeapSize ) {
          long adjusted_blocks;  // signed on purpose
          int pc_free;

          adjusted_blocks = (RtsFlags.GcFlags.maxHeapSize - 2 * blocks);
          IF_DEBUG(gc, debugBelch("@@ Near maximum heap size of 0x%x blocks, blocks = %d, adjusted to %ld", RtsFlags.GcFlags.maxHeapSize, blocks, adjusted_blocks));
          pc_free = adjusted_blocks * 100 / RtsFlags.GcFlags.maxHeapSize;
          if (pc_free < RtsFlags.GcFlags.pcFreeHeap) /* might even be < 0 */ {
              heapOverflow();
          }
          blocks = adjusted_blocks;
      } else {
          blocks *= RtsFlags.GcFlags.oldGenFactor;
          if (blocks < RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }
      }
      resizeNurseries(blocks);
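
      /* Worked example (made-up numbers): maxHeapSize = 1000 blocks,
       * oldGenFactor = 2, n_old_blocks = 300.  300 * 2 * 2 = 1200 > 1000,
       * so we are near the limit: adjusted_blocks = 1000 - 2*300 = 400
       * and pc_free = 40; provided 40 >= pcFreeHeap, the nursery is
       * resized to 400 blocks rather than the usual 300 * 2 = 600.
       */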
  } else { /* Generational collector */
      /* If the user has given us a suggested heap size, adjust our
       * allocation area to make best use of the memory available.
       */
      if (RtsFlags.GcFlags.heapSizeSuggestion) {
          long blocks;
          nat needed = calcNeeded();  // approx blocks needed at next GC

          /* Guess how much will be live in generation 0 step 0 next time.
           * A good approximation is obtained by finding the
           * percentage of g0s0 that was live at the last minor GC.
           */
          if (N == 0) {
              g0s0_pcnt_kept = (new_blocks * 100) / countNurseryBlocks();
          }

          /* Estimate a size for the allocation area based on the
           * information available.  We might end up going slightly under
           * or over the suggested heap size, but we should be pretty
           * close on average.
           *
           * Formula:            suggested - needed
           *                ----------------------------
           *                    1 + g0s0_pcnt_kept/100
           *
           * where 'needed' is the amount of memory needed at the next
           * collection for collecting all steps except g0s0.
           */
          blocks =
              (((long)RtsFlags.GcFlags.heapSizeSuggestion - (long)needed) * 100) /
              (100 + (long)g0s0_pcnt_kept);

          if (blocks < (long)RtsFlags.GcFlags.minAllocAreaSize) {
              blocks = RtsFlags.GcFlags.minAllocAreaSize;
          }

          resizeNurseries((nat)blocks);

      } else {
          // we might have added extra large blocks to the nursery, so
          // resize back to minAllocAreaSize again.
          resizeNurseriesFixed(RtsFlags.GcFlags.minAllocAreaSize);
      }
  }
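
  /* Worked example for the formula above (made-up numbers):
   * heapSizeSuggestion = 1024 blocks, needed = 200, g0s0_pcnt_kept = 30.
   * Then blocks = (1024 - 200) * 100 / 130 = 633 (integer division);
   * the ~190 blocks (30%) that survive the next minor GC plus 'needed'
   * bring the total back close to the suggested 1024.
   */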
  // mark the garbage collected CAFs as dead
#if 0 && defined(DEBUG) // doesn't work at the moment
  if (major_gc) { gcCAFs(); }
#endif

#ifdef PROFILING
  // resetStaticObjectForRetainerProfiling() must be called before
  // zeroing below.
  resetStaticObjectForRetainerProfiling();
#endif

  // zero the scavenged static object list
  if (major_gc) {
      zero_static_object_list(scavenged_static_objects);
  }

  // Reset the nursery
  resetNurseries();

  // start any pending finalizers
  scheduleFinalizers(last_free_capability, old_weak_ptr_list);

  // send exceptions to any threads which were about to die
  resurrectThreads(resurrected_threads);

  // Update the stable pointer hash table.
  updateStablePtrTable(major_gc);

  // check sanity after GC
  IF_DEBUG(sanity, checkSanity());

  // extra GC trace info
  IF_DEBUG(gc, statDescribeGens());

#ifdef DEBUG
  // symbol-table based profiling
  /*  heapCensus(to_blocks); */ /* ToDo */
#endif

#ifdef PROFILING
  // restore enclosing cost centre
  CCCS = prev_CCS;
#endif

#ifdef DEBUG
  // check for memory leaks if DEBUG is on
  memInventory();
#endif

#ifdef RTS_GTK_FRONTPANEL
  if (RtsFlags.GcFlags.frontpanel) {
      updateFrontPanelAfterGC( N, live );
  }
#endif

  // ok, GC over: tell the stats department what happened.
  stat_endGC(allocated, live, copied, scavd_copied, N);

#if defined(RTS_USER_SIGNALS)
  // unblock signals again
  unblockUserSignals();
#endif
}
/* -----------------------------------------------------------------------------
   Weak Pointers

   traverse_weak_ptr_list is called possibly many times during garbage
   collection.  It returns a flag indicating whether it did any work
   (i.e. called evacuate on any live pointers).

   Invariant: traverse_weak_ptr_list is called when the heap is in an
   idempotent state.  That means that there are no pending
   evacuate/scavenge operations.  This invariant helps the weak
   pointer code decide which weak pointers are dead - if there are no
   new live weak pointers, then all the currently unreachable ones are
   dead.

   For generational GC: we just don't try to finalize weak pointers in
   older generations than the one we're collecting.  This could
   probably be optimised by keeping per-generation lists of weak
   pointers, but for a few weak pointers this scheme will work.

   There are three distinct stages to processing weak pointers:

   - weak_stage == WeakPtrs

     We process all the weak pointers whose keys are alive (evacuate
     their values and finalizers), and repeat until we can find no new
     live keys.  If no live keys are found in this pass, then we
     evacuate the finalizers of all the dead weak pointers in order to
     run them.

   - weak_stage == WeakThreads

     Now, we discover which *threads* are still alive.  Pointers to
     threads from the all_threads and main thread lists are the
     weakest of all: a pointer from the finalizer of a dead weak
     pointer can keep a thread alive.  Any threads found to be unreachable
     are evacuated and placed on the resurrected_threads list so we
     can send them a signal later.

   - weak_stage == WeakDone

     No more evacuation is done.

   -------------------------------------------------------------------------- */
static rtsBool
traverse_weak_ptr_list(void)
{
  StgWeak *w, **last_w, *next_w;
  StgClosure *new;
  rtsBool flag = rtsFalse;

  switch (weak_stage) {

  case WeakDone:
      return rtsFalse;

  case WeakPtrs:
      /* doesn't matter where we evacuate values/finalizers to, since
       * these pointers are treated as roots (iff the keys are alive).
       */
      evac_gen = 0;

      last_w = &old_weak_ptr_list;
      for (w = old_weak_ptr_list; w != NULL; w = next_w) {

          /* There might be a DEAD_WEAK on the list if finalizeWeak# was
           * called on a live weak pointer object.  Just remove it.
           */
          if (w->header.info == &stg_DEAD_WEAK_info) {
              next_w = ((StgDeadWeak *)w)->link;
              *last_w = next_w;
              continue;
          }

          switch (get_itbl(w)->type) {

          case EVACUATED:
              next_w = (StgWeak *)((StgEvacuated *)w)->evacuee;
              *last_w = next_w;
              continue;

          case WEAK:
              /* Now, check whether the key is reachable.
               */
              new = isAlive(w->key);
              if (new != NULL) {
                  w->key = new;
                  // evacuate the value and finalizer
                  w->value = evacuate(w->value);
                  w->finalizer = evacuate(w->finalizer);
                  // remove this weak ptr from the old_weak_ptr list
                  *last_w = w->link;
                  // and put it on the new weak ptr list
                  next_w = w->link;
                  w->link = weak_ptr_list;
                  weak_ptr_list = w;
                  flag = rtsTrue;

                  IF_DEBUG(weak, debugBelch("Weak pointer still alive at %p -> %p",
                                       w, w->key));
                  continue;
              } else {
                  last_w = &(w->link);
                  next_w = w->link;
                  continue;
              }

          default:
              barf("traverse_weak_ptr_list: not WEAK");
          }
      }

      /* If we didn't make any changes, then we can go round and kill all
       * the dead weak pointers.  The old_weak_ptr list is used as a list
       * of pending finalizers later on.
       */
      if (flag == rtsFalse) {
          for (w = old_weak_ptr_list; w; w = w->link) {
              w->finalizer = evacuate(w->finalizer);
          }

          // Next, move to the WeakThreads stage after fully
          // scavenging the finalizers we've just evacuated.
          weak_stage = WeakThreads;
      }

      return rtsTrue;

  case WeakThreads:
      /* Now deal with the all_threads list, which behaves somewhat like
       * the weak ptr list.  If we discover any threads that are about to
       * become garbage, we wake them up and administer an exception.
       */
      {
          StgTSO *t, *tmp, *next, **prev;

          prev = &old_all_threads;
          for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {

              tmp = (StgTSO *)isAlive((StgClosure *)t);

              if (tmp != NULL) {
                  t = tmp;
              }

              ASSERT(get_itbl(t)->type == TSO);
              switch (t->what_next) {
              case ThreadRelocated:
                  next = t->link;
                  *prev = next;
                  continue;
              case ThreadKilled:
              case ThreadComplete:
                  // finished or died.  The thread might still be alive, but we
                  // don't keep it on the all_threads list.  Don't forget to
                  // stub out its global_link field.
                  next = t->global_link;
                  t->global_link = END_TSO_QUEUE;
                  *prev = next;
                  continue;
              default:
                  ;
              }

              // Threads blocked on black holes: if the black hole
              // is alive, then the thread is alive too.
              if (tmp == NULL && t->why_blocked == BlockedOnBlackHole) {
                  if (isAlive(t->block_info.closure)) {
                      t = (StgTSO *)evacuate((StgClosure *)t);
                      tmp = t;
                      flag = rtsTrue;
                  }
              }

              if (tmp == NULL) {
                  // not alive (yet): leave this thread on the
                  // old_all_threads list.
                  prev = &(t->global_link);
                  next = t->global_link;
              } else {
                  // alive: move this thread onto the all_threads list.
                  next = t->global_link;
                  t->global_link = all_threads;
                  all_threads = t;
                  *prev = next;
              }
          }
      }

      /* If we evacuated any threads, we need to go back to the scavenger.
       */
      if (flag) return rtsTrue;

      /* And resurrect any threads which were about to become garbage.
       */
      {
          StgTSO *t, *tmp, *next;
          for (t = old_all_threads; t != END_TSO_QUEUE; t = next) {
              next = t->global_link;
              tmp = (StgTSO *)evacuate((StgClosure *)t);
              tmp->global_link = resurrected_threads;
              resurrected_threads = tmp;
          }
      }

      /* Finally, we can update the blackhole_queue.  This queue
       * simply strings together TSOs blocked on black holes, it is
       * not intended to keep anything alive.  Hence, we do not follow
       * pointers on the blackhole_queue until now, when we have
       * determined which TSOs are otherwise reachable.  We know at
       * this point that all TSOs have been evacuated, however.
       */
      {
          StgTSO **pt;
          for (pt = &blackhole_queue; *pt != END_TSO_QUEUE; pt = &((*pt)->link)) {
              *pt = (StgTSO *)isAlive((StgClosure *)*pt);
              ASSERT(*pt != NULL);
          }
      }

      weak_stage = WeakDone;  // *now* we're done,
      return rtsTrue;         // but one more round of scavenging, please

  default:
      barf("traverse_weak_ptr_list");
      return rtsTrue;
  }
}
/* -----------------------------------------------------------------------------
   After GC, the live weak pointer list may have forwarding pointers
   on it, because a weak pointer object was evacuated after being
   moved to the live weak pointer list.  We remove those forwarding
   pointers here.

   Also, we don't consider weak pointer objects to be reachable, but
   we must nevertheless consider them to be "live" and retain them.
   Therefore any weak pointer objects which haven't as yet been
   evacuated need to be evacuated now.
   -------------------------------------------------------------------------- */

static void
mark_weak_ptr_list ( StgWeak **list )
{
  StgWeak *w, **last_w;

  last_w = list;
  for (w = *list; w; w = w->link) {
      // w might be WEAK, EVACUATED, or DEAD_WEAK (actually CON_STATIC) here
      ASSERT(w->header.info == &stg_DEAD_WEAK_info
             || get_itbl(w)->type == WEAK || get_itbl(w)->type == EVACUATED);
      w = (StgWeak *)evacuate((StgClosure *)w);
      *last_w = w;
      last_w = &(w->link);
  }
}
/* -----------------------------------------------------------------------------
   isAlive determines whether the given closure is still alive (after
   a garbage collection) or not.  It returns the new address of the
   closure if it is alive, or NULL otherwise.

   NOTE: Use it before compaction only!
   -------------------------------------------------------------------------- */

StgClosure *
isAlive(StgClosure *p)
{
  const StgInfoTable *info;
  bdescr *bd;

  while (1) {

    ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
    info = get_itbl(p);

    // ignore static closures
    //
    // ToDo: for static closures, check the static link field.
    // Problem here is that we sometimes don't set the link field, eg.
    // for static closures with an empty SRT or CONSTR_STATIC_NOCAFs.
    //
    if (!HEAP_ALLOCED(p)) {
        return p;
    }

    // ignore closures in generations that we're not collecting.
    bd = Bdescr((P_)p);
    if (bd->gen_no > N) {
        return p;
    }

    // if it's a pointer into to-space, then we're done
    if (bd->flags & BF_EVACUATED) {
        return p;
    }

    // large objects use the evacuated flag
    if (bd->flags & BF_LARGE) {
        return NULL;
    }

    // check the mark bit for compacted steps
    if ((bd->flags & BF_COMPACTED) && is_marked((P_)p,bd)) {
        return p;
    }

    switch (info->type) {

    case IND:
    case IND_STATIC:
    case IND_PERM:
    case IND_OLDGEN:      // rely on compatible layout with StgInd
    case IND_OLDGEN_PERM:
      // follow indirections
      p = ((StgInd *)p)->indirectee;
      continue;

    case EVACUATED:
      // alive!
      return ((StgEvacuated *)p)->evacuee;

    case TSO:
      if (((StgTSO *)p)->what_next == ThreadRelocated) {
        p = (StgClosure *)((StgTSO *)p)->link;
        continue;
      }
      return NULL;

    default:
      // dead, as far as we can tell
      return NULL;
    }
  }
}
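
/* Typical caller pattern (sketch, not compiled): isAlive() both tests
 * liveness and returns the forwarding address, so callers write the
 * result back, as the task-list and blackhole_queue code above does.
 */
#if 0
    tso = (StgTSO *)isAlive((StgClosure *)task->tso);
    if (tso != NULL) {
        task->tso = tso;       // update to the new address
    }
#endif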
static void
mark_root(StgClosure **root)
{
  *root = evacuate(*root);
}

STATIC_INLINE void
upd_evacuee(StgClosure *p, StgClosure *dest)
{
    // not true: (ToDo: perhaps it should be)
    // ASSERT(Bdescr((P_)dest)->flags & BF_EVACUATED);
    SET_INFO(p, &stg_EVACUATED_info);
    ((StgEvacuated *)p)->evacuee = dest;
}
STATIC_INLINE StgClosure *
copy(StgClosure *src, nat size, step *stp)
{
  StgPtr to, from;
  nat i;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
      if (eager_promotion) {
          stp = &generations[evac_gen].steps[0];
      } else {
          failed_to_evac = rtsTrue;
      }
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->hp + size >= stp->hpLim) {
      gc_alloc_block(stp);
  }

  to = stp->hp;
  from = (StgPtr)src;
  stp->hp = to + size;
  for (i = 0; i < size; i++) { // unroll for small i
      to[i] = from[i];
  }
  upd_evacuee((StgClosure *)from,(StgClosure *)to);

#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(from, size_org);
#endif
  return (StgClosure *)to;
}
// Same as copy() above, except the object will be allocated in memory
// that will not be scavenged.  Used for objects that have no pointer
// fields.
STATIC_INLINE StgClosure *
copy_noscav(StgClosure *src, nat size, step *stp)
{
  StgPtr to, from;
  nat i;
#ifdef PROFILING
  // @LDV profiling
  nat size_org = size;
#endif

  TICK_GC_WORDS_COPIED(size);
  /* Find out where we're going, using the handy "to" pointer in
   * the step of the source object.  If it turns out we need to
   * evacuate to an older generation, adjust it here (see comment
   * by evacuate()).
   */
  if (stp->gen_no < evac_gen) {
      if (eager_promotion) {
          stp = &generations[evac_gen].steps[0];
      } else {
          failed_to_evac = rtsTrue;
      }
  }

  /* chain a new block onto the to-space for the destination step if
   * necessary.
   */
  if (stp->scavd_hp + size >= stp->scavd_hpLim) {
      gc_alloc_scavd_block(stp);
  }

  to = stp->scavd_hp;
  from = (StgPtr)src;
  stp->scavd_hp = to + size;
  for (i = 0; i < size; i++) { // unroll for small i
      to[i] = from[i];
  }
  upd_evacuee((StgClosure *)from,(StgClosure *)to);

#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  SET_EVACUAEE_FOR_LDV(from, size_org);
#endif
  return (StgClosure *)to;
}
/* Special version of copy() for when we only want to copy the info
 * pointer of an object, but reserve some padding after it.  This is
 * used to optimise evacuation of BLACKHOLEs.
 */
static StgClosure *
copyPart(StgClosure *src, nat size_to_reserve, nat size_to_copy, step *stp)
{
  P_ dest, to, from;
#ifdef PROFILING
  // @LDV profiling
  nat size_to_copy_org = size_to_copy;
#endif

  TICK_GC_WORDS_COPIED(size_to_copy);
  if (stp->gen_no < evac_gen) {
      if (eager_promotion) {
          stp = &generations[evac_gen].steps[0];
      } else {
          failed_to_evac = rtsTrue;
      }
  }

  if (stp->hp + size_to_reserve >= stp->hpLim) {
      gc_alloc_block(stp);
  }

  for(to = stp->hp, from = (P_)src; size_to_copy>0; --size_to_copy) {
      *to++ = *from++;
  }

  dest = stp->hp;
  stp->hp += size_to_reserve;
  upd_evacuee(src,(StgClosure *)dest);
#ifdef PROFILING
  // We store the size of the just evacuated object in the LDV word so that
  // the profiler can guess the position of the next object later.
  // size_to_copy_org is wrong because the closure already occupies size_to_reserve
  // words.
  SET_EVACUAEE_FOR_LDV(src, size_to_reserve);

  // fill the slop
  if (size_to_reserve - size_to_copy_org > 0)
      LDV_FILL_SLOP(stp->hp - 1, (int)(size_to_reserve - size_to_copy_org));
#endif
  return (StgClosure *)dest;
}
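
/* Example use (from evacuate() below): a BLACKHOLE's payload is slop,
 * so only the header is copied while the full closure size is still
 * reserved in to-space:
 *
 *     copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
 */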
/* -----------------------------------------------------------------------------
   Evacuate a large object

   This just consists of removing the object from the (doubly-linked)
   step->large_objects list, and linking it on to the (singly-linked)
   step->new_large_objects list, from where it will be scavenged later.

   Convention: bd->flags has BF_EVACUATED set for a large object
   that has been evacuated, or unset otherwise.
   -------------------------------------------------------------------------- */

STATIC_INLINE void
evacuate_large(StgPtr p)
{
  bdescr *bd = Bdescr(p);
  step *stp;

  // object must be at the beginning of the block (or be a ByteArray)
  ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS ||
         (((W_)p & BLOCK_MASK) == 0));

  // already evacuated?
  if (bd->flags & BF_EVACUATED) {
    /* Don't forget to set the failed_to_evac flag if we didn't get
     * the desired destination (see comments in evacuate()).
     */
    if (bd->gen_no < evac_gen) {
      failed_to_evac = rtsTrue;
      TICK_GC_FAILED_PROMOTION();
    }
    return;
  }

  stp = bd->step;
  // remove from large_object list
  if (bd->u.back) {
    bd->u.back->link = bd->link;
  } else { // first object in the list
    stp->large_objects = bd->link;
  }
  if (bd->link) {
    bd->link->u.back = bd->u.back;
  }

  /* link it on to the evacuated large object list of the destination step
   */
  stp = bd->step->to;
  if (stp->gen_no < evac_gen) {
      if (eager_promotion) {
          stp = &generations[evac_gen].steps[0];
      } else {
          failed_to_evac = rtsTrue;
      }
  }

  bd->step = stp;
  bd->gen_no = stp->gen_no;
  bd->link = stp->new_large_objects;
  stp->new_large_objects = bd;
  bd->flags |= BF_EVACUATED;
}
/* -----------------------------------------------------------------------------
   Evacuate

   This is called (eventually) for every live object in the system.

   The caller to evacuate specifies a desired generation in the
   evac_gen global variable.  The following conditions apply to
   evacuating an object which resides in generation M when we're
   collecting up to generation N

   if  M >= evac_gen
           if  M > N     do nothing
           else          evac to step->to

   if  M < evac_gen      evac to evac_gen, step 0

   if the object is already evacuated, then we check which generation
   it now resides in.

   if  M >= evac_gen     do nothing
   if  M <  evac_gen     set failed_to_evac flag to indicate that we
                         didn't manage to evacuate this object into evac_gen.


   OPTIMISATION NOTES:

   evacuate() is the single most important function performance-wise
   in the GC.  Various things have been tried to speed it up, but as
   far as I can tell the code generated by gcc 3.2 with -O2 is about
   as good as it's going to get.  We pass the argument to evacuate()
   in a register using the 'regparm' attribute (see the prototype for
   evacuate() near the top of this file).

   Changing evacuate() to take an (StgClosure **) rather than
   returning the new pointer seems attractive, because we can avoid
   writing back the pointer when it hasn't changed (eg. for a static
   object, or an object in a generation > N).  However, I tried it and
   it doesn't help.  One reason is that the (StgClosure **) pointer
   gets spilled to the stack inside evacuate(), resulting in far more
   extra reads/writes than we save.
   -------------------------------------------------------------------------- */
REGPARM1 static StgClosure *
evacuate(StgClosure *q)
{
  StgClosure *to;
  bdescr *bd = NULL;
  step *stp;
  const StgInfoTable *info;

loop:
  ASSERT(LOOKS_LIKE_CLOSURE_PTR(q));

  if (!HEAP_ALLOCED(q)) {

      if (!major_gc) return q;

      info = get_itbl(q);
      switch (info->type) {

      case THUNK_STATIC:
          if (info->srt_bitmap != 0 &&
              *THUNK_STATIC_LINK((StgClosure *)q) == NULL) {
              *THUNK_STATIC_LINK((StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case FUN_STATIC:
          if (info->srt_bitmap != 0 &&
              *FUN_STATIC_LINK((StgClosure *)q) == NULL) {
              *FUN_STATIC_LINK((StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case IND_STATIC:
          /* If q->saved_info != NULL, then it's a revertible CAF - it'll be
           * on the CAF list, so don't do anything with it here (we'll
           * scavenge it later).
           */
          if (((StgIndStatic *)q)->saved_info == NULL
              && *IND_STATIC_LINK((StgClosure *)q) == NULL) {
              *IND_STATIC_LINK((StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case CONSTR_STATIC:
          if (*STATIC_LINK(info,(StgClosure *)q) == NULL) {
              *STATIC_LINK(info,(StgClosure *)q) = static_objects;
              static_objects = (StgClosure *)q;
          }
          return q;

      case CONSTR_INTLIKE:
      case CONSTR_CHARLIKE:
      case CONSTR_NOCAF_STATIC:
          /* no need to put these on the static linked list, they don't need
           * to be scavenged.
           */
          return q;

      default:
          barf("evacuate(static): strange closure type %d", (int)(info->type));
      }
  }
  bd = Bdescr((P_)q);

  if (bd->gen_no > N) {
      /* Can't evacuate this object, because it's in a generation
       * older than the ones we're collecting.  Let's hope that it's
       * in evac_gen or older, or we will have to arrange to track
       * this pointer using the mutable list.
       */
      if (bd->gen_no < evac_gen) {
          // nope
          failed_to_evac = rtsTrue;
          TICK_GC_FAILED_PROMOTION();
      }
      return q;
  }

  if ((bd->flags & (BF_LARGE | BF_COMPACTED | BF_EVACUATED)) != 0) {

      /* pointer into to-space: just return it.  This normally
       * shouldn't happen, but allowing it makes certain things
       * slightly easier (eg. the mutable list can contain the same
       * object twice, for example).
       */
      if (bd->flags & BF_EVACUATED) {
          if (bd->gen_no < evac_gen) {
              failed_to_evac = rtsTrue;
              TICK_GC_FAILED_PROMOTION();
          }
          return q;
      }

      /* evacuate large objects by re-linking them onto a different list.
       */
      if (bd->flags & BF_LARGE) {
          info = get_itbl(q);
          if (info->type == TSO &&
              ((StgTSO *)q)->what_next == ThreadRelocated) {
              q = (StgClosure *)((StgTSO *)q)->link;
              goto loop;
          }
          evacuate_large((P_)q);
          return q;
      }

      /* If the object is in a step that we're compacting, then we
       * need to use an alternative evacuate procedure.
       */
      if (bd->flags & BF_COMPACTED) {
          if (!is_marked((P_)q,bd)) {
              mark((P_)q,bd);
              if (mark_stack_full()) {
                  mark_stack_overflowed = rtsTrue;
                  reset_mark_stack();
              }
              push_mark_stack((P_)q);
          }
          return q;
      }
  }

  stp = bd->step->to;

  info = get_itbl(q);
  switch (info->type) {

  case MUT_VAR_CLEAN:
  case MUT_VAR_DIRTY:
  case MVAR:
      return copy(q,sizeW_fromITBL(info),stp);

  case CONSTR_0_1:
  {
      StgWord w = (StgWord)q->payload[0];
      if (q->header.info == Czh_con_info &&
          // unsigned, so always true:  (StgChar)w >= MIN_CHARLIKE &&
          (StgChar)w <= MAX_CHARLIKE) {
          return (StgClosure *)CHARLIKE_CLOSURE((StgChar)w);
      }
      if (q->header.info == Izh_con_info &&
          (StgInt)w >= MIN_INTLIKE && (StgInt)w <= MAX_INTLIKE) {
          return (StgClosure *)INTLIKE_CLOSURE((StgInt)w);
      }
      // else
      return copy_noscav(q,sizeofW(StgHeader)+1,stp);
  }

  case FUN_0_1:
  case FUN_1_0:
  case CONSTR_1_0:
      return copy(q,sizeofW(StgHeader)+1,stp);

  case THUNK_1_0:
  case THUNK_0_1:
      return copy(q,sizeofW(StgThunk)+1,stp);

  case THUNK_1_1:
  case THUNK_2:
  case THUNK_0_2:
#ifdef NO_PROMOTE_THUNKS
      if (bd->gen_no == 0 &&
          bd->step->no != 0 &&
          bd->step->no == generations[bd->gen_no].n_steps-1) {
          stp = bd->step;
      }
#endif
      return copy(q,sizeofW(StgThunk)+2,stp);

  case FUN_1_1:
  case FUN_2:
  case FUN_0_2:
  case CONSTR_1_1:
  case CONSTR_2:
      return copy(q,sizeofW(StgHeader)+2,stp);

  case CONSTR_0_2:
      return copy_noscav(q,sizeofW(StgHeader)+2,stp);

  case THUNK:
      return copy(q,thunk_sizeW_fromITBL(info),stp);

  case FUN:
  case CONSTR:
  case IND_PERM:
  case IND_OLDGEN_PERM:
  case WEAK:
  case STABLE_NAME:
      return copy(q,sizeW_fromITBL(info),stp);

  case BCO:
      return copy(q,bco_sizeW((StgBCO *)q),stp);

  case CAF_BLACKHOLE:
  case SE_CAF_BLACKHOLE:
  case SE_BLACKHOLE:
  case BLACKHOLE:
      return copyPart(q,BLACKHOLE_sizeW(),sizeofW(StgHeader),stp);
  case THUNK_SELECTOR:
    {
        StgClosure *p;
        const StgInfoTable *info_ptr;

        if (thunk_selector_depth > MAX_THUNK_SELECTOR_DEPTH) {
            return copy(q,THUNK_SELECTOR_sizeW(),stp);
        }

        // stashed away for LDV profiling, see below
        info_ptr = q->header.info;

        p = eval_thunk_selector(info->layout.selector_offset,
                                (StgSelector *)q);

        if (p == NULL) {
            return copy(q,THUNK_SELECTOR_sizeW(),stp);
        } else {
            StgClosure *val;
            // q is still BLACKHOLE'd.
            thunk_selector_depth++;
            val = evacuate(p);
            thunk_selector_depth--;

#ifdef PROFILING
            // For the purposes of LDV profiling, we have destroyed
            // the original selector thunk.
            SET_INFO(q, info_ptr);
            LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(q);
#endif

            // Update the THUNK_SELECTOR with an indirection to the
            // EVACUATED closure now at p.  Why do this rather than
            // upd_evacuee(q,p)?  Because we have an invariant that an
            // EVACUATED closure always points to an object in the
            // same or an older generation (required by the short-cut
            // test in the EVACUATED case, below).
            SET_INFO(q, &stg_IND_info);
            ((StgInd *)q)->indirectee = p;

            // For the purposes of LDV profiling, we have created an
            // indirection.
            LDV_RECORD_CREATE(q);

            return val;
        }
    }
  case IND:
  case IND_OLDGEN:
    // follow chains of indirections, don't evacuate them
    q = ((StgInd*)q)->indirectee;
    goto loop;

  case RET_BCO:
  case RET_SMALL:
  case RET_VEC_SMALL:
  case RET_BIG:
  case RET_VEC_BIG:
  case RET_DYN:
  case UPDATE_FRAME:
  case STOP_FRAME:
  case CATCH_FRAME:
  case CATCH_STM_FRAME:
  case CATCH_RETRY_FRAME:
  case ATOMICALLY_FRAME:
    // shouldn't see these
    barf("evacuate: stack frame at %p\n", q);

  case PAP:
      return copy(q,pap_sizeW((StgPAP*)q),stp);

  case AP:
      return copy(q,ap_sizeW((StgAP*)q),stp);

  case AP_STACK:
      return copy(q,ap_stack_sizeW((StgAP_STACK*)q),stp);
  case EVACUATED:
    /* Already evacuated, just return the forwarding address.
     * HOWEVER: if the requested destination generation (evac_gen) is
     * older than the actual generation (because the object was
     * already evacuated to a younger generation) then we have to
     * set the failed_to_evac flag to indicate that we couldn't
     * manage to promote the object to the desired generation.
     *
     * Optimisation: the check is fairly expensive, but we can often
     * shortcut it if either the required generation is 0, or the
     * current object (the EVACUATED) is in a high enough generation.
     * We know that an EVACUATED always points to an object in the
     * same or an older generation.  stp is the lowest step that the
     * current object would be evacuated to, so we only do the full
     * check if stp is too low.
     */
    if (evac_gen > 0 && stp->gen_no < evac_gen) {  // optimisation
        StgClosure *p = ((StgEvacuated*)q)->evacuee;
        if (HEAP_ALLOCED(p) && Bdescr((P_)p)->gen_no < evac_gen) {
            failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
    }
    return ((StgEvacuated*)q)->evacuee;
  case ARR_WORDS:
      // just copy the block
      return copy_noscav(q,arr_words_sizeW((StgArrWords *)q),stp);

  case MUT_ARR_PTRS_CLEAN:
  case MUT_ARR_PTRS_DIRTY:
  case MUT_ARR_PTRS_FROZEN:
  case MUT_ARR_PTRS_FROZEN0:
      // just copy the block
      return copy(q,mut_arr_ptrs_sizeW((StgMutArrPtrs *)q),stp);

  case TSO:
    {
      StgTSO *tso = (StgTSO *)q;

      /* Deal with redirected TSOs (a TSO that's had its stack enlarged).
       */
      if (tso->what_next == ThreadRelocated) {
          q = (StgClosure *)tso->link;
          goto loop;
      }

      /* To evacuate a small TSO, we need to relocate the update frame
       * list it contains.
       */
      {
          StgTSO *new_tso;
          StgPtr p, q;

          new_tso = (StgTSO *)copyPart((StgClosure *)tso,
                                       tso_sizeW(tso),
                                       sizeofW(StgTSO), stp);
          move_TSO(tso, new_tso);
          for (p = tso->sp, q = new_tso->sp;
               p < tso->stack+tso->stack_size;) {
              *q++ = *p++;
          }

          return (StgClosure *)new_tso;
      }
    }
#if defined(PAR)
  case RBH:
    {
      //StgInfoTable *rip = get_closure_info(q, &size, &ptrs, &nonptrs, &vhs, str);
      to = copy(q,BLACKHOLE_sizeW(),stp);
      //ToDo: derive size etc from reverted IP
      //to = copy(q,size,stp);
      IF_DEBUG(gc,
               debugBelch("@@ evacuate: RBH %p (%s) to %p (%s)",
                     q, info_type(q), to, info_type(to)));
      return to;
    }

  case BLOCKED_FETCH:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
    to = copy(q,sizeofW(StgBlockedFetch),stp);
    IF_DEBUG(gc,
             debugBelch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

# ifdef DIST
  case REMOTE_REF:
# endif
  case FETCH_ME:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
    to = copy(q,sizeofW(StgFetchMe),stp);
    IF_DEBUG(gc,
             debugBelch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;

  case FETCH_ME_BQ:
    ASSERT(sizeofW(StgBlockedFetch) >= MIN_PAYLOAD_SIZE);
    to = copy(q,sizeofW(StgFetchMeBlockingQueue),stp);
    IF_DEBUG(gc,
             debugBelch("@@ evacuate: %p (%s) to %p (%s)",
                   q, info_type(q), to, info_type(to)));
    return to;
#endif
  case TREC_HEADER:
    return copy(q,sizeofW(StgTRecHeader),stp);

  case TVAR_WAIT_QUEUE:
    return copy(q,sizeofW(StgTVarWaitQueue),stp);

  case TVAR:
    return copy(q,sizeofW(StgTVar),stp);

  case TREC_CHUNK:
    return copy(q,sizeofW(StgTRecChunk),stp);

  default:
    barf("evacuate: strange closure type %d", (int)(info->type));
  }
}
/* -----------------------------------------------------------------------------
   Evaluate a THUNK_SELECTOR if possible.

   returns: NULL if we couldn't evaluate this THUNK_SELECTOR, or
   a closure pointer if we evaluated it and this is the result.  Note
   that "evaluating" the THUNK_SELECTOR doesn't necessarily mean
   reducing it to HNF, just that we have eliminated the selection.
   The result might be another thunk, or even another THUNK_SELECTOR.

   If the return value is non-NULL, the original selector thunk has
   been BLACKHOLE'd, and should be updated with an indirection or a
   forwarding pointer.  If the return value is NULL, then the selector
   thunk is unchanged.

   ***
   ToDo: the treatment of THUNK_SELECTORS could be improved in the
   following way (from a suggestion by Ian Lynagh):

   We can have a chain like this:

      sel_0 --> (a,b)
                 |
                 |-----> sel_0 --> (a,b)
                                    |
                                    |-----> sel_0 --> ...

   and the depth limit means we don't go all the way to the end of the
   chain, which results in a space leak.  This affects the recursive
   call to evacuate() in the THUNK_SELECTOR case in evacuate(): *not*
   the recursive call to eval_thunk_selector() in
   eval_thunk_selector().

   We could eliminate the depth bound in this case, in the following
   way:

   - traverse the chain once to discover the *value* of the
     THUNK_SELECTOR.  Mark all THUNK_SELECTORS that we
     visit on the way as having been visited already (somehow).

   - in a second pass, traverse the chain again updating all
     THUNK_SELECTORS that we find on the way with indirections to
     the value.

   - if we encounter a "marked" THUNK_SELECTOR in a normal
     evacuate(), we know it can't be updated so just evac it.

   Program that illustrates the problem:

        foo [] = ([], [])
        foo (x:xs) = let (ys, zs) = foo xs
                     in if x >= 0 then (x:ys, zs) else (ys, x:zs)

        main = bar [1..(100000000::Int)]
        bar xs = (\(ys, zs) -> print ys >> print zs) (foo xs)

   -------------------------------------------------------------------------- */
static inline rtsBool
is_to_space ( StgClosure *p )
{
    bdescr *bd;

    bd = Bdescr((StgPtr)p);
    if (HEAP_ALLOCED(p) &&
        ((bd->flags & BF_EVACUATED)
         || ((bd->flags & BF_COMPACTED) &&
             is_marked((P_)p,bd)))) {
        return rtsTrue;
    } else {
        return rtsFalse;
    }
}
static StgClosure *
eval_thunk_selector( nat field, StgSelector * p )
{
    StgInfoTable *info;
    const StgInfoTable *info_ptr;
    StgClosure *selectee;

    selectee = p->selectee;

    // Save the real info pointer (NOTE: not the same as get_itbl()).
    info_ptr = p->header.info;

    // If the THUNK_SELECTOR is in a generation that we are not
    // collecting, then bail out early.  We won't be able to save any
    // space in any case, and updating with an indirection is trickier
    // in an old gen.
    if (Bdescr((StgPtr)p)->gen_no > N) {
        return NULL;
    }

    // BLACKHOLE the selector thunk, since it is now under evaluation.
    // This is important to stop us going into an infinite loop if
    // this selector thunk eventually refers to itself.
    SET_INFO(p,&stg_BLACKHOLE_info);

selector_loop:
2324 // We don't want to end up in to-space, because this causes
2325 // problems when the GC later tries to evacuate the result of
2326 // eval_thunk_selector(). There are various ways this could happen:
2329 // 1. following an IND_STATIC
2331 // 2. when the old generation is compacted, the mark phase updates
2332 // from-space pointers to be to-space pointers, and we can't
2333 // reliably tell which we're following (eg. from an IND_STATIC).
2335 // 3. compacting GC again: if we're looking at a constructor in
2336 // the compacted generation, it might point directly to objects
2337 // in to-space. We must bail out here, otherwise doing the selection
2338 // will result in a to-space pointer being returned.
2340 // (1) is dealt with using a BF_EVACUATED test on the
2341 // selectee. (2) and (3): we can tell if we're looking at an
2342 // object in the compacted generation that might point to
2343 // to-space objects by testing that (a) it is BF_COMPACTED, (b)
2344 // the compacted generation is being collected, and (c) the
2345 // object is marked. Only a marked object may have pointers that
2346 // point to to-space objects, because that happens when
2349 // The to-space test is now embodied in the is_to_space() inline
2350 // function, as it is re-used below.
2352 if (is_to_space(selectee)) {
2356 info = get_itbl(selectee);
2357 switch (info->type) {
2365 case CONSTR_NOCAF_STATIC:
2366 // check that the size is in range
2367 ASSERT(field < (StgWord32)(info->layout.payload.ptrs +
2368 info->layout.payload.nptrs));
2370 // Select the right field from the constructor, and check
2371 // that the result isn't in to-space. It might be in
2372 // to-space if, for example, this constructor contains
2373 // pointers to younger-gen objects (and is on the mut-once list).
2378 q = selectee->payload[field];
2379 if (is_to_space(q)) {
2389 case IND_OLDGEN_PERM:
2391 selectee = ((StgInd *)selectee)->indirectee;
2395 // We don't follow pointers into to-space; the constructor
2396 // has already been evacuated, so we won't save any space
2397 // leaks by evaluating this selector thunk anyhow.
2400 case THUNK_SELECTOR:
2404 // check that we don't recurse too much, re-using the
2405 // depth bound also used in evacuate().
2406 if (thunk_selector_depth >= MAX_THUNK_SELECTOR_DEPTH) {
2409 thunk_selector_depth++;
2411 val = eval_thunk_selector(info->layout.selector_offset,
2412 (StgSelector *)selectee);
2414 thunk_selector_depth--;
2419 // We evaluated this selector thunk, so update it with
2420 // an indirection. NOTE: we don't use UPD_IND here,
2421 // because we are guaranteed that p is in a generation
2422 // that we are collecting, and we never want to put the
2423 // indirection on a mutable list.
2425 // For the purposes of LDV profiling, we have destroyed
2426 // the original selector thunk.
2427 SET_INFO(p, info_ptr);
2428 LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(selectee);
2430 ((StgInd *)selectee)->indirectee = val;
2431 SET_INFO(selectee,&stg_IND_info);
2433 // For the purposes of LDV profiling, we have created an indirection.
2435 LDV_RECORD_CREATE(selectee);
2452 case SE_CAF_BLACKHOLE:
2464 // not evaluated yet
2468 barf("eval_thunk_selector: strange selectee %d",
2473 // We didn't manage to evaluate this thunk; restore the old info pointer
2474 SET_INFO(p, info_ptr);
2478 /* -----------------------------------------------------------------------------
2479 move_TSO is called to update the TSO structure after it has been
2480 moved from one place to another.
2481 -------------------------------------------------------------------------- */
2484 move_TSO (StgTSO *src, StgTSO *dest)
2488 // relocate the stack pointer...
2489 diff = (StgPtr)dest - (StgPtr)src; // In *words*
2490 dest->sp = (StgPtr)dest->sp + diff;
2493 /* Similar to scavenge_large_bitmap(), but we don't write back the
2494 * pointers we get back from evacuate().
2497 scavenge_large_srt_bitmap( StgLargeSRT *large_srt )
2504 bitmap = large_srt->l.bitmap[b];
2505 size = (nat)large_srt->l.size;
2506 p = (StgClosure **)large_srt->srt;
2507 for (i = 0; i < size; ) {
2508 if ((bitmap & 1) != 0) {
2513 if (i % BITS_IN(W_) == 0) {
2515 bitmap = large_srt->l.bitmap[b];
2517 bitmap = bitmap >> 1;
2522 /* evacuate the SRT. If srt_bitmap is zero, then there isn't an
2523 * srt field in the info table. That's ok, because we'll
2524 * never dereference it.
2527 scavenge_srt (StgClosure **srt, nat srt_bitmap)
2532 bitmap = srt_bitmap;
2535 if (bitmap == (StgHalfWord)(-1)) {
2536 scavenge_large_srt_bitmap( (StgLargeSRT *)srt );
2540 while (bitmap != 0) {
2541 if ((bitmap & 1) != 0) {
2542 #ifdef ENABLE_WIN32_DLL_SUPPORT
2543 // Special-case to handle references to closures hiding out in DLLs, since
2544 // double indirections required to get at those. The code generator knows
2545 // which is which when generating the SRT, so it stores the (indirect)
2546 // reference to the DLL closure in the table by first adding one to it.
2547 // We check for this here, and undo the addition before evacuating it.
2549 // If the SRT entry hasn't got bit 0 set, the SRT entry points to a
2550 // closure that's fixed at link-time, and no extra magic is required.
2551 if ( (unsigned long)(*srt) & 0x1 ) {
2552 evacuate(*stgCast(StgClosure**,(stgCast(unsigned long, *srt) & ~0x1)));
2561 bitmap = bitmap >> 1;
2567 scavenge_thunk_srt(const StgInfoTable *info)
2569 StgThunkInfoTable *thunk_info;
2571 if (!major_gc) return;
2573 thunk_info = itbl_to_thunk_itbl(info);
2574 scavenge_srt((StgClosure **)GET_SRT(thunk_info), thunk_info->i.srt_bitmap);
2578 scavenge_fun_srt(const StgInfoTable *info)
2580 StgFunInfoTable *fun_info;
2582 if (!major_gc) return;
2584 fun_info = itbl_to_fun_itbl(info);
2585 scavenge_srt((StgClosure **)GET_FUN_SRT(fun_info), fun_info->i.srt_bitmap);
2588 /* -----------------------------------------------------------------------------
2590 -------------------------------------------------------------------------- */
2593 scavengeTSO (StgTSO *tso)
2595 if ( tso->why_blocked == BlockedOnMVar
2596 || tso->why_blocked == BlockedOnBlackHole
2597 || tso->why_blocked == BlockedOnException
2599 || tso->why_blocked == BlockedOnGA
2600 || tso->why_blocked == BlockedOnGA_NoSend
2603 tso->block_info.closure = evacuate(tso->block_info.closure);
2605 if ( tso->blocked_exceptions != NULL ) {
2606 tso->blocked_exceptions =
2607 (StgTSO *)evacuate((StgClosure *)tso->blocked_exceptions);
2610 // We don't always chase the link field: TSOs on the blackhole
2611 // queue are not automatically alive, so the link field is a
2612 // "weak" pointer in that case.
2613 if (tso->why_blocked != BlockedOnBlackHole) {
2614 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
2617 // scavenge the current transaction record
2618 tso->trec = (StgTRecHeader *)evacuate((StgClosure *)tso->trec);
2620 // scavenge this thread's stack
2621 scavenge_stack(tso->sp, &(tso->stack[tso->stack_size]));
2624 /* -----------------------------------------------------------------------------
2625 Blocks of function args occur on the stack (at the top) and
2627 -------------------------------------------------------------------------- */
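/* A summary sketch of the dispatch below (the ARG_GEN / ARG_GEN_BIG
 * case labels are an assumption here; only the macro calls appear
 * verbatim in the code):
 *
 *   ARG_GEN     -> small bitmap held inline in fun_info->f.b.bitmap
 *   ARG_GEN_BIG -> large bitmap reached via GET_FUN_LARGE_BITMAP()
 *   default     -> standard precomputed bitmap from stg_arg_bitmaps[]
 *
 * In every case a clear bit means "this word is a pointer, evacuate
 * it" -- see the (bitmap & 1) == 0 tests below.
 */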
2629 STATIC_INLINE StgPtr
2630 scavenge_arg_block (StgFunInfoTable *fun_info, StgClosure **args)
2637 switch (fun_info->f.fun_type) {
2639 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2640 size = BITMAP_SIZE(fun_info->f.b.bitmap);
2643 size = GET_FUN_LARGE_BITMAP(fun_info)->size;
2644 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2648 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2649 size = BITMAP_SIZE(stg_arg_bitmaps[fun_info->f.fun_type]);
2652 if ((bitmap & 1) == 0) {
2653 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2656 bitmap = bitmap >> 1;
2664 STATIC_INLINE StgPtr
2665 scavenge_PAP_payload (StgClosure *fun, StgClosure **payload, StgWord size)
2669 StgFunInfoTable *fun_info;
2671 fun_info = get_fun_itbl(fun);
2672 ASSERT(fun_info->i.type != PAP);
2673 p = (StgPtr)payload;
2675 switch (fun_info->f.fun_type) {
2677 bitmap = BITMAP_BITS(fun_info->f.b.bitmap);
2680 scavenge_large_bitmap(p, GET_FUN_LARGE_BITMAP(fun_info), size);
2684 scavenge_large_bitmap((StgPtr)payload, BCO_BITMAP(fun), size);
2688 bitmap = BITMAP_BITS(stg_arg_bitmaps[fun_info->f.fun_type]);
2691 if ((bitmap & 1) == 0) {
2692 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2695 bitmap = bitmap >> 1;
2703 STATIC_INLINE StgPtr
2704 scavenge_PAP (StgPAP *pap)
2706 pap->fun = evacuate(pap->fun);
2707 return scavenge_PAP_payload (pap->fun, pap->payload, pap->n_args);
2710 STATIC_INLINE StgPtr
2711 scavenge_AP (StgAP *ap)
2713 ap->fun = evacuate(ap->fun);
2714 return scavenge_PAP_payload (ap->fun, ap->payload, ap->n_args);
2717 /* -----------------------------------------------------------------------------
2718 Scavenge a given step until there are no more objects in this step
2721 evac_gen is set by the caller to be either zero (for a step in a
2722 generation < N) or G where G is the generation of the step being
2725 We sometimes temporarily change evac_gen back to zero if we're
2726 scavenging a mutable object where early promotion isn't such a good idea.
2728 -------------------------------------------------------------------------- */
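/* An illustrative sketch of the evac_gen game described above, in the
 * shape it takes in the mutable cases below (MVAR, TVAR, TREC_HEADER,
 * and friends):
 */
#if 0
    evac_gen = 0;                   // allow children to stay young
    mvar->value = evacuate((StgClosure *)mvar->value);
    evac_gen = saved_evac_gen;      // restore for subsequent objects
    failed_to_evac = rtsTrue;       // mutable: keep on the mutable list
#endif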
2736 nat saved_evac_gen = evac_gen;
2741 failed_to_evac = rtsFalse;
2743 /* scavenge phase - standard breadth-first scavenging of the
2747 while (bd != stp->hp_bd || p < stp->hp) {
2749 // If we're at the end of this block, move on to the next block
2750 if (bd != stp->hp_bd && p == bd->free) {
2756 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
2757 info = get_itbl((StgClosure *)p);
2759 ASSERT(thunk_selector_depth == 0);
2762 switch (info->type) {
2766 StgMVar *mvar = ((StgMVar *)p);
2768 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
2769 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
2770 mvar->value = evacuate((StgClosure *)mvar->value);
2771 evac_gen = saved_evac_gen;
2772 failed_to_evac = rtsTrue; // mutable.
2773 p += sizeofW(StgMVar);
2778 scavenge_fun_srt(info);
2779 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2780 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2781 p += sizeofW(StgHeader) + 2;
2785 scavenge_thunk_srt(info);
2786 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
2787 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2788 p += sizeofW(StgThunk) + 2;
2792 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
2793 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2794 p += sizeofW(StgHeader) + 2;
2798 scavenge_thunk_srt(info);
2799 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2800 p += sizeofW(StgThunk) + 1;
2804 scavenge_fun_srt(info);
2806 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2807 p += sizeofW(StgHeader) + 1;
2811 scavenge_thunk_srt(info);
2812 p += sizeofW(StgThunk) + 1;
2816 scavenge_fun_srt(info);
2818 p += sizeofW(StgHeader) + 1;
2822 scavenge_thunk_srt(info);
2823 p += sizeofW(StgThunk) + 2;
2827 scavenge_fun_srt(info);
2829 p += sizeofW(StgHeader) + 2;
2833 scavenge_thunk_srt(info);
2834 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
2835 p += sizeofW(StgThunk) + 2;
2839 scavenge_fun_srt(info);
2841 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
2842 p += sizeofW(StgHeader) + 2;
2846 scavenge_fun_srt(info);
2853 scavenge_thunk_srt(info);
2854 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
2855 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
2856 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2858 p += info->layout.payload.nptrs;
2869 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
2870 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
2871 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2873 p += info->layout.payload.nptrs;
2878 StgBCO *bco = (StgBCO *)p;
2879 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
2880 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
2881 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
2882 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
2883 p += bco_sizeW(bco);
2888 if (stp->gen->no != 0) {
2891 // No need to call LDV_recordDead_FILL_SLOP_DYNAMIC() because an
2892 // IND_OLDGEN_PERM closure is larger than an IND_PERM closure.
2893 LDV_recordDead((StgClosure *)p, sizeofW(StgInd));
2896 // Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
2898 SET_INFO(((StgClosure *)p), &stg_IND_OLDGEN_PERM_info);
2900 // We pretend that p has just been created.
2901 LDV_RECORD_CREATE((StgClosure *)p);
2904 case IND_OLDGEN_PERM:
2905 ((StgInd *)p)->indirectee = evacuate(((StgInd *)p)->indirectee);
2906 p += sizeofW(StgInd);
2910 case MUT_VAR_DIRTY: {
2911 rtsBool saved_eager_promotion = eager_promotion;
2913 eager_promotion = rtsFalse;
2914 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
2915 eager_promotion = saved_eager_promotion;
2917 if (failed_to_evac) {
2918 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
2920 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
2922 p += sizeofW(StgMutVar);
2927 case SE_CAF_BLACKHOLE:
2930 p += BLACKHOLE_sizeW();
2933 case THUNK_SELECTOR:
2935 StgSelector *s = (StgSelector *)p;
2936 s->selectee = evacuate(s->selectee);
2937 p += THUNK_SELECTOR_sizeW();
2941 // A chunk of stack saved in a heap object
2944 StgAP_STACK *ap = (StgAP_STACK *)p;
2946 ap->fun = evacuate(ap->fun);
2947 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
2948 p = (StgPtr)ap->payload + ap->size;
2953 p = scavenge_PAP((StgPAP *)p);
2957 p = scavenge_AP((StgAP *)p);
2961 // nothing to follow
2962 p += arr_words_sizeW((StgArrWords *)p);
2965 case MUT_ARR_PTRS_CLEAN:
2966 case MUT_ARR_PTRS_DIRTY:
2967 // follow everything
2970 rtsBool saved_eager;
2972 // We don't eagerly promote objects pointed to by a mutable
2973 // array, but if we find the array only points to objects in
2974 // the same or an older generation, we mark it "clean" and
2975 // avoid traversing it during minor GCs.
2976 saved_eager = eager_promotion;
2977 eager_promotion = rtsFalse;
2978 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
2979 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
2980 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
2982 eager_promotion = saved_eager;
2984 if (failed_to_evac) {
2985 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
2987 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
2990 failed_to_evac = rtsTrue; // always put it on the mutable list.
2994 case MUT_ARR_PTRS_FROZEN:
2995 case MUT_ARR_PTRS_FROZEN0:
2996 // follow everything
3000 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3001 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3002 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3005 // If we're going to put this object on the mutable list, then
3006 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3007 if (failed_to_evac) {
3008 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3010 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3017 StgTSO *tso = (StgTSO *)p;
3018 rtsBool saved_eager = eager_promotion;
3020 eager_promotion = rtsFalse;
3022 eager_promotion = saved_eager;
3024 if (failed_to_evac) {
3025 tso->flags |= TSO_DIRTY;
3027 tso->flags &= ~TSO_DIRTY;
3030 failed_to_evac = rtsTrue; // always on the mutable list
3031 p += tso_sizeW(tso);
3039 nat size, ptrs, nonptrs, vhs;
3041 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3043 StgRBH *rbh = (StgRBH *)p;
3044 (StgClosure *)rbh->blocking_queue =
3045 evacuate((StgClosure *)rbh->blocking_queue);
3046 failed_to_evac = rtsTrue; // mutable anyhow.
3048 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3049 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3050 // ToDo: use size of reverted closure here!
3051 p += BLACKHOLE_sizeW();
3057 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3058 // follow the pointer to the node which is being demanded
3059 (StgClosure *)bf->node =
3060 evacuate((StgClosure *)bf->node);
3061 // follow the link to the rest of the blocking queue
3062 (StgClosure *)bf->link =
3063 evacuate((StgClosure *)bf->link);
3065 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3066 bf, info_type((StgClosure *)bf),
3067 bf->node, info_type(bf->node)));
3068 p += sizeofW(StgBlockedFetch);
3076 p += sizeofW(StgFetchMe);
3077 break; // nothing to do in this case
3081 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3082 (StgClosure *)fmbq->blocking_queue =
3083 evacuate((StgClosure *)fmbq->blocking_queue);
3085 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3086 p, info_type((StgClosure *)p)));
3087 p += sizeofW(StgFetchMeBlockingQueue);
3092 case TVAR_WAIT_QUEUE:
3094 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3096 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3097 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3098 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3099 evac_gen = saved_evac_gen;
3100 failed_to_evac = rtsTrue; // mutable
3101 p += sizeofW(StgTVarWaitQueue);
3107 StgTVar *tvar = ((StgTVar *) p);
3109 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3110 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3111 evac_gen = saved_evac_gen;
3112 failed_to_evac = rtsTrue; // mutable
3113 p += sizeofW(StgTVar);
3119 StgTRecHeader *trec = ((StgTRecHeader *) p);
3121 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3122 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3123 evac_gen = saved_evac_gen;
3124 failed_to_evac = rtsTrue; // mutable
3125 p += sizeofW(StgTRecHeader);
3132 StgTRecChunk *tc = ((StgTRecChunk *) p);
3133 TRecEntry *e = &(tc -> entries[0]);
3135 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3136 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3137 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3138 e->expected_value = evacuate((StgClosure*)e->expected_value);
3139 e->new_value = evacuate((StgClosure*)e->new_value);
3141 evac_gen = saved_evac_gen;
3142 failed_to_evac = rtsTrue; // mutable
3143 p += sizeofW(StgTRecChunk);
3148 barf("scavenge: unimplemented/strange closure type %d @ %p",
3153 * We need to record the current object on the mutable list if
3154 * (a) It is actually mutable, or
3155 * (b) It contains pointers to a younger generation.
3156 * Case (b) arises if we didn't manage to promote everything that
3157 * the current object points to into the current generation.
3159 if (failed_to_evac) {
3160 failed_to_evac = rtsFalse;
3161 if (stp->gen_no > 0) {
3162 recordMutableGen((StgClosure *)q, stp->gen);
3171 /* -----------------------------------------------------------------------------
3172 Scavenge everything on the mark stack.
3174 This is slightly different from scavenge():
3175 - we don't walk linearly through the objects, so the scavenger
3176 doesn't need to advance the pointer on to the next object.
3177 -------------------------------------------------------------------------- */
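/* Note: objects get here via push_mark_stack(), which evacuate() is
 * assumed to use when the destination generation is being compacted.
 * After an object is scavenged, the bit *after* its mark bit is set
 * (see mark(q+1, ...) below) so that the linear-scan fallback can
 * distinguish "marked" from "marked and already scavenged".
 */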
3180 scavenge_mark_stack(void)
3186 evac_gen = oldest_gen->no;
3187 saved_evac_gen = evac_gen;
3190 while (!mark_stack_empty()) {
3191 p = pop_mark_stack();
3193 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3194 info = get_itbl((StgClosure *)p);
3197 switch (info->type) {
3201 StgMVar *mvar = ((StgMVar *)p);
3203 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3204 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3205 mvar->value = evacuate((StgClosure *)mvar->value);
3206 evac_gen = saved_evac_gen;
3207 failed_to_evac = rtsTrue; // mutable.
3212 scavenge_fun_srt(info);
3213 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3214 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3218 scavenge_thunk_srt(info);
3219 ((StgThunk *)p)->payload[1] = evacuate(((StgThunk *)p)->payload[1]);
3220 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3224 ((StgClosure *)p)->payload[1] = evacuate(((StgClosure *)p)->payload[1]);
3225 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3230 scavenge_fun_srt(info);
3231 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3236 scavenge_thunk_srt(info);
3237 ((StgThunk *)p)->payload[0] = evacuate(((StgThunk *)p)->payload[0]);
3242 ((StgClosure *)p)->payload[0] = evacuate(((StgClosure *)p)->payload[0]);
3247 scavenge_fun_srt(info);
3252 scavenge_thunk_srt(info);
3260 scavenge_fun_srt(info);
3267 scavenge_thunk_srt(info);
3268 end = (P_)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3269 for (p = (P_)((StgThunk *)p)->payload; p < end; p++) {
3270 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3282 end = (P_)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3283 for (p = (P_)((StgClosure *)p)->payload; p < end; p++) {
3284 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3290 StgBCO *bco = (StgBCO *)p;
3291 bco->instrs = (StgArrWords *)evacuate((StgClosure *)bco->instrs);
3292 bco->literals = (StgArrWords *)evacuate((StgClosure *)bco->literals);
3293 bco->ptrs = (StgMutArrPtrs *)evacuate((StgClosure *)bco->ptrs);
3294 bco->itbls = (StgArrWords *)evacuate((StgClosure *)bco->itbls);
3299 // don't need to do anything here: the only possible case
3300 // is that we're in a 1-space compacting collector, with
3301 // no "old" generation.
3305 case IND_OLDGEN_PERM:
3306 ((StgInd *)p)->indirectee =
3307 evacuate(((StgInd *)p)->indirectee);
3311 case MUT_VAR_DIRTY: {
3312 rtsBool saved_eager_promotion = eager_promotion;
3314 eager_promotion = rtsFalse;
3315 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3316 eager_promotion = saved_eager_promotion;
3318 if (failed_to_evac) {
3319 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
3321 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
3327 case SE_CAF_BLACKHOLE:
3333 case THUNK_SELECTOR:
3335 StgSelector *s = (StgSelector *)p;
3336 s->selectee = evacuate(s->selectee);
3340 // A chunk of stack saved in a heap object
3343 StgAP_STACK *ap = (StgAP_STACK *)p;
3345 ap->fun = evacuate(ap->fun);
3346 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3351 scavenge_PAP((StgPAP *)p);
3355 scavenge_AP((StgAP *)p);
3358 case MUT_ARR_PTRS_CLEAN:
3359 case MUT_ARR_PTRS_DIRTY:
3360 // follow everything
3363 rtsBool saved_eager;
3365 // We don't eagerly promote objects pointed to by a mutable
3366 // array, but if we find the array only points to objects in
3367 // the same or an older generation, we mark it "clean" and
3368 // avoid traversing it during minor GCs.
3369 saved_eager = eager_promotion;
3370 eager_promotion = rtsFalse;
3371 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3372 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3373 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3375 eager_promotion = saved_eager;
3377 if (failed_to_evac) {
3378 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3380 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3383 failed_to_evac = rtsTrue; // mutable anyhow.
3387 case MUT_ARR_PTRS_FROZEN:
3388 case MUT_ARR_PTRS_FROZEN0:
3389 // follow everything
3393 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3394 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3395 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3398 // If we're going to put this object on the mutable list, then
3399 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3400 if (failed_to_evac) {
3401 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3403 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3410 StgTSO *tso = (StgTSO *)p;
3411 rtsBool saved_eager = eager_promotion;
3413 eager_promotion = rtsFalse;
3415 eager_promotion = saved_eager;
3417 if (failed_to_evac) {
3418 tso->flags |= TSO_DIRTY;
3420 tso->flags &= ~TSO_DIRTY;
3423 failed_to_evac = rtsTrue; // always on the mutable list
3431 nat size, ptrs, nonptrs, vhs;
3433 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3435 StgRBH *rbh = (StgRBH *)p;
3436 (StgClosure *)rbh->blocking_queue =
3437 evacuate((StgClosure *)rbh->blocking_queue);
3438 failed_to_evac = rtsTrue; // mutable anyhow.
3440 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3441 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3447 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3448 // follow the pointer to the node which is being demanded
3449 (StgClosure *)bf->node =
3450 evacuate((StgClosure *)bf->node);
3451 // follow the link to the rest of the blocking queue
3452 (StgClosure *)bf->link =
3453 evacuate((StgClosure *)bf->link);
3455 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3456 bf, info_type((StgClosure *)bf),
3457 bf->node, info_type(bf->node)));
3465 break; // nothing to do in this case
3469 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3470 (StgClosure *)fmbq->blocking_queue =
3471 evacuate((StgClosure *)fmbq->blocking_queue);
3473 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3474 p, info_type((StgClosure *)p)));
3479 case TVAR_WAIT_QUEUE:
3481 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3483 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3484 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3485 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3486 evac_gen = saved_evac_gen;
3487 failed_to_evac = rtsTrue; // mutable
3493 StgTVar *tvar = ((StgTVar *) p);
3495 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3496 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3497 evac_gen = saved_evac_gen;
3498 failed_to_evac = rtsTrue; // mutable
3505 StgTRecChunk *tc = ((StgTRecChunk *) p);
3506 TRecEntry *e = &(tc -> entries[0]);
3508 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3509 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3510 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3511 e->expected_value = evacuate((StgClosure*)e->expected_value);
3512 e->new_value = evacuate((StgClosure*)e->new_value);
3514 evac_gen = saved_evac_gen;
3515 failed_to_evac = rtsTrue; // mutable
3521 StgTRecHeader *trec = ((StgTRecHeader *) p);
3523 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3524 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3525 evac_gen = saved_evac_gen;
3526 failed_to_evac = rtsTrue; // mutable
3531 barf("scavenge_mark_stack: unimplemented/strange closure type %d @ %p",
3535 if (failed_to_evac) {
3536 failed_to_evac = rtsFalse;
3538 recordMutableGen((StgClosure *)q, &generations[evac_gen]);
3542 // mark the next bit to indicate "scavenged"
3543 mark(q+1, Bdescr(q));
3545 } // while (!mark_stack_empty())
3547 // start a new linear scan if the mark stack overflowed at some point
3548 if (mark_stack_overflowed && oldgen_scan_bd == NULL) {
3549 IF_DEBUG(gc, debugBelch("scavenge_mark_stack: starting linear scan"));
3550 mark_stack_overflowed = rtsFalse;
3551 oldgen_scan_bd = oldest_gen->steps[0].old_blocks;
3552 oldgen_scan = oldgen_scan_bd->start;
3555 if (oldgen_scan_bd) {
3556 // push a new thing on the mark stack
3558 // find a closure that is marked but not scavenged, and start
3560 while (oldgen_scan < oldgen_scan_bd->free
3561 && !is_marked(oldgen_scan,oldgen_scan_bd)) {
3565 if (oldgen_scan < oldgen_scan_bd->free) {
3567 // already scavenged?
3568 if (is_marked(oldgen_scan+1,oldgen_scan_bd)) {
3569 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
3572 push_mark_stack(oldgen_scan);
3573 // ToDo: bump the linear scan by the actual size of the object
3574 oldgen_scan += sizeofW(StgHeader) + MIN_PAYLOAD_SIZE;
3578 oldgen_scan_bd = oldgen_scan_bd->link;
3579 if (oldgen_scan_bd != NULL) {
3580 oldgen_scan = oldgen_scan_bd->start;
3586 /* -----------------------------------------------------------------------------
3587 Scavenge one object.
3589 This is used for objects that are temporarily marked as mutable
3590 because they contain old-to-new generation pointers. Only certain
3591 objects can have this property.
3592 -------------------------------------------------------------------------- */
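/* A sketch of the calling convention, matching the call sites in
 * scavenge_mutable_list() and scavenge_large() below: scavenge_one()
 * returns rtsTrue if it failed to promote everything the object
 * points to, i.e. the object must stay on a mutable list.
 */
#if 0
    if (scavenge_one(p)) {
        // object still carries old-to-new pointers
        recordMutableGen((StgClosure *)p, gen);
    }
#endif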
3595 scavenge_one(StgPtr p)
3597 const StgInfoTable *info;
3598 nat saved_evac_gen = evac_gen;
3601 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3602 info = get_itbl((StgClosure *)p);
3604 switch (info->type) {
3608 StgMVar *mvar = ((StgMVar *)p);
3610 mvar->head = (StgTSO *)evacuate((StgClosure *)mvar->head);
3611 mvar->tail = (StgTSO *)evacuate((StgClosure *)mvar->tail);
3612 mvar->value = evacuate((StgClosure *)mvar->value);
3613 evac_gen = saved_evac_gen;
3614 failed_to_evac = rtsTrue; // mutable.
3627 end = (StgPtr)((StgThunk *)p)->payload + info->layout.payload.ptrs;
3628 for (q = (StgPtr)((StgThunk *)p)->payload; q < end; q++) {
3629 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3635 case FUN_1_0: // hardly worth specialising these guys
3651 end = (StgPtr)((StgClosure *)p)->payload + info->layout.payload.ptrs;
3652 for (q = (StgPtr)((StgClosure *)p)->payload; q < end; q++) {
3653 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
3659 case MUT_VAR_DIRTY: {
3661 rtsBool saved_eager_promotion = eager_promotion;
3663 eager_promotion = rtsFalse;
3664 ((StgMutVar *)p)->var = evacuate(((StgMutVar *)p)->var);
3665 eager_promotion = saved_eager_promotion;
3667 if (failed_to_evac) {
3668 ((StgClosure *)q)->header.info = &stg_MUT_VAR_DIRTY_info;
3670 ((StgClosure *)q)->header.info = &stg_MUT_VAR_CLEAN_info;
3676 case SE_CAF_BLACKHOLE:
3681 case THUNK_SELECTOR:
3683 StgSelector *s = (StgSelector *)p;
3684 s->selectee = evacuate(s->selectee);
3690 StgAP_STACK *ap = (StgAP_STACK *)p;
3692 ap->fun = evacuate(ap->fun);
3693 scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
3694 p = (StgPtr)ap->payload + ap->size;
3699 p = scavenge_PAP((StgPAP *)p);
3703 p = scavenge_AP((StgAP *)p);
3707 // nothing to follow
3710 case MUT_ARR_PTRS_CLEAN:
3711 case MUT_ARR_PTRS_DIRTY:
3714 rtsBool saved_eager;
3716 // We don't eagerly promote objects pointed to by a mutable
3717 // array, but if we find the array only points to objects in
3718 // the same or an older generation, we mark it "clean" and
3719 // avoid traversing it during minor GCs.
3720 saved_eager = eager_promotion;
3721 eager_promotion = rtsFalse;
3723 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3724 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3725 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3727 eager_promotion = saved_eager;
3729 if (failed_to_evac) {
3730 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_DIRTY_info;
3732 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_CLEAN_info;
3735 failed_to_evac = rtsTrue;
3739 case MUT_ARR_PTRS_FROZEN:
3740 case MUT_ARR_PTRS_FROZEN0:
3742 // follow everything
3745 next = p + mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
3746 for (p = (P_)((StgMutArrPtrs *)p)->payload; p < next; p++) {
3747 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
3750 // If we're going to put this object on the mutable list, then
3751 // set its info ptr to MUT_ARR_PTRS_FROZEN0 to indicate that.
3752 if (failed_to_evac) {
3753 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN0_info;
3755 ((StgClosure *)q)->header.info = &stg_MUT_ARR_PTRS_FROZEN_info;
3762 StgTSO *tso = (StgTSO *)p;
3763 rtsBool saved_eager = eager_promotion;
3765 eager_promotion = rtsFalse;
3767 eager_promotion = saved_eager;
3769 if (failed_to_evac) {
3770 tso->flags |= TSO_DIRTY;
3772 tso->flags &= ~TSO_DIRTY;
3775 failed_to_evac = rtsTrue; // always on the mutable list
3783 nat size, ptrs, nonptrs, vhs;
3785 StgInfoTable *rip = get_closure_info(p, &size, &ptrs, &nonptrs, &vhs, str);
3787 StgRBH *rbh = (StgRBH *)p;
3788 (StgClosure *)rbh->blocking_queue =
3789 evacuate((StgClosure *)rbh->blocking_queue);
3790 failed_to_evac = rtsTrue; // mutable anyhow.
3792 debugBelch("@@ scavenge: RBH %p (%s) (new blocking_queue link=%p)",
3793 p, info_type(p), (StgClosure *)rbh->blocking_queue));
3794 // ToDo: use size of reverted closure here!
3800 StgBlockedFetch *bf = (StgBlockedFetch *)p;
3801 // follow the pointer to the node which is being demanded
3802 (StgClosure *)bf->node =
3803 evacuate((StgClosure *)bf->node);
3804 // follow the link to the rest of the blocking queue
3805 (StgClosure *)bf->link =
3806 evacuate((StgClosure *)bf->link);
3808 debugBelch("@@ scavenge: %p (%s); node is now %p; exciting, isn't it",
3809 bf, info_type((StgClosure *)bf),
3810 bf->node, info_type(bf->node)));
3818 break; // nothing to do in this case
3822 StgFetchMeBlockingQueue *fmbq = (StgFetchMeBlockingQueue *)p;
3823 (StgClosure *)fmbq->blocking_queue =
3824 evacuate((StgClosure *)fmbq->blocking_queue);
3826 debugBelch("@@ scavenge: %p (%s) exciting, isn't it",
3827 p, info_type((StgClosure *)p)));
3832 case TVAR_WAIT_QUEUE:
3834 StgTVarWaitQueue *wq = ((StgTVarWaitQueue *) p);
3836 wq->waiting_tso = (StgTSO *)evacuate((StgClosure*)wq->waiting_tso);
3837 wq->next_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->next_queue_entry);
3838 wq->prev_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)wq->prev_queue_entry);
3839 evac_gen = saved_evac_gen;
3840 failed_to_evac = rtsTrue; // mutable
3846 StgTVar *tvar = ((StgTVar *) p);
3848 tvar->current_value = evacuate((StgClosure*)tvar->current_value);
3849 tvar->first_wait_queue_entry = (StgTVarWaitQueue *)evacuate((StgClosure*)tvar->first_wait_queue_entry);
3850 evac_gen = saved_evac_gen;
3851 failed_to_evac = rtsTrue; // mutable
3857 StgTRecHeader *trec = ((StgTRecHeader *) p);
3859 trec->enclosing_trec = (StgTRecHeader *)evacuate((StgClosure*)trec->enclosing_trec);
3860 trec->current_chunk = (StgTRecChunk *)evacuate((StgClosure*)trec->current_chunk);
3861 evac_gen = saved_evac_gen;
3862 failed_to_evac = rtsTrue; // mutable
3869 StgTRecChunk *tc = ((StgTRecChunk *) p);
3870 TRecEntry *e = &(tc -> entries[0]);
3872 tc->prev_chunk = (StgTRecChunk *)evacuate((StgClosure*)tc->prev_chunk);
3873 for (i = 0; i < tc -> next_entry_idx; i ++, e++ ) {
3874 e->tvar = (StgTVar *)evacuate((StgClosure*)e->tvar);
3875 e->expected_value = evacuate((StgClosure*)e->expected_value);
3876 e->new_value = evacuate((StgClosure*)e->new_value);
3878 evac_gen = saved_evac_gen;
3879 failed_to_evac = rtsTrue; // mutable
3884 case IND_OLDGEN_PERM:
3887 /* Careful here: a THUNK can be on the mutable list because
3888 * it contains pointers to young gen objects. If such a thunk
3889 * is updated, the IND_OLDGEN will be added to the mutable
3890 * list again, and we'll scavenge it twice. evacuate()
3891 * doesn't check whether the object has already been
3892 * evacuated, so we perform that check here.
3894 StgClosure *q = ((StgInd *)p)->indirectee;
3895 if (HEAP_ALLOCED(q) && Bdescr((StgPtr)q)->flags & BF_EVACUATED) {
3898 ((StgInd *)p)->indirectee = evacuate(q);
3901 #if 0 && defined(DEBUG)
3902 if (RtsFlags.DebugFlags.gc)
3903 /* Debugging code to print out the size of the thing we just
3907 StgPtr start = gen->steps[0].scan;
3908 bdescr *start_bd = gen->steps[0].scan_bd;
3910 scavenge(&gen->steps[0]);
3911 if (start_bd != gen->steps[0].scan_bd) {
3912 size += (P_)BLOCK_ROUND_UP(start) - start;
3913 start_bd = start_bd->link;
3914 while (start_bd != gen->steps[0].scan_bd) {
3915 size += BLOCK_SIZE_W;
3916 start_bd = start_bd->link;
3918 size += gen->steps[0].scan -
3919 (P_)BLOCK_ROUND_DOWN(gen->steps[0].scan);
3921 size = gen->steps[0].scan - start;
3923 debugBelch("evac IND_OLDGEN: %ld bytes", size * sizeof(W_));
3929 barf("scavenge_one: strange object %d", (int)(info->type));
3932 no_luck = failed_to_evac;
3933 failed_to_evac = rtsFalse;
3937 /* -----------------------------------------------------------------------------
3938 Scavenging mutable lists.
3940 We treat the mutable list of each generation > N (i.e. all the
3941 generations older than the one being collected) as roots. We also
3942 remove non-mutable objects from the mutable list at this point.
3943 -------------------------------------------------------------------------- */
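/* A hedged sketch of the assumed driver loop in GarbageCollect():
 * every generation older than the one being collected contributes its
 * mutable list as a set of roots.
 */
#if 0
    for (g = RtsFlags.GcFlags.generations-1; g > N; g--) {
        scavenge_mutable_list(&generations[g]);
    }
#endif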
3946 scavenge_mutable_list(generation *gen)
3951 bd = gen->saved_mut_list;
3954 for (; bd != NULL; bd = bd->link) {
3955 for (q = bd->start; q < bd->free; q++) {
3957 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
3960 switch (get_itbl((StgClosure *)p)->type) {
3962 barf("MUT_VAR_CLEAN on mutable list");
3964 mutlist_MUTVARS++; break;
3965 case MUT_ARR_PTRS_CLEAN:
3966 case MUT_ARR_PTRS_DIRTY:
3967 case MUT_ARR_PTRS_FROZEN:
3968 case MUT_ARR_PTRS_FROZEN0:
3969 mutlist_MUTARRS++; break;
3971 mutlist_OTHERS++; break;
3975 // Check whether this object is "clean", that is it
3976 // definitely doesn't point into a young generation.
3977 // Clean objects don't need to be scavenged. Some clean
3978 // objects (MUT_VAR_CLEAN) are not kept on the mutable
3979 // list at all; others, such as MUT_ARR_PTRS_CLEAN and
3980 // TSO, are always on the mutable list.
3982 switch (get_itbl((StgClosure *)p)->type) {
3983 case MUT_ARR_PTRS_CLEAN:
3984 recordMutableGen((StgClosure *)p,gen);
3987 StgTSO *tso = (StgTSO *)p;
3988 if ((tso->flags & TSO_DIRTY) == 0) {
3989 // A clean TSO: we don't have to traverse its
3990 // stack. However, we *do* follow the link field:
3991 // we don't want to have to mark a TSO dirty just
3992 // because we put it on a different queue.
3993 if (tso->why_blocked != BlockedOnBlackHole) {
3994 tso->link = (StgTSO *)evacuate((StgClosure *)tso->link);
3996 recordMutableGen((StgClosure *)p,gen);
4004 if (scavenge_one(p)) {
4005 // didn't manage to promote everything, so put the
4006 // object back on the list.
4007 recordMutableGen((StgClosure *)p,gen);
4012 // free the old mut_list
4013 freeChain(gen->saved_mut_list);
4014 gen->saved_mut_list = NULL;
4019 scavenge_static(void)
4021 StgClosure* p = static_objects;
4022 const StgInfoTable *info;
4024 /* Always evacuate straight to the oldest generation for static
4026 evac_gen = oldest_gen->no;
4028 /* keep going until we've scavenged all the objects on the linked
4030 while (p != END_OF_STATIC_LIST) {
4032 ASSERT(LOOKS_LIKE_CLOSURE_PTR(p));
4035 if (info->type==RBH)
4036 info = REVERT_INFOPTR(info); // if it's an RBH, look at the orig closure
4038 // make sure the info pointer is into text space
4040 /* Take this object *off* the static_objects list,
4041 * and put it on the scavenged_static_objects list.
4043 static_objects = *STATIC_LINK(info,p);
4044 *STATIC_LINK(info,p) = scavenged_static_objects;
4045 scavenged_static_objects = p;
4047 switch (info -> type) {
4051 StgInd *ind = (StgInd *)p;
4052 ind->indirectee = evacuate(ind->indirectee);
4054 /* might fail to evacuate it, in which case we have to pop it
4055 * back on the mutable list of the oldest generation. We
4056 * leave it *on* the scavenged_static_objects list, though,
4057 * in case we visit this object again.
4059 if (failed_to_evac) {
4060 failed_to_evac = rtsFalse;
4061 recordMutableGen((StgClosure *)p,oldest_gen);
4067 scavenge_thunk_srt(info);
4071 scavenge_fun_srt(info);
4078 next = (P_)p->payload + info->layout.payload.ptrs;
4079 // evacuate the pointers
4080 for (q = (P_)p->payload; q < next; q++) {
4081 *q = (StgWord)(StgPtr)evacuate((StgClosure *)*q);
4087 barf("scavenge_static: strange closure %d", (int)(info->type));
4090 ASSERT(failed_to_evac == rtsFalse);
4092 /* get the next static object from the list. Remember, there might
4093 * be more stuff on this list now that we've done some evacuating!
4094 * (static_objects is a global)
4100 /* -----------------------------------------------------------------------------
4101 scavenge a chunk of memory described by a bitmap
4102 -------------------------------------------------------------------------- */
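/* Worked example of the bitmap convention used here and in
 * scavenge_small_bitmap() below: a *clear* bit means "pointer".  For a
 * three-word payload [ptr, word, ptr], size == 3 and bitmap == binary
 * 010:
 *
 *   bit 0 clear -> payload[0] is a pointer: evacuate it
 *   bit 1 set   -> payload[1] is a non-pointer: skip it
 *   bit 2 clear -> payload[2] is a pointer: evacuate it
 *
 * The bitmap is consumed LSB-first, one right-shift per word.  (Note
 * that the SRT bitmaps above use the opposite sense: a *set* bit marks
 * an entry to evacuate.)
 */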
4105 scavenge_large_bitmap( StgPtr p, StgLargeBitmap *large_bitmap, nat size )
4111 bitmap = large_bitmap->bitmap[b];
4112 for (i = 0; i < size; ) {
4113 if ((bitmap & 1) == 0) {
4114 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4118 if (i % BITS_IN(W_) == 0) {
4120 bitmap = large_bitmap->bitmap[b];
4122 bitmap = bitmap >> 1;
4127 STATIC_INLINE StgPtr
4128 scavenge_small_bitmap (StgPtr p, nat size, StgWord bitmap)
4131 if ((bitmap & 1) == 0) {
4132 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4135 bitmap = bitmap >> 1;
4141 /* -----------------------------------------------------------------------------
4142 scavenge_stack walks over a section of stack and evacuates all the
4143 objects pointed to by it. We can use the same code for walking
4144 AP_STACK_UPDs, since these are just sections of copied stack.
4145 -------------------------------------------------------------------------- */
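/* For example, the AP_STACK case in scavenge() above walks the saved
 * stack chunk with exactly this code:
 *
 *     scavenge_stack((StgPtr)ap->payload, (StgPtr)ap->payload + ap->size);
 */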
4149 scavenge_stack(StgPtr p, StgPtr stack_end)
4151 const StgRetInfoTable* info;
4155 //IF_DEBUG(sanity, debugBelch(" scavenging stack between %p and %p", p, stack_end));
4158 * Each time around this loop, we are looking at a chunk of stack
4159 * that starts with an activation record.
4162 while (p < stack_end) {
4163 info = get_ret_itbl((StgClosure *)p);
4165 switch (info->i.type) {
4168 // In SMP, we can get update frames that point to indirections
4169 // when two threads evaluate the same thunk. We do attempt to
4170 // discover this situation in threadPaused(), but it's
4171 // possible that the following sequence occurs:
4180 // Now T is an indirection, and the update frame is already
4181 // marked on A's stack, so we won't traverse it again in
4182 // threadPaused(). We could traverse the whole stack again
4183 // before GC, but that seems like overkill.
4185 // Scavenging this update frame as normal would be disastrous;
4186 // the updatee would end up pointing to the value. So we turn
4187 // the indirection into an IND_PERM, so that evacuate will
4188 // copy the indirection into the old generation instead of
4190 if (get_itbl(((StgUpdateFrame *)p)->updatee)->type == IND) {
4191 ((StgUpdateFrame *)p)->updatee->header.info =
4192 (StgInfoTable *)&stg_IND_PERM_info;
4194 ((StgUpdateFrame *)p)->updatee
4195 = evacuate(((StgUpdateFrame *)p)->updatee);
4196 p += sizeofW(StgUpdateFrame);
4199 // small bitmap (< 32 entries, or 64 on a 64-bit machine)
4200 case CATCH_STM_FRAME:
4201 case CATCH_RETRY_FRAME:
4202 case ATOMICALLY_FRAME:
4207 bitmap = BITMAP_BITS(info->i.layout.bitmap);
4208 size = BITMAP_SIZE(info->i.layout.bitmap);
4209 // NOTE: the payload starts immediately after the info-ptr, we
4210 // don't have an StgHeader in the same sense as a heap closure.
4212 p = scavenge_small_bitmap(p, size, bitmap);
4216 scavenge_srt((StgClosure **)GET_SRT(info), info->i.srt_bitmap);
4224 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4227 size = BCO_BITMAP_SIZE(bco);
4228 scavenge_large_bitmap(p, BCO_BITMAP(bco), size);
4233 // large bitmap (> 32 entries, or > 64 on a 64-bit machine)
4239 size = GET_LARGE_BITMAP(&info->i)->size;
4241 scavenge_large_bitmap(p, GET_LARGE_BITMAP(&info->i), size);
4243 // and don't forget to follow the SRT
4247 // Dynamic bitmap: the mask is stored on the stack, and
4248 // there are a number of non-pointers followed by a number
4249 // of pointers above the bitmapped area. (see StgMacros.h,
4254 dyn = ((StgRetDyn *)p)->liveness;
4256 // traverse the bitmap first
4257 bitmap = RET_DYN_LIVENESS(dyn);
4258 p = (P_)&((StgRetDyn *)p)->payload[0];
4259 size = RET_DYN_BITMAP_SIZE;
4260 p = scavenge_small_bitmap(p, size, bitmap);
4262 // skip over the non-ptr words
4263 p += RET_DYN_NONPTRS(dyn) + RET_DYN_NONPTR_REGS_SIZE;
4265 // follow the ptr words
4266 for (size = RET_DYN_PTRS(dyn); size > 0; size--) {
4267 *p = (StgWord)(StgPtr)evacuate((StgClosure *)*p);
4275 StgRetFun *ret_fun = (StgRetFun *)p;
4276 StgFunInfoTable *fun_info;
4278 ret_fun->fun = evacuate(ret_fun->fun);
4279 fun_info = get_fun_itbl(ret_fun->fun);
4280 p = scavenge_arg_block(fun_info, ret_fun->payload);
4285 barf("scavenge_stack: weird activation record found on stack: %d", (int)(info->i.type));
4290 /*-----------------------------------------------------------------------------
4291 scavenge the large object list.
4293 evac_gen set by caller; similar games played with evac_gen as with
4294 scavenge() - see comment at the top of scavenge(). Most large
4295 objects are (repeatedly) mutable, so most of the time evac_gen will be zero.
4297 --------------------------------------------------------------------------- */
4300 scavenge_large(step *stp)
4305 bd = stp->new_large_objects;
4307 for (; bd != NULL; bd = stp->new_large_objects) {
4309 /* take this object *off* the large objects list and put it on
4310 * the scavenged large objects list. This is so that we can
4311 * treat new_large_objects as a stack and push new objects on
4312 * the front when evacuating.
4314 stp->new_large_objects = bd->link;
4315 dbl_link_onto(bd, &stp->scavenged_large_objects);
4317 // update the block count in this step.
4318 stp->n_scavenged_large_blocks += bd->blocks;
4321 if (scavenge_one(p)) {
4322 if (stp->gen_no > 0) {
4323 recordMutableGen((StgClosure *)p, stp->gen);
4329 /* -----------------------------------------------------------------------------
4330 Initialising the static object & mutable lists
4331 -------------------------------------------------------------------------- */
4334 zero_static_object_list(StgClosure* first_static)
4338 const StgInfoTable *info;
4340 for (p = first_static; p != END_OF_STATIC_LIST; p = link) {
4342 link = *STATIC_LINK(info, p);
4343 *STATIC_LINK(info,p) = NULL;
4347 /* -----------------------------------------------------------------------------
4349 -------------------------------------------------------------------------- */
4356 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
4357 c = (StgIndStatic *)c->static_link)
4359 SET_INFO(c, c->saved_info);
4360 c->saved_info = NULL;
4361 // could, but not necessary: c->static_link = NULL;
4363 revertible_caf_list = NULL;
4367 markCAFs( evac_fn evac )
4371 for (c = (StgIndStatic *)caf_list; c != NULL;
4372 c = (StgIndStatic *)c->static_link)
4374 evac(&c->indirectee);
4376 for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
4377 c = (StgIndStatic *)c->static_link)
4379 evac(&c->indirectee);
4383 /* -----------------------------------------------------------------------------
4384 Sanity code for CAF garbage collection.
4386 With DEBUG turned on, we manage a CAF list in addition to the SRT
4387 mechanism. After GC, we run down the CAF list and blackhole any
4388 CAFs which have been garbage collected. This means we get an error
4389 whenever the program tries to enter a garbage collected CAF.
4391 Any garbage collected CAFs are taken off the CAF list at the same
4393 -------------------------------------------------------------------------- */
4395 #if 0 && defined(DEBUG)
4402 const StgInfoTable *info;
4413 ASSERT(info->type == IND_STATIC);
4415 if (STATIC_LINK(info,p) == NULL) {
4416 IF_DEBUG(gccafs, debugBelch("CAF gc'd at 0x%04lx", (long)p));
4418 SET_INFO(p,&stg_BLACKHOLE_info);
4419 p = STATIC_LINK2(info,p);
4423 pp = &STATIC_LINK2(info,p);
4430 // debugBelch("%d CAFs live", i);
4435 /* -----------------------------------------------------------------------------
4438 * Code largely pinched from old RTS, then hacked to bits. We also do
4439 * lazy black holing here.
4441 * -------------------------------------------------------------------------- */
4443 struct stack_gap { StgWord gap_size; struct stack_gap *next_gap; };
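/* Worked example (a sketch): given three adjacent update frames, the
 * topmost frame is kept and the two below it become a gap.  The gap
 * marker is written into the bottom-most squeezed frame, with
 * gap_size == 2*sizeofW(StgUpdateFrame) and next_gap pointing at the
 * previous gap (initially a sentinel planted at
 * tso->sp - sizeofW(StgUpdateFrame)).
 */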
4446 stackSqueeze(StgTSO *tso, StgPtr bottom)
4449 rtsBool prev_was_update_frame;
4450 StgClosure *updatee = NULL;
4451 StgRetInfoTable *info;
4452 StgWord current_gap_size;
4453 struct stack_gap *gap;
4456 // Traverse the stack upwards, replacing adjacent update frames
4457 // with a single update frame and a "stack gap". A stack gap
4458 // contains two values: the size of the gap, and the distance
4459 // to the next gap (or the stack top).
4463 ASSERT(frame < bottom);
4465 prev_was_update_frame = rtsFalse;
4466 current_gap_size = 0;
4467 gap = (struct stack_gap *) (tso->sp - sizeofW(StgUpdateFrame));
4469 while (frame < bottom) {
4471 info = get_ret_itbl((StgClosure *)frame);
4472 switch (info->i.type) {
4476 StgUpdateFrame *upd = (StgUpdateFrame *)frame;
4478 if (prev_was_update_frame) {
4480 TICK_UPD_SQUEEZED();
4481 /* wasn't there something about update squeezing and ticky to be
4482 * sorted out? oh yes: we aren't counting each enter properly
4483 * in this case. See the log somewhere. KSW 1999-04-21
4485 * Check two things: that the two update frames don't point to
4486 * the same object, and that the updatee_bypass isn't already an
4487 * indirection. Both of these cases only happen when we're in a
4488 * black-hole-style loop (and there are multiple update frames
4489 * on the stack pointing to the same closure), but they can both
4490 * screw us up if we don't check.
4492 if (upd->updatee != updatee && !closure_IND(upd->updatee)) {
4493 UPD_IND_NOLOCK(upd->updatee, updatee);
4496 // now mark this update frame as a stack gap. The gap
4497 // marker resides in the bottom-most update frame of
4498 // the series of adjacent frames, and covers all the
4499 // frames in this series.
4500 current_gap_size += sizeofW(StgUpdateFrame);
4501 ((struct stack_gap *)frame)->gap_size = current_gap_size;
4502 ((struct stack_gap *)frame)->next_gap = gap;
4504 frame += sizeofW(StgUpdateFrame);
4508 // single update frame, or the topmost update frame in a series
4510 prev_was_update_frame = rtsTrue;
4511 updatee = upd->updatee;
4512 frame += sizeofW(StgUpdateFrame);
4518 prev_was_update_frame = rtsFalse;
4520 // we're not in a gap... check whether this is the end of a gap
4521 // (an update frame can't be the end of a gap).
4522 if (current_gap_size != 0) {
4523 gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
4525 current_gap_size = 0;
4527 frame += stack_frame_sizeW((StgClosure *)frame);
4532 if (current_gap_size != 0) {
4533 gap = (struct stack_gap *) (frame - sizeofW(StgUpdateFrame));
4536 // Now we have a stack with gaps in it, and we have to walk down
4537 // shoving the stack up to fill in the gaps. A diagram might
4541 // | ********* | <- sp
4545 // | stack_gap | <- gap | chunk_size
4547 // | ......... | <- gap_end v
4553 // 'sp' points to the current top-of-stack
4554 // 'gap' points to the stack_gap structure inside the gap
4555 // ***** indicates real stack data
4556 // ..... indicates gap
4557 // <empty> indicates unused
4561 void *gap_start, *next_gap_start, *gap_end;
4564 next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
4565 sp = next_gap_start;
4567 while ((StgPtr)gap > tso->sp) {
4569 // we're working in *bytes* now...
4570 gap_start = next_gap_start;
4571 gap_end = (void*) ((unsigned char*)gap_start - gap->gap_size * sizeof(W_));
4573 gap = gap->next_gap;
4574 next_gap_start = (void *)((unsigned char*)gap + sizeof(StgUpdateFrame));
4576 chunk_size = (unsigned char*)gap_end - (unsigned char*)next_gap_start;
4578 memmove(sp, next_gap_start, chunk_size);
4581 tso->sp = (StgPtr)sp;
4585 /* -----------------------------------------------------------------------------
4588 * We have to prepare for GC - this means doing lazy black holing
4589 * here. We also take the opportunity to do stack squeezing if it's worthwhile.
4591 * -------------------------------------------------------------------------- */
4593 threadPaused(Capability *cap, StgTSO *tso)
4596 StgRetInfoTable *info;
4599 nat words_to_squeeze = 0;
4601 nat weight_pending = 0;
4602 rtsBool prev_was_update_frame;
4604 stack_end = &tso->stack[tso->stack_size];
4606 frame = (StgClosure *)tso->sp;
4609 // If we've already marked this frame, then stop here.
4610 if (frame->header.info == (StgInfoTable *)&stg_marked_upd_frame_info) {
4614 info = get_ret_itbl(frame);
4616 switch (info->i.type) {
4620 SET_INFO(frame, (StgInfoTable *)&stg_marked_upd_frame_info);
4622 bh = ((StgUpdateFrame *)frame)->updatee;
4624 if (closure_IND(bh) || bh->header.info == &stg_BLACKHOLE_info) {
4625 IF_DEBUG(squeeze, debugBelch("suspending duplicate work: %ld words of stack\n", (StgPtr)frame - tso->sp));
4627 // If this closure is already an indirection, then
4628 // suspend the computation up to this point:
4629 suspendComputation(cap,tso,(StgPtr)frame);
4631 // Now drop the update frame, and arrange to return
4632 // the value to the frame underneath:
4633 tso->sp = (StgPtr)frame + sizeofW(StgUpdateFrame) - 2;
4634 tso->sp[1] = (StgWord)bh;
4635 tso->sp[0] = (W_)&stg_enter_info;
4637 // And continue with threadPaused; there might be
4638 // yet more computation to suspend.
4639 threadPaused(cap,tso);
4643 if (bh->header.info != &stg_CAF_BLACKHOLE_info) {
4644 #if (!defined(LAZY_BLACKHOLING)) && defined(DEBUG)
4645 debugBelch("Unexpected lazy BHing required at 0x%04lx\n",(long)bh);
4647 // zero out the slop so that the sanity checker can tell
4648 // where the next closure is.
4649 DEBUG_FILL_SLOP(bh);
4652 // We pretend that bh is now dead.
4653 LDV_recordDead_FILL_SLOP_DYNAMIC((StgClosure *)bh);
4655 SET_INFO(bh,&stg_BLACKHOLE_info);
4657 // We pretend that bh has just been created.
4658 LDV_RECORD_CREATE(bh);
4661 frame = (StgClosure *) ((StgUpdateFrame *)frame + 1);
4662 if (prev_was_update_frame) {
4663 words_to_squeeze += sizeofW(StgUpdateFrame);
4664 weight += weight_pending;
4667 prev_was_update_frame = rtsTrue;
4673 // normal stack frames; do nothing except advance the pointer
4676 nat frame_size = stack_frame_sizeW(frame);
4677 weight_pending += frame_size;
4678 frame = (StgClosure *)((StgPtr)frame + frame_size);
4679 prev_was_update_frame = rtsFalse;
4686 debugBelch("words_to_squeeze: %d, weight: %d, squeeze: %s\n",
4687 words_to_squeeze, weight,
4688 weight < words_to_squeeze ? "YES" : "NO"));
4690 // Should we squeeze or not? Arbitrary heuristic: we squeeze if
4691 // the number of words we have to shift down is less than the
4692 // number of stack words we squeeze away by doing so.
4693 if (RtsFlags.GcFlags.squeezeUpdFrames == rtsTrue &&
4694 weight < words_to_squeeze) {
4695 stackSqueeze(tso, (StgPtr)frame);
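/* Worked example of the heuristic above (a sketch): if squeezing would
 * eliminate 10 words of adjacent update frames (words_to_squeeze == 10)
 * but the frames that would have to be shifted down over the gaps
 * amount to 25 words (weight == 25), we leave the stack alone; if they
 * amount to only 6 words, we squeeze.
 */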
4699 /* -----------------------------------------------------------------------------
4701 * -------------------------------------------------------------------------- */
4705 printMutableList(generation *gen)
4710 debugBelch("@@ Mutable list %p: ", gen->mut_list);
4712 for (bd = gen->mut_list; bd != NULL; bd = bd->link) {
4713 for (p = bd->start; p < bd->free; p++) {
4714 debugBelch("%p (%s), ", (void *)*p, info_type((StgClosure *)*p));